This is page 14 of 29. Use http://codebase.md/tosin2013/documcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .dockerignore
├── .eslintignore
├── .eslintrc.json
├── .github
│   ├── agents
│   │   ├── documcp-ast.md
│   │   ├── documcp-deploy.md
│   │   ├── documcp-memory.md
│   │   ├── documcp-test.md
│   │   └── documcp-tool.md
│   ├── copilot-instructions.md
│   ├── dependabot.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── automated-changelog.md
│   │   ├── bug_report.md
│   │   ├── bug_report.yml
│   │   ├── documentation_issue.md
│   │   ├── feature_request.md
│   │   ├── feature_request.yml
│   │   ├── npm-publishing-fix.md
│   │   └── release_improvements.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── release-drafter.yml
│   └── workflows
│       ├── auto-merge.yml
│       ├── ci.yml
│       ├── codeql.yml
│       ├── dependency-review.yml
│       ├── deploy-docs.yml
│       ├── README.md
│       ├── release-drafter.yml
│       └── release.yml
├── .gitignore
├── .husky
│   ├── commit-msg
│   └── pre-commit
├── .linkcheck.config.json
├── .markdown-link-check.json
├── .nvmrc
├── .pre-commit-config.yaml
├── .versionrc.json
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── CONTRIBUTING.md
├── docker-compose.docs.yml
├── Dockerfile.docs
├── docs
│   ├── .docusaurus
│   │   ├── docusaurus-plugin-content-docs
│   │   │   └── default
│   │   │       └── __mdx-loader-dependency.json
│   │   └── docusaurus-plugin-content-pages
│   │       └── default
│   │           └── __plugin.json
│   ├── adrs
│   │   ├── 001-mcp-server-architecture.md
│   │   ├── 002-repository-analysis-engine.md
│   │   ├── 003-static-site-generator-recommendation-engine.md
│   │   ├── 004-diataxis-framework-integration.md
│   │   ├── 005-github-pages-deployment-automation.md
│   │   ├── 006-mcp-tools-api-design.md
│   │   ├── 007-mcp-prompts-and-resources-integration.md
│   │   ├── 008-intelligent-content-population-engine.md
│   │   ├── 009-content-accuracy-validation-framework.md
│   │   ├── 010-mcp-resource-pattern-redesign.md
│   │   └── README.md
│   ├── api
│   │   ├── .nojekyll
│   │   ├── assets
│   │   │   ├── hierarchy.js
│   │   │   ├── highlight.css
│   │   │   ├── icons.js
│   │   │   ├── icons.svg
│   │   │   ├── main.js
│   │   │   ├── navigation.js
│   │   │   ├── search.js
│   │   │   └── style.css
│   │   ├── hierarchy.html
│   │   ├── index.html
│   │   ├── modules.html
│   │   └── variables
│   │       └── TOOLS.html
│   ├── assets
│   │   └── logo.svg
│   ├── development
│   │   └── MCP_INSPECTOR_TESTING.md
│   ├── docusaurus.config.js
│   ├── explanation
│   │   ├── architecture.md
│   │   └── index.md
│   ├── guides
│   │   ├── link-validation.md
│   │   ├── playwright-integration.md
│   │   └── playwright-testing-workflow.md
│   ├── how-to
│   │   ├── analytics-setup.md
│   │   ├── custom-domains.md
│   │   ├── documentation-freshness-tracking.md
│   │   ├── github-pages-deployment.md
│   │   ├── index.md
│   │   ├── local-testing.md
│   │   ├── performance-optimization.md
│   │   ├── prompting-guide.md
│   │   ├── repository-analysis.md
│   │   ├── seo-optimization.md
│   │   ├── site-monitoring.md
│   │   ├── troubleshooting.md
│   │   └── usage-examples.md
│   ├── index.md
│   ├── knowledge-graph.md
│   ├── package-lock.json
│   ├── package.json
│   ├── phase-2-intelligence.md
│   ├── reference
│   │   ├── api-overview.md
│   │   ├── cli.md
│   │   ├── configuration.md
│   │   ├── deploy-pages.md
│   │   ├── index.md
│   │   ├── mcp-tools.md
│   │   └── prompt-templates.md
│   ├── research
│   │   ├── cross-domain-integration
│   │   │   └── README.md
│   │   ├── domain-1-mcp-architecture
│   │   │   ├── index.md
│   │   │   └── mcp-performance-research.md
│   │   ├── domain-2-repository-analysis
│   │   │   └── README.md
│   │   ├── domain-3-ssg-recommendation
│   │   │   ├── index.md
│   │   │   └── ssg-performance-analysis.md
│   │   ├── domain-4-diataxis-integration
│   │   │   └── README.md
│   │   ├── domain-5-github-deployment
│   │   │   ├── github-pages-security-analysis.md
│   │   │   └── index.md
│   │   ├── domain-6-api-design
│   │   │   └── README.md
│   │   ├── README.md
│   │   ├── research-integration-summary-2025-01-14.md
│   │   ├── research-progress-template.md
│   │   └── research-questions-2025-01-14.md
│   ├── robots.txt
│   ├── sidebars.js
│   ├── sitemap.xml
│   ├── src
│   │   └── css
│   │       └── custom.css
│   └── tutorials
│       ├── development-setup.md
│       ├── environment-setup.md
│       ├── first-deployment.md
│       ├── getting-started.md
│       ├── index.md
│       ├── memory-workflows.md
│       └── user-onboarding.md
├── jest.config.js
├── LICENSE
├── Makefile
├── MCP_PHASE2_IMPLEMENTATION.md
├── mcp-config-example.json
├── mcp.json
├── package-lock.json
├── package.json
├── README.md
├── release.sh
├── scripts
│   └── check-package-structure.cjs
├── SECURITY.md
├── setup-precommit.sh
├── src
│   ├── benchmarks
│   │   └── performance.ts
│   ├── index.ts
│   ├── memory
│   │   ├── contextual-retrieval.ts
│   │   ├── deployment-analytics.ts
│   │   ├── enhanced-manager.ts
│   │   ├── export-import.ts
│   │   ├── freshness-kg-integration.ts
│   │   ├── index.ts
│   │   ├── integration.ts
│   │   ├── kg-code-integration.ts
│   │   ├── kg-health.ts
│   │   ├── kg-integration.ts
│   │   ├── kg-link-validator.ts
│   │   ├── kg-storage.ts
│   │   ├── knowledge-graph.ts
│   │   ├── learning.ts
│   │   ├── manager.ts
│   │   ├── multi-agent-sharing.ts
│   │   ├── pruning.ts
│   │   ├── schemas.ts
│   │   ├── storage.ts
│   │   ├── temporal-analysis.ts
│   │   ├── user-preferences.ts
│   │   └── visualization.ts
│   ├── prompts
│   │   └── technical-writer-prompts.ts
│   ├── scripts
│   │   └── benchmark.ts
│   ├── templates
│   │   └── playwright
│   │       ├── accessibility.spec.template.ts
│   │       ├── Dockerfile.template
│   │       ├── docs-e2e.workflow.template.yml
│   │       ├── link-validation.spec.template.ts
│   │       └── playwright.config.template.ts
│   ├── tools
│   │   ├── analyze-deployments.ts
│   │   ├── analyze-readme.ts
│   │   ├── analyze-repository.ts
│   │   ├── check-documentation-links.ts
│   │   ├── deploy-pages.ts
│   │   ├── detect-gaps.ts
│   │   ├── evaluate-readme-health.ts
│   │   ├── generate-config.ts
│   │   ├── generate-contextual-content.ts
│   │   ├── generate-llm-context.ts
│   │   ├── generate-readme-template.ts
│   │   ├── generate-technical-writer-prompts.ts
│   │   ├── kg-health-check.ts
│   │   ├── manage-preferences.ts
│   │   ├── manage-sitemap.ts
│   │   ├── optimize-readme.ts
│   │   ├── populate-content.ts
│   │   ├── readme-best-practices.ts
│   │   ├── recommend-ssg.ts
│   │   ├── setup-playwright-tests.ts
│   │   ├── setup-structure.ts
│   │   ├── sync-code-to-docs.ts
│   │   ├── test-local-deployment.ts
│   │   ├── track-documentation-freshness.ts
│   │   ├── update-existing-documentation.ts
│   │   ├── validate-content.ts
│   │   ├── validate-documentation-freshness.ts
│   │   ├── validate-readme-checklist.ts
│   │   └── verify-deployment.ts
│   ├── types
│   │   └── api.ts
│   ├── utils
│   │   ├── ast-analyzer.ts
│   │   ├── code-scanner.ts
│   │   ├── content-extractor.ts
│   │   ├── drift-detector.ts
│   │   ├── freshness-tracker.ts
│   │   ├── language-parsers-simple.ts
│   │   ├── permission-checker.ts
│   │   └── sitemap-generator.ts
│   └── workflows
│       └── documentation-workflow.ts
├── test-docs-local.sh
├── tests
│   ├── api
│   │   └── mcp-responses.test.ts
│   ├── benchmarks
│   │   └── performance.test.ts
│   ├── edge-cases
│   │   └── error-handling.test.ts
│   ├── functional
│   │   └── tools.test.ts
│   ├── integration
│   │   ├── kg-documentation-workflow.test.ts
│   │   ├── knowledge-graph-workflow.test.ts
│   │   ├── mcp-readme-tools.test.ts
│   │   ├── memory-mcp-tools.test.ts
│   │   ├── readme-technical-writer.test.ts
│   │   └── workflow.test.ts
│   ├── memory
│   │   ├── contextual-retrieval.test.ts
│   │   ├── enhanced-manager.test.ts
│   │   ├── export-import.test.ts
│   │   ├── freshness-kg-integration.test.ts
│   │   ├── kg-code-integration.test.ts
│   │   ├── kg-health.test.ts
│   │   ├── kg-link-validator.test.ts
│   │   ├── kg-storage-validation.test.ts
│   │   ├── kg-storage.test.ts
│   │   ├── knowledge-graph-enhanced.test.ts
│   │   ├── knowledge-graph.test.ts
│   │   ├── learning.test.ts
│   │   ├── manager-advanced.test.ts
│   │   ├── manager.test.ts
│   │   ├── mcp-resource-integration.test.ts
│   │   ├── mcp-tool-persistence.test.ts
│   │   ├── schemas.test.ts
│   │   ├── storage.test.ts
│   │   ├── temporal-analysis.test.ts
│   │   └── user-preferences.test.ts
│   ├── performance
│   │   ├── memory-load-testing.test.ts
│   │   └── memory-stress-testing.test.ts
│   ├── prompts
│   │   ├── guided-workflow-prompts.test.ts
│   │   └── technical-writer-prompts.test.ts
│   ├── server.test.ts
│   ├── setup.ts
│   ├── tools
│   │   ├── all-tools.test.ts
│   │   ├── analyze-coverage.test.ts
│   │   ├── analyze-deployments.test.ts
│   │   ├── analyze-readme.test.ts
│   │   ├── analyze-repository.test.ts
│   │   ├── check-documentation-links.test.ts
│   │   ├── deploy-pages-kg-retrieval.test.ts
│   │   ├── deploy-pages-tracking.test.ts
│   │   ├── deploy-pages.test.ts
│   │   ├── detect-gaps.test.ts
│   │   ├── evaluate-readme-health.test.ts
│   │   ├── generate-contextual-content.test.ts
│   │   ├── generate-llm-context.test.ts
│   │   ├── generate-readme-template.test.ts
│   │   ├── generate-technical-writer-prompts.test.ts
│   │   ├── kg-health-check.test.ts
│   │   ├── manage-sitemap.test.ts
│   │   ├── optimize-readme.test.ts
│   │   ├── readme-best-practices.test.ts
│   │   ├── recommend-ssg-historical.test.ts
│   │   ├── recommend-ssg-preferences.test.ts
│   │   ├── recommend-ssg.test.ts
│   │   ├── simple-coverage.test.ts
│   │   ├── sync-code-to-docs.test.ts
│   │   ├── test-local-deployment.test.ts
│   │   ├── tool-error-handling.test.ts
│   │   ├── track-documentation-freshness.test.ts
│   │   ├── validate-content.test.ts
│   │   ├── validate-documentation-freshness.test.ts
│   │   └── validate-readme-checklist.test.ts
│   ├── types
│   │   └── type-safety.test.ts
│   └── utils
│       ├── ast-analyzer.test.ts
│       ├── content-extractor.test.ts
│       ├── drift-detector.test.ts
│       ├── freshness-tracker.test.ts
│       └── sitemap-generator.test.ts
├── tsconfig.json
└── typedoc.json
```

# Files

--------------------------------------------------------------------------------
/docs/adrs/005-github-pages-deployment-automation.md:
--------------------------------------------------------------------------------

```markdown
  1 | ---
  2 | id: 005-github-pages-deployment-automation
  3 | title: "ADR-005: GitHub Pages Deployment Automation"
  4 | sidebar_label: "ADR-5: GitHub Pages Deployment Automation"
  5 | sidebar_position: 5
  6 | documcp:
  7 |   last_updated: "2025-11-20T00:46:21.939Z"
  8 |   last_validated: "2025-11-20T00:46:21.939Z"
  9 |   auto_updated: false
 10 |   update_frequency: monthly
 11 | ---
 12 | 
 13 | # ADR-005: GitHub Pages Deployment Automation Architecture
 14 | 
 15 | ## Status
 16 | 
 17 | Accepted
 18 | 
 19 | ## Context
 20 | 
 21 | DocuMCP must provide seamless, automated deployment of documentation sites to GitHub Pages. This requires a sophisticated understanding of GitHub Pages capabilities, limitations, and best practices, along with intelligent generation of CI/CD workflows that adapt to different static site generators and project configurations.
 22 | 
 23 | GitHub Pages deployment complexity factors:
 24 | 
 25 | - **Multiple deployment methods**: GitHub Actions, branch-based, legacy Jekyll
 26 | - **SSG-specific requirements**: Different build tools, dependencies, and configurations
 27 | - **Security considerations**: Secrets management, workflow permissions, dependency vulnerabilities
 28 | - **Performance optimization**: Build caching, incremental builds, deployment strategies
 29 | - **Troubleshooting support**: Common failure modes, debugging guidance, health checks
 30 | 
 31 | Key challenges:
 32 | 
 33 | - Each SSG has unique deployment requirements and optimal configurations
 34 | - GitHub Actions workflows need to be maintainable and debuggable
 35 | - Repository settings and branch configurations must be properly managed
 36 | - Users need clear guidance for initial deployment and ongoing maintenance
 37 | 
 38 | ## Decision
 39 | 
 40 | We will implement a comprehensive GitHub Pages deployment orchestration system that generates optimized, SSG-specific GitHub Actions workflows with intelligent configuration, error handling, and verification capabilities.
 41 | 
 42 | ### Deployment Architecture Components
 43 | 
 44 | #### 1. Workflow Generation Engine
 45 | 
 46 | - **SSG-specific workflow templates** optimized for each supported generator
 47 | - **Intelligent dependency management** with version pinning and security updates
 48 | - **Build optimization** including caching strategies and incremental builds
 49 | - **Error handling and debugging** with comprehensive logging and failure analysis
 50 | 
 51 | #### 2. Repository Configuration Management (Enhanced with Security Research)
 52 | 
 53 | - **Automated repository settings** for GitHub Pages configuration
 54 | - **Branch management guidance** for different deployment strategies
 55 | - **Security configuration** including workflow permissions and secrets management
 56 | - **Health check integration** for deployment verification
 57 | 
 58 | **Research-Validated Security Enhancements**:
 59 | 
 60 | - **OIDC Token Authentication**: Implements JWT-based deployment validation with branch protection
 61 | - **Minimal Permission Principle**: Generates workflows with only required `pages: write` and `id-token: write` permissions
 62 | - **Environment Protection**: Default environment rules with required reviewers for production deployments
 63 | - **Automated Security Scanning**: Integrated secret scanning and vulnerability assessment
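
A minimal sketch of the OIDC validation step, assuming token signatures have already been verified upstream. The `repository` and `ref` claim names follow GitHub's documented OIDC token format, but the policy shape and function below are illustrative, not the shipped implementation:

```typescript
// Illustrative sketch: validate decoded GitHub OIDC claims against a
// deployment policy. Assumes cryptographic verification happened upstream.
interface GitHubOidcClaims {
  repository: string; // e.g. "owner/repo"
  ref: string; // e.g. "refs/heads/main"
}

interface DeploymentPolicy {
  allowedRepository: string;
  deployableBranches: string[];
}

function validateDeploymentClaims(
  claims: GitHubOidcClaims,
  policy: DeploymentPolicy,
): { allowed: boolean; reason?: string } {
  if (claims.repository !== policy.allowedRepository) {
    return { allowed: false, reason: `Unexpected repository: ${claims.repository}` };
  }
  const branch = claims.ref.replace("refs/heads/", "");
  if (!policy.deployableBranches.includes(branch)) {
    return { allowed: false, reason: `Branch not approved for deployment: ${branch}` };
  }
  return { allowed: true };
}
```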
 64 | 
 65 | #### 3. Deployment Strategy Selection
 66 | 
 67 | - **Branch-based deployment** for simple sites with minimal build requirements
 68 | - **GitHub Actions deployment** for complex builds requiring custom environments
 69 | - **Hybrid approaches** combining native Jekyll support with custom processing
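
As a rough illustration, the selection could branch on a few project traits; the input shape below is an assumption, not the real `ProjectAnalysis` schema:

```typescript
type DeploymentStrategy = "branch-based" | "github-actions" | "hybrid";

// Hypothetical inputs for illustration only.
interface StrategyInputs {
  ssg: "jekyll" | "hugo" | "docusaurus" | "mkdocs" | "eleventy";
  hasCustomBuildSteps: boolean;
  requiresEnvironmentSecrets: boolean;
}

function selectDeploymentStrategy(inputs: StrategyInputs): DeploymentStrategy {
  // Native Jekyll support plus custom processing maps to the hybrid approach.
  if (inputs.ssg === "jekyll" && inputs.hasCustomBuildSteps) return "hybrid";
  // Complex or secret-dependent builds need a full Actions workflow.
  if (inputs.hasCustomBuildSteps || inputs.requiresEnvironmentSecrets) {
    return "github-actions";
  }
  // Simple Jekyll sites can deploy straight from a branch; other SSGs
  // still need a build step, so they default to GitHub Actions.
  return inputs.ssg === "jekyll" ? "branch-based" : "github-actions";
}
```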
 70 | 
 71 | #### 4. Monitoring and Troubleshooting
 72 | 
 73 | - **Deployment verification** with automated health checks and accessibility testing
 74 | - **Common failure diagnosis** with specific remediation guidance
 75 | - **Performance monitoring** with build time optimization recommendations
 76 | - **Maintenance guidance** for ongoing workflow and dependency management
 77 | 
 78 | ## Alternatives Considered
 79 | 
 80 | ### Manual Deployment Setup
 81 | 
 82 | - **Pros**: Full user control, educational value, flexible configuration
 83 | - **Cons**: High learning curve, error-prone, inconsistent results
 84 | - **Decision**: Rejected due to complexity and poor user experience
 85 | 
 86 | ### Third-Party Deployment Services (Netlify, Vercel)
 87 | 
 88 | - **Pros**: Advanced features, excellent performance, minimal configuration
 89 | - **Cons**: Cost for advanced features, vendor lock-in, less GitHub integration
 90 | - **Decision**: Rejected to maintain GitHub-native workflow and free hosting
 91 | 
 92 | ### Universal Deployment Workflow
 93 | 
 94 | - **Pros**: Simpler implementation, consistent user experience
 95 | - **Cons**: Suboptimal for specific SSGs, limited optimization opportunities
 96 | - **Decision**: Rejected in favor of SSG-optimized approaches
 97 | 
 98 | ### Container-Based Deployment
 99 | 
100 | - **Pros**: Consistent environments, advanced dependency management
101 | - **Cons**: Complexity overhead, slower builds, GitHub Actions limitations
102 | - **Decision**: Rejected for initial version; consider for advanced scenarios
103 | 
104 | ## Consequences
105 | 
106 | ### Positive
107 | 
108 | - **Optimized Performance**: SSG-specific workflows provide optimal build times and caching
109 | - **Reliable Deployment**: Comprehensive error handling and verification reduce failure rates
110 | - **Maintainable Workflows**: Generated workflows follow best practices and include documentation
111 | - **Debugging Support**: Clear error messages and troubleshooting guidance reduce support burden
112 | - **Security Best Practices**: Automated security configuration and dependency management
113 | 
114 | ### Negative
115 | 
116 | - **Implementation Complexity**: Multiple SSG-specific templates require significant maintenance
117 | - **GitHub Dependency**: Tight coupling to GitHub Actions and Pages infrastructure
118 | - **Template Maintenance**: Regular updates needed as SSGs and GitHub features evolve
119 | 
120 | ### Risks and Mitigations
121 | 
122 | - **Workflow Obsolescence**: Regular testing and updates of generated workflows
123 | - **GitHub API Changes**: Monitoring of GitHub features and migration planning
124 | - **Security Vulnerabilities**: Automated dependency scanning and update recommendations
125 | 
126 | ## Implementation Details
127 | 
128 | ### Workflow Template System
129 | 
130 | ```typescript
131 | interface WorkflowTemplate {
132 |   name: string;
133 |   triggers: WorkflowTrigger[];
134 |   jobs: WorkflowJob[];
135 |   permissions: WorkflowPermissions;
136 |   environment: EnvironmentConfig;
137 | }
138 | 
139 | interface SSGWorkflowConfig {
140 |   buildCommand: string;
141 |   outputDirectory: string;
142 |   dependencies: DependencyConfig;
143 |   caching: CacheConfig;
144 |   environmentVariables: EnvironmentVariable[];
145 | }
146 | 
147 | const WORKFLOW_TEMPLATES: Record<SSGType, WorkflowTemplate> = {
148 |   hugo: createHugoWorkflow(),
149 |   jekyll: createJekyllWorkflow(),
150 |   docusaurus: createDocusaurusWorkflow(),
151 |   mkdocs: createMkDocsWorkflow(),
152 |   eleventy: createEleventyWorkflow(),
153 | };
154 | ```
155 | 
156 | ### Hugo Workflow Template
157 | 
158 | ```yaml
159 | name: Deploy Hugo Documentation
160 | 
161 | on:
162 |   push:
163 |     branches: [main]
164 |   pull_request:
165 |     branches: [main]
166 |   workflow_dispatch:
167 | 
168 | permissions:
169 |   contents: read
170 |   pages: write
171 |   id-token: write
172 | 
173 | concurrency:
174 |   group: "pages"
175 |   cancel-in-progress: false
176 | 
177 | jobs:
178 |   build:
179 |     runs-on: ubuntu-latest
180 |     steps:
181 |       - name: Checkout
182 |         uses: actions/checkout@v4
183 |         with:
184 |           submodules: recursive
185 |           fetch-depth: 0
186 | 
187 |       - name: Setup Hugo
188 |         uses: peaceiris/actions-hugo@v2
189 |         with:
190 |           hugo-version: "{{ hugo_version }}"
191 |           extended: "{{ hugo_extended }}"
192 | 
193 |       - name: Setup Pages
194 |         id: pages
195 |         uses: actions/configure-pages@v5
196 | 
197 |       - name: Build with Hugo
198 |         env:
199 |           HUGO_ENVIRONMENT: production
200 |           HUGO_ENV: production
201 |         run: |
202 |           hugo \
203 |             --gc \
204 |             --minify \
205 |             --baseURL "${{ steps.pages.outputs.base_url }}/"
206 | 
207 |       - name: Upload artifact
208 |         uses: actions/upload-pages-artifact@v4
209 |         with:
210 |           path: ./public
211 | 
212 |   deploy:
213 |     environment:
214 |       name: github-pages
215 |       url: ${{ steps.deployment.outputs.page_url }}
216 |     runs-on: ubuntu-latest
217 |     needs: build
218 |     permissions:
219 |       contents: read
220 |       pages: write
221 |       id-token: write
222 |     steps:
223 |       - name: Deploy to GitHub Pages
224 |         id: deployment
225 |         uses: actions/deploy-pages@v4
226 | ```
227 | 
228 | ### Docusaurus Workflow Template
229 | 
230 | ```yaml
231 | name: Deploy Docusaurus Documentation
232 | 
233 | on:
234 |   push:
235 |     branches: [main]
236 |   workflow_dispatch:
237 | 
238 | permissions:
239 |   contents: read
240 |   pages: write
241 |   id-token: write
242 | 
243 | concurrency:
244 |   group: "pages"
245 |   cancel-in-progress: false
246 | 
247 | jobs:
248 |   build:
249 |     runs-on: ubuntu-latest
250 |     steps:
251 |       - name: Checkout
252 |         uses: actions/checkout@v4
253 | 
254 |       - name: Setup Node.js
255 |         uses: actions/setup-node@v4
256 |         with:
257 |           node-version: "{{ node_version }}"
258 |           cache: "{{ package_manager }}"
259 | 
260 |       - name: Install dependencies
261 |         run: "{{ install_command }}"
262 | 
263 |       - name: Build website
264 |         run: "{{ build_command }}"
265 | 
266 |       - name: Setup Pages
267 |         uses: actions/configure-pages@v5
268 | 
269 |       - name: Upload artifact
270 |         uses: actions/upload-pages-artifact@v4
271 |         with:
272 |           path: "{{ build_output_directory }}"
273 | 
274 |   deploy:
275 |     environment:
276 |       name: github-pages
277 |       url: ${{ steps.deployment.outputs.page_url }}
278 |     runs-on: ubuntu-latest
279 |     needs: build
280 |     permissions:
281 |       contents: read
282 |       pages: write
283 |       id-token: write
284 |     steps:
285 |       - name: Deploy to GitHub Pages
286 |         id: deployment
287 |         uses: actions/deploy-pages@v4
288 | ```
289 | 
290 | ### Workflow Generation Logic
291 | 
292 | ```typescript
293 | interface WorkflowGenerationConfig {
294 |   ssg: SSGType;
295 |   projectAnalysis: ProjectAnalysis;
296 |   deploymentPreferences: DeploymentPreferences;
297 |   securityRequirements: SecurityConfig;
298 | }
299 | 
300 | class WorkflowGenerator {
301 |   generateWorkflow(config: WorkflowGenerationConfig): WorkflowDefinition {
302 |     const template = this.getSSGTemplate(config.ssg);
303 |     const customizations = this.analyzeCustomizations(config.projectAnalysis);
304 |     const optimizations = this.calculateOptimizations(config);
305 | 
306 |     return this.mergeConfiguration(template, customizations, optimizations);
307 |   }
308 | 
309 |   private getSSGTemplate(ssg: SSGType): WorkflowTemplate {
310 |     return WORKFLOW_TEMPLATES[ssg];
311 |   }
312 | 
313 |   private analyzeCustomizations(
314 |     analysis: ProjectAnalysis,
315 |   ): WorkflowCustomizations {
316 |     return {
317 |       nodeVersion: this.detectNodeVersion(analysis),
318 |       packageManager: this.detectPackageManager(analysis),
319 |       buildCommand: this.detectBuildCommand(analysis),
320 |       outputDirectory: this.detectOutputDirectory(analysis),
321 |       environmentVariables: this.extractEnvironmentNeeds(analysis),
322 |     };
323 |   }
324 | 
325 |   private calculateOptimizations(
326 |     config: WorkflowGenerationConfig,
327 |   ): WorkflowOptimizations {
328 |     return {
329 |       caching: this.calculateCachingStrategy(config),
330 |       parallelization: this.identifyParallelizationOpportunities(config),
331 |       incrementalBuild: this.assessIncrementalBuildOptions(config),
332 |       securityHardening: this.applySecurityBestPractices(config),
333 |     };
334 |   }
335 | }
336 | ```
337 | 
338 | ### Repository Configuration Management
339 | 
340 | ```typescript
341 | interface RepositoryConfiguration {
342 |   pagesSource: PagesSourceConfig;
343 |   branchProtection: BranchProtectionConfig;
344 |   secrets: SecretsConfig;
345 |   environmentSettings: EnvironmentSettings;
346 | }
347 | 
348 | class RepositoryConfigurationManager {
349 |   async configureRepository(
350 |     repoPath: string,
351 |     config: RepositoryConfiguration,
352 |   ): Promise<ConfigurationResult> {
353 |     try {
354 |       return {
355 |         pagesConfiguration: await this.configurePagesSettings(
356 |           config.pagesSource,
357 |         ),
358 |         branchSetup: await this.setupBranchConfiguration(
359 |           config.branchProtection,
360 |         ),
361 |         secretsManagement: await this.configureSecrets(config.secrets),
362 |         environmentSetup: await this.setupEnvironments(
363 |           config.environmentSettings,
364 |         ),
365 |       };
366 |     } catch (error) {
367 |       console.error(`Failed to configure repository at ${repoPath}:`, error);
368 |       throw new Error(
369 |         `Repository configuration failed: ${
370 |           error instanceof Error ? error.message : "Unknown error"
371 |         }`,
372 |       );
373 |     }
374 |   }
375 | 
376 |   private async configurePagesSettings(
377 |     config: PagesSourceConfig,
378 |   ): Promise<void> {
379 |     // Configure GitHub Pages source (GitHub Actions vs. branch-based)
380 |     // Set custom domain if specified
381 |     // Configure HTTPS enforcement
382 |   }
383 | 
384 |   private async setupBranchConfiguration(
385 |     config: BranchProtectionConfig,
386 |   ): Promise<void> {
387 |     // Create gh-pages branch if needed
388 |     // Configure branch protection rules
389 |     // Set up required status checks
390 |   }
391 | }
392 | ```
393 | 
394 | ### Deployment Verification System
395 | 
396 | ```typescript
397 | interface DeploymentVerification {
398 |   healthChecks: HealthCheck[];
399 |   performanceTests: PerformanceTest[];
400 |   accessibilityTests: AccessibilityTest[];
401 |   linkValidation: LinkValidationConfig;
402 | }
403 | 
404 | class DeploymentVerifier {
405 |   async verifyDeployment(
406 |     siteUrl: string,
407 |     config: DeploymentVerification,
408 |   ): Promise<VerificationReport> {
409 |     try {
410 |       const results = await Promise.allSettled([
411 |         this.runHealthChecks(siteUrl, config.healthChecks),
412 |         this.runPerformanceTests(siteUrl, config.performanceTests),
413 |         this.runAccessibilityTests(siteUrl, config.accessibilityTests),
414 |         this.validateLinks(siteUrl, config.linkValidation),
415 |       ]);
416 | 
417 |       // Handle partial failures gracefully
418 |       const processedResults = results
419 |         .map((result, index) => {
420 |           if (result.status === "fulfilled") {
421 |             return result.value;
422 |           } else {
423 |             console.warn(`Verification step ${index} failed:`, result.reason);
424 |             return null;
425 |           }
426 |         })
427 |         .filter((result) => result !== null);
428 | 
429 |       return this.generateVerificationReport(processedResults);
430 |     } catch (error) {
431 |       throw new Error(`Deployment verification failed: ${error instanceof Error ? error.message : "Unknown error"}`);
432 |     }
433 |   }
434 | 
435 |   private async runHealthChecks(
436 |     siteUrl: string,
437 |     checks: HealthCheck[],
438 |   ): Promise<HealthCheckResult[]> {
439 |     try {
440 |       const results = await Promise.allSettled(
441 |         checks.map((check) => this.executeHealthCheck(siteUrl, check)),
442 |       );
443 | 
444 |       return results
445 |         .filter(
446 |           (result): result is PromiseFulfilledResult<HealthCheckResult> =>
447 |             result.status === "fulfilled",
448 |         )
449 |         .map((result) => result.value);
450 |     } catch (error) {
451 |       throw new Error(`Health checks failed: ${error instanceof Error ? error.message : "Unknown error"}`);
452 |     }
453 |   }
454 | 
455 |   private async executeHealthCheck(
456 |     siteUrl: string,
457 |     check: HealthCheck,
458 |   ): Promise<HealthCheckResult> {
459 |     // Verify site accessibility
460 |     // Check for broken links
461 |     // Validate content rendering
462 |     // Test mobile responsiveness
463 |     // Verify search functionality if applicable
464 |   }
465 | }
466 | ```
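
The `executeHealthCheck` body above is intentionally elided; a minimal reachability check could look like the following (the simplified check/result shapes are assumptions, and the global `fetch` plus `AbortSignal.timeout` require Node 18+):

```typescript
// Illustrative sketch: a single HTTP reachability check with a timeout.
interface SimpleHealthCheck {
  path: string; // e.g. "/" or "/search/index.json"
  timeoutMs: number;
}

interface SimpleHealthCheckResult {
  url: string;
  ok: boolean;
  status?: number;
  error?: string;
}

async function checkEndpoint(
  siteUrl: string,
  check: SimpleHealthCheck,
): Promise<SimpleHealthCheckResult> {
  const url = new URL(check.path, siteUrl).toString();
  try {
    // AbortSignal.timeout aborts the request if the site is unresponsive.
    const response = await fetch(url, {
      signal: AbortSignal.timeout(check.timeoutMs),
    });
    return { url, ok: response.ok, status: response.status };
  } catch (error) {
    return {
      url,
      ok: false,
      error: error instanceof Error ? error.message : String(error),
    };
  }
}
```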
467 | 
468 | ### Error Handling and Troubleshooting
469 | 
470 | ```typescript
471 | interface TroubleshootingGuide {
472 |   commonErrors: ErrorPattern[];
473 |   diagnosticSteps: DiagnosticStep[];
474 |   resolutionGuides: ResolutionGuide[];
475 |   escalationPaths: EscalationPath[];
476 | }
477 | 
478 | const COMMON_DEPLOYMENT_ERRORS: ErrorPattern[] = [
479 |   {
480 |     pattern: /ENOENT.*package\.json/,
481 |     category: "dependency",
482 |     description: "Package.json not found or missing dependencies",
483 |     resolution: "Verify package.json exists and run npm install",
484 |     preventionTips: [
485 |       "Always commit package.json",
486 |       "Use package-lock.json for version consistency",
487 |     ],
488 |   },
489 |   {
490 |     pattern: /Permission denied.*write/,
491 |     category: "permissions",
492 |     description: "Insufficient permissions for GitHub Pages deployment",
493 |     resolution: "Check workflow permissions and repository settings",
494 |     preventionTips: [
495 |       "Use recommended workflow permissions",
496 |       "Verify Pages deployment source",
497 |     ],
498 |   },
499 |   // ... additional error patterns
500 | ];
501 | 
502 | class DeploymentTroubleshooter {
503 |   analyzeBuildFailure(buildLog: string): TroubleshootingReport {
504 |     const detectedErrors = this.detectErrorPatterns(buildLog);
505 |     const diagnosticResults = this.runDiagnostics(detectedErrors);
506 |     const resolutionSteps = this.generateResolutionSteps(detectedErrors);
507 | 
508 |     return {
509 |       detectedIssues: detectedErrors,
510 |       diagnostics: diagnosticResults,
511 |       recommendedActions: resolutionSteps,
512 |       escalationGuidance: this.getEscalationGuidance(detectedErrors),
513 |     };
514 |   }
515 | }
516 | ```
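
The `detectErrorPatterns` step referenced above can be as simple as a regex scan over the build log. A sketch, reusing the `ErrorPattern` and `COMMON_DEPLOYMENT_ERRORS` definitions from the block above:

```typescript
// Illustrative sketch: match a build log against the known error patterns.
function detectErrorPatterns(
  buildLog: string,
  patterns: ErrorPattern[] = COMMON_DEPLOYMENT_ERRORS,
): ErrorPattern[] {
  return patterns.filter((p) => p.pattern.test(buildLog));
}

// Example: a failed npm step surfaces the dependency guidance.
const matches = detectErrorPatterns(
  "npm ERR! ENOENT: no such file or directory, open '/app/package.json'",
);
for (const match of matches) {
  console.log(`[${match.category}] ${match.description} -> ${match.resolution}`);
}
```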
517 | 
518 | ## Security Considerations
519 | 
520 | ### Workflow Security Best Practices
521 | 
522 | - **Minimal Permissions**: Use least privilege principle for workflow permissions
523 | - **Dependency Scanning**: Automated vulnerability detection in build dependencies
524 | - **Secrets Management**: Proper handling of sensitive configuration data
525 | - **Supply Chain Security**: Pin action versions and verify checksums
526 | 
527 | ### Security Configuration Template
528 | 
529 | ```yaml
530 | permissions:
531 |   contents: read # Read repository contents
532 |   pages: write # Deploy to GitHub Pages
533 |   id-token: write # Use OIDC token for authentication
534 | 
535 | security:
536 |   dependency-scanning:
537 |     enabled: true
538 |     auto-update: true
539 | 
540 |   workflow-hardening:
541 |     pin-actions: true
542 |     verify-checksums: true
543 |     minimal-permissions: true
544 | ```
545 | 
546 | ## Performance Optimization
547 | 
548 | ### Build Optimization Strategies
549 | 
550 | - **Intelligent Caching**: Cache dependencies, build artifacts, and intermediate files
551 | - **Incremental Builds**: Build only changed content when possible
552 | - **Parallel Processing**: Utilize available CPU cores for build tasks
553 | - **Resource Optimization**: Optimize memory usage and disk I/O
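
For instance, cache paths and key prefixes could be derived per SSG roughly as follows (the paths reflect each tool's common cache locations; the config shape itself is an assumption):

```typescript
// Illustrative sketch: derive actions/cache-style settings per SSG.
interface SsgCacheConfig {
  paths: string[]; // directories worth caching between builds
  keyPrefix: string; // combined with a lockfile hash to form the cache key
}

const CACHE_CONFIGS: Record<string, SsgCacheConfig> = {
  // Hugo keeps processed assets under resources/_gen.
  hugo: { paths: ["resources/_gen"], keyPrefix: "hugo-" },
  // Node-based SSGs benefit most from caching node_modules.
  docusaurus: { paths: ["node_modules", ".docusaurus"], keyPrefix: "docusaurus-" },
  eleventy: { paths: ["node_modules"], keyPrefix: "eleventy-" },
  // Python-based MkDocs caches pip downloads.
  mkdocs: { paths: ["~/.cache/pip"], keyPrefix: "mkdocs-" },
  // Jekyll with bundler commonly installs gems into vendor/bundle.
  jekyll: { paths: ["vendor/bundle"], keyPrefix: "jekyll-" },
};

function cacheKeyFor(ssg: string, lockfileHash: string): string {
  return `${CACHE_CONFIGS[ssg]?.keyPrefix ?? "site-"}${lockfileHash}`;
}
```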
554 | 
555 | ### Performance Monitoring
556 | 
557 | ```typescript
558 | interface BuildPerformanceMetrics {
559 |   totalBuildTime: number;
560 |   dependencyInstallTime: number;
561 |   compilationTime: number;
562 |   deploymentTime: number;
563 |   cacheHitRate: number;
564 |   resourceUsage: ResourceUsageMetrics;
565 | }
566 | 
567 | class PerformanceMonitor {
568 |   trackBuildPerformance(buildLog: string): BuildPerformanceMetrics {
569 |     return {
570 |       totalBuildTime: this.extractTotalTime(buildLog),
571 |       dependencyInstallTime: this.extractDependencyTime(buildLog),
572 |       compilationTime: this.extractCompilationTime(buildLog),
573 |       deploymentTime: this.extractDeploymentTime(buildLog),
574 |       cacheHitRate: this.calculateCacheEfficiency(buildLog),
575 |       resourceUsage: this.analyzeResourceUsage(buildLog),
576 |     };
577 |   }
578 | 
579 |   generateOptimizationRecommendations(
580 |     metrics: BuildPerformanceMetrics,
581 |   ): OptimizationRecommendation[] {
582 |     const recommendations: OptimizationRecommendation[] = [];
583 | 
584 |     if (metrics.cacheHitRate < 0.8) {
585 |       recommendations.push({
586 |         type: "caching",
587 |         priority: "high",
588 |         description: "Improve caching strategy to reduce build times",
589 |         implementation:
590 |           "Configure additional cache paths and improve cache keys",
591 |       });
592 |     }
593 | 
594 |     return recommendations;
595 |   }
596 | }
597 | ```
598 | 
599 | ## Future Enhancements
600 | 
601 | ### Advanced Deployment Features
602 | 
603 | - **Multi-environment deployment**: Staging and production environment management
604 | - **Blue-green deployments**: Zero-downtime deployment strategies
605 | - **Rollback capabilities**: Automated rollback on deployment failures
606 | - **A/B testing support**: Deploy multiple versions for testing
607 | 
608 | ### Integration Enhancements
609 | 
610 | - **CDN integration**: Automatic CDN configuration for improved performance
611 | - **Analytics integration**: Built-in analytics and monitoring setup
612 | - **Search integration**: Automated search index generation and deployment
613 | - **Monitoring integration**: Health monitoring and alerting setup
614 | 
615 | ## Testing Strategy
616 | 
617 | ### Workflow Testing
618 | 
619 | - **Unit tests**: Individual workflow components and template generation
620 | - **Integration tests**: Full deployment workflows across different SSGs
621 | - **End-to-end tests**: Complete documentation site deployment and verification
622 | - **Performance tests**: Build time and resource usage benchmarks
623 | 
624 | ### Validation Framework
625 | 
626 | ```typescript
627 | describe("DeploymentWorkflows", () => {
628 |   it("should generate valid Hugo workflow for typical project");
629 |   it("should handle complex Docusaurus configuration");
630 |   it("should optimize caching for large MkDocs sites");
631 |   it("should provide meaningful error messages for common failures");
632 |   it("should verify successful deployment and site accessibility");
633 | });
634 | ```
635 | 
636 | ## References
637 | 
638 | - [GitHub Pages Documentation](https://docs.github.com/en/pages)
639 | - [GitHub Actions Best Practices](https://docs.github.com/en/actions/security-guides/security-hardening-for-github-actions)
640 | - [Static Site Deployment Strategies](https://jamstack.org/best-practices/)
641 | - [JAMstack Architecture Guide](https://jamstack.org/what-is-jamstack/)
642 | 
```

--------------------------------------------------------------------------------
/tests/tools/all-tools.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | // Comprehensive tests for all MCP tools to achieve 80% coverage
  2 | import { promises as fs } from "fs";
  3 | import path from "path";
  4 | import os from "os";
  5 | import tmp from "tmp";
  6 | import { analyzeRepository } from "../../src/tools/analyze-repository";
  7 | import { recommendSSG } from "../../src/tools/recommend-ssg";
  8 | import { generateConfig } from "../../src/tools/generate-config";
  9 | import { setupStructure } from "../../src/tools/setup-structure";
 10 | import { deployPages } from "../../src/tools/deploy-pages";
 11 | import { verifyDeployment } from "../../src/tools/verify-deployment";
 12 | import { evaluateReadmeHealth } from "../../src/tools/evaluate-readme-health";
 13 | import { readmeBestPractices } from "../../src/tools/readme-best-practices";
 14 | 
 15 | describe("All MCP Tools Coverage Tests", () => {
 16 |   let tempDir: string;
 17 | 
 18 |   beforeAll(async () => {
 19 |     tempDir = path.join(os.tmpdir(), "documcp-coverage-tests");
 20 |     await fs.mkdir(tempDir, { recursive: true });
 21 |   });
 22 | 
 23 |   afterAll(async () => {
 24 |     try {
 25 |       await fs.rm(tempDir, { recursive: true, force: true });
 26 |     } catch (error) {
 27 |       // Ignore cleanup errors
 28 |     }
 29 |   });
 30 | 
 31 |   describe("analyze_repository", () => {
 32 |     it("should analyze JavaScript project", async () => {
 33 |       const jsRepo = await createJSRepo();
 34 |       const result = await analyzeRepository({
 35 |         path: jsRepo,
 36 |         depth: "standard",
 37 |       });
 38 | 
 39 |       expect(result.content).toBeDefined();
 40 |       const analysisData = JSON.parse(
 41 |         result.content.find((c) => c.text.includes('"ecosystem"'))!.text,
 42 |       );
 43 |       expect(analysisData.dependencies.ecosystem).toBe("javascript");
 44 |     });
 45 | 
 46 |     it("should analyze Python project", async () => {
 47 |       const pyRepo = await createPythonRepo();
 48 |       const result = await analyzeRepository({
 49 |         path: pyRepo,
 50 |         depth: "standard",
 51 |       });
 52 | 
 53 |       expect(result.content).toBeDefined();
 54 |       const analysisData = JSON.parse(
 55 |         result.content.find((c) => c.text.includes('"ecosystem"'))!.text,
 56 |       );
 57 |       expect(analysisData.dependencies.ecosystem).toBe("python");
 58 |     });
 59 | 
 60 |     it("should handle different depths", async () => {
 61 |       const repo = await createJSRepo();
 62 | 
 63 |       const quickResult = await analyzeRepository({
 64 |         path: repo,
 65 |         depth: "quick",
 66 |       });
 67 |       const deepResult = await analyzeRepository({ path: repo, depth: "deep" });
 68 | 
 69 |       expect(quickResult.content).toBeDefined();
 70 |       expect(deepResult.content).toBeDefined();
 71 |     });
 72 | 
 73 |     it("should detect CI/CD workflows", async () => {
 74 |       const ciRepo = await createRepoWithCI();
 75 |       const result = await analyzeRepository({
 76 |         path: ciRepo,
 77 |         depth: "standard",
 78 |       });
 79 | 
 80 |       const analysisData = JSON.parse(
 81 |         result.content.find((c) => c.text.includes('"hasCI"'))!.text,
 82 |       );
 83 |       expect(analysisData.structure.hasCI).toBe(true);
 84 |     });
 85 | 
 86 |     it("should handle repository without dependencies", async () => {
 87 |       const emptyRepo = await createEmptyRepo();
 88 |       const result = await analyzeRepository({
 89 |         path: emptyRepo,
 90 |         depth: "standard",
 91 |       });
 92 | 
 93 |       const analysisData = JSON.parse(
 94 |         result.content.find((c) => c.text.includes('"ecosystem"'))!.text,
 95 |       );
 96 |       expect(analysisData.dependencies.ecosystem).toBe("unknown");
 97 |     });
 98 |   });
 99 | 
100 |   describe("recommend_ssg", () => {
101 |     it("should provide recommendation with confidence", async () => {
102 |       const result = await recommendSSG({ analysisId: "test-123" });
103 | 
104 |       expect(result.content).toBeDefined();
105 |       const recData = JSON.parse(
106 |         result.content.find((c) => c.text.includes('"confidence"'))!.text,
107 |       );
108 |       expect(recData.confidence).toBeGreaterThan(0);
109 |       expect(recData.confidence).toBeLessThanOrEqual(1);
110 |     });
111 | 
112 |     it("should handle preferences", async () => {
113 |       const result = await recommendSSG({
114 |         analysisId: "test-456",
115 |         preferences: {
116 |           priority: "simplicity",
117 |           ecosystem: "javascript",
118 |         },
119 |       });
120 | 
121 |       expect(result.content).toBeDefined();
122 |       const recData = JSON.parse(
123 |         result.content.find((c) => c.text.includes('"recommended"'))!.text,
124 |       );
125 |       expect(["jekyll", "hugo", "docusaurus", "mkdocs", "eleventy"]).toContain(
126 |         recData.recommended,
127 |       );
128 |     });
129 | 
130 |     it("should provide alternatives", async () => {
131 |       const result = await recommendSSG({ analysisId: "test-789" });
132 | 
133 |       const recData = JSON.parse(
134 |         result.content.find((c) => c.text.includes('"alternatives"'))!.text,
135 |       );
136 |       expect(Array.isArray(recData.alternatives)).toBe(true);
137 |       expect(recData.alternatives.length).toBeGreaterThan(0);
138 |     });
139 |   });
140 | 
141 |   describe("generate_config", () => {
142 |     it("should generate Docusaurus config", async () => {
143 |       const outputDir = await createTempDir("docusaurus-config");
144 |       const result = await generateConfig({
145 |         ssg: "docusaurus",
146 |         projectName: "Test Docusaurus",
147 |         outputPath: outputDir,
148 |       });
149 | 
150 |       expect(result.content).toBeDefined();
151 |       expect(
152 |         await fileExists(path.join(outputDir, "docusaurus.config.js")),
153 |       ).toBe(true);
154 |       expect(await fileExists(path.join(outputDir, "package.json"))).toBe(true);
155 |     });
156 | 
157 |     it("should generate MkDocs config", async () => {
158 |       const outputDir = await createTempDir("mkdocs-config");
159 |       const result = await generateConfig({
160 |         ssg: "mkdocs",
161 |         projectName: "Test MkDocs",
162 |         outputPath: outputDir,
163 |       });
164 | 
165 |       expect(result.content).toBeDefined();
166 |       expect(await fileExists(path.join(outputDir, "mkdocs.yml"))).toBe(true);
167 |       expect(await fileExists(path.join(outputDir, "requirements.txt"))).toBe(
168 |         true,
169 |       );
170 |     });
171 | 
172 |     it("should generate Hugo config", async () => {
173 |       const outputDir = await createTempDir("hugo-config");
174 |       const result = await generateConfig({
175 |         ssg: "hugo",
176 |         projectName: "Test Hugo",
177 |         outputPath: outputDir,
178 |       });
179 | 
180 |       expect(result.content).toBeDefined();
181 |       expect(await fileExists(path.join(outputDir, "hugo.toml"))).toBe(true);
182 |     });
183 | 
184 |     it("should generate Jekyll config", async () => {
185 |       const outputDir = await createTempDir("jekyll-config");
186 |       const result = await generateConfig({
187 |         ssg: "jekyll",
188 |         projectName: "Test Jekyll",
189 |         outputPath: outputDir,
190 |       });
191 | 
192 |       expect(result.content).toBeDefined();
193 |       expect(await fileExists(path.join(outputDir, "_config.yml"))).toBe(true);
194 |       expect(await fileExists(path.join(outputDir, "Gemfile"))).toBe(true);
195 |     });
196 | 
197 |     it("should generate Eleventy config", async () => {
198 |       const outputDir = await createTempDir("eleventy-config");
199 |       const result = await generateConfig({
200 |         ssg: "eleventy",
201 |         projectName: "Test Eleventy",
202 |         outputPath: outputDir,
203 |       });
204 | 
205 |       expect(result.content).toBeDefined();
206 |       expect(await fileExists(path.join(outputDir, ".eleventy.js"))).toBe(true);
207 |       expect(await fileExists(path.join(outputDir, "package.json"))).toBe(true);
208 |     });
209 |   });
210 | 
211 |   describe("setup_structure", () => {
212 |     it("should create Diataxis structure", async () => {
213 |       const docsDir = await createTempDir("diataxis-structure");
214 |       const result = await setupStructure({
215 |         path: docsDir,
216 |         ssg: "docusaurus",
217 |         includeExamples: true,
218 |       });
219 | 
220 |       expect(result.content).toBeDefined();
221 | 
222 |       const categories = ["tutorials", "how-to", "reference", "explanation"];
223 |       for (const category of categories) {
224 |         expect(await fileExists(path.join(docsDir, category, "index.md"))).toBe(
225 |           true,
226 |         );
227 |       }
228 |       expect(await fileExists(path.join(docsDir, "index.md"))).toBe(true);
229 |     });
230 | 
231 |     it("should create structure without examples", async () => {
232 |       const docsDir = await createTempDir("no-examples");
233 |       const result = await setupStructure({
234 |         path: docsDir,
235 |         ssg: "mkdocs",
236 |         includeExamples: false,
237 |       });
238 | 
239 |       expect(result.content).toBeDefined();
240 | 
241 |       const tutorialsFiles = await fs.readdir(path.join(docsDir, "tutorials"));
242 |       expect(tutorialsFiles).toEqual(["index.md"]); // Only index, no examples
243 |     });
244 | 
245 |     it("should handle different SSG formats", async () => {
246 |       const docusaurusDir = await createTempDir("docusaurus-format");
247 |       await setupStructure({
248 |         path: docusaurusDir,
249 |         ssg: "docusaurus",
250 |         includeExamples: true,
251 |       });
252 | 
253 |       const content = await fs.readFile(
254 |         path.join(docusaurusDir, "tutorials", "index.md"),
255 |         "utf-8",
256 |       );
257 |       expect(content).toContain("id: tutorials-index");
258 | 
259 |       const jekyllDir = await createTempDir("jekyll-format");
260 |       await setupStructure({
261 |         path: jekyllDir,
262 |         ssg: "jekyll",
263 |         includeExamples: true,
264 |       });
265 | 
266 |       const jekyllContent = await fs.readFile(
267 |         path.join(jekyllDir, "tutorials", "index.md"),
268 |         "utf-8",
269 |       );
270 |       expect(jekyllContent).toContain("title:");
271 |     });
272 |   });
273 | 
274 |   describe("deploy_pages", () => {
275 |     it("should create Docusaurus deployment workflow", async () => {
276 |       const repoDir = await createTempDir("docusaurus-deploy");
277 |       const result = await deployPages({
278 |         repository: repoDir,
279 |         ssg: "docusaurus",
280 |       });
281 | 
282 |       expect(result.content).toBeDefined();
283 | 
284 |       const workflowPath = path.join(
285 |         repoDir,
286 |         ".github",
287 |         "workflows",
288 |         "deploy-docs.yml",
289 |       );
290 |       expect(await fileExists(workflowPath)).toBe(true);
291 | 
292 |       const workflowContent = await fs.readFile(workflowPath, "utf-8");
293 |       expect(workflowContent).toContain("Deploy Docusaurus");
294 |       expect(workflowContent).toContain("npm run build");
295 |     });
296 | 
297 |     it("should create MkDocs deployment workflow", async () => {
298 |       const repoDir = await createTempDir("mkdocs-deploy");
299 |       const result = await deployPages({
300 |         repository: repoDir,
301 |         ssg: "mkdocs",
302 |       });
303 | 
304 |       const workflowContent = await fs.readFile(
305 |         path.join(repoDir, ".github", "workflows", "deploy-docs.yml"),
306 |         "utf-8",
307 |       );
308 |       expect(workflowContent).toContain("mkdocs gh-deploy");
309 |     });
310 | 
311 |     it("should handle custom domain", async () => {
312 |       const repoDir = await createTempDir("custom-domain");
313 |       const result = await deployPages({
314 |         repository: repoDir,
315 |         ssg: "docusaurus",
316 |         customDomain: "docs.example.com",
317 |       });
318 | 
319 |       expect(result.content).toBeDefined();
320 |       expect(await fileExists(path.join(repoDir, "CNAME"))).toBe(true);
321 | 
322 |       const cnameContent = await fs.readFile(
323 |         path.join(repoDir, "CNAME"),
324 |         "utf-8",
325 |       );
326 |       expect(cnameContent.trim()).toBe("docs.example.com");
327 |     });
328 | 
329 |     it("should handle different branches", async () => {
330 |       const repoDir = await createTempDir("custom-branch");
331 |       await deployPages({
332 |         repository: repoDir,
333 |         ssg: "hugo",
334 |         branch: "main",
335 |       });
336 | 
337 |       const workflowContent = await fs.readFile(
338 |         path.join(repoDir, ".github", "workflows", "deploy-docs.yml"),
339 |         "utf-8",
340 |       );
341 |       expect(workflowContent).toContain("Deploy Hugo");
342 |     });
343 |   });
344 | 
345 |   describe("verify_deployment", () => {
346 |     it("should verify complete setup", async () => {
347 |       const repoDir = await createCompleteRepo();
348 |       const result = await verifyDeployment({
349 |         repository: repoDir,
350 |       });
351 | 
352 |       expect(result.content).toBeDefined();
353 |       const verification = JSON.parse(result.content[0].text);
354 |       expect(verification.overallStatus).toBe("Ready for deployment");
355 |       expect(verification.summary.passed).toBeGreaterThan(0);
356 |     });
357 | 
358 |     it("should identify missing components", async () => {
359 |       const emptyDir = await createTempDir("empty-verify");
360 |       const result = await verifyDeployment({
361 |         repository: emptyDir,
362 |       });
363 | 
364 |       const verification = JSON.parse(result.content[0].text);
365 |       expect(verification.overallStatus).toContain("Configuration required");
366 |       expect(verification.summary.failed).toBeGreaterThan(0);
367 |       expect(
368 |         verification.checks.some((check: any) =>
369 |           check.message.includes("No .github/workflows"),
370 |         ),
371 |       ).toBe(true);
372 |     });
373 | 
374 |     it("should handle different repository paths", async () => {
375 |       const relativeResult = await verifyDeployment({ repository: "." });
376 |       expect(relativeResult.content).toBeDefined();
377 | 
378 |       const httpResult = await verifyDeployment({
379 |         repository: "https://github.com/user/repo",
380 |       });
381 |       expect(httpResult.content).toBeDefined();
382 |     });
383 | 
384 |     it("should provide recommendations", async () => {
385 |       const incompleteDir = await createTempDir("incomplete");
386 |       await fs.writeFile(path.join(incompleteDir, "README.md"), "# Test");
387 | 
388 |       const result = await verifyDeployment({
389 |         repository: incompleteDir,
390 |       });
391 | 
392 |       const resultText = result.content.map((c) => c.text).join("\n");
393 |       expect(resultText).toContain("→");
394 |       expect(resultText).toContain("deploy_pages tool");
395 |     });
396 | 
397 |     it("should check for different config patterns", async () => {
398 |       const configDir = await createTempDir("config-check");
399 |       await fs.writeFile(
400 |         path.join(configDir, "docusaurus.config.js"),
401 |         "module.exports = {};",
402 |       );
403 | 
404 |       const result = await verifyDeployment({
405 |         repository: configDir,
406 |       });
407 | 
408 |       const resultText = result.content.map((c) => c.text).join("\n");
409 |       expect(resultText).toContain("SSG Configuration");
410 |       expect(resultText).toContain("docusaurus.config.js");
411 |     });
412 |   });
413 | 
414 |   describe("evaluate_readme_health", () => {
415 |     it("should evaluate README health with minimal input", async () => {
416 |       const readmePath = await createReadmeFile("Basic project README");
417 |       const result = await evaluateReadmeHealth({
418 |         readme_path: readmePath,
419 |       });
420 | 
421 |       expect(result.content).toBeDefined();
422 |       expect(result.isError).toBe(false);
423 | 
424 |       const healthData = result.content.find((c) =>
425 |         c.text.includes("healthReport"),
426 |       );
427 |       expect(healthData).toBeDefined();
428 |     });
429 | 
430 |     it("should handle different project types", async () => {
431 |       const readmePath = await createReadmeFile(
432 |         "Enterprise tool documentation",
433 |       );
434 |       const result = await evaluateReadmeHealth({
435 |         readme_path: readmePath,
436 |         project_type: "enterprise_tool",
437 |       });
438 | 
439 |       expect(result.content).toBeDefined();
440 |       expect(result.isError).toBe(false);
441 |     });
442 | 
443 |     it("should provide health components and scoring", async () => {
444 |       const readmePath = await createReadmeFile(`# Complete Project
445 | 
446 | ## Description
447 | Detailed description
448 | 
449 | ## Installation
450 | Installation steps
451 | 
452 | ## Usage
453 | Usage examples
454 | 
455 | ## Contributing
456 | Contributing guidelines
457 | 
458 | ## License
459 | MIT License`);
460 | 
461 |       const result = await evaluateReadmeHealth({
462 |         readme_path: readmePath,
463 |       });
464 | 
465 |       const dataContent = result.content.find((c) =>
466 |         c.text.includes("healthReport"),
467 |       );
468 |       const data = JSON.parse(dataContent!.text);
469 | 
470 |       expect(data.healthReport.overallScore).toBeGreaterThanOrEqual(0);
471 |       expect(data.healthReport.grade).toBeDefined();
472 |       expect(data.healthReport.components).toBeDefined();
473 |     });
474 |   });
475 | 
476 |   describe("readme_best_practices", () => {
477 |     it("should analyze README best practices", async () => {
478 |       const readmePath = await createReadmeFile("Basic library README");
479 |       const result = await readmeBestPractices({
480 |         readme_path: readmePath,
481 |         project_type: "library",
482 |       });
483 | 
484 |       expect(result.success).toBe(true);
485 |       expect(result.data).toBeDefined();
486 |       expect(result.data!.bestPracticesReport).toBeDefined();
487 |     });
488 | 
489 |     it("should handle template generation", async () => {
490 |       const outputDir = await createTempDir("best-practices-template");
491 |       const result = await readmeBestPractices({
492 |         readme_path: path.join(outputDir, "missing.md"),
493 |         generate_template: true,
494 |         output_directory: outputDir,
495 |         project_type: "application",
496 |       });
497 | 
498 |       expect(result.success).toBe(true);
499 |       expect(result.data).toBeDefined();
500 |     });
501 | 
502 |     it("should provide checklist and recommendations", async () => {
503 |       const readmePath = await createReadmeFile(`# Library Project
504 | 
505 | ## Installation
506 | npm install my-lib
507 | 
508 | ## Usage
509 | Basic usage here
510 | 
511 | ## API
512 | API documentation
513 | `);
514 | 
515 |       const result = await readmeBestPractices({
516 |         readme_path: readmePath,
517 |       });
518 | 
519 |       expect(result.success).toBe(true);
520 |       expect(result.data!.bestPracticesReport.checklist).toBeDefined();
521 |       expect(Array.isArray(result.data!.bestPracticesReport.checklist)).toBe(
522 |         true,
523 |       );
524 |       expect(result.data!.recommendations).toBeDefined();
525 |       expect(result.data!.nextSteps).toBeDefined();
526 |     });
527 |   });
528 | 
529 |   // Helper functions
530 |   async function createJSRepo(): Promise<string> {
531 |     const repoPath = path.join(tempDir, `js-repo-${Date.now()}`);
532 |     await fs.mkdir(repoPath, { recursive: true });
533 | 
534 |     await fs.writeFile(
535 |       path.join(repoPath, "package.json"),
536 |       JSON.stringify(
537 |         {
538 |           name: "test-js-project",
539 |           dependencies: { express: "^4.0.0" },
540 |           devDependencies: { jest: "^29.0.0" },
541 |         },
542 |         null,
543 |         2,
544 |       ),
545 |     );
546 | 
547 |     await fs.writeFile(
548 |       path.join(repoPath, "index.js"),
549 |       'console.log("Hello");',
550 |     );
551 |     await fs.writeFile(path.join(repoPath, "README.md"), "# JS Project");
552 | 
553 |     return repoPath;
554 |   }
555 | 
556 |   async function createPythonRepo(): Promise<string> {
557 |     const repoPath = path.join(tempDir, `py-repo-${Date.now()}`);
558 |     await fs.mkdir(repoPath, { recursive: true });
559 | 
560 |     await fs.writeFile(path.join(repoPath, "requirements.txt"), "flask>=2.0.0");
561 |     await fs.writeFile(path.join(repoPath, "main.py"), 'print("Hello")');
562 |     await fs.writeFile(path.join(repoPath, "README.md"), "# Python Project");
563 | 
564 |     return repoPath;
565 |   }
566 | 
567 |   async function createRepoWithCI(): Promise<string> {
568 |     const repoPath = path.join(tempDir, `ci-repo-${Date.now()}`);
569 |     await fs.mkdir(path.join(repoPath, ".github", "workflows"), {
570 |       recursive: true,
571 |     });
572 | 
573 |     await fs.writeFile(
574 |       path.join(repoPath, ".github", "workflows", "ci.yml"),
575 |       "name: CI\non: push",
576 |     );
577 |     await fs.writeFile(path.join(repoPath, "README.md"), "# CI Project");
578 | 
579 |     return repoPath;
580 |   }
581 | 
582 |   async function createEmptyRepo(): Promise<string> {
583 |     const repoPath = path.join(tempDir, `empty-repo-${Date.now()}`);
584 |     await fs.mkdir(repoPath, { recursive: true });
585 |     await fs.writeFile(path.join(repoPath, "README.md"), "# Empty Project");
586 | 
587 |     return repoPath;
588 |   }
589 | 
590 |   async function createTempDir(suffix: string): Promise<string> {
591 |     const dirPath = path.join(tempDir, `${suffix}-${Date.now()}`);
592 |     await fs.mkdir(dirPath, { recursive: true });
593 |     return dirPath;
594 |   }
595 | 
596 |   async function createCompleteRepo(): Promise<string> {
597 |     const repoPath = await createTempDir("complete-repo");
598 | 
599 |     // Create workflow
600 |     await fs.mkdir(path.join(repoPath, ".github", "workflows"), {
601 |       recursive: true,
602 |     });
603 |     await fs.writeFile(
604 |       path.join(repoPath, ".github", "workflows", "deploy.yml"),
605 |       "name: Deploy\non: push",
606 |     );
607 | 
608 |     // Create docs
609 |     await fs.mkdir(path.join(repoPath, "docs"), { recursive: true });
610 |     await fs.writeFile(path.join(repoPath, "docs", "index.md"), "# Docs");
611 | 
612 |     // Create config
613 |     await fs.writeFile(
614 |       path.join(repoPath, "docusaurus.config.js"),
615 |       "module.exports = {};",
616 |     );
617 | 
618 |     // Create build output
619 |     await fs.mkdir(path.join(repoPath, "build"), { recursive: true });
620 |     await fs.writeFile(
621 |       path.join(repoPath, "build", "index.html"),
622 |       "<html></html>",
623 |     );
624 | 
625 |     return repoPath;
626 |   }
627 | 
628 |   async function fileExists(filePath: string): Promise<boolean> {
629 |     try {
630 |       await fs.access(filePath);
631 |       return true;
632 |     } catch {
633 |       return false;
634 |     }
635 |   }
636 | 
637 |   async function createReadmeFile(content: string): Promise<string> {
638 |     const file = tmp.fileSync({ postfix: ".md", keep: false });
639 |     await fs.writeFile(file.name, content);
640 |     return file.name;
641 |   }
642 | });
643 | 
```

--------------------------------------------------------------------------------
/docs/reference/mcp-tools.md:
--------------------------------------------------------------------------------

```markdown
  1 | ---
  2 | documcp:
  3 |   last_updated: "2025-11-20T00:46:21.963Z"
  4 |   last_validated: "2025-11-20T00:46:21.963Z"
  5 |   auto_updated: false
  6 |   update_frequency: monthly
  7 | ---
  8 | 
  9 | # MCP Tools API Reference
 10 | 
 11 | DocuMCP provides a comprehensive set of tools via the Model Context Protocol (MCP). These tools enable intelligent documentation deployment through repository analysis, SSG recommendations, and automated GitHub Pages setup.
 12 | 
 13 | ## Implementation Details
 14 | 
 15 | DocuMCP implements the MCP protocol using the low-level `Server` class from `@modelcontextprotocol/sdk/server/index.js` with `StdioServerTransport` for process-based communication. Tools are registered manually using `setRequestHandler` with `CallToolRequestSchema` and `ListToolsRequestSchema`, providing full control over tool execution and response formatting.
 16 | 
 17 | ## Core Documentation Tools
 18 | 
 19 | ### analyze_repository
 20 | 
 21 | **Description**: Analyze repository structure, dependencies, and documentation needs
 22 | 
 23 | **Parameters**:
 24 | 
 25 | - `path` (string, required): Path to the repository to analyze
 26 | - `depth` (enum, optional): Analysis depth level
 27 |   - `"quick"`: Fast overview focusing on basic structure
 28 |   - `"standard"`: Comprehensive analysis (default)
 29 |   - `"deep"`: Detailed analysis with advanced insights
 30 | 
 31 | **Returns**: Analysis object containing:
 32 | 
 33 | - `id`: Unique analysis identifier for use in other tools
 34 | - `timestamp`: Analysis execution time
 35 | - `structure`: File counts, languages, and project features
 36 | - `dependencies`: Package ecosystem and dependency analysis
 37 | - `documentation`: Existing documentation assessment
 38 | - `recommendations`: Project classification and team size estimates
 39 | 
 40 | **Example**:
 41 | 
 42 | ```json
 43 | {
 44 |   "path": "/path/to/repository",
 45 |   "depth": "standard"
 46 | }
 47 | ```
 48 | 
 49 | ### recommend_ssg
 50 | 
 51 | **Description**: Recommend the best static site generator based on project analysis
 52 | 
 53 | **Parameters**:
 54 | 
 55 | - `analysisId` (string, required): ID from previous repository analysis
 56 | - `preferences` (object, optional):
 57 |   - `priority`: `"simplicity"`, `"features"`, or `"performance"`
 58 |   - `ecosystem`: `"javascript"`, `"python"`, `"ruby"`, `"go"`, or `"any"`
 59 | 
 60 | **Returns**: Recommendation object with weighted scoring and justifications
 61 | 
 62 | **Example**:
 63 | 
 64 | ```json
 65 | {
 66 |   "analysisId": "analysis_abc123",
 67 |   "preferences": {
 68 |     "priority": "simplicity",
 69 |     "ecosystem": "javascript"
 70 |   }
 71 | }
 72 | ```
 73 | 
 74 | ### generate_config
 75 | 
 76 | **Description**: Generate configuration files for the selected static site generator
 77 | 
 78 | **Parameters**:
 79 | 
 80 | - `ssg` (enum, required): `"jekyll"`, `"hugo"`, `"docusaurus"`, `"mkdocs"`, or `"eleventy"`
 81 | - `projectName` (string, required): Name of the project
 82 | - `projectDescription` (string, optional): Brief description
 83 | - `outputPath` (string, required): Where to generate config files
 84 | 
 85 | **Returns**: Generated configuration files and setup instructions
 86 | 
 87 | **Example**:
 88 | 
 89 | ```json
 90 | {
 91 |   "ssg": "hugo",
 92 |   "projectName": "My Documentation Site",
 93 |   "outputPath": "./docs"
 94 | }
 95 | ```
 96 | 
 97 | ### setup_structure
 98 | 
 99 | **Description**: Create Diataxis-compliant documentation structure
100 | 
101 | **Parameters**:
102 | 
103 | - `path` (string, required): Root path for documentation
104 | - `ssg` (enum, required): Static site generator type
105 | - `includeExamples` (boolean, optional, default: true): Include example content
106 | 
107 | **Returns**: Created directory structure following Diataxis framework:
108 | 
109 | - **tutorials/**: Learning-oriented guides for skill acquisition (study context)
110 | - **how-to/**: Problem-solving guides for specific tasks (work context)
111 | - **reference/**: Information-oriented content for lookup and verification (information context)
112 | - **explanation/**: Understanding-oriented content for context and background (understanding context)
113 | 
114 | **Example**:
115 | 
116 | ```json
117 | {
118 |   "path": "./docs",
119 |   "ssg": "mkdocs",
120 |   "includeExamples": true
121 | }
122 | ```
123 | 
124 | ### deploy_pages
125 | 
126 | **Description**: Set up GitHub Pages deployment workflow
127 | 
128 | **Parameters**:
129 | 
130 | - `repository` (string, required): Repository path or URL
131 | - `ssg` (enum, required): Static site generator type
132 | - `branch` (string, optional, default: "gh-pages"): Deployment branch
133 | - `customDomain` (string, optional): Custom domain name
134 | 
135 | **Returns**: GitHub Actions workflow files for automated deployment
136 | 
137 | **Example**:
138 | 
139 | ```json
140 | {
141 |   "repository": "username/repository",
142 |   "ssg": "docusaurus",
143 |   "customDomain": "docs.example.com"
144 | }
145 | ```
146 | 
147 | ### verify_deployment
148 | 
149 | **Description**: Verify and troubleshoot GitHub Pages deployment
150 | 
151 | **Parameters**:
152 | 
153 | - `repository` (string, required): Repository path or URL
154 | - `url` (string, optional): Expected deployment URL
155 | 
156 | **Returns**: Deployment status and troubleshooting recommendations
157 | 
158 | **Example**:
159 | 
160 | ```json
161 | {
162 |   "repository": "username/repository",
163 |   "url": "https://username.github.io/repository"
164 | }
165 | ```
166 | 
167 | ## Content Management Tools
168 | 
169 | ### populate_diataxis_content
170 | 
171 | **Description**: Intelligently populate Diataxis documentation with project-specific content
172 | 
173 | **Parameters**:
174 | 
175 | - `analysisId` (string, required): Repository analysis ID
176 | - `docsPath` (string, required): Path to documentation directory
177 | - `populationLevel` (enum, optional, default: "comprehensive"): Content generation level
178 | - `includeProjectSpecific` (boolean, optional, default: true): Include project-specific content
179 | - `preserveExisting` (boolean, optional, default: true): Preserve existing content
180 | - `technologyFocus` (array of strings, optional): Specific technologies to emphasize
181 | 
182 | **Returns**: Populated content metrics and file creation summary
183 | 
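**Example** (illustrative; the analysis ID is a placeholder and the technology names are arbitrary):

```json
{
  "analysisId": "analysis_abc123",
  "docsPath": "./docs",
  "populationLevel": "comprehensive",
  "technologyFocus": ["typescript", "react"]
}
```
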
184 | ### update_existing_documentation
185 | 
186 | **Description**: Intelligently analyze and update existing documentation using memory insights
187 | 
188 | **Parameters**:
189 | 
190 | - `analysisId` (string, required): Repository analysis ID
191 | - `docsPath` (string, required): Path to existing documentation directory
192 | - `compareMode` (enum, optional, default: "comprehensive"): Comparison mode
193 | - `updateStrategy` (enum, optional, default: "moderate"): Update aggressiveness
194 | - `preserveStyle` (boolean, optional, default: true): Preserve existing style
195 | - `focusAreas` (array of strings, optional): Specific areas to focus on
196 | 
197 | **Returns**: Update recommendations and gap analysis
198 | 
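**Example** (illustrative; values shown are the documented defaults plus a placeholder analysis ID):

```json
{
  "analysisId": "analysis_abc123",
  "docsPath": "./docs",
  "compareMode": "comprehensive",
  "updateStrategy": "moderate"
}
```
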
199 | ### detect_documentation_gaps
200 | 
201 | **Description**: Analyze repository and existing documentation to identify missing content
202 | 
203 | **Parameters**:
204 | 
205 | - `repositoryPath` (string, required): Path to the repository
206 | - `documentationPath` (string, optional): Path to existing documentation
207 | - `analysisId` (string, optional): Optional existing analysis ID to reuse
208 | - `depth` (enum, optional, default: "standard"): Analysis depth
209 | 
210 | **Returns**: Identified gaps and recommendations for improvement
211 | 
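**Example** (illustrative paths):

```json
{
  "repositoryPath": "/path/to/repository",
  "documentationPath": "./docs",
  "depth": "standard"
}
```
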
212 | ## Validation Tools
213 | 
214 | ### validate_diataxis_content
215 | 
216 | **Description**: Validate the accuracy, completeness, and compliance of generated Diataxis documentation
217 | 
218 | **Parameters**:
219 | 
220 | - `contentPath` (string, required): Path to documentation directory to validate
221 | - `analysisId` (string, optional): Repository analysis ID for context
222 | - `validationType` (enum, optional, default: "all"): Type of validation
223 | - `includeCodeValidation` (boolean, optional, default: true): Validate code examples
224 | - `confidence` (enum, optional, default: "moderate"): Validation confidence level
225 | 
226 | **Returns**: Validation results with issues, recommendations, and confidence scores
227 | 
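**Example** (illustrative; shown with the documented defaults):

```json
{
  "contentPath": "./docs",
  "validationType": "all",
  "includeCodeValidation": true,
  "confidence": "moderate"
}
```
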
228 | ### validate_content
229 | 
230 | **Description**: Validate general content quality including links and code syntax
231 | 
232 | **Parameters**:
233 | 
234 | - `contentPath` (string, required): Path to content directory
235 | - `validationType` (string, optional, default: "all"): Validation type
236 | - `includeCodeValidation` (boolean, optional, default: true): Validate code blocks
237 | - `followExternalLinks` (boolean, optional, default: false): Check external URLs
238 | 
239 | **Returns**: Content validation results with broken links and code errors
240 | 
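**Example** (illustrative):

```json
{
  "contentPath": "./docs",
  "validationType": "all",
  "followExternalLinks": false
}
```
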
241 | ### check_documentation_links
242 | 
243 | **Description**: Comprehensive link checking for documentation deployment
244 | 
245 | **Parameters**:
246 | 
247 | - `documentation_path` (string, optional, default: "./docs"): Documentation directory
248 | - `check_external_links` (boolean, optional, default: true): Validate external URLs
249 | - `check_internal_links` (boolean, optional, default: true): Validate internal references
250 | - `check_anchor_links` (boolean, optional, default: true): Validate anchor links
251 | - `timeout_ms` (number, optional, default: 5000): Request timeout
252 | - `max_concurrent_checks` (number, optional, default: 5): Concurrent check limit
253 | 
254 | **Returns**: Comprehensive link validation report
255 | 
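**Example** (illustrative; shown with the documented defaults):

```json
{
  "documentation_path": "./docs",
  "check_external_links": true,
  "check_internal_links": true,
  "timeout_ms": 5000
}
```
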
256 | ## Testing and Deployment Tools
257 | 
258 | ### test_local_deployment
259 | 
260 | **Description**: Test documentation build and local server before deploying to GitHub Pages
261 | 
262 | **Parameters**:
263 | 
264 | - `repositoryPath` (string, required): Path to the repository
265 | - `ssg` (enum, required): Static site generator type
266 | - `port` (number, optional, default: 3000): Local server port
267 | - `timeout` (number, optional, default: 60): Build timeout in seconds
268 | - `skipBuild` (boolean, optional, default: false): Skip build step
269 | 
270 | **Returns**: Local testing results and server status
271 | 
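**Example** (illustrative):

```json
{
  "repositoryPath": "/path/to/repository",
  "ssg": "docusaurus",
  "port": 3000
}
```
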
272 | ## README Management Tools
273 | 
274 | ### evaluate_readme_health
275 | 
276 | **Description**: Evaluate README files for community health and onboarding effectiveness
277 | 
278 | **Parameters**:
279 | 
280 | - `readme_path` (string, required): Path to README file
281 | - `project_type` (enum, optional, default: "community_library"): Project type
282 | - `repository_path` (string, optional): Repository path for context
283 | 
284 | **Returns**: Health evaluation with scores and recommendations
285 | 
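**Example** (illustrative path):

```json
{
  "readme_path": "./README.md",
  "project_type": "community_library"
}
```
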
286 | ### readme_best_practices
287 | 
288 | **Description**: Analyze README files against best practices checklist
289 | 
290 | **Parameters**:
291 | 
292 | - `readme_path` (string, required): Path to README file
293 | - `project_type` (enum, optional, default: "library"): Project type
294 | - `generate_template` (boolean, optional, default: false): Generate templates
295 | - `target_audience` (enum, optional, default: "mixed"): Target audience
296 | 
297 | **Returns**: Best practices analysis and improvement recommendations
298 | 
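**Example** (illustrative):

```json
{
  "readme_path": "./README.md",
  "project_type": "library",
  "generate_template": false
}
```
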
299 | ### generate_readme_template
300 | 
301 | **Description**: Generate standardized README templates for different project types
302 | 
303 | **Parameters**:
304 | 
305 | - `projectName` (string, required): Name of the project
306 | - `description` (string, required): Brief project description
307 | - `templateType` (enum, required): Project template type
308 | - `author` (string, optional): Project author/organization
309 | - `license` (string, optional, default: "MIT"): Project license
310 | - `outputPath` (string, optional): Output file path
311 | 
312 | **Returns**: Generated README template content
313 | 
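**Example** (illustrative; the `templateType` value assumes a library-style template is supported):

```json
{
  "projectName": "My Project",
  "description": "A short description of what the project does",
  "templateType": "library",
  "license": "MIT"
}
```
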
314 | ### validate_readme_checklist
315 | 
316 | **Description**: Validate README files against community best practices checklist
317 | 
318 | **Parameters**:
319 | 
320 | - `readmePath` (string, required): Path to README file
321 | - `projectPath` (string, optional): Project directory for context
322 | - `strict` (boolean, optional, default: false): Use strict validation
323 | - `outputFormat` (enum, optional, default: "console"): Output format
324 | 
325 | **Returns**: Validation report with detailed scoring
326 | 
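**Example** (illustrative):

```json
{
  "readmePath": "./README.md",
  "strict": false,
  "outputFormat": "console"
}
```
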
327 | ### analyze_readme
328 | 
329 | **Description**: Comprehensive README analysis with length assessment and optimization opportunities
330 | 
331 | **Parameters**:
332 | 
333 | - `project_path` (string, required): Path to project directory
334 | - `target_audience` (enum, optional, default: "community_contributors"): Target audience
335 | - `optimization_level` (enum, optional, default: "moderate"): Optimization level
336 | - `max_length_target` (number, optional, default: 300): Target max length
337 | 
338 | **Returns**: README analysis with optimization recommendations
339 | 
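**Example** (illustrative; shown with the documented defaults):

```json
{
  "project_path": "/path/to/project",
  "target_audience": "community_contributors",
  "max_length_target": 300
}
```
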
340 | ### optimize_readme
341 | 
342 | **Description**: Optimize README content by restructuring and condensing
343 | 
344 | **Parameters**:
345 | 
346 | - `readme_path` (string, required): Path to README file
347 | - `strategy` (enum, optional, default: "community_focused"): Optimization strategy
348 | - `max_length` (number, optional, default: 300): Target maximum length
349 | - `include_tldr` (boolean, optional, default: true): Include TL;DR section
350 | - `create_docs_directory` (boolean, optional, default: true): Create docs directory
351 | 
352 | **Returns**: Optimized README content and extracted documentation
353 | 
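**Example** (illustrative):

```json
{
  "readme_path": "./README.md",
  "strategy": "community_focused",
  "max_length": 300,
  "include_tldr": true
}
```
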
354 | ## Documentation Freshness Tracking Tools
355 | 
356 | DocuMCP includes comprehensive tools for tracking and managing documentation freshness, ensuring your documentation stays up-to-date and identifying files that need attention.
357 | 
358 | ### track_documentation_freshness
359 | 
360 | **Description**: Scan a documentation directory for staleness markers and identify files needing updates, based on configurable time thresholds (minutes, hours, or days)
361 | 
362 | **Parameters**:
363 | 
364 | - `docsPath` (string, required): Path to documentation directory
365 | - `projectPath` (string, optional): Path to project root (for knowledge graph tracking)
366 | - `warningThreshold` (object, optional): Warning threshold (yellow flag)
367 |   - `value` (number, positive): Threshold value
368 |   - `unit` (enum): `"minutes"`, `"hours"`, or `"days"`
369 | - `staleThreshold` (object, optional): Stale threshold (orange flag)
370 |   - `value` (number, positive): Threshold value
371 |   - `unit` (enum): `"minutes"`, `"hours"`, or `"days"`
372 | - `criticalThreshold` (object, optional): Critical threshold (red flag)
373 |   - `value` (number, positive): Threshold value
374 |   - `unit` (enum): `"minutes"`, `"hours"`, or `"days"`
375 | - `preset` (enum, optional): Use predefined threshold preset
376 |   - Options: `"realtime"`, `"active"`, `"recent"`, `"weekly"`, `"monthly"`, `"quarterly"`
377 | - `includeFileList` (boolean, optional, default: true): Include detailed file list in response
378 | - `sortBy` (enum, optional, default: "staleness"): Sort order for file list
379 |   - Options: `"age"`, `"path"`, `"staleness"`
380 | - `storeInKG` (boolean, optional, default: true): Store tracking event in knowledge graph for historical analysis
381 | 
382 | **Returns**: Freshness report with:
383 | 
384 | - Summary statistics (total files, fresh, warning, stale, critical)
385 | - Detailed file list with staleness levels
386 | - Age information for each file
387 | - Recommendations for action
388 | 
389 | **Example**:
390 | 
391 | ```json
392 | {
393 |   "docsPath": "/path/to/docs",
394 |   "preset": "monthly",
395 |   "includeFileList": true
396 | }
397 | ```
398 | 
399 | **Default Thresholds**:
400 | 
401 | - Warning: 7 days
402 | - Stale: 30 days
403 | - Critical: 90 days
404 | 
405 | ### validate_documentation_freshness
406 | 
407 | **Description**: Validate documentation freshness, initialize metadata for files without it, and update timestamps based on code changes
408 | 
409 | **Parameters**:
410 | 
411 | - `docsPath` (string, required): Path to documentation directory
412 | - `projectPath` (string, required): Path to project root (for git integration)
413 | - `initializeMissing` (boolean, optional, default: true): Initialize metadata for files without it
414 | - `updateExisting` (boolean, optional, default: false): Update last_validated timestamp for all files
415 | - `updateFrequency` (enum, optional, default: "monthly"): Default update frequency for new metadata
416 |   - Options: `"realtime"`, `"active"`, `"recent"`, `"weekly"`, `"monthly"`, `"quarterly"`
417 | - `validateAgainstGit` (boolean, optional, default: true): Validate against current git commit
418 | 
419 | **Returns**: Validation report with:
420 | 
421 | - Files initialized (new metadata created)
422 | - Files updated (existing metadata refreshed)
423 | - Metadata structure for each file
424 | - Recommendations for next steps
425 | 
426 | **Example**:
427 | 
428 | ```json
429 | {
430 |   "docsPath": "/path/to/docs",
431 |   "projectPath": "/path/to/project",
432 |   "initializeMissing": true,
433 |   "validateAgainstGit": true
434 | }
435 | ```
436 | 
437 | **Use Cases**:
438 | 
439 | - First-time setup: Initialize freshness metadata for all documentation files
440 | - Regular maintenance: Update validation timestamps
441 | - After code changes: Sync documentation freshness with git history
442 | 
443 | ## Sitemap Management Tools
444 | 
445 | ### manage_sitemap
446 | 
447 | **Description**: Generate, validate, and manage sitemap.xml as the source of truth for documentation links. The sitemap supports SEO, search engine submission, and deployment tracking.
448 | 
449 | **Parameters**:
450 | 
451 | - `action` (enum, required): Action to perform
452 |   - `"generate"`: Create new sitemap.xml
453 |   - `"validate"`: Check sitemap structure
454 |   - `"update"`: Sync sitemap with documentation
455 |   - `"list"`: Show all URLs in sitemap
456 | - `docsPath` (string, required): Path to documentation root directory
457 | - `baseUrl` (string, required for generate/update): Base URL for the site (e.g., `https://user.github.io/repo`)
458 | - `includePatterns` (array, optional): File patterns to include
459 |   - Default: `["**/*.md", "**/*.html", "**/*.mdx"]`
460 | - `excludePatterns` (array, optional): File patterns to exclude
461 |   - Default: `["node_modules", ".git", "dist", "build", ".documcp"]`
462 | - `updateFrequency` (enum, optional): Default change frequency for pages
463 |   - Options: `"always"`, `"hourly"`, `"daily"`, `"weekly"`, `"monthly"`, `"yearly"`, `"never"`
464 | - `useGitHistory` (boolean, optional, default: true): Use git history for last modified dates
465 | - `sitemapPath` (string, optional): Custom path for sitemap.xml (default: `docsPath/sitemap.xml`)
466 | 
467 | **Returns**: Sitemap operation result with:
468 | 
469 | - Generated/validated sitemap structure
470 | - URL count and statistics
471 | - Validation errors (if any)
472 | - Recommendations for SEO optimization
473 | 
474 | **Example**:
475 | 
476 | ```json
477 | {
478 |   "action": "generate",
479 |   "docsPath": "/path/to/docs",
480 |   "baseUrl": "https://example.com/docs"
481 | }
482 | ```
483 | 
484 | **Use Cases**:
485 | 
486 | - SEO optimization: Generate sitemap for search engines
487 | - Link validation: Ensure all documentation pages are discoverable
488 | - Deployment tracking: Monitor documentation changes over time
489 | 
490 | ## Memory System Tools
491 | 
492 | The memory system provides intelligent learning and pattern recognition across documentation projects.
493 | 
494 | ### memory_recall
495 | 
496 | **Description**: Recall memories about a project or topic
497 | 
498 | **Parameters**:
499 | 
500 | - `query` (string, required): Search query or project ID
501 | - `type` (enum, optional): Memory type to recall
502 | - `limit` (number, optional, default: 10): Maximum results
503 | 
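**Example** (illustrative; the `type` value assumes `"analysis"` is a supported memory type):

```json
{
  "query": "my-project",
  "type": "analysis",
  "limit": 10
}
```
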
504 | ### memory_insights
505 | 
506 | **Description**: Get insights and patterns from memory
507 | 
508 | **Parameters**:
509 | 
510 | - `projectId` (string, optional): Project ID to analyze
511 | - `timeRange` (object, optional): Time range for analysis
512 | 
513 | ### memory_similar
514 | 
515 | **Description**: Find similar projects from memory
516 | 
517 | **Parameters**:
518 | 
519 | - `analysisId` (string, required): Analysis ID to find similar projects for
520 | - `limit` (number, optional, default: 5): Maximum similar projects
521 | 
522 | ### memory_export
523 | 
524 | **Description**: Export memories to JSON or CSV
525 | 
526 | **Parameters**:
527 | 
528 | - `filter` (object, optional): Filter memories to export
529 | - `format` (enum, optional, default: "json"): Export format
530 | 
531 | ### memory_cleanup
532 | 
533 | **Description**: Clean up old memories
534 | 
535 | **Parameters**:
536 | 
537 | - `daysToKeep` (number, optional, default: 30): Number of days to retain
538 | - `dryRun` (boolean, optional, default: false): Preview without deleting
539 | 
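**Example** (illustrative; `dryRun: true` previews deletions without removing anything):

```json
{
  "daysToKeep": 30,
  "dryRun": true
}
```
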
540 | ## Tool Chaining and Workflows
541 | 
542 | DocuMCP tools are designed to work together in workflows:
543 | 
544 | 1. **Analysis → Recommendation → Implementation**:
545 | 
546 |    ```
547 |    analyze_repository → recommend_ssg → generate_config → setup_structure → deploy_pages
548 |    ```
549 | 
550 | 2. **Content Management**:
551 | 
552 |    ```
553 |    analyze_repository → populate_diataxis_content → validate_diataxis_content
554 |    ```
555 | 
556 | 3. **Documentation Maintenance**:
557 | 
558 |    ```
559 |    detect_documentation_gaps → update_existing_documentation → validate_content
560 |    ```
561 | 
562 | 4. **Freshness Tracking**:
563 | 
564 |    ```
565 |    validate_documentation_freshness → track_documentation_freshness → (update files as needed)
566 |    ```
567 | 
568 | 5. **SEO and Sitemap Management**:
569 |    ```
570 |    manage_sitemap (generate) → deploy_pages → manage_sitemap (validate)
571 |    ```
572 | 
573 | ## Error Handling
574 | 
575 | All tools return structured responses with error information when failures occur:
576 | 
577 | ```json
578 | {
579 |   "content": [
580 |     {
581 |       "type": "text",
582 |       "text": "Error executing tool_name: error_message"
583 |     }
584 |   ],
585 |   "isError": true
586 | }
587 | ```
588 | 
589 | ## Resource Storage
590 | 
591 | Tool results are automatically stored as MCP resources with URIs like:
592 | 
593 | - `documcp://analysis/{id}`: Analysis results
594 | - `documcp://config/{ssg}/{id}`: Configuration files
595 | - `documcp://deployment/{id}`: Deployment workflows
596 | 
597 | These resources can be accessed later for reference or further processing.
598 | 
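For example, a stored analysis can be fetched with a standard MCP `resources/read` request (the URI below is a placeholder):

```json
{
  "jsonrpc": "2.0",
  "id": 1,
  "method": "resources/read",
  "params": { "uri": "documcp://analysis/analysis_abc123" }
}
```
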
599 | ## Version Information
600 | 
601 | Current DocuMCP version: **0.5.2**
602 | 
603 | For the latest updates and detailed changelog, see the project repository.
604 | 
605 | ## Recent Additions (v0.5.2)
606 | 
607 | ### Documentation Freshness Tracking
608 | 
609 | - `track_documentation_freshness`: Monitor documentation staleness with configurable thresholds
610 | - `validate_documentation_freshness`: Initialize and update freshness metadata
611 | 
612 | ### Sitemap Management
613 | 
614 | - `manage_sitemap`: Generate, validate, and manage sitemap.xml for SEO and deployment tracking
615 | 
616 | These tools integrate with the knowledge graph system to provide historical analysis and intelligent recommendations.
617 | 
```

--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Changelog
  2 | 
  3 | All notable changes to this project will be documented in this file.
  4 | 
  5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
  6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
  7 | 
  8 | ### [0.5.2](https://github.com/tosin2013/documcp/compare/v0.5.1...v0.5.2) (2025-11-19)
  9 | 
 10 | ### 🚀 Features
 11 | 
 12 | - add GitHub Copilot instructions and specialized agents ([3ba709f](https://github.com/tosin2013/documcp/commit/3ba709f2e209ae603f0142fa7f55a1d486f67829))
 13 | - add MCP prompts and resources for documentation freshness tracking ([2820c0e](https://github.com/tosin2013/documcp/commit/2820c0ed8fe35627e2434f78d2c172a7cdfa7370))
 14 | - implement documentation freshness tracking with KG integration and fix js-yaml vulnerability ([978aa5a](https://github.com/tosin2013/documcp/commit/978aa5a7f84049d2a1d8b2da8a30e53a7d3fbf99))
 15 | - improve branch coverage to 81.41%, add KG efficiency improvements ([e0f9641](https://github.com/tosin2013/documcp/commit/e0f96419a3b9566677e5defa54df24f0607371ae))
 16 | 
 17 | ### 🐛 Bug Fixes
 18 | 
 19 | - correct sidebar reference for mcp-resource-pattern-redesign ([1168821](https://github.com/tosin2013/documcp/commit/1168821ab933ab84c4d66dcfce1caa7c15e16765)), closes [#19519888749](https://github.com/tosin2013/documcp/issues/19519888749)
 20 | - resolve KG storage issues and test failures, improve branch coverage to 81.32% ([9fd72d5](https://github.com/tosin2013/documcp/commit/9fd72d57e67faf370ac9ab13a9aa3bef0e81be49))
 21 | - update deploy-docs workflow from Jekyll to Docusaurus ([fdc363a](https://github.com/tosin2013/documcp/commit/fdc363a2af6e4f1bf4155210d4e3e2fe62304236)), closes [#19519733512](https://github.com/tosin2013/documcp/issues/19519733512)
 22 | - upgrade GitHub Actions to resolve deprecation warnings ([67fea5d](https://github.com/tosin2013/documcp/commit/67fea5ddf6e1a09907019a3c54edaca39f882936))
 23 | 
 24 | ### [0.5.1](https://github.com/tosin2013/documcp/compare/v0.5.0...v0.5.1) (2025-11-18)
 25 | 
 26 | ### 📚 Documentation
 27 | 
 28 | - comprehensive documentation validation and updates ([abc8bc5](https://github.com/tosin2013/documcp/commit/abc8bc5d6ac537f27ed83e690a99a3c668356bcd))
 29 | 
 30 | ### 🐛 Bug Fixes
 31 | 
 32 | - add forceExit to Jest config for CI to prevent worker process hangs ([c5dada6](https://github.com/tosin2013/documcp/commit/c5dada607907adb6a04536accb50704435bb2516)), closes [#216](https://github.com/tosin2013/documcp/issues/216)
 33 | - remove ADR-010 from sidebar to fix Docker build ([1ce448f](https://github.com/tosin2013/documcp/commit/1ce448ff69e9184f3a48ac9f0143aabb134d7205))
 34 | - resolve GitHub Actions build failure ([a1b6b23](https://github.com/tosin2013/documcp/commit/a1b6b23e723f29b829c369feea386ebea601940a))
 35 | - update dependencies and adjust security audit workflow ([033a4f3](https://github.com/tosin2013/documcp/commit/033a4f340af903ceb84610531f3d1817964e91b9))
 36 | 
 37 | ## [0.5.0](https://github.com/tosin2013/documcp/compare/v0.4.1...v0.5.0) (2025-10-12)
 38 | 
 39 | ### ⚠ BREAKING CHANGES
 40 | 
 41 | - GitHub Actions workflow now requires Docusaurus instead of Jekyll
 42 |   Fixes: Deprecated artifact actions error
 43 |   Closes: Documentation completeness gaps
 44 | 
 45 | ### ♻️ Code Refactoring
 46 | 
 47 | - implement MCP best practices and resource pattern redesign ([e3de334](https://github.com/tosin2013/documcp/commit/e3de334336580dc1e2e8f0e2302cc7f903a58b7d))
 48 | 
 49 | ### 🐛 Bug Fixes
 50 | 
 51 | - ensure memory storage directory exists before file operations ([19961a1](https://github.com/tosin2013/documcp/commit/19961a1bb017f90931fff8957fd040723a5d0810))
 52 | - resolve sitemap test failures and type issues ([232ce57](https://github.com/tosin2013/documcp/commit/232ce57ae1d1d0537122db6fa46e2614416ad929))
 53 | 
 54 | ### 🚀 Features
 55 | 
 56 | - add generate_llm_context tool for dynamic LLM reference generation ([c8e3282](https://github.com/tosin2013/documcp/commit/c8e32823e2eb5f048b98b82f2840e1b4dee61094))
 57 | - enhance ListRoots to auto-detect and list documentation directories ([d432c4c](https://github.com/tosin2013/documcp/commit/d432c4c0bf74f1944d142c24c074365c67e3bdd5))
 58 | - implement MCP Phase 1 - progress notifications and logging support ([7d0ceeb](https://github.com/tosin2013/documcp/commit/7d0ceeb5e74611ae0f8276c63fed61e58ad16789))
 59 | - implement MCP Phase 2 - roots permission system ([ba307af](https://github.com/tosin2013/documcp/commit/ba307afc3fb186db699f10d0fccf6d6935ceee4d))
 60 | - implement Phase 3 code-to-docs synchronization and sitemap.xml management ([bbde81b](https://github.com/tosin2013/documcp/commit/bbde81be27938bacd05f1c30765e673e8679e6c7))
 61 | - integrate AST analyzer into kg-code-integration for enhanced code parsing ([ef47894](https://github.com/tosin2013/documcp/commit/ef478940bf50616c672201a9c7720a7054eb0456))
 62 | 
 63 | ### 📚 Documentation
 64 | 
 65 | - comprehensive documentation improvements and pipeline fixes ([be64fd2](https://github.com/tosin2013/documcp/commit/be64fd2ffbe277b87afd9bd214ee1c4d4d54b224))
 66 | 
 67 | ### [0.4.1](https://github.com/tosin2013/documcp/compare/v0.4.0...v0.4.1) (2025-10-04)
 68 | 
 69 | ### 📚 Documentation
 70 | 
 71 | - add release workflow fixes documentation ([44b8bc9](https://github.com/tosin2013/documcp/commit/44b8bc96adcedaeff70f5bdea0a8b0c7a49f1e52))
 72 | 
 73 | ### 🚀 Features
 74 | 
 75 | - add Playwright testing integration and knowledge graph enhancements ([39dc058](https://github.com/tosin2013/documcp/commit/39dc058842dfcf2195ac71d0edc7d1f14077cb84))
 76 | 
 77 | ### 🐛 Bug Fixes
 78 | 
 79 | - add cleanup step to CI workflow to prevent corrupted memory files ([b07692d](https://github.com/tosin2013/documcp/commit/b07692d41b6c57e4e0b8d50c84f886bd77f86adf))
 80 | - correct workflow to build Docusaurus instead of Jekyll ([246302c](https://github.com/tosin2013/documcp/commit/246302c16b779d4caa38980496d2d211e8a1f2cd))
 81 | - improve GitHub Actions release workflow for future releases ([f83e930](https://github.com/tosin2013/documcp/commit/f83e9308db3b7be4afc4d0afebb0d782269b7df8))
 82 | - remove problematic setup-playwright-tests test file ([adb20f2](https://github.com/tosin2013/documcp/commit/adb20f2f09dd58143d12767ba9da92a686ca4237)), closes [#18245578186](https://github.com/tosin2013/documcp/issues/18245578186)
 83 | - update deprecated GitHub Actions in deploy-docs workflow ([dc877d7](https://github.com/tosin2013/documcp/commit/dc877d748429ec1ccbe9873449f56315b3bae14b)), closes [#18246024667](https://github.com/tosin2013/documcp/issues/18246024667)
 84 | 
 85 | ## [0.4.0](https://github.com/tosin2013/documcp/compare/v0.3.4...v0.4.0) (2025-10-02)
 86 | 
 87 | ### 🚀 Features
 88 | 
 89 | - Complete API documentation and user onboarding system ([7e7944e](https://github.com/tosin2013/documcp/commit/7e7944e65d576d2531c627560288d61ae88717d1))
 90 | - implement Phase 2 Intelligence & Learning System ([26b3370](https://github.com/tosin2013/documcp/commit/26b3370e64796a6f02534b6e6a9170043edc0a0a))
 91 | - integrate Release Drafter for automated release notes ([d06d88a](https://github.com/tosin2013/documcp/commit/d06d88a116ee56f42c3a8bcd8adc58220fce9b95))
 92 | 
 93 | ### 🐛 Bug Fixes
 94 | 
 95 | - adjust coverage threshold for kg-storage error handling ([0b3e121](https://github.com/tosin2013/documcp/commit/0b3e1210e778aa2b7a9b05e7d64403076da8eaaa))
 96 | - resolve GitHub Actions workflow build failures ([0baddff](https://github.com/tosin2013/documcp/commit/0baddff738519ca598555af4c32ab582394d98b0))
 97 | - resolve Phase 2.1 edge case test failures with nuanced logic ([52d1f32](https://github.com/tosin2013/documcp/commit/52d1f32c4c4f26eba9b5b0894cc61b7637349f26))
 98 | - resolve Phase 2.1 test failures with comprehensive fixes ([736f104](https://github.com/tosin2013/documcp/commit/736f1049c1ac1b9deb61d63d7f69ff970e4ccb49))
 99 | - resolve pre-commit shellcheck and prettier issues ([54b90bf](https://github.com/tosin2013/documcp/commit/54b90bf9753ae018ed7983c6e703415f35f71156))
100 | - update Docusaurus sidebar configuration with correct document IDs ([b9dcd99](https://github.com/tosin2013/documcp/commit/b9dcd99fea130a69cfef205b20fa6ca0ee9b143a))
101 | - update GitHub Actions to latest versions to resolve deprecated artifact actions ([fabbbf3](https://github.com/tosin2013/documcp/commit/fabbbf3a22fcc07b546f383c5ff67570adeab9e3))
102 | - update GitHub Actions to use latest artifact actions ([37bdda0](https://github.com/tosin2013/documcp/commit/37bdda01ef3cdb29ab805cfcecb98067846bfa52))
103 | - update GitHub Pages deployment from Jekyll to Docusaurus ([09d7133](https://github.com/tosin2013/documcp/commit/09d7133580c228b5cb0615228e3e7946b6e6889d))
104 | - update GitHub Pages deployment workflow for Docusaurus ([7623671](https://github.com/tosin2013/documcp/commit/7623671d72f7fbc088dba9923f45eb01265278a1))
105 | 
106 | ### [0.3.4](https://github.com/tosin2013/documcp/compare/v0.3.3...v0.3.4) (2025-09-18)
107 | 
108 | ### 🐛 Bug Fixes
109 | 
110 | - exclude remaining experimental memory files from coverage ([6c436b0](https://github.com/tosin2013/documcp/commit/6c436b018d0e072f25058617fe728b39279b51fc))
111 | 
112 | ### 🚀 Features
113 | 
114 | - achieve 90%+ coverage by focusing on core functionality ([561b8a5](https://github.com/tosin2013/documcp/commit/561b8a56a14ddc39387fce35a1efd2ad0c2983bc))
115 | 
116 | ### [0.3.3](https://github.com/tosin2013/documcp/compare/v0.3.2...v0.3.3) (2025-09-18)
117 | 
118 | ### 🚀 Features
119 | 
120 | - achieve 85%+ test coverage with comprehensive test suite ([d607514](https://github.com/tosin2013/documcp/commit/d60751449d9fdc431f4c25d1465ab8731c31d3d9))
121 | - add comprehensive pre-commit hooks configuration ([46e71ee](https://github.com/tosin2013/documcp/commit/46e71eec6f26c8e8b560480ec75e7f8c300ec9ae))
122 | - comprehensive documentation updates with memory-enhanced capabilities ([9b13be9](https://github.com/tosin2013/documcp/commit/9b13be938b11cafee151a071b7406d5d6fb32366))
123 | - configure project-local storage with startup visibility ([dfe60f0](https://github.com/tosin2013/documcp/commit/dfe60f0afa4073d4e1b05a9cc569a7ad203a3716))
124 | - implement complete MCP prompts and resources system (ADR-007) ([1c9b5c2](https://github.com/tosin2013/documcp/commit/1c9b5c2cdaf41b793ae0c956f5de59f102cf35de))
125 | - implement comprehensive memory system with advanced AI capabilities ([e4c9d06](https://github.com/tosin2013/documcp/commit/e4c9d0608037bc6f2ff239cd2107c77972c4eaa9)), closes [#45-54](https://github.com/tosin2013/documcp/issues/45-54) [#45-46](https://github.com/tosin2013/documcp/issues/45-46) [#47-48](https://github.com/tosin2013/documcp/issues/47-48) [#49-50](https://github.com/tosin2013/documcp/issues/49-50) [#51-52](https://github.com/tosin2013/documcp/issues/51-52) [#53-54](https://github.com/tosin2013/documcp/issues/53-54)
126 | - implement Docusaurus documentation deployment with GitHub Actions ([7b78e7b](https://github.com/tosin2013/documcp/commit/7b78e7b80deb9fb8f074c0209bd1c88e539cb329))
127 | - implement missing memory tool handlers for DocuMCP ([576bab5](https://github.com/tosin2013/documcp/commit/576bab50545b9eb57b8c2a74e50b0c555bcb3c80))
128 | 
129 | ### ♻️ Code Refactoring
130 | 
131 | - simplify documentation deployment and add GitHub Actions linting ([6996c55](https://github.com/tosin2013/documcp/commit/6996c553d35a1c7cbd473c6150a8994e00a0526c))
132 | 
133 | ### 🐛 Bug Fixes
134 | 
135 | - correct Dockerfile syntax for heredocs ([7d3556d](https://github.com/tosin2013/documcp/commit/7d3556d783b9f4bb251d8c47ac8f3aed441b1764))
136 | - deploy Docusaurus documentation instead of Jekyll ([48400ae](https://github.com/tosin2013/documcp/commit/48400ae40f2a77bb9c8e446a9db3deb726a1e252))
137 | - **generate-config:** correct Docusaurus file paths for test compatibility ([72522b4](https://github.com/tosin2013/documcp/commit/72522b4dab07ab9f96454a32c81599119b09cfe3))
138 | - improve error handling test for cross-environment compatibility ([676e1da](https://github.com/tosin2013/documcp/commit/676e1dafdd2cc87a267591d5c244252efdf10222))
139 | - remove invalid exclude field from Docusaurus config ([ebac637](https://github.com/tosin2013/documcp/commit/ebac6376dfe15ef76f688e42a86c9b4e01391316))
140 | - resolve analysis ID retrieval issues across DocuMCP tools ([37610d0](https://github.com/tosin2013/documcp/commit/37610d0c79b1e8d97dad3a87746a7533a1f27740))
141 | - resolve analysis ID retrieval issues across DocuMCP tools ([1f141d4](https://github.com/tosin2013/documcp/commit/1f141d4de0fa97fecee27a401d7870e13b42a630))
142 | - resolve critical memory system failures and improve functionality ([9d009dc](https://github.com/tosin2013/documcp/commit/9d009dcf8cfaa721d6163546bc919bc318e8a1ee))
143 | - resolve ESLint errors in memory system implementation ([a500719](https://github.com/tosin2013/documcp/commit/a50071901f7ec05b4ae2fa464ec1d38feb8f670d))
144 | - resolve ESLint unused variable errors while preserving memory functionality ([3412abe](https://github.com/tosin2013/documcp/commit/3412abe08c44766660388d9fab438a2221544eb5))
145 | - resolve memory system import/export issues and performance bottlenecks ([7164e3d](https://github.com/tosin2013/documcp/commit/7164e3dbe00cac5d1e82d9bea79ae6ced71e2ce5))
146 | - resolve remaining TypeScript compilation errors ([3674069](https://github.com/tosin2013/documcp/commit/3674069cf722f5bc4060af999ad3a2f1480301a2))
147 | - resolve test failures and improve test reliability to 99.3% ([56f9bc8](https://github.com/tosin2013/documcp/commit/56f9bc842a19f7841b8a5b508daf5c8f58c0ec76))
148 | - resolve test failures and restore MCP server functionality ([0755bd3](https://github.com/tosin2013/documcp/commit/0755bd3f4398d172ee42e571971755d8a2779412))
149 | - resolve TypeScript build errors and add package.json protection ([315a601](https://github.com/tosin2013/documcp/commit/315a601d5375142c2f4dc15d271c1088c8a8608c))
150 | - resolve TypeScript compilation errors from ESLint fixes ([0f628f7](https://github.com/tosin2013/documcp/commit/0f628f7f8e3788a658432dd4983be8b063ccdd08))
151 | - resolve TypeScript compilation errors in memory system tests ([47d9afe](https://github.com/tosin2013/documcp/commit/47d9afe238f3e0723c813b6d4ef640c22c3e1659))
152 | - resolve TypeScript compilation errors in memory system tests ([3003d0f](https://github.com/tosin2013/documcp/commit/3003d0f608f7b70d35dbddf14500fa5d91774e91))
153 | - restore Docusaurus workflow and remove npm cache dependency ([bb2bc85](https://github.com/tosin2013/documcp/commit/bb2bc8518a00ea4a4807f7de1956956bcb4af74e))
154 | - update GitHub Actions workflow to use Docker-based Docusaurus build ([65deb79](https://github.com/tosin2013/documcp/commit/65deb79f7e8caf8accd99183e7f91ec89b94261f))
155 | - use Docker-based build for GitHub Actions documentation deployment ([1777268](https://github.com/tosin2013/documcp/commit/1777268ca82d8471480e6c17d4e1fb80fc45dcd4))
156 | - use npm install instead of npm ci for dynamic package.json ([d4c9d5b](https://github.com/tosin2013/documcp/commit/d4c9d5b584b26868203badbf7e01d90cac04f02c))
157 | 
158 | ### [0.3.2](https://github.com/tosin2013/documcp/compare/v0.3.1...v0.3.2) (2025-09-11)
159 | 
160 | ### 🐛 Bug Fixes
161 | 
162 | - resolve documentation inconsistencies and improve type safety ([eeb5dde](https://github.com/tosin2013/documcp/commit/eeb5dde09885fdf94dd5fd91a31e7aa6dc157084))
163 | - update deploy-docs workflow from Jekyll to Docusaurus ([26b4a30](https://github.com/tosin2013/documcp/commit/26b4a307dc2c558d15008e0f2624645b7d4b1a08))
164 | - update deprecated GitHub Actions to latest versions ([e219410](https://github.com/tosin2013/documcp/commit/e2194109170fbb6d56513bbe8b4d93e950b98da9))
165 | 
166 | ### [0.3.1](https://github.com/tosin2013/documcp/compare/v0.3.0...v0.3.1) (2025-09-11)
167 | 
168 | ## [0.3.0](https://github.com/tosin2013/documcp/compare/v0.2.4...v0.3.0) (2025-09-11)
169 | 
170 | ### 🐛 Bug Fixes
171 | 
172 | - add error handling for package.json parsing in project context analyzer ([0a5a3e6](https://github.com/tosin2013/documcp/commit/0a5a3e6d2802397d83bf87483a083b51fe3a1a8c))
173 | - disable strict ESLint rules to resolve CI pipeline failures on main branch ([5a1dda4](https://github.com/tosin2013/documcp/commit/5a1dda4870472e074733b597ab3f0325a8c65d1d))
174 | - regenerate package-lock.json to resolve CodeQL workflow failure ([613e6c0](https://github.com/tosin2013/documcp/commit/613e6c0f4319ee244e5037f1036b86085e97201a)), closes [#25](https://github.com/tosin2013/documcp/issues/25)
175 | - resolve all failing test cases in optimize-readme.test.ts ([7353338](https://github.com/tosin2013/documcp/commit/7353338b33a5a98f6f0f87bbc090f068d38430fb))
176 | - Resolve ESLint errors in generate-technical-writer-prompts.ts ([5a176f6](https://github.com/tosin2013/documcp/commit/5a176f672e1556450383a03c4d0f0475ca92e25d))
177 | - resolve ESLint errors in README Technical Writer tools ([68810b0](https://github.com/tosin2013/documcp/commit/68810b0ceba74f541968f51ac6bc3ec6b8524cad))
178 | - Resolve ESLint errors in validate-readme-checklist.ts ([0b3beab](https://github.com/tosin2013/documcp/commit/0b3beab437802b8c1393759b96ffd907683923b2))
179 | - Update index.ts to use new Diataxis-aligned prompt API ([28dc2c0](https://github.com/tosin2013/documcp/commit/28dc2c0e727aa90219ae32f2b2036c2f9b206b3e))
180 | 
181 | ### 🚀 Features
182 | 
183 | - Achieve 85%+ branch coverage for critical DocuMCP tools ([0111a1b](https://github.com/tosin2013/documcp/commit/0111a1b3aae09a27ab9db236ec1acfbe636d3361))
184 | - add comprehensive technical writer prompts system ([7509f91](https://github.com/tosin2013/documcp/commit/7509f91de043237a528864f4b11cb485b0b2c03a))
185 | - add Dependabot config for Docusaurus documentation dependencies and security updates ([16fbee7](https://github.com/tosin2013/documcp/commit/16fbee7fad535e4b4cc4960a88daf3062add19ba))
186 | - Add README template generator and checklist validator tools ([4899e12](https://github.com/tosin2013/documcp/commit/4899e1217cd1fe60246f23c4d43731cc6ecbb0e6)), closes [#11](https://github.com/tosin2013/documcp/issues/11)
187 | - Implement Diataxis-aligned technical writer prompts ([f32558a](https://github.com/tosin2013/documcp/commit/f32558a031a571579fb02da64f3e1e3bf8518664))
188 | - implement README Technical Writer MCP tools ([728da0a](https://github.com/tosin2013/documcp/commit/728da0a21ec586b5f8361337edf42fec79dc70d0)), closes [#10](https://github.com/tosin2013/documcp/issues/10)
189 | - improve validate-content.ts test coverage from 79.31% to 83.15% ([a51c0a7](https://github.com/tosin2013/documcp/commit/a51c0a7f1e7232db99d444fbe94ea7a74ec04ece)), closes [#7](https://github.com/tosin2013/documcp/issues/7)
190 | - integrate main branch updates and fix merge conflicts ([6d30ddf](https://github.com/tosin2013/documcp/commit/6d30ddf63ccca01f67b90ecfef2fb438a16a369e))
191 | 
192 | ## [0.2.3] - 2025-08-24
193 | 
194 | ### Fixed
195 | 
196 | - Added missing `bin` field to package.json to enable npx execution (Fixes #3)
197 | - Made dist/index.js executable with proper permissions
198 | 
199 | ### Added
200 | 
201 | - CLI executable support via `npx documcp` command
202 | - Direct command-line invocation capability
203 | 
204 | ## [0.2.2] - 2025-08-24
205 | 
206 | ### Added
207 | 
208 | - Version display and badges to documentation pages
209 | - Enhanced documentation structure
210 | 
211 | ## [0.2.1] - 2025-08-24
212 | 
213 | ### Changed
214 | 
215 | - Minor documentation updates
216 | 
217 | ## [0.2.0] - 2025-08-24
218 | 
219 | ### Changed
220 | 
221 | - **BREAKING**: Updated Node.js requirements from >=18.0.0 to >=20.0.0
222 | - Updated CI/CD pipeline to test Node.js 20.x and 22.x (removed 18.x)
223 | - Updated all generated GitHub Actions workflows to use Node.js 20
224 | - Updated Docker base images from node:18 to node:20 in test fixtures
225 | - Updated @types/node references from ^18.0.0 to ^20.0.0
226 | - Enhanced content validation capabilities and improved documentation structure
227 | - Improved Docusaurus deployment build path configuration
228 | - Refactored local deployment tests with better input validation and response structure
229 | 
230 | ### Added
231 | 
232 | - Created .nvmrc file specifying Node.js 22 for development
233 | - Added Node.js version requirements to README.md
234 | - Comprehensive test suite additions with improved coverage
235 | - Enhanced error handling and response structure improvements
236 | - Content generation methods with consistent parameter naming
237 | 
238 | ### Fixed
239 | 
240 | - Critical linting errors resolved (lexical declarations in case blocks)
241 | - Unused variable cleanup in validation tools
242 | - Correct build path for Docusaurus deployment
243 | 
244 | ### Technical Details
245 | 
246 | - CI/CD pipeline now tests compatibility with Node.js 20.x and 22.x
247 | - All generated deployment workflows use Node.js 20 by default
248 | - Test coverage maintained at 82.76% (exceeds 80% requirement)
249 | - All builds and tests pass with updated Node.js requirements
250 | - 161 tests passing across 13 test suites
251 | - Enhanced documentation gap detection and content validation
252 | 
253 | ## [0.1.0] - 2025-08-22
254 | 
255 | ### Added
256 | 
257 | - Initial release of DocuMCP
258 | - Complete MCP Server implementation with 6 core tools
259 | - Comprehensive CI/CD pipeline with GitHub Actions
260 | - 82% test coverage (exceeds 80% requirement)
261 | - Performance benchmarking system (PERF-001 compliant)
262 | - Security scanning and dependency review
263 | - Automated release and deployment workflows
264 | 
```

--------------------------------------------------------------------------------
/tests/performance/memory-stress-testing.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Memory System Stress Testing
  3 |  * Tests memory system under extreme conditions and edge cases
  4 |  * Part of Issue #57 - Memory System Performance and Load Testing
  5 |  */
  6 | 
  7 | import { promises as fs } from "fs";
  8 | import path from "path";
  9 | import os from "os";
 10 | import { performance } from "perf_hooks";
 11 | import { MemoryManager } from "../../src/memory/manager.js";
 12 | import { JSONLStorage } from "../../src/memory/storage.js";
 13 | 
 14 | describe("Memory System Stress Testing", () => {
 15 |   let tempDir: string;
 16 |   let memoryManager: MemoryManager;
 17 | 
 18 |   beforeEach(async () => {
 19 |     tempDir = path.join(
 20 |       os.tmpdir(),
 21 |       `memory-stress-test-${Date.now()}-${Math.random()
 22 |         .toString(36)
 23 |         .slice(2, 11)}`,
 24 |     );
 25 |     await fs.mkdir(tempDir, { recursive: true });
 26 | 
 27 |     memoryManager = new MemoryManager(tempDir);
 28 |     await memoryManager.initialize();
 29 |   });
 30 | 
 31 |   afterEach(async () => {
 32 |     try {
 33 |       await fs.rm(tempDir, { recursive: true, force: true });
 34 |     } catch (error) {
 35 |       // Ignore cleanup errors
 36 |     }
 37 |   });
 38 | 
 39 |   describe("High Volume Stress Tests", () => {
 40 |     test("should handle extremely large datasets", async () => {
 41 |       memoryManager.setContext({ projectId: "extreme-volume-test" });
 42 | 
 43 |       const largeDatasetSize = 10000; // 10K memories
 44 |       const batchSize = 1000;
 45 |       const startTime = performance.now();
 46 | 
 47 |       console.log(
 48 |         `Starting extreme volume test with ${largeDatasetSize} memories...`,
 49 |       );
 50 | 
 51 |       let processedCount = 0;
 52 |       for (let batch = 0; batch < largeDatasetSize / batchSize; batch++) {
 53 |         const batchData = Array.from({ length: batchSize }, (_, i) => ({
 54 |           projectId: "extreme-volume-test",
 55 |           batch,
 56 |           index: i,
 57 |           globalIndex: processedCount + i,
 58 |           data: `stress-test-data-${processedCount + i}`,
 59 |           timestamp: new Date().toISOString(),
 60 |           metadata: {
 61 |             batch,
 62 |             processingOrder: processedCount + i,
 63 |             complexity: i % 5,
 64 |           },
 65 |         }));
 66 | 
 67 |         // Process batch
 68 |         const batchPromises = batchData.map((data) =>
 69 |           memoryManager.remember("analysis", data),
 70 |         );
 71 | 
 72 |         await Promise.all(batchPromises);
 73 |         processedCount += batchSize;
 74 | 
 75 |         // Progress update
 76 |         if (batch % 2 === 0) {
 77 |           const elapsed = performance.now() - startTime;
 78 |           const rate = processedCount / (elapsed / 1000);
 79 |           console.log(
 80 |             `Processed ${processedCount}/${largeDatasetSize} memories (${rate.toFixed(
 81 |               0,
 82 |             )} memories/sec)`,
 83 |           );
 84 |         }
 85 | 
 86 |         // Verify memory usage doesn't spiral out of control
 87 |         const memUsage = process.memoryUsage();
 88 |         expect(memUsage.heapUsed).toBeLessThan(500 * 1024 * 1024); // Less than 500MB
 89 |       }
 90 | 
 91 |       const endTime = performance.now();
 92 |       const totalTime = endTime - startTime;
 93 |       const averageRate = largeDatasetSize / (totalTime / 1000);
 94 | 
 95 |       console.log(
 96 |         `Completed ${largeDatasetSize} memories in ${(totalTime / 1000).toFixed(
 97 |           2,
 98 |         )}s (${averageRate.toFixed(0)} memories/sec)`,
 99 |       );
100 | 
101 |       // Verify all memories were stored
102 |       const allMemories = await memoryManager.search({
103 |         projectId: "extreme-volume-test",
104 |       });
105 |       expect(allMemories.length).toBe(largeDatasetSize);
106 | 
107 |       // Performance expectations
108 |       expect(totalTime).toBeLessThan(300000); // Should complete within 5 minutes
109 |       expect(averageRate).toBeGreaterThan(30); // At least 30 memories per second
110 |     }, 360000); // 6 minute timeout
111 | 
112 |     test("should handle rapid burst operations", async () => {
113 |       memoryManager.setContext({ projectId: "burst-test" });
114 | 
115 |       const burstSize = 1000;
116 |       const burstCount = 5;
117 |       const burstResults: number[] = [];
118 | 
119 |       console.log(
120 |         `Testing ${burstCount} bursts of ${burstSize} operations each...`,
121 |       );
122 | 
123 |       for (let burst = 0; burst < burstCount; burst++) {
124 |         const burstStartTime = performance.now();
125 | 
126 |         // Create burst data
127 |         const burstData = Array.from({ length: burstSize }, (_, i) => ({
128 |           projectId: "burst-test",
129 |           burst,
130 |           index: i,
131 |           data: `burst-${burst}-item-${i}`,
132 |           timestamp: new Date().toISOString(),
133 |         }));
134 | 
135 |         // Execute burst
136 |         const burstPromises = burstData.map((data) =>
137 |           memoryManager.remember("analysis", data),
138 |         );
139 | 
140 |         await Promise.all(burstPromises);
141 | 
142 |         const burstEndTime = performance.now();
143 |         const burstTime = burstEndTime - burstStartTime;
144 |         burstResults.push(burstTime);
145 | 
146 |         console.log(
147 |           `Burst ${burst + 1}: ${burstTime.toFixed(2)}ms (${(
148 |             burstSize /
149 |             (burstTime / 1000)
150 |           ).toFixed(0)} ops/sec)`,
151 |         );
152 | 
153 |         // Small delay between bursts
154 |         await new Promise((resolve) => setTimeout(resolve, 100));
155 |       }
156 | 
157 |       // Analyze burst performance consistency
158 |       const avgBurstTime =
159 |         burstResults.reduce((sum, time) => sum + time, 0) / burstResults.length;
160 |       const maxBurstTime = Math.max(...burstResults);
161 |       const minBurstTime = Math.min(...burstResults);
162 | 
163 |       const performanceVariation = (maxBurstTime - minBurstTime) / avgBurstTime;
164 | 
165 |       console.log(
166 |         `Burst analysis: Avg ${avgBurstTime.toFixed(
167 |           2,
168 |         )}ms, Min ${minBurstTime.toFixed(2)}ms, Max ${maxBurstTime.toFixed(
169 |           2,
170 |         )}ms`,
171 |       );
172 |       console.log(
173 |         `Performance variation: ${(performanceVariation * 100).toFixed(1)}%`,
174 |       );
175 | 
176 |       // Performance should be consistent across bursts
177 |       expect(avgBurstTime).toBeLessThan(10000); // Average burst < 10 seconds
178 |       expect(performanceVariation).toBeLessThan(3); // Less than 300% variation
179 |     });
180 |   });
181 | 
182 |   describe("Resource Exhaustion Tests", () => {
183 |     test("should handle memory pressure gracefully", async () => {
184 |       memoryManager.setContext({ projectId: "memory-pressure-test" });
185 | 
186 |       const largeItemSize = 1024 * 1024; // 1MB per item
187 |       const maxItems = 100; // 100MB of data
188 |       const memorySnapshots: Array<{
189 |         count: number;
190 |         heapUsed: number;
191 |         time: number;
192 |       }> = [];
193 | 
194 |       console.log("Testing memory pressure handling...");
195 | 
196 |       const startMemory = process.memoryUsage();
197 |       const startTime = performance.now();
198 | 
199 |       for (let i = 0; i < maxItems; i++) {
200 |         const largeData = {
201 |           projectId: "memory-pressure-test",
202 |           index: i,
203 |           payload: "x".repeat(largeItemSize),
204 |           timestamp: new Date().toISOString(),
205 |         };
206 | 
207 |         await memoryManager.remember("analysis", largeData);
208 | 
209 |         if (i % 10 === 0) {
210 |           const currentMemory = process.memoryUsage();
211 |           memorySnapshots.push({
212 |             count: i + 1,
213 |             heapUsed: currentMemory.heapUsed - startMemory.heapUsed,
214 |             time: performance.now() - startTime,
215 |           });
216 | 
217 |           // Force garbage collection if available
218 |           if (global.gc) {
219 |             global.gc();
220 |           }
221 |         }
222 | 
223 |         // Check for memory leaks - memory shouldn't grow unbounded
224 |         const currentMemory = process.memoryUsage();
225 |         const memoryUsed = currentMemory.heapUsed - startMemory.heapUsed;
226 | 
227 |         // Allow for reasonable memory growth but prevent runaway usage
228 |         const expectedMaxMemory = (i + 1) * largeItemSize * 2; // 2x overhead allowance
229 |         expect(memoryUsed).toBeLessThan(
230 |           Math.max(expectedMaxMemory, 200 * 1024 * 1024),
231 |         ); // At least 200MB of headroom
232 |       }
233 | 
234 |       const finalSnapshot = memorySnapshots[memorySnapshots.length - 1];
235 |       console.log(
236 |         `Memory pressure test: ${finalSnapshot.count} items, ${(
237 |           finalSnapshot.heapUsed /
238 |           1024 /
239 |           1024
240 |         ).toFixed(2)}MB used`,
241 |       );
242 | 
243 |       // Verify data integrity under pressure
244 |       const allMemories = await memoryManager.search({
245 |         projectId: "memory-pressure-test",
246 |       });
247 |       expect(allMemories.length).toBe(maxItems);
248 |     });
249 | 
250 |     test("should handle storage device pressure", async () => {
251 |       memoryManager.setContext({ projectId: "storage-pressure-test" });
252 | 
253 |       // Create many small files to stress the storage system
254 |       const fileCount = 1000;
255 |       const operationResults: boolean[] = [];
256 | 
257 |       console.log(`Testing storage pressure with ${fileCount} operations...`);
258 | 
259 |       for (let i = 0; i < fileCount; i++) {
260 |         try {
261 |           const data = {
262 |             projectId: "storage-pressure-test",
263 |             index: i,
264 |             data: `storage-pressure-item-${i}`,
265 |             timestamp: new Date().toISOString(),
266 |           };
267 | 
268 |           await memoryManager.remember("analysis", data);
269 |           operationResults.push(true);
270 | 
271 |           if (i % 100 === 0) {
272 |             console.log(`Storage operations completed: ${i + 1}/${fileCount}`);
273 |           }
274 |         } catch (error) {
275 |           operationResults.push(false);
276 |           console.error(`Storage operation ${i} failed:`, error);
277 |         }
278 |       }
279 | 
280 |       const successRate =
281 |         operationResults.filter((result) => result).length /
282 |         operationResults.length;
283 |       console.log(
284 |         `Storage pressure test: ${(successRate * 100).toFixed(
285 |           1,
286 |         )}% success rate`,
287 |       );
288 | 
289 |       // Should handle most operations successfully
290 |       expect(successRate).toBeGreaterThan(0.95); // At least 95% success rate
291 | 
292 |       // Verify storage integrity
293 |       const storedMemories = await memoryManager.search({
294 |         projectId: "storage-pressure-test",
295 |       });
296 |       expect(storedMemories.length).toBeGreaterThan(fileCount * 0.9); // More than 90% stored
297 |     });
298 |   });
299 | 
300 |   describe("Edge Case Stress Tests", () => {
301 |     test("should handle extremely large individual memories", async () => {
302 |       memoryManager.setContext({ projectId: "large-individual-test" });
303 | 
304 |       const extremeSizes = [
305 |         { name: "huge", size: 5 * 1024 * 1024 }, // 5MB
306 |         { name: "massive", size: 10 * 1024 * 1024 }, // 10MB
307 |         { name: "gigantic", size: 25 * 1024 * 1024 }, // 25MB
308 |       ];
309 | 
310 |       for (const testSize of extremeSizes) {
311 |         console.log(
312 |           `Testing ${testSize.name} memory (${(
313 |             testSize.size /
314 |             1024 /
315 |             1024
316 |           ).toFixed(1)}MB)...`,
317 |         );
318 | 
319 |         const startTime = performance.now();
320 |         const largeData = {
321 |           projectId: "large-individual-test",
322 |           size: testSize.name,
323 |           payload: "x".repeat(testSize.size),
324 |           metadata: { originalSize: testSize.size },
325 |         };
326 | 
327 |         try {
328 |           const memory = await memoryManager.remember("analysis", largeData);
329 |           const createTime = performance.now() - startTime;
330 | 
331 |           // Verify storage
332 |           const readStartTime = performance.now();
333 |           const retrieved = await memoryManager.recall(memory.id);
334 |           const readTime = performance.now() - readStartTime;
335 | 
336 |           expect(retrieved).not.toBeNull();
337 |           expect(retrieved?.data.payload.length).toBe(testSize.size);
338 | 
339 |           console.log(
340 |             `${testSize.name}: Create ${createTime.toFixed(
341 |               2,
342 |             )}ms, Read ${readTime.toFixed(2)}ms`,
343 |           );
344 | 
345 |           // Performance should be reasonable even for large items
346 |           expect(createTime).toBeLessThan(30000); // 30 seconds max
347 |           expect(readTime).toBeLessThan(10000); // 10 seconds max
348 |         } catch (error) {
349 |           console.error(`Failed to handle ${testSize.name} memory:`, error);
350 |           throw error;
351 |         }
352 |       }
353 |     });
354 | 
355 |     test("should handle deeply nested data structures", async () => {
356 |       memoryManager.setContext({ projectId: "nested-structure-test" });
357 | 
358 |       // Create deeply nested object
359 |       const createNestedObject = (depth: number): any => {
360 |         if (depth === 0) {
361 |           return { value: "leaf-node", depth: 0 };
362 |         }
363 |         return {
364 |           level: depth,
365 |           children: Array.from({ length: 3 }, (_, i) => ({
366 |             id: `child-${depth}-${i}`,
367 |             data: createNestedObject(depth - 1),
368 |             metadata: { parent: depth, index: i },
369 |           })),
370 |           metadata: { depth, totalChildren: 3 },
371 |         };
372 |       };
373 | 
374 |       const testDepths = [10, 15, 20];
375 | 
376 |       for (const depth of testDepths) {
377 |         console.log(`Testing nested structure depth ${depth}...`);
378 | 
379 |         const startTime = performance.now();
380 |         const nestedData = {
381 |           projectId: "nested-structure-test",
382 |           depth,
383 |           structure: createNestedObject(depth),
384 |           metadata: { maxDepth: depth, type: "stress-test" },
385 |         };
386 | 
387 |         try {
388 |           const memory = await memoryManager.remember("analysis", nestedData);
389 |           const createTime = performance.now() - startTime;
390 | 
391 |           // Verify retrieval
392 |           const readStartTime = performance.now();
393 |           const retrieved = await memoryManager.recall(memory.id);
394 |           const readTime = performance.now() - readStartTime;
395 | 
396 |           expect(retrieved).not.toBeNull();
397 |           expect(retrieved?.data.depth).toBe(depth);
398 |           expect(retrieved?.data.structure.level).toBe(depth);
399 | 
400 |           console.log(
401 |             `Depth ${depth}: Create ${createTime.toFixed(
402 |               2,
403 |             )}ms, Read ${readTime.toFixed(2)}ms`,
404 |           );
405 | 
406 |           // Should handle complex structures efficiently
407 |           expect(createTime).toBeLessThan(5000); // 5 seconds max
408 |           expect(readTime).toBeLessThan(2000); // 2 seconds max
409 |         } catch (error) {
410 |           console.error(
411 |             `Failed to handle nested structure depth ${depth}:`,
412 |             error,
413 |           );
414 |           throw error;
415 |         }
416 |       }
417 |     });
418 | 
419 |     test("should handle rapid context switching", async () => {
420 |       const contextCount = 100;
421 |       const operationsPerContext = 10;
422 |       const totalOperations = contextCount * operationsPerContext;
423 | 
424 |       console.log(
425 |         `Testing rapid context switching: ${contextCount} contexts, ${operationsPerContext} ops each...`,
426 |       );
427 | 
428 |       const startTime = performance.now();
429 |       const results: Array<{ context: string; operationTime: number }> = [];
430 | 
431 |       for (let context = 0; context < contextCount; context++) {
432 |         const contextId = `rapid-context-${context}`;
433 | 
434 |         const contextStartTime = performance.now();
435 |         memoryManager.setContext({ projectId: contextId });
436 | 
437 |         // Perform operations in this context
438 |         const contextPromises = Array.from(
439 |           { length: operationsPerContext },
440 |           async (_, i) => {
441 |             return await memoryManager.remember("analysis", {
442 |               projectId: contextId,
443 |               contextIndex: context,
444 |               operationIndex: i,
445 |               data: `context-${context}-operation-${i}`,
446 |               timestamp: new Date().toISOString(),
447 |             });
448 |           },
449 |         );
450 | 
451 |         await Promise.all(contextPromises);
452 | 
453 |         const contextTime = performance.now() - contextStartTime;
454 |         results.push({ context: contextId, operationTime: contextTime });
455 | 
456 |         if (context % 20 === 0) {
457 |           console.log(`Completed context ${context}/${contextCount}`);
458 |         }
459 |       }
460 | 
461 |       const totalTime = performance.now() - startTime;
462 |       const avgContextTime =
463 |         results.reduce((sum, r) => sum + r.operationTime, 0) / results.length;
464 |       const totalRate = totalOperations / (totalTime / 1000);
465 | 
466 |       console.log(
467 |         `Context switching test: ${(totalTime / 1000).toFixed(
468 |           2,
469 |         )}s total, ${avgContextTime.toFixed(2)}ms avg per context`,
470 |       );
471 |       console.log(`Overall rate: ${totalRate.toFixed(0)} operations/sec`);
472 | 
473 |       // Verify all operations completed
474 |       const allMemories = await memoryManager.search("");
475 |       expect(allMemories.length).toBeGreaterThanOrEqual(totalOperations * 0.95); // Allow for 5% loss
476 | 
477 |       // Performance should remain reasonable
478 |       expect(totalTime).toBeLessThan(60000); // Complete within 1 minute
479 |       expect(totalRate).toBeGreaterThan(50); // At least 50 ops/sec overall
480 |     });
481 |   });
482 | 
483 |   describe("Failure Recovery Stress Tests", () => {
484 |     test("should recover from simulated storage failures", async () => {
485 |       memoryManager.setContext({ projectId: "storage-failure-test" });
486 | 
487 |       // Create initial data
488 |       const initialMemories = [];
489 |       for (let i = 0; i < 100; i++) {
490 |         const memory = await memoryManager.remember("analysis", {
491 |           projectId: "storage-failure-test",
492 |           index: i,
493 |           data: `initial-data-${i}`,
494 |           phase: "before-failure",
495 |         });
496 |         initialMemories.push(memory);
497 |       }
498 | 
499 |       // Simulate storage failure recovery by creating new manager instance
500 |       const recoveryManager = new MemoryManager(tempDir);
501 |       await recoveryManager.initialize();
502 | 
503 |       // Verify recovery
504 |       const recoveredMemories = await recoveryManager.search({
505 |         projectId: "storage-failure-test",
506 |       });
507 |       expect(recoveredMemories.length).toBe(100);
508 | 
509 |       // Continue operations after recovery
510 |       recoveryManager.setContext({ projectId: "storage-failure-test" });
511 |       for (let i = 0; i < 50; i++) {
512 |         await recoveryManager.remember("analysis", {
513 |           projectId: "storage-failure-test",
514 |           index: 100 + i,
515 |           data: `post-recovery-data-${i}`,
516 |           phase: "after-recovery",
517 |         });
518 |       }
519 | 
520 |       // Verify total state
521 |       const finalMemories = await recoveryManager.search({
522 |         projectId: "storage-failure-test",
523 |       });
524 |       expect(finalMemories.length).toBe(150);
525 | 
526 |       const beforeFailure = finalMemories.filter(
527 |         (m) => m.data.phase === "before-failure",
528 |       );
529 |       const afterRecovery = finalMemories.filter(
530 |         (m) => m.data.phase === "after-recovery",
531 |       );
532 | 
533 |       expect(beforeFailure.length).toBe(100);
534 |       expect(afterRecovery.length).toBe(50);
535 | 
536 |       console.log("Storage failure recovery test completed successfully");
537 |     });
538 | 
539 |     test("should handle concurrent access corruption scenarios", async () => {
540 |       memoryManager.setContext({ projectId: "corruption-test" });
541 | 
542 |       const concurrentWorkers = 5;
543 |       const operationsPerWorker = 100;
544 |       const conflictData = Array.from(
545 |         { length: concurrentWorkers },
546 |         (_, workerId) =>
547 |           Array.from({ length: operationsPerWorker }, (_, opId) => ({
548 |             projectId: "corruption-test",
549 |             workerId,
550 |             operationId: opId,
551 |             data: `worker-${workerId}-operation-${opId}`,
552 |             timestamp: new Date().toISOString(),
553 |           })),
554 |       );
555 | 
556 |       console.log(
557 |         `Testing concurrent access with ${concurrentWorkers} workers, ${operationsPerWorker} ops each...`,
558 |       );
559 | 
560 |       // Execute concurrent operations that might cause conflicts
561 |       const workerPromises = conflictData.map(async (workerData, workerId) => {
562 |         const results = [];
563 |         for (const data of workerData) {
564 |           try {
565 |             const memory = await memoryManager.remember("analysis", data);
566 |             results.push({ success: true, id: memory.id });
567 |           } catch (error) {
568 |             results.push({ success: false, error: (error as Error).message });
569 |           }
570 |         }
571 |         return { workerId, results };
572 |       });
573 | 
574 |       const workerResults = await Promise.all(workerPromises);
575 | 
576 |       // Analyze results
577 |       let totalOperations = 0;
578 |       let successfulOperations = 0;
579 | 
580 |       workerResults.forEach(({ workerId, results }) => {
581 |         const successful = results.filter((r) => r.success).length;
582 |         totalOperations += results.length;
583 |         successfulOperations += successful;
584 | 
585 |         console.log(
586 |           `Worker ${workerId}: ${successful}/${results.length} operations successful`,
587 |         );
588 |       });
589 | 
590 |       const successRate = successfulOperations / totalOperations;
591 |       console.log(
592 |         `Overall concurrent access success rate: ${(successRate * 100).toFixed(
593 |           1,
594 |         )}%`,
595 |       );
596 | 
597 |       // Should handle most concurrent operations successfully
598 |       expect(successRate).toBeGreaterThan(0.9); // At least 90% success rate
599 | 
600 |       // Verify data integrity
601 |       const allMemories = await memoryManager.search({
602 |         projectId: "corruption-test",
603 |       });
604 |       expect(allMemories.length).toBeGreaterThanOrEqual(totalOperations * 0.85); // Allow for some conflicts
605 |     });
606 |   });
607 | });
608 | 
```
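
A note on the `global.gc` calls in the memory-pressure test above: Node only defines `global.gc` when the process is started with the `--expose-gc` flag, so under a default Jest run the guarded branch is skipped and the test relies on V8's automatic collection. Below is a minimal sketch of that guard pattern, assuming Jest inherits `NODE_OPTIONS` from the environment; the helper name is illustrative, not part of the suite.

```typescript
// Sketch only: global.gc exists solely when Node runs with --expose-gc,
// e.g. NODE_OPTIONS=--expose-gc npx jest (assumes NODE_OPTIONS is inherited).
export function tryForceGC(): boolean {
  if (typeof global.gc === "function") {
    global.gc(); // trigger a synchronous full collection
    return true;
  }
  return false; // flag not set; fall back to automatic GC
}
```

Guarding rather than requiring the flag keeps the stress suite runnable in any environment, at the cost of noisier heap snapshots when collection is left to V8.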

--------------------------------------------------------------------------------
/tests/tools/generate-technical-writer-prompts.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { generateTechnicalWriterPrompts } from "../../src/tools/generate-technical-writer-prompts.js";
  2 | import { promises as fs } from "fs";
  3 | import { join } from "path";
  4 | import { tmpdir } from "os";
  5 | 
  6 | describe("generate-technical-writer-prompts", () => {
  7 |   let testProjectPath: string;
  8 | 
  9 |   beforeEach(async () => {
 10 |     // Create temporary test project directory
 11 |     testProjectPath = join(tmpdir(), `test-project-${Date.now()}`);
 12 |     await fs.mkdir(testProjectPath, { recursive: true });
 13 |   });
 14 | 
 15 |   afterEach(async () => {
 16 |     // Clean up test directory
 17 |     try {
 18 |       await fs.rm(testProjectPath, { recursive: true, force: true });
 19 |     } catch (error) {
 20 |       // Ignore cleanup errors
 21 |     }
 22 |   });
 23 | 
 24 |   describe("Input Validation", () => {
 25 |     it("should require project_path parameter", async () => {
 26 |       const result = await generateTechnicalWriterPrompts({});
 27 | 
 28 |       expect(result.isError).toBe(true);
 29 |       expect(result.content[0].text).toContain("Required");
 30 |     });
 31 | 
 32 |     it("should accept valid context_sources", async () => {
 33 |       await fs.writeFile(
 34 |         join(testProjectPath, "package.json"),
 35 |         JSON.stringify({
 36 |           name: "test-project",
 37 |           dependencies: { react: "^18.0.0" },
 38 |         }),
 39 |       );
 40 | 
 41 |       const result = await generateTechnicalWriterPrompts({
 42 |         project_path: testProjectPath,
 43 |         context_sources: [
 44 |           "repository_analysis",
 45 |           "readme_health",
 46 |           "documentation_gaps",
 47 |         ],
 48 |       });
 49 | 
 50 |       expect(result.isError).toBe(false);
 51 |       expect(result.generation.prompts.length).toBeGreaterThan(0);
 52 |     });
 53 | 
 54 |     it("should validate audience parameter", async () => {
 55 |       await fs.writeFile(
 56 |         join(testProjectPath, "package.json"),
 57 |         JSON.stringify({
 58 |           name: "test-project",
 59 |         }),
 60 |       );
 61 | 
 62 |       const result = await generateTechnicalWriterPrompts({
 63 |         project_path: testProjectPath,
 64 |         audience: "developer",
 65 |       });
 66 | 
 67 |       expect(result.isError).toBe(false);
 68 |       expect(result.generation.contextSummary.integrationLevel).toBe(
 69 |         "comprehensive",
 70 |       );
 71 |     });
 72 |   });
 73 | 
 74 |   describe("Project Context Analysis", () => {
 75 |     it("should detect Node.js project with React", async () => {
 76 |       await fs.writeFile(
 77 |         join(testProjectPath, "package.json"),
 78 |         JSON.stringify({
 79 |           name: "test-react-app",
 80 |           dependencies: {
 81 |             react: "^18.0.0",
 82 |             "react-dom": "^18.0.0",
 83 |           },
 84 |           devDependencies: {
 85 |             typescript: "^5.0.0",
 86 |           },
 87 |         }),
 88 |       );
 89 | 
 90 |       const result = await generateTechnicalWriterPrompts({
 91 |         project_path: testProjectPath,
 92 |         context_sources: ["repository_analysis"],
 93 |       });
 94 | 
 95 |       expect(result.isError).toBe(false);
 96 |       expect(result.generation.contextSummary.projectContext.projectType).toBe(
 97 |         "web_application",
 98 |       );
 99 |       expect(
100 |         result.generation.contextSummary.projectContext.frameworks,
101 |       ).toContain("React");
102 |       expect(
103 |         result.generation.contextSummary.projectContext.languages,
104 |       ).toContain("TypeScript");
105 |       expect(
106 |         result.generation.contextSummary.projectContext.languages,
107 |       ).toContain("JavaScript");
108 |     });
109 | 
110 |     it("should detect Python project", async () => {
111 |       await fs.writeFile(
112 |         join(testProjectPath, "main.py"),
113 |         'print("Hello, World!")',
114 |       );
115 |       await fs.writeFile(
116 |         join(testProjectPath, "requirements.txt"),
117 |         "flask==2.0.0",
118 |       );
119 | 
120 |       const result = await generateTechnicalWriterPrompts({
121 |         project_path: testProjectPath,
122 |         context_sources: ["repository_analysis"],
123 |       });
124 | 
125 |       expect(result.isError).toBe(false);
126 |       expect(result.generation.contextSummary.projectContext.projectType).toBe(
127 |         "python_application",
128 |       );
129 |       expect(
130 |         result.generation.contextSummary.projectContext.languages,
131 |       ).toContain("Python");
132 |     });
133 | 
134 |     it("should detect CI/CD configuration", async () => {
135 |       await fs.mkdir(join(testProjectPath, ".github", "workflows"), {
136 |         recursive: true,
137 |       });
138 |       await fs.writeFile(
139 |         join(testProjectPath, ".github", "workflows", "ci.yml"),
140 |         "name: CI\non: [push]",
141 |       );
142 |       await fs.writeFile(
143 |         join(testProjectPath, "package.json"),
144 |         JSON.stringify({ name: "test" }),
145 |       );
146 | 
147 |       const result = await generateTechnicalWriterPrompts({
148 |         project_path: testProjectPath,
149 |         context_sources: ["repository_analysis"],
150 |       });
151 | 
152 |       expect(result.isError).toBe(false);
153 |       expect(result.generation.contextSummary.projectContext.hasCI).toBe(true);
154 |     });
155 | 
156 |     it("should detect test files", async () => {
157 |       await fs.writeFile(
158 |         join(testProjectPath, "test.js"),
159 |         'describe("test", () => {})',
160 |       );
161 |       await fs.writeFile(
162 |         join(testProjectPath, "package.json"),
163 |         JSON.stringify({ name: "test" }),
164 |       );
165 | 
166 |       const result = await generateTechnicalWriterPrompts({
167 |         project_path: testProjectPath,
168 |         context_sources: ["repository_analysis"],
169 |       });
170 | 
171 |       expect(result.isError).toBe(false);
172 |       expect(result.generation.contextSummary.projectContext.hasTests).toBe(
173 |         true,
174 |       );
175 |     });
176 |   });
177 | 
178 |   describe("Documentation Context Analysis", () => {
179 |     it("should detect existing README", async () => {
180 |       await fs.writeFile(
181 |         join(testProjectPath, "README.md"),
182 |         "# Test Project\nA test project",
183 |       );
184 |       await fs.writeFile(
185 |         join(testProjectPath, "package.json"),
186 |         JSON.stringify({ name: "test" }),
187 |       );
188 | 
189 |       const result = await generateTechnicalWriterPrompts({
190 |         project_path: testProjectPath,
191 |         context_sources: ["readme_health"],
192 |       });
193 | 
194 |       expect(result.isError).toBe(false);
195 |       expect(
196 |         result.generation.contextSummary.documentationContext.readmeExists,
197 |       ).toBe(true);
198 |     });
199 | 
200 |     it("should handle missing README", async () => {
201 |       await fs.writeFile(
202 |         join(testProjectPath, "package.json"),
203 |         JSON.stringify({ name: "test" }),
204 |       );
205 | 
206 |       const result = await generateTechnicalWriterPrompts({
207 |         project_path: testProjectPath,
208 |         context_sources: ["readme_health"],
209 |       });
210 | 
211 |       expect(result.isError).toBe(false);
212 |       expect(
213 |         result.generation.contextSummary.documentationContext.readmeExists,
214 |       ).toBe(false);
215 |     });
216 |   });
217 | 
218 |   describe("Prompt Generation", () => {
219 |     beforeEach(async () => {
220 |       await fs.writeFile(
221 |         join(testProjectPath, "package.json"),
222 |         JSON.stringify({
223 |           name: "test-project",
224 |           dependencies: { react: "^18.0.0" },
225 |         }),
226 |       );
227 |     });
228 | 
229 |     it("should generate content generation prompts", async () => {
230 |       const result = await generateTechnicalWriterPrompts({
231 |         project_path: testProjectPath,
232 |         prompt_types: ["content_generation"],
233 |       });
234 | 
235 |       expect(result.isError).toBe(false);
236 |       const contentPrompts = result.generation.prompts.filter(
237 |         (p) => p.category === "content_generation",
238 |       );
239 |       expect(contentPrompts.length).toBeGreaterThan(0);
240 |       expect(contentPrompts[0].title).toContain("Project Overview");
241 |       expect(contentPrompts[0].prompt).toContain("web_application");
242 |       expect(contentPrompts[0].prompt).toContain("React");
243 |     });
244 | 
245 |     it("should generate gap filling prompts when gaps exist", async () => {
246 |       const result = await generateTechnicalWriterPrompts({
247 |         project_path: testProjectPath,
248 |         context_sources: ["documentation_gaps"],
249 |         prompt_types: ["gap_filling"],
250 |       });
251 | 
252 |       expect(result.isError).toBe(false);
253 |       const gapPrompts = result.generation.prompts.filter(
254 |         (p) => p.category === "gap_filling",
255 |       );
256 |       expect(gapPrompts.length).toBeGreaterThan(0);
257 |       expect(
258 |         gapPrompts.some(
259 |           (p) =>
260 |             p.title.includes("installation") ||
261 |             p.title.includes("api") ||
262 |             p.title.includes("contributing"),
263 |         ),
264 |       ).toBe(true);
265 |     });
266 | 
267 |     it("should generate style improvement prompts for low health scores", async () => {
268 |       await fs.writeFile(
269 |         join(testProjectPath, "README.md"),
270 |         "# Test\nBad readme",
271 |       );
272 | 
273 |       const result = await generateTechnicalWriterPrompts({
274 |         project_path: testProjectPath,
275 |         context_sources: ["readme_health"],
276 |         prompt_types: ["style_improvement"],
277 |       });
278 | 
279 |       expect(result.isError).toBe(false);
280 |       const stylePrompts = result.generation.prompts.filter(
281 |         (p) => p.category === "style_improvement",
282 |       );
283 |       expect(stylePrompts.length).toBeGreaterThan(0);
284 |       expect(stylePrompts[0].title).toContain("Style Enhancement");
285 |     });
286 | 
287 |     it("should generate deployment prompts for comprehensive integration", async () => {
288 |       const result = await generateTechnicalWriterPrompts({
289 |         project_path: testProjectPath,
290 |         integration_level: "comprehensive",
291 |         prompt_types: ["deployment_optimization"],
292 |       });
293 | 
294 |       expect(result.isError).toBe(false);
295 |       const deploymentPrompts = result.generation.prompts.filter(
296 |         (p) => p.category === "deployment_optimization",
297 |       );
298 |       expect(deploymentPrompts.length).toBeGreaterThan(0);
299 |       expect(deploymentPrompts[0].title).toContain("Deployment Documentation");
300 |     });
301 | 
302 |     it("should include integration hints and related tools", async () => {
303 |       const result = await generateTechnicalWriterPrompts({
304 |         project_path: testProjectPath,
305 |         prompt_types: ["content_generation"],
306 |       });
307 | 
308 |       expect(result.isError).toBe(false);
309 |       const prompt = result.generation.prompts[0];
310 |       expect(prompt.integrationHints).toBeDefined();
311 |       expect(prompt.integrationHints.length).toBeGreaterThan(0);
312 |       expect(prompt.relatedTools).toBeDefined();
313 |       expect(prompt.relatedTools.length).toBeGreaterThan(0);
314 |     });
315 |   });
316 | 
317 |   describe("Audience-Specific Prompts", () => {
318 |     beforeEach(async () => {
319 |       await fs.writeFile(
320 |         join(testProjectPath, "package.json"),
321 |         JSON.stringify({
322 |           name: "test-project",
323 |           dependencies: { express: "^4.0.0" },
324 |         }),
325 |       );
326 |     });
327 | 
328 |     it("should generate developer-focused prompts", async () => {
329 |       const result = await generateTechnicalWriterPrompts({
330 |         project_path: testProjectPath,
331 |         audience: "developer",
332 |         prompt_types: ["content_generation"],
333 |       });
334 | 
335 |       expect(result.isError).toBe(false);
336 |       const prompts = result.generation.prompts;
337 |       expect(prompts.every((p) => p.audience === "developer")).toBe(true);
338 |       expect(prompts[0].prompt).toContain("developer");
339 |     });
340 | 
341 |     it("should generate enterprise-focused prompts", async () => {
342 |       const result = await generateTechnicalWriterPrompts({
343 |         project_path: testProjectPath,
344 |         audience: "enterprise",
345 |         prompt_types: ["content_generation"],
346 |       });
347 | 
348 |       expect(result.isError).toBe(false);
349 |       const prompts = result.generation.prompts;
350 |       expect(prompts.every((p) => p.audience === "enterprise")).toBe(true);
351 |       expect(prompts[0].prompt).toContain("enterprise");
352 |     });
353 |   });
354 | 
355 |   describe("Integration Levels", () => {
356 |     beforeEach(async () => {
357 |       await fs.writeFile(
358 |         join(testProjectPath, "package.json"),
359 |         JSON.stringify({
360 |           name: "test-project",
361 |         }),
362 |       );
363 |     });
364 | 
365 |     it("should generate basic prompts for basic integration", async () => {
366 |       const result = await generateTechnicalWriterPrompts({
367 |         project_path: testProjectPath,
368 |         integration_level: "basic",
369 |         prompt_types: ["content_generation"],
370 |       });
371 | 
372 |       expect(result.isError).toBe(false);
373 |       expect(result.generation.contextSummary.integrationLevel).toBe("basic");
374 |       expect(result.generation.prompts.length).toBeGreaterThan(0);
375 |     });
376 | 
377 |     it("should generate comprehensive prompts for comprehensive integration", async () => {
378 |       const result = await generateTechnicalWriterPrompts({
379 |         project_path: testProjectPath,
380 |         integration_level: "comprehensive",
381 |       });
382 | 
383 |       expect(result.isError).toBe(false);
384 |       expect(result.generation.contextSummary.integrationLevel).toBe(
385 |         "comprehensive",
386 |       );
387 |       const deploymentPrompts = result.generation.prompts.filter(
388 |         (p) => p.category === "deployment_optimization",
389 |       );
390 |       expect(deploymentPrompts.length).toBeGreaterThan(0);
391 |     });
392 | 
393 |     it("should generate advanced prompts for advanced integration", async () => {
394 |       const result = await generateTechnicalWriterPrompts({
395 |         project_path: testProjectPath,
396 |         integration_level: "advanced",
397 |       });
398 | 
399 |       expect(result.isError).toBe(false);
400 |       expect(result.generation.contextSummary.integrationLevel).toBe(
401 |         "advanced",
402 |       );
403 |       const deploymentPrompts = result.generation.prompts.filter(
404 |         (p) => p.category === "deployment_optimization",
405 |       );
406 |       expect(deploymentPrompts.length).toBeGreaterThan(0);
407 |     });
408 |   });
409 | 
410 |   describe("Recommendations and Next Steps", () => {
411 |     beforeEach(async () => {
412 |       await fs.writeFile(
413 |         join(testProjectPath, "package.json"),
414 |         JSON.stringify({
415 |           name: "test-project",
416 |         }),
417 |       );
418 |     });
419 | 
420 |     it("should generate integration recommendations", async () => {
421 |       const result = await generateTechnicalWriterPrompts({
422 |         project_path: testProjectPath,
423 |       });
424 | 
425 |       expect(result.isError).toBe(false);
426 |       expect(result.generation.recommendations).toBeDefined();
427 |       expect(result.generation.recommendations.length).toBeGreaterThan(0);
428 |       expect(result.generation.recommendations[0]).toContain(
429 |         "analyze_repository",
430 |       );
431 |     });
432 | 
433 |     it("should generate structured next steps", async () => {
434 |       const result = await generateTechnicalWriterPrompts({
435 |         project_path: testProjectPath,
436 |       });
437 | 
438 |       expect(result.isError).toBe(false);
439 |       expect(result.nextSteps).toBeDefined();
440 |       expect(result.nextSteps.length).toBeGreaterThan(0);
441 |       expect(result.nextSteps[0]).toHaveProperty("action");
442 |       expect(result.nextSteps[0]).toHaveProperty("toolRequired");
443 |       expect(result.nextSteps[0]).toHaveProperty("priority");
444 |     });
445 | 
446 |     it("should recommend README template creation for missing README", async () => {
447 |       // Ensure no README exists
448 |       await fs.writeFile(
449 |         join(testProjectPath, "package.json"),
450 |         JSON.stringify({ name: "test" }),
451 |       );
452 | 
453 |       const result = await generateTechnicalWriterPrompts({
454 |         project_path: testProjectPath,
455 |         context_sources: ["readme_health"],
456 |       });
457 | 
458 |       expect(result.isError).toBe(false);
459 |       expect(
460 |         result.generation.recommendations.some((r) =>
461 |           r.includes("generate_readme_template"),
462 |         ),
463 |       ).toBe(true);
464 |     });
465 | 
466 |     it("should recommend testing documentation for projects with tests", async () => {
467 |       await fs.writeFile(join(testProjectPath, "test.js"), "test code");
468 | 
469 |       const result = await generateTechnicalWriterPrompts({
470 |         project_path: testProjectPath,
471 |         context_sources: ["repository_analysis"],
472 |       });
473 | 
474 |       expect(result.isError).toBe(false);
475 |       expect(
476 |         result.generation.recommendations.some((r) =>
477 |           r.includes("testing documentation"),
478 |         ),
479 |       ).toBe(true);
480 |     });
481 |   });
482 | 
483 |   describe("Metadata and Scoring", () => {
484 |     beforeEach(async () => {
485 |       await fs.writeFile(
486 |         join(testProjectPath, "package.json"),
487 |         JSON.stringify({
488 |           name: "test-project",
489 |           dependencies: { react: "^18.0.0" },
490 |         }),
491 |       );
492 |     });
493 | 
494 |     it("should include comprehensive metadata", async () => {
495 |       const result = await generateTechnicalWriterPrompts({
496 |         project_path: testProjectPath,
497 |       });
498 | 
499 |       expect(result.isError).toBe(false);
500 |       expect(result.generation.metadata).toBeDefined();
501 |       expect(result.generation.metadata.totalPrompts).toBeGreaterThan(0);
502 |       expect(result.generation.metadata.promptsByCategory).toBeDefined();
503 |       expect(result.generation.metadata.confidenceScore).toBeGreaterThan(0);
504 |       expect(result.generation.metadata.generatedAt).toBeDefined();
505 |     });
506 | 
507 |     it("should calculate confidence score based on available context", async () => {
508 |       await fs.writeFile(join(testProjectPath, "README.md"), "# Test Project");
509 | 
510 |       const result = await generateTechnicalWriterPrompts({
511 |         project_path: testProjectPath,
512 |         context_sources: ["repository_analysis", "readme_health"],
513 |       });
514 | 
515 |       expect(result.isError).toBe(false);
516 |       expect(result.generation.metadata.confidenceScore).toBeGreaterThan(70);
517 |     });
518 | 
519 |     it("should categorize prompts correctly", async () => {
520 |       const result = await generateTechnicalWriterPrompts({
521 |         project_path: testProjectPath,
522 |         prompt_types: ["content_generation", "gap_filling"],
523 |         context_sources: ["documentation_gaps"],
524 |       });
525 | 
526 |       expect(result.isError).toBe(false);
527 |       const categories = result.generation.metadata.promptsByCategory;
528 |       expect(categories.content_generation).toBeGreaterThan(0);
529 |       expect(categories.gap_filling).toBeGreaterThan(0);
530 |     });
531 |   });
532 | 
533 |   describe("Error Handling", () => {
534 |     it("should handle non-existent project path gracefully", async () => {
535 |       const result = await generateTechnicalWriterPrompts({
536 |         project_path: "/non/existent/path",
537 |       });
538 | 
539 |       expect(result.isError).toBe(false); // Should not error, just provide limited context
540 |       expect(result.generation.contextSummary.projectContext.projectType).toBe(
541 |         "unknown",
542 |       );
543 |     });
544 | 
545 |     it("should handle invalid context sources", async () => {
546 |       const result = await generateTechnicalWriterPrompts({
547 |         project_path: testProjectPath,
548 |         // @ts-ignore - testing invalid input
549 |         context_sources: ["invalid_source"],
550 |       });
551 | 
552 |       expect(result.isError).toBe(true);
553 |       expect(result.content[0].text).toContain(
554 |         "Error generating technical writer prompts",
555 |       );
556 |     });
557 | 
558 |     it("should provide empty result structure on error", async () => {
559 |       const result = await generateTechnicalWriterPrompts({
560 |         // @ts-ignore - testing invalid input
561 |         project_path: null,
562 |       });
563 | 
564 |       expect(result.isError).toBe(true);
565 |       expect(result.generation).toBeDefined();
566 |       expect(result.generation.prompts).toEqual([]);
567 |       expect(result.generation.metadata.totalPrompts).toBe(0);
568 |       expect(result.nextSteps).toEqual([]);
569 |     });
570 |   });
571 | 
572 |   describe("Cross-Tool Integration", () => {
573 |     beforeEach(async () => {
574 |       await fs.writeFile(
575 |         join(testProjectPath, "package.json"),
576 |         JSON.stringify({
577 |           name: "integration-test",
578 |           dependencies: { next: "^13.0.0" },
579 |         }),
580 |       );
581 |     });
582 | 
583 |     it("should reference multiple DocuMCP tools in integration hints", async () => {
584 |       const result = await generateTechnicalWriterPrompts({
585 |         project_path: testProjectPath,
586 |         integration_level: "comprehensive",
587 |       });
588 | 
589 |       expect(result.isError).toBe(false);
590 |       const allHints = result.generation.prompts.flatMap(
591 |         (p) => p.integrationHints,
592 |       );
593 |       const allTools = result.generation.prompts.flatMap((p) => p.relatedTools);
594 | 
595 |       // Should reference multiple DocuMCP tools
596 |       expect(allTools).toContain("analyze_repository");
597 |       expect(allTools).toContain("detect_documentation_gaps");
598 |       expect(allTools).toContain("readme_best_practices");
599 |       // Check for any deployment-related tools since validate_content may not always be included
600 |       expect(
601 |         allTools.some((tool) =>
602 |           ["validate_content", "deploy_pages", "verify_deployment"].includes(
603 |             tool,
604 |           ),
605 |         ),
606 |       ).toBe(true);
607 |     });
608 | 
609 |     it("should provide workflow guidance for tool chaining", async () => {
610 |       const result = await generateTechnicalWriterPrompts({
611 |         project_path: testProjectPath,
612 |         integration_level: "advanced",
613 |       });
614 | 
615 |       expect(result.isError).toBe(false);
616 |       expect(
617 |         result.generation.recommendations.some((r) =>
618 |           r.includes("analyze_repository first"),
619 |         ),
620 |       ).toBe(true);
621 |       expect(
622 |         result.generation.recommendations.some((r) =>
623 |           r.includes("validate_content"),
624 |         ),
625 |       ).toBe(true);
626 |     });
627 |   });
628 | });
629 | 
```
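
For orientation, here is a minimal invocation sketch pieced together from the assertions above; the result shape (`isError`, `generation.prompts`, `generation.metadata`, `nextSteps`) is exactly what these tests exercise, while the project path below is a placeholder.

```typescript
// Import path as used from the test directory; adjust for your caller's location.
import { generateTechnicalWriterPrompts } from "../../src/tools/generate-technical-writer-prompts.js";

async function demo(): Promise<void> {
  // Placeholder path; the tests above point this at a temp dir with a package.json.
  const result = await generateTechnicalWriterPrompts({
    project_path: "/path/to/project",
    audience: "developer",
    integration_level: "comprehensive",
    prompt_types: ["content_generation", "gap_filling"],
    context_sources: ["repository_analysis", "readme_health"],
  });

  if (!result.isError) {
    for (const p of result.generation.prompts) {
      console.log(`[${p.category}] ${p.title} (audience: ${p.audience})`);
    }
    console.log(`confidence: ${result.generation.metadata.confidenceScore}`);
  }
}

demo().catch(console.error);
```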

--------------------------------------------------------------------------------
/src/memory/schemas.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Knowledge Graph Schema Definitions
  3 |  * Implements Phase 1.1: Enhanced Knowledge Graph Schema Implementation
  4 |  *
  5 |  * Defines comprehensive Zod schemas for all entity types and relationships
  6 |  * in the DocuMCP knowledge graph.
  7 |  */
  8 | 
  9 | import { z } from "zod";
 10 | 
 11 | // ============================================================================
 12 | // Entity Schemas
 13 | // ============================================================================
 14 | 
 15 | /**
 16 |  * Project Entity Schema
 17 |  * Represents a software project analyzed by DocuMCP
 18 |  */
 19 | export const ProjectEntitySchema = z.object({
 20 |   name: z.string().min(1, "Project name is required"),
 21 |   path: z.string().min(1, "Project path is required"),
 22 |   technologies: z.array(z.string()).default([]),
 23 |   size: z.enum(["small", "medium", "large"]).default("medium"),
 24 |   domain: z.string().optional(),
 25 |   lastAnalyzed: z.string().datetime(),
 26 |   analysisCount: z.number().int().min(0).default(0),
 27 |   primaryLanguage: z.string().optional(),
 28 |   hasTests: z.boolean().default(false),
 29 |   hasCI: z.boolean().default(false),
 30 |   hasDocs: z.boolean().default(false),
 31 |   totalFiles: z.number().int().min(0).default(0),
 32 |   linesOfCode: z.number().int().min(0).optional(),
 33 | });
 34 | 
 35 | export type ProjectEntity = z.infer<typeof ProjectEntitySchema>;
 36 | 
 37 | /**
 38 |  * User Entity Schema
 39 |  * Represents a DocuMCP user with their preferences and behavior patterns
 40 |  */
 41 | export const UserEntitySchema = z.object({
 42 |   userId: z.string().min(1, "User ID is required"),
 43 |   expertiseLevel: z
 44 |     .enum(["beginner", "intermediate", "advanced"])
 45 |     .default("intermediate"),
 46 |   preferredTechnologies: z.array(z.string()).default([]),
 47 |   preferredSSGs: z.array(z.string()).default([]),
 48 |   documentationStyle: z
 49 |     .enum(["minimal", "comprehensive", "tutorial-heavy"])
 50 |     .default("comprehensive"),
 51 |   preferredDiataxisCategories: z
 52 |     .array(z.enum(["tutorials", "how-to", "reference", "explanation"]))
 53 |     .default([]),
 54 |   projectCount: z.number().int().min(0).default(0),
 55 |   lastActive: z.string().datetime(),
 56 |   createdAt: z.string().datetime(),
 57 | });
 58 | 
 59 | export type UserEntity = z.infer<typeof UserEntitySchema>;
 60 | 
 61 | /**
 62 |  * Configuration Entity Schema
 63 |  * Represents a deployment configuration with success metrics
 64 |  */
 65 | export const ConfigurationEntitySchema = z.object({
 66 |   ssg: z.enum(["jekyll", "hugo", "docusaurus", "mkdocs", "eleventy"]),
 67 |   settings: z.record(z.string(), z.any()).default({}),
 68 |   deploymentSuccessRate: z.number().min(0).max(1).default(1.0),
 69 |   usageCount: z.number().int().min(0).default(0),
 70 |   lastUsed: z.string().datetime(),
 71 |   buildTimeAverage: z.number().min(0).optional(), // in seconds
 72 |   failureReasons: z.array(z.string()).default([]),
 73 |   compatibleTechnologies: z.array(z.string()).default([]),
 74 | });
 75 | 
 76 | export type ConfigurationEntity = z.infer<typeof ConfigurationEntitySchema>;
 77 | 
 78 | /**
 79 |  * Documentation Entity Schema
 80 |  * Represents a documentation structure or pattern
 81 |  */
 82 | export const DocumentationEntitySchema = z.object({
 83 |   type: z.enum(["structure", "pattern", "template"]),
 84 |   framework: z.enum(["diataxis", "custom", "mixed"]).default("diataxis"),
 85 |   categories: z.array(z.string()).default([]),
 86 |   effectivenessScore: z.number().min(0).max(1).optional(),
 87 |   usageCount: z.number().int().min(0).default(0),
 88 |   lastUsed: z.string().datetime(),
 89 |   contentPatterns: z.record(z.string(), z.any()).default({}),
 90 |   suitableFor: z
 91 |     .array(z.enum(["library", "application", "tool", "framework"]))
 92 |     .default([]),
 93 | });
 94 | 
 95 | export type DocumentationEntity = z.infer<typeof DocumentationEntitySchema>;
 96 | 
 97 | /**
 98 |  * CodeFile Entity Schema
 99 |  * Represents a source code file with its structure and metadata
100 |  */
101 | export const CodeFileEntitySchema = z.object({
102 |   path: z.string().min(1, "File path is required"),
103 |   language: z.string().min(1, "Language is required"),
104 |   functions: z.array(z.string()).default([]),
105 |   classes: z.array(z.string()).default([]),
106 |   dependencies: z.array(z.string()).default([]),
107 |   imports: z.array(z.string()).default([]),
108 |   exports: z.array(z.string()).default([]),
109 |   lastModified: z.string().datetime(),
110 |   linesOfCode: z.number().int().min(0).default(0),
111 |   contentHash: z.string().min(1, "Content hash is required"),
112 |   complexity: z.enum(["low", "medium", "high"]).optional(),
113 | });
114 | 
115 | export type CodeFileEntity = z.infer<typeof CodeFileEntitySchema>;
116 | 
117 | /**
118 |  * DocumentationSection Entity Schema
119 |  * Represents a specific section of documentation
120 |  */
121 | export const DocumentationSectionEntitySchema = z.object({
122 |   filePath: z.string().min(1, "File path is required"),
123 |   sectionTitle: z.string().min(1, "Section title is required"),
124 |   contentHash: z.string().min(1, "Content hash is required"),
125 |   referencedCodeFiles: z.array(z.string()).default([]),
126 |   referencedFunctions: z.array(z.string()).default([]),
127 |   referencedClasses: z.array(z.string()).default([]),
128 |   lastUpdated: z.string().datetime(),
129 |   category: z
130 |     .enum(["tutorial", "how-to", "reference", "explanation"])
131 |     .optional(),
132 |   effectivenessScore: z.number().min(0).max(1).optional(),
133 |   wordCount: z.number().int().min(0).default(0),
134 |   hasCodeExamples: z.boolean().default(false),
135 | });
136 | 
137 | export type DocumentationSectionEntity = z.infer<
138 |   typeof DocumentationSectionEntitySchema
139 | >;
140 | 
141 | /**
142 |  * Technology Entity Schema
143 |  * Represents a technology, framework, or language
144 |  */
145 | export const TechnologyEntitySchema = z.object({
146 |   name: z.string().min(1, "Technology name is required"),
147 |   category: z.enum(["language", "framework", "library", "tool", "platform"]),
148 |   version: z.string().optional(),
149 |   ecosystem: z
150 |     .enum(["javascript", "python", "ruby", "go", "rust", "java", "other"])
151 |     .optional(),
152 |   popularityScore: z.number().min(0).max(1).optional(),
153 |   usageCount: z.number().int().min(0).default(0),
154 | });
155 | 
156 | export type TechnologyEntity = z.infer<typeof TechnologyEntitySchema>;
157 | 
158 | /**
159 |  * LinkValidation Entity Schema
160 |  * Represents link validation results for documentation
161 |  */
162 | export const LinkValidationEntitySchema = z.object({
163 |   totalLinks: z.number().int().min(0).default(0),
164 |   validLinks: z.number().int().min(0).default(0),
165 |   brokenLinks: z.number().int().min(0).default(0),
166 |   warningLinks: z.number().int().min(0).default(0),
167 |   unknownLinks: z.number().int().min(0).default(0),
168 |   healthScore: z.number().min(0).max(100).default(100),
169 |   lastValidated: z.string().datetime(),
170 |   brokenLinksList: z.array(z.string()).default([]),
171 | });
172 | 
173 | export type LinkValidationEntity = z.infer<typeof LinkValidationEntitySchema>;
174 | 
175 | /**
176 |  * Sitemap Entity Schema
177 |  * Represents a sitemap.xml file with generation and update tracking
178 |  */
179 | export const SitemapEntitySchema = z.object({
180 |   baseUrl: z.string().url("Valid base URL required"),
181 |   docsPath: z.string().min(1, "Documentation path is required"),
182 |   totalUrls: z.number().int().min(0).default(0),
183 |   lastGenerated: z.string().datetime(),
184 |   lastUpdated: z.string().datetime().optional(),
185 |   urlsByCategory: z.record(z.string(), z.number()).default({}),
186 |   urlsByPriority: z
187 |     .object({
188 |       high: z.number().int().min(0).default(0), // priority >= 0.9
189 |       medium: z.number().int().min(0).default(0), // priority 0.5-0.9
190 |       low: z.number().int().min(0).default(0), // priority < 0.5
191 |     })
192 |     .default({ high: 0, medium: 0, low: 0 }),
193 |   updateFrequency: z
194 |     .enum(["always", "hourly", "daily", "weekly", "monthly", "yearly", "never"])
195 |     .default("monthly"),
196 |   validationStatus: z
197 |     .enum(["valid", "invalid", "not_validated"])
198 |     .default("not_validated"),
199 |   validationErrors: z.array(z.string()).default([]),
200 |   sitemapPath: z.string().min(1),
201 |   ssg: z
202 |     .enum(["jekyll", "hugo", "docusaurus", "mkdocs", "eleventy"])
203 |     .optional(),
204 |   submittedToSearchEngines: z.boolean().default(false),
205 |   searchEngines: z.array(z.string()).default([]),
206 | });
207 | 
208 | export type SitemapEntity = z.infer<typeof SitemapEntitySchema>;
209 | 
210 | /**
211 |  * Documentation Freshness Event Entity Schema
212 |  * Represents a documentation freshness tracking event with staleness metrics
213 |  */
214 | export const DocumentationFreshnessEventEntitySchema = z.object({
215 |   docsPath: z.string().min(1, "Documentation path is required"),
216 |   projectPath: z.string().min(1, "Project path is required"),
217 |   scannedAt: z.string().datetime(),
218 |   totalFiles: z.number().int().min(0).default(0),
219 |   freshFiles: z.number().int().min(0).default(0),
220 |   warningFiles: z.number().int().min(0).default(0),
221 |   staleFiles: z.number().int().min(0).default(0),
222 |   criticalFiles: z.number().int().min(0).default(0),
223 |   filesWithoutMetadata: z.number().int().min(0).default(0),
224 |   thresholds: z
225 |     .object({
226 |       warning: z.object({
227 |         value: z.number().positive(),
228 |         unit: z.enum(["minutes", "hours", "days"]),
229 |       }),
230 |       stale: z.object({
231 |         value: z.number().positive(),
232 |         unit: z.enum(["minutes", "hours", "days"]),
233 |       }),
234 |       critical: z.object({
235 |         value: z.number().positive(),
236 |         unit: z.enum(["minutes", "hours", "days"]),
237 |       }),
238 |     })
239 |     .optional(),
240 |   averageAge: z.number().min(0).optional(), // Average age in days
241 |   oldestFile: z
242 |     .object({
243 |       path: z.string(),
244 |       ageInDays: z.number().min(0),
245 |     })
246 |     .optional(),
247 |   mostStaleFiles: z.array(z.string()).default([]),
248 |   validatedAgainstCommit: z.string().optional(),
249 |   eventType: z
250 |     .enum(["scan", "validation", "initialization", "update"])
251 |     .default("scan"),
252 | });
253 | 
254 | export type DocumentationFreshnessEventEntity = z.infer<
255 |   typeof DocumentationFreshnessEventEntitySchema
256 | >;
257 | 
258 | // ============================================================================
259 | // Relationship Schemas
260 | // ============================================================================
261 | 
262 | /**
263 |  * Base Relationship Schema
264 |  * Common fields for all relationship types
265 |  */
266 | export const BaseRelationshipSchema = z.object({
267 |   weight: z.number().min(0).max(1).default(1.0),
268 |   confidence: z.number().min(0).max(1).default(1.0),
269 |   createdAt: z.string().datetime(),
270 |   lastUpdated: z.string().datetime(),
271 |   metadata: z.record(z.string(), z.any()).default({}),
272 | });
273 | 
274 | /**
275 |  * Project Uses Technology Relationship
276 |  */
277 | export const ProjectUsesTechnologySchema = BaseRelationshipSchema.extend({
278 |   type: z.literal("project_uses_technology"),
279 |   fileCount: z.number().int().min(0).default(0),
280 |   percentage: z.number().min(0).max(100).optional(),
281 |   isPrimary: z.boolean().default(false),
282 | });
283 | 
284 | export type ProjectUsesTechnologyRelationship = z.infer<
285 |   typeof ProjectUsesTechnologySchema
286 | >;
287 | 
288 | /**
289 |  * User Prefers SSG Relationship
290 |  */
291 | export const UserPrefersSSGSchema = BaseRelationshipSchema.extend({
292 |   type: z.literal("user_prefers_ssg"),
293 |   usageCount: z.number().int().min(0).default(0),
294 |   lastUsed: z.string().datetime(),
295 |   successRate: z.number().min(0).max(1).optional(),
296 | });
297 | 
298 | export type UserPrefersSSGRelationship = z.infer<typeof UserPrefersSSGSchema>;
299 | 
300 | /**
301 |  * Project Deployed With Configuration Relationship
302 |  */
303 | export const ProjectDeployedWithSchema = BaseRelationshipSchema.extend({
304 |   type: z.literal("project_deployed_with"),
305 |   success: z.boolean(),
306 |   timestamp: z.string().datetime(),
307 |   buildTime: z.number().min(0).optional(), // in seconds
308 |   errorMessage: z.string().optional(),
309 |   deploymentUrl: z.string().url().optional(),
310 | });
311 | 
312 | export type ProjectDeployedWithRelationship = z.infer<
313 |   typeof ProjectDeployedWithSchema
314 | >;
315 | 
316 | /**
317 |  * Similar To Relationship
318 |  */
319 | export const SimilarToSchema = BaseRelationshipSchema.extend({
320 |   type: z.literal("similar_to"),
321 |   similarityScore: z.number().min(0).max(1),
322 |   sharedTechnologies: z.array(z.string()).default([]),
323 |   sharedPatterns: z.array(z.string()).default([]),
324 |   reason: z.string().optional(),
325 | });
326 | 
327 | export type SimilarToRelationship = z.infer<typeof SimilarToSchema>;
328 | 
329 | /**
330 |  * Documents Relationship (CodeFile -> DocumentationSection)
331 |  */
332 | export const DocumentsSchema = BaseRelationshipSchema.extend({
333 |   type: z.literal("documents"),
334 |   coverage: z.enum(["partial", "complete", "comprehensive"]).default("partial"),
335 |   lastVerified: z.string().datetime(),
336 |   quality: z.enum(["low", "medium", "high"]).optional(),
337 | });
338 | 
339 | export type DocumentsRelationship = z.infer<typeof DocumentsSchema>;
340 | 
341 | /**
342 |  * References Relationship (DocumentationSection -> CodeFile)
343 |  */
344 | export const ReferencesSchema = BaseRelationshipSchema.extend({
345 |   type: z.literal("references"),
346 |   referenceType: z.enum([
347 |     "example",
348 |     "api-reference",
349 |     "tutorial",
350 |     "explanation",
351 |   ]),
352 |   isAccurate: z.boolean().optional(),
353 |   lastVerified: z.string().datetime().optional(),
354 | });
355 | 
356 | export type ReferencesRelationship = z.infer<typeof ReferencesSchema>;
357 | 
358 | /**
359 |  * Outdated For Relationship
360 |  */
361 | export const OutdatedForSchema = BaseRelationshipSchema.extend({
362 |   type: z.literal("outdated_for"),
363 |   detectedAt: z.string().datetime(),
364 |   changeType: z.enum([
365 |     "function_signature",
366 |     "class_structure",
367 |     "dependency",
368 |     "behavior",
369 |     "removed",
370 |   ]),
371 |   severity: z.enum(["low", "medium", "high", "critical"]).default("medium"),
372 |   autoFixable: z.boolean().default(false),
373 | });
374 | 
375 | export type OutdatedForRelationship = z.infer<typeof OutdatedForSchema>;
376 | 
/**
 * Depends On Relationship
 */
export const DependsOnSchema = BaseRelationshipSchema.extend({
  type: z.literal("depends_on"),
  dependencyType: z.enum(["import", "inheritance", "composition", "usage"]),
  isRequired: z.boolean().default(true),
  version: z.string().optional(),
});

export type DependsOnRelationship = z.infer<typeof DependsOnSchema>;

/**
 * Recommends Relationship
 */
export const RecommendsSchema = BaseRelationshipSchema.extend({
  type: z.literal("recommends"),
  reason: z.string(),
  basedOn: z.array(z.string()).default([]), // IDs of supporting evidence
  contextFactors: z.array(z.string()).default([]),
});

export type RecommendsRelationship = z.infer<typeof RecommendsSchema>;

/**
 * Results In Relationship
 */
export const ResultsInSchema = BaseRelationshipSchema.extend({
  type: z.literal("results_in"),
  outcomeType: z.enum(["success", "failure", "partial"]),
  metrics: z.record(z.string(), z.number()).default({}),
  notes: z.string().optional(),
});

export type ResultsInRelationship = z.infer<typeof ResultsInSchema>;

/**
 * Created By Relationship
 */
export const CreatedBySchema = BaseRelationshipSchema.extend({
  type: z.literal("created_by"),
  role: z.enum(["author", "contributor", "maintainer"]).default("author"),
  timestamp: z.string().datetime(),
});

export type CreatedByRelationship = z.infer<typeof CreatedBySchema>;

/**
 * Project Has Sitemap Relationship
 * Links a project to its sitemap with generation metrics
 */
export const ProjectHasSitemapSchema = BaseRelationshipSchema.extend({
  type: z.literal("project_has_sitemap"),
  generationCount: z.number().int().min(0).default(0),
  lastAction: z.enum(["generate", "update", "validate"]).default("generate"),
  urlsAdded: z.number().int().min(0).default(0),
  urlsRemoved: z.number().int().min(0).default(0),
  urlsUpdated: z.number().int().min(0).default(0),
  successRate: z.number().min(0).max(1).default(1.0),
});

export type ProjectHasSitemapRelationship = z.infer<
  typeof ProjectHasSitemapSchema
>;

/**
 * Project Has Freshness Event Relationship
 * Links a project to a documentation freshness tracking event
 */
export const ProjectHasFreshnessEventSchema = BaseRelationshipSchema.extend({
  type: z.literal("project_has_freshness_event"),
  eventType: z
    .enum(["scan", "validation", "initialization", "update"])
    .default("scan"),
  filesScanned: z.number().int().min(0).default(0),
  freshFiles: z.number().int().min(0).default(0),
  staleFiles: z.number().int().min(0).default(0),
  criticalFiles: z.number().int().min(0).default(0),
  filesInitialized: z.number().int().min(0).default(0),
  filesUpdated: z.number().int().min(0).default(0),
  averageStaleness: z.number().min(0).optional(), // in days
  improvementScore: z.number().min(0).max(1).optional(), // 0-1, higher is better
});

export type ProjectHasFreshnessEventRelationship = z.infer<
  typeof ProjectHasFreshnessEventSchema
>;

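// Illustrative sketch (not part of the original file): deriving a simple
// freshness ratio from a freshness event, guarding against division by zero
// when no files were scanned. `freshnessRatio` is a hypothetical helper.
function freshnessRatio(event: ProjectHasFreshnessEventRelationship): number {
  if (event.filesScanned === 0) return 1; // nothing scanned => nothing stale
  return event.freshFiles / event.filesScanned;
}
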
// ============================================================================
// Union Types and Type Guards
// ============================================================================

/**
 * All Entity Types Union
 */
const ProjectEntityWithType = ProjectEntitySchema.extend({
  type: z.literal("project"),
});
const UserEntityWithType = UserEntitySchema.extend({ type: z.literal("user") });
const ConfigurationEntityWithType = ConfigurationEntitySchema.extend({
  type: z.literal("configuration"),
});
const DocumentationEntityWithType = DocumentationEntitySchema.extend({
  type: z.literal("documentation"),
});
const CodeFileEntityWithType = CodeFileEntitySchema.extend({
  type: z.literal("code_file"),
});
const DocumentationSectionEntityWithType =
  DocumentationSectionEntitySchema.extend({
    type: z.literal("documentation_section"),
  });
const TechnologyEntityWithType = TechnologyEntitySchema.extend({
  type: z.literal("technology"),
});
const LinkValidationEntityWithType = LinkValidationEntitySchema.extend({
  type: z.literal("link_validation"),
});
const SitemapEntityWithType = SitemapEntitySchema.extend({
  type: z.literal("sitemap"),
});
const DocumentationFreshnessEventEntityWithType =
  DocumentationFreshnessEventEntitySchema.extend({
    type: z.literal("documentation_freshness_event"),
  });

export const EntitySchema = z.union([
  ProjectEntityWithType,
  UserEntityWithType,
  ConfigurationEntityWithType,
  DocumentationEntityWithType,
  CodeFileEntityWithType,
  DocumentationSectionEntityWithType,
  TechnologyEntityWithType,
  LinkValidationEntityWithType,
  SitemapEntityWithType,
  DocumentationFreshnessEventEntityWithType,
]);

export type Entity = z.infer<typeof EntitySchema>;

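// Illustrative sketch (not part of the original file): every union member
// tags `type` with z.literal, so a parsed Entity narrows cleanly in a
// switch. (With literal tags like these, z.discriminatedUnion("type", ...)
// is an alternative to z.union that gives faster parsing and more targeted
// error messages.) `describeEntity` is a hypothetical helper.
function describeEntity(entity: Entity): string {
  switch (entity.type) {
    case "project":
      return "project entity";
    case "code_file":
      return "code file entity";
    default:
      return `entity of type: ${entity.type}`;
  }
}
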
/**
 * All Relationship Types Union
 */
export const RelationshipSchema = z.union([
  ProjectUsesTechnologySchema,
  UserPrefersSSGSchema,
  ProjectDeployedWithSchema,
  SimilarToSchema,
  DocumentsSchema,
  ReferencesSchema,
  OutdatedForSchema,
  DependsOnSchema,
  RecommendsSchema,
  ResultsInSchema,
  CreatedBySchema,
  ProjectHasSitemapSchema,
  ProjectHasFreshnessEventSchema,
]);

export type Relationship =
  | ProjectUsesTechnologyRelationship
  | UserPrefersSSGRelationship
  | ProjectDeployedWithRelationship
  | SimilarToRelationship
  | DocumentsRelationship
  | ReferencesRelationship
  | OutdatedForRelationship
  | DependsOnRelationship
  | RecommendsRelationship
  | ResultsInRelationship
  | CreatedByRelationship
  | ProjectHasSitemapRelationship
  | ProjectHasFreshnessEventRelationship;

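// Illustrative sketch (not part of the original file): a total lookup table
// keyed by Relationship["type"]. The Record annotation makes the compiler
// reject the table if a relationship type is added to the union but left
// out here. `DOC_EDGE` is a hypothetical name marking which relationship
// types connect documentation to code.
const DOC_EDGE: Record<Relationship["type"], boolean> = {
  project_uses_technology: false,
  user_prefers_ssg: false,
  project_deployed_with: false,
  similar_to: false,
  documents: true,
  references: true,
  outdated_for: true,
  depends_on: false,
  recommends: false,
  results_in: false,
  created_by: false,
  project_has_sitemap: false,
  project_has_freshness_event: false,
};
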
// ============================================================================
// Validation Helpers
// ============================================================================

/**
 * Validate an entity against its schema
 */
export function validateEntity(entity: any): Entity {
  return EntitySchema.parse(entity);
}

/**
 * Validate a relationship against its schema
 */
export function validateRelationship(relationship: any): Relationship {
  return RelationshipSchema.parse(relationship);
}

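// Illustrative sketch (not part of the original file): a non-throwing
// variant built on Zod's safeParse, for callers that prefer a result object
// over try/catch around parse(). `tryValidateEntity` is a hypothetical
// helper.
function tryValidateEntity(
  entity: unknown,
): { ok: true; value: Entity } | { ok: false; error: z.ZodError } {
  const result = EntitySchema.safeParse(entity);
  return result.success
    ? { ok: true, value: result.data }
    : { ok: false, error: result.error };
}
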
/**
 * Type guard for specific entity types
 */
export function isProjectEntity(
  entity: Entity,
): entity is ProjectEntity & { type: "project" } {
  return entity.type === "project";
}

export function isUserEntity(
  entity: Entity,
): entity is UserEntity & { type: "user" } {
  return entity.type === "user";
}

export function isConfigurationEntity(
  entity: Entity,
): entity is ConfigurationEntity & { type: "configuration" } {
  return entity.type === "configuration";
}

export function isCodeFileEntity(
  entity: Entity,
): entity is CodeFileEntity & { type: "code_file" } {
  return entity.type === "code_file";
}

export function isDocumentationSectionEntity(
  entity: Entity,
): entity is DocumentationSectionEntity & { type: "documentation_section" } {
  return entity.type === "documentation_section";
}

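// Illustrative sketch (not part of the original file): because these guards
// are type predicates, Array.prototype.filter narrows the element type for
// you. `projectsOnly` is a hypothetical helper.
function projectsOnly(entities: Entity[]) {
  return entities.filter(isProjectEntity); // (ProjectEntity & { type: "project" })[]
}
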
// ============================================================================
// Schema Metadata
// ============================================================================

/**
 * Schema version for migration support
 */
export const SCHEMA_VERSION = "1.0.0";

/**
 * Schema metadata for documentation and validation
 */
export const SCHEMA_METADATA = {
  version: SCHEMA_VERSION,
  entityTypes: [
    "project",
    "user",
    "configuration",
    "documentation",
    "code_file",
    "documentation_section",
    "technology",
    "link_validation",
    "sitemap",
    "documentation_freshness_event",
  ] as const,
  relationshipTypes: [
    "project_uses_technology",
    "user_prefers_ssg",
    "project_deployed_with",
    "similar_to",
    "documents",
    "references",
    "outdated_for",
    "depends_on",
    "recommends",
    "results_in",
    "created_by",
    "project_has_sitemap",
    "project_has_freshness_event",
  ] as const,
  lastUpdated: "2025-10-01",
} as const;

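// Illustrative sketch (not part of the original file): SCHEMA_METADATA can
// back a cheap membership check before running full Zod validation. The
// `isKnownRelationshipType` helper is hypothetical.
function isKnownRelationshipType(
  value: string,
): value is (typeof SCHEMA_METADATA.relationshipTypes)[number] {
  return (SCHEMA_METADATA.relationshipTypes as readonly string[]).includes(
    value,
  );
}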
```