This is page 6 of 23. Use http://codebase.md/tosin2013/documcp?page={x} to view the full context.
# Directory Structure
```
├── .dockerignore
├── .eslintignore
├── .eslintrc.json
├── .github
│ ├── agents
│ │ ├── documcp-ast.md
│ │ ├── documcp-deploy.md
│ │ ├── documcp-memory.md
│ │ ├── documcp-test.md
│ │ └── documcp-tool.md
│ ├── copilot-instructions.md
│ ├── dependabot.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── automated-changelog.md
│ │ ├── bug_report.md
│ │ ├── bug_report.yml
│ │ ├── documentation_issue.md
│ │ ├── feature_request.md
│ │ ├── feature_request.yml
│ │ ├── npm-publishing-fix.md
│ │ └── release_improvements.md
│ ├── PULL_REQUEST_TEMPLATE.md
│ ├── release-drafter.yml
│ └── workflows
│ ├── auto-merge.yml
│ ├── ci.yml
│ ├── codeql.yml
│ ├── dependency-review.yml
│ ├── deploy-docs.yml
│ ├── README.md
│ ├── release-drafter.yml
│ └── release.yml
├── .gitignore
├── .husky
│ ├── commit-msg
│ └── pre-commit
├── .linkcheck.config.json
├── .markdown-link-check.json
├── .nvmrc
├── .pre-commit-config.yaml
├── .versionrc.json
├── ARCHITECTURAL_CHANGES_SUMMARY.md
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── CONTRIBUTING.md
├── docker-compose.docs.yml
├── Dockerfile.docs
├── docs
│ ├── .docusaurus
│ │ ├── docusaurus-plugin-content-docs
│ │ │ └── default
│ │ │ └── __mdx-loader-dependency.json
│ │ └── docusaurus-plugin-content-pages
│ │ └── default
│ │ └── __plugin.json
│ ├── adrs
│ │ ├── adr-0001-mcp-server-architecture.md
│ │ ├── adr-0002-repository-analysis-engine.md
│ │ ├── adr-0003-static-site-generator-recommendation-engine.md
│ │ ├── adr-0004-diataxis-framework-integration.md
│ │ ├── adr-0005-github-pages-deployment-automation.md
│ │ ├── adr-0006-mcp-tools-api-design.md
│ │ ├── adr-0007-mcp-prompts-and-resources-integration.md
│ │ ├── adr-0008-intelligent-content-population-engine.md
│ │ ├── adr-0009-content-accuracy-validation-framework.md
│ │ ├── adr-0010-mcp-resource-pattern-redesign.md
│ │ ├── adr-0011-ce-mcp-compatibility.md
│ │ ├── adr-0012-priority-scoring-system-for-documentation-drift.md
│ │ ├── adr-0013-release-pipeline-and-package-distribution.md
│ │ └── README.md
│ ├── api
│ │ ├── .nojekyll
│ │ ├── assets
│ │ │ ├── hierarchy.js
│ │ │ ├── highlight.css
│ │ │ ├── icons.js
│ │ │ ├── icons.svg
│ │ │ ├── main.js
│ │ │ ├── navigation.js
│ │ │ ├── search.js
│ │ │ └── style.css
│ │ ├── hierarchy.html
│ │ ├── index.html
│ │ ├── modules.html
│ │ └── variables
│ │ └── TOOLS.html
│ ├── assets
│ │ └── logo.svg
│ ├── CE-MCP-FINDINGS.md
│ ├── development
│ │ └── MCP_INSPECTOR_TESTING.md
│ ├── docusaurus.config.js
│ ├── explanation
│ │ ├── architecture.md
│ │ └── index.md
│ ├── guides
│ │ ├── link-validation.md
│ │ ├── playwright-integration.md
│ │ └── playwright-testing-workflow.md
│ ├── how-to
│ │ ├── analytics-setup.md
│ │ ├── change-watcher.md
│ │ ├── custom-domains.md
│ │ ├── documentation-freshness-tracking.md
│ │ ├── drift-priority-scoring.md
│ │ ├── github-pages-deployment.md
│ │ ├── index.md
│ │ ├── llm-integration.md
│ │ ├── local-testing.md
│ │ ├── performance-optimization.md
│ │ ├── prompting-guide.md
│ │ ├── repository-analysis.md
│ │ ├── seo-optimization.md
│ │ ├── site-monitoring.md
│ │ ├── troubleshooting.md
│ │ └── usage-examples.md
│ ├── index.md
│ ├── knowledge-graph.md
│ ├── package-lock.json
│ ├── package.json
│ ├── phase-2-intelligence.md
│ ├── reference
│ │ ├── api-overview.md
│ │ ├── cli.md
│ │ ├── configuration.md
│ │ ├── deploy-pages.md
│ │ ├── index.md
│ │ ├── mcp-tools.md
│ │ └── prompt-templates.md
│ ├── research
│ │ ├── cross-domain-integration
│ │ │ └── README.md
│ │ ├── domain-1-mcp-architecture
│ │ │ ├── index.md
│ │ │ └── mcp-performance-research.md
│ │ ├── domain-2-repository-analysis
│ │ │ └── README.md
│ │ ├── domain-3-ssg-recommendation
│ │ │ ├── index.md
│ │ │ └── ssg-performance-analysis.md
│ │ ├── domain-4-diataxis-integration
│ │ │ └── README.md
│ │ ├── domain-5-github-deployment
│ │ │ ├── github-pages-security-analysis.md
│ │ │ └── index.md
│ │ ├── domain-6-api-design
│ │ │ └── README.md
│ │ ├── README.md
│ │ ├── research-integration-summary-2025-01-14.md
│ │ ├── research-progress-template.md
│ │ └── research-questions-2025-01-14.md
│ ├── robots.txt
│ ├── sidebars.js
│ ├── sitemap.xml
│ ├── src
│ │ └── css
│ │ └── custom.css
│ └── tutorials
│ ├── development-setup.md
│ ├── environment-setup.md
│ ├── first-deployment.md
│ ├── getting-started.md
│ ├── index.md
│ ├── memory-workflows.md
│ └── user-onboarding.md
├── ISSUE_IMPLEMENTATION_SUMMARY.md
├── jest.config.js
├── LICENSE
├── Makefile
├── MCP_PHASE2_IMPLEMENTATION.md
├── mcp-config-example.json
├── mcp.json
├── package-lock.json
├── package.json
├── README.md
├── release.sh
├── scripts
│ └── check-package-structure.cjs
├── SECURITY.md
├── setup-precommit.sh
├── src
│ ├── benchmarks
│ │ └── performance.ts
│ ├── index.ts
│ ├── memory
│ │ ├── contextual-retrieval.ts
│ │ ├── deployment-analytics.ts
│ │ ├── enhanced-manager.ts
│ │ ├── export-import.ts
│ │ ├── freshness-kg-integration.ts
│ │ ├── index.ts
│ │ ├── integration.ts
│ │ ├── kg-code-integration.ts
│ │ ├── kg-health.ts
│ │ ├── kg-integration.ts
│ │ ├── kg-link-validator.ts
│ │ ├── kg-storage.ts
│ │ ├── knowledge-graph.ts
│ │ ├── learning.ts
│ │ ├── manager.ts
│ │ ├── multi-agent-sharing.ts
│ │ ├── pruning.ts
│ │ ├── schemas.ts
│ │ ├── storage.ts
│ │ ├── temporal-analysis.ts
│ │ ├── user-preferences.ts
│ │ └── visualization.ts
│ ├── prompts
│ │ └── technical-writer-prompts.ts
│ ├── scripts
│ │ └── benchmark.ts
│ ├── templates
│ │ └── playwright
│ │ ├── accessibility.spec.template.ts
│ │ ├── Dockerfile.template
│ │ ├── docs-e2e.workflow.template.yml
│ │ ├── link-validation.spec.template.ts
│ │ └── playwright.config.template.ts
│ ├── tools
│ │ ├── analyze-deployments.ts
│ │ ├── analyze-readme.ts
│ │ ├── analyze-repository.ts
│ │ ├── change-watcher.ts
│ │ ├── check-documentation-links.ts
│ │ ├── cleanup-agent-artifacts.ts
│ │ ├── deploy-pages.ts
│ │ ├── detect-gaps.ts
│ │ ├── evaluate-readme-health.ts
│ │ ├── generate-config.ts
│ │ ├── generate-contextual-content.ts
│ │ ├── generate-llm-context.ts
│ │ ├── generate-readme-template.ts
│ │ ├── generate-technical-writer-prompts.ts
│ │ ├── kg-health-check.ts
│ │ ├── manage-preferences.ts
│ │ ├── manage-sitemap.ts
│ │ ├── optimize-readme.ts
│ │ ├── populate-content.ts
│ │ ├── readme-best-practices.ts
│ │ ├── recommend-ssg.ts
│ │ ├── setup-playwright-tests.ts
│ │ ├── setup-structure.ts
│ │ ├── simulate-execution.ts
│ │ ├── sync-code-to-docs.ts
│ │ ├── test-local-deployment.ts
│ │ ├── track-documentation-freshness.ts
│ │ ├── update-existing-documentation.ts
│ │ ├── validate-content.ts
│ │ ├── validate-documentation-freshness.ts
│ │ ├── validate-readme-checklist.ts
│ │ └── verify-deployment.ts
│ ├── types
│ │ └── api.ts
│ ├── utils
│ │ ├── artifact-detector.ts
│ │ ├── ast-analyzer.ts
│ │ ├── change-watcher.ts
│ │ ├── code-scanner.ts
│ │ ├── content-extractor.ts
│ │ ├── drift-detector.ts
│ │ ├── execution-simulator.ts
│ │ ├── freshness-tracker.ts
│ │ ├── language-parsers-simple.ts
│ │ ├── llm-client.ts
│ │ ├── permission-checker.ts
│ │ ├── semantic-analyzer.ts
│ │ ├── sitemap-generator.ts
│ │ ├── usage-metadata.ts
│ │ └── user-feedback-integration.ts
│ └── workflows
│ └── documentation-workflow.ts
├── test-docs-local.sh
├── tests
│ ├── api
│ │ └── mcp-responses.test.ts
│ ├── benchmarks
│ │ └── performance.test.ts
│ ├── call-graph-builder.test.ts
│ ├── change-watcher-priority.integration.test.ts
│ ├── change-watcher.test.ts
│ ├── edge-cases
│ │ └── error-handling.test.ts
│ ├── execution-simulator.test.ts
│ ├── functional
│ │ └── tools.test.ts
│ ├── integration
│ │ ├── kg-documentation-workflow.test.ts
│ │ ├── knowledge-graph-workflow.test.ts
│ │ ├── mcp-readme-tools.test.ts
│ │ ├── memory-mcp-tools.test.ts
│ │ ├── readme-technical-writer.test.ts
│ │ └── workflow.test.ts
│ ├── memory
│ │ ├── contextual-retrieval.test.ts
│ │ ├── enhanced-manager.test.ts
│ │ ├── export-import.test.ts
│ │ ├── freshness-kg-integration.test.ts
│ │ ├── kg-code-integration.test.ts
│ │ ├── kg-health.test.ts
│ │ ├── kg-link-validator.test.ts
│ │ ├── kg-storage-validation.test.ts
│ │ ├── kg-storage.test.ts
│ │ ├── knowledge-graph-documentation-examples.test.ts
│ │ ├── knowledge-graph-enhanced.test.ts
│ │ ├── knowledge-graph.test.ts
│ │ ├── learning.test.ts
│ │ ├── manager-advanced.test.ts
│ │ ├── manager.test.ts
│ │ ├── mcp-resource-integration.test.ts
│ │ ├── mcp-tool-persistence.test.ts
│ │ ├── schemas-documentation-examples.test.ts
│ │ ├── schemas.test.ts
│ │ ├── storage.test.ts
│ │ ├── temporal-analysis.test.ts
│ │ └── user-preferences.test.ts
│ ├── performance
│ │ ├── memory-load-testing.test.ts
│ │ └── memory-stress-testing.test.ts
│ ├── prompts
│ │ ├── guided-workflow-prompts.test.ts
│ │ └── technical-writer-prompts.test.ts
│ ├── server.test.ts
│ ├── setup.ts
│ ├── tools
│ │ ├── all-tools.test.ts
│ │ ├── analyze-coverage.test.ts
│ │ ├── analyze-deployments.test.ts
│ │ ├── analyze-readme.test.ts
│ │ ├── analyze-repository.test.ts
│ │ ├── check-documentation-links.test.ts
│ │ ├── cleanup-agent-artifacts.test.ts
│ │ ├── deploy-pages-kg-retrieval.test.ts
│ │ ├── deploy-pages-tracking.test.ts
│ │ ├── deploy-pages.test.ts
│ │ ├── detect-gaps.test.ts
│ │ ├── evaluate-readme-health.test.ts
│ │ ├── generate-contextual-content.test.ts
│ │ ├── generate-llm-context.test.ts
│ │ ├── generate-readme-template.test.ts
│ │ ├── generate-technical-writer-prompts.test.ts
│ │ ├── kg-health-check.test.ts
│ │ ├── manage-sitemap.test.ts
│ │ ├── optimize-readme.test.ts
│ │ ├── readme-best-practices.test.ts
│ │ ├── recommend-ssg-historical.test.ts
│ │ ├── recommend-ssg-preferences.test.ts
│ │ ├── recommend-ssg.test.ts
│ │ ├── simple-coverage.test.ts
│ │ ├── sync-code-to-docs.test.ts
│ │ ├── test-local-deployment.test.ts
│ │ ├── tool-error-handling.test.ts
│ │ ├── track-documentation-freshness.test.ts
│ │ ├── validate-content.test.ts
│ │ ├── validate-documentation-freshness.test.ts
│ │ └── validate-readme-checklist.test.ts
│ ├── types
│ │ └── type-safety.test.ts
│ └── utils
│ ├── artifact-detector.test.ts
│ ├── ast-analyzer.test.ts
│ ├── content-extractor.test.ts
│ ├── drift-detector-diataxis.test.ts
│ ├── drift-detector-priority.test.ts
│ ├── drift-detector.test.ts
│ ├── freshness-tracker.test.ts
│ ├── llm-client.test.ts
│ ├── semantic-analyzer.test.ts
│ ├── sitemap-generator.test.ts
│ ├── usage-metadata.test.ts
│ └── user-feedback-integration.test.ts
├── tsconfig.json
└── typedoc.json
```
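The tree above maps onto the CI pipelines shown later on this page. As a rough local equivalent of those pipeline steps (a sketch based on the commands in `.github/workflows/release.yml`; the actual script definitions live in `package.json`, which is not reproduced on this page):

```bash
# Sketch: reproduce the CI build/test loop locally.
# Assumes Node 20.x (see .nvmrc) and the npm scripts referenced in release.yml.
npm ci                                 # install pinned dependencies
npm run build                          # compile TypeScript in src/
npm test -- --coverage                 # Jest suites under tests/ with coverage
npm run docs:generate                  # TypeDoc output into docs/api/
(cd docs && npm ci && npm run build)   # Docusaurus site build
```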
# Files
--------------------------------------------------------------------------------
/docs/api/assets/icons.js:
--------------------------------------------------------------------------------
```javascript
(function () {
addIcons();
function addIcons() {
if (document.readyState === "loading")
return document.addEventListener("DOMContentLoaded", addIcons);
const svg = document.body.appendChild(
document.createElementNS("http://www.w3.org/2000/svg", "svg"),
);
svg.innerHTML = `<g id="icon-1" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-module)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">M</text></g><g id="icon-2" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-module)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">M</text></g><g id="icon-4" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-namespace)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">N</text></g><g id="icon-8" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-enum)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">E</text></g><g id="icon-16" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-property)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">P</text></g><g id="icon-32" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-variable)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">V</text></g><g id="icon-64" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-function)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">F</text></g><g id="icon-128" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-class)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">C</text></g><g id="icon-256" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-interface)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">I</text></g><g id="icon-512" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-constructor)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">C</text></g><g id="icon-1024" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-property)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">P</text></g><g id="icon-2048" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-method)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" 
text-anchor="middle">M</text></g><g id="icon-4096" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-function)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">F</text></g><g id="icon-8192" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-property)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">P</text></g><g id="icon-16384" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-constructor)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">C</text></g><g id="icon-32768" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-property)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">P</text></g><g id="icon-65536" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-type-alias)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">T</text></g><g id="icon-131072" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-type-alias)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">T</text></g><g id="icon-262144" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-accessor)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">A</text></g><g id="icon-524288" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-accessor)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">A</text></g><g id="icon-1048576" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-accessor)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">A</text></g><g id="icon-2097152" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-type-alias)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">T</text></g><g id="icon-4194304" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-reference)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">R</text></g><g id="icon-8388608" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-document)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><g stroke="var(--color-icon-text)" 
fill="none" stroke-width="1.5"><polygon points="6,5 6,19 18,19, 18,10 13,5"></polygon><line x1="9" y1="9" x2="13" y2="9"></line><line x1="9" y1="12" x2="15" y2="12"></line><line x1="9" y1="15" x2="15" y2="15"></line></g></g><g id="icon-folder" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-document)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><g stroke="var(--color-icon-text)" fill="none" stroke-width="1.5"><polygon points="5,5 10,5 12,8 19,8 19,18 5,18"></polygon></g></g><g id="icon-chevronDown" class="tsd-no-select"><path d="M4.93896 8.531L12 15.591L19.061 8.531L16.939 6.409L12 11.349L7.06098 6.409L4.93896 8.531Z" fill="var(--color-icon-text)"></path></g><g id="icon-chevronSmall" class="tsd-no-select"><path d="M1.5 5.50969L8 11.6609L14.5 5.50969L12.5466 3.66086L8 7.96494L3.45341 3.66086L1.5 5.50969Z" fill="var(--color-icon-text)"></path></g><g id="icon-checkbox" class="tsd-no-select"><rect class="tsd-checkbox-background" width="30" height="30" x="1" y="1" rx="6" fill="none"></rect><path class="tsd-checkbox-checkmark" d="M8.35422 16.8214L13.2143 21.75L24.6458 10.25" stroke="none" stroke-width="3.5" stroke-linejoin="round" fill="none"></path></g><g id="icon-menu" class="tsd-no-select"><rect x="1" y="3" width="14" height="2" fill="var(--color-icon-text)"></rect><rect x="1" y="7" width="14" height="2" fill="var(--color-icon-text)"></rect><rect x="1" y="11" width="14" height="2" fill="var(--color-icon-text)"></rect></g><g id="icon-search" class="tsd-no-select"><path d="M15.7824 13.833L12.6666 10.7177C12.5259 10.5771 12.3353 10.499 12.1353 10.499H11.6259C12.4884 9.39596 13.001 8.00859 13.001 6.49937C13.001 2.90909 10.0914 0 6.50048 0C2.90959 0 0 2.90909 0 6.49937C0 10.0896 2.90959 12.9987 6.50048 12.9987C8.00996 12.9987 9.39756 12.4863 10.5008 11.6239V12.1332C10.5008 12.3332 10.5789 12.5238 10.7195 12.6644L13.8354 15.7797C14.1292 16.0734 14.6042 16.0734 14.8948 15.7797L15.7793 14.8954C16.0731 14.6017 16.0731 14.1267 15.7824 13.833ZM6.50048 10.499C4.29094 10.499 2.50018 8.71165 2.50018 6.49937C2.50018 4.29021 4.28781 2.49976 6.50048 2.49976C8.71001 2.49976 10.5008 4.28708 10.5008 6.49937C10.5008 8.70852 8.71314 10.499 6.50048 10.499Z" fill="var(--color-icon-text)"></path></g><g id="icon-anchor" class="tsd-no-select"><g stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"></path><path d="M10 14a3.5 3.5 0 0 0 5 0l4 -4a3.5 3.5 0 0 0 -5 -5l-.5 .5"></path><path d="M14 10a3.5 3.5 0 0 0 -5 0l-4 4a3.5 3.5 0 0 0 5 5l.5 -.5"></path></g></g><g id="icon-alertNote" class="tsd-no-select"><path fill="var(--color-alert-note)" d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8Zm8-6.5a6.5 6.5 0 1 0 0 13 6.5 6.5 0 0 0 0-13ZM6.5 7.75A.75.75 0 0 1 7.25 7h1a.75.75 0 0 1 .75.75v2.75h.25a.75.75 0 0 1 0 1.5h-2a.75.75 0 0 1 0-1.5h.25v-2h-.25a.75.75 0 0 1-.75-.75ZM8 6a1 1 0 1 1 0-2 1 1 0 0 1 0 2Z"></path></g><g id="icon-alertTip" class="tsd-no-select"><path fill="var(--color-alert-tip)" d="M8 1.5c-2.363 0-4 1.69-4 3.75 0 .984.424 1.625.984 2.304l.214.253c.223.264.47.556.673.848.284.411.537.896.621 1.49a.75.75 0 0 1-1.484.211c-.04-.282-.163-.547-.37-.847a8.456 8.456 0 0 0-.542-.68c-.084-.1-.173-.205-.268-.32C3.201 7.75 2.5 6.766 2.5 5.25 2.5 2.31 4.863 0 8 0s5.5 2.31 5.5 5.25c0 1.516-.701 2.5-1.328 3.259-.095.115-.184.22-.268.319-.207.245-.383.453-.541.681-.208.3-.33.565-.37.847a.751.751 0 0 
1-1.485-.212c.084-.593.337-1.078.621-1.489.203-.292.45-.584.673-.848.075-.088.147-.173.213-.253.561-.679.985-1.32.985-2.304 0-2.06-1.637-3.75-4-3.75ZM5.75 12h4.5a.75.75 0 0 1 0 1.5h-4.5a.75.75 0 0 1 0-1.5ZM6 15.25a.75.75 0 0 1 .75-.75h2.5a.75.75 0 0 1 0 1.5h-2.5a.75.75 0 0 1-.75-.75Z"></path></g><g id="icon-alertImportant" class="tsd-no-select"><path fill="var(--color-alert-important)" d="M0 1.75C0 .784.784 0 1.75 0h12.5C15.216 0 16 .784 16 1.75v9.5A1.75 1.75 0 0 1 14.25 13H8.06l-2.573 2.573A1.458 1.458 0 0 1 3 14.543V13H1.75A1.75 1.75 0 0 1 0 11.25Zm1.75-.25a.25.25 0 0 0-.25.25v9.5c0 .138.112.25.25.25h2a.75.75 0 0 1 .75.75v2.19l2.72-2.72a.749.749 0 0 1 .53-.22h6.5a.25.25 0 0 0 .25-.25v-9.5a.25.25 0 0 0-.25-.25Zm7 2.25v2.5a.75.75 0 0 1-1.5 0v-2.5a.75.75 0 0 1 1.5 0ZM9 9a1 1 0 1 1-2 0 1 1 0 0 1 2 0Z"></path></g><g id="icon-alertWarning" class="tsd-no-select"><path fill="var(--color-alert-warning)" d="M6.457 1.047c.659-1.234 2.427-1.234 3.086 0l6.082 11.378A1.75 1.75 0 0 1 14.082 15H1.918a1.75 1.75 0 0 1-1.543-2.575Zm1.763.707a.25.25 0 0 0-.44 0L1.698 13.132a.25.25 0 0 0 .22.368h12.164a.25.25 0 0 0 .22-.368Zm.53 3.996v2.5a.75.75 0 0 1-1.5 0v-2.5a.75.75 0 0 1 1.5 0ZM9 11a1 1 0 1 1-2 0 1 1 0 0 1 2 0Z"></path></g><g id="icon-alertCaution" class="tsd-no-select"><path fill="var(--color-alert-caution)" d="M4.47.22A.749.749 0 0 1 5 0h6c.199 0 .389.079.53.22l4.25 4.25c.141.14.22.331.22.53v6a.749.749 0 0 1-.22.53l-4.25 4.25A.749.749 0 0 1 11 16H5a.749.749 0 0 1-.53-.22L.22 11.53A.749.749 0 0 1 0 11V5c0-.199.079-.389.22-.53Zm.84 1.28L1.5 5.31v5.38l3.81 3.81h5.38l3.81-3.81V5.31L10.69 1.5ZM8 4a.75.75 0 0 1 .75.75v3.5a.75.75 0 0 1-1.5 0v-3.5A.75.75 0 0 1 8 4Zm0 8a1 1 0 1 1 0-2 1 1 0 0 1 0 2Z"></path></g>`;
svg.style.display = "none";
if (location.protocol === "file:") updateUseElements();
}
function updateUseElements() {
document.querySelectorAll("use").forEach((el) => {
if (el.getAttribute("href").includes("#icon-")) {
el.setAttribute("href", el.getAttribute("href").replace(/.*#/, "#"));
}
});
}
})();
```
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
```yaml
name: Release
on:
  push:
    tags:
      - "v*.*.*"
  workflow_dispatch:
    inputs:
      version_type:
        description: "Version bump type"
        required: true
        default: "patch"
        type: choice
        options:
          - patch
          - minor
          - major
jobs:
  test:
    name: Pre-release Tests
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "20.x"
          cache: "npm"
      - name: Install dependencies
        run: npm ci
      - name: Run full test suite
        run: npm test -- --coverage
      - name: Verify 85% coverage threshold
        run: |
          coverage=$(npm test -- --coverage --silent 2>&1 | grep "All files" | awk '{print $4}' | sed 's/%//')
          if (( $(echo "$coverage < 85" | bc -l) )); then
            echo "Coverage $coverage% is below 85% threshold"
            exit 1
          fi
          echo "Coverage $coverage% meets requirement (target: 85%)"
      - name: Performance benchmarks
        run: npm run test:performance
      - name: Build verification
        run: npm run build
  release:
    name: Create Release
    runs-on: ubuntu-latest
    needs: test
    permissions:
      contents: write
      packages: write
      id-token: write # Required for OIDC trusted publishing
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "20.x"
          cache: "npm"
          registry-url: "https://registry.npmjs.org"
      - name: Update npm to latest version
        run: npm install -g npm@latest
        # npm 11.5.1+ required for trusted publishing (OIDC)
      - name: Install dependencies
        run: npm ci
      - name: Build project
        run: npm run build
      - name: Validate commit messages
        run: |
          echo "Validating commit messages follow conventional format..."
          if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
            # For manual releases, validate commits since last tag
            LAST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "")
            if [ -n "$LAST_TAG" ]; then
              npx commitlint --from "$LAST_TAG" --to HEAD --verbose || {
                echo "Commit message validation failed"
                echo "All commits must follow conventional commit format: type(scope): subject"
                exit 1
              }
            fi
          else
            # For tag-based releases, validate the tag commit
            npx commitlint --from HEAD~1 --to HEAD --verbose || {
              echo "Commit message validation failed"
              exit 1
            }
          fi
          echo "✅ All commit messages are valid"
      - name: Generate changelog and release
        id: release
        run: |
          git config --local user.email "[email protected]"
          git config --local user.name "GitHub Action"
          if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
            printf "Manual release triggered with version type: %s\n" "${{ github.event.inputs.version_type }}"
            # Fetch latest changes and ensure we're up to date before creating release
            echo "Fetching latest changes from remote..."
            git fetch origin main || {
              echo "Failed to fetch from remote"
              exit 1
            }
            # Pull latest changes to ensure we're on the latest commit
            # Use rebase to avoid merge commits in release workflow
            git pull --rebase origin main || {
              echo "Failed to pull latest changes. Remote may have diverged."
              echo "Attempting to merge instead..."
              git pull origin main || {
                echo "Failed to sync with remote repository"
                exit 1
              }
            }
            # Disable husky hooks for automated standard-version commits
            # These commits are already validated by CI pipeline
            export HUSKY=0
            export CI=true
            export GITHUB_ACTIONS=true
            # Also disable git hooks entirely for standard-version commits
            git config core.hooksPath /dev/null || true
            # Run standard-version to generate changelog and bump version
            npm run "release:${{ github.event.inputs.version_type }}" || {
              echo "standard-version failed"
              exit 1
            }
            # Restore git hooks path after standard-version
            git config --unset core.hooksPath || true
            # Get the new version and changelog
            NEW_VERSION=$(node -p "require('./package.json').version")
            printf "new_version=v%s\n" "$NEW_VERSION" >> "$GITHUB_OUTPUT"
            # Extract changelog for this version from CHANGELOG.md
            if [ -f "CHANGELOG.md" ]; then
              # Get changelog section for current version (between ## [version] and next ##)
              CHANGELOG_CONTENT=$(awk "/## \[$NEW_VERSION\]/{flag=1; next} /^## \[/{flag=0} flag" CHANGELOG.md | sed '/^$/d' || printf "## Changes\n\nAutomated release %s\n" "$NEW_VERSION")
              {
                echo "changelog_content<<EOF"
                printf "%s\n" "$CHANGELOG_CONTENT"
                echo "EOF"
              } >> "$GITHUB_OUTPUT"
            else
              echo "Warning: CHANGELOG.md not found after standard-version"
              printf "changelog_content=Automated release v%s\n" "$NEW_VERSION" >> "$GITHUB_OUTPUT"
            fi
            # Push the changes (version bump, changelog, and tag)
            git push --follow-tags origin main || {
              echo "Failed to push changes to repository"
              exit 1
            }
          else
            echo "Tag-based release"
            tag="${GITHUB_REF#refs/tags/}"
            printf "new_version=%s\n" "$tag" >> "$GITHUB_OUTPUT"
            # Extract changelog for tagged version
            if [ -f "CHANGELOG.md" ]; then
              VERSION_NUM=$(echo "$tag" | sed 's/^v//')
              CHANGELOG_CONTENT=$(awk "/## \[$VERSION_NUM\]/{flag=1; next} /^## \[/{flag=0} flag" CHANGELOG.md | sed '/^$/d' || printf "Release %s\n" "$tag")
              {
                echo "changelog_content<<EOF"
                printf "%s\n" "$CHANGELOG_CONTENT"
                echo "EOF"
              } >> "$GITHUB_OUTPUT"
            else
              printf "changelog_content=Release %s\n" "$tag" >> "$GITHUB_OUTPUT"
            fi
          fi
      - name: Create GitHub Release
        uses: ncipollo/release-action@v1
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          tag: ${{ steps.release.outputs.new_version }}
          name: Release ${{ steps.release.outputs.new_version }}
          body: |
            ## DocuMCP Release ${{ steps.release.outputs.new_version }}
            ${{ steps.release.outputs.changelog_content }}
            ### Installation
            ```bash
            npm install -g documcp@${{ steps.release.outputs.new_version }}
            ```
            ### System Requirements
            - Node.js 20.x or higher
            - npm 9.x or higher
            ### Quick Start
            ```bash
            # Install globally
            npm install -g documcp
            # Use with MCP client
            documcp analyze-repository --path ./my-project
            ```
          draft: false
          prerelease: false
          skipIfReleaseExists: true
          makeLatest: true
      - name: Publish to npm
        if: github.event_name == 'push' || github.event_name == 'workflow_dispatch'
        id: publish
        run: |
          echo "Publishing to npm using trusted publishing (OIDC)..."
          # Verify npm version supports trusted publishing (11.5.1+ required)
          NPM_VERSION=$(npm --version)
          echo "npm version: $NPM_VERSION"
          # Simple version check: compare major.minor.patch
          NPM_MAJOR=$(echo "$NPM_VERSION" | cut -d. -f1)
          NPM_MINOR=$(echo "$NPM_VERSION" | cut -d. -f2)
          NPM_PATCH=$(echo "$NPM_VERSION" | cut -d. -f3)
          if [ "$NPM_MAJOR" -lt 11 ] || ([ "$NPM_MAJOR" -eq 11 ] && [ "$NPM_MINOR" -lt 5 ]) || ([ "$NPM_MAJOR" -eq 11 ] && [ "$NPM_MINOR" -eq 5 ] && [ "$NPM_PATCH" -lt 1 ]); then
            echo "Warning: npm version $NPM_VERSION may not support trusted publishing"
            echo "npm 11.5.1+ is required for trusted publishing (OIDC)"
          else
            echo "✅ npm version $NPM_VERSION supports trusted publishing"
          fi
          # Get package version
          PACKAGE_VERSION=$(node -p "require('./package.json').version")
          echo "Publishing version: $PACKAGE_VERSION"
          # Publish with retry mechanism
          # Trusted publishing uses OIDC - no NODE_AUTH_TOKEN needed
          MAX_RETRIES=3
          RETRY_COUNT=0
          PUBLISH_SUCCESS=false
          while [ $RETRY_COUNT -lt $MAX_RETRIES ]; do
            if npm publish --access public; then
              PUBLISH_SUCCESS=true
              break
            else
              RETRY_COUNT=$((RETRY_COUNT + 1))
              if [ $RETRY_COUNT -lt $MAX_RETRIES ]; then
                echo "Publication attempt $RETRY_COUNT failed, retrying in 5 seconds..."
                sleep 5
              else
                echo "Publication failed after $MAX_RETRIES attempts"
                echo ""
                echo "Troubleshooting tips:"
                echo "1. Verify trusted publisher is configured on npmjs.com"
                echo "2. Check that workflow filename matches exactly: release.yml"
                echo "3. Ensure repository and organization match npmjs.com configuration"
                echo "4. Verify id-token: write permission is set in workflow"
                exit 1
              fi
            fi
          done
          if [ "$PUBLISH_SUCCESS" = true ]; then
            echo "✅ Successfully published to npm using trusted publishing!"
            echo "package_version=$PACKAGE_VERSION" >> "$GITHUB_OUTPUT"
          fi
      - name: Verify npm publication
        if: steps.publish.outcome == 'success'
        run: |
          PACKAGE_VERSION="${{ steps.publish.outputs.package_version }}"
          echo "Verifying package availability: documcp@$PACKAGE_VERSION"
          # Wait a moment for npm registry to update
          sleep 10
          # Verify package exists on npm registry
          if npm view "documcp@$PACKAGE_VERSION" version > /dev/null 2>&1; then
            echo "✅ Package verification successful: documcp@$PACKAGE_VERSION is available on npm"
            npm view "documcp@$PACKAGE_VERSION"
          else
            echo "❌ Package verification failed: documcp@$PACKAGE_VERSION not found on npm registry"
            echo "This may be a temporary registry delay. Please verify manually:"
            echo " npm view documcp@$PACKAGE_VERSION"
            exit 1
          fi
      - name: Test package installation
        if: steps.publish.outcome == 'success'
        run: |
          PACKAGE_VERSION="${{ steps.publish.outputs.package_version }}"
          echo "Testing package installation: documcp@$PACKAGE_VERSION"
          # Install the published package globally in a clean environment
          npm install -g "documcp@$PACKAGE_VERSION" || {
            echo "Package installation test failed"
            exit 1
          }
          # Verify the installed package works
          if command -v documcp > /dev/null 2>&1; then
            echo "✅ Package installation successful"
            documcp --version || echo "Version command available"
          else
            echo "❌ Package installation test failed: documcp command not found"
            exit 1
          fi
  docs:
    name: Deploy Documentation
    runs-on: ubuntu-latest
    needs: [test, release]
    permissions:
      pages: write
      id-token: write
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "20.x"
          cache: "npm"
      - name: Install dependencies
        run: npm ci
      - name: Generate API documentation
        run: npm run docs:generate
      - name: Remove problematic media directory
        run: rm -rf docs/api/media
      - name: Install Docusaurus dependencies
        run: cd docs && npm ci
      - name: Build Docusaurus site
        run: cd docs && npm run build
        env:
          NODE_ENV: production
      - name: Setup Pages
        uses: actions/configure-pages@v4
      - name: Upload to GitHub Pages
        uses: actions/upload-pages-artifact@v4
        with:
          path: ./docs/build
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v4
```
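For reference, the `workflow_dispatch` path above can be exercised from a local checkout. A minimal sketch using the GitHub CLI, assuming `gh` is installed and authenticated against this repository and that the same commitlint configuration used in CI is available locally:

```bash
# Dry-run the commit-message gate the workflow enforces before triggering a release
npx commitlint --from "$(git describe --tags --abbrev=0)" --to HEAD --verbose

# Trigger the manual release path with a chosen bump type (patch | minor | major)
gh workflow run release.yml -f version_type=minor

# Follow the resulting run
gh run watch
```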
--------------------------------------------------------------------------------
/tests/integration/knowledge-graph-workflow.test.ts:
--------------------------------------------------------------------------------
```typescript
/**
 * Integration Tests for Knowledge Graph Workflow
 * Phase 1: End-to-End KG-Analysis Integration
 */
import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
import { promises as fs } from "fs";
import { join } from "path";
import { tmpdir } from "os";
import {
  initializeKnowledgeGraph,
  getKnowledgeGraph,
  saveKnowledgeGraph,
  createOrUpdateProject,
  getProjectContext,
  trackDeployment,
  getKGStatistics,
} from "../../src/memory/kg-integration.js";
describe("Knowledge Graph Workflow Integration", () => {
  let testDir: string;
  let originalEnv: string | undefined;
  beforeEach(async () => {
    // Create temporary test directory
    testDir = join(tmpdir(), `kg-workflow-test-${Date.now()}`);
    await fs.mkdir(testDir, { recursive: true });
    // Set environment variable for storage
    originalEnv = process.env.DOCUMCP_STORAGE_DIR;
    process.env.DOCUMCP_STORAGE_DIR = testDir;
    // Initialize KG
    await initializeKnowledgeGraph(testDir);
  });
  afterEach(async () => {
    // Restore environment
    if (originalEnv) {
      process.env.DOCUMCP_STORAGE_DIR = originalEnv;
    } else {
      delete process.env.DOCUMCP_STORAGE_DIR;
    }
    // Clean up test directory
    try {
      await fs.rm(testDir, { recursive: true, force: true });
    } catch (error) {
      console.warn("Failed to clean up test directory:", error);
    }
  });
  describe("Complete Analysis Workflow", () => {
    it("should handle first-time project analysis", async () => {
      const analysis = {
        id: "analysis_001",
        timestamp: new Date().toISOString(),
        path: "/test/project",
        projectName: "Test Project",
        structure: {
          totalFiles: 100,
          totalDirectories: 10,
          languages: {
            typescript: 60,
            javascript: 30,
            json: 10,
          },
          hasTests: true,
          hasCI: true,
          hasDocs: false,
        },
      };
      // Create project in KG
      const projectNode = await createOrUpdateProject(analysis);
      expect(projectNode).toBeDefined();
      expect(projectNode.type).toBe("project");
      expect(projectNode.properties.name).toBe("Test Project");
      expect(projectNode.properties.analysisCount).toBe(1);
      // Get context (should be empty for first analysis)
      const context = await getProjectContext("/test/project");
      expect(context.previousAnalyses).toBe(1);
      expect(context.knownTechnologies).toContain("typescript");
    });
    it("should track returning project with historical context", async () => {
      const analysis1 = {
        id: "analysis_001",
        timestamp: new Date().toISOString(),
        path: "/test/project",
        projectName: "Test Project",
        structure: {
          totalFiles: 100,
          languages: { typescript: 60, javascript: 40 },
          hasTests: true,
          hasCI: false,
          hasDocs: false,
        },
      };
      const analysis2 = {
        id: "analysis_002",
        timestamp: new Date().toISOString(),
        path: "/test/project",
        projectName: "Test Project",
        structure: {
          totalFiles: 120,
          languages: { typescript: 80, javascript: 40 },
          hasTests: true,
          hasCI: true,
          hasDocs: true,
        },
      };
      // First analysis
      await createOrUpdateProject(analysis1);
      // Second analysis
      const projectNode = await createOrUpdateProject(analysis2);
      expect(projectNode.properties.analysisCount).toBe(2);
      // Get context
      const context = await getProjectContext("/test/project");
      expect(context.previousAnalyses).toBe(2);
      expect(context.lastAnalyzed).toBeDefined();
    });
    it("should find similar projects based on technologies", async () => {
      // Create multiple projects with shared technologies
      const project1 = {
        id: "analysis_001",
        timestamp: new Date().toISOString(),
        path: "/test/project1",
        projectName: "React App",
        structure: {
          totalFiles: 50,
          languages: { typescript: 30, javascript: 20 },
          hasTests: true,
          hasCI: false,
          hasDocs: false,
        },
      };
      const project2 = {
        id: "analysis_002",
        timestamp: new Date().toISOString(),
        path: "/test/project2",
        projectName: "Another React App",
        structure: {
          totalFiles: 75,
          languages: { typescript: 50, javascript: 25 },
          hasTests: true,
          hasCI: true,
          hasDocs: false,
        },
      };
      const project3 = {
        id: "analysis_003",
        timestamp: new Date().toISOString(),
        path: "/test/project3",
        projectName: "Python Project",
        structure: {
          totalFiles: 40,
          languages: { python: 40 },
          hasTests: true,
          hasCI: false,
          hasDocs: false,
        },
      };
      // Create all projects
      await createOrUpdateProject(project1);
      await createOrUpdateProject(project2);
      await createOrUpdateProject(project3);
      // Get context for project1
      const context = await getProjectContext("/test/project1");
      // Should find project2 as similar (shares typescript/javascript)
      // Should not find project3 (uses different stack)
      expect(context.similarProjects.length).toBeGreaterThan(0);
      const similarProject = context.similarProjects.find(
        (p) => p.properties.name === "Another React App",
      );
      expect(similarProject).toBeDefined();
    });
  });
  describe("Deployment Tracking Workflow", () => {
    it("should track successful deployment", async () => {
      // Create project
      const analysis = {
        id: "project_001",
        timestamp: new Date().toISOString(),
        path: "/test/project",
        projectName: "Test Project",
        structure: {
          totalFiles: 50,
          languages: { typescript: 50 },
          hasTests: true,
          hasCI: false,
          hasDocs: false,
        },
      };
      const projectNode = await createOrUpdateProject(analysis);
      // Track successful deployment
      await trackDeployment(projectNode.id, "docusaurus", true, {
        buildTime: 45,
        deploymentUrl: "https://test.github.io",
      });
      // Verify deployment was tracked
      const kg = await getKnowledgeGraph();
      const edges = await kg.findEdges({
        source: projectNode.id,
        properties: { baseType: "project_deployed_with" },
      });
      expect(edges.length).toBeGreaterThan(0);
      expect(edges[0].properties.success).toBe(true);
      expect(edges[0].properties.buildTime).toBe(45);
    });
    it("should track failed deployment", async () => {
      const analysis = {
        id: "project_002",
        timestamp: new Date().toISOString(),
        path: "/test/project2",
        projectName: "Test Project 2",
        structure: {
          totalFiles: 50,
          languages: { javascript: 50 },
          hasTests: false,
          hasCI: false,
          hasDocs: false,
        },
      };
      const projectNode = await createOrUpdateProject(analysis);
      // Track failed deployment
      await trackDeployment(projectNode.id, "jekyll", false, {
        errorMessage: "Ruby version mismatch",
      });
      const kg = await getKnowledgeGraph();
      const edges = await kg.findEdges({
        source: projectNode.id,
        properties: { baseType: "project_deployed_with" },
      });
      expect(edges.length).toBeGreaterThan(0);
      expect(edges[0].properties.success).toBe(false);
      expect(edges[0].properties.errorMessage).toContain("Ruby version");
    });
    it("should update configuration success rate over time", async () => {
      const analysis = {
        id: "project_003",
        timestamp: new Date().toISOString(),
        path: "/test/project3",
        projectName: "Test Project 3",
        structure: {
          totalFiles: 50,
          languages: { typescript: 50 },
          hasTests: false,
          hasCI: false,
          hasDocs: false,
        },
      };
      const projectNode = await createOrUpdateProject(analysis);
      // Track multiple deployments
      await trackDeployment(projectNode.id, "hugo", true);
      await trackDeployment(projectNode.id, "hugo", true);
      await trackDeployment(projectNode.id, "hugo", false);
      const kg = await getKnowledgeGraph();
      const configNode = await kg.findNode({
        type: "configuration",
        properties: { ssg: "hugo" },
      });
      expect(configNode).toBeDefined();
      expect(configNode!.properties.usageCount).toBe(3);
      // Success rate: 2/3 = 0.666...
      expect(configNode!.properties.deploymentSuccessRate).toBeCloseTo(
        0.666,
        2,
      );
    });
  });
  describe("Knowledge Graph Statistics", () => {
    it("should return accurate statistics", async () => {
      // Create multiple projects
      for (let i = 0; i < 5; i++) {
        const analysis = {
          id: `project_00${i}`,
          timestamp: new Date().toISOString(),
          path: `/test/project${i}`,
          projectName: `Project ${i}`,
          structure: {
            totalFiles: 50,
            languages: { typescript: 30, javascript: 20 },
            hasTests: true,
            hasCI: false,
            hasDocs: false,
          },
        };
        await createOrUpdateProject(analysis);
      }
      const stats = await getKGStatistics();
      expect(stats.projectCount).toBe(5);
      expect(stats.technologyCount).toBeGreaterThan(0);
      expect(stats.nodeCount).toBeGreaterThan(5); // Projects + technologies
      expect(stats.edgeCount).toBeGreaterThan(0); // project_uses_technology edges
    });
  });
  describe("Persistence Workflow", () => {
    it("should persist data across sessions", async () => {
      const analysis = {
        id: "persistent_project",
        timestamp: new Date().toISOString(),
        path: "/test/persistent",
        projectName: "Persistent Project",
        structure: {
          totalFiles: 50,
          languages: { typescript: 50 },
          hasTests: true,
          hasCI: false,
          hasDocs: false,
        },
      };
      // Create project and save
      await createOrUpdateProject(analysis);
      await saveKnowledgeGraph();
      // Reinitialize (simulating new session)
      await initializeKnowledgeGraph(testDir);
      // Verify data was loaded
      const context = await getProjectContext("/test/persistent");
      expect(context.previousAnalyses).toBe(1);
      expect(context.knownTechnologies).toContain("typescript");
    });
  });
  describe("Complex Multi-Step Workflow", () => {
    it("should handle complete project lifecycle", async () => {
      // Step 1: Initial analysis
      const initialAnalysis = {
        id: "lifecycle_project",
        timestamp: new Date().toISOString(),
        path: "/test/lifecycle",
        projectName: "Lifecycle Project",
        structure: {
          totalFiles: 30,
          languages: { javascript: 30 },
          hasTests: false,
          hasCI: false,
          hasDocs: false,
        },
      };
      const project1 = await createOrUpdateProject(initialAnalysis);
      expect(project1.properties.analysisCount).toBe(1);
      // Step 2: Track deployment attempt (failed)
      await trackDeployment(project1.id, "jekyll", false, {
        errorMessage: "Missing dependencies",
      });
      // Step 3: Re-analysis after fixes
      const updatedAnalysis = {
        ...initialAnalysis,
        id: "lifecycle_project_2",
        timestamp: new Date().toISOString(),
        structure: {
          totalFiles: 35,
          languages: { javascript: 30, json: 5 },
          hasTests: true,
          hasCI: true,
          hasDocs: true,
        },
      };
      const project2 = await createOrUpdateProject(updatedAnalysis);
      expect(project2.properties.analysisCount).toBe(2);
      expect(project2.properties.hasCI).toBe(true);
      // Step 4: Successful deployment
      await trackDeployment(project2.id, "eleventy", true, {
        buildTime: 30,
        deploymentUrl: "https://lifecycle.github.io",
      });
      // Verify complete lifecycle
      const kg = await getKnowledgeGraph();
      // Check project node
      const projectNode = await kg.findNode({
        type: "project",
        properties: { path: "/test/lifecycle" },
      });
      expect(projectNode).toBeDefined();
      expect(projectNode!.properties.analysisCount).toBe(2);
      // Check deployments
      const deployments = await kg.findEdges({
        source: projectNode!.id,
        properties: { baseType: "project_deployed_with" },
      });
      expect(deployments).toHaveLength(2);
      // Check technologies
      const techEdges = await kg.findEdges({
        source: projectNode!.id,
        type: "project_uses_technology",
      });
      expect(techEdges.length).toBeGreaterThan(0);
      // Get final context
      const context = await getProjectContext("/test/lifecycle");
      expect(context.previousAnalyses).toBe(2);
      expect(context.knownTechnologies).toContain("javascript");
    });
  });
});
```
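This suite manages its own isolation: `beforeEach` points `DOCUMCP_STORAGE_DIR` at a fresh temp directory and `afterEach` removes it, so no manual cleanup is needed. A minimal sketch for running just this suite; the exact flags depend on `jest.config.js` (e.g. ESM/ts-jest settings not shown on this page):

```bash
# Run only the knowledge-graph workflow integration tests
npx jest tests/integration/knowledge-graph-workflow.test.ts --verbose
```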
--------------------------------------------------------------------------------
/docs/sitemap.xml:
--------------------------------------------------------------------------------
```xml
<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url>
<loc>https://tosin2013.github.io/documcp/</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>weekly</changefreq>
<priority>1.0</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/tutorials/</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>1.0</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/tutorials/development-setup.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>1.0</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/tutorials/environment-setup.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>1.0</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/tutorials/first-deployment.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>1.0</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/tutorials/getting-started.html</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>monthly</changefreq>
<priority>1.0</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/tutorials/memory-workflows.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>1.0</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/tutorials/user-onboarding.html</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>monthly</changefreq>
<priority>1.0</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/how-to/</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/how-to/analytics-setup.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/how-to/custom-domains.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/how-to/github-pages-deployment.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/how-to/local-testing.html</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>monthly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/how-to/performance-optimization.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/how-to/prompting-guide.html</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>monthly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/how-to/repository-analysis.html</loc>
<lastmod>2025-09-30</lastmod>
<changefreq>monthly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/how-to/seo-optimization.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/how-to/site-monitoring.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/how-to/troubleshooting.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/how-to/usage-examples.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/domain-1-mcp-architecture/</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>weekly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/domain-3-ssg-recommendation/</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>weekly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/domain-5-github-deployment/</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>weekly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/adr-0006-mcp-tools-api-design.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/api/</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/api/hierarchy.html</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/api/modules.html</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/api/variables/TOOLS.html</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/reference/</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/reference/api-overview.html</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/reference/cli.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/reference/configuration.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/reference/mcp-tools.html</loc>
<lastmod>2025-09-30</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/reference/prompt-templates.html</loc>
<lastmod>2025-09-30</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/domain-6-api-design/README.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/explanation/</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>monthly</changefreq>
<priority>0.7</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/explanation/architecture.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>0.7</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/adr-0001-mcp-server-architecture.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/adr-0002-repository-analysis-engine.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/adr-0003-static-site-generator-recommendation-engine.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/adr-0004-diataxis-framework-integration.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/adr-0005-github-pages-deployment-automation.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/adr-0007-mcp-prompts-and-resources-integration.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/adr-0008-intelligent-content-population-engine.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/adr-0009-content-accuracy-validation-framework.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/adr-0010-mcp-resource-pattern-redesign.html</loc>
<lastmod>2025-10-09</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/adr-0011-ce-mcp-compatibility.html</loc>
<lastmod>2025-10-09</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/adr-0012-priority-scoring-system-for-documentation-drift.html</loc>
<lastmod>2025-10-09</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/adr-0013-release-pipeline-and-package-distribution.html</loc>
<lastmod>2025-10-09</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/README.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/development/MCP_INSPECTOR_TESTING.html</loc>
<lastmod>2025-10-09</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/guides/link-validation.html</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/guides/playwright-integration.html</loc>
<lastmod>2025-10-04</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/guides/playwright-testing-workflow.html</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/phase-2-intelligence.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/cross-domain-integration/README.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/domain-1-mcp-architecture/mcp-performance-research.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/domain-2-repository-analysis/README.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/domain-3-ssg-recommendation/ssg-performance-analysis.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/domain-4-diataxis-integration/README.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/domain-5-github-deployment/github-pages-security-analysis.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/README.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/research-integration-summary-2025-01-14.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/research-progress-template.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/research-questions-2025-01-14.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
</urlset>
```
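The sitemap appears to be produced by the `manage-sitemap` tool backed by `src/utils/sitemap-generator.ts` (see the tree above). A minimal sketch for sanity-checking the generated file locally, assuming `xmllint` and `curl` are installed:

```bash
# Quick local checks for docs/sitemap.xml
xmllint --noout docs/sitemap.xml        # well-formedness check
grep -c "<loc>" docs/sitemap.xml        # count the URLs listed
# Spot-check that one listed entry resolves (200 expected once the site is deployed)
curl -s -o /dev/null -w "%{http_code}\n" \
  https://tosin2013.github.io/documcp/tutorials/getting-started.html
```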
--------------------------------------------------------------------------------
/src/memory/user-preferences.ts:
--------------------------------------------------------------------------------
```typescript
/**
 * User Preference Management Module
 * Phase 2.2: User Preference Learning and Application
 *
 * Tracks and applies user preferences across DocuMCP operations
 */
import { getKnowledgeGraph, saveKnowledgeGraph } from "./kg-integration.js";
import { GraphNode } from "./knowledge-graph.js";
export interface UserPreferences {
  userId: string;
  preferredSSGs: string[];
  documentationStyle: "minimal" | "comprehensive" | "tutorial-heavy";
  expertiseLevel: "beginner" | "intermediate" | "advanced";
  preferredTechnologies: string[];
  preferredDiataxisCategories: Array<
    "tutorials" | "how-to" | "reference" | "explanation"
  >;
  autoApplyPreferences: boolean;
  lastUpdated: string;
}
export interface SSGUsageEvent {
  ssg: string;
  success: boolean;
  timestamp: string;
  projectType?: string;
}
/**
 * User Preference Manager
 * Handles storage, retrieval, and inference of user preferences
 */
export class UserPreferenceManager {
  private userId: string;
  private preferences: UserPreferences | null = null;
  constructor(userId: string = "default") {
    this.userId = userId;
  }
  /**
   * Initialize and load user preferences from knowledge graph
   */
  async initialize(): Promise<void> {
    const kg = await getKnowledgeGraph();
    // Find existing user node
    const userNode = await kg.findNode({
      type: "user",
      properties: { userId: this.userId },
    });
    if (userNode) {
      this.preferences = {
        userId: this.userId,
        preferredSSGs: userNode.properties.preferredSSGs || [],
        documentationStyle:
          userNode.properties.documentationStyle || "comprehensive",
        expertiseLevel: userNode.properties.expertiseLevel || "intermediate",
        preferredTechnologies: userNode.properties.preferredTechnologies || [],
        preferredDiataxisCategories:
          userNode.properties.preferredDiataxisCategories || [],
        autoApplyPreferences:
          userNode.properties.autoApplyPreferences !== false,
        lastUpdated: userNode.properties.lastActive || new Date().toISOString(),
      };
    } else {
      // Create default preferences
      this.preferences = {
        userId: this.userId,
        preferredSSGs: [],
        documentationStyle: "comprehensive",
        expertiseLevel: "intermediate",
        preferredTechnologies: [],
        preferredDiataxisCategories: [],
        autoApplyPreferences: true,
        lastUpdated: new Date().toISOString(),
      };
      // Store in knowledge graph
      await this.save();
    }
  }
  /**
   * Get current user preferences
   */
  async getPreferences(): Promise<UserPreferences> {
    if (!this.preferences) {
      await this.initialize();
    }
    return this.preferences!;
  }
  /**
   * Update user preferences
   */
  async updatePreferences(
    updates: Partial<Omit<UserPreferences, "userId" | "lastUpdated">>,
  ): Promise<UserPreferences> {
    if (!this.preferences) {
      await this.initialize();
    }
    this.preferences = {
      ...this.preferences!,
      ...updates,
      lastUpdated: new Date().toISOString(),
    };
    await this.save();
    return this.preferences;
  }
  /**
   * Track SSG usage and infer preferences
   */
  async trackSSGUsage(event: SSGUsageEvent): Promise<void> {
    if (!this.preferences) {
      await this.initialize();
    }
    const kg = await getKnowledgeGraph();
    // Find user node
    const userNodeId = `user:${this.userId}`;
    let userNode = await kg.findNode({
      type: "user",
      properties: { userId: this.userId },
    });
    if (!userNode) {
      userNode = kg.addNode({
        id: userNodeId,
        type: "user",
        label: this.userId,
        properties: {
          userId: this.userId,
          expertiseLevel: this.preferences!.expertiseLevel,
          preferredSSGs: [],
          preferredTechnologies: [],
          documentationStyle: this.preferences!.documentationStyle,
          projectCount: 0,
          lastActive: new Date().toISOString(),
          createdAt: new Date().toISOString(),
        },
        weight: 1.0,
      });
    }
    // Find or create configuration node
    const configNodeId = `configuration:${event.ssg}`;
    let configNode = await kg.findNode({
      type: "configuration",
      properties: { ssg: event.ssg },
});
if (!configNode) {
configNode = kg.addNode({
id: configNodeId,
type: "configuration",
label: `${event.ssg} configuration`,
properties: {
ssg: event.ssg,
settings: {},
deploymentSuccessRate: event.success ? 1.0 : 0.0,
usageCount: 1,
lastUsed: event.timestamp,
},
weight: 1.0,
});
}
// Create or update preference relationship
const existingEdges = await kg.findEdges({
source: userNode.id,
target: configNode.id,
type: "user_prefers_ssg",
});
if (existingEdges.length > 0) {
// Update existing preference
const edge = existingEdges[0];
const currentCount = edge.properties.usageCount || 1;
const currentRate = edge.properties.successRate || 0.5;
edge.properties.usageCount = currentCount + 1;
edge.properties.successRate =
(currentRate * currentCount + (event.success ? 1.0 : 0.0)) /
(currentCount + 1);
edge.properties.lastUsed = event.timestamp;
edge.weight = edge.properties.successRate;
} else {
// Create new preference relationship
kg.addEdge({
source: userNode.id,
target: configNode.id,
type: "user_prefers_ssg",
weight: event.success ? 1.0 : 0.5,
confidence: 1.0,
properties: {
usageCount: 1,
lastUsed: event.timestamp,
successRate: event.success ? 1.0 : 0.0,
},
});
}
// Update user's preferred SSGs list based on success rate
await this.inferPreferredSSGs();
await saveKnowledgeGraph();
}
/**
* Infer preferred SSGs from usage history
*/
private async inferPreferredSSGs(): Promise<void> {
if (!this.preferences) {
await this.initialize();
}
const kg = await getKnowledgeGraph();
// Find user node
const userNode = await kg.findNode({
type: "user",
properties: { userId: this.userId },
});
if (!userNode) return;
// Get all SSG preference edges
const preferenceEdges = await kg.findEdges({
source: userNode.id,
type: "user_prefers_ssg",
});
// Calculate preference scores (usage count * success rate)
const ssgScores = new Map<string, number>();
for (const edge of preferenceEdges) {
const configNode = (await kg.getAllNodes()).find(
(n) => n.id === edge.target,
);
if (configNode && configNode.type === "configuration") {
const ssg = configNode.properties.ssg;
const usageCount = edge.properties.usageCount || 1;
const successRate = edge.properties.successRate || 0.5;
// Score = usage frequency * success rate
const score = usageCount * successRate;
ssgScores.set(ssg, score);
}
}
// Sort by score and take top 3
const topSSGs = Array.from(ssgScores.entries())
.sort((a, b) => b[1] - a[1])
.slice(0, 3)
.map(([ssg]) => ssg);
// Update preferences
this.preferences!.preferredSSGs = topSSGs;
this.preferences!.lastUpdated = new Date().toISOString();
// Update user node
userNode.properties.preferredSSGs = topSSGs;
}
/**
* Get SSG recommendations based on user preferences
*/
async getSSGRecommendations(): Promise<
Array<{ ssg: string; score: number; reason: string }>
> {
if (!this.preferences) {
await this.initialize();
}
const kg = await getKnowledgeGraph();
// Find user node
const userNode = await kg.findNode({
type: "user",
properties: { userId: this.userId },
});
if (!userNode) {
return [];
}
// Get all SSG preference edges
const preferenceEdges = await kg.findEdges({
source: userNode.id,
type: "user_prefers_ssg",
});
const recommendations: Array<{
ssg: string;
score: number;
reason: string;
}> = [];
for (const edge of preferenceEdges) {
const configNode = (await kg.getAllNodes()).find(
(n) => n.id === edge.target,
);
if (configNode && configNode.type === "configuration") {
const ssg = configNode.properties.ssg;
const usageCount = edge.properties.usageCount || 1;
const successRate = edge.properties.successRate || 0.5;
// Calculate recommendation score
const score = usageCount * successRate;
let reason = `Used ${usageCount} time(s)`;
if (successRate >= 0.8) {
reason += `, ${(successRate * 100).toFixed(0)}% success rate`;
} else if (successRate < 0.5) {
reason += `, only ${(successRate * 100).toFixed(0)}% success rate`;
}
recommendations.push({ ssg, score, reason });
}
}
return recommendations.sort((a, b) => b.score - a.score);
}
/**
* Apply user preferences to a recommendation
*/
applyPreferencesToRecommendation(
recommendation: string,
alternatives: string[],
): { recommended: string; adjustmentReason?: string } {
if (!this.preferences || !this.preferences.autoApplyPreferences) {
return { recommended: recommendation };
}
// Check if user has a strong preference
const preferredSSGs = this.preferences.preferredSSGs;
if (preferredSSGs.length > 0) {
// If recommended SSG is already in preferences, keep it
if (preferredSSGs.includes(recommendation)) {
return {
recommended: recommendation,
adjustmentReason: "Matches your preferred SSG",
};
}
// Check if any preferred SSG is in alternatives
for (const preferred of preferredSSGs) {
if (alternatives.includes(preferred)) {
return {
recommended: preferred,
adjustmentReason: `Switched to ${preferred} based on your usage history`,
};
}
}
}
return { recommended: recommendation };
}
/**
* Save preferences to knowledge graph
*/
private async save(): Promise<void> {
if (!this.preferences) return;
const kg = await getKnowledgeGraph();
const userNodeId = `user:${this.userId}`;
const existingNode = await kg.findNode({
type: "user",
properties: { userId: this.userId },
});
const userNode: GraphNode = {
id: existingNode?.id || userNodeId,
type: "user",
label: this.userId,
properties: {
userId: this.preferences.userId,
expertiseLevel: this.preferences.expertiseLevel,
preferredSSGs: this.preferences.preferredSSGs,
preferredTechnologies: this.preferences.preferredTechnologies,
documentationStyle: this.preferences.documentationStyle,
preferredDiataxisCategories:
this.preferences.preferredDiataxisCategories,
autoApplyPreferences: this.preferences.autoApplyPreferences,
projectCount: existingNode?.properties.projectCount || 0,
lastActive: this.preferences.lastUpdated,
createdAt:
existingNode?.properties.createdAt || this.preferences.lastUpdated,
},
weight: 1.0,
lastUpdated: this.preferences.lastUpdated,
};
kg.addNode(userNode);
await saveKnowledgeGraph();
}
/**
* Reset preferences to defaults
*/
async resetPreferences(): Promise<UserPreferences> {
this.preferences = {
userId: this.userId,
preferredSSGs: [],
documentationStyle: "comprehensive",
expertiseLevel: "intermediate",
preferredTechnologies: [],
preferredDiataxisCategories: [],
autoApplyPreferences: true,
lastUpdated: new Date().toISOString(),
};
await this.save();
return this.preferences;
}
/**
* Export preferences as JSON
*/
async exportPreferences(): Promise<string> {
if (!this.preferences) {
await this.initialize();
}
return JSON.stringify(this.preferences, null, 2);
}
/**
* Import preferences from JSON
*/
async importPreferences(json: string): Promise<UserPreferences> {
const imported = JSON.parse(json) as UserPreferences;
// Validate userId matches
if (imported.userId !== this.userId) {
throw new Error(
`User ID mismatch: expected ${this.userId}, got ${imported.userId}`,
);
}
this.preferences = {
...imported,
lastUpdated: new Date().toISOString(),
};
await this.save();
return this.preferences;
}
}
/**
* Get or create a user preference manager instance
*/
const userPreferenceManagers = new Map<string, UserPreferenceManager>();
export async function getUserPreferenceManager(
userId: string = "default",
): Promise<UserPreferenceManager> {
if (!userPreferenceManagers.has(userId)) {
const manager = new UserPreferenceManager(userId);
await manager.initialize();
userPreferenceManagers.set(userId, manager);
}
return userPreferenceManagers.get(userId)!;
}
/**
* Clear all cached preference managers (for testing)
*/
export function clearPreferenceManagerCache(): void {
userPreferenceManagers.clear();
}
```
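A minimal usage sketch for the module above, assuming it is imported from the compiled `memory/user-preferences.js` path; the tool values, project type, and user ID are illustrative only, not part of the source:

```typescript
import {
  getUserPreferenceManager,
  clearPreferenceManagerCache,
} from "./memory/user-preferences.js";

async function demoPreferences() {
  // Obtain (and lazily initialize) the manager for the default user
  const manager = await getUserPreferenceManager("default");

  // Record a successful Docusaurus deployment so preferences can be inferred
  await manager.trackSSGUsage({
    ssg: "docusaurus",
    success: true,
    timestamp: new Date().toISOString(),
    projectType: "typescript-library", // illustrative value
  });

  // Recommendations ranked by usage count * success rate
  const recommendations = await manager.getSSGRecommendations();
  console.log(recommendations);

  // Bias an external recommendation toward the user's preferred SSGs
  const adjusted = manager.applyPreferencesToRecommendation("hugo", [
    "docusaurus",
    "mkdocs",
  ]);
  console.log(adjusted.recommended, adjusted.adjustmentReason);

  // Reset the module-level cache (useful in tests)
  clearPreferenceManagerCache();
}

demoPreferences().catch(console.error);
```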
--------------------------------------------------------------------------------
/src/workflows/documentation-workflow.ts:
--------------------------------------------------------------------------------
```typescript
// Documentation Workflow Guide for LLMs using DocuMCP MCP Server
export interface WorkflowStep {
tool: string;
description: string;
requiredInputs: string[];
outputs: string[];
optional: boolean;
alternatives?: string[];
}
export interface DocumentationWorkflow {
name: string;
description: string;
useCase: string;
steps: WorkflowStep[];
estimatedTime: string;
complexity: "simple" | "moderate" | "complex";
}
// Primary Documentation Creation Workflows
export const DOCUMENTATION_WORKFLOWS: Record<string, DocumentationWorkflow> = {
// Complete end-to-end documentation setup
"full-documentation-setup": {
name: "Complete Documentation Setup",
description:
"Analyze repository, recommend SSG, create structure, populate content, and deploy",
useCase:
"Starting from scratch with a new project that needs comprehensive documentation",
complexity: "complex",
estimatedTime: "5-10 minutes",
steps: [
{
tool: "analyze_repository",
description:
"Analyze the repository structure, dependencies, and documentation needs",
requiredInputs: ["path"],
outputs: ["analysisId", "projectMetadata", "technologyStack"],
optional: false,
},
{
tool: "recommend_ssg",
description:
"Get intelligent SSG recommendation based on project analysis",
requiredInputs: ["analysisId"],
outputs: ["recommendedSSG", "justification", "alternatives"],
optional: false,
},
{
tool: "generate_config",
description: "Generate configuration files for the recommended SSG",
requiredInputs: ["ssg", "projectName", "outputPath"],
outputs: ["configFiles", "setupInstructions"],
optional: false,
},
{
tool: "setup_structure",
description: "Create Diataxis-compliant documentation structure",
requiredInputs: ["path", "ssg"],
outputs: ["directoryStructure", "templateFiles"],
optional: false,
},
{
tool: "populate_diataxis_content",
description: "Intelligently populate content based on project analysis",
requiredInputs: ["analysisId", "docsPath"],
outputs: ["generatedContent", "populationMetrics"],
optional: false,
},
{
tool: "validate_diataxis_content",
description: "Validate generated content for accuracy and compliance",
requiredInputs: ["contentPath"],
outputs: ["validationResults", "recommendations"],
optional: true,
},
{
tool: "deploy_pages",
description: "Set up GitHub Pages deployment workflow",
requiredInputs: ["repository", "ssg"],
outputs: ["deploymentWorkflow", "deploymentInstructions"],
optional: true,
},
],
},
// Quick documentation setup for existing projects
"quick-documentation-setup": {
name: "Quick Documentation Setup",
description: "Rapid documentation creation using intelligent defaults",
useCase:
"Existing project needs documentation quickly with minimal customization",
complexity: "simple",
estimatedTime: "2-3 minutes",
steps: [
{
tool: "analyze_repository",
description: "Quick analysis of repository",
requiredInputs: ["path"],
outputs: ["analysisId"],
optional: false,
},
{
tool: "setup_structure",
description:
"Create documentation structure with intelligent SSG selection",
requiredInputs: ["path"],
outputs: ["directoryStructure"],
optional: false,
alternatives: ["Use analyze_repository output to auto-select SSG"],
},
{
tool: "populate_diataxis_content",
description: "Auto-populate with project-specific content",
requiredInputs: ["analysisId", "docsPath"],
outputs: ["generatedContent"],
optional: false,
},
],
},
// Content-focused workflow for existing documentation
"enhance-existing-documentation": {
name: "Enhance Existing Documentation",
description: "Improve and populate existing documentation structure",
useCase:
"Project already has basic documentation that needs content and validation",
complexity: "moderate",
estimatedTime: "3-5 minutes",
steps: [
{
tool: "analyze_repository",
description: "Analyze repository and existing documentation",
requiredInputs: ["path"],
outputs: ["analysisId", "existingDocsAnalysis"],
optional: false,
},
{
tool: "validate_diataxis_content",
description: "Validate existing content and identify gaps",
requiredInputs: ["contentPath", "analysisId"],
outputs: ["validationResults", "contentGaps"],
optional: false,
},
{
tool: "populate_diataxis_content",
description: "Fill content gaps with intelligent population",
requiredInputs: ["analysisId", "docsPath", "preserveExisting: true"],
outputs: ["enhancedContent"],
optional: false,
},
{
tool: "validate_diataxis_content",
description: "Final validation of enhanced content",
requiredInputs: ["contentPath"],
outputs: ["finalValidation"],
optional: true,
},
],
},
// Deployment-focused workflow
"deployment-only": {
name: "Documentation Deployment Setup",
description: "Set up deployment for existing documentation",
useCase: "Documentation exists but needs automated deployment setup",
complexity: "simple",
estimatedTime: "1-2 minutes",
steps: [
{
tool: "analyze_repository",
description:
"Analyze repository structure for deployment configuration",
requiredInputs: ["path"],
outputs: ["deploymentContext"],
optional: true,
},
{
tool: "deploy_pages",
description: "Set up GitHub Pages deployment workflow",
requiredInputs: ["repository", "ssg"],
outputs: ["deploymentWorkflow"],
optional: false,
},
{
tool: "verify_deployment",
description: "Verify deployment configuration and test",
requiredInputs: ["repository"],
outputs: ["deploymentStatus", "troubleshootingInfo"],
optional: true,
},
],
},
// Validation and quality assurance workflow
"documentation-audit": {
name: "Documentation Quality Audit",
description: "Comprehensive validation and quality assessment",
useCase:
"Existing documentation needs quality assessment and improvement recommendations",
complexity: "moderate",
estimatedTime: "2-3 minutes",
steps: [
{
tool: "analyze_repository",
description: "Analyze repository for context-aware validation",
requiredInputs: ["path"],
outputs: ["analysisId", "projectContext"],
optional: false,
},
{
tool: "validate_diataxis_content",
description: "Comprehensive content validation with all checks",
requiredInputs: [
"contentPath",
"analysisId",
"validationType: all",
"confidence: strict",
],
outputs: ["detailedValidation", "improvementPlan"],
optional: false,
},
{
tool: "verify_deployment",
description: "Validate deployment if applicable",
requiredInputs: ["repository"],
outputs: ["deploymentHealth"],
optional: true,
},
],
},
};
// Workflow Decision Helper
export interface WorkflowRecommendation {
recommendedWorkflow: string;
reason: string;
alternativeWorkflows: string[];
customizationSuggestions: string[];
}
export function recommendWorkflow(
projectStatus:
| "new"
| "existing-no-docs"
| "existing-basic-docs"
| "existing-full-docs",
requirements: {
needsDeployment?: boolean;
timeConstraint?: "minimal" | "moderate" | "comprehensive";
qualityFocus?: boolean;
customization?: "minimal" | "moderate" | "high";
},
): WorkflowRecommendation {
// New project or no documentation
if (projectStatus === "new" || projectStatus === "existing-no-docs") {
if (requirements.timeConstraint === "minimal") {
return {
recommendedWorkflow: "quick-documentation-setup",
reason:
"Fast setup with intelligent defaults for time-constrained scenario",
alternativeWorkflows: ["full-documentation-setup"],
customizationSuggestions: [
"Consider full-documentation-setup if time allows for better customization",
"Add deployment-only workflow later if needed",
],
};
} else {
return {
recommendedWorkflow: "full-documentation-setup",
reason:
"Comprehensive setup provides best foundation for new documentation",
alternativeWorkflows: ["quick-documentation-setup"],
customizationSuggestions: [
"Skip deploy_pages step if deployment not needed immediately",
"Use validate_diataxis_content for quality assurance",
],
};
}
}
// Existing documentation that needs enhancement
if (projectStatus === "existing-basic-docs") {
if (requirements.qualityFocus) {
return {
recommendedWorkflow: "documentation-audit",
reason: "Quality-focused validation and improvement recommendations",
alternativeWorkflows: ["enhance-existing-documentation"],
customizationSuggestions: [
"Follow up with enhance-existing-documentation based on audit results",
"Consider deployment-only if deployment setup is needed",
],
};
} else {
return {
recommendedWorkflow: "enhance-existing-documentation",
reason: "Improve and expand existing documentation content",
alternativeWorkflows: ["documentation-audit"],
customizationSuggestions: [
"Run documentation-audit first if quality is uncertain",
"Set preserveExisting: true in populate_diataxis_content",
],
};
}
}
// Full documentation that needs deployment or validation
if (projectStatus === "existing-full-docs") {
if (requirements.needsDeployment) {
return {
recommendedWorkflow: "deployment-only",
reason: "Focus on deployment setup for complete documentation",
alternativeWorkflows: ["documentation-audit"],
customizationSuggestions: [
"Run documentation-audit if quality validation is also needed",
"Consider verify_deployment for troubleshooting",
],
};
} else {
return {
recommendedWorkflow: "documentation-audit",
reason:
"Quality assurance and validation for existing complete documentation",
alternativeWorkflows: ["deployment-only"],
customizationSuggestions: [
"Focus on validation aspects most relevant to your concerns",
"Use strict confidence level for thorough quality checking",
],
};
}
}
// Fallback
return {
recommendedWorkflow: "full-documentation-setup",
reason: "Default comprehensive workflow for unclear requirements",
alternativeWorkflows: ["quick-documentation-setup", "documentation-audit"],
customizationSuggestions: [
"Analyze your specific needs to choose a more targeted workflow",
"Consider running analyze_repository first to understand your project better",
],
};
}
// Workflow execution guidance for LLMs
export const WORKFLOW_EXECUTION_GUIDANCE = {
description: "How LLMs should execute DocuMCP workflows",
principles: [
"Each tool can be called independently - workflows are guidance, not rigid requirements",
"Always check tool outputs before proceeding to next step",
"Adapt workflows based on user feedback and tool results",
"Skip optional steps if user has time constraints or different priorities",
"Use tool outputs (like analysisId) as inputs for subsequent tools",
"Validate critical assumptions before proceeding with deployment steps",
],
errorHandling: [
"If a tool fails, provide clear error information to user",
"Suggest alternative approaches or simplified workflows",
"Don't abandon the entire workflow - adapt and continue with available information",
"Use verify_deployment and validate_diataxis_content for troubleshooting",
],
customization: [
"Ask users about their priorities (speed vs quality, deployment needs, etc.)",
"Adapt tool parameters based on project analysis results",
"Suggest workflow modifications based on repository characteristics",
"Allow users to skip or modify steps based on their specific needs",
],
};
// Export workflow metadata for MCP resource exposure
export const WORKFLOW_METADATA = {
totalWorkflows: Object.keys(DOCUMENTATION_WORKFLOWS).length,
workflowNames: Object.keys(DOCUMENTATION_WORKFLOWS),
complexityLevels: ["simple", "moderate", "complex"],
estimatedTimeRange: "1-10 minutes depending on workflow and project size",
toolsUsed: [
"analyze_repository",
"recommend_ssg",
"generate_config",
"setup_structure",
"populate_diataxis_content",
"validate_diataxis_content",
"deploy_pages",
"verify_deployment",
],
};
```
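A brief sketch of how the exports above could be consumed, for example by an MCP resource or prompt handler; the project status and requirement values are illustrative, and the import path is an assumption:

```typescript
import {
  recommendWorkflow,
  DOCUMENTATION_WORKFLOWS,
  WORKFLOW_METADATA,
} from "./workflows/documentation-workflow.js";

// Ask for a workflow suited to an existing project with basic docs
// and a quality-focused requirement set (illustrative inputs).
const recommendation = recommendWorkflow("existing-basic-docs", {
  qualityFocus: true,
  timeConstraint: "moderate",
});

console.log(recommendation.recommendedWorkflow); // e.g. "documentation-audit"
console.log(recommendation.reason);

// Look up the concrete steps for the recommended workflow
const workflow = DOCUMENTATION_WORKFLOWS[recommendation.recommendedWorkflow];
for (const step of workflow.steps) {
  console.log(`${step.tool}${step.optional ? " (optional)" : ""}`);
}

// Metadata that could back an MCP resource listing
console.log(`${WORKFLOW_METADATA.totalWorkflows} workflows available`);
```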
--------------------------------------------------------------------------------
/src/memory/freshness-kg-integration.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Documentation Freshness Knowledge Graph Integration
*
* Provides functions for storing and retrieving documentation freshness
* tracking events in the Knowledge Graph for historical analysis and insights.
*/
import { getKnowledgeGraph, getKGStorage } from "./kg-integration.js";
import type {
DocumentationFreshnessEventEntity,
ProjectHasFreshnessEventRelationship,
} from "./schemas.js";
import type { FreshnessScanReport } from "../utils/freshness-tracker.js";
import crypto from "crypto";
/**
* Generate a unique ID for a freshness event
*/
function generateFreshnessEventId(
projectPath: string,
timestamp: string,
): string {
const hash = crypto
.createHash("sha256")
.update(`${projectPath}:${timestamp}`)
.digest("hex")
.substring(0, 16);
return `freshness_event:${hash}`;
}
/**
* Generate a project ID from project path
*/
function generateProjectId(projectPath: string): string {
const hash = crypto
.createHash("sha256")
.update(projectPath)
.digest("hex")
.substring(0, 16);
return `project:${hash}`;
}
/**
* Store a documentation freshness scan event in the Knowledge Graph
*/
export async function storeFreshnessEvent(
projectPath: string,
docsPath: string,
report: FreshnessScanReport,
eventType: "scan" | "validation" | "initialization" | "update" = "scan",
): Promise<string> {
const kg = await getKnowledgeGraph();
const storage = await getKGStorage();
const timestamp = new Date().toISOString();
const eventId = generateFreshnessEventId(projectPath, timestamp);
const projectId = generateProjectId(projectPath);
// Calculate average age in days
const filesWithAge = report.files.filter((f) => f.ageInMs !== undefined);
const averageAge =
filesWithAge.length > 0
? filesWithAge.reduce((sum, f) => sum + (f.ageInMs || 0), 0) /
filesWithAge.length /
(1000 * 60 * 60 * 24)
: undefined;
// Find oldest file
const oldestFile =
filesWithAge.length > 0
? filesWithAge.reduce((oldest, current) =>
(current.ageInMs || 0) > (oldest.ageInMs || 0) ? current : oldest,
)
: undefined;
// Get most stale files (critical and stale)
const mostStaleFiles = report.files
.filter(
(f) => f.stalenessLevel === "critical" || f.stalenessLevel === "stale",
)
.sort((a, b) => (b.ageInMs || 0) - (a.ageInMs || 0))
.slice(0, 10)
.map((f) => f.relativePath);
// Create freshness event entity
const freshnessEntity: DocumentationFreshnessEventEntity = {
docsPath,
projectPath,
scannedAt: report.scannedAt,
totalFiles: report.totalFiles,
freshFiles: report.freshFiles,
warningFiles: report.warningFiles,
staleFiles: report.staleFiles,
criticalFiles: report.criticalFiles,
filesWithoutMetadata: report.filesWithoutMetadata,
thresholds: report.thresholds,
averageAge,
oldestFile: oldestFile
? {
path: oldestFile.relativePath,
ageInDays: (oldestFile.ageInMs || 0) / (1000 * 60 * 60 * 24),
}
: undefined,
mostStaleFiles,
eventType,
};
// Add entity to knowledge graph
kg.addNode({
id: eventId,
type: "documentation_freshness_event",
label: `Freshness Event ${timestamp}`,
properties: freshnessEntity,
weight: 1.0,
});
// Check if project node exists via async findNode, if not, create a minimal one
const projectNode = await kg.findNode({
type: "project",
properties: { path: projectPath },
});
if (!projectNode) {
kg.addNode({
id: projectId,
type: "project",
label: projectPath.split("/").pop() || "Unknown Project",
properties: {
name: projectPath.split("/").pop() || "Unknown",
path: projectPath,
createdAt: timestamp,
},
weight: 1.0,
});
}
// Calculate improvement score (0-1, higher is better)
const improvementScore =
report.totalFiles > 0
? (report.freshFiles +
report.warningFiles * 0.7 +
report.staleFiles * 0.3) /
report.totalFiles
: 1.0;
// Create relationship between project and freshness event
const relationship: ProjectHasFreshnessEventRelationship = {
type: "project_has_freshness_event",
eventType,
filesScanned: report.totalFiles,
freshFiles: report.freshFiles,
staleFiles: report.staleFiles,
criticalFiles: report.criticalFiles,
filesInitialized: 0, // This will be updated by validation events
filesUpdated: 0, // This will be updated by update events
averageStaleness: averageAge,
improvementScore,
weight: 1.0,
confidence: 1.0,
createdAt: timestamp,
lastUpdated: timestamp,
metadata: {
docsPath,
thresholds: report.thresholds,
},
};
kg.addEdge({
source: projectId,
target: eventId,
type: "project_has_freshness_event",
weight: 1.0,
confidence: 1.0,
properties: relationship,
});
// Persist to storage
const nodes = await kg.getAllNodes();
const edges = await kg.getAllEdges();
await storage.saveGraph(nodes, edges);
return eventId;
}
/**
* Update a freshness event with validation/update results
*/
export async function updateFreshnessEvent(
eventId: string,
updates: {
filesInitialized?: number;
filesUpdated?: number;
eventType?: "scan" | "validation" | "initialization" | "update";
},
): Promise<void> {
const kg = await getKnowledgeGraph();
const storage = await getKGStorage();
// Find event node by ID
const eventNode = await kg.getNodeById(eventId);
if (!eventNode) {
throw new Error(`Freshness event not found: ${eventId}`);
}
// Update entity properties
if (updates.eventType) {
eventNode.properties.eventType = updates.eventType;
}
eventNode.lastUpdated = new Date().toISOString();
// Find and update the relationship
const relEdges = await kg.findEdges({
target: eventId,
type: "project_has_freshness_event",
});
for (const edge of relEdges) {
const props = edge.properties as ProjectHasFreshnessEventRelationship;
if (updates.filesInitialized !== undefined) {
props.filesInitialized = updates.filesInitialized;
}
if (updates.filesUpdated !== undefined) {
props.filesUpdated = updates.filesUpdated;
}
if (updates.eventType) {
props.eventType = updates.eventType;
}
edge.lastUpdated = new Date().toISOString();
}
// Persist to storage
const allNodes = await kg.getAllNodes();
const allEdges = await kg.getAllEdges();
await storage.saveGraph(allNodes, allEdges);
}
/**
* Get freshness event history for a project
*/
export async function getFreshnessHistory(
projectPath: string,
limit: number = 10,
): Promise<
Array<{
eventId: string;
event: DocumentationFreshnessEventEntity;
relationship: ProjectHasFreshnessEventRelationship;
}>
> {
const kg = await getKnowledgeGraph();
const projectId = generateProjectId(projectPath);
const edges = await kg.findEdges({
source: projectId,
type: "project_has_freshness_event",
});
// Sort by timestamp (most recent first)
const sorted = await Promise.all(
edges.map(async (edge) => {
const eventNode = await kg.getNodeById(edge.target);
if (!eventNode || eventNode.type !== "documentation_freshness_event") {
return null;
}
return {
eventId: edge.target,
event: eventNode.properties as DocumentationFreshnessEventEntity,
relationship: edge.properties as ProjectHasFreshnessEventRelationship,
timestamp: (eventNode.properties as DocumentationFreshnessEventEntity)
.scannedAt,
};
}),
);
const filtered = sorted
.filter((item): item is NonNullable<typeof item> => item !== null)
.sort(
(a, b) =>
new Date(b.timestamp).getTime() - new Date(a.timestamp).getTime(),
)
.slice(0, limit);
return filtered.map(({ eventId, event, relationship }) => ({
eventId,
event,
relationship,
}));
}
/**
* Get staleness insights for a project
*/
export async function getStalenessInsights(projectPath: string): Promise<{
totalEvents: number;
averageImprovementScore: number;
trend: "improving" | "declining" | "stable";
currentStatus: {
freshFiles: number;
staleFiles: number;
criticalFiles: number;
totalFiles: number;
} | null;
recommendations: string[];
}> {
const history = await getFreshnessHistory(projectPath, 100);
if (history.length === 0) {
return {
totalEvents: 0,
averageImprovementScore: 0,
trend: "stable",
currentStatus: null,
recommendations: [
"No freshness tracking history found. Run track_documentation_freshness to begin monitoring.",
],
};
}
// Calculate average improvement score
const avgScore =
history.reduce(
(sum, h) => sum + (h.relationship.improvementScore || 0),
0,
) / history.length;
// Determine trend (compare first half to second half)
const midpoint = Math.floor(history.length / 2);
const recentScore =
history
.slice(0, midpoint)
.reduce((sum, h) => sum + (h.relationship.improvementScore || 0), 0) /
Math.max(midpoint, 1);
const olderScore =
history
.slice(midpoint)
.reduce((sum, h) => sum + (h.relationship.improvementScore || 0), 0) /
Math.max(history.length - midpoint, 1);
let trend: "improving" | "declining" | "stable";
if (recentScore > olderScore + 0.1) {
trend = "improving";
} else if (recentScore < olderScore - 0.1) {
trend = "declining";
} else {
trend = "stable";
}
// Get current status from most recent event
const latest = history[0];
const currentStatus = {
freshFiles: latest.event.freshFiles,
staleFiles: latest.event.staleFiles,
criticalFiles: latest.event.criticalFiles,
totalFiles: latest.event.totalFiles,
};
// Generate recommendations
const recommendations: string[] = [];
if (currentStatus.criticalFiles > 0) {
recommendations.push(
`🔴 ${currentStatus.criticalFiles} files are critically stale and need immediate attention`,
);
}
if (currentStatus.staleFiles > currentStatus.totalFiles * 0.3) {
recommendations.push(
`🟠 Over 30% of documentation is stale. Consider running validate_documentation_freshness`,
);
}
if (trend === "declining") {
recommendations.push(
"📉 Documentation freshness is declining. Review update processes and automation",
);
} else if (trend === "improving") {
recommendations.push(
"📈 Documentation freshness is improving. Keep up the good work!",
);
}
if (latest.event.filesWithoutMetadata > 0) {
recommendations.push(
`⚠️ ${latest.event.filesWithoutMetadata} files lack freshness metadata. Run validate_documentation_freshness with initializeMissing=true`,
);
}
// Analyze most commonly stale files
const allStaleFiles = history.flatMap((h) => h.event.mostStaleFiles);
const staleFileCounts = new Map<string, number>();
for (const file of allStaleFiles) {
staleFileCounts.set(file, (staleFileCounts.get(file) || 0) + 1);
}
const chronicallyStale = Array.from(staleFileCounts.entries())
.filter(([_, count]) => count >= Math.min(3, history.length * 0.5))
.map(([file]) => file);
if (chronicallyStale.length > 0) {
recommendations.push(
`🔄 ${
chronicallyStale.length
} files are chronically stale: ${chronicallyStale
.slice(0, 3)
.join(", ")}${chronicallyStale.length > 3 ? "..." : ""}`,
);
}
return {
totalEvents: history.length,
averageImprovementScore: avgScore,
trend,
currentStatus,
recommendations,
};
}
/**
* Compare freshness across similar projects
*/
export async function compareFreshnessAcrossProjects(
projectPath: string,
): Promise<{
currentProject: {
path: string;
improvementScore: number;
};
similarProjects: Array<{
path: string;
improvementScore: number;
similarity: number;
}>;
ranking: number; // 1-based ranking (1 = best)
}> {
const kg = await getKnowledgeGraph();
const projectId = generateProjectId(projectPath);
// Get current project's latest score
const history = await getFreshnessHistory(projectPath, 1);
const currentScore =
history.length > 0 ? history[0].relationship.improvementScore || 0 : 0;
// Find similar projects
const similarEdges = await kg.findEdges({
source: projectId,
type: "similar_to",
});
const similarProjectsPromises = similarEdges.map(async (edge) => {
const similarProjectNode = await kg.getNodeById(edge.target);
if (!similarProjectNode || similarProjectNode.type !== "project") {
return null;
}
const similarPath = (similarProjectNode.properties as any).path || "";
const similarHistory = await getFreshnessHistory(similarPath, 1);
const similarScore =
similarHistory.length > 0
? similarHistory[0].relationship.improvementScore || 0
: 0;
return {
path: similarPath,
improvementScore: similarScore,
similarity: (edge.properties as any).similarityScore || 0,
};
});
const similarProjects = await Promise.all(similarProjectsPromises);
const validSimilarProjects = similarProjects.filter(
(p): p is NonNullable<typeof p> => p !== null,
);
// Calculate ranking
const allScores = [
currentScore,
...validSimilarProjects.map((p) => p.improvementScore),
];
const sortedScores = [...allScores].sort((a, b) => b - a);
const ranking = sortedScores.indexOf(currentScore) + 1;
return {
currentProject: {
path: projectPath,
improvementScore: currentScore,
},
similarProjects: validSimilarProjects,
ranking,
};
}
```
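A short sketch of querying the stored freshness data, assuming a scan event has already been recorded via `storeFreshnessEvent`; the project path and import path are illustrative assumptions:

```typescript
import {
  getFreshnessHistory,
  getStalenessInsights,
  compareFreshnessAcrossProjects,
} from "./memory/freshness-kg-integration.js";

async function reportFreshness(projectPath: string) {
  // Most recent freshness events for the project (newest first)
  const history = await getFreshnessHistory(projectPath, 5);
  console.log(`Recorded events: ${history.length}`);

  // Aggregated trend, current status, and actionable recommendations
  const insights = await getStalenessInsights(projectPath);
  console.log(`Trend: ${insights.trend}`);
  insights.recommendations.forEach((r) => console.log(r));

  // Ranking against projects linked via "similar_to" edges
  const comparison = await compareFreshnessAcrossProjects(projectPath);
  console.log(`Ranking among similar projects: #${comparison.ranking}`);
}

reportFreshness("/path/to/your/project").catch(console.error);
```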
--------------------------------------------------------------------------------
/docs/tutorials/environment-setup.md:
--------------------------------------------------------------------------------
```markdown
---
documcp:
last_updated: "2025-11-20T00:46:21.971Z"
last_validated: "2025-12-09T19:41:38.602Z"
auto_updated: false
update_frequency: monthly
validated_against_commit: 306567b32114502c606244ad6c2930360bcd4201
---
# DocuMCP Environment Setup Guide
This guide will help you set up DocuMCP in your own environment, from basic installation to advanced configuration for team collaboration.
## 🚀 Quick Setup
### Prerequisites Check
Before installing DocuMCP, ensure you have the required software:
```bash
# Check Node.js version (requires 20.0.0+)
node --version
# Check npm version (requires 8.0.0+)
npm --version
# Check Git version
git --version
# Check GitHub CLI (optional but recommended)
gh --version
```
### Installation Methods
#### Method 1: Global Installation (Recommended)
```bash
# Install DocuMCP globally
npm install -g documcp
# Verify installation
documcp --version
# Should output: DocuMCP v0.5.0
```
#### Method 2: Local Project Installation
```bash
# Navigate to your project directory
cd /path/to/your/project
# Install DocuMCP as a dev dependency
npm install documcp --save-dev
# Add to package.json scripts
npm pkg set scripts.docs="documcp"
npm pkg set scripts.docs:analyze="documcp analyze-repository --path ."
npm pkg set scripts.docs:deploy="documcp deploy-pages --repository $(git remote get-url origin | sed 's/.*github.com[:/]\([^.]*\).*/\1/')"
```
#### Method 3: Docker Installation
```bash
# Pull the official DocuMCP Docker image
docker pull documcp/documcp:latest
# Run DocuMCP in a container
docker run -it --rm -v $(pwd):/workspace documcp/documcp:latest
```
## 🔧 Basic Configuration
### Environment Variables
Create a `.env` file in your project root:
```bash
# DocuMCP Configuration
export DOCUMCP_STORAGE_DIR="./.documcp"
export DOCUMCP_LOG_LEVEL="info"
export DOCUMCP_CACHE_ENABLED="true"
# GitHub Integration
export GITHUB_TOKEN="your_github_token_here"
export GITHUB_USERNAME="your_username"
# Optional: Custom configuration
export DOCUMCP_DEFAULT_SSG="docusaurus"
export DOCUMCP_DEFAULT_DEPTH="standard"
```
### Configuration File
Create a `documcp.config.json` file:
```json
{
"storage": {
"directory": "./.documcp",
"enableCache": true,
"maxCacheSize": "100MB"
},
"github": {
"token": "${GITHUB_TOKEN}",
"username": "${GITHUB_USERNAME}",
"defaultBranch": "main"
},
"defaults": {
"ssg": "docusaurus",
"analysisDepth": "standard",
"includeExamples": true,
"targetAudience": "community_contributors"
},
"memory": {
"enableLearning": true,
"retentionDays": 90,
"enableAnalytics": true
}
}
```
## 🏗️ Project Structure Setup
### Recommended Project Structure
```
your-project/
├── .documcp/ # DocuMCP storage and cache
│ ├── memory/ # Memory system data
│ ├── cache/ # Analysis cache
│ └── config/ # Local configuration
├── docs/ # Generated documentation
│ ├── api/ # API documentation
│ ├── tutorials/ # Tutorial content
│ ├── how-to/ # How-to guides
│ ├── reference/ # Reference documentation
│ └── explanation/ # Explanatory content
├── src/ # Source code
├── README.md # Project README
├── documcp.config.json # DocuMCP configuration
├── .env # Environment variables
└── package.json # Node.js dependencies
```
### Initialize DocuMCP in Your Project
```bash
# Initialize DocuMCP in your project
documcp init
# This creates:
# - .documcp/ directory
# - documcp.config.json
# - .env template
# - .gitignore entries
```
## 🔐 GitHub Integration Setup
### GitHub Token Setup
1. **Create a GitHub Personal Access Token:**
Go to GitHub → Settings → Developer settings → Personal access tokens → Tokens (classic)
Required permissions:
- `repo` (Full control of private repositories)
- `pages` (Write access to GitHub Pages)
- `workflow` (Update GitHub Action workflows)
- `read:org` (Read organization membership)
2. **Set the token in your environment:**
```bash
# Add to your shell profile (.bashrc, .zshrc, etc.)
export GITHUB_TOKEN="ghp_your_token_here"
# Or add to .env file
echo "GITHUB_TOKEN=ghp_your_token_here" >> .env
```
3. **Verify GitHub integration:**
```bash
# Test GitHub connection
documcp github test
# Should output: ✅ GitHub connection successful
```
### GitHub Pages Setup
1. **Enable GitHub Pages in your repository:**
Go to your repository → Settings → Pages
- Source: GitHub Actions
- Branch: main (or your preferred branch)
2. **Configure deployment:**
```bash
# Configure GitHub Pages deployment
documcp github configure-pages --repository "username/repository"
```
## 🧠 Memory System Setup
### Initialize Memory System
```bash
# Initialize memory system with custom storage
documcp memory init --storage-dir ./.documcp/memory
# Initialize with specific configuration
documcp memory init --storage-dir ./.documcp/memory --enable-learning --retention-days 90
```
### Memory System Configuration
Create a memory configuration file:
```json
{
"storage": {
"directory": "./.documcp/memory",
"enableCompression": true,
"maxSize": "500MB"
},
"learning": {
"enabled": true,
"retentionDays": 90,
"enableAnalytics": true,
"enablePatternRecognition": true
},
"userPreferences": {
"enablePersonalization": true,
"defaultUserId": "developer123"
}
}
```
### Memory System Testing
```bash
# Test memory system
documcp memory test
# Check memory statistics
documcp memory stats
# Export memories for backup
documcp memory export --format json --output ./documcp-memories-backup.json
```
## 🔧 Advanced Configuration
### Custom SSG Configuration
```bash
# Configure custom SSG settings
documcp config set --key "ssg.docusaurus.theme" --value "classic"
documcp config set --key "ssg.hugo.baseURL" --value "https://docs.example.com"
documcp config set --key "ssg.mkdocs.theme" --value "material"
```
### User Preferences Setup
```bash
# Set user preferences
documcp preferences set --user-id "developer123" --priority performance --ecosystem javascript
# Set team preferences
documcp preferences set --user-id "team" --priority simplicity --ecosystem any
# Export preferences
documcp preferences export --user-id "developer123" --output ./preferences.json
```
### Cache Configuration
```bash
# Configure caching
documcp cache config --enable --max-size "200MB" --ttl "24h"
# Clear cache
documcp cache clear
# Cache statistics
documcp cache stats
```
## 🐳 Docker Setup
### Docker Compose Configuration
Create a `docker-compose.yml` file:
```yaml
version: "3.8"
services:
documcp:
image: documcp/documcp:latest
container_name: documcp
volumes:
- ./:/workspace
- ./documcp-data:/app/.documcp
environment:
- GITHUB_TOKEN=${GITHUB_TOKEN}
- DOCUMCP_STORAGE_DIR=/app/.documcp
working_dir: /workspace
command: ["documcp", "serve", "--port", "3000", "--host", "0.0.0.0"]
ports:
- "3000:3000"
```
### Docker Usage
```bash
# Start DocuMCP with Docker Compose
docker-compose up -d
# Run specific commands
docker-compose exec documcp documcp analyze-repository --path .
# Stop services
docker-compose down
```
## 🔄 CI/CD Integration
### GitHub Actions Setup
Create `.github/workflows/docs.yml`:
```yaml
name: Documentation Deployment
on:
push:
branches: [main]
paths: ["docs/**", "src/**", "README.md"]
pull_request:
branches: [main]
jobs:
analyze:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: "20"
- name: Install DocuMCP
run: npm install -g documcp
- name: Analyze Repository
run: |
ANALYSIS_ID=$(documcp analyze-repository --path . --depth standard | jq -r '.data.id')
echo "analysis_id=$ANALYSIS_ID" >> $GITHUB_OUTPUT
id: analyze
- name: Validate Documentation
run: |
documcp validate-content --docs-path ./docs
deploy:
needs: analyze
runs-on: ubuntu-latest
if: github.ref == 'refs/heads/main'
steps:
- uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: "20"
- name: Install DocuMCP
run: npm install -g documcp
- name: Deploy Documentation
run: |
documcp deploy-pages --repository ${{ github.repository }} --ssg docusaurus
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
### GitLab CI Setup
Create `.gitlab-ci.yml`:
```yaml
stages:
- analyze
- deploy
variables:
NODE_VERSION: "20"
analyze:
stage: analyze
image: node:${NODE_VERSION}-alpine
before_script:
- npm install -g documcp
script:
- documcp analyze-repository --path . --depth standard
- documcp validate-content --docs-path ./docs
artifacts:
reports:
junit: documcp-results.xml
paths:
- .documcp/
expire_in: 1 hour
deploy:
stage: deploy
image: node:${NODE_VERSION}-alpine
before_script:
- npm install -g documcp
script:
- documcp deploy-pages --repository $CI_PROJECT_PATH --ssg docusaurus
only:
- main
environment:
name: production
url: https://$CI_PROJECT_NAMESPACE.gitlab.io/$CI_PROJECT_NAME
```
## 🔍 Development Setup
### Local Development
```bash
# Clone DocuMCP repository
git clone https://github.com/tosin2013/documcp.git
cd documcp
# Install dependencies
npm install
# Build the project
npm run build
# Run in development mode
npm run dev
# Run tests
npm test
# Run linting
npm run lint
```
### IDE Configuration
#### VS Code Configuration
Create `.vscode/settings.json`:
```json
{
"typescript.preferences.importModuleSpecifier": "relative",
"typescript.suggest.autoImports": true,
"editor.formatOnSave": true,
"editor.codeActionsOnSave": {
"source.fixAll.eslint": true
},
"files.associations": {
"*.mcp": "json"
}
}
```
#### VS Code Extensions
Recommended extensions:
- TypeScript and JavaScript Language Features
- ESLint
- Prettier
- GitLens
- REST Client (for testing API endpoints)
## 🧪 Testing Setup
### Unit Testing
```bash
# Install testing dependencies
npm install --save-dev jest @types/jest ts-jest
# Create jest.config.js
cat > jest.config.js << EOF
module.exports = {
preset: 'ts-jest',
testEnvironment: 'node',
testMatch: ['**/__tests__/**/*.test.ts'],
collectCoverage: true,
coverageDirectory: 'coverage',
coverageReporters: ['text', 'lcov', 'html']
};
EOF
# Run tests
npm test
```
### Integration Testing
```bash
# Create test repository
mkdir test-repo
cd test-repo
git init
echo "# Test Repository" > README.md
git add README.md
git commit -m "Initial commit"
# Test DocuMCP with test repository
cd ..
documcp analyze-repository --path ./test-repo --depth quick
```
### Performance Testing
```bash
# Run performance benchmarks
documcp benchmark run --repository ./test-repo --iterations 10
# Check performance metrics
documcp benchmark current
```
## 🔧 Troubleshooting
### Common Issues
#### Issue 1: Permission Denied
**Problem:** `Permission denied: Cannot read directory`
**Solution:**
```bash
# Check permissions
ls -la /path/to/repository
# Fix permissions
chmod -R 755 /path/to/repository
# Or run with sudo (not recommended for production)
sudo documcp analyze-repository --path /path/to/repository
```
#### Issue 2: GitHub Token Invalid
**Problem:** `GitHub authentication failed`
**Solution:**
```bash
# Check token validity
curl -H "Authorization: token $GITHUB_TOKEN" https://api.github.com/user
# Regenerate token with correct permissions
# Go to GitHub → Settings → Developer settings → Personal access tokens
# Update environment variable
export GITHUB_TOKEN="new_token_here"
```
#### Issue 3: Memory System Errors
**Problem:** `Memory system initialization failed`
**Solution:**
```bash
# Clear memory storage
rm -rf ./.documcp/memory
# Reinitialize memory system
documcp memory init --storage-dir ./.documcp/memory
# Check memory system status
documcp memory status
```
#### Issue 4: Build Failures
**Problem:** `Documentation build failed`
**Solution:**
```bash
# Check for syntax errors
documcp validate-content --docs-path ./docs
# Test local build
documcp test-local --docs-path ./docs --ssg docusaurus
# Check SSG configuration
cat ./docs/docusaurus.config.js
```
### Debug Mode
```bash
# Enable debug logging
export DOCUMCP_LOG_LEVEL="debug"
# Run with verbose output
documcp analyze-repository --path . --depth standard --verbose
# Check logs
tail -f ./.documcp/logs/documcp.log
```
### Health Check
```bash
# Run comprehensive health check
documcp health-check
# Should output:
# ✅ Node.js version: v20.0.0
# ✅ npm version: 8.0.0
# ✅ Git version: 2.30.0
# ✅ GitHub token: valid
# ✅ Memory system: healthy
# ✅ Cache system: healthy
```
## 📚 Next Steps
After completing the environment setup:
1. **Read the [User Onboarding Guide](./user-onboarding.md)** for usage patterns
2. **Explore [Usage Examples](../how-to/usage-examples.md)** for practical examples
3. **Check the [API Reference](../api/)** for complete function documentation
4. **Join the [GitHub Issues](https://github.com/tosin2013/documcp/issues)** for community support and feature requests
## 🆘 Getting Help
- **Documentation**: Check the comprehensive documentation
- **GitHub Issues**: Report bugs and request features
- **GitHub Discussions**: Ask questions and share ideas
- **Community**: Join the DocuMCP community for support
Your DocuMCP environment is now ready! 🎉
```
--------------------------------------------------------------------------------
/docs/how-to/troubleshooting.md:
--------------------------------------------------------------------------------
```markdown
---
documcp:
last_updated: "2025-11-20T00:46:21.956Z"
last_validated: "2025-12-09T19:41:38.587Z"
auto_updated: false
update_frequency: monthly
validated_against_commit: 306567b32114502c606244ad6c2930360bcd4201
---
# Troubleshooting Common Issues
This guide helps you diagnose and fix common problems when using DocuMCP for documentation deployment.
## Quick Diagnostic Commands
Use these DocuMCP prompts for immediate diagnosis:
```bash
# General troubleshooting
"diagnose issues with my documentation deployment"
# Specific verification
"verify my GitHub Pages deployment and identify any problems"
# Link validation
"check all my documentation links for broken references"
# Content validation
"validate my documentation content for errors and inconsistencies"
```
## Repository Analysis Issues
### Problem: Analysis Returns Empty or Incomplete Results
**Symptoms:**
- Analysis shows 0 files or minimal structure
- Missing language detection
- No dependency information
**Solutions:**
1. **Check directory permissions:**
```bash
ls -la /path/to/your/repository
# Ensure read permissions exist
```
2. **Verify Git repository:**
```bash
git status
# Must be in a valid Git repository
```
3. **Use deeper analysis:**
```bash
"analyze my repository with deep analysis to get comprehensive results"
```
4. **Check for hidden files:**
```bash
# Include hidden files in analysis
ls -la
# Look for .gitignore excluding important files
```
### Problem: Wrong Project Type Detection
**Symptoms:**
- Library detected as application
- Wrong primary language
- Incorrect team size estimation
**Solutions:**
1. **Provide more context:**
```bash
"analyze my TypeScript library project with focus on API documentation"
```
2. **Check file extensions:**
```bash
# Ensure your main files have correct extensions
find . -name "*.ts" -o -name "*.js" -o -name "*.py" | head -20
```
3. **Update package.json:**
```json
{
"type": "module",
"main": "dist/index.js",
"types": "dist/index.d.ts",
"keywords": ["library", "typescript", "api"]
}
```
## Static Site Generator Recommendation Issues
### Problem: No Recommendations or Low Confidence Scores
**Symptoms:**
- Empty recommendation list
- All SSGs have similar low scores
- Recommendation doesn't match project needs
**Solutions:**
1. **Provide preferences:**
```bash
"recommend SSG for my project with preferences for JavaScript ecosystem and feature-rich capabilities"
```
2. **Re-analyze with specific focus:**
```bash
"analyze my repository focusing on documentation needs and complexity"
```
3. **Check project characteristics:**
- Ensure sufficient code files exist
- Verify dependencies are in package.json/requirements.txt
- Add README with project description
### Problem: Recommended SSG Doesn't Match Expectations
**Symptoms:**
- Hugo recommended for React project
- MkDocs suggested for JavaScript library
- Jekyll proposed for Python project
**Solutions:**
1. **Specify ecosystem preference:**
```bash
"recommend SSG for my project with JavaScript ecosystem preference"
```
2. **Review analysis results:**
```bash
"explain why you recommended Hugo instead of Docusaurus for my React project"
```
3. **Override with specific request:**
```bash
"generate Docusaurus configuration for my project despite the Hugo recommendation"
```
## Configuration Generation Issues
### Problem: Configuration Files Not Created
**Symptoms:**
- No config files generated
- Empty configuration
- Missing dependencies
**Solutions:**
1. **Check output path:**
```bash
# Ensure output path exists and is writable
mkdir -p ./docs
chmod 755 ./docs
```
2. **Specify absolute path:**
```bash
"generate Hugo configuration files at /full/path/to/project"
```
3. **Check project name format:**
```bash
# Avoid special characters in project names
"generate config for project 'My-Simple-Docs' not 'My Project (v2.0)'"
```
### Problem: Invalid Configuration Generated
**Symptoms:**
- Build fails with config errors
- Missing required fields
- Wrong file format
**Solutions:**
1. **Validate generated config:**
```bash
# For Docusaurus
npm run docusaurus --version
# For Hugo
hugo version && hugo config
# For MkDocs
mkdocs --version && mkdocs build --strict
```
2. **Regenerate with project details:**
```bash
"generate detailed Hugo configuration with custom theme and GitHub integration"
```
3. **Fix common issues:**
**Docusaurus baseUrl fix:**
```javascript
// Fix in docusaurus.config.js
const config = {
baseUrl: "/your-repo-name/", // Must match repository name
url: "https://yourusername.github.io",
};
```
**Hugo baseURL fix:**
```yaml
# Fix in config.yml
baseURL: "https://yourusername.github.io/your-repo-name/"
```
## Documentation Structure Issues
### Problem: Diataxis Structure Not Created
**Symptoms:**
- Missing directories
- Empty folders
- No example content
**Solutions:**
1. **Check path permissions:**
```bash
ls -ld /path/to/docs
# Ensure write permissions
```
2. **Use absolute path:**
```bash
"set up Diataxis structure at /absolute/path/to/docs"
```
3. **Force recreation:**
```bash
"recreate documentation structure with examples for my SSG"
```
### Problem: Content Population Fails
**Symptoms:**
- Empty documentation files
- Generic content only
- Missing project-specific information
**Solutions:**
1. **Provide analysis context:**
```bash
"populate documentation using analysis ID analysis_abc123 with comprehensive content"
```
2. **Specify technology focus:**
```bash
"populate docs focusing on TypeScript, React, and API documentation"
```
3. **Check source code structure:**
```bash
# Ensure code has discoverable patterns
find . -name "*.ts" -exec grep -l "export" {} \;
```
## GitHub Pages Deployment Issues
### Problem: Deployment Workflow Fails
**Symptoms:**
- GitHub Actions shows red X
- Build fails with errors
- Deployment never completes
**Solutions:**
1. **Check workflow logs:**
- Go to Actions tab in GitHub
- Click on failed workflow
- Review step-by-step logs
2. **Common fixes:**
**Node.js version mismatch:**
```yaml
# Fix in .github/workflows/deploy.yml
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: "20" # Match your local version
```
**Missing dependencies** (ensure all required packages are declared in `package.json`):
```json
{
"dependencies": {
"@docusaurus/core": "^3.0.0",
"@docusaurus/preset-classic": "^3.0.0"
}
}
```
**Build command issues:**
```yaml
# Fix build command
- name: Build
run: npm run build # Ensure this command exists in package.json
```
### Problem: Site Shows 404 Error
**Symptoms:**
- GitHub Pages URL returns 404
- Site deployed but not accessible
- Some pages work, others don't
**Solutions:**
1. **Check GitHub Pages settings:**
- Repository Settings > Pages
- Source should be "GitHub Actions"
- Custom domain configured correctly (if used)
2. **Fix baseURL configuration:**
**Docusaurus:**
```javascript
const config = {
baseUrl: "/repository-name/", // Must match your repo name exactly
url: "https://username.github.io",
};
```
**Hugo:**
```yaml
baseURL: "https://username.github.io/repository-name/"
```
**MkDocs:**
```yaml
site_url: "https://username.github.io/repository-name/"
```
3. **Check file naming:**
```bash
# Ensure index.html or index.md exists
ls docs/index.*
```
### Problem: Assets Not Loading (CSS/JS/Images)
**Symptoms:**
- Site loads but no styling
- Images show as broken
- JavaScript functionality missing
**Solutions:**
1. **Check asset paths:**
```javascript
// Use relative paths
<img src="./images/logo.png" /> // Good
<img src="/images/logo.png" /> // May fail
```
2. **Configure public path:**
**Docusaurus:**
```javascript
const config = {
baseUrl: "/repo-name/",
staticDirectories: ["static"],
};
```
**Hugo:**
```yaml
# In config.yml
baseURL: "https://username.github.io/repo-name/"
canonifyURLs: true
```
3. **Verify asset directories:**
```bash
# Check assets exist in build output
ls -la build/assets/ # Docusaurus
ls -la public/css/ # Hugo
ls -la site/css/ # MkDocs
```
## Content Validation Issues
### Problem: Link Validation Shows False Positives
**Symptoms:**
- Valid links reported as broken
- External links fail intermittently
- Anchor links not found
**Solutions:**
1. **Configure link checking:**
```bash
"check documentation links with timeout of 10 seconds and ignore external domains github.com"
```
2. **Check anchor links:**
```markdown
<!-- Ensure anchors exist -->
## My Section
Link to [My Section](#my-section) <!-- Correct -->
Link to [My Section](#my_section) <!-- May fail -->
```
3. **Handle external link timeouts:**
```bash
"validate content with longer timeout for external links and retry failed checks"
```
### Problem: Code Block Validation Fails
**Symptoms:**
- Valid code marked as invalid
- Syntax highlighting not working
- Code examples cause build failures
**Solutions:**
1. **Check language tags:**
````markdown
<!-- Correct -->
```javascript
const example = "Hello World";
```

<!-- Incorrect - missing language -->
```
const example = "Hello World";
```
````
2. **Validate code syntax:**
```bash
# Test code blocks separately
node -e "const example = 'Hello World'; console.log(example);"
```
3. **Configure code validation:**
```bash
"validate content with permissive code validation and syntax checking disabled"
```
## Memory System Issues
### Problem: Memory System Not Initializing
**Symptoms:**
- Memory tools return errors
- No historical data available
- Analysis doesn't include insights
**Solutions:**
1. **Check storage directory:**
```bash
ls -la .documcp/memory/
# Should contain analysis files
```
2. **Initialize manually:**
```bash
"recall all memories to initialize the memory system"
```
3. **Check permissions:**
```bash
chmod -R 755 .documcp/
```
### Problem: Similar Projects Not Found
**Symptoms:**
- No similar projects in results
- Low-quality recommendations
- Missing historical patterns
**Solutions:**
1. **Build memory with more analyses:**
```bash
"analyze multiple repositories to build memory patterns"
```
2. **Export and import memory:**
```bash
"export memories in JSON format for backup"
```
3. **Clean and rebuild:**
```bash
"cleanup old memories and rebuild with recent analyses"
```
## Performance Issues
### Problem: Slow Build Times
**Symptoms:**
- Builds take too long
- GitHub Actions timeout
- Local development is slow
**Solutions:**
1. **Optimize build configuration:**
**Docusaurus:**
```javascript
const config = {
future: {
experimental_faster: true,
},
webpack: {
jsLoader: (isServer) => ({
loader: "esbuild-loader",
options: {
loader: "tsx",
target: isServer ? "node12" : "es2017",
},
}),
},
};
```
**Hugo:**
```yaml
# config.yml
build:
writeStats: false
noJSConfigInAssets: true
```
2. **Enable caching:**
```yaml
# In GitHub Actions
- name: Cache dependencies
uses: actions/cache@v4
with:
path: ~/.npm
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
```
3. **Reduce build scope:**
```bash
# Build a single locale to reduce scope (Docusaurus)
npm run build -- --locale en
```
### Problem: Large Bundle Sizes
**Symptoms:**
- Slow page loads
- High bandwidth usage
- Poor mobile performance
**Solutions:**
1. **Analyze bundle:**
```bash
# Docusaurus
npm run build -- --bundle-analyzer
# Check generated files
ls -lh build/assets/
```
2. **Optimize images:**
```bash
# Convert images to WebP
find docs -name "*.png" -exec cwebp {} -o {}.webp \;
```
3. **Enable code splitting:**
```javascript
// Docusaurus doesn't expose splitChunks directly; tune it via a small plugin
// that uses the configureWebpack lifecycle
const config = {
  plugins: [
    () => ({
      name: "custom-chunk-splitting",
      configureWebpack() {
        return {
          optimization: {
            splitChunks: {
              chunks: "all",
              cacheGroups: {
                default: {
                  minChunks: 2,
                  reuseExistingChunk: true,
                },
              },
            },
          },
        };
      },
    }),
  ],
};
```
## Getting Help
### Diagnostic Information
When reporting issues, include:
1. **DocuMCP version:**
```bash
npm list documcp
```
2. **System information:**
```bash
node --version
npm --version
git --version
```
3. **Error logs:**
```bash
# GitHub Actions logs
# Local build output
# Browser console errors
```
### Support Channels
- **GitHub Issues**: [Report bugs and feature requests](https://github.com/tosin2013/documcp/issues)
- **Documentation**: Check other guides in this documentation
- **Community**: Search existing issues for solutions
### Self-Diagnostic Commands
```bash
# Complete health check
"verify my entire documentation setup and identify all issues"
# Performance analysis
"analyze my documentation build performance and suggest optimizations"
# Security check
"validate my GitHub Pages deployment for security best practices"
```
## Prevention Tips
### Regular Maintenance
1. **Weekly validation** (see the scheduled workflow sketch after this list):
```bash
"check all documentation links and validate content quality"
```
2. **Monthly updates:**
```bash
# Update dependencies
npm update
# Regenerate configurations if needed
```
3. **Monitor deployment:**
- Set up GitHub Actions notifications
- Check site accessibility regularly
- Monitor build times and performance
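The weekly validation above can be automated with a scheduled GitHub Actions workflow (a sketch; the workflow file name, cron schedule, and `docs:check-links` script are assumptions, not commands provided by DocuMCP):
```yaml
# .github/workflows/weekly-docs-check.yml (hypothetical)
name: Weekly documentation check
on:
  schedule:
    - cron: "0 6 * * 1" # Mondays at 06:00 UTC
  workflow_dispatch: {}
jobs:
  validate:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: "20"
      - run: npm ci
      - run: npm run docs:check-links # assumed link-check script name
```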
### Best Practices
1. **Always test locally before deploying**
2. **Use DocuMCP validation before committing**
3. **Keep dependencies updated**
4. **Monitor GitHub Actions for failures**
5. **Backup memory and configurations**
## Summary
Common issue categories and solutions:
✅ Repository analysis problems - permissions and context
✅ SSG recommendation issues - preferences and project type
✅ Configuration generation - paths and project details
✅ Deployment failures - workflows and settings
✅ Content validation - links and code blocks
✅ Performance optimization - builds and bundles
✅ Memory system troubleshooting - initialization and data
Most issues can be resolved by providing more context to DocuMCP or fixing configuration details!
```
--------------------------------------------------------------------------------
/src/tools/generate-readme-template.ts:
--------------------------------------------------------------------------------
```typescript
import { z } from "zod";
// Template types
export const TemplateType = z.enum([
"library",
"application",
"cli-tool",
"api",
"documentation",
]);
export type TemplateType = z.infer<typeof TemplateType>;
// Input schema
export const GenerateReadmeTemplateSchema = z.object({
projectName: z.string().min(1, "Project name is required"),
description: z.string().min(1, "Project description is required"),
templateType: TemplateType,
author: z.string().optional(),
license: z.string().default("MIT"),
includeScreenshots: z.boolean().default(false),
includeBadges: z.boolean().default(true),
includeContributing: z.boolean().default(true),
outputPath: z.string().optional(),
});
export type GenerateReadmeTemplateInput = z.infer<
typeof GenerateReadmeTemplateSchema
>;
interface TemplateSection {
title: string;
content: string;
required: boolean;
}
interface ReadmeTemplate {
sections: TemplateSection[];
badges: string[];
metadata: {
type: TemplateType;
estimatedLength: number;
};
}
export class ReadmeTemplateGenerator {
private templates: Map<TemplateType, ReadmeTemplate> = new Map();
constructor() {
this.initializeTemplates();
}
private initializeTemplates(): void {
// Library/Package Template
this.templates.set("library", {
sections: [
{
title: "Header",
content: "# {{projectName}}\n\n> {{description}}",
required: true,
},
{
title: "Badges",
content: "{{badges}}",
required: false,
},
{
title: "TL;DR",
content:
"## TL;DR\n\nWhat it does in 2-3 sentences. Who should use it.\n\n- ✅ Perfect for X use cases\n- ✅ Solves Y problems\n- ❌ Not suitable for Z (consider [alternative] instead)",
required: true,
},
{
title: "Quick Start",
content:
"## Quick Start\n\n### Install\n\n```bash\nnpm install {{projectName}}\n```\n\n### Use\n\n```javascript\nconst {{camelCaseName}} = require('{{projectName}}');\n\n// Basic usage example\nconst result = {{camelCaseName}}.doSomething();\nconsole.log(result);\n```",
required: true,
},
{
title: "API Documentation",
content:
"## API Documentation\n\n[Link to full API documentation]\n\n### Core Methods\n\n#### `methodName(param)`\n\n- **param** `{Type}` - Description\n- **Returns** `{Type}` - Description\n\nExample:\n```javascript\n// Example usage\n```",
required: true,
},
{
title: "Contributing",
content:
"## Contributing\n\nWe welcome contributions! Please see our [Contributing Guide](CONTRIBUTING.md) for details.\n\n### Development Setup\n\n```bash\ngit clone https://github.com/{{author}}/{{projectName}}.git\ncd {{projectName}}\nnpm install\nnpm test\n```",
required: false,
},
{
title: "License",
content: "## License\n\n{{license}} © {{author}}",
required: true,
},
],
badges: [
"[](https://badge.fury.io/js/{{projectName}})",
"[](https://travis-ci.org/{{author}}/{{projectName}})",
"[](https://opensource.org/licenses/{{license}})",
],
metadata: {
type: "library",
estimatedLength: 150,
},
});
// Application Template
this.templates.set("application", {
sections: [
{
title: "Header",
content: "# {{projectName}}\n\n> {{description}}",
required: true,
},
{
title: "Screenshot",
content: "{{screenshot}}",
required: false,
},
{
title: "What This Does",
content:
"## What This Does\n\n{{projectName}} helps you:\n\n- 🎯 **Feature 1** - Brief explanation\n- ⚡ **Feature 2** - Brief explanation\n- 🔧 **Feature 3** - Brief explanation",
required: true,
},
{
title: "Quick Start",
content:
"## Quick Start\n\n### Prerequisites\n\n- Node.js 18+ \n- npm or yarn\n- [Additional requirements]\n\n### Install & Run\n\n```bash\ngit clone https://github.com/{{author}}/{{projectName}}.git\ncd {{projectName}}\nnpm install\nnpm start\n```\n\nOpen http://localhost:3000 in your browser.",
required: true,
},
{
title: "Configuration",
content:
"## Configuration\n\nCreate a `.env` file in the root directory:\n\n```env\n# Required settings\nPORT=3000\nNODE_ENV=development\n\n# Optional settings\nDATABASE_URL=your_database_url\nAPI_KEY=your_api_key\n```\n\nSee [Configuration Guide](docs/configuration.md) for all options.",
required: true,
},
{
title: "Usage",
content:
"## Usage\n\n### Basic Operations\n\n1. **Step 1** - Description\n2. **Step 2** - Description\n3. **Step 3** - Description\n\n### Advanced Features\n\n[Link to advanced documentation]",
required: true,
},
{
title: "Contributing",
content:
"## Contributing\n\nSee [CONTRIBUTING.md](CONTRIBUTING.md) for development setup and contribution guidelines.",
required: false,
},
{
title: "License",
content: "## License\n\n{{license}} © {{author}}",
required: true,
},
],
badges: [
"[](https://github.com/{{author}}/{{projectName}}/actions)",
"[](LICENSE)",
],
metadata: {
type: "application",
estimatedLength: 200,
},
});
// CLI Tool Template
this.templates.set("cli-tool", {
sections: [
{
title: "Header",
content: "# {{projectName}}\n\n> {{description}}",
required: true,
},
{
title: "Installation",
content:
"## Installation\n\n```bash\n# Global installation\nnpm install -g {{projectName}}\n\n# Or use with npx\nnpx {{projectName}} --help\n```",
required: true,
},
{
title: "Usage",
content:
"## Usage\n\n### Basic Commands\n\n```bash\n# Basic usage\n{{projectName}} [options] [arguments]\n\n# Show help\n{{projectName}} --help\n\n# Show version\n{{projectName}} --version\n```\n\n### Examples\n\n```bash\n# Example 1\n{{projectName}} command --option value\n\n# Example 2\n{{projectName}} another-command file.txt\n```",
required: true,
},
{
title: "Options",
content:
"## Options\n\n| Option | Description | Default |\n|--------|-------------|----------|\n| `-h, --help` | Show help | |\n| `-v, --version` | Show version | |\n| `--config <path>` | Config file path | `./config.json` |\n| `--verbose` | Verbose output | `false` |",
required: true,
},
{
title: "Configuration",
content:
'## Configuration\n\nCreate a config file:\n\n```json\n{\n "setting1": "value1",\n "setting2": "value2"\n}\n```',
required: false,
},
{
title: "Contributing",
content:
"## Contributing\n\nSee [CONTRIBUTING.md](CONTRIBUTING.md) for development guidelines.",
required: false,
},
{
title: "License",
content: "## License\n\n{{license}} © {{author}}",
required: true,
},
],
badges: [
"[](https://www.npmjs.com/package/{{projectName}})",
"[](LICENSE)",
],
metadata: {
type: "cli-tool",
estimatedLength: 180,
},
});
}
generateTemplate(input: GenerateReadmeTemplateInput): string {
const template = this.templates.get(input.templateType);
if (!template) {
throw new Error(`Template type "${input.templateType}" not supported`);
}
let readme = "";
const camelCaseName = this.toCamelCase(input.projectName);
// Process each section
for (const section of template.sections) {
if (section.title === "Badges" && input.includeBadges) {
readme += this.processBadges(template.badges, input) + "\n\n";
} else if (section.title === "Screenshot" && input.includeScreenshots) {
readme += this.processScreenshot(input) + "\n\n";
} else if (
section.title === "Contributing" &&
!input.includeContributing
) {
continue;
} else {
readme +=
this.processSection(section.content, input, camelCaseName) + "\n\n";
}
}
return readme.trim();
}
private processBadges(
badges: string[],
input: GenerateReadmeTemplateInput,
): string {
return badges
.map((badge) => this.replaceVariables(badge, input))
.join("\n");
}
private processScreenshot(input: GenerateReadmeTemplateInput): string {
    return `![${input.projectName} screenshot](path/to/screenshot.png)\n\n*Add a screenshot or demo GIF here*`;
}
private processSection(
content: string,
input: GenerateReadmeTemplateInput,
camelCaseName: string,
): string {
let processed = this.replaceVariables(content, input);
processed = processed.replace(/\{\{camelCaseName\}\}/g, camelCaseName);
return processed;
}
private replaceVariables(
content: string,
input: GenerateReadmeTemplateInput,
): string {
return content
.replace(/\{\{projectName\}\}/g, input.projectName)
.replace(/\{\{description\}\}/g, input.description)
.replace(/\{\{author\}\}/g, input.author || "your-username")
.replace(/\{\{license\}\}/g, input.license);
}
private toCamelCase(str: string): string {
return str
.replace(/[-_\s]+(.)?/g, (_, c) => (c ? c.toUpperCase() : ""))
.replace(/^./, (c) => c.toLowerCase());
}
getAvailableTemplates(): TemplateType[] {
return Array.from(this.templates.keys());
}
getTemplateInfo(type: TemplateType): ReadmeTemplate["metadata"] | null {
const template = this.templates.get(type);
return template ? template.metadata : null;
}
}
/**
* Generates standardized README templates for different project types with best practices.
*
* Creates comprehensive README templates tailored to specific project types (library,
* application, CLI tool, API, documentation) following community best practices. Includes
* customizable sections, badges, contributing guidelines, and project-specific content
* to ensure professional documentation standards.
*
* @param input - The input parameters for README template generation
* @param input.projectName - Name of the project (required)
* @param input.description - Brief description of what the project does (required)
* @param input.templateType - Type of project template to generate
* @param input.author - Optional project author/organization name
* @param input.license - Project license (default: "MIT")
* @param input.includeScreenshots - Whether to include screenshot placeholders (default: false)
* @param input.includeBadges - Whether to include status badges (default: true)
* @param input.includeContributing - Whether to include contributing section (default: true)
* @param input.outputPath - Optional path to write the generated README.md file
*
* @returns Promise resolving to README template generation results
* @returns template - The generated README template content
* @returns metadata - Template metadata including type and estimated length
* @returns filePath - Path where the README was written (if outputPath provided)
*
* @throws {Error} When required parameters are missing
* @throws {Error} When output path is inaccessible
* @throws {Error} When template generation fails
*
* @example
* ```typescript
* // Generate library README template
* const result = await generateReadmeTemplate({
* projectName: "MyAwesomeLibrary",
* description: "A powerful utility library for data processing",
* templateType: "library",
* author: "Your Name",
* license: "MIT",
* includeBadges: true
* });
*
* console.log(`Generated ${result.metadata.estimatedLength} line README`);
*
* // Generate CLI tool template with output file
* const cliTemplate = await generateReadmeTemplate({
* projectName: "my-cli-tool",
* description: "Command-line interface for project management",
* templateType: "cli-tool",
* outputPath: "./README.md"
* });
* ```
*
* @since 1.0.0
*/
export async function generateReadmeTemplate(
input: GenerateReadmeTemplateInput,
): Promise<{
content: string;
metadata: {
templateType: TemplateType;
estimatedLength: number;
sectionsIncluded: number;
};
}> {
const validatedInput = GenerateReadmeTemplateSchema.parse(input);
const generator = new ReadmeTemplateGenerator();
const content = generator.generateTemplate(validatedInput);
const templateInfo = generator.getTemplateInfo(validatedInput.templateType);
if (!templateInfo) {
throw new Error(`Template type "${validatedInput.templateType}" not found`);
}
// Write to file if output path specified
if (validatedInput.outputPath) {
const fs = await import("fs/promises");
await fs.writeFile(validatedInput.outputPath, content, "utf-8");
}
return {
content,
metadata: {
templateType: validatedInput.templateType,
estimatedLength: templateInfo.estimatedLength,
sectionsIncluded: content.split("##").length - 1,
},
};
}
```
--------------------------------------------------------------------------------
/docs/adrs/adr-0003-static-site-generator-recommendation-engine.md:
--------------------------------------------------------------------------------
```markdown
---
id: adr-3-static-site-generator-recommendation-engine
title: "ADR-003: SSG Recommendation Engine Design"
sidebar_label: "ADR-003: SSG Recommendation Engine Design"
sidebar_position: 3
documcp:
last_updated: "2025-11-20T00:46:21.937Z"
last_validated: "2025-12-09T19:41:38.567Z"
auto_updated: false
update_frequency: monthly
validated_against_commit: 306567b32114502c606244ad6c2930360bcd4201
---
# ADR-003: Static Site Generator Recommendation Engine Design
## Status
Accepted
## Context
DocuMCP must intelligently recommend the most appropriate static site generator (SSG) for each project based on comprehensive analysis of project characteristics, team capabilities, and technical requirements. The recommendation engine needs to move beyond simple feature comparison to provide data-driven, contextual recommendations with clear justifications.
Current SSG landscape includes:
- **Jekyll**: GitHub Pages native, Ruby-based, mature ecosystem
- **Hugo**: Go-based, fast builds, extensive theming
- **Docusaurus**: React-based, modern features, Meta-backed
- **MkDocs**: Python-based, simple, Material theme
- **Eleventy**: JavaScript-based, flexible, minimal configuration
Key challenges:
- Choice paralysis for users unfamiliar with SSG ecosystem
- Technical requirements vary significantly by project type
- Performance needs differ based on content volume and update frequency
- Team capabilities and preferences affect long-term success
- Maintenance overhead varies dramatically between options
## Decision
We will implement a multi-criteria decision analysis (MCDA) framework that evaluates project characteristics against SSG capabilities to provide ranked recommendations with confidence scores and detailed justifications.
### Recommendation Engine Architecture:
#### 1. SSG Knowledge Base
- **Comprehensive SSG profiles** with quantitative and qualitative metrics
- **Performance characteristics**: build times, memory usage, scalability limits
- **Learning curve assessments**: setup complexity, maintenance requirements
- **Feature compatibility matrices**: advanced features, plugin ecosystems
- **Community metrics**: activity, support quality, ecosystem maturity
#### 2. Decision Matrix Framework
- **Multi-criteria evaluation** across weighted factors
- **Project-specific factor weighting** based on analysis results
- **Algorithmic scoring** with transparent calculation methods
- **Confidence assessment** based on factor alignment quality
#### 3. Performance Modeling
- **Build time prediction** based on content volume and complexity
- **Scalability assessment** for projected growth patterns
- **Resource requirement estimation** for different deployment scenarios
#### 4. Compatibility Assessment
- **Technical stack alignment** with existing project technologies
- **Workflow integration** with current development processes
- **CI/CD compatibility** with existing automation infrastructure
## Alternatives Considered
### Simple Rule-Based Recommendations
- **Pros**: Easy to implement, fast execution, predictable results
- **Cons**: Inflexible, doesn't handle edge cases, poor justification quality
- **Decision**: Rejected due to insufficient sophistication for quality recommendations
### Machine Learning-Based Recommendation
- **Pros**: Could learn from successful project outcomes, adaptive over time
- **Cons**: Requires training data, model maintenance, unpredictable results
- **Decision**: Deferred to future versions; insufficient training data initially
### User Survey-Based Selection
- **Pros**: Direct user input, captures preferences and constraints
- **Cons**: Requires user expertise, time-consuming, potential analysis paralysis
- **Decision**: Integrated as preference input to algorithmic recommendation
### External Service Integration (StackShare, etc.)
- **Pros**: Real-world usage data, community insights
- **Cons**: External dependency, potential bias, limited project-specific context
- **Decision**: Rejected for core logic; may integrate for validation
## Consequences
### Positive
- **Objective Recommendations**: Data-driven approach reduces bias and subjectivity
- **Clear Justifications**: Users understand why specific SSGs are recommended
- **Confidence Indicators**: Users know when recommendations are highly certain vs. uncertain
- **Contextual Intelligence**: Recommendations adapt to specific project characteristics
- **Educational Value**: Users learn about SSG capabilities and trade-offs
### Negative
- **Algorithm Complexity**: Multi-criteria analysis requires careful tuning and validation
- **Knowledge Base Maintenance**: SSG profiles need regular updates as tools evolve
- **Subjectivity in Weights**: Factor importance assignments may not match all user preferences
### Risks and Mitigations
- **Recommendation Accuracy**: Validate against known successful project combinations
- **Algorithm Bias**: Test across diverse project types and regularly audit results
- **Knowledge Staleness**: Implement automated SSG capability monitoring and updates
## Implementation Details
### Decision Criteria Framework
```typescript
interface RecommendationCriteria {
projectSize: ProjectSizeMetrics;
technicalComplexity: ComplexityAssessment;
teamCapabilities: TeamProfile;
performanceRequirements: PerformanceNeeds;
maintenancePreferences: MaintenanceProfile;
customizationNeeds: CustomizationRequirements;
}
interface SSGProfile {
name: string;
capabilities: SSGCapabilities;
performance: PerformanceProfile;
learningCurve: LearningCurveMetrics;
ecosystem: EcosystemMetrics;
maintenanceOverhead: MaintenanceMetrics;
}
```
### Scoring Algorithm
```typescript
interface ScoringWeights {
buildPerformance: number; // 0.20
setupComplexity: number; // 0.15
technicalAlignment: number; // 0.25
customizationFlexibility: number; // 0.15
maintenanceOverhead: number; // 0.15
ecosystemMaturity: number; // 0.10
}
function calculateSSGScore(
project: ProjectAnalysis,
ssg: SSGProfile,
weights: ScoringWeights,
): RecommendationScore {
// Weighted scoring across multiple criteria
// Returns score (0-100) with component breakdown
}
```
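As a concrete illustration, the weighted sum could look like the following (a minimal sketch reusing the `ScoringWeights` interface above; the per-criterion scores and helper function are hypothetical, not the implementation in `recommend-ssg.ts`):
```typescript
// Hypothetical: per-criterion scores normalized to 0-1 before weighting.
type CriterionScores = Record<keyof ScoringWeights, number>;

function weightedScore(
  scores: CriterionScores,
  weights: ScoringWeights,
): number {
  const keys = Object.keys(weights) as (keyof ScoringWeights)[];
  // Weighted sum, scaled to a 0-100 score.
  const total = keys.reduce((sum, key) => sum + weights[key] * scores[key], 0);
  return Math.round(total * 100);
}

// Example: a docs-heavy TypeScript project scored against Docusaurus.
const docusaurusScore = weightedScore(
  {
    buildPerformance: 0.7,
    setupComplexity: 0.8,
    technicalAlignment: 0.95,
    customizationFlexibility: 0.8,
    maintenanceOverhead: 0.75,
    ecosystemMaturity: 0.9,
  },
  {
    buildPerformance: 0.2,
    setupComplexity: 0.15,
    technicalAlignment: 0.25,
    customizationFlexibility: 0.15,
    maintenanceOverhead: 0.15,
    ecosystemMaturity: 0.1,
  },
); // => 82
```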
### Performance Modeling (Updated with Research 2025-01-14)
**Research Integration**: Comprehensive SSG performance analysis validates and refines our approach:
```typescript
interface PerformanceModel {
predictBuildTime(
contentVolume: number,
complexity: number,
): BuildTimeEstimate;
assessScalability(projectedGrowth: GrowthPattern): ScalabilityRating;
estimateResourceNeeds(deployment: DeploymentTarget): ResourceRequirements;
// Research-validated performance tiers
calculatePerformanceTier(
ssg: SSGType,
projectScale: ProjectScale,
): PerformanceTier;
}
// Research-validated performance characteristics
const SSG_PERFORMANCE_MATRIX = {
hugo: {
smallSites: { buildTime: "instant", scaleFactor: 1.0, overhead: "minimal" },
mediumSites: {
buildTime: "seconds",
scaleFactor: 1.1,
overhead: "minimal",
},
largeSites: { buildTime: "seconds", scaleFactor: 1.2, overhead: "minimal" },
},
gatsby: {
smallSites: { buildTime: "slow", scaleFactor: 250, overhead: "webpack" },
mediumSites: {
buildTime: "moderate",
scaleFactor: 100,
overhead: "webpack",
},
largeSites: {
buildTime: "improving",
scaleFactor: 40,
overhead: "optimized",
},
},
eleventy: {
smallSites: { buildTime: "fast", scaleFactor: 3, overhead: "node" },
mediumSites: { buildTime: "good", scaleFactor: 8, overhead: "node" },
largeSites: { buildTime: "moderate", scaleFactor: 15, overhead: "node" },
},
jekyll: {
smallSites: { buildTime: "good", scaleFactor: 2, overhead: "ruby" },
mediumSites: { buildTime: "slowing", scaleFactor: 12, overhead: "ruby" },
largeSites: {
buildTime: "poor",
scaleFactor: 25,
overhead: "ruby-bottleneck",
},
},
} as const;
// Research-validated recommendation algorithm
const calculatePerformanceScore = (
ssg: SSGType,
projectMetrics: ProjectMetrics,
  featureScores: Record<SSGType, number>, // 0-1 feature-fit score per SSG
): number => {
const { pageCount, updateFrequency, teamTechnicalLevel } = projectMetrics;
// Scale-based performance weighting (research-validated)
const performanceWeight =
pageCount > 1000 ? 0.8 : pageCount > 100 ? 0.6 : 0.4;
// Research-based performance scores
const baseScores = {
hugo: 100, // Fastest across all scales
eleventy: 85, // Good balance
jekyll: pageCount > 500 ? 60 : 80, // Ruby bottleneck at scale
nextjs: 70, // Framework overhead, good at scale
gatsby: pageCount > 1000 ? 65 : 45, // Severe small-site penalty
docusaurus: 75, // Optimized for documentation
};
return (
baseScores[ssg] * performanceWeight +
    baseScores[ssg] * (1 - performanceWeight) * featureScores[ssg]
);
};
```
### Recommendation Output
```typescript
interface Recommendation {
ssg: SSGProfile;
score: number;
confidence: number;
justification: RecommendationJustification;
tradeoffs: Tradeoff[];
alternativeOptions: AlternativeRecommendation[];
}
interface RecommendationJustification {
primaryStrengths: string[];
concerningWeaknesses: string[];
bestFitReasons: string[];
performancePredictions: PerformancePrediction[];
}
```
### SSG Knowledge Base Structure
```typescript
const SSG_PROFILES: Record<string, SSGProfile> = {
jekyll: {
name: "Jekyll",
capabilities: {
buildSpeed: "moderate",
themingFlexibility: "high",
pluginEcosystem: "mature",
githubPagesNative: true,
contentTypes: ["markdown", "liquid"],
i18nSupport: "plugin-based",
},
performance: {
averageBuildTime: "2-5 minutes per 100 pages",
memoryUsage: "moderate",
scalabilityLimit: "1000+ pages",
},
learningCurve: {
setupComplexity: "low-moderate",
configurationComplexity: "moderate",
customizationComplexity: "moderate-high",
},
// ... additional profile data
},
// ... other SSG profiles
};
```
### Confidence Calculation
```typescript
function calculateConfidence(
scores: SSGScore[],
projectAnalysis: ProjectAnalysis,
): number {
const scoreSpread = Math.max(...scores) - Math.min(...scores);
const analysisCompleteness = assessAnalysisCompleteness(projectAnalysis);
const criteriaAlignment = assessCriteriaAlignment(scores);
// Higher confidence when:
// - Clear winner emerges (high score spread)
// - Analysis is comprehensive
// - Criteria strongly align with one option
return calculateWeightedConfidence(
scoreSpread,
analysisCompleteness,
criteriaAlignment,
);
}
```
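One way the weighted combination could be realized (a hedged sketch; the weights and normalization here are illustrative assumptions, not the shipped algorithm):
```typescript
// Hypothetical combination of the three confidence signals into a 0-1 value.
function calculateWeightedConfidence(
  scoreSpread: number, // gap between best and worst SSG score (0-100)
  analysisCompleteness: number, // 0-1
  criteriaAlignment: number, // 0-1
): number {
  // Treat a spread of 30+ points as a decisive winner.
  const spreadSignal = Math.min(scoreSpread / 30, 1);
  return (
    0.5 * spreadSignal + 0.3 * analysisCompleteness + 0.2 * criteriaAlignment
  );
}
```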
## Quality Assurance
### Validation Strategy
- **Benchmark Projects**: Test against known successful project-SSG combinations
- **Expert Review**: Documentation experts validate recommendation logic
- **User Feedback**: Collect real-world outcomes to refine algorithms
- **A/B Testing**: Compare algorithm versions for recommendation quality
### Testing Framework
```typescript
describe("RecommendationEngine", () => {
it("should recommend Jekyll for simple documentation sites");
it("should recommend Hugo for performance-critical large sites");
it("should recommend Docusaurus for React-based projects");
it("should provide low confidence for ambiguous project profiles");
it("should justify all recommendations with specific reasons");
});
```
### Monitoring and Metrics
- Recommendation accuracy rates by project type
- User satisfaction with recommendations
- Confidence score calibration accuracy
- Algorithm performance and execution time
## Knowledge Base Maintenance
### SSG Capability Tracking
- Regular monitoring of SSG releases and capability changes
- Community feedback integration for real-world performance data
- Automated testing of SSG performance benchmarks
- Expert review cycles for knowledge base accuracy
### Update Processes
- Quarterly comprehensive review of all SSG profiles
- Monthly monitoring of major releases and capability changes
- Community contribution process for knowledge base improvements
- Automated validation of knowledge base consistency
## Future Enhancements
### Advanced Analytics
- Historical success rate tracking by recommendation
- Machine learning integration for pattern recognition
- User preference learning and personalization
- Comparative analysis across similar projects
### Extended SSG Support
- Evaluation framework for new SSG additions
- Community-contributed SSG profiles
- Specialized SSG recommendations (e.g., Sphinx for API docs)
- Custom SSG configuration for specific use cases
### Integration Features
- Direct integration with SSG documentation and examples
- Automated setup validation and testing
- Performance monitoring and optimization recommendations
- Migration assistance between SSGs
## Security and Privacy
- No collection of sensitive project information
- Anonymized analytics for algorithm improvement
- Transparent recommendation criteria and methodology
- User control over data sharing preferences
## Implementation Status
**Status**: ✅ Implemented (2025-12-12)
**Implementation Files**:
- `src/tools/recommend-ssg.ts` - Main SSG recommendation engine
- `src/memory/enhanced-manager.ts` - Enhanced recommendation with learning system
- `src/memory/knowledge-graph.ts` - Historical data integration for recommendations
**Key Features Implemented**:
- ✅ Multi-criteria decision analysis (MCDA) framework
- ✅ SSG knowledge base with performance characteristics
- ✅ Historical deployment data integration
- ✅ User preference support (simplicity, features, performance)
- ✅ Confidence scoring and detailed justifications
- ✅ Research-validated performance modeling
**Validation**: The implementation includes research-validated performance characteristics and integrates with the knowledge graph for historical success rate tracking.
## References
- [Static Site Generator Comparison Studies](https://jamstack.org/generators/)
- [Multi-Criteria Decision Analysis](https://en.wikipedia.org/wiki/Multiple-criteria_decision_analysis)
- [Static Site Generator Performance Comparison](https://jamstack.org/generators/)
```
--------------------------------------------------------------------------------
/src/tools/manage-sitemap.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Manage Sitemap Tool
*
* MCP tool for generating, validating, and managing sitemap.xml files.
* Sitemap.xml serves as the source of truth for all documentation links.
*/
import { z } from "zod";
import path from "path";
import { promises as fs } from "fs";
import {
generateSitemap,
validateSitemap,
updateSitemap,
listSitemapUrls,
type SitemapUrl,
type SitemapStats,
} from "../utils/sitemap-generator.js";
import { formatMCPResponse } from "../types/api.js";
/**
* Input schema for manage_sitemap tool
*/
export const ManageSitemapInputSchema = z.object({
action: z
.enum(["generate", "validate", "update", "list"])
.describe(
"Action to perform: generate (create new), validate (check structure), update (sync with docs), list (show all URLs)",
),
docsPath: z.string().describe("Path to documentation root directory"),
baseUrl: z
.string()
.optional()
.describe(
"Base URL for the site (e.g., https://user.github.io/repo). Required for generate/update actions.",
),
includePatterns: z
.array(z.string())
.optional()
.describe(
"File patterns to include (default: **/*.md, **/*.html, **/*.mdx)",
),
excludePatterns: z
.array(z.string())
.optional()
.describe(
"File patterns to exclude (default: node_modules, .git, dist, build, .documcp)",
),
updateFrequency: z
.enum(["always", "hourly", "daily", "weekly", "monthly", "yearly", "never"])
.optional()
.describe("Default change frequency for pages"),
useGitHistory: z
.boolean()
.optional()
.default(true)
.describe("Use git history for last modified dates (default: true)"),
sitemapPath: z
.string()
.optional()
.describe("Custom path for sitemap.xml (default: docsPath/sitemap.xml)"),
});
export type ManageSitemapInput = z.input<typeof ManageSitemapInputSchema>;
/**
* Manage sitemap.xml for documentation
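 *
 * @example
 * // Hypothetical usage: generate a sitemap for ./docs
 * const result = await manageSitemap({
 *   action: "generate",
 *   docsPath: "./docs",
 *   baseUrl: "https://user.github.io/repo",
 * });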
*/
export async function manageSitemap(
input: ManageSitemapInput,
): Promise<{ content: any[] }> {
const { action, docsPath, sitemapPath } = input;
// Resolve sitemap path
const resolvedSitemapPath = sitemapPath || path.join(docsPath, "sitemap.xml");
try {
// Verify docs directory exists
try {
await fs.access(docsPath);
} catch {
return formatMCPResponse({
success: false,
error: {
code: "DOCS_DIR_NOT_FOUND",
message: `Documentation directory not found: ${docsPath}`,
},
metadata: {
toolVersion: "1.0.0",
executionTime: Date.now(),
timestamp: new Date().toISOString(),
},
});
}
switch (action) {
case "generate":
return await generateSitemapAction(input, resolvedSitemapPath);
case "validate":
return await validateSitemapAction(resolvedSitemapPath);
case "update":
return await updateSitemapAction(input, resolvedSitemapPath);
case "list":
return await listSitemapAction(resolvedSitemapPath);
default:
return formatMCPResponse({
success: false,
error: {
code: "UNKNOWN_ACTION",
message: `Unknown action: ${action}`,
},
metadata: {
toolVersion: "1.0.0",
executionTime: Date.now(),
timestamp: new Date().toISOString(),
},
});
}
} catch (error) {
return formatMCPResponse({
success: false,
error: {
code: "SITEMAP_ERROR",
message: `Error managing sitemap: ${
error instanceof Error ? error.message : String(error)
}`,
},
metadata: {
toolVersion: "1.0.0",
executionTime: Date.now(),
timestamp: new Date().toISOString(),
},
});
}
}
/**
* Generate new sitemap.xml
*/
async function generateSitemapAction(
input: ManageSitemapInput,
sitemapPath: string,
): Promise<{ content: any[] }> {
const {
docsPath,
baseUrl,
includePatterns,
excludePatterns,
updateFrequency,
} = input;
if (!baseUrl) {
return formatMCPResponse({
success: false,
error: {
code: "BASE_URL_REQUIRED",
message: "baseUrl is required for generate action",
},
metadata: {
toolVersion: "1.0.0",
executionTime: 0,
timestamp: new Date().toISOString(),
},
});
}
// Generate sitemap
const { xml, urls, stats } = await generateSitemap({
baseUrl,
docsPath,
includePatterns,
excludePatterns,
useGitHistory: input.useGitHistory,
defaultChangeFreq: updateFrequency || "monthly",
});
// Write sitemap.xml
await fs.writeFile(sitemapPath, xml, "utf-8");
// Format output
const output = formatGenerateOutput(sitemapPath, urls, stats);
return formatMCPResponse({
success: true,
data: {
action: "generate",
sitemapPath,
totalUrls: stats.totalUrls,
categories: stats.byCategory,
output,
},
metadata: {
toolVersion: "1.0.0",
executionTime: Date.now(),
timestamp: new Date().toISOString(),
},
});
}
/**
* Validate existing sitemap.xml
*/
async function validateSitemapAction(
sitemapPath: string,
): Promise<{ content: any[] }> {
// Check if sitemap exists
try {
await fs.access(sitemapPath);
} catch {
return formatMCPResponse({
success: false,
error: {
code: "SITEMAP_NOT_FOUND",
message: `Sitemap not found: ${sitemapPath}. Use action: "generate" to create a new sitemap.`,
},
metadata: {
toolVersion: "1.0.0",
executionTime: 0,
timestamp: new Date().toISOString(),
},
});
}
// Validate sitemap
const validation = await validateSitemap(sitemapPath);
// Format output
const output = formatValidationOutput(sitemapPath, validation);
return formatMCPResponse({
success: validation.valid,
data: {
action: "validate",
valid: validation.valid,
errorCount: validation.errors.length,
warningCount: validation.warnings.length,
urlCount: validation.urlCount,
output,
},
error: validation.valid
? undefined
: {
code: "VALIDATION_FAILED",
message: `Sitemap validation failed with ${validation.errors.length} error(s)`,
},
metadata: {
toolVersion: "1.0.0",
executionTime: Date.now(),
timestamp: new Date().toISOString(),
},
});
}
/**
* Update existing sitemap.xml
*/
async function updateSitemapAction(
input: ManageSitemapInput,
sitemapPath: string,
): Promise<{ content: any[] }> {
const {
docsPath,
baseUrl,
includePatterns,
excludePatterns,
updateFrequency,
} = input;
if (!baseUrl) {
return formatMCPResponse({
success: false,
error: {
code: "BASE_URL_REQUIRED",
message: "baseUrl is required for update action",
},
metadata: {
toolVersion: "1.0.0",
executionTime: 0,
timestamp: new Date().toISOString(),
},
});
}
// Check if sitemap exists
const sitemapExists = await fs
.access(sitemapPath)
.then(() => true)
.catch(() => false);
if (!sitemapExists) {
return formatMCPResponse({
success: false,
error: {
code: "SITEMAP_NOT_FOUND",
message: `Sitemap not found: ${sitemapPath}. Run generate action first.`,
},
metadata: {
toolVersion: "1.0.0",
executionTime: 0,
timestamp: new Date().toISOString(),
},
});
}
// Update sitemap
const changes = await updateSitemap(sitemapPath, {
baseUrl,
docsPath,
includePatterns,
excludePatterns,
useGitHistory: input.useGitHistory,
defaultChangeFreq: updateFrequency || "monthly",
});
// Format output
const output = formatUpdateOutput(sitemapPath, changes);
return formatMCPResponse({
success: true,
data: {
action: "update",
added: changes.added,
removed: changes.removed,
updated: changes.updated,
total: changes.total,
output,
},
metadata: {
toolVersion: "1.0.0",
executionTime: Date.now(),
timestamp: new Date().toISOString(),
},
});
}
/**
* List all URLs from sitemap.xml
*/
async function listSitemapAction(
sitemapPath: string,
): Promise<{ content: any[] }> {
// Check if sitemap exists
try {
await fs.access(sitemapPath);
} catch {
return formatMCPResponse({
success: false,
error: {
code: "SITEMAP_NOT_FOUND",
message: `Sitemap not found: ${sitemapPath}. Use action: "generate" to create a new sitemap.`,
},
metadata: {
toolVersion: "1.0.0",
executionTime: 0,
timestamp: new Date().toISOString(),
},
});
}
// List URLs
const urls = await listSitemapUrls(sitemapPath);
// Format output
const output = formatListOutput(sitemapPath, urls);
return formatMCPResponse({
success: true,
data: {
action: "list",
totalUrls: urls.length,
urls: urls.map((u) => ({
loc: u.loc,
priority: u.priority,
category: u.category,
lastmod: u.lastmod,
})),
output,
},
metadata: {
toolVersion: "1.0.0",
executionTime: Date.now(),
timestamp: new Date().toISOString(),
},
});
}
/**
* Format generate action output
*/
function formatGenerateOutput(
sitemapPath: string,
urls: SitemapUrl[],
stats: SitemapStats,
): string {
const lines: string[] = [
"✅ Sitemap generated successfully!",
"",
`📄 Location: ${sitemapPath}`,
`📊 Total URLs: ${stats.totalUrls}`,
"",
"📋 URLs by Category:",
];
// Sort categories by count (descending)
const sortedCategories = Object.entries(stats.byCategory).sort(
([, a], [, b]) => b - a,
);
for (const [category, count] of sortedCategories) {
const percentage = ((count / stats.totalUrls) * 100).toFixed(1);
lines.push(` • ${category}: ${count} (${percentage}%)`);
}
lines.push("");
lines.push("🔄 Change Frequencies:");
// Sort change frequencies
const sortedFreqs = Object.entries(stats.byChangeFreq).sort(
([, a], [, b]) => b - a,
);
for (const [freq, count] of sortedFreqs) {
lines.push(` • ${freq}: ${count}`);
}
// Show top priority URLs
const topUrls = urls.filter((u) => (u.priority || 0) >= 0.9).slice(0, 5);
if (topUrls.length > 0) {
lines.push("");
lines.push("⭐ High Priority Pages:");
for (const url of topUrls) {
lines.push(` • [${url.priority?.toFixed(1)}] ${url.title || url.loc}`);
}
}
lines.push("");
lines.push("💡 Next Steps:");
lines.push(" → Submit sitemap to search engines (Google, Bing)");
lines.push(" → Add sitemap to robots.txt");
lines.push(" → Deploy to GitHub Pages");
return lines.join("\n");
}
/**
* Format validation output
*/
function formatValidationOutput(
sitemapPath: string,
validation: {
valid: boolean;
errors: string[];
warnings: string[];
urlCount: number;
},
): string {
const lines: string[] = [];
if (validation.valid) {
lines.push("✅ Sitemap is valid!");
} else {
lines.push("❌ Sitemap validation failed!");
}
lines.push("");
lines.push(`📄 Location: ${sitemapPath}`);
lines.push(`📊 Total URLs: ${validation.urlCount}`);
if (validation.errors.length > 0) {
lines.push("");
lines.push("🔴 Errors:");
for (const error of validation.errors) {
lines.push(` • ${error}`);
}
}
if (validation.warnings.length > 0) {
lines.push("");
lines.push("⚠️ Warnings:");
for (const warning of validation.warnings) {
lines.push(` • ${warning}`);
}
}
if (validation.valid) {
lines.push("");
lines.push("💡 Recommendations:");
lines.push(" ℹ️ Sitemap follows the Sitemaps 0.9 protocol");
lines.push(" ℹ️ Ready for search engine submission");
}
return lines.join("\n");
}
/**
* Format update output
*/
function formatUpdateOutput(
sitemapPath: string,
changes: { added: number; removed: number; updated: number; total: number },
): string {
const lines: string[] = [
"✅ Sitemap updated successfully!",
"",
`📄 Location: ${sitemapPath}`,
`📊 Total URLs: ${changes.total}`,
"",
"📝 Changes:",
];
if (changes.added > 0) {
lines.push(` ✨ Added: ${changes.added} new page(s)`);
}
if (changes.removed > 0) {
lines.push(` 🗑️ Removed: ${changes.removed} deleted page(s)`);
}
if (changes.updated > 0) {
lines.push(` 🔄 Updated: ${changes.updated} modified page(s)`);
}
if (changes.added === 0 && changes.removed === 0 && changes.updated === 0) {
lines.push(" ℹ️ No changes detected");
}
lines.push("");
lines.push("💡 Next Steps:");
lines.push(" → Review changes if needed");
lines.push(" → Redeploy to GitHub Pages");
lines.push(" → Notify search engines of updates");
return lines.join("\n");
}
/**
* Format list output
*/
function formatListOutput(sitemapPath: string, urls: SitemapUrl[]): string {
const lines: string[] = [
`📄 Sitemap URLs from: ${sitemapPath}`,
`📊 Total: ${urls.length}`,
"",
];
// Group by category
const byCategory: Record<string, SitemapUrl[]> = {};
for (const url of urls) {
const category = url.category || "default";
if (!byCategory[category]) {
byCategory[category] = [];
}
byCategory[category].push(url);
}
// Display by category
for (const [category, categoryUrls] of Object.entries(byCategory)) {
lines.push(`📂 ${category} (${categoryUrls.length}):`);
// Sort by priority
const sorted = categoryUrls.sort(
(a, b) => (b.priority || 0) - (a.priority || 0),
);
for (const url of sorted.slice(0, 10)) {
// Show first 10 per category
const priority = url.priority?.toFixed(1) || "0.5";
const title = url.title || path.basename(url.loc);
lines.push(` [${priority}] ${title}`);
lines.push(` ${url.loc}`);
}
if (categoryUrls.length > 10) {
lines.push(` ... and ${categoryUrls.length - 10} more`);
}
lines.push("");
}
return lines.join("\n");
}
```
--------------------------------------------------------------------------------
/docs/adrs/adr-0013-release-pipeline-and-package-distribution.md:
--------------------------------------------------------------------------------
```markdown
---
id: adr-13-release-pipeline-and-package-distribution
title: "ADR-013: Release Pipeline and Package Distribution Architecture"
sidebar_label: "ADR-013: Release Pipeline Architecture"
sidebar_position: 13
documcp:
last_updated: "2025-01-14T00:00:00.000Z"
last_validated: "2025-12-12T00:00:00.000Z"
auto_updated: false
update_frequency: monthly
validated_against_commit: dbef13f
status: accepted
date: "2025-01-14"
---
# ADR-013: Release Pipeline and Package Distribution Architecture
## Status
Accepted
## Date
2025-01-14
## Implementation Date
2025-01-14
## Context
DocuMCP needs a robust release pipeline for package distribution via npm and automated changelog generation. Current manual processes lead to inconsistencies, missed entries, and potential publication failures. The project requires architectural decisions for automation, verification, and quality assurance in the release process.
**Core Problems**:
1. **npm Package Publishing** (Issue #1): Release workflow is configured to publish to npm, but package "documcp" is not found on the npm registry, indicating either publication failures or configuration issues
2. **Manual Changelog Updates** (Issue #2): Changelog updates are currently manual, leading to potential inconsistencies and missed entries
3. **Test Coverage Gaps** (Issue #3): Current test coverage (82.59%) is below the 85% target threshold
4. **Inconsistent Commit Messages**: Lack of conventional commit standards makes automated processing difficult
**Current State**:
- Basic release workflow exists in `.github/workflows/release.yml`
- Manual `CHANGELOG.md` updates
- Partial conventional commit adoption
- No publication verification
- No automated changelog generation
**Strategic Importance**: Release pipeline quality directly impacts:
- Package availability and distribution
- User trust and adoption
- Developer experience and workflow efficiency
- Documentation accuracy and completeness
## Decision
We will implement an automated release pipeline with npm package publishing, conventional commits enforcement via commitlint, automated changelog generation using standard-version, and comprehensive verification steps. The pipeline will include publication verification, error handling, retry mechanisms, and quality gates for test coverage and commit message standards.
### Release Pipeline Architecture:
#### 1. Conventional Commits Enforcement
**Purpose**: Standardize commit messages for automated processing
**Implementation**:
- **commitlint**: Enforce conventional commit format
- **Husky hooks**: Pre-commit validation of commit messages
- **Commit message format**: `type(scope): subject` (e.g., `feat(tools): add cleanup agent artifacts`)
- **Types**: feat, fix, docs, style, refactor, test, chore, perf, ci, build, revert
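The hook wiring is typically a one-line Husky `commit-msg` script that delegates to commitlint (a sketch of the usual pattern; the repository's actual `.husky/commit-msg` may differ):
```bash
#!/usr/bin/env sh
# .husky/commit-msg — reject commits whose messages fail commitlint
npx --no -- commitlint --edit "$1"
```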
#### 2. Automated Changelog Generation
**Purpose**: Generate consistent, comprehensive changelogs automatically
**Implementation**:
- **standard-version**: Automated version bumping and changelog generation
- **Categorization**: Features, Bug Fixes, Documentation, Chores, etc.
- **Integration**: GitHub Releases with generated changelog content
- **Version management**: Semantic versioning (major.minor.patch)
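In practice these pieces are exposed as npm scripts (a sketch; `release:dry-run` is referenced under Verification below, but the exact script set in `package.json` may differ):
```json
{
  "scripts": {
    "release": "standard-version",
    "release:dry-run": "standard-version --dry-run"
  }
}
```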
#### 3. npm Package Publishing
**Purpose**: Reliable package distribution via npm registry
**Implementation**:
- **Publication verification**: Verify package exists after publish
- **Error handling**: Retry mechanisms for transient failures
- **Token management**: Secure NPM_TOKEN handling
- **Dry-run testing**: Validate publication before actual release
#### 4. Quality Gates
**Purpose**: Ensure release quality and reliability
**Gates**:
- **Test coverage**: Minimum 85% statement coverage
- **Commit message validation**: All commits follow conventional format
- **Build verification**: All tests pass before release
- **Publication verification**: Confirm package availability after publish
### Implementation Details:
#### Conventional Commits Setup
```typescript
// commitlint.config.js
module.exports = {
extends: ["@commitlint/config-conventional"],
rules: {
"type-enum": [
2,
"always",
[
"feat",
"fix",
"docs",
"style",
"refactor",
"test",
"chore",
"perf",
"ci",
"build",
"revert",
],
],
"subject-case": [2, "never", ["start-case", "pascal-case", "upper-case"]],
},
};
```
#### Automated Changelog Configuration
```javascript
// .versionrc.js
module.exports = {
types: [
{ type: "feat", section: "✨ Features" },
{ type: "fix", section: "🐛 Bug Fixes" },
{ type: "docs", section: "📚 Documentation" },
{ type: "style", section: "💎 Styles" },
{ type: "refactor", section: "📦 Code Refactoring" },
{ type: "perf", section: "⚡ Performance Improvements" },
{ type: "test", section: "✅ Tests" },
{ type: "build", section: "👷 Build System" },
{ type: "ci", section: "🔧 CI/CD" },
{ type: "chore", section: "♻️ Chores" },
{ type: "revert", section: "⏪ Reverts" },
],
releaseCommitMessageFormat: "chore(release): {{currentTag}}",
bumpFiles: [
{
filename: "package.json",
type: "json",
},
{
filename: "package-lock.json",
type: "json",
},
],
};
```
#### npm Publishing with Verification
```yaml
# .github/workflows/release.yml (excerpt)
- name: Publish to npm
run: |
npm publish --access public || {
echo "Publication failed, will retry..."
sleep 5
npm publish --access public
}
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
- name: Verify npm publication
run: |
PACKAGE_VERSION=$(node -p "require('./package.json').version")
npm view documcp@${PACKAGE_VERSION} || {
echo "Package verification failed"
exit 1
}
- name: Test package installation
  run: |
    PACKAGE_VERSION=$(node -p "require('./package.json').version")
    npm install -g documcp@${PACKAGE_VERSION}
    documcp --version
```
#### Quality Gates Implementation
```yaml
# Quality gates in release workflow
- name: Check test coverage
run: |
    COVERAGE=$(npm run test:coverage -- --coverageReporters=text-summary | grep "Statements" | awk '{print $3}' | sed 's/%//')
if (( $(echo "$COVERAGE < 85" | bc -l) )); then
echo "Test coverage ${COVERAGE}% is below 85% threshold"
exit 1
fi
- name: Validate commit messages
run: |
npx commitlint --from origin/main --to HEAD --verbose
```
## Alternatives Considered
### Manual Release Process
- **Pros**: Full control, no tooling dependencies, simple workflow
- **Cons**: Error-prone, inconsistent, time-consuming, human error risk
- **Decision**: Rejected - manual processes are unreliable and don't scale
### Third-Party Release Tools (semantic-release)
- **Pros**: More features, GitHub integration, automated releases
- **Cons**: Higher complexity, more dependencies, steeper learning curve
- **Decision**: Considered but standard-version chosen for simplicity and control
### GitHub-Only Distribution
- **Pros**: Simpler, no npm account needed, GitHub-native
- **Cons**: Not standard for Node.js packages, limited discoverability, poor developer experience
- **Decision**: Rejected - npm is the standard distribution channel for Node.js packages
### No Changelog Automation
- **Pros**: Simpler implementation, no tooling overhead
- **Cons**: Manual process is unreliable, inconsistent formatting, missed entries
- **Decision**: Rejected - automated changelog is essential for documentation quality
## Consequences
### Positive
- **Consistent Releases**: Automated process ensures consistent, reliable releases
- **Reduced Errors**: Automation eliminates human error in versioning and changelog generation
- **Improved Documentation**: Automated changelog ensures comprehensive release documentation
- **Better Developer Experience**: Conventional commits provide clear project history
- **Reliable Distribution**: Publication verification ensures package availability
- **Quality Assurance**: Quality gates prevent low-quality releases
### Negative
- **Tooling Complexity**: Additional tools (commitlint, husky, standard-version) require maintenance
- **Learning Curve**: Team must learn and adopt conventional commit format
- **External Dependencies**: Dependency on npm registry availability and reliability
- **CI/CD Maintenance**: Release pipeline requires ongoing maintenance and updates
- **Initial Setup**: Requires configuration and integration work
### Risks and Mitigations
- **npm Publication Failures**: Implement retry mechanisms and verification steps
- **Commit Message Rejection**: Provide clear documentation and pre-commit hooks
- **Tool Compatibility**: Test tools with Node.js 20+ and maintain compatibility
- **Coverage Regression**: Set quality gates and monitor coverage trends
## Integration Points
### GitHub Actions Integration (ADR-005)
- Release pipeline integrates with GitHub Pages deployment workflow
- Shared secrets and configuration management
- Coordinated release and deployment processes
### MCP Server Architecture (ADR-001)
- Package distribution enables MCP server installation via npm
- Version management aligns with MCP SDK compatibility
- Release process supports MCP protocol evolution
### Documentation System (ADR-004, ADR-008)
- Automated changelog integrates with Diataxis documentation structure
- Release notes follow documentation standards
- Version documentation supports user guidance
## Implementation Roadmap
### Phase 1: Foundation (High Priority)
- Set up commitlint configuration
- Configure Husky hooks for commit validation
- Add standard-version for changelog generation
- Update release workflow with basic automation
### Phase 2: npm Publishing (High Priority)
- Verify NPM_TOKEN configuration
- Add publication verification steps
- Implement error handling and retry mechanisms
- Test complete publication flow
### Phase 3: Quality Gates (Medium Priority)
- Implement test coverage gate (85% threshold)
- Add commit message validation in CI
- Create release health monitoring
- Document release process
### Phase 4: Advanced Features (Low Priority)
- AI-enhanced release notes (Issue #7)
- Release health dashboard (Issue #8)
- Smart Dependabot auto-merge (Issue #6)
- Enhanced release notes with metrics
## Quality Assurance
### Release Process Testing
```typescript
describe("Release Pipeline", () => {
it("should validate commit messages");
it("should generate changelog correctly");
it("should publish to npm successfully");
it("should verify package availability");
it("should enforce quality gates");
});
```
### Verification Checklist
- [ ] Commit messages follow conventional format
- [ ] Changelog automatically generated and accurate
- [ ] Package published to npm successfully
- [ ] Package verification passes
- [ ] Test coverage meets 85% threshold
- [ ] All tests pass before release
- [ ] GitHub Release created with changelog
## Success Metrics
### Release Quality
- **Publication Success Rate**: 100% successful npm publications
- **Changelog Accuracy**: 100% of commits included in changelog
- **Coverage Compliance**: 100% of releases meet 85% coverage threshold
- **Commit Compliance**: 100% of commits follow conventional format
### Developer Experience
- **Release Time**: Under 10 minutes from tag to publication
- **Error Rate**: Under 1% release failures
- **Documentation Quality**: Comprehensive release notes for all releases
## Future Enhancements
### Advanced Automation
- AI-powered release note generation (Issue #7)
- Release health dashboard with metrics (Issue #8)
- Automated dependency updates with Dependabot (Issue #6)
- Multi-package monorepo support
### Quality Improvements
- Performance benchmarking in releases
- Security scanning integration
- Automated compatibility testing
- Release rollback capabilities
## Implementation Status
**Status**: ✅ Implemented (2025-01-14)
**Commit**: dbef13f - "feat(release): implement npm publishing verification and automated changelog (#1, #2)"
### Completed Features
1. **npm Publishing Verification** ✅
- Authentication verification before publishing
- Retry mechanism (3 attempts with 5-second delays)
- Publication verification step (checks package exists on npm registry)
- Package installation test after publication
2. **Automated Changelog Generation** ✅
- standard-version integration verified and working
- Enhanced changelog extraction in release workflow
- Improved error handling for changelog generation
- Proper integration with GitHub Releases
3. **Commit Message Validation** ✅
- Pre-release commit message validation added
- Validates commits follow conventional format
- Clear error messages for invalid commits
4. **Quality Gates** ✅
- Coverage threshold updated from 80% to 85%
- Test coverage check integrated (currently 91.65%)
- Build verification before release
### Implementation Files
- `.github/workflows/release.yml` - Source of implementation; it wires the `test` job (coverage gate, commitlint validation, performance and build checks) with the `release` job (conventional commits validation, npm publication + retry, publication verification, GitHub Release + changelog, and package installation smoke tests).
- `commitlint.config.js` - Already configured (no changes needed)
- `.versionrc.json` - Already configured (no changes needed)
- `.husky/commit-msg` - Already configured (no changes needed)
### Verification
- ✅ `npm run release:dry-run` tested and working
- ✅ Changelog generation verified
- ✅ All quality gates in place
- ✅ Error handling implemented throughout
## References
- [ADR-001: MCP Server Architecture](adr-0001-mcp-server-architecture.md)
- [ADR-005: GitHub Pages Deployment Automation](adr-0005-github-pages-deployment-automation.md)
- GitHub Issue: #1 - Fix npm Package Publishing (✅ Fixed)
- GitHub Issue: #2 - Implement Automated Changelog Generation (✅ Implemented)
- GitHub Issue: #3 - Improve Test Coverage to 85% (✅ Exceeded - 91.65%)
- Commit: dbef13f - Implementation commit
- [Conventional Commits](https://www.conventionalcommits.org/)
- [standard-version](https://github.com/conventional-changelog/standard-version)
- [commitlint](https://commitlint.js.org/)
```