This is page 4 of 20. Use http://codebase.md/tosin2013/documcp?page={x} to view the full context.
# Directory Structure
```
├── .dockerignore
├── .eslintignore
├── .eslintrc.json
├── .github
│ ├── agents
│ │ ├── documcp-ast.md
│ │ ├── documcp-deploy.md
│ │ ├── documcp-memory.md
│ │ ├── documcp-test.md
│ │ └── documcp-tool.md
│ ├── copilot-instructions.md
│ ├── dependabot.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── automated-changelog.md
│ │ ├── bug_report.md
│ │ ├── bug_report.yml
│ │ ├── documentation_issue.md
│ │ ├── feature_request.md
│ │ ├── feature_request.yml
│ │ ├── npm-publishing-fix.md
│ │ └── release_improvements.md
│ ├── PULL_REQUEST_TEMPLATE.md
│ ├── release-drafter.yml
│ └── workflows
│ ├── auto-merge.yml
│ ├── ci.yml
│ ├── codeql.yml
│ ├── dependency-review.yml
│ ├── deploy-docs.yml
│ ├── README.md
│ ├── release-drafter.yml
│ └── release.yml
├── .gitignore
├── .husky
│ ├── commit-msg
│ └── pre-commit
├── .linkcheck.config.json
├── .markdown-link-check.json
├── .nvmrc
├── .pre-commit-config.yaml
├── .versionrc.json
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── CONTRIBUTING.md
├── docker-compose.docs.yml
├── Dockerfile.docs
├── docs
│ ├── .docusaurus
│ │ ├── docusaurus-plugin-content-docs
│ │ │ └── default
│ │ │ └── __mdx-loader-dependency.json
│ │ └── docusaurus-plugin-content-pages
│ │ └── default
│ │ └── __plugin.json
│ ├── adrs
│ │ ├── 001-mcp-server-architecture.md
│ │ ├── 002-repository-analysis-engine.md
│ │ ├── 003-static-site-generator-recommendation-engine.md
│ │ ├── 004-diataxis-framework-integration.md
│ │ ├── 005-github-pages-deployment-automation.md
│ │ ├── 006-mcp-tools-api-design.md
│ │ ├── 007-mcp-prompts-and-resources-integration.md
│ │ ├── 008-intelligent-content-population-engine.md
│ │ ├── 009-content-accuracy-validation-framework.md
│ │ ├── 010-mcp-resource-pattern-redesign.md
│ │ └── README.md
│ ├── api
│ │ ├── .nojekyll
│ │ ├── assets
│ │ │ ├── hierarchy.js
│ │ │ ├── highlight.css
│ │ │ ├── icons.js
│ │ │ ├── icons.svg
│ │ │ ├── main.js
│ │ │ ├── navigation.js
│ │ │ ├── search.js
│ │ │ └── style.css
│ │ ├── hierarchy.html
│ │ ├── index.html
│ │ ├── modules.html
│ │ └── variables
│ │ └── TOOLS.html
│ ├── assets
│ │ └── logo.svg
│ ├── development
│ │ └── MCP_INSPECTOR_TESTING.md
│ ├── docusaurus.config.js
│ ├── explanation
│ │ ├── architecture.md
│ │ └── index.md
│ ├── guides
│ │ ├── link-validation.md
│ │ ├── playwright-integration.md
│ │ └── playwright-testing-workflow.md
│ ├── how-to
│ │ ├── analytics-setup.md
│ │ ├── custom-domains.md
│ │ ├── documentation-freshness-tracking.md
│ │ ├── github-pages-deployment.md
│ │ ├── index.md
│ │ ├── local-testing.md
│ │ ├── performance-optimization.md
│ │ ├── prompting-guide.md
│ │ ├── repository-analysis.md
│ │ ├── seo-optimization.md
│ │ ├── site-monitoring.md
│ │ ├── troubleshooting.md
│ │ └── usage-examples.md
│ ├── index.md
│ ├── knowledge-graph.md
│ ├── package-lock.json
│ ├── package.json
│ ├── phase-2-intelligence.md
│ ├── reference
│ │ ├── api-overview.md
│ │ ├── cli.md
│ │ ├── configuration.md
│ │ ├── deploy-pages.md
│ │ ├── index.md
│ │ ├── mcp-tools.md
│ │ └── prompt-templates.md
│ ├── research
│ │ ├── cross-domain-integration
│ │ │ └── README.md
│ │ ├── domain-1-mcp-architecture
│ │ │ ├── index.md
│ │ │ └── mcp-performance-research.md
│ │ ├── domain-2-repository-analysis
│ │ │ └── README.md
│ │ ├── domain-3-ssg-recommendation
│ │ │ ├── index.md
│ │ │ └── ssg-performance-analysis.md
│ │ ├── domain-4-diataxis-integration
│ │ │ └── README.md
│ │ ├── domain-5-github-deployment
│ │ │ ├── github-pages-security-analysis.md
│ │ │ └── index.md
│ │ ├── domain-6-api-design
│ │ │ └── README.md
│ │ ├── README.md
│ │ ├── research-integration-summary-2025-01-14.md
│ │ ├── research-progress-template.md
│ │ └── research-questions-2025-01-14.md
│ ├── robots.txt
│ ├── sidebars.js
│ ├── sitemap.xml
│ ├── src
│ │ └── css
│ │ └── custom.css
│ └── tutorials
│ ├── development-setup.md
│ ├── environment-setup.md
│ ├── first-deployment.md
│ ├── getting-started.md
│ ├── index.md
│ ├── memory-workflows.md
│ └── user-onboarding.md
├── jest.config.js
├── LICENSE
├── Makefile
├── MCP_PHASE2_IMPLEMENTATION.md
├── mcp-config-example.json
├── mcp.json
├── package-lock.json
├── package.json
├── README.md
├── release.sh
├── scripts
│ └── check-package-structure.cjs
├── SECURITY.md
├── setup-precommit.sh
├── src
│ ├── benchmarks
│ │ └── performance.ts
│ ├── index.ts
│ ├── memory
│ │ ├── contextual-retrieval.ts
│ │ ├── deployment-analytics.ts
│ │ ├── enhanced-manager.ts
│ │ ├── export-import.ts
│ │ ├── freshness-kg-integration.ts
│ │ ├── index.ts
│ │ ├── integration.ts
│ │ ├── kg-code-integration.ts
│ │ ├── kg-health.ts
│ │ ├── kg-integration.ts
│ │ ├── kg-link-validator.ts
│ │ ├── kg-storage.ts
│ │ ├── knowledge-graph.ts
│ │ ├── learning.ts
│ │ ├── manager.ts
│ │ ├── multi-agent-sharing.ts
│ │ ├── pruning.ts
│ │ ├── schemas.ts
│ │ ├── storage.ts
│ │ ├── temporal-analysis.ts
│ │ ├── user-preferences.ts
│ │ └── visualization.ts
│ ├── prompts
│ │ └── technical-writer-prompts.ts
│ ├── scripts
│ │ └── benchmark.ts
│ ├── templates
│ │ └── playwright
│ │ ├── accessibility.spec.template.ts
│ │ ├── Dockerfile.template
│ │ ├── docs-e2e.workflow.template.yml
│ │ ├── link-validation.spec.template.ts
│ │ └── playwright.config.template.ts
│ ├── tools
│ │ ├── analyze-deployments.ts
│ │ ├── analyze-readme.ts
│ │ ├── analyze-repository.ts
│ │ ├── check-documentation-links.ts
│ │ ├── deploy-pages.ts
│ │ ├── detect-gaps.ts
│ │ ├── evaluate-readme-health.ts
│ │ ├── generate-config.ts
│ │ ├── generate-contextual-content.ts
│ │ ├── generate-llm-context.ts
│ │ ├── generate-readme-template.ts
│ │ ├── generate-technical-writer-prompts.ts
│ │ ├── kg-health-check.ts
│ │ ├── manage-preferences.ts
│ │ ├── manage-sitemap.ts
│ │ ├── optimize-readme.ts
│ │ ├── populate-content.ts
│ │ ├── readme-best-practices.ts
│ │ ├── recommend-ssg.ts
│ │ ├── setup-playwright-tests.ts
│ │ ├── setup-structure.ts
│ │ ├── sync-code-to-docs.ts
│ │ ├── test-local-deployment.ts
│ │ ├── track-documentation-freshness.ts
│ │ ├── update-existing-documentation.ts
│ │ ├── validate-content.ts
│ │ ├── validate-documentation-freshness.ts
│ │ ├── validate-readme-checklist.ts
│ │ └── verify-deployment.ts
│ ├── types
│ │ └── api.ts
│ ├── utils
│ │ ├── ast-analyzer.ts
│ │ ├── code-scanner.ts
│ │ ├── content-extractor.ts
│ │ ├── drift-detector.ts
│ │ ├── freshness-tracker.ts
│ │ ├── language-parsers-simple.ts
│ │ ├── permission-checker.ts
│ │ └── sitemap-generator.ts
│ └── workflows
│ └── documentation-workflow.ts
├── test-docs-local.sh
├── tests
│ ├── api
│ │ └── mcp-responses.test.ts
│ ├── benchmarks
│ │ └── performance.test.ts
│ ├── edge-cases
│ │ └── error-handling.test.ts
│ ├── functional
│ │ └── tools.test.ts
│ ├── integration
│ │ ├── kg-documentation-workflow.test.ts
│ │ ├── knowledge-graph-workflow.test.ts
│ │ ├── mcp-readme-tools.test.ts
│ │ ├── memory-mcp-tools.test.ts
│ │ ├── readme-technical-writer.test.ts
│ │ └── workflow.test.ts
│ ├── memory
│ │ ├── contextual-retrieval.test.ts
│ │ ├── enhanced-manager.test.ts
│ │ ├── export-import.test.ts
│ │ ├── freshness-kg-integration.test.ts
│ │ ├── kg-code-integration.test.ts
│ │ ├── kg-health.test.ts
│ │ ├── kg-link-validator.test.ts
│ │ ├── kg-storage-validation.test.ts
│ │ ├── kg-storage.test.ts
│ │ ├── knowledge-graph-enhanced.test.ts
│ │ ├── knowledge-graph.test.ts
│ │ ├── learning.test.ts
│ │ ├── manager-advanced.test.ts
│ │ ├── manager.test.ts
│ │ ├── mcp-resource-integration.test.ts
│ │ ├── mcp-tool-persistence.test.ts
│ │ ├── schemas.test.ts
│ │ ├── storage.test.ts
│ │ ├── temporal-analysis.test.ts
│ │ └── user-preferences.test.ts
│ ├── performance
│ │ ├── memory-load-testing.test.ts
│ │ └── memory-stress-testing.test.ts
│ ├── prompts
│ │ ├── guided-workflow-prompts.test.ts
│ │ └── technical-writer-prompts.test.ts
│ ├── server.test.ts
│ ├── setup.ts
│ ├── tools
│ │ ├── all-tools.test.ts
│ │ ├── analyze-coverage.test.ts
│ │ ├── analyze-deployments.test.ts
│ │ ├── analyze-readme.test.ts
│ │ ├── analyze-repository.test.ts
│ │ ├── check-documentation-links.test.ts
│ │ ├── deploy-pages-kg-retrieval.test.ts
│ │ ├── deploy-pages-tracking.test.ts
│ │ ├── deploy-pages.test.ts
│ │ ├── detect-gaps.test.ts
│ │ ├── evaluate-readme-health.test.ts
│ │ ├── generate-contextual-content.test.ts
│ │ ├── generate-llm-context.test.ts
│ │ ├── generate-readme-template.test.ts
│ │ ├── generate-technical-writer-prompts.test.ts
│ │ ├── kg-health-check.test.ts
│ │ ├── manage-sitemap.test.ts
│ │ ├── optimize-readme.test.ts
│ │ ├── readme-best-practices.test.ts
│ │ ├── recommend-ssg-historical.test.ts
│ │ ├── recommend-ssg-preferences.test.ts
│ │ ├── recommend-ssg.test.ts
│ │ ├── simple-coverage.test.ts
│ │ ├── sync-code-to-docs.test.ts
│ │ ├── test-local-deployment.test.ts
│ │ ├── tool-error-handling.test.ts
│ │ ├── track-documentation-freshness.test.ts
│ │ ├── validate-content.test.ts
│ │ ├── validate-documentation-freshness.test.ts
│ │ └── validate-readme-checklist.test.ts
│ ├── types
│ │ └── type-safety.test.ts
│ └── utils
│ ├── ast-analyzer.test.ts
│ ├── content-extractor.test.ts
│ ├── drift-detector.test.ts
│ ├── freshness-tracker.test.ts
│ └── sitemap-generator.test.ts
├── tsconfig.json
└── typedoc.json
```
# Files
--------------------------------------------------------------------------------
/tests/prompts/guided-workflow-prompts.test.ts:
--------------------------------------------------------------------------------
```typescript
import { generateTechnicalWriterPrompts } from "../../src/prompts/technical-writer-prompts.js";
import { promises as fs } from "fs";
import { join } from "path";
import { tmpdir } from "os";
describe("Guided Workflow Prompts", () => {
let tempDir: string;
beforeEach(async () => {
tempDir = join(
tmpdir(),
`test-prompts-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
);
await fs.mkdir(tempDir, { recursive: true });
// Create a test project structure
await fs.writeFile(
join(tempDir, "package.json"),
JSON.stringify({
name: "test-project",
version: "1.0.0",
dependencies: { react: "^18.0.0" },
scripts: { test: "jest", build: "webpack" },
}),
);
await fs.writeFile(
join(tempDir, "README.md"),
"# Test Project\n\nThis is a test project.",
);
await fs.mkdir(join(tempDir, "src"));
await fs.writeFile(join(tempDir, "src/index.js"), 'console.log("hello");');
await fs.mkdir(join(tempDir, "tests"));
await fs.writeFile(
join(tempDir, "tests/index.test.js"),
'test("basic", () => {});',
);
});
afterEach(async () => {
try {
await fs.rm(tempDir, { recursive: true });
} catch {
// Ignore cleanup errors
}
});
describe("analyze-and-recommend prompt", () => {
it("should generate comprehensive analysis and recommendation prompt", async () => {
const messages = await generateTechnicalWriterPrompts(
"analyze-and-recommend",
tempDir,
{
analysis_depth: "standard",
preferences: "performance and ease of use",
},
);
expect(messages).toHaveLength(1);
expect(messages[0]).toHaveProperty("role", "user");
expect(messages[0]).toHaveProperty("content");
expect(messages[0].content).toHaveProperty("type", "text");
expect(messages[0].content.text).toContain(
"Execute a complete repository analysis",
);
expect(messages[0].content.text).toContain("SSG recommendation workflow");
expect(messages[0].content.text).toContain("Analysis Depth: standard");
expect(messages[0].content.text).toContain(
"Preferences: performance and ease of use",
);
expect(messages[0].content.text).toContain("Repository Analysis");
expect(messages[0].content.text).toContain("Implementation Guidance");
expect(messages[0].content.text).toContain("Best Practices");
});
it("should use default values when optional parameters are not provided", async () => {
const messages = await generateTechnicalWriterPrompts(
"analyze-and-recommend",
tempDir,
{},
);
expect(messages[0].content.text).toContain("Analysis Depth: standard");
expect(messages[0].content.text).toContain(
"balanced approach with good community support",
);
});
it("should include project context information", async () => {
const messages = await generateTechnicalWriterPrompts(
"analyze-and-recommend",
tempDir,
{
analysis_depth: "deep",
},
);
expect(messages[0].content.text).toContain("Type: node_application");
expect(messages[0].content.text).toContain("Has Tests: true");
expect(messages[0].content.text).toContain("Package Manager: npm");
});
});
describe("setup-documentation prompt", () => {
it("should generate comprehensive documentation setup prompt", async () => {
const messages = await generateTechnicalWriterPrompts(
"setup-documentation",
tempDir,
{
ssg_type: "docusaurus",
include_examples: true,
},
);
expect(messages).toHaveLength(1);
expect(messages[0]).toHaveProperty("role", "user");
expect(messages[0].content.text).toContain(
"Create a comprehensive documentation structure",
);
expect(messages[0].content.text).toContain("SSG Type: docusaurus");
expect(messages[0].content.text).toContain("Include Examples: true");
expect(messages[0].content.text).toContain(
"Diataxis Framework Implementation",
);
expect(messages[0].content.text).toContain(
"Tutorials: Learning-oriented content",
);
expect(messages[0].content.text).toContain(
"How-to Guides: Problem-solving content",
);
expect(messages[0].content.text).toContain(
"Reference: Information-oriented content",
);
expect(messages[0].content.text).toContain(
"Explanations: Understanding-oriented content",
);
expect(messages[0].content.text).toContain("Configuration Setup");
expect(messages[0].content.text).toContain("GitHub Pages deployment");
expect(messages[0].content.text).toContain("with examples");
});
it("should handle minimal configuration", async () => {
const messages = await generateTechnicalWriterPrompts(
"setup-documentation",
tempDir,
{
include_examples: false,
},
);
expect(messages[0].content.text).toContain(
"SSG Type: recommended based on project analysis",
);
expect(messages[0].content.text).toContain("Include Examples: false");
expect(messages[0].content.text).toContain("templates");
expect(messages[0].content.text).not.toContain("with examples");
});
it("should include current documentation gaps", async () => {
const messages = await generateTechnicalWriterPrompts(
"setup-documentation",
tempDir,
{},
);
expect(messages[0].content.text).toContain("Current Documentation Gaps:");
expect(messages[0].content.text).toContain("Development Integration");
expect(messages[0].content.text).toContain(
"production-ready documentation system",
);
});
});
describe("troubleshoot-deployment prompt", () => {
it("should generate comprehensive troubleshooting prompt", async () => {
const messages = await generateTechnicalWriterPrompts(
"troubleshoot-deployment",
tempDir,
{
repository: "owner/repo",
deployment_url: "https://owner.github.io/repo",
issue_description: "build failing on GitHub Actions",
},
);
expect(messages).toHaveLength(1);
expect(messages[0]).toHaveProperty("role", "user");
expect(messages[0].content.text).toContain(
"Diagnose and fix GitHub Pages deployment issues",
);
expect(messages[0].content.text).toContain("Repository: owner/repo");
expect(messages[0].content.text).toContain(
"Expected URL: https://owner.github.io/repo",
);
expect(messages[0].content.text).toContain(
"Issue Description: build failing on GitHub Actions",
);
expect(messages[0].content.text).toContain("Troubleshooting Checklist");
expect(messages[0].content.text).toContain("Repository Settings");
expect(messages[0].content.text).toContain("Build Configuration");
expect(messages[0].content.text).toContain("Content Issues");
expect(messages[0].content.text).toContain("Deployment Workflow");
expect(messages[0].content.text).toContain("Performance and Security");
expect(messages[0].content.text).toContain("Root cause analysis");
expect(messages[0].content.text).toContain("Systematic Testing");
});
it("should use default values for optional parameters", async () => {
const messages = await generateTechnicalWriterPrompts(
"troubleshoot-deployment",
tempDir,
{
repository: "test/repo",
},
);
expect(messages[0].content.text).toContain(
"Expected URL: GitHub Pages URL",
);
expect(messages[0].content.text).toContain(
"Issue Description: deployment not working as expected",
);
});
it("should include project context for troubleshooting", async () => {
const messages = await generateTechnicalWriterPrompts(
"troubleshoot-deployment",
tempDir,
{
repository: "test/repo",
},
);
expect(messages[0].content.text).toContain("Project Context");
expect(messages[0].content.text).toContain("Type: node_application");
expect(messages[0].content.text).toContain("Diagnostic Approach");
expect(messages[0].content.text).toContain("Systematic Testing");
});
});
describe("Error handling", () => {
it("should throw error for unknown prompt type", async () => {
await expect(
generateTechnicalWriterPrompts("unknown-prompt-type", tempDir, {}),
).rejects.toThrow("Unknown prompt type: unknown-prompt-type");
});
it("should handle missing project directory gracefully", async () => {
const nonExistentDir = join(tmpdir(), "non-existent-dir");
// Should not throw, but may have reduced context
const messages = await generateTechnicalWriterPrompts(
"analyze-and-recommend",
nonExistentDir,
{},
);
expect(messages).toHaveLength(1);
expect(messages[0].content.text).toContain("repository analysis");
});
it("should handle malformed package.json gracefully", async () => {
await fs.writeFile(join(tempDir, "package.json"), "invalid json content");
const messages = await generateTechnicalWriterPrompts(
"setup-documentation",
tempDir,
{},
);
expect(messages).toHaveLength(1);
expect(messages[0].content.text).toContain("documentation structure");
});
});
describe("Prompt content validation", () => {
it("should generate prompts with consistent structure", async () => {
const promptTypes = [
"analyze-and-recommend",
"setup-documentation",
"troubleshoot-deployment",
];
for (const promptType of promptTypes) {
const args =
promptType === "troubleshoot-deployment"
? { repository: "test/repo" }
: {};
const messages = await generateTechnicalWriterPrompts(
promptType,
tempDir,
args,
);
expect(messages).toHaveLength(1);
expect(messages[0]).toHaveProperty("role", "user");
expect(messages[0]).toHaveProperty("content");
expect(messages[0].content).toHaveProperty("type", "text");
expect(messages[0].content.text).toBeTruthy();
expect(messages[0].content.text.length).toBeGreaterThan(100);
}
});
it("should include project-specific information in all prompts", async () => {
const promptTypes = ["analyze-and-recommend", "setup-documentation"];
for (const promptType of promptTypes) {
const messages = await generateTechnicalWriterPrompts(
promptType,
tempDir,
{},
);
expect(messages[0].content.text).toContain("Project Context");
expect(messages[0].content.text).toContain("Type:");
expect(messages[0].content.text).toContain("Languages:");
}
});
});
});
```
--------------------------------------------------------------------------------
/docs/how-to/local-testing.md:
--------------------------------------------------------------------------------
```markdown
---
documcp:
last_updated: "2025-11-20T00:46:21.952Z"
last_validated: "2025-11-20T00:46:21.952Z"
auto_updated: false
update_frequency: monthly
---
# Local Documentation Testing
This guide shows how to test your documentation locally before deploying to GitHub Pages, using containerized environments that leave your system untouched.
## 🎯 Best Practice: Test Build Before Pushing
**Always test your documentation build locally before pushing to git** to ensure GitHub Actions will build successfully:
### Option 1: Test Node.js Build (Recommended - Matches GitHub Actions)
```bash
# Test the same build process GitHub Actions uses
cd docs
npm ci
npm run build
```
This uses the exact same process as GitHub Actions and catches build issues early.
### Option 2: Test Docker Build (Optional - For Container Validation)
```bash
# Quick Docker validation (if Dockerfile is configured)
docker build -f Dockerfile.docs -t documcp-docs-test . && echo "✅ Docker build ready"
```
**Note**: Docker testing validates containerized environments, but GitHub Actions uses Node.js directly, so Option 1 is more reliable for CI validation.
## Quick Start - Containerized Testing
DocuMCP automatically generates a containerized testing environment that requires only Docker or Podman:
```bash
# Run the containerized testing script
./test-docs-local.sh
```
This script will:
1. **Detect** your container runtime (Podman or Docker)
2. **Build** a documentation container
3. **Check** for broken links in your documentation
4. **Serve** the documentation at http://localhost:3001
### Prerequisites
You need either Docker or Podman installed:
**Option 1: Podman (rootless, more secure)**
```bash
# macOS
brew install podman
# Ubuntu/Debian
sudo apt-get install podman
# RHEL/CentOS/Fedora
sudo dnf install podman
```
**Option 2: Docker**
```bash
# macOS
brew install docker
# Or download from: https://docs.docker.com/get-docker/
```
## Container-Based Testing Methods
### Method 1: Using the Generated Script (Recommended)
```bash
# Simple one-command testing
./test-docs-local.sh
```
### Method 2: Using Docker Compose
```bash
# Build and run with Docker Compose
docker-compose -f docker-compose.docs.yml up --build
# Or with Podman Compose
podman-compose -f docker-compose.docs.yml up --build
```
### Method 3: Manual Container Commands
```bash
# Build the container
docker build -f Dockerfile.docs -t documcp-docs .
# or: podman build -f Dockerfile.docs -t documcp-docs .
# Run the container
docker run --rm -p 3001:3001 documcp-docs
# or: podman run --rm -p 3001:3001 documcp-docs
```
### Method 4: Pre-Push Docker Validation
**Recommended workflow before pushing to git:**
```bash
# 1. Test Docker build (validates CI will work)
docker build -f Dockerfile.docs -t documcp-docs-test .
# 2. If successful, test locally
docker run --rm -p 3001:3001 documcp-docs-test
# 3. Verify at http://localhost:3001, then push to git
```
This ensures your Docker build matches what GitHub Actions will use.
### Method 5: Legacy Local Installation (Not Recommended)
If you prefer to install dependencies locally (affects your system):
```bash
cd docs
npm install
npm run build
npm run serve
```
## Pre-Push Checklist
Before pushing documentation changes to git, ensure:
- [ ] **Node.js build succeeds**: `cd docs && npm ci && npm run build` (matches GitHub Actions)
- [ ] **Local preview works**: Documentation serves correctly at http://localhost:3001
- [ ] **No broken links**: Run link checker (included in test script)
- [ ] **Build output valid**: Check `docs/build` directory structure
- [ ] **No console errors**: Check browser console for JavaScript errors
**Quick pre-push validation command (Node.js - Recommended):**
```bash
cd docs && npm ci && npm run build && echo "✅ Ready to push!"
```
**Alternative Docker validation (if Dockerfile is configured):**
```bash
docker build -f Dockerfile.docs -t documcp-docs-test . && \
docker run --rm -d -p 3001:3001 --name docs-test documcp-docs-test && \
sleep 5 && curl -f http://localhost:3001 > /dev/null && \
docker stop docs-test && echo "✅ Ready to push!"
```
**Note**: GitHub Actions uses Node.js directly (not Docker), so testing with `npm run build` is the most reliable way to validate CI will succeed.
## Verification Checklist
### ✅ Content Verification
- [ ] All pages load without errors
- [ ] Navigation works correctly
- [ ] Links between pages function properly
- [ ] Search functionality works (if enabled)
- [ ] Code blocks render correctly with syntax highlighting
- [ ] Images and assets load properly
### ✅ Structure Verification
- [ ] Sidebar navigation reflects your documentation structure
- [ ] Categories and sections are properly organized
- [ ] Page titles and descriptions are accurate
- [ ] Breadcrumb navigation works
- [ ] Footer links are functional
### ✅ Content Quality
- [ ] No broken internal links
- [ ] No broken external links
- [ ] Code examples are up-to-date
- [ ] Screenshots are current and clear
- [ ] All content follows Diataxis framework principles
### ✅ Performance Testing
- [ ] Pages load quickly (< 3 seconds)
- [ ] Search is responsive
- [ ] No console errors in browser developer tools
- [ ] Mobile responsiveness works correctly
## Troubleshooting Common Issues
### Container Build Failures
**Problem**: Container build fails
**Solutions**:
```bash
# Clean up any existing containers and images
docker system prune -f
# or: podman system prune -f
# Rebuild from scratch
docker build --no-cache -f Dockerfile.docs -t documcp-docs .
# or: podman build --no-cache -f Dockerfile.docs -t documcp-docs .
# Check for syntax errors in markdown files
find docs -name "*.md" -exec npx markdownlint {} \;
```
### Container Runtime Issues
**Problem**: "Neither Podman nor Docker found"
**Solutions**:
```bash
# Check if Docker/Podman is installed and running
docker --version
podman --version
# On macOS, ensure Docker Desktop is running
# On Linux, ensure Docker daemon is started:
sudo systemctl start docker
# For Podman on macOS, start the machine:
podman machine start
```
### Broken Links
**Problem**: Links between documentation pages don't work
**Solutions**:
- Check that file paths in your markdown match actual file locations
- Ensure relative links use correct syntax (e.g., `[text](../reference/configuration.md)`)
- Verify that `sidebars.js` references match actual file names
### Missing Pages
**Problem**: Some documentation pages don't appear in navigation
**Solutions**:
- Update `docs/sidebars.js` to include new pages
- Ensure files are in the correct directory structure
- Check that frontmatter is properly formatted
### Styling Issues
**Problem**: Documentation doesn't look right
**Solutions**:
- Check `docs/src/css/custom.css` for custom styles
- Verify Docusaurus theme configuration
- Clear browser cache and reload
## Link Checking
### Automated Link Checking
DocuMCP provides built-in link checking:
```bash
# Check all links
npm run docs:check-links
# Check only external links
npm run docs:check-links:external
# Check only internal links
npm run docs:check-links:internal
```
### Manual Link Checking
Use markdown-link-check for comprehensive link validation:
```bash
# Install globally
npm install -g markdown-link-check
# Check specific file
markdown-link-check docs/index.md
# Check all markdown files
find docs -name "*.md" -exec markdown-link-check {} \;
```
## Container Configuration Testing
### Verify Container Configuration
```bash
# Test container health
docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}"
# or: podman ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}"
# Check container logs
docker logs documcp-docs-test
# or: podman logs documcp-docs-test
# Execute commands inside running container
docker exec -it documcp-docs-test sh
# or: podman exec -it documcp-docs-test sh
```
### Test Different Container Environments
```bash
# Test production build in container
docker run --rm -e NODE_ENV=production -p 3001:3001 documcp-docs
# Interactive debugging mode
docker run --rm -it --entrypoint sh documcp-docs
# Inside container: cd docs-site && npm run build --verbose
```
## Deployment Preview
Before deploying to GitHub Pages, test with production settings:
```bash
# Build with production configuration
npm run build
# Serve the production build locally
npm run serve
```
This simulates exactly what GitHub Pages will serve.
## Integration with Development Workflow
### Pre-commit Testing
Add documentation testing to your git hooks:
```bash
# .husky/pre-commit
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"
# Run documentation tests
./test-docs-local.sh --build-only
# Run your regular tests
npm test
```
### CI/CD Integration
Add documentation testing to your GitHub Actions:
```yaml
# .github/workflows/docs-test.yml
name: Documentation Tests
on:
  pull_request:
    paths:
      - "docs/**"
jobs:
  test-docs:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "20"
          cache: "npm"
          cache-dependency-path: "docs/package-lock.json"
      - name: Test documentation build
        run: ./test-docs-local.sh --build-only
```
## Advanced Testing
### Performance Testing
```bash
# Install lighthouse CLI
npm install -g lighthouse
# Test performance of local documentation
lighthouse http://localhost:3001 --output=json --output-path=./lighthouse-report.json
# Check specific performance metrics
lighthouse http://localhost:3001 --only-categories=performance
```
### Accessibility Testing
```bash
# Test accessibility
lighthouse http://localhost:3001 --only-categories=accessibility
# Use axe for detailed accessibility testing
npm install -g axe-cli
axe http://localhost:3001
```
### SEO Testing
```bash
# Test SEO optimization
lighthouse http://localhost:3001 --only-categories=seo
# Check meta tags and structure
curl -s http://localhost:3001 | grep -E "<title>|<meta"
```
## Automated Testing Script
Create a comprehensive test script:
```bash
#!/bin/bash
# comprehensive-docs-test.sh
echo "🧪 Running comprehensive documentation tests..."
# Build test
echo "📦 Testing build..."
cd docs && npm run build
# Link checking
echo "🔗 Checking links..."
cd .. && npm run docs:check-links:all
# Performance test (if lighthouse is available)
if command -v lighthouse &> /dev/null; then
  echo "⚡ Testing performance..."
  cd docs && npm run serve &
  SERVER_PID=$!
  sleep 5
  lighthouse http://localhost:3001 --quiet --only-categories=performance
  kill $SERVER_PID
fi
echo "✅ All tests completed!"
```
## Best Practices
### 1. Test Early and Often
- Test after every significant documentation change
- Include documentation testing in your regular development workflow
- Set up automated testing in CI/CD pipelines
### 2. Test Different Scenarios
- Test with different screen sizes and devices
- Test with JavaScript disabled
- Test with slow internet connections
### 3. Monitor Performance
- Keep an eye on build times
- Monitor page load speeds
- Check for large images or files that slow down the site
### 4. Validate Content Quality
- Use spell checkers and grammar tools
- Ensure code examples work and are current
- Verify that external links are still valid
By following this guide, you can ensure your documentation works perfectly before deploying to GitHub Pages, providing a better experience for your users and avoiding broken deployments.
```
--------------------------------------------------------------------------------
/MCP_PHASE2_IMPLEMENTATION.md:
--------------------------------------------------------------------------------
```markdown
# MCP Phase 2 Implementation: Roots Permission System
**Status:** ✅ Complete
**Implementation Date:** October 9, 2025
**Build Status:** ✅ Successful
**Test Status:** ✅ 127/127 tests passing
## Overview
Phase 2 implements the **Roots Permission System** for DocuMCP, adding user-granted file/folder access control following MCP best practices. This enhances security by restricting server operations to explicitly allowed directories and improves UX by enabling autonomous file discovery.
## Key Features Implemented
### 1. **Roots Capability Declaration**
- Added `roots.listChanged: true` to server capabilities
- Signals to MCP clients that the server supports roots management
- Enables clients to query allowed directories via `ListRoots` request
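
In sketch form, the capability declaration looks like this (illustrative; the exact constructor options live in `src/index.ts`, and the version string here is a placeholder):

```typescript
import { Server } from "@modelcontextprotocol/sdk/server/index.js";

// Illustrative: declaring the roots capability alongside existing ones.
const server = new Server(
  { name: "documcp", version: "x.y.z" },
  { capabilities: { tools: {}, roots: { listChanged: true } } },
);
```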
### 2. **CLI Argument Parsing**
- Added `--root` flag support for specifying allowed directories
- Supports multiple roots: `--root /path/one --root /path/two`
- Automatic `~` expansion for home directory paths
- Defaults to current working directory if no roots specified
### 3. **ListRoots Handler**
- Implements MCP `ListRootsRequest` protocol
- Returns all allowed roots as file:// URIs
- Provides friendly names using `path.basename()`
- Example response:
```json
{
"roots": [
{ "uri": "file:///Users/user/projects", "name": "projects" },
{ "uri": "file:///Users/user/workspace", "name": "workspace" }
]
}
```
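
A minimal sketch of the handler behind this response (illustrative; it assumes the SDK's `ListRootsRequestSchema` plus the `server` instance and `allowedRoots` array built during startup):

```typescript
import { ListRootsRequestSchema } from "@modelcontextprotocol/sdk/types.js";
import path from "path";

// Illustrative sketch: return each allowed root as a file:// URI with a
// basename-derived display name.
server.setRequestHandler(ListRootsRequestSchema, async () => ({
  roots: allowedRoots.map((root) => ({
    uri: `file://${root}`,
    name: path.basename(root),
  })),
}));
```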
### 4. **Permission Checker Utility**
- **Location:** `src/utils/permission-checker.ts`
- **Functions:**
- `isPathAllowed(requestedPath, allowedRoots)` - Validates path access
- `getPermissionDeniedMessage(requestedPath, allowedRoots)` - User-friendly error messages
- **Security:** Uses `path.relative()` to detect directory traversal attempts
- **Algorithm:** Resolves both paths to absolute form, then grants access only if the relative path from an allowed root to the requested path neither starts with `..` nor is absolute
### 5. **read_directory Tool**
- New tool for discovering files and directories within allowed roots
- Enables autonomous exploration without requiring full absolute paths from users
- Returns structured data:
```typescript
{
path: string,
files: string[],
directories: string[],
totalFiles: number,
totalDirectories: number
}
```
- Enforces permission checks before listing
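
A condensed sketch of the listing logic behind this tool (illustrative; the real handler in `src/index.ts` also wraps the result with `formatMCPResponse`, and the permission helpers are the ones described in section 4):

```typescript
import { promises as fs } from "fs";
import {
  isPathAllowed,
  getPermissionDeniedMessage,
} from "./utils/permission-checker.js";

// Illustrative sketch: enforce the roots check, then split directory
// entries into files and subdirectories.
async function readDirectory(dirPath: string, allowedRoots: string[]) {
  if (!isPathAllowed(dirPath, allowedRoots)) {
    throw new Error(getPermissionDeniedMessage(dirPath, allowedRoots));
  }
  const entries = await fs.readdir(dirPath, { withFileTypes: true });
  const files = entries.filter((e) => e.isFile()).map((e) => e.name);
  const directories = entries
    .filter((e) => e.isDirectory())
    .map((e) => e.name);
  return {
    path: dirPath,
    files,
    directories,
    totalFiles: files.length,
    totalDirectories: directories.length,
  };
}
```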
### 6. **Permission Enforcement in File-Based Tools**
- Added permission checks to 5 critical tools:
- `analyze_repository`
- `setup_structure`
- `populate_diataxis_content`
- `validate_diataxis_content`
- `check_documentation_links`
- Returns structured `PERMISSION_DENIED` errors with resolution guidance
- Example error:
```json
{
"success": false,
"error": {
"code": "PERMISSION_DENIED",
"message": "Access denied: Path \"/etc/passwd\" is outside allowed roots. Allowed roots: /Users/user/project",
"resolution": "Request access to this directory by starting the server with --root argument, or use a path within allowed roots."
}
}
```
## Files Modified
### 1. `src/index.ts` (+120 lines)
**Changes:**
- Added default `path` import and permission checker imports (lines 17, 44-48)
- CLI argument parsing for `--root` flags (lines 69-84)
- Added roots capability to server (lines 101-103)
- Added `read_directory` tool definition (lines 706-717)
- Implemented `ListRoots` handler (lines 1061-1067)
- Implemented `read_directory` handler (lines 1874-1938)
- Added permission checks to 5 file-based tools (multiple sections)
### 2. `src/utils/permission-checker.ts` (NEW +49 lines)
**Functions:**
- `isPathAllowed()` - Core permission validation logic
- `getPermissionDeniedMessage()` - Standardized error messaging
- Comprehensive JSDoc documentation with examples
## Technical Implementation Details
### CLI Argument Parsing
```typescript
// Parse allowed roots from command line arguments
const allowedRoots: string[] = [];
process.argv.forEach((arg, index) => {
if (arg === "--root" && process.argv[index + 1]) {
const rootPath = process.argv[index + 1];
// Resolve to absolute path and expand ~ for home directory
const expandedPath = rootPath.startsWith("~")
? join(
process.env.HOME || process.env.USERPROFILE || "",
rootPath.slice(1),
)
: rootPath;
allowedRoots.push(path.resolve(expandedPath));
}
});
// If no roots specified, allow current working directory by default
if (allowedRoots.length === 0) {
allowedRoots.push(process.cwd());
}
```
### Permission Check Pattern
```typescript
// Check if path is allowed
const repoPath = (args as any)?.path;
if (repoPath && !isPathAllowed(repoPath, allowedRoots)) {
return formatMCPResponse({
success: false,
error: {
code: "PERMISSION_DENIED",
message: getPermissionDeniedMessage(repoPath, allowedRoots),
resolution:
"Request access to this directory by starting the server with --root argument, or use a path within allowed roots.",
},
metadata: {
toolVersion: packageJson.version,
executionTime: 0,
timestamp: new Date().toISOString(),
},
});
}
```
### Security Algorithm
The `isPathAllowed()` function uses `path.relative()` to detect directory traversal:
1. Resolve requested path to absolute path
2. For each allowed root:
- Resolve root to absolute path
- Calculate relative path from root to requested path
- If relative path doesn't start with `..` and isn't absolute, access is granted
3. Return `false` if no roots allow access
This prevents attacks like:
- `/project/../../../etc/passwd` - blocked (relative path starts with `..`)
- `/etc/passwd` when root is `/project` - blocked (not within root)
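
In code, the check reduces to a few lines. This is a minimal sketch of the algorithm above; the shipped version in `src/utils/permission-checker.ts` adds JSDoc and error messaging:

```typescript
import path from "path";

// Minimal sketch of the traversal-safe containment check described above.
export function isPathAllowed(
  requestedPath: string,
  allowedRoots: string[],
): boolean {
  const resolved = path.resolve(requestedPath);
  return allowedRoots.some((root) => {
    const rel = path.relative(path.resolve(root), resolved);
    // Inside the root only if the relative path neither climbs out via
    // ".." nor lands on a different absolute path (e.g., another drive).
    return !rel.startsWith("..") && !path.isAbsolute(rel);
  });
}
```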
## Testing Results
### Build Status
✅ TypeScript compilation successful with no errors
### Test Suite
✅ **127/127 tests passing (100%)**
**Key Test Coverage:**
- Tool validation and error handling
- Memory system integration
- Knowledge graph operations
- Functional end-to-end workflows
- Integration tests
- Edge case handling
**No Regressions:**
- All existing tests continue to pass
- No breaking changes to tool APIs
- Backward compatible implementation
## Security Improvements
### Before Phase 2
- ❌ Server could access any file on the system
- ❌ No permission boundaries
- ❌ Users must provide full absolute paths
- ❌ No visibility into allowed directories
### After Phase 2
- ✅ Access restricted to explicitly allowed roots
- ✅ Directory traversal attacks prevented
- ✅ Users can use relative paths within roots
- ✅ Clients can query allowed directories via ListRoots
- ✅ Clear, actionable error messages when access denied
- ✅ Default to CWD for safe local development
## User Experience Improvements
### Discovery Without Full Paths
Users can now explore repositories without knowing exact file locations:
```
User: "Analyze my project"
Claude: Uses read_directory to discover project structure
Claude: Finds package.json, analyzes dependencies, generates docs
```
### Clear Error Messages
When access is denied, users receive helpful guidance:
```
Access denied: Path "/private/data" is outside allowed roots.
Allowed roots: /Users/user/projects
Resolution: Request access to this directory by starting the server
with --root argument, or use a path within allowed roots.
```
### Flexible Configuration
Server can be started with multiple allowed roots:
```bash
# Single root
npx documcp --root /Users/user/projects
# Multiple roots
npx documcp --root /Users/user/projects --root /Users/user/workspace
# Default (current directory)
npx documcp
```
## Usage Examples
### Starting Server with Roots
```bash
# Allow access to specific project
npx documcp --root /Users/user/my-project
# Allow access to multiple directories
npx documcp --root ~/projects --root ~/workspace
# Use home directory expansion
npx documcp --root ~/code
# Default to current directory
npx documcp
```
### read_directory Tool Usage
```typescript
// Discover files in allowed root
{
"name": "read_directory",
"arguments": {
"path": "/Users/user/projects/my-app"
}
}
// Response
{
"success": true,
"data": {
"path": "/Users/user/projects/my-app",
"files": ["package.json", "README.md", "tsconfig.json"],
"directories": ["src", "tests", "docs"],
"totalFiles": 3,
"totalDirectories": 3
}
}
```
### ListRoots Request
```typescript
// Request
{
"method": "roots/list"
}
// Response
{
"roots": [
{"uri": "file:///Users/user/projects", "name": "projects"}
]
}
```
## Alignment with MCP Best Practices
✅ **Roots Protocol Compliance**
- Implements `roots.listChanged` capability
- Provides `ListRoots` handler
- Uses standardized file:// URI format
✅ **Security First**
- Path validation using battle-tested algorithms
- Directory traversal prevention
- Principle of least privilege (explicit allow-list)
✅ **User-Centric Design**
- Clear error messages with actionable resolutions
- Flexible CLI configuration
- Safe defaults (CWD)
✅ **Autonomous Operation**
- `read_directory` enables file discovery
- No need for users to specify full paths
- Tools can explore within allowed roots
## Integration with Phase 1
Phase 2 builds on Phase 1's foundation:
**Phase 1 (Progress & Logging):**
- Added visibility into long-running operations
- Tools report progress at logical checkpoints
**Phase 2 (Roots & Permissions):**
- Adds security boundaries and permission checks
- Progress notifications can now include permission validation steps
- Example: "Validating path permissions..." → "Analyzing repository..."
**Combined Benefits:**
- Users see both progress AND permission enforcement
- Clear feedback when operations are blocked by permissions
- Transparent, secure, and user-friendly experience
## Performance Impact
✅ **Negligible Overhead**
- Permission checks: O(n) where n = number of allowed roots (typically 1-5)
- `path.resolve()` and `path.relative()` are highly optimized native operations
- No measurable impact on tool execution time
- All tests pass with no performance degradation
## Troubleshooting Guide
### Issue: "Access denied" errors
**Cause:** Requested path is outside allowed roots
**Solution:** Start server with `--root` flag for the desired directory
### Issue: ListRoots returns empty array
**Cause:** No roots specified and CWD not writable
**Solution:** Explicitly specify roots with `--root` flag
### Issue: ~ expansion not working
**Cause:** Server doesn't have HOME or USERPROFILE environment variable
**Solution:** Use absolute paths instead of ~ shorthand
## Next Steps (Phase 3)
Phase 3 will implement:
1. **HTTP Transport** - Remote server deployment with HTTP/HTTPS
2. **Transport Selection** - Environment-based stdio vs. HTTP choice
3. **Sampling Support** - LLM-powered content generation for creative tasks
4. **Configuration Management** - Environment variables for all settings
## Conclusion
Phase 2 successfully implements the Roots Permission System, bringing DocuMCP into full compliance with MCP security best practices. The implementation:
- ✅ Enforces strict access control without compromising usability
- ✅ Enables autonomous file discovery within allowed roots
- ✅ Provides clear, actionable feedback for permission violations
- ✅ Maintains 100% backward compatibility
- ✅ Passes all 127 tests with no regressions
- ✅ Adds minimal performance overhead
- ✅ Follows MCP protocol standards
**Total Changes:**
- 1 new file created (`permission-checker.ts`)
- 1 existing file modified (`index.ts`)
- 169 net lines added
- 6 new capabilities added (roots, ListRoots, read_directory, 5 tool permission checks)
**Quality Metrics:**
- Build: ✅ Successful
- Tests: ✅ 127/127 passing (100%)
- Regressions: ✅ None
- Performance: ✅ No measurable impact
- Security: ✅ Significantly improved
```
--------------------------------------------------------------------------------
/docs/reference/prompt-templates.md:
--------------------------------------------------------------------------------
```markdown
---
documcp:
last_updated: "2025-11-20T00:46:21.963Z"
last_validated: "2025-11-20T00:46:21.963Z"
auto_updated: false
update_frequency: monthly
---
# Prompt Templates
DocuMCP provides a comprehensive set of prompt templates to help you interact effectively with the system. These templates are designed to get optimal results from DocuMCP's AI-powered documentation tools.
## Quick Reference
### Complete Workflow Templates
**Full Documentation Deployment:**
```
analyze my repository, recommend the best static site generator, set up Diataxis documentation structure, and deploy to GitHub Pages
```
**Documentation Audit:**
```
analyze my existing documentation for gaps, validate content accuracy, and provide recommendations for improvement
```
**Quick Setup:**
```
analyze my [LANGUAGE] project and set up documentation with the most suitable static site generator
```
## Repository Analysis Templates
### Basic Analysis
```
analyze my repository for documentation needs
```
### Specific Project Types
```
analyze my TypeScript library for API documentation requirements
analyze my Python package for comprehensive documentation needs
analyze my React application for user guide documentation
analyze my CLI tool for usage documentation
```
### Deep Analysis
```
perform deep analysis of my repository including dependency analysis, complexity assessment, and team collaboration patterns
```
### Focused Analysis
```
analyze my repository focusing on [SPECIFIC_AREA]
# Examples:
# - API documentation opportunities
# - user onboarding needs
# - developer experience gaps
# - deployment documentation requirements
```
## SSG Recommendation Templates
### Basic Recommendation
```
recommend the best static site generator for my project based on the analysis
```
### Preference-Based Recommendations
```
recommend a static site generator for my project with preferences for [ECOSYSTEM] and [PRIORITY]
# Ecosystem options: javascript, python, ruby, go, any
# Priority options: simplicity, features, performance
```
### Comparison Requests
```
compare static site generators for my [PROJECT_TYPE] with focus on [CRITERIA]
# Project types: library, application, tool, documentation
# Criteria: ease of use, customization, performance, community support
```
### Specific Requirements
```
recommend SSG for my project that supports:
- TypeScript integration
- API documentation generation
- Search functionality
- Custom theming
- Multi-language support
```
## Configuration Generation Templates
### Basic Configuration
```
generate [SSG_NAME] configuration for my project
# Examples:
# - generate Docusaurus configuration for my project
# - generate Hugo configuration for my project
# - generate MkDocs configuration for my project
```
### Detailed Configuration
```
generate comprehensive [SSG_NAME] configuration with:
- GitHub integration
- Custom domain setup
- Analytics integration
- SEO optimization
- Performance optimizations
```
### Production-Ready Setup
```
generate production-ready [SSG_NAME] configuration with security best practices and performance optimization
```
## Documentation Structure Templates
### Basic Structure
```
set up Diataxis documentation structure for my project
```
### SSG-Specific Structure
```
create [SSG_NAME] documentation structure following Diataxis principles with example content
```
### Content Population
```
set up documentation structure and populate it with project-specific content based on my code analysis
```
### Advanced Structure
```
create comprehensive documentation structure with:
- Diataxis organization
- Project-specific content
- Code examples from my repository
- API documentation templates
- Deployment guides
```
## Deployment Templates
### Basic GitHub Pages Deployment
```
deploy my documentation to GitHub Pages
```
### Complete Deployment Workflow
```
set up automated GitHub Pages deployment with:
- Build optimization
- Security best practices
- Performance monitoring
- Deployment verification
```
### Custom Domain Deployment
```
deploy to GitHub Pages with custom domain [DOMAIN_NAME] and SSL certificate
```
### Multi-Environment Deployment
```
set up documentation deployment with staging and production environments
```
## Content Management Templates
### Content Validation
```
validate all my documentation content for accuracy, broken links, and completeness
```
### Gap Analysis
```
analyze my documentation for missing content and provide recommendations for improvement
```
### Content Updates
```
update my existing documentation based on recent code changes and current best practices
```
### Quality Assurance
```
perform comprehensive quality check on my documentation including:
- Link validation
- Code example testing
- Content accuracy verification
- SEO optimization assessment
```
## Troubleshooting Templates
### General Troubleshooting
```
diagnose and fix issues with my documentation deployment
```
### Specific Problem Solving
```
troubleshoot [SPECIFIC_ISSUE] with my documentation setup
# Examples:
# - GitHub Pages deployment failures
# - build errors with my static site generator
# - broken links in my documentation
# - performance issues with my documentation site
```
### Verification and Testing
```
verify my documentation deployment is working correctly and identify any issues
```
## Memory and Learning Templates
### Memory Recall
```
show me insights from similar projects and successful documentation patterns
```
### Learning from History
```
based on previous analyses, what are the best practices for my type of project?
```
### Pattern Recognition
```
analyze patterns in my documentation workflow and suggest optimizations
```
## Advanced Workflow Templates
### Multi-Step Workflows
**Research and Planning:**
```
1. analyze my repository comprehensively
2. research best practices for my project type
3. recommend optimal documentation strategy
4. create implementation plan
```
**Implementation and Validation:**
```
1. set up recommended documentation structure
2. populate with project-specific content
3. validate all content and links
4. deploy to GitHub Pages
5. verify deployment success
```
**Maintenance and Optimization:**
```
1. audit existing documentation for gaps
2. update content based on code changes
3. optimize for performance and SEO
4. monitor deployment health
```
### Conditional Workflows
```
if my project is a [TYPE], then:
- focus on [SPECIFIC_DOCUMENTATION_NEEDS]
- use [RECOMMENDED_SSG]
- emphasize [CONTENT_PRIORITIES]
```
## Context-Aware Templates
### Project-Specific Context
```
for my [PROJECT_TYPE] written in [LANGUAGE] with [FRAMEWORK]:
- analyze documentation needs
- recommend appropriate tools
- create tailored content structure
```
### Team-Based Context
```
for a [TEAM_SIZE] team working on [PROJECT_DESCRIPTION]:
- set up collaborative documentation workflow
- implement review and approval processes
- create contribution guidelines
```
### Audience-Specific Context
```
create documentation targeting [AUDIENCE]:
- developers (API docs, technical guides)
- end users (tutorials, how-to guides)
- contributors (development setup, guidelines)
- administrators (deployment, configuration)
```
## Template Customization
### Variables and Placeholders
Use these placeholders in templates:
| Placeholder | Description | Examples |
| ---------------- | --------------------- | --------------------------------- |
| `[PROJECT_TYPE]` | Type of project | library, application, tool |
| `[LANGUAGE]` | Programming language | TypeScript, Python, Go |
| `[SSG_NAME]` | Static site generator | Docusaurus, Hugo, MkDocs |
| `[DOMAIN_NAME]` | Custom domain | docs.example.com |
| `[FRAMEWORK]` | Framework used | React, Vue, Django |
| `[TEAM_SIZE]` | Team size | small, medium, large |
| `[ECOSYSTEM]` | Package ecosystem | javascript, python, ruby |
| `[PRIORITY]` | Priority focus | simplicity, features, performance |
### Creating Custom Templates
```
create custom template for [SPECIFIC_USE_CASE]:
- define requirements
- specify desired outcomes
- include success criteria
- provide examples
```
## Best Practices for Prompting
### Effective Prompt Structure
1. **Be Specific:** Include relevant details about your project
2. **Set Context:** Mention your experience level and constraints
3. **Define Success:** Explain what a good outcome looks like
4. **Ask for Explanation:** Request reasoning behind recommendations
### Example of Well-Structured Prompt
```
I have a TypeScript library for data visualization with 50+ contributors.
I need comprehensive documentation that includes:
- API reference for all public methods
- Interactive examples with code samples
- Getting started guide for developers
- Contribution guidelines for the community
Please analyze my repository, recommend the best approach, and set up a
documentation system that can handle our scale and complexity.
```
### Common Pitfalls to Avoid
- **Too vague:** "help with documentation"
- **Missing context:** Not mentioning project type or requirements
- **No constraints:** Not specifying limitations or preferences
- **Single-step thinking:** Not considering the full workflow
## Integration with Development Workflow
### Git Hooks Integration
```
set up pre-commit hooks to:
- validate documentation changes
- check for broken links
- ensure content quality
- update generated content
```
### CI/CD Integration
```
create GitHub Actions workflow that:
- validates documentation on every PR
- deploys docs on main branch updates
- runs quality checks automatically
- notifies team of issues
```
### IDE Integration
```
configure development environment for:
- live documentation preview
- automated link checking
- content validation
- template generation
```
## Troubleshooting Prompts
### When Things Don't Work
**Analysis Issues:**
```
my repository analysis returned incomplete results, please retry with deep analysis and explain what might have caused the issue
```
**Recommendation Problems:**
```
the SSG recommendation doesn't match my needs because [REASON], please provide alternative recommendations with different priorities
```
**Deployment Failures:**
```
my GitHub Pages deployment failed with [ERROR_MESSAGE], please diagnose the issue and provide a fix
```
**Content Issues:**
```
my generated documentation has [PROBLEM], please update the content and ensure it meets [REQUIREMENTS]
```
For more troubleshooting help, see the [Troubleshooting Guide](../how-to/troubleshooting.md).
## Template Categories Summary
| Category | Purpose | Key Templates |
| ------------------- | ---------------------- | ---------------------------------- |
| **Analysis** | Understanding projects | Repository analysis, gap detection |
| **Recommendation** | Tool selection | SSG comparison, feature matching |
| **Configuration** | Setup and config | Production configs, optimization |
| **Structure** | Content organization | Diataxis setup, content population |
| **Deployment** | Going live | GitHub Pages, custom domains |
| **Validation** | Quality assurance | Link checking, content validation |
| **Troubleshooting** | Problem solving | Diagnosis, issue resolution |
| **Workflow** | Process automation | Multi-step procedures, CI/CD |
These templates provide a solid foundation for effective interaction with DocuMCP. Customize them based on your specific needs and project requirements.
```
--------------------------------------------------------------------------------
/tests/integration/kg-documentation-workflow.test.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Integration Tests for Knowledge Graph Documentation Workflow
* Tests end-to-end workflow from repository analysis to documentation tracking
*/
import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
import { promises as fs } from "fs";
import path from "path";
import { tmpdir } from "os";
import { analyzeRepository } from "../../src/tools/analyze-repository.js";
import {
initializeKnowledgeGraph,
getKnowledgeGraph,
saveKnowledgeGraph,
} from "../../src/memory/kg-integration.js";
describe("KG Documentation Workflow Integration", () => {
let testDir: string;
beforeEach(async () => {
testDir = path.join(tmpdir(), `documcp-integration-${Date.now()}`);
await fs.mkdir(testDir, { recursive: true });
// Initialize KG with test storage
const storageDir = path.join(testDir, ".documcp/memory");
await initializeKnowledgeGraph(storageDir);
});
afterEach(async () => {
try {
await fs.rm(testDir, { recursive: true, force: true });
} catch {
// Ignore cleanup errors
}
});
it("should complete full workflow: analyze → create entities → link relationships", async () => {
// Setup: Create a test repository structure
const srcDir = path.join(testDir, "src");
const docsDir = path.join(testDir, "docs");
await fs.mkdir(srcDir, { recursive: true });
await fs.mkdir(docsDir, { recursive: true });
// Create source code
await fs.writeFile(
path.join(srcDir, "auth.ts"),
`
export class AuthService {
async login(username: string, password: string) {
return { token: "abc123" };
}
async logout(token: string) {
return true;
}
}
export function validateToken(token: string) {
return token.length > 0;
}
`,
"utf-8",
);
// Create documentation
await fs.writeFile(
path.join(docsDir, "api.md"),
`
# Authentication API
## Login
Use the \`login()\` method from \`AuthService\` class in \`src/auth.ts\`:
\`\`\`typescript
const auth = new AuthService();
const result = await auth.login(username, password);
\`\`\`
## Logout
Call \`logout()\` with the authentication token:
\`\`\`typescript
await auth.logout(token);
\`\`\`
## Token Validation
Use \`validateToken()\` function to validate tokens.
`,
"utf-8",
);
await fs.writeFile(
path.join(testDir, "README.md"),
"# Test Project",
"utf-8",
);
await fs.writeFile(
path.join(testDir, "package.json"),
JSON.stringify({ name: "test-project", version: "1.0.0" }),
"utf-8",
);
// Act: Run repository analysis
const analysisResult = await analyzeRepository({
path: testDir,
depth: "standard",
});
// Assert: Analysis completed (may have errors due to test environment)
expect(analysisResult.content).toBeDefined();
expect(analysisResult.content.length).toBeGreaterThan(0);
// If analysis succeeded, verify structure
if (!analysisResult.isError) {
const analysis = JSON.parse(analysisResult.content[0].text);
if (analysis.success) {
expect(analysis.data.structure.hasDocs).toBe(true);
}
}
// Wait for KG operations to complete
await new Promise((resolve) => setTimeout(resolve, 100));
// Verify: Check knowledge graph entities
const kg = await getKnowledgeGraph();
const allNodes = await kg.getAllNodes();
const allEdges = await kg.getAllEdges();
// Should have project, code files, and documentation sections
const projectNodes = allNodes.filter((n) => n.type === "project");
const codeFileNodes = allNodes.filter((n) => n.type === "code_file");
const docSectionNodes = allNodes.filter(
(n) => n.type === "documentation_section",
);
expect(projectNodes.length).toBeGreaterThan(0);
expect(codeFileNodes.length).toBeGreaterThan(0);
expect(docSectionNodes.length).toBeGreaterThan(0);
// Verify code file details
const authFile = codeFileNodes.find((n) =>
n.properties.path.includes("auth.ts"),
);
expect(authFile).toBeDefined();
expect(authFile?.properties.language).toBe("typescript");
expect(authFile?.properties.classes).toContain("AuthService");
expect(authFile?.properties.functions).toContain("validateToken");
// Verify documentation sections
const apiDoc = docSectionNodes.find((n) =>
n.properties.filePath.includes("api.md"),
);
expect(apiDoc).toBeDefined();
expect(apiDoc?.properties.hasCodeExamples).toBe(true);
expect(apiDoc?.properties.referencedFunctions.length).toBeGreaterThan(0);
// Verify relationships
const referencesEdges = allEdges.filter((e) => e.type === "references");
const documentsEdges = allEdges.filter((e) => e.type === "documents");
expect(referencesEdges.length).toBeGreaterThan(0);
expect(documentsEdges.length).toBeGreaterThan(0);
// Verify specific relationship: api.md references auth.ts
const apiToAuthEdge = referencesEdges.find(
(e) => e.source === apiDoc?.id && e.target === authFile?.id,
);
expect(apiToAuthEdge).toBeDefined();
expect(apiToAuthEdge?.properties.referenceType).toBe("api-reference");
});
it("should detect outdated documentation when code changes", async () => {
// Setup: Create initial code and docs
const srcDir = path.join(testDir, "src");
const docsDir = path.join(testDir, "docs");
await fs.mkdir(srcDir, { recursive: true });
await fs.mkdir(docsDir, { recursive: true });
await fs.writeFile(
path.join(srcDir, "user.ts"),
"export function getUser() {}",
"utf-8",
);
await fs.writeFile(
path.join(docsDir, "guide.md"),
"Call `getUser()` from `src/user.ts`",
"utf-8",
);
await fs.writeFile(path.join(testDir, "README.md"), "# Test", "utf-8");
await fs.writeFile(path.join(testDir, "package.json"), "{}", "utf-8");
// First analysis
await analyzeRepository({ path: testDir, depth: "standard" });
await new Promise((resolve) => setTimeout(resolve, 100));
// Simulate code change
await new Promise((resolve) => setTimeout(resolve, 100)); // Ensure different timestamp
await fs.writeFile(
path.join(srcDir, "user.ts"),
"export function getUser(id: string) {} // CHANGED",
"utf-8",
);
// Second analysis
await analyzeRepository({ path: testDir, depth: "standard" });
await new Promise((resolve) => setTimeout(resolve, 100));
// Verify: Check that system handled multiple analyses
// In a real scenario, outdated_for edges would be created
// For this test, just verify no crashes occurred
const kg = await getKnowledgeGraph();
const allNodes = await kg.getAllNodes();
// Should have created some nodes from both analyses
expect(allNodes.length).toBeGreaterThan(0);
});
it("should handle projects with no documentation gracefully", async () => {
// Setup: Code-only project
const srcDir = path.join(testDir, "src");
await fs.mkdir(srcDir, { recursive: true });
await fs.writeFile(
path.join(srcDir, "index.ts"),
"export function main() {}",
"utf-8",
);
await fs.writeFile(path.join(testDir, "package.json"), "{}", "utf-8");
// Act
await analyzeRepository({ path: testDir, depth: "standard" });
await new Promise((resolve) => setTimeout(resolve, 100));
// Verify: Should still create code entities, just no doc entities
const kg = await getKnowledgeGraph();
const allNodes = await kg.getAllNodes();
const codeFileNodes = allNodes.filter((n) => n.type === "code_file");
const docSectionNodes = allNodes.filter(
(n) => n.type === "documentation_section",
);
expect(codeFileNodes.length).toBeGreaterThan(0);
expect(docSectionNodes.length).toBe(0);
});
it("should handle multi-file projects correctly", async () => {
// Setup: Multiple source files
const srcDir = path.join(testDir, "src");
await fs.mkdir(path.join(srcDir, "auth"), { recursive: true });
await fs.mkdir(path.join(srcDir, "db"), { recursive: true });
await fs.writeFile(
path.join(srcDir, "auth", "login.ts"),
"export function login() {}",
"utf-8",
);
await fs.writeFile(
path.join(srcDir, "auth", "logout.ts"),
"export function logout() {}",
"utf-8",
);
await fs.writeFile(
path.join(srcDir, "db", "query.ts"),
"export function query() {}",
"utf-8",
);
await fs.writeFile(path.join(testDir, "package.json"), "{}", "utf-8");
// Act
await analyzeRepository({ path: testDir, depth: "standard" });
await new Promise((resolve) => setTimeout(resolve, 100));
// Verify
const kg = await getKnowledgeGraph();
const codeFileNodes = (await kg.getAllNodes()).filter(
(n) => n.type === "code_file",
);
expect(codeFileNodes.length).toBe(3);
const paths = codeFileNodes.map((n) => n.properties.path);
expect(paths).toContain("src/auth/login.ts");
expect(paths).toContain("src/auth/logout.ts");
expect(paths).toContain("src/db/query.ts");
});
it("should persist knowledge graph to storage", async () => {
// Setup
const srcDir = path.join(testDir, "src");
await fs.mkdir(srcDir, { recursive: true });
await fs.writeFile(
path.join(srcDir, "test.ts"),
"export function test() {}",
"utf-8",
);
await fs.writeFile(path.join(testDir, "package.json"), "{}", "utf-8");
// Act
await analyzeRepository({ path: testDir, depth: "standard" });
await new Promise((resolve) => setTimeout(resolve, 100));
// Save KG
await saveKnowledgeGraph();
// Verify storage files exist
const storageDir = path.join(testDir, ".documcp/memory");
const entitiesFile = path.join(
storageDir,
"knowledge-graph-entities.jsonl",
);
const relationshipsFile = path.join(
storageDir,
"knowledge-graph-relationships.jsonl",
);
const entitiesExist = await fs
.access(entitiesFile)
.then(() => true)
.catch(() => false);
const relationshipsExist = await fs
.access(relationshipsFile)
.then(() => true)
.catch(() => false);
expect(entitiesExist).toBe(true);
expect(relationshipsExist).toBe(true);
// Verify content
const entitiesContent = await fs.readFile(entitiesFile, "utf-8");
expect(entitiesContent).toContain("code_file");
});
it("should calculate coverage metrics for documentation", async () => {
// Setup: 3 functions, docs covering 2 of them
const srcDir = path.join(testDir, "src");
const docsDir = path.join(testDir, "docs");
await fs.mkdir(srcDir, { recursive: true });
await fs.mkdir(docsDir, { recursive: true });
await fs.writeFile(
path.join(srcDir, "api.ts"),
`
export function create() {}
export function read() {}
export function update() {} // Not documented
`,
"utf-8",
);
await fs.writeFile(
path.join(docsDir, "api.md"),
`
# API Reference
- \`create()\`: Creates a resource
- \`read()\`: Reads a resource
`,
"utf-8",
);
await fs.writeFile(path.join(testDir, "README.md"), "# Test", "utf-8");
await fs.writeFile(path.join(testDir, "package.json"), "{}", "utf-8");
// Act
await analyzeRepository({ path: testDir, depth: "standard" });
await new Promise((resolve) => setTimeout(resolve, 100));
// Verify coverage
const kg = await getKnowledgeGraph();
const documentsEdges = (await kg.getAllEdges()).filter(
(e) => e.type === "documents",
);
expect(documentsEdges.length).toBeGreaterThan(0);
const coverage = documentsEdges[0].properties.coverage;
expect(["partial", "complete", "comprehensive"]).toContain(coverage);
// 2/3 ≈ 67% coverage should map to "complete"
expect(coverage).toBe("complete");
});
});
```
--------------------------------------------------------------------------------
/docs/how-to/github-pages-deployment.md:
--------------------------------------------------------------------------------
```markdown
---
documcp:
last_updated: "2025-11-20T00:46:21.951Z"
last_validated: "2025-11-20T00:46:21.951Z"
auto_updated: false
update_frequency: monthly
---
# How to Deploy to GitHub Pages
This guide shows you how to deploy your documentation to GitHub Pages using DocuMCP's automated workflows. DocuMCP uses a dual-static-site-generator approach for optimal deployment.
## Architecture Overview
DocuMCP employs a **dual SSG strategy**:
- **Docusaurus**: Primary documentation system for development and rich content
- **Jekyll**: GitHub Pages deployment for reliable hosting
- **Docker**: Alternative testing and deployment method
## Quick Deployment
For immediate deployment:
```bash
# Prompt DocuMCP:
"deploy my documentation to GitHub Pages"
```
## Prerequisites
- Repository with documentation content
- GitHub account with repository access
- GitHub Pages enabled in repository settings
- Node.js 20.0.0+ for Docusaurus development
## Deployment Methods
### Method 1: Automated with DocuMCP (Recommended)
Use DocuMCP's intelligent deployment:
```bash
# Complete workflow:
"analyze my repository, recommend SSG, and deploy to GitHub Pages"
```
This will (see the client-side sketch after this list):
1. Analyze your project structure
2. Set up Docusaurus for development
3. Configure Jekyll for GitHub Pages deployment
4. Create GitHub Actions workflow
5. Deploy to Pages
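Under the hood, these steps map onto DocuMCP's MCP tools. The sketch below shows how an MCP client could drive the same workflow programmatically; the tool names and argument shapes are illustrative assumptions, not the server's fixed contract:
```typescript
// Minimal client surface assumed for this sketch; real MCP SDK clients
// expose an equivalent tool-calling method.
interface McpClient {
  callTool(name: string, args: Record<string, unknown>): Promise<any>;
}

// Hypothetical end-to-end driver mirroring the five steps above.
// Verify tool names against the server's actual tool list.
async function deployDocs(client: McpClient, repoPath: string) {
  const analysis = await client.callTool("analyze_repository", {
    path: repoPath,
    depth: "standard",
  });
  const recommendation = await client.callTool("recommend_ssg", {
    analysis, // pass the prior result through; exact shape is server-defined
  });
  return client.callTool("deploy_pages", {
    path: repoPath,
    ssg: recommendation?.recommended ?? "docusaurus",
  });
}
```
In practice the conversational prompt above orchestrates these calls for you; the sketch only makes the sequence explicit.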
### Method 2: Current DocuMCP Setup
DocuMCP currently uses the following deployment workflow:
#### GitHub Actions Workflow
```yaml
name: Deploy Jekyll to GitHub Pages
on:
push:
branches: [main]
workflow_dispatch:
permissions:
contents: read
pages: write
id-token: write
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Ruby
uses: ruby/setup-ruby@v1
with:
ruby-version: "3.1"
bundler-cache: true
- name: Build with Jekyll
run: bundle exec jekyll build
env:
JEKYLL_ENV: production
- name: Setup Pages
uses: actions/configure-pages@v5
- name: Upload artifact
uses: actions/upload-pages-artifact@v4
with:
path: "./_site"
deploy:
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
needs: build
permissions:
contents: read
pages: write
id-token: write
steps:
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v4
```
#### Development vs Production
- **Development**: Use Docusaurus (`cd docs && npm start`)
- **Production**: Jekyll builds and deploys to GitHub Pages
- **Testing**: Use Docker (`docker-compose -f docker-compose.docs.yml up`)
### Method 3: Manual Configuration
If you prefer manual setup:
#### Step 1: Choose Your SSG
```bash
# Get recommendation first:
"recommend static site generator for my project"
```
#### Step 2: Generate Config
```bash
# For example, with Hugo:
"generate Hugo configuration for GitHub Pages deployment"
```
#### Step 3: Deploy
```bash
"set up GitHub Pages deployment workflow for Hugo"
```
## GitHub Actions Workflow
DocuMCP generates optimized workflows for each SSG:
### Docusaurus Workflow
```yaml
name: Deploy Docusaurus
on:
push:
branches: [main]
paths: ["docs/**", "docusaurus.config.js"]
permissions:
contents: read
pages: write
id-token: write
jobs:
deploy:
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: "20"
cache: "npm"
- name: Install dependencies
run: npm ci
- name: Build
run: npm run build
- name: Setup Pages
uses: actions/configure-pages@v5
- name: Upload artifact
uses: actions/upload-pages-artifact@v4
with:
path: "./build"
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v4
```
### Hugo Workflow
```yaml
name: Deploy Hugo
on:
push:
branches: [main]
paths: ["content/**", "config.yml", "themes/**"]
permissions:
contents: read
pages: write
id-token: write
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
- name: Setup Hugo
uses: peaceiris/actions-hugo@v2
with:
hugo-version: "latest"
extended: true
- name: Build
run: hugo --minify
- name: Setup Pages
uses: actions/configure-pages@v5
- name: Upload artifact
uses: actions/upload-pages-artifact@v4
with:
path: "./public"
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v4
```
### MkDocs Workflow
```yaml
name: Deploy MkDocs
on:
push:
branches: [main]
paths: ["docs/**", "mkdocs.yml"]
permissions:
contents: read
pages: write
id-token: write
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v4
with:
python-version: "3.x"
- name: Install dependencies
run: |
pip install mkdocs mkdocs-material
- name: Build
run: mkdocs build
- name: Setup Pages
uses: actions/configure-pages@v5
- name: Upload artifact
uses: actions/upload-pages-artifact@v4
with:
path: "./site"
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v4
```
## Repository Configuration
### GitHub Pages Settings
1. Navigate to repository **Settings**
2. Go to **Pages** section
3. Set **Source** to "GitHub Actions"
4. Save configuration
### Branch Protection
Protect your main branch:
```yaml
# .github/branch-protection.yml
protection_rules:
main:
required_status_checks:
strict: true
contexts:
- "Deploy Documentation"
enforce_admins: false
required_pull_request_reviews:
required_approving_review_count: 1
```
## Custom Domain Setup
### Add Custom Domain
1. Create `CNAME` file in your docs directory:
```
docs.yourdomain.com
```
2. Configure DNS records:
```
CNAME docs yourusername.github.io
```
3. Update DocuMCP deployment:
```bash
"deploy to GitHub Pages with custom domain docs.yourdomain.com"
```
### SSL Certificate
GitHub automatically provides SSL certificates for custom domains.
Verification:
- Check `https://docs.yourdomain.com` loads correctly
- Verify SSL certificate is valid
- Test redirect from `http://` to `https://`
## Environment Configuration
### Production Optimization
DocuMCP automatically configures:
**Build optimization:**
```yaml
- name: Build with optimization
run: |
export NODE_ENV=production
npm run build
env:
CI: true
NODE_OPTIONS: --max-old-space-size=4096
```
**Caching strategy:**
```yaml
- name: Cache dependencies
uses: actions/cache@v4
with:
path: ~/.npm
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-node-
```
### Environment Variables
Set up environment variables for production:
1. Go to repository **Settings**
2. Navigate to **Secrets and variables** > **Actions**
3. Add production variables:
- `HUGO_ENV=production`
- `NODE_ENV=production`
- Custom API keys (if needed)
## Deployment Verification
### Automatic Verification
DocuMCP includes verification:
```bash
"verify my GitHub Pages deployment is working correctly"
```
This checks (a manual spot-check sketch follows this list):
- ✅ Site is accessible
- ✅ All pages load correctly
- ✅ Navigation works
- ✅ Search functionality (if enabled)
- ✅ Mobile responsiveness
- ✅ SSL certificate validity
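If you want to run a similar spot-check yourself, the following is a minimal sketch using Node 18+'s global `fetch`; the URL is a placeholder for your actual Pages URL:
```typescript
// Minimal deployment spot-check (assumes Node 18+ with global fetch).
const siteUrl = "https://username.github.io/repository/";

async function checkDeployment(url: string): Promise<void> {
  const response = await fetch(url, { redirect: "follow" });
  if (!response.ok) {
    throw new Error(`Site returned HTTP ${response.status}`);
  }
  if (!response.url.startsWith("https://")) {
    throw new Error("Site did not resolve to HTTPS");
  }
  const html = await response.text();
  if (!html.includes("<title>")) {
    throw new Error("Response does not look like a rendered page");
  }
  console.log(`OK: ${response.url} (HTTP ${response.status})`);
}

checkDeployment(siteUrl).catch((error) => {
  console.error("Deployment check failed:", error);
  process.exitCode = 1;
});
```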
### Manual Verification Checklist
- [ ] Homepage loads at `https://username.github.io/repository`
- [ ] All navigation links work
- [ ] Search functions properly
- [ ] Mobile layout is responsive
- [ ] Images and assets load
- [ ] Forms work (if applicable)
- [ ] Analytics tracking (if configured)
## Troubleshooting Deployment Issues
### Common Problems
**Build Fails:**
```bash
# Check workflow logs in GitHub Actions tab
# Common issues:
- Node.js version mismatch
- Missing dependencies
- Configuration errors
```
**404 Errors:**
```bash
# Fix baseURL configuration
# For Docusaurus:
baseUrl: '/repository-name/',
# For Hugo:
baseURL: 'https://username.github.io/repository-name/'
```
**Assets Not Loading:**
```bash
# Check publicPath configuration
# Ensure all asset paths are relative
```
### Debug Mode
Enable verbose build output in workflows:
```yaml
- name: Debug build
  run: |
    npm run build -- --verbose
  env:
    DEBUG: true
```
Note: GitHub Actions step debug logging is enabled by setting the repository secret `ACTIONS_STEP_DEBUG` to `true` (under **Settings** > **Secrets and variables**), not by a step-level environment variable.
## Performance Optimization
### Build Performance
Optimize build times:
```yaml
- name: Cache build assets
uses: actions/cache@v4
with:
path: |
.next/cache
.docusaurus/cache
public/static
key: ${{ runner.os }}-build-${{ hashFiles('**/*.md', '**/*.js') }}
```
### Site Performance
DocuMCP automatically optimizes:
- **Image compression**: WebP format when possible
- **CSS minification**: Remove unused styles
- **JavaScript bundling**: Code splitting and tree shaking
- **Asset preloading**: Critical resources loaded first
## Monitoring and Analytics
### GitHub Actions Monitoring
Set up notifications for deployment failures:
```yaml
- name: Notify on failure
if: failure()
uses: actions/github-script@v7
with:
script: |
github.rest.issues.create({
owner: context.repo.owner,
repo: context.repo.repo,
title: 'Documentation Deployment Failed',
body: 'Deployment workflow failed. Check logs for details.',
labels: ['deployment', 'bug']
});
```
### Site Analytics
Add analytics to track usage:
**Google Analytics (Docusaurus):**
```javascript
// docusaurus.config.js
const config = {
presets: [
[
"classic",
{
gtag: {
trackingID: "G-XXXXXXXXXX",
anonymizeIP: true,
},
},
],
],
};
```
## Advanced Deployment Strategies
### Multi-Environment Deployment
Deploy to staging and production using two separate workflow files:
```yaml
# Deploy to staging on PR
on:
pull_request:
branches: [main]
# Deploy to production on merge
on:
push:
branches: [main]
```
### Rollback Strategy
Implement deployment rollback:
```yaml
- name: Store deployment info
run: |
echo "DEPLOYMENT_SHA=${{ github.sha }}" >> $GITHUB_ENV
echo "DEPLOYMENT_TIME=$(date)" >> $GITHUB_ENV
- name: Create rollback script
run: |
echo "#!/bin/bash" > rollback.sh
echo "git checkout ${{ env.DEPLOYMENT_SHA }}" >> rollback.sh
chmod +x rollback.sh
```
## Security Considerations
### Permissions
DocuMCP uses minimal permissions:
```yaml
permissions:
contents: read # Read repository content
pages: write # Deploy to GitHub Pages
id-token: write # OIDC authentication
```
### Secrets Management
Never commit secrets to repository:
- Use GitHub Actions secrets
- Environment variables for configuration
- OIDC tokens for authentication
## Next Steps
After successful deployment:
1. **[Monitor your site](site-monitoring.md)** for uptime and performance
2. **[Set up custom domain](custom-domains.md)** (optional)
3. **[Optimize for SEO](seo-optimization.md)**
4. **[Configure analytics](analytics-setup.md)**
## Summary
You now know how to:
✅ Deploy documentation using DocuMCP automation
✅ Configure GitHub Actions workflows
✅ Set up custom domains and SSL
✅ Verify deployments are working
✅ Troubleshoot common issues
✅ Optimize build and site performance
✅ Monitor deployments and analytics
Your documentation is now live and automatically updated!
```
--------------------------------------------------------------------------------
/docs/adrs/010-mcp-resource-pattern-redesign.md:
--------------------------------------------------------------------------------
```markdown
---
documcp:
last_updated: "2025-11-20T00:46:21.944Z"
last_validated: "2025-11-20T00:46:21.944Z"
auto_updated: false
update_frequency: monthly
---
# ADR-010: MCP Resource Pattern Redesign
**Status:** Accepted
**Date:** 2025-10-09
**Deciders:** Development Team
**Context:** MCP Best Practices Review
---
## Context and Problem Statement
During an MCP best practices review (2025-10-09), a critical architectural misalignment was identified: DocuMCP was using MCP resources as a **persistence layer** to store tool execution results, violating the fundamental MCP control pattern philosophy.
**The Problem:**
- Resources were storing tool outputs via `storeResourceFromToolResult()`
- A `resourceStore` Map held dynamic tool results
- Resource URIs were generated at runtime (e.g., `documcp://analysis/{timestamp}-{random}`)
- This violated MCP's core principle that resources should **serve applications**, not store tool results
**Why This Matters:**
According to MCP best practices, the three primitives have distinct control patterns:
- **Tools** = Model-controlled (Claude decides when to execute) → Serve the **model**
- **Resources** = App-controlled (application decides when to fetch) → Serve the **app**
- **Prompts** = User-controlled (user triggers via actions) → Serve **users**
Using resources for tool result storage conflates model operations with app operations, creating architectural confusion and misusing the MCP protocol.
---
## Decision Drivers
### Technical Requirements
- Align with MCP specification and best practices
- Follow proper control pattern separation
- Maintain backward compatibility where possible
- Preserve existing tool functionality
### Architectural Principles
- **Separation of Concerns:** Tools handle execution, resources provide app data
- **Statelessness:** MCP servers should be stateless; persistence belongs elsewhere
- **Clear Purpose:** Each primitive serves its intended audience
### Developer Experience
- Simplify resource implementation
- Make resource purpose obvious
- Enable proper MCP Inspector testing
---
## Considered Options
### Option 1: Keep Current Pattern (Status Quo) ❌
**Description:** Continue using resources to store tool results.
**Pros:**
- No code changes required
- Existing URIs remain functional
- No migration needed
**Cons:**
- ❌ Violates MCP best practices
- ❌ Confuses model operations with app operations
- ❌ Makes MCP Inspector testing unclear
- ❌ Creates unnecessary complexity
- ❌ Misrepresents resource purpose
**Decision:** Rejected due to architectural misalignment
---
### Option 2: Remove All Resources ❌
**Description:** Eliminate resources entirely, return all data via tools only.
**Pros:**
- Simplifies implementation
- Eliminates resource confusion
- Focuses on tools as primary interface
**Cons:**
- ❌ Removes legitimate use cases for app-controlled data
- ❌ Loses template access for UI
- ❌ Prevents SSG list for dropdowns
- ❌ Underutilizes MCP capabilities
**Decision:** Rejected - throws the baby out with the bathwater
---
### Option 3: Redesign Resources for App Needs ✅ (CHOSEN)
**Description:** Remove tool result storage, create static resources that serve application UI needs.
**Pros:**
- ✅ Aligns with MCP best practices
- ✅ Clear separation: tools execute, resources provide app data
- ✅ Enables proper MCP Inspector testing
- ✅ Provides legitimate value to applications
- ✅ Follows control pattern philosophy
**Cons:**
- Requires code refactoring
- Changes resource URIs (but tools remain compatible)
**Decision:** **ACCEPTED** - Best aligns with MCP architecture
---
## Decision Outcome
**Chosen Option:** Option 3 - Redesign Resources for App Needs
### Implementation Details
#### 1. Remove Tool Result Storage
**Before:**
```typescript
const resourceStore = new Map<string, { content: string; mimeType: string }>();
function storeResourceFromToolResult(
toolName: string,
args: any,
result: any,
id?: string,
): string {
const uri = `documcp://analysis/${id}`;
resourceStore.set(uri, {
content: JSON.stringify(result),
mimeType: "application/json",
});
return uri;
}
// In tool handler:
const result = await analyzeRepository(args);
const resourceUri = storeResourceFromToolResult(
"analyze_repository",
args,
result,
);
(result as any).resourceUri = resourceUri;
return result;
```
**After:**
```typescript
// No resource storage! Tools return results directly
const result = await analyzeRepository(args);
return wrapToolResult(result, "analyze_repository");
```
#### 2. Create Static App-Serving Resources
**New Resource Categories:**
**A. SSG List Resource** (for UI dropdowns)
```typescript
{
uri: "documcp://ssgs/available",
name: "Available Static Site Generators",
description: "List of supported SSGs with capabilities for UI selection",
mimeType: "application/json"
}
```
Returns:
```json
{
"ssgs": [
{
"id": "jekyll",
"name": "Jekyll",
"description": "Ruby-based SSG, great for GitHub Pages",
"language": "ruby",
"complexity": "low",
"buildSpeed": "medium",
"ecosystem": "mature",
"bestFor": ["blogs", "documentation", "simple-sites"]
}
// ... 4 more SSGs
]
}
```
**B. Configuration Templates** (for SSG setup)
```typescript
{
uri: "documcp://templates/jekyll-config",
name: "Jekyll Configuration Template",
description: "Template for Jekyll _config.yml",
mimeType: "text/yaml"
}
```
Returns actual YAML template for Jekyll configuration.
**C. Workflow Resources** (for UI workflow display)
```typescript
{
uri: "documcp://workflows/all",
name: "All Documentation Workflows",
description: "Complete list of available documentation workflows",
mimeType: "application/json"
}
```
#### 3. Resource Handler Implementation
```typescript
server.setRequestHandler(ReadResourceRequestSchema, async (request) => {
const { uri } = request.params;
// Handle SSG list (for UI)
if (uri === "documcp://ssgs/available") {
return {
contents: [{
uri,
mimeType: "application/json",
text: JSON.stringify({ ssgs: [...] })
}]
};
}
// Handle templates (static content)
if (uri.startsWith("documcp://templates/")) {
const templateType = uri.split("/").pop();
return {
contents: [{
uri,
mimeType: getTemplateMimeType(templateType),
text: getTemplateContent(templateType)
}]
};
}
throw new Error(`Resource not found: ${uri}`);
});
```
### Resource Design Principles
1. **Static Content Only:** Resources return pre-defined, static data
2. **App-Controlled:** Applications fetch resources when needed for UI
3. **Predictable URIs:** Fixed URIs (no timestamps or random IDs)
4. **Clear Purpose:** Each resource serves a specific app UI need
---
## Consequences
### Positive Consequences ✅
1. **Architectural Alignment**
- Resources now properly serve applications
- Clear separation between tools and resources
- Follows MCP control pattern philosophy
2. **Improved Developer Experience**
- Resource purpose is obvious
- MCP Inspector testing is clear
- No confusion about resource lifecycle
3. **Better Testability**
- Resources return predictable content
- Can test resources independently
- MCP Inspector works correctly
4. **Simplified Implementation**
- Removed `resourceStore` Map
- Removed `storeResourceFromToolResult()` function
- Removed 50+ lines of resource storage code
- Tools are simpler (no resource URI tracking)
5. **Legitimate App Value**
- SSG list enables UI dropdowns
- Templates provide boilerplate content
- Workflows guide user actions
### Negative Consequences ⚠️
1. **Breaking Change for Resource URIs**
- Old dynamic URIs (`documcp://analysis/{timestamp}`) no longer work
- Applications relying on these URIs need updates
- **Mitigation:** Tools return data directly; URIs were internal implementation detail
2. **No Tool Result Persistence**
- Tool results are not stored between executions
- Applications must handle result storage if needed
- **Mitigation:** MCP servers should be stateless; persistence is app responsibility
3. **Migration Effort**
- Required updating all tool handlers
- Updated resource definitions
- **Time Cost:** ~4 hours
---
## Implementation Results
### Code Changes
**Files Modified:**
- `src/index.ts` (main server file)
- Removed `resourceStore` Map (10 lines)
- Removed `storeResourceFromToolResult()` (50 lines)
- Redesigned `RESOURCES` array (12 new resources)
- Updated `ReadResourceRequestSchema` handler (150 lines)
- Removed resource storage from all tools (30+ locations)
**Lines of Code:**
- **Removed:** ~120 lines (resource storage logic)
- **Added:** ~200 lines (static resource handlers)
- **Net Change:** +80 lines (but much clearer purpose)
### Test Results
**Before Implementation:**
- Tests: 122/122 passing ✅
- TypeScript: Compiles ✅
**After Implementation:**
- Tests: 122/122 passing ✅
- TypeScript: Compiles ✅
- No broken tests
- No regression issues
### Performance Impact
**Before:**
- Resource storage: O(1) Map insertion per tool
- Memory: Growing Map of all tool results
**After:**
- Resource retrieval: O(1) static content lookup
- Memory: Fixed size (no growth)
**Improvement:** Reduced memory usage, no performance degradation
---
## Compliance with MCP Best Practices
### Before Redesign
- **Resource Implementation:** 3/10 ❌
- **Control Patterns:** 4/10 ❌
### After Redesign
- **Resource Implementation:** 9/10 ✅
- **Control Patterns:** 9/10 ✅
---
## Migration Guide
### For Client Applications
**Old Pattern (No Longer Works):**
```javascript
// Execute tool
const result = await callTool("analyze_repository", { path: "./" });
// WRONG: Try to fetch from resource URI
const resourceUri = result.resourceUri;
const resource = await readResource(resourceUri); // ❌ Will fail
```
**New Pattern (Recommended):**
```javascript
// Execute tool - result contains all data
const result = await callTool("analyze_repository", { path: "./" });
// Use result directly (no need for resources)
console.log(result.data); // ✅ All data is here
// Use resources for app UI needs
const ssgList = await readResource("documcp://ssgs/available"); // ✅ For dropdowns
const template = await readResource("documcp://templates/jekyll-config"); // ✅ For setup
```
### For Tool Developers
**Old Pattern:**
```typescript
const result = await analyzeRepository(args);
const resourceUri = storeResourceFromToolResult(
"analyze_repository",
args,
result,
);
(result as any).resourceUri = resourceUri;
return result;
```
**New Pattern:**
```typescript
const result = await analyzeRepository(args);
return wrapToolResult(result, "analyze_repository"); // Standardized wrapper
```
---
## References
- **MCP Specification:** https://modelcontextprotocol.io/docs
- **MCP Best Practices Review:** `MCP_BEST_PRACTICES_REVIEW.md`
- **MCP Inspector Guide:** `docs/development/MCP_INSPECTOR_TESTING.md`
- **Related ADRs:**
- ADR-006: MCP Tools API Design
- ADR-007: MCP Prompts and Resources Integration
---
## Notes
### Design Philosophy
The resource redesign embodies a core MCP principle: **each primitive serves its audience**.
- **Tools** answer the question: _"What can Claude do?"_
- **Resources** answer the question: _"What data does my app need?"_
- **Prompts** answer the question: _"What workflows can users trigger?"_
Mixing these purposes creates architectural debt and violates separation of concerns.
### Future Enhancements
**Potential Additional Resources:**
- `documcp://themes/available` - UI theme list
- `documcp://validators/rules` - Validation rule catalog
- `documcp://examples/{category}` - Example content library
These should all follow the same principle: **serve the application's UI needs**, not store execution results.
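As a sketch of how one of these could slot into the existing handler pattern (the `documcp://themes/available` URI and its theme list are hypothetical):
```typescript
// Hypothetical future resource following the static-content pattern above:
// fixed URI, predictable payload, no execution results.
type ResourceContent = { uri: string; mimeType: string; text: string };

function readThemesResource(uri: string): { contents: ResourceContent[] } {
  if (uri !== "documcp://themes/available") {
    throw new Error(`Resource not found: ${uri}`);
  }
  return {
    contents: [
      {
        uri,
        mimeType: "application/json",
        // Static, predictable content: no timestamps or random IDs.
        text: JSON.stringify({
          themes: [
            { id: "classic", name: "Classic", supports: ["docusaurus"] },
            { id: "material", name: "Material", supports: ["mkdocs"] },
          ],
        }),
      },
    ],
  };
}
```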
---
**Last Updated:** 2025-10-09
**Status:** Implemented and Verified ✅
```
--------------------------------------------------------------------------------
/tests/memory/knowledge-graph.test.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Basic unit tests for Knowledge Graph System
* Tests basic instantiation and core functionality
* Part of Issue #54 - Core Memory System Unit Tests
*/
import { promises as fs } from "fs";
import path from "path";
import os from "os";
import { MemoryManager } from "../../src/memory/manager.js";
import {
KnowledgeGraph,
GraphNode,
GraphEdge,
} from "../../src/memory/knowledge-graph.js";
describe("KnowledgeGraph", () => {
let tempDir: string;
let memoryManager: MemoryManager;
let graph: KnowledgeGraph;
beforeEach(async () => {
// Create unique temp directory for each test
tempDir = path.join(
os.tmpdir(),
`memory-graph-test-${Date.now()}-${Math.random()
.toString(36)
.substr(2, 9)}`,
);
await fs.mkdir(tempDir, { recursive: true });
// Create memory manager for knowledge graph
memoryManager = new MemoryManager(tempDir);
await memoryManager.initialize();
graph = new KnowledgeGraph(memoryManager);
await graph.initialize();
});
afterEach(async () => {
// Cleanup temp directory
try {
await fs.rm(tempDir, { recursive: true, force: true });
} catch (error) {
// Ignore cleanup errors
}
});
describe("Basic Graph Operations", () => {
test("should create knowledge graph instance", () => {
expect(graph).toBeDefined();
expect(graph).toBeInstanceOf(KnowledgeGraph);
});
test("should add nodes to the graph", () => {
const projectNode: Omit<GraphNode, "lastUpdated"> = {
id: "project:test-project",
type: "project",
label: "Test Project",
properties: {
language: "typescript",
framework: "react",
},
weight: 1.0,
};
const addedNode = graph.addNode(projectNode);
expect(addedNode).toBeDefined();
expect(addedNode.id).toBe("project:test-project");
expect(addedNode.type).toBe("project");
expect(addedNode.lastUpdated).toBeDefined();
});
test("should add edges to the graph", () => {
// First add nodes
const projectNode = graph.addNode({
id: "project:web-app",
type: "project",
label: "Web App",
properties: { language: "typescript" },
weight: 1.0,
});
const techNode = graph.addNode({
id: "tech:react",
type: "technology",
label: "React",
properties: { category: "framework" },
weight: 1.0,
});
// Add edge
const edge: Omit<GraphEdge, "id" | "lastUpdated"> = {
source: projectNode.id,
target: techNode.id,
type: "uses",
weight: 1.0,
confidence: 0.9,
properties: { importance: "high" },
};
const addedEdge = graph.addEdge(edge);
expect(addedEdge).toBeDefined();
expect(addedEdge.source).toBe(projectNode.id);
expect(addedEdge.target).toBe(techNode.id);
expect(addedEdge.id).toBeDefined();
});
test("should get all nodes", async () => {
// Add some nodes
graph.addNode({
id: "project:test1",
type: "project",
label: "Test 1",
properties: {},
weight: 1.0,
});
graph.addNode({
id: "tech:vue",
type: "technology",
label: "Vue",
properties: {},
weight: 1.0,
});
const nodes = await graph.getAllNodes();
expect(Array.isArray(nodes)).toBe(true);
expect(nodes.length).toBe(2);
});
test("should get all edges", async () => {
// Add nodes and edges
const node1 = graph.addNode({
id: "project:test2",
type: "project",
label: "Test 2",
properties: {},
weight: 1.0,
});
const node2 = graph.addNode({
id: "tech:angular",
type: "technology",
label: "Angular",
properties: {},
weight: 1.0,
});
graph.addEdge({
source: node1.id,
target: node2.id,
type: "uses",
weight: 1.0,
confidence: 0.8,
properties: {},
});
const edges = await graph.getAllEdges();
expect(Array.isArray(edges)).toBe(true);
expect(edges.length).toBe(1);
});
});
describe("Graph Queries", () => {
test("should query nodes by type", () => {
// Add multiple nodes of different types
graph.addNode({
id: "project:project-a",
type: "project",
label: "Project A",
properties: {},
weight: 1.0,
});
graph.addNode({
id: "project:project-b",
type: "project",
label: "Project B",
properties: {},
weight: 1.0,
});
graph.addNode({
id: "tech:vue",
type: "technology",
label: "Vue",
properties: { category: "framework" },
weight: 1.0,
});
const results = graph.query({
nodeTypes: ["project"],
});
expect(results).toBeDefined();
expect(Array.isArray(results.nodes)).toBe(true);
expect(results.nodes.length).toBe(2);
expect(results.nodes.every((node) => node.type === "project")).toBe(true);
});
test("should find connections for a node", async () => {
// Add nodes and create connections
const projectNode = graph.addNode({
id: "project:connected-test",
type: "project",
label: "Connected Test",
properties: {},
weight: 1.0,
});
const techNode = graph.addNode({
id: "tech:express",
type: "technology",
label: "Express",
properties: {},
weight: 1.0,
});
graph.addEdge({
source: projectNode.id,
target: techNode.id,
type: "uses",
weight: 1.0,
confidence: 0.9,
properties: {},
});
const connections = await graph.getConnections(projectNode.id);
expect(Array.isArray(connections)).toBe(true);
expect(connections.length).toBe(1);
expect(connections[0]).toBe(techNode.id);
});
test("should find paths between nodes", () => {
// Add nodes and create a path
const projectNode = graph.addNode({
id: "project:path-test",
type: "project",
label: "Path Test Project",
properties: {},
weight: 1.0,
});
const techNode = graph.addNode({
id: "tech:nodejs",
type: "technology",
label: "Node.js",
properties: {},
weight: 1.0,
});
graph.addEdge({
source: projectNode.id,
target: techNode.id,
type: "uses",
weight: 1.0,
confidence: 0.9,
properties: {},
});
const path = graph.findPath(projectNode.id, techNode.id);
expect(path).toBeDefined();
expect(path?.nodes.length).toBe(2);
expect(path?.edges.length).toBe(1);
});
});
describe("Graph Analysis", () => {
test("should build from memory entries", async () => {
// Add some test memory entries first
await memoryManager.remember(
"analysis",
{
language: { primary: "python" },
framework: { name: "django" },
},
{
projectId: "analysis-project",
},
);
await memoryManager.remember(
"recommendation",
{
recommended: "mkdocs",
confidence: 0.9,
},
{
projectId: "analysis-project",
},
);
// Build graph from memories
await graph.buildFromMemories();
const nodes = await graph.getAllNodes();
// The buildFromMemories method might be implemented differently
// Just verify it doesn't throw and returns an array
expect(Array.isArray(nodes)).toBe(true);
// The graph might start empty, which is okay for this basic test
if (nodes.length > 0) {
// Optionally check node types if any were created
const nodeTypes = [...new Set(nodes.map((n) => n.type))];
expect(nodeTypes.length).toBeGreaterThan(0);
}
});
test("should generate graph-based recommendations", async () => {
// Add some memory data first
await memoryManager.remember(
"analysis",
{
language: { primary: "javascript" },
framework: { name: "react" },
},
{
projectId: "rec-test-project",
},
);
await graph.buildFromMemories();
const projectFeatures = {
language: "javascript",
framework: "react",
};
const recommendations = await graph.getGraphBasedRecommendation(
projectFeatures,
["docusaurus", "gatsby"],
);
expect(Array.isArray(recommendations)).toBe(true);
// Even if no recommendations found, should return empty array
});
test("should provide graph statistics", async () => {
// Add some nodes
graph.addNode({
id: "project:stats-test",
type: "project",
label: "Stats Test",
properties: {},
weight: 1.0,
});
graph.addNode({
id: "tech:webpack",
type: "technology",
label: "Webpack",
properties: {},
weight: 1.0,
});
const stats = await graph.getStatistics();
expect(stats).toBeDefined();
expect(typeof stats.nodeCount).toBe("number");
expect(typeof stats.edgeCount).toBe("number");
expect(typeof stats.nodesByType).toBe("object");
expect(typeof stats.averageConnectivity).toBe("number");
expect(Array.isArray(stats.mostConnectedNodes)).toBe(true);
});
});
describe("Error Handling", () => {
test("should handle removing non-existent nodes", async () => {
const removed = await graph.removeNode("non-existent-node");
expect(removed).toBe(false);
});
test("should handle concurrent graph operations", () => {
// Create multiple nodes concurrently
const nodes = Array.from({ length: 10 }, (_, i) =>
graph.addNode({
id: `project:concurrent-${i}`,
type: "project",
label: `Concurrent Project ${i}`,
properties: { index: i },
weight: 1.0,
}),
);
expect(nodes).toHaveLength(10);
expect(nodes.every((node) => typeof node.id === "string")).toBe(true);
});
test("should handle invalid query parameters", () => {
const results = graph.query({
nodeTypes: ["non-existent-type"],
});
expect(results).toBeDefined();
expect(Array.isArray(results.nodes)).toBe(true);
expect(results.nodes.length).toBe(0);
});
test("should handle empty graph operations", async () => {
// Test operations on empty graph
const path = graph.findPath("non-existent-1", "non-existent-2");
expect(path).toBeNull();
const connections = await graph.getConnections("non-existent-node");
expect(Array.isArray(connections)).toBe(true);
expect(connections.length).toBe(0);
});
});
describe("Persistence and Memory Integration", () => {
test("should save and load from memory", async () => {
// Add some data to the graph
graph.addNode({
id: "project:persistence-test",
type: "project",
label: "Persistence Test",
properties: {},
weight: 1.0,
});
// Save to memory
await graph.saveToMemory();
// Create new graph and load
const newGraph = new KnowledgeGraph(memoryManager);
await newGraph.loadFromMemory();
const nodes = await newGraph.getAllNodes();
expect(nodes.length).toBeGreaterThanOrEqual(0);
});
test("should handle empty graph statistics", async () => {
const stats = await graph.getStatistics();
expect(stats).toBeDefined();
expect(typeof stats.nodeCount).toBe("number");
expect(typeof stats.edgeCount).toBe("number");
expect(stats.nodeCount).toBe(0); // Empty graph initially
expect(stats.edgeCount).toBe(0);
});
});
});
```
--------------------------------------------------------------------------------
/tests/memory/kg-storage.test.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Tests for Knowledge Graph Storage
* Phase 1: Core Knowledge Graph Integration
*/
import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
import { promises as fs } from "fs";
import { join } from "path";
import { KGStorage } from "../../src/memory/kg-storage.js";
import { GraphNode, GraphEdge } from "../../src/memory/knowledge-graph.js";
import { tmpdir } from "os";
describe("KGStorage", () => {
let storage: KGStorage;
let testDir: string;
beforeEach(async () => {
// Create temporary test directory
testDir = join(tmpdir(), `kg-storage-test-${Date.now()}`);
await fs.mkdir(testDir, { recursive: true });
storage = new KGStorage({
storageDir: testDir,
backupOnWrite: true,
validateOnRead: true,
});
await storage.initialize();
});
afterEach(async () => {
// Clean up test directory
try {
await fs.rm(testDir, { recursive: true, force: true });
} catch (error) {
console.warn("Failed to clean up test directory:", error);
}
});
describe("Initialization", () => {
it("should create storage directory", async () => {
const stats = await fs.stat(testDir);
expect(stats.isDirectory()).toBe(true);
});
it("should create entity and relationship files", async () => {
const entityFile = join(testDir, "knowledge-graph-entities.jsonl");
const relationshipFile = join(
testDir,
"knowledge-graph-relationships.jsonl",
);
await fs.access(entityFile);
await fs.access(relationshipFile);
// Files should exist (no error thrown)
expect(true).toBe(true);
});
it("should write file markers", async () => {
const entityFile = join(testDir, "knowledge-graph-entities.jsonl");
const content = await fs.readFile(entityFile, "utf-8");
expect(content).toContain("# DOCUMCP_KNOWLEDGE_GRAPH_ENTITIES");
});
it("should reject non-DocuMCP files", async () => {
// Create a non-DocuMCP file
const fakeFile = join(testDir, "knowledge-graph-entities.jsonl");
await fs.writeFile(fakeFile, "not a documcp file\n", "utf-8");
const newStorage = new KGStorage({ storageDir: testDir });
await expect(newStorage.initialize()).rejects.toThrow(
"is not a DocuMCP knowledge graph file",
);
});
});
describe("Entity Storage", () => {
it("should save and load entities", async () => {
const entities: GraphNode[] = [
{
id: "project:test",
type: "project",
label: "Test Project",
properties: { name: "Test" },
weight: 1.0,
lastUpdated: new Date().toISOString(),
},
{
id: "tech:typescript",
type: "technology",
label: "TypeScript",
properties: { name: "TypeScript" },
weight: 1.0,
lastUpdated: new Date().toISOString(),
},
];
await storage.saveEntities(entities);
const loaded = await storage.loadEntities();
expect(loaded).toHaveLength(2);
expect(loaded[0].id).toBe("project:test");
expect(loaded[1].id).toBe("tech:typescript");
});
it("should handle empty entity list", async () => {
await storage.saveEntities([]);
const loaded = await storage.loadEntities();
expect(loaded).toHaveLength(0);
});
it("should preserve entity properties", async () => {
const entity: GraphNode = {
id: "project:complex",
type: "project",
label: "Complex Project",
properties: {
name: "Complex",
technologies: ["typescript", "react"],
metadata: { nested: { value: 123 } },
},
weight: 0.85,
lastUpdated: new Date().toISOString(),
};
await storage.saveEntities([entity]);
const loaded = await storage.loadEntities();
expect(loaded[0].properties.technologies).toEqual([
"typescript",
"react",
]);
expect(loaded[0].properties.metadata.nested.value).toBe(123);
});
});
describe("Relationship Storage", () => {
it("should save and load relationships", async () => {
const relationships: GraphEdge[] = [
{
id: "project:test-uses-tech:typescript",
source: "project:test",
target: "tech:typescript",
type: "uses",
weight: 1.0,
confidence: 0.9,
properties: {},
lastUpdated: new Date().toISOString(),
},
];
await storage.saveRelationships(relationships);
const loaded = await storage.loadRelationships();
expect(loaded).toHaveLength(1);
expect(loaded[0].source).toBe("project:test");
expect(loaded[0].target).toBe("tech:typescript");
});
it("should handle empty relationship list", async () => {
await storage.saveRelationships([]);
const loaded = await storage.loadRelationships();
expect(loaded).toHaveLength(0);
});
it("should preserve relationship properties", async () => {
const relationship: GraphEdge = {
id: "test-edge",
source: "node1",
target: "node2",
type: "similar_to",
weight: 0.75,
confidence: 0.8,
properties: {
similarityScore: 0.75,
sharedTechnologies: ["typescript"],
},
lastUpdated: new Date().toISOString(),
};
await storage.saveRelationships([relationship]);
const loaded = await storage.loadRelationships();
expect(loaded[0].properties.similarityScore).toBe(0.75);
expect(loaded[0].properties.sharedTechnologies).toEqual(["typescript"]);
});
});
describe("Complete Graph Storage", () => {
it("should save and load complete graph", async () => {
const entities: GraphNode[] = [
{
id: "project:test",
type: "project",
label: "Test",
properties: {},
weight: 1.0,
lastUpdated: new Date().toISOString(),
},
];
const relationships: GraphEdge[] = [
{
id: "test-edge",
source: "project:test",
target: "tech:ts",
type: "uses",
weight: 1.0,
confidence: 1.0,
properties: {},
lastUpdated: new Date().toISOString(),
},
];
await storage.saveGraph(entities, relationships);
const loaded = await storage.loadGraph();
expect(loaded.entities).toHaveLength(1);
expect(loaded.relationships).toHaveLength(1);
});
});
describe("Backup System", () => {
it("should create backups on write", async () => {
const entities: GraphNode[] = [
{
id: "test",
type: "project",
label: "Test",
properties: {},
weight: 1.0,
lastUpdated: new Date().toISOString(),
},
];
await storage.saveEntities(entities);
await storage.saveEntities(entities); // Second save should create backup
const backupDir = join(testDir, "backups");
const files = await fs.readdir(backupDir);
const backupFiles = files.filter((f) => f.startsWith("entities-"));
expect(backupFiles.length).toBeGreaterThan(0);
});
it("should restore from backup", async () => {
const entities1: GraphNode[] = [
{
id: "version1",
type: "project",
label: "V1",
properties: {},
weight: 1.0,
lastUpdated: new Date().toISOString(),
},
];
const entities2: GraphNode[] = [
{
id: "version2",
type: "project",
label: "V2",
properties: {},
weight: 1.0,
lastUpdated: new Date().toISOString(),
},
];
// Save first version
await storage.saveEntities(entities1);
// Small delay to ensure different timestamps
await new Promise((resolve) => setTimeout(resolve, 10));
// Save second version (creates backup of first)
await storage.saveEntities(entities2);
// Verify we have second version
let loaded = await storage.loadEntities();
expect(loaded).toHaveLength(1);
expect(loaded[0].id).toBe("version2");
// Restore from backup
await storage.restoreFromBackup("entities");
// Verify we have first version back
loaded = await storage.loadEntities();
expect(loaded).toHaveLength(1);
expect(loaded[0].id).toBe("version1");
});
});
describe("Statistics", () => {
it("should return accurate statistics", async () => {
const entities: GraphNode[] = [
{
id: "e1",
type: "project",
label: "E1",
properties: {},
weight: 1.0,
lastUpdated: new Date().toISOString(),
},
{
id: "e2",
type: "technology",
label: "E2",
properties: {},
weight: 1.0,
lastUpdated: new Date().toISOString(),
},
];
const relationships: GraphEdge[] = [
{
id: "r1",
source: "e1",
target: "e2",
type: "uses",
weight: 1.0,
confidence: 1.0,
properties: {},
lastUpdated: new Date().toISOString(),
},
];
await storage.saveGraph(entities, relationships);
const stats = await storage.getStatistics();
expect(stats.entityCount).toBe(2);
expect(stats.relationshipCount).toBe(1);
expect(stats.schemaVersion).toBe("1.0.0");
expect(stats.fileSize.entities).toBeGreaterThan(0);
});
});
describe("Integrity Verification", () => {
it("should detect orphaned relationships", async () => {
const entities: GraphNode[] = [
{
id: "e1",
type: "project",
label: "E1",
properties: {},
weight: 1.0,
lastUpdated: new Date().toISOString(),
},
];
const relationships: GraphEdge[] = [
{
id: "r1",
source: "e1",
target: "missing", // References non-existent entity
type: "uses",
weight: 1.0,
confidence: 1.0,
properties: {},
lastUpdated: new Date().toISOString(),
},
];
await storage.saveGraph(entities, relationships);
const result = await storage.verifyIntegrity();
expect(result.valid).toBe(true); // No errors, just warnings
expect(result.warnings.length).toBeGreaterThan(0);
expect(result.warnings[0]).toContain("missing");
});
it("should detect duplicate entities", async () => {
const entities: GraphNode[] = [
{
id: "duplicate",
type: "project",
label: "E1",
properties: {},
weight: 1.0,
lastUpdated: new Date().toISOString(),
},
{
id: "duplicate",
type: "project",
label: "E2",
properties: {},
weight: 1.0,
lastUpdated: new Date().toISOString(),
},
];
await storage.saveEntities(entities);
const result = await storage.verifyIntegrity();
expect(result.valid).toBe(false);
expect(result.errors.length).toBeGreaterThan(0);
expect(result.errors[0]).toContain("Duplicate entity ID");
});
});
describe("Export", () => {
it("should export graph as JSON", async () => {
const entities: GraphNode[] = [
{
id: "test",
type: "project",
label: "Test",
properties: {},
weight: 1.0,
lastUpdated: new Date().toISOString(),
},
];
await storage.saveEntities(entities);
const json = await storage.exportAsJSON();
const parsed = JSON.parse(json);
expect(parsed.metadata).toBeDefined();
expect(parsed.metadata.version).toBe("1.0.0");
expect(parsed.entities).toHaveLength(1);
expect(parsed.relationships).toHaveLength(0);
});
});
});
```
--------------------------------------------------------------------------------
/tests/memory/schemas.test.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Tests for Knowledge Graph Schemas
* Phase 1: Core Knowledge Graph Integration
*/
import { describe, it, expect } from "@jest/globals";
import {
ProjectEntitySchema,
UserEntitySchema,
ConfigurationEntitySchema,
CodeFileEntitySchema,
DocumentationSectionEntitySchema,
TechnologyEntitySchema,
ProjectUsesTechnologySchema,
UserPrefersSSGSchema,
ProjectDeployedWithSchema,
SimilarToSchema,
DocumentsSchema,
ReferencesSchema,
OutdatedForSchema,
validateEntity,
validateRelationship,
isProjectEntity,
isUserEntity,
SCHEMA_METADATA,
} from "../../src/memory/schemas.js";
describe("Entity Schemas", () => {
describe("ProjectEntitySchema", () => {
it("should validate a valid project entity", () => {
const validProject = {
name: "test-project",
path: "/path/to/project",
technologies: ["typescript", "javascript"],
size: "medium" as const,
lastAnalyzed: new Date().toISOString(),
analysisCount: 1,
hasTests: true,
hasCI: true,
hasDocs: false,
totalFiles: 100,
};
const result = ProjectEntitySchema.parse(validProject);
expect(result).toBeDefined();
expect(result.name).toBe("test-project");
expect(result.technologies).toHaveLength(2);
});
it("should apply defaults for optional fields", () => {
const minimalProject = {
name: "minimal-project",
path: "/path/to/minimal",
lastAnalyzed: new Date().toISOString(),
};
const result = ProjectEntitySchema.parse(minimalProject);
expect(result.technologies).toEqual([]);
expect(result.size).toBe("medium");
expect(result.analysisCount).toBe(0);
expect(result.hasTests).toBe(false);
});
it("should reject invalid size values", () => {
const invalidProject = {
name: "test-project",
path: "/path/to/project",
size: "huge", // Invalid
lastAnalyzed: new Date().toISOString(),
};
expect(() => ProjectEntitySchema.parse(invalidProject)).toThrow();
});
it("should require name and path", () => {
const missingName = {
path: "/path/to/project",
lastAnalyzed: new Date().toISOString(),
};
expect(() => ProjectEntitySchema.parse(missingName)).toThrow();
});
});
describe("UserEntitySchema", () => {
it("should validate a valid user entity", () => {
const validUser = {
userId: "user123",
expertiseLevel: "intermediate" as const,
preferredTechnologies: ["react", "typescript"],
preferredSSGs: ["docusaurus"],
lastActive: new Date().toISOString(),
createdAt: new Date().toISOString(),
};
const result = UserEntitySchema.parse(validUser);
expect(result.userId).toBe("user123");
expect(result.expertiseLevel).toBe("intermediate");
});
it("should apply defaults", () => {
const minimalUser = {
userId: "user456",
lastActive: new Date().toISOString(),
createdAt: new Date().toISOString(),
};
const result = UserEntitySchema.parse(minimalUser);
expect(result.expertiseLevel).toBe("intermediate");
expect(result.preferredTechnologies).toEqual([]);
expect(result.documentationStyle).toBe("comprehensive");
});
});
describe("ConfigurationEntitySchema", () => {
it("should validate a valid configuration entity", () => {
const validConfig = {
ssg: "docusaurus" as const,
settings: { theme: "classic" },
deploymentSuccessRate: 0.95,
usageCount: 10,
lastUsed: new Date().toISOString(),
};
const result = ConfigurationEntitySchema.parse(validConfig);
expect(result.ssg).toBe("docusaurus");
expect(result.deploymentSuccessRate).toBe(0.95);
});
it("should reject invalid SSG values", () => {
const invalidConfig = {
ssg: "gatsby", // Not in enum
lastUsed: new Date().toISOString(),
};
expect(() => ConfigurationEntitySchema.parse(invalidConfig)).toThrow();
});
it("should validate success rate bounds", () => {
const invalidRate = {
ssg: "jekyll" as const,
deploymentSuccessRate: 1.5, // > 1.0
lastUsed: new Date().toISOString(),
};
expect(() => ConfigurationEntitySchema.parse(invalidRate)).toThrow();
});
});
describe("CodeFileEntitySchema", () => {
it("should validate a valid code file entity", () => {
const validCodeFile = {
path: "/src/index.ts",
language: "typescript",
functions: ["main", "helper"],
classes: ["App"],
dependencies: ["express", "zod"],
lastModified: new Date().toISOString(),
contentHash: "abc123",
linesOfCode: 150,
};
const result = CodeFileEntitySchema.parse(validCodeFile);
expect(result.language).toBe("typescript");
expect(result.functions).toHaveLength(2);
});
});
describe("DocumentationSectionEntitySchema", () => {
it("should validate a valid documentation section", () => {
const validSection = {
filePath: "/docs/api.md",
sectionTitle: "API Reference",
contentHash: "def456",
referencedCodeFiles: ["/src/api.ts"],
lastUpdated: new Date().toISOString(),
category: "reference" as const,
};
const result = DocumentationSectionEntitySchema.parse(validSection);
expect(result.category).toBe("reference");
expect(result.referencedCodeFiles).toHaveLength(1);
});
});
});
describe("Relationship Schemas", () => {
describe("ProjectUsesTechnologySchema", () => {
it("should validate a valid project-technology relationship", () => {
const validRelationship = {
type: "project_uses_technology" as const,
weight: 0.8,
confidence: 1.0,
createdAt: new Date().toISOString(),
lastUpdated: new Date().toISOString(),
fileCount: 50,
percentage: 80,
isPrimary: true,
metadata: {},
};
const result = ProjectUsesTechnologySchema.parse(validRelationship);
expect(result.type).toBe("project_uses_technology");
expect(result.isPrimary).toBe(true);
});
});
describe("ProjectDeployedWithSchema", () => {
it("should validate a successful deployment relationship", () => {
const validDeployment = {
type: "project_deployed_with" as const,
weight: 1.0,
confidence: 1.0,
createdAt: new Date().toISOString(),
lastUpdated: new Date().toISOString(),
success: true,
timestamp: new Date().toISOString(),
buildTime: 45,
deploymentUrl: "https://example.com",
metadata: {},
};
const result = ProjectDeployedWithSchema.parse(validDeployment);
expect(result.success).toBe(true);
expect(result.buildTime).toBe(45);
});
it("should validate a failed deployment relationship", () => {
const failedDeployment = {
type: "project_deployed_with" as const,
weight: 0.5,
confidence: 1.0,
createdAt: new Date().toISOString(),
lastUpdated: new Date().toISOString(),
success: false,
timestamp: new Date().toISOString(),
errorMessage: "Build failed",
metadata: {},
};
const result = ProjectDeployedWithSchema.parse(failedDeployment);
expect(result.success).toBe(false);
expect(result.errorMessage).toBe("Build failed");
});
});
describe("OutdatedForSchema", () => {
it("should validate an outdated documentation relationship", () => {
const validOutdated = {
type: "outdated_for" as const,
weight: 1.0,
confidence: 0.9,
createdAt: new Date().toISOString(),
lastUpdated: new Date().toISOString(),
detectedAt: new Date().toISOString(),
changeType: "function_signature" as const,
severity: "high" as const,
autoFixable: false,
metadata: {},
};
const result = OutdatedForSchema.parse(validOutdated);
expect(result.changeType).toBe("function_signature");
expect(result.severity).toBe("high");
});
});
});
describe("Validation Functions", () => {
describe("validateEntity", () => {
it("should validate a complete entity", () => {
const entity = {
type: "project",
name: "test-project",
path: "/test",
lastAnalyzed: new Date().toISOString(),
};
const result = validateEntity(entity);
expect(result).toBeDefined();
expect(result.type).toBe("project");
});
it("should throw on invalid entity", () => {
const invalidEntity = {
type: "invalid_type",
name: "test",
};
expect(() => validateEntity(invalidEntity)).toThrow();
});
});
describe("validateRelationship", () => {
it("should validate a complete relationship", () => {
const relationship = {
type: "similar_to",
weight: 0.85,
confidence: 0.9,
createdAt: new Date().toISOString(),
lastUpdated: new Date().toISOString(),
similarityScore: 0.85,
sharedTechnologies: ["typescript"],
metadata: {},
};
const result = validateRelationship(relationship);
expect(result).toBeDefined();
});
});
});
describe("Type Guards", () => {
describe("isProjectEntity", () => {
it("should return true for project entities", () => {
const entity = {
type: "project" as const,
name: "test",
path: "/test",
technologies: ["typescript"],
size: "medium" as const,
lastAnalyzed: new Date().toISOString(),
analysisCount: 1,
hasTests: false,
hasCI: false,
hasDocs: false,
totalFiles: 10,
};
expect(isProjectEntity(entity)).toBe(true);
});
it("should return false for non-project entities", () => {
const entity = {
type: "user" as const,
userId: "user123",
expertiseLevel: "intermediate" as const,
preferredTechnologies: [],
preferredSSGs: [],
documentationStyle: "comprehensive" as const,
preferredDiataxisCategories: [],
projectCount: 0,
lastActive: new Date().toISOString(),
createdAt: new Date().toISOString(),
};
expect(isProjectEntity(entity as any)).toBe(false);
});
});
describe("isUserEntity", () => {
it("should return true for user entities", () => {
const entity = {
type: "user" as const,
userId: "user123",
expertiseLevel: "intermediate" as const,
preferredTechnologies: [],
preferredSSGs: [],
documentationStyle: "comprehensive" as const,
preferredDiataxisCategories: [],
projectCount: 0,
lastActive: new Date().toISOString(),
createdAt: new Date().toISOString(),
};
expect(isUserEntity(entity)).toBe(true);
});
});
});
describe("Schema Metadata", () => {
it("should have correct version", () => {
expect(SCHEMA_METADATA.version).toBe("1.0.0");
});
it("should list all entity types", () => {
expect(SCHEMA_METADATA.entityTypes).toContain("project");
expect(SCHEMA_METADATA.entityTypes).toContain("user");
expect(SCHEMA_METADATA.entityTypes).toContain("configuration");
expect(SCHEMA_METADATA.entityTypes).toContain("code_file");
expect(SCHEMA_METADATA.entityTypes).toContain(
"documentation_freshness_event",
);
expect(SCHEMA_METADATA.entityTypes).toHaveLength(8);
});
it("should list all relationship types", () => {
expect(SCHEMA_METADATA.relationshipTypes).toContain(
"project_uses_technology",
);
expect(SCHEMA_METADATA.relationshipTypes).toContain("outdated_for");
expect(SCHEMA_METADATA.relationshipTypes).toContain("project_has_sitemap");
expect(SCHEMA_METADATA.relationshipTypes).toContain(
"project_has_freshness_event",
);
expect(SCHEMA_METADATA.relationshipTypes).toHaveLength(13);
});
});
```
--------------------------------------------------------------------------------
/docs/tutorials/user-onboarding.md:
--------------------------------------------------------------------------------
```markdown
---
documcp:
last_updated: "2025-11-20T00:46:21.973Z"
last_validated: "2025-11-20T00:46:21.973Z"
auto_updated: false
update_frequency: monthly
---
# DocuMCP User Onboarding Guide
Welcome to DocuMCP! This comprehensive guide will help you get started with DocuMCP in your own environment, from initial setup to advanced usage patterns.
## 🚀 Quick Start
### Prerequisites
- **Node.js**: Version 20.0.0 or higher
- **npm**: Version 8.0.0 or higher
- **Git**: For repository analysis
- **GitHub Account**: For GitHub Pages deployment
### Installation
```bash
# Install DocuMCP globally
npm install -g documcp
# Or install locally in your project
npm install documcp --save-dev
```
### Verify Installation
```bash
# Check if DocuMCP is installed correctly
documcp --version
# Should output: DocuMCP v0.5.0
```
## 📋 Basic Usage Patterns
### Pattern 1: Repository Analysis
Start by analyzing your repository to understand its structure and documentation needs.
```bash
# Basic repository analysis
documcp analyze-repository --path ./my-project --depth standard
# Quick analysis for large repositories
documcp analyze-repository --path ./large-project --depth quick
# Deep analysis for comprehensive documentation
documcp analyze-repository --path ./complex-project --depth deep
```
**Example Output:**
```json
{
"success": true,
"data": {
"id": "analysis_abc123_def456",
"structure": {
"totalFiles": 150,
"totalDirectories": 25,
"languages": { ".ts": 100, ".md": 20, ".json": 10 },
"hasTests": true,
"hasCI": true,
"hasDocs": false
},
"recommendations": {
"primaryLanguage": "TypeScript",
"projectType": "Library",
"teamSize": "small"
}
}
}
```
### Pattern 2: SSG Recommendation
Get intelligent recommendations for the best static site generator for your project.
```bash
# Get SSG recommendation based on analysis
documcp recommend-ssg --analysis-id analysis_abc123_def456
# With user preferences
documcp recommend-ssg --analysis-id analysis_abc123_def456 --priority performance --ecosystem javascript
# For enterprise users
documcp recommend-ssg --analysis-id analysis_abc123_def456 --priority simplicity
```
**Example Output:**
```json
{
"success": true,
"data": {
"recommended": "docusaurus",
"confidence": 0.92,
"reasoning": [
"React-based project detected",
"Documentation focus identified",
"Team size suitable for Docusaurus"
],
"alternatives": [
{
"name": "hugo",
"score": 0.85,
"pros": ["Performance", "Fast builds"],
"cons": ["Learning curve", "Go templates"]
}
]
}
}
```
### Pattern 3: Documentation Structure Setup
Create a Diataxis-compliant documentation structure.
```bash
# Set up documentation structure
documcp setup-structure --path ./docs --ssg docusaurus --include-examples
# Minimal structure for existing projects
documcp setup-structure --path ./site --ssg hugo --include-examples false
```
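After setup, the generated layout follows the four Diataxis categories. The exact files depend on the SSG and options, but it typically resembles this sketch (illustrative, not verbatim tool output):
```
docs/
├── tutorials/      # learning-oriented lessons
├── how-to/         # task-oriented guides
├── reference/      # information-oriented specifications
├── explanation/    # understanding-oriented discussion
└── index.md
```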
### Pattern 4: Configuration Generation
Generate configuration files for your chosen SSG.
```bash
# Generate Docusaurus configuration
documcp generate-config --ssg docusaurus --project-name "My Project" --output-path ./docs
# Generate Hugo configuration
documcp generate-config --ssg hugo --project-name "My Site" --output-path ./site
```
### Pattern 5: Content Population
Populate your documentation with intelligent content based on your repository.
```bash
# Populate documentation content
documcp populate-content --analysis-id analysis_abc123_def456 --docs-path ./docs
# With specific focus areas
documcp populate-content --analysis-id analysis_abc123_def456 --docs-path ./docs --focus-areas api,examples
```
### Pattern 6: GitHub Pages Deployment
Deploy your documentation to GitHub Pages.
```bash
# Deploy to GitHub Pages
documcp deploy-pages --repository "user/repo" --ssg docusaurus
# With custom domain
documcp deploy-pages --repository "user/repo" --ssg docusaurus --custom-domain "docs.example.com"
```
## 🎯 Common Use Cases
### Use Case 1: New Open Source Project
For a new open source project, follow this workflow:
```bash
# 1. Analyze your repository
ANALYSIS_ID=$(documcp analyze-repository --path . --depth standard | jq -r '.data.id')
# 2. Get SSG recommendation
documcp recommend-ssg --analysis-id $ANALYSIS_ID --priority community_focused
# 3. Set up documentation structure
documcp setup-structure --path ./docs --ssg docusaurus --include-examples
# 4. Generate configuration
documcp generate-config --ssg docusaurus --project-name "My Open Source Project" --output-path ./docs
# 5. Populate content
documcp populate-content --analysis-id $ANALYSIS_ID --docs-path ./docs
# 6. Deploy to GitHub Pages
documcp deploy-pages --repository "$(git remote get-url origin | sed 's/.*github.com[:/]\([^.]*\).*/\1/')" --ssg docusaurus
```
### Use Case 2: Enterprise Documentation
For enterprise documentation with specific requirements:
```bash
# 1. Analyze with enterprise focus
ANALYSIS_ID=$(documcp analyze-repository --path . --depth deep | jq -r '.data.id')
# 2. Get enterprise-focused recommendation
documcp recommend-ssg --analysis-id $ANALYSIS_ID --priority enterprise_focused
# 3. Set up minimal structure
documcp setup-structure --path ./enterprise-docs --ssg hugo --include-examples false
# 4. Generate enterprise configuration
documcp generate-config --ssg hugo --project-name "Enterprise Documentation" --output-path ./enterprise-docs
# 5. Populate with enterprise focus
documcp populate-content --analysis-id $ANALYSIS_ID --docs-path ./enterprise-docs --focus-areas security,compliance,api
```
### Use Case 3: API Documentation
For API-focused projects:
```bash
# 1. Analyze API project
ANALYSIS_ID=$(documcp analyze-repository --path . --depth standard | jq -r '.data.id')
# 2. Get API-focused recommendation
documcp recommend-ssg --analysis-id $ANALYSIS_ID --priority features
# 3. Set up API documentation structure
documcp setup-structure --path ./api-docs --ssg docusaurus --include-examples
# 4. Generate API documentation configuration
documcp generate-config --ssg docusaurus --project-name "API Documentation" --output-path ./api-docs
# 5. Populate with API focus
documcp populate-content --analysis-id $ANALYSIS_ID --docs-path ./api-docs --focus-areas api,examples,integration
```
## 🔧 Advanced Configuration
### Environment Variables
Set up environment variables for advanced configuration:
```bash
# GitHub token for deployment
export GITHUB_TOKEN="your_github_token"
# Custom storage directory for memory
export DOCUMCP_STORAGE_DIR="./.documcp"
# Development mode for debugging
export NODE_ENV="development"
```
### Memory System Configuration
Configure the memory system for learning and pattern recognition:
```bash
# Initialize memory system
documcp memory initialize --storage-dir ./.documcp
# Export memories for backup
documcp memory export --format json --output ./documcp-memories.json
# Import memories from backup
documcp memory import --format json --input ./documcp-memories.json
```
### User Preferences
Set up user preferences for personalized recommendations:
```bash
# Set user preferences
documcp preferences set --user-id "developer123" --priority performance --ecosystem javascript
# Get personalized recommendations
documcp recommend-ssg --analysis-id $ANALYSIS_ID --user-id "developer123"
# Export preferences
documcp preferences export --user-id "developer123" --output ./preferences.json
```
## 🚨 Troubleshooting
### Common Issues
#### Issue 1: Repository Analysis Fails
**Problem:** `Permission denied: Cannot read directory`
**Solution:**
```bash
# Check directory permissions
ls -la /path/to/repository
# Fix permissions if needed
chmod -R 755 /path/to/repository
# Run analysis again
documcp analyze-repository --path /path/to/repository --depth standard
```
#### Issue 2: SSG Recommendation Returns Low Confidence
**Problem:** Low confidence scores in recommendations
**Solution:**
```bash
# Try deeper analysis
documcp analyze-repository --path . --depth deep
# Use specific preferences
documcp recommend-ssg --analysis-id $ANALYSIS_ID --priority simplicity --ecosystem any
# Check for similar projects in memory
documcp memory similar --analysis-id $ANALYSIS_ID
```
#### Issue 3: GitHub Pages Deployment Fails
**Problem:** Deployment fails with permission errors
**Solution:**
```bash
# Check GitHub token permissions
curl -H "Authorization: token $GITHUB_TOKEN" https://api.github.com/user
# Ensure token has repo and pages permissions
# Regenerate token with correct permissions if needed
# Try deployment again
documcp deploy-pages --repository "user/repo" --ssg docusaurus
```
#### Issue 4: Content Population Generates Empty Content
**Problem:** No content is generated during population
**Solution:**
```bash
# Check if repository has sufficient content
documcp analyze-repository --path . --depth deep
# Ensure documentation structure exists
documcp setup-structure --path ./docs --ssg docusaurus
# Try with different population level
documcp populate-content --analysis-id $ANALYSIS_ID --docs-path ./docs --population-level comprehensive
```
## 📚 Best Practices
### 1. Repository Organization
- Keep your repository well-organized with clear directory structure
- Include a comprehensive README.md file
- Use consistent naming conventions
- Include package.json or equivalent dependency files
### 2. Documentation Structure
- Follow Diataxis framework principles
- Use clear, descriptive headings
- Include code examples and use cases
- Keep documentation up-to-date with code changes
### 3. Memory System Usage
- Regularly export memories for backup (see the sketch below)
- Use consistent user IDs for preference tracking
- Clean up old memories periodically
- Share memories across team members for better recommendations
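A minimal backup routine using the `documcp memory export` command shown earlier (the dated filename and backup directory are illustrative):
```bash
# Export a dated snapshot of memories for backup
mkdir -p ./backups
documcp memory export --format json --output "./backups/documcp-memories-$(date +%Y-%m-%d).json"
```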
### 4. Deployment Strategy
- Test documentation locally before deployment (see the sketch below)
- Use staging environments for testing
- Monitor deployment success rates
- Keep deployment configurations in version control
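For example, with a Docusaurus site you can build and preview the production output locally before deploying (a sketch assuming the standard Docusaurus npm scripts; adjust for your SSG):
```bash
# Build the static site and serve the production build locally
cd docs
npm install
npm run build
npm run serve
```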
## 🔗 Integration Examples
### GitHub Actions Integration
```yaml
name: Deploy Documentation
on:
push:
branches: [main]
paths: ["docs/**", "src/**"]
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: "20"
- name: Install DocuMCP
run: npm install -g documcp
- name: Analyze Repository
id: analyze
run: |
ANALYSIS_ID=$(documcp analyze-repository --path . --depth standard | jq -r '.data.id')
echo "analysis_id=$ANALYSIS_ID" >> $GITHUB_OUTPUT
- name: Deploy Documentation
run: |
documcp deploy-pages --repository ${{ github.repository }} --ssg docusaurus
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
### Docker Integration
```dockerfile
FROM node:20-alpine
# Install DocuMCP plus jq and git (jq parses analysis output; git enables repository analysis)
RUN npm install -g documcp && apk add --no-cache jq git
# Repository slug (user/repo) must be supplied at build time:
#   docker build --build-arg REPOSITORY=user/repo .
ARG REPOSITORY
# Set working directory
WORKDIR /app
# Copy project files
COPY . .
# Analyze once, reuse the analysis ID, then deploy documentation
RUN ANALYSIS_ID=$(documcp analyze-repository --path . --depth standard | jq -r '.data.id') && \
documcp recommend-ssg --analysis-id "$ANALYSIS_ID" && \
documcp deploy-pages --repository "$REPOSITORY" --ssg docusaurus
EXPOSE 3000
CMD ["documcp", "serve", "--port", "3000"]
```
## 📖 Additional Resources
- [API Reference](../api/) - Complete API documentation
- [Configuration Guide](../reference/configuration.md) - Detailed configuration options
- [MCP Tools Reference](../reference/mcp-tools.md) - MCP tool specifications
- [GitHub Pages Deployment](../how-to/github-pages-deployment.md) - Deployment guide
- [Troubleshooting Guide](../how-to/troubleshooting.md) - Common issues and solutions
## 🤝 Getting Help
- **GitHub Issues**: Report bugs and request features
- **GitHub Discussions**: Ask questions and share ideas
- **Documentation**: Browse the guides listed under Additional Resources above
- **API Reference**: Explore the complete [API documentation](../api/)
Welcome to the DocuMCP community! 🎉
```
--------------------------------------------------------------------------------
/docs/api/assets/icons.svg:
--------------------------------------------------------------------------------
```
<svg xmlns="http://www.w3.org/2000/svg"><g id="icon-1" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-module)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">M</text></g><g id="icon-2" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-module)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">M</text></g><g id="icon-4" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-namespace)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">N</text></g><g id="icon-8" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-enum)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">E</text></g><g id="icon-16" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-property)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">P</text></g><g id="icon-32" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-variable)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">V</text></g><g id="icon-64" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-function)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">F</text></g><g id="icon-128" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-class)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">C</text></g><g id="icon-256" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-interface)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">I</text></g><g id="icon-512" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-constructor)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">C</text></g><g id="icon-1024" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-property)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">P</text></g><g id="icon-2048" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-method)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" 
text-anchor="middle">M</text></g><g id="icon-4096" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-function)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">F</text></g><g id="icon-8192" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-property)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">P</text></g><g id="icon-16384" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-constructor)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">C</text></g><g id="icon-32768" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-property)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">P</text></g><g id="icon-65536" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-type-alias)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">T</text></g><g id="icon-131072" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-type-alias)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">T</text></g><g id="icon-262144" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-accessor)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">A</text></g><g id="icon-524288" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-accessor)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">A</text></g><g id="icon-1048576" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-accessor)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">A</text></g><g id="icon-2097152" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-type-alias)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">T</text></g><g id="icon-4194304" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-reference)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">R</text></g><g id="icon-8388608" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-document)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><g stroke="var(--color-icon-text)" 
fill="none" stroke-width="1.5"><polygon points="6,5 6,19 18,19, 18,10 13,5"></polygon><line x1="9" y1="9" x2="13" y2="9"></line><line x1="9" y1="12" x2="15" y2="12"></line><line x1="9" y1="15" x2="15" y2="15"></line></g></g><g id="icon-folder" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-document)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><g stroke="var(--color-icon-text)" fill="none" stroke-width="1.5"><polygon points="5,5 10,5 12,8 19,8 19,18 5,18"></polygon></g></g><g id="icon-chevronDown" class="tsd-no-select"><path d="M4.93896 8.531L12 15.591L19.061 8.531L16.939 6.409L12 11.349L7.06098 6.409L4.93896 8.531Z" fill="var(--color-icon-text)"></path></g><g id="icon-chevronSmall" class="tsd-no-select"><path d="M1.5 5.50969L8 11.6609L14.5 5.50969L12.5466 3.66086L8 7.96494L3.45341 3.66086L1.5 5.50969Z" fill="var(--color-icon-text)"></path></g><g id="icon-checkbox" class="tsd-no-select"><rect class="tsd-checkbox-background" width="30" height="30" x="1" y="1" rx="6" fill="none"></rect><path class="tsd-checkbox-checkmark" d="M8.35422 16.8214L13.2143 21.75L24.6458 10.25" stroke="none" stroke-width="3.5" stroke-linejoin="round" fill="none"></path></g><g id="icon-menu" class="tsd-no-select"><rect x="1" y="3" width="14" height="2" fill="var(--color-icon-text)"></rect><rect x="1" y="7" width="14" height="2" fill="var(--color-icon-text)"></rect><rect x="1" y="11" width="14" height="2" fill="var(--color-icon-text)"></rect></g><g id="icon-search" class="tsd-no-select"><path d="M15.7824 13.833L12.6666 10.7177C12.5259 10.5771 12.3353 10.499 12.1353 10.499H11.6259C12.4884 9.39596 13.001 8.00859 13.001 6.49937C13.001 2.90909 10.0914 0 6.50048 0C2.90959 0 0 2.90909 0 6.49937C0 10.0896 2.90959 12.9987 6.50048 12.9987C8.00996 12.9987 9.39756 12.4863 10.5008 11.6239V12.1332C10.5008 12.3332 10.5789 12.5238 10.7195 12.6644L13.8354 15.7797C14.1292 16.0734 14.6042 16.0734 14.8948 15.7797L15.7793 14.8954C16.0731 14.6017 16.0731 14.1267 15.7824 13.833ZM6.50048 10.499C4.29094 10.499 2.50018 8.71165 2.50018 6.49937C2.50018 4.29021 4.28781 2.49976 6.50048 2.49976C8.71001 2.49976 10.5008 4.28708 10.5008 6.49937C10.5008 8.70852 8.71314 10.499 6.50048 10.499Z" fill="var(--color-icon-text)"></path></g><g id="icon-anchor" class="tsd-no-select"><g stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"></path><path d="M10 14a3.5 3.5 0 0 0 5 0l4 -4a3.5 3.5 0 0 0 -5 -5l-.5 .5"></path><path d="M14 10a3.5 3.5 0 0 0 -5 0l-4 4a3.5 3.5 0 0 0 5 5l.5 -.5"></path></g></g><g id="icon-alertNote" class="tsd-no-select"><path fill="var(--color-alert-note)" d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8Zm8-6.5a6.5 6.5 0 1 0 0 13 6.5 6.5 0 0 0 0-13ZM6.5 7.75A.75.75 0 0 1 7.25 7h1a.75.75 0 0 1 .75.75v2.75h.25a.75.75 0 0 1 0 1.5h-2a.75.75 0 0 1 0-1.5h.25v-2h-.25a.75.75 0 0 1-.75-.75ZM8 6a1 1 0 1 1 0-2 1 1 0 0 1 0 2Z"></path></g><g id="icon-alertTip" class="tsd-no-select"><path fill="var(--color-alert-tip)" d="M8 1.5c-2.363 0-4 1.69-4 3.75 0 .984.424 1.625.984 2.304l.214.253c.223.264.47.556.673.848.284.411.537.896.621 1.49a.75.75 0 0 1-1.484.211c-.04-.282-.163-.547-.37-.847a8.456 8.456 0 0 0-.542-.68c-.084-.1-.173-.205-.268-.32C3.201 7.75 2.5 6.766 2.5 5.25 2.5 2.31 4.863 0 8 0s5.5 2.31 5.5 5.25c0 1.516-.701 2.5-1.328 3.259-.095.115-.184.22-.268.319-.207.245-.383.453-.541.681-.208.3-.33.565-.37.847a.751.751 0 0 
1-1.485-.212c.084-.593.337-1.078.621-1.489.203-.292.45-.584.673-.848.075-.088.147-.173.213-.253.561-.679.985-1.32.985-2.304 0-2.06-1.637-3.75-4-3.75ZM5.75 12h4.5a.75.75 0 0 1 0 1.5h-4.5a.75.75 0 0 1 0-1.5ZM6 15.25a.75.75 0 0 1 .75-.75h2.5a.75.75 0 0 1 0 1.5h-2.5a.75.75 0 0 1-.75-.75Z"></path></g><g id="icon-alertImportant" class="tsd-no-select"><path fill="var(--color-alert-important)" d="M0 1.75C0 .784.784 0 1.75 0h12.5C15.216 0 16 .784 16 1.75v9.5A1.75 1.75 0 0 1 14.25 13H8.06l-2.573 2.573A1.458 1.458 0 0 1 3 14.543V13H1.75A1.75 1.75 0 0 1 0 11.25Zm1.75-.25a.25.25 0 0 0-.25.25v9.5c0 .138.112.25.25.25h2a.75.75 0 0 1 .75.75v2.19l2.72-2.72a.749.749 0 0 1 .53-.22h6.5a.25.25 0 0 0 .25-.25v-9.5a.25.25 0 0 0-.25-.25Zm7 2.25v2.5a.75.75 0 0 1-1.5 0v-2.5a.75.75 0 0 1 1.5 0ZM9 9a1 1 0 1 1-2 0 1 1 0 0 1 2 0Z"></path></g><g id="icon-alertWarning" class="tsd-no-select"><path fill="var(--color-alert-warning)" d="M6.457 1.047c.659-1.234 2.427-1.234 3.086 0l6.082 11.378A1.75 1.75 0 0 1 14.082 15H1.918a1.75 1.75 0 0 1-1.543-2.575Zm1.763.707a.25.25 0 0 0-.44 0L1.698 13.132a.25.25 0 0 0 .22.368h12.164a.25.25 0 0 0 .22-.368Zm.53 3.996v2.5a.75.75 0 0 1-1.5 0v-2.5a.75.75 0 0 1 1.5 0ZM9 11a1 1 0 1 1-2 0 1 1 0 0 1 2 0Z"></path></g><g id="icon-alertCaution" class="tsd-no-select"><path fill="var(--color-alert-caution)" d="M4.47.22A.749.749 0 0 1 5 0h6c.199 0 .389.079.53.22l4.25 4.25c.141.14.22.331.22.53v6a.749.749 0 0 1-.22.53l-4.25 4.25A.749.749 0 0 1 11 16H5a.749.749 0 0 1-.53-.22L.22 11.53A.749.749 0 0 1 0 11V5c0-.199.079-.389.22-.53Zm.84 1.28L1.5 5.31v5.38l3.81 3.81h5.38l3.81-3.81V5.31L10.69 1.5ZM8 4a.75.75 0 0 1 .75.75v3.5a.75.75 0 0 1-1.5 0v-3.5A.75.75 0 0 1 8 4Zm0 8a1 1 0 1 1 0-2 1 1 0 0 1 0 2Z"></path></g></svg>
```
--------------------------------------------------------------------------------
/src/memory/manager.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Memory Management Module for DocuMCP
* Implements Issue #46: Memory Management Module
*/
import { JSONLStorage, MemoryEntry } from "./storage.js";
import { EventEmitter } from "events";
export interface MemoryContext {
projectId: string;
repository?: string;
branch?: string;
user?: string;
session?: string;
}
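/**
 * Options accepted by MemoryManager.search().
 * Note: `semantic` and `fuzzy` are reserved for future matching modes and are
 * not yet consulted; only `sortBy` and `groupBy` affect results today.
 */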
export interface MemorySearchOptions {
semantic?: boolean;
fuzzy?: boolean;
sortBy?: "relevance" | "timestamp" | "type";
groupBy?: "type" | "project" | "date";
}
export class MemoryManager extends EventEmitter {
private storage: JSONLStorage;
private context: MemoryContext | null = null;
private cache: Map<string, MemoryEntry>;
private readonly maxCacheSize = 200; // Reduced cache size for better memory efficiency
constructor(storageDir?: string) {
super();
this.storage = new JSONLStorage(storageDir);
this.cache = new Map();
}
async initialize(): Promise<void> {
await this.storage.initialize();
this.emit("initialized");
}
setContext(context: MemoryContext): void {
this.context = context;
this.emit("context-changed", context);
}
async remember(
type: MemoryEntry["type"],
data: Record<string, any>,
metadata?: Partial<MemoryEntry["metadata"]>,
): Promise<MemoryEntry> {
const entry = await this.storage.append({
type,
timestamp: new Date().toISOString(),
data,
metadata: {
...metadata,
projectId: this.context?.projectId || metadata?.projectId,
repository: this.context?.repository || metadata?.repository,
},
});
this.addToCache(entry);
this.emit("memory-created", entry);
return entry;
}
async recall(id: string): Promise<MemoryEntry | null> {
if (this.cache.has(id)) {
return this.cache.get(id)!;
}
const entry = await this.storage.get(id);
if (entry) {
this.addToCache(entry);
}
return entry;
}
async search(
query: string | Partial<MemoryEntry["metadata"]>,
options?: MemorySearchOptions,
): Promise<MemoryEntry[]> {
let filter: any = {};
if (typeof query === "string") {
// Text-based search - search in multiple fields
// Try to match projectId first, then tags
const results: MemoryEntry[] = [];
// Search by projectId
const projectResults = await this.storage.query({ projectId: query });
results.push(...projectResults);
// Search by tags (excluding already found entries)
const tagResults = await this.storage.query({ tags: [query] });
const existingIds = new Set(results.map((r) => r.id));
results.push(...tagResults.filter((r) => !existingIds.has(r.id)));
// Apply sorting and grouping if requested
let finalResults = results;
if (options?.sortBy) {
finalResults = this.sortResults(finalResults, options.sortBy);
}
if (options?.groupBy) {
return this.groupResults(finalResults, options.groupBy);
}
return finalResults;
} else {
filter = { ...query };
}
if (this.context) {
filter.projectId = filter.projectId || this.context.projectId;
filter.repository = filter.repository || this.context.repository;
}
let results = await this.storage.query(filter);
if (options?.sortBy) {
results = this.sortResults(results, options.sortBy);
}
if (options?.groupBy) {
return this.groupResults(results, options.groupBy);
}
return results;
}
async update(
id: string,
updates: Partial<MemoryEntry>,
): Promise<MemoryEntry | null> {
const existing = await this.recall(id);
if (!existing) return null;
const updated: MemoryEntry = {
...existing,
...updates,
id: existing.id,
timestamp: new Date().toISOString(),
};
await this.storage.delete(id);
const newEntry = await this.storage.append(updated);
this.cache.delete(id);
this.addToCache(newEntry);
this.emit("memory-updated", newEntry);
return newEntry;
}
async forget(id: string): Promise<boolean> {
const result = await this.storage.delete(id);
if (result) {
this.cache.delete(id);
this.emit("memory-deleted", id);
}
return result;
}
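/**
 * Collect memories related to an entry via shared project, shared type, and
 * overlapping tags, deduplicated by id and capped at `limit`.
 */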
async getRelated(
entry: MemoryEntry,
limit: number = 10,
): Promise<MemoryEntry[]> {
const related: MemoryEntry[] = [];
// Find by same project
if (entry.metadata.projectId) {
const projectMemories = await this.search({
projectId: entry.metadata.projectId,
});
related.push(...projectMemories.filter((m: any) => m.id !== entry.id));
}
// Find by same type
const typeMemories = await this.storage.query({
type: entry.type,
limit: limit * 2,
});
related.push(...typeMemories.filter((m: any) => m.id !== entry.id));
// Find by overlapping tags
if (entry.metadata.tags && entry.metadata.tags.length > 0) {
const tagMemories = await this.storage.query({
tags: entry.metadata.tags,
limit: limit * 2,
});
related.push(...tagMemories.filter((m: any) => m.id !== entry.id));
}
// Deduplicate and limit
const uniqueRelated = Array.from(
new Map(related.map((m: any) => [m.id, m])).values(),
).slice(0, limit);
return uniqueRelated;
}
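/**
 * Summarize stored memories for an optional time range: extracts usage
 * patterns (SSG frequency, deployment outcomes, activity by hour) and turns
 * them into human-readable insights alongside raw storage statistics.
 */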
async analyze(timeRange?: { start: string; end: string }): Promise<{
patterns: Record<string, any>;
insights: string[];
statistics: any;
}> {
const stats = await this.storage.getStatistics();
const memories = await this.storage.query({
startDate: timeRange?.start,
endDate: timeRange?.end,
});
const patterns = this.extractPatterns(memories);
const insights = this.generateInsights(patterns, stats);
return {
patterns,
insights,
statistics: stats,
};
}
private extractPatterns(memories: MemoryEntry[]): Record<string, any> {
const patterns: Record<string, any> = {
mostCommonSSG: {},
projectTypes: {},
deploymentSuccess: { success: 0, failed: 0 },
timeDistribution: {},
};
for (const memory of memories) {
// SSG patterns
if (memory.metadata.ssg) {
patterns.mostCommonSSG[memory.metadata.ssg] =
(patterns.mostCommonSSG[memory.metadata.ssg] || 0) + 1;
}
// Deployment patterns
if (memory.type === "deployment") {
if (memory.data.status === "success") {
patterns.deploymentSuccess.success++;
} else if (memory.data.status === "failed") {
patterns.deploymentSuccess.failed++;
}
}
// Time patterns
const hour = new Date(memory.timestamp).getHours();
patterns.timeDistribution[hour] =
(patterns.timeDistribution[hour] || 0) + 1;
}
return patterns;
}
private generateInsights(patterns: any, stats: any): string[] {
const insights: string[] = [];
// SSG preference insight
if (Object.keys(patterns.mostCommonSSG).length > 0) {
const topSSG = Object.entries(patterns.mostCommonSSG).sort(
([, a]: any, [, b]: any) => b - a,
)[0];
insights.push(
`Most frequently used SSG: ${topSSG[0]} (${topSSG[1]} projects)`,
);
}
// Deployment success rate
const total =
patterns.deploymentSuccess.success + patterns.deploymentSuccess.failed;
if (total > 0) {
const successRate = (
(patterns.deploymentSuccess.success / total) *
100
).toFixed(1);
insights.push(`Deployment success rate: ${successRate}%`);
}
// Activity patterns
if (Object.keys(patterns.timeDistribution).length > 0) {
const peakHour = Object.entries(patterns.timeDistribution).sort(
([, a]: any, [, b]: any) => b - a,
)[0];
insights.push(`Peak activity hour: ${peakHour[0]}:00`);
}
// Storage insights
const sizeMB = (stats.totalSize / 1024 / 1024).toFixed(2);
insights.push(
`Total memory storage: ${sizeMB} MB across ${stats.totalEntries} entries`,
);
return insights;
}
private sortResults(
results: MemoryEntry[],
sortBy: "relevance" | "timestamp" | "type",
): MemoryEntry[] {
switch (sortBy) {
case "timestamp":
return results.sort(
(a, b) =>
new Date(b.timestamp).getTime() - new Date(a.timestamp).getTime(),
);
case "type":
return results.sort((a, b) => a.type.localeCompare(b.type));
default:
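// "relevance" ordering is not yet implemented; preserve storage order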
return results;
}
}
private groupResults(
results: MemoryEntry[],
groupBy: "type" | "project" | "date",
): any {
const grouped: Record<string, MemoryEntry[]> = {};
for (const entry of results) {
let key: string;
switch (groupBy) {
case "type":
key = entry.type;
break;
case "project":
key = entry.metadata.projectId || "unknown";
break;
case "date":
key = entry.timestamp.split("T")[0];
break;
default:
key = "all";
}
if (!grouped[key]) {
grouped[key] = [];
}
grouped[key].push(entry);
}
return grouped;
}
private addToCache(entry: MemoryEntry): void {
// More aggressive cache eviction to prevent memory growth
while (this.cache.size >= this.maxCacheSize) {
const firstKey = this.cache.keys().next().value;
if (firstKey) {
this.cache.delete(firstKey);
}
}
// Store a shallow copy to avoid retaining large objects
const cacheEntry = {
id: entry.id,
timestamp: entry.timestamp,
type: entry.type,
data: entry.data,
metadata: entry.metadata,
tags: entry.tags,
};
this.cache.set(entry.id, cacheEntry as MemoryEntry);
}
async export(
format: "json" | "csv" = "json",
projectId?: string,
): Promise<string> {
const filter = projectId ? { projectId } : {};
const allMemories = await this.storage.query(filter);
if (format === "json") {
return JSON.stringify(allMemories, null, 2);
} else {
// CSV export
const headers = [
"id",
"timestamp",
"type",
"projectId",
"repository",
"ssg",
];
const rows = allMemories.map((m: any) => [
m.id,
m.timestamp,
m.type,
m.metadata?.projectId || "",
m.metadata?.repository || "",
m.metadata?.ssg || "",
]);
return [headers, ...rows].map((r: any) => r.join(",")).join("\n");
}
}
async import(data: string, format: "json" | "csv" = "json"): Promise<number> {
let entries: MemoryEntry[] = [];
if (format === "json") {
entries = JSON.parse(data);
} else {
// CSV import - simplified for now
const lines = data.split("\n");
const headers = lines[0].split(",");
for (let i = 1; i < lines.length; i++) {
const values = lines[i].split(",");
if (values.length === headers.length) {
entries.push({
id: values[0],
timestamp: values[1],
type: values[2] as MemoryEntry["type"],
data: {},
metadata: {
projectId: values[3],
repository: values[4],
ssg: values[5],
},
});
}
}
}
let imported = 0;
for (const entry of entries) {
// Use store to preserve the original ID when importing
await this.storage.store(entry);
imported++;
}
this.emit("import-complete", imported);
return imported;
}
async cleanup(olderThan?: Date): Promise<number> {
const cutoff = olderThan || new Date(Date.now() - 30 * 24 * 60 * 60 * 1000); // 30 days
const oldMemories = await this.storage.query({
endDate: cutoff.toISOString(),
});
let deleted = 0;
for (const memory of oldMemories) {
if (await this.storage.delete(memory.id)) {
deleted++;
}
}
await this.storage.compact();
this.emit("cleanup-complete", deleted);
return deleted;
}
async close(): Promise<void> {
await this.storage.close();
this.cache.clear();
this.emit("closed");
}
/**
* Get the storage instance for use with other systems
*/
getStorage(): JSONLStorage {
return this.storage;
}
}
export default MemoryManager;
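// Illustrative usage (not executed here); signatures match the class above,
// and "deployment" is one of the MemoryEntry types used by extractPatterns():
//   const manager = new MemoryManager("./.documcp");
//   await manager.initialize();
//   manager.setContext({ projectId: "my-project" });
//   const entry = await manager.remember("deployment", { status: "success" });
//   const related = await manager.getRelated(entry, 5);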
```
--------------------------------------------------------------------------------
/tests/tools/analyze-readme.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
import { promises as fs } from "fs";
import { join } from "path";
import { analyzeReadme } from "../../src/tools/analyze-readme.js";
import { tmpdir } from "os";
describe("analyze_readme", () => {
let testDir: string;
let readmePath: string;
beforeEach(async () => {
// Create temporary test directory
testDir = join(tmpdir(), `test-readme-${Date.now()}`);
await fs.mkdir(testDir, { recursive: true });
readmePath = join(testDir, "README.md");
});
afterEach(async () => {
// Cleanup test directory
try {
await fs.rm(testDir, { recursive: true, force: true });
} catch {
// Ignore cleanup errors
}
});
describe("input validation", () => {
it("should require project_path parameter", async () => {
const result = await analyzeReadme({});
expect(result.success).toBe(false);
expect(result.error?.code).toBe("ANALYSIS_FAILED");
});
it("should handle non-existent project directory", async () => {
const result = await analyzeReadme({
project_path: "/non/existent/path",
});
expect(result.success).toBe(false);
expect(result.error?.code).toBe("README_NOT_FOUND");
});
});
describe("README detection", () => {
it("should find README.md file", async () => {
const readmeContent = `# Test Project\n\n> A simple test project\n\n## Installation\n\n\`\`\`bash\nnpm install\n\`\`\`\n\n## Usage\n\nExample usage here.`;
await fs.writeFile(readmePath, readmeContent);
const result = await analyzeReadme({
project_path: testDir,
});
expect(result.success).toBe(true);
expect(result.data?.analysis).toBeDefined();
});
it("should find alternative README file names", async () => {
const readmeContent = `# Test Project\n\nBasic content`;
await fs.writeFile(join(testDir, "readme.md"), readmeContent);
const result = await analyzeReadme({
project_path: testDir,
});
expect(result.success).toBe(true);
});
});
describe("length analysis", () => {
it("should analyze README length correctly", async () => {
const longReadme = Array(400)
.fill("# Section\n\nContent here.\n")
.join("\n");
await fs.writeFile(readmePath, longReadme);
const result = await analyzeReadme({
project_path: testDir,
max_length_target: 300,
});
expect(result.success).toBe(true);
expect(result.data?.analysis.lengthAnalysis.exceedsTarget).toBe(true);
expect(
result.data?.analysis.lengthAnalysis.reductionNeeded,
).toBeGreaterThan(0);
});
it("should handle README within target length", async () => {
const shortReadme = `# Project\n\n## Quick Start\n\nInstall and use.`;
await fs.writeFile(readmePath, shortReadme);
const result = await analyzeReadme({
project_path: testDir,
max_length_target: 300,
});
expect(result.success).toBe(true);
expect(result.data?.analysis.lengthAnalysis.exceedsTarget).toBe(false);
expect(result.data?.analysis.lengthAnalysis.reductionNeeded).toBe(0);
});
});
describe("structure analysis", () => {
it("should evaluate scannability score", async () => {
const wellStructuredReadme = `# Project Title
> Clear description
## Installation
\`\`\`bash
npm install
\`\`\`
## Usage
- Feature 1
- Feature 2
- Feature 3
### Advanced Usage
More details here.
## Contributing
Guidelines here.`;
await fs.writeFile(readmePath, wellStructuredReadme);
const result = await analyzeReadme({
project_path: testDir,
});
expect(result.success).toBe(true);
expect(
result.data?.analysis.structureAnalysis.scannabilityScore,
).toBeGreaterThan(50);
expect(
result.data?.analysis.structureAnalysis.headingHierarchy.length,
).toBeGreaterThan(0);
});
it("should detect poor structure", async () => {
const poorStructure = `ProjectTitle\nSome text without proper headings or spacing.More text.Even more text without breaks.`;
await fs.writeFile(readmePath, poorStructure);
const result = await analyzeReadme({
project_path: testDir,
});
expect(result.success).toBe(true);
expect(
result.data?.analysis.structureAnalysis.scannabilityScore,
).toBeLessThan(50);
});
});
describe("content analysis", () => {
it("should detect TL;DR section", async () => {
const readmeWithTldr = `# Project\n\n## TL;DR\n\nQuick overview here.\n\n## Details\n\nMore info.`;
await fs.writeFile(readmePath, readmeWithTldr);
const result = await analyzeReadme({
project_path: testDir,
});
expect(result.success).toBe(true);
expect(result.data?.analysis.contentAnalysis.hasTldr).toBe(true);
});
it("should detect quick start section", async () => {
const readmeWithQuickStart = `# Project\n\n## Quick Start\n\nGet started quickly.\n\n## Installation\n\nDetailed setup.`;
await fs.writeFile(readmePath, readmeWithQuickStart);
const result = await analyzeReadme({
project_path: testDir,
});
expect(result.success).toBe(true);
expect(result.data?.analysis.contentAnalysis.hasQuickStart).toBe(true);
});
it("should count code blocks and links", async () => {
const readmeWithCodeAndLinks = `# Project
## Installation
\`\`\`bash
npm install
\`\`\`
## Usage
\`\`\`javascript
const lib = require('lib');
\`\`\`
See [documentation](https://example.com) and [API reference](https://api.example.com).`;
await fs.writeFile(readmePath, readmeWithCodeAndLinks);
const result = await analyzeReadme({
project_path: testDir,
});
expect(result.success).toBe(true);
expect(result.data?.analysis.contentAnalysis.codeBlockCount).toBe(2);
expect(result.data?.analysis.contentAnalysis.linkCount).toBe(2);
});
});
describe("community readiness", () => {
it("should detect community files", async () => {
const readmeContent = `# Project\n\nSee [CONTRIBUTING.md](CONTRIBUTING.md) and [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md).`;
await fs.writeFile(readmePath, readmeContent);
await fs.writeFile(
join(testDir, "CONTRIBUTING.md"),
"Contributing guidelines",
);
await fs.writeFile(
join(testDir, "CODE_OF_CONDUCT.md"),
"Code of conduct",
);
const result = await analyzeReadme({
project_path: testDir,
});
expect(result.success).toBe(true);
expect(result.data?.analysis.communityReadiness.hasContributing).toBe(
true,
);
expect(result.data?.analysis.communityReadiness.hasCodeOfConduct).toBe(
true,
);
});
it("should count badges", async () => {
const readmeWithBadges = `# Project
[![Build Status](https://travis-ci.org/user/repo.svg)](https://travis-ci.org/user/repo)
[![npm version](https://badge.fury.io/js/package.svg)](https://badge.fury.io/js/package)
Description here.`;
await fs.writeFile(readmePath, readmeWithBadges);
const result = await analyzeReadme({
project_path: testDir,
});
expect(result.success).toBe(true);
expect(result.data?.analysis.communityReadiness.badgeCount).toBe(2);
});
});
describe("optimization opportunities", () => {
it("should identify length reduction opportunities", async () => {
const longReadme = Array(500)
.fill("# Section\n\nLong content here that exceeds target length.\n")
.join("\n");
await fs.writeFile(readmePath, longReadme);
const result = await analyzeReadme({
project_path: testDir,
max_length_target: 200,
optimization_level: "aggressive",
});
expect(result.success).toBe(true);
expect(
result.data?.analysis.optimizationOpportunities.length,
).toBeGreaterThan(0);
expect(
result.data?.analysis.optimizationOpportunities.some(
(op) => op.type === "length_reduction",
),
).toBe(true);
});
it("should identify content enhancement opportunities", async () => {
const basicReadme = `# Project\n\nBasic description.\n\n## Installation\n\nnpm install`;
await fs.writeFile(readmePath, basicReadme);
const result = await analyzeReadme({
project_path: testDir,
target_audience: "community_contributors",
});
expect(result.success).toBe(true);
expect(
result.data?.analysis.optimizationOpportunities.some(
(op) => op.type === "content_enhancement",
),
).toBe(true);
});
});
describe("scoring system", () => {
it("should calculate overall score", async () => {
const goodReadme = `# Excellent Project
> Clear, concise description of what this project does
[![Build Status](https://travis-ci.org/user/repo.svg)](https://travis-ci.org/user/repo)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
## TL;DR
This project solves X problem for Y users. Perfect for Z use cases.
## Quick Start
\`\`\`bash
npm install excellent-project
\`\`\`
\`\`\`javascript
const project = require('excellent-project');
project.doSomething();
\`\`\`
## Prerequisites
- Node.js 16+
- npm or yarn
## Usage
Detailed usage examples here.
## Contributing
See [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
## License
MIT © Author`;
await fs.writeFile(readmePath, goodReadme);
await fs.writeFile(join(testDir, "CONTRIBUTING.md"), "Guidelines");
const result = await analyzeReadme({
project_path: testDir,
});
expect(result.success).toBe(true);
expect(result.data?.analysis.overallScore).toBeGreaterThan(70);
});
it("should provide lower score for poor README", async () => {
const poorReadme = `ProjectName\nSome description\nInstall it\nUse it`;
await fs.writeFile(readmePath, poorReadme);
const result = await analyzeReadme({
project_path: testDir,
});
expect(result.success).toBe(true);
expect(result.data?.analysis.overallScore).toBeLessThan(50);
});
});
describe("recommendations and next steps", () => {
it("should provide relevant recommendations", async () => {
const basicReadme = `# Project\n\nDescription`;
await fs.writeFile(readmePath, basicReadme);
const result = await analyzeReadme({
project_path: testDir,
target_audience: "community_contributors",
optimization_level: "moderate",
});
expect(result.success).toBe(true);
expect(result.data?.analysis.recommendations.length).toBeGreaterThan(0);
expect(result.data?.nextSteps.length).toBeGreaterThan(0);
});
it("should tailor recommendations to target audience", async () => {
const readmeContent = `# Enterprise Tool\n\nBasic description`;
await fs.writeFile(readmePath, readmeContent);
const result = await analyzeReadme({
project_path: testDir,
target_audience: "enterprise_users",
});
expect(result.success).toBe(true);
expect(
result.data?.analysis.recommendations.some(
(rec) =>
rec.includes("enterprise") ||
rec.includes("security") ||
rec.includes("support"),
),
).toBe(true);
});
});
describe("project context detection", () => {
it("should detect JavaScript project", async () => {
const readmeContent = `# JS Project\n\nA JavaScript project`;
await fs.writeFile(readmePath, readmeContent);
await fs.writeFile(join(testDir, "package.json"), '{"name": "test"}');
const result = await analyzeReadme({
project_path: testDir,
});
expect(result.success).toBe(true);
// Should analyze successfully with project context
expect(result.data?.analysis).toBeDefined();
});
it("should handle projects without specific type indicators", async () => {
const readmeContent = `# Generic Project\n\nSome project`;
await fs.writeFile(readmePath, readmeContent);
const result = await analyzeReadme({
project_path: testDir,
});
expect(result.success).toBe(true);
expect(result.data?.analysis).toBeDefined();
});
});
});
```
--------------------------------------------------------------------------------
/docs/sitemap.xml:
--------------------------------------------------------------------------------
```
<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url>
<loc>https://tosin2013.github.io/documcp/</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>weekly</changefreq>
<priority>1.0</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/tutorials/</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>1.0</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/tutorials/development-setup.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>1.0</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/tutorials/environment-setup.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>1.0</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/tutorials/first-deployment.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>1.0</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/tutorials/getting-started.html</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>monthly</changefreq>
<priority>1.0</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/tutorials/memory-workflows.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>1.0</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/tutorials/user-onboarding.html</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>monthly</changefreq>
<priority>1.0</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/how-to/</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/how-to/analytics-setup.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/how-to/custom-domains.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/how-to/github-pages-deployment.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/how-to/local-testing.html</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>monthly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/how-to/performance-optimization.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/how-to/prompting-guide.html</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>monthly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/how-to/repository-analysis.html</loc>
<lastmod>2025-09-30</lastmod>
<changefreq>monthly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/how-to/seo-optimization.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/how-to/site-monitoring.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/how-to/troubleshooting.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/how-to/usage-examples.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/domain-1-mcp-architecture/</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>weekly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/domain-3-ssg-recommendation/</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>weekly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/domain-5-github-deployment/</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>weekly</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/006-mcp-tools-api-design.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/api/</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/api/hierarchy.html</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/api/modules.html</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/api/variables/TOOLS.html</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/reference/</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/reference/api-overview.html</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/reference/cli.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/reference/configuration.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/reference/mcp-tools.html</loc>
<lastmod>2025-09-30</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/reference/prompt-templates.html</loc>
<lastmod>2025-09-30</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/domain-6-api-design/README.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/explanation/</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>monthly</changefreq>
<priority>0.7</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/explanation/architecture.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>0.7</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/001-mcp-server-architecture.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/002-repository-analysis-engine.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/003-static-site-generator-recommendation-engine.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/004-diataxis-framework-integration.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/005-github-pages-deployment-automation.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/007-mcp-prompts-and-resources-integration.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/008-intelligent-content-population-engine.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/009-content-accuracy-validation-framework.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/010-mcp-resource-pattern-redesign.html</loc>
<lastmod>2025-10-09</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/adrs/README.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/development/MCP_INSPECTOR_TESTING.html</loc>
<lastmod>2025-10-09</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/guides/link-validation.html</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/guides/playwright-integration.html</loc>
<lastmod>2025-10-04</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/guides/playwright-testing-workflow.html</loc>
<lastmod>2025-10-12</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/phase-2-intelligence.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/cross-domain-integration/README.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/domain-1-mcp-architecture/mcp-performance-research.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/domain-2-repository-analysis/README.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/domain-3-ssg-recommendation/ssg-performance-analysis.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/domain-4-diataxis-integration/README.html</loc>
<lastmod>2025-10-02</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/domain-5-github-deployment/github-pages-security-analysis.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/README.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/research-integration-summary-2025-01-14.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/research-progress-template.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://tosin2013.github.io/documcp/research/research-questions-2025-01-14.html</loc>
<lastmod>2025-10-01</lastmod>
<changefreq>monthly</changefreq>
<priority>0.5</priority>
</url>
</urlset>
```
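The entries above follow the sitemaps.org protocol: each `<url>` pairs a required `<loc>` with an optional `<lastmod>` (W3C `YYYY-MM-DD` date), a `<changefreq>` hint (`always` through `never`), and a `<priority>` between 0.0 and 1.0. A minimal validation sketch in TypeScript, assuming Node's built-in `fs`, a regex-based parse for brevity rather than a full XML parser, and a hypothetical file path (`docs/static/sitemap.xml` is not confirmed by the repo layout):

```typescript
// Minimal sketch: sanity-check sitemap entries against the sitemaps.org protocol.
// The path below is an assumption for illustration, not the repo's actual location.
import { readFileSync } from "fs";

const ALLOWED_FREQ = new Set([
  "always", "hourly", "daily", "weekly", "monthly", "yearly", "never",
]);

const xml = readFileSync("docs/static/sitemap.xml", "utf8");
const urlBlocks = xml.match(/<url>[\s\S]*?<\/url>/g) ?? [];

for (const block of urlBlocks) {
  const loc = block.match(/<loc>(.*?)<\/loc>/)?.[1];
  const lastmod = block.match(/<lastmod>(.*?)<\/lastmod>/)?.[1];
  const changefreq = block.match(/<changefreq>(.*?)<\/changefreq>/)?.[1];
  const priority = block.match(/<priority>(.*?)<\/priority>/)?.[1];

  // <loc> is required and must be an absolute URL.
  if (!loc?.startsWith("https://")) console.warn(`bad loc: ${loc}`);
  // <lastmod> must be a W3C date (YYYY-MM-DD) when present.
  if (lastmod && !/^\d{4}-\d{2}-\d{2}$/.test(lastmod))
    console.warn(`bad lastmod in ${loc}: ${lastmod}`);
  // <changefreq> must be one of the seven protocol values.
  if (changefreq && !ALLOWED_FREQ.has(changefreq))
    console.warn(`bad changefreq in ${loc}: ${changefreq}`);
  // <priority> must fall within [0.0, 1.0].
  if (priority && (Number(priority) < 0 || Number(priority) > 1))
    console.warn(`priority out of range in ${loc}: ${priority}`);
}

console.log(`checked ${urlBlocks.length} <url> entries`);
```

Run against the sitemap above, a check like this would confirm the pattern the file already follows: high-traffic reference and API pages at priority 0.8 with `weekly` change frequency, and ADRs, guides, and research notes at 0.5–0.7 with `monthly`.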