This is page 3 of 20. Use http://codebase.md/tosin2013/documcp?page={x} to view the full context.
# Directory Structure
```
├── .dockerignore
├── .eslintignore
├── .eslintrc.json
├── .github
│   ├── agents
│   │   ├── documcp-ast.md
│   │   ├── documcp-deploy.md
│   │   ├── documcp-memory.md
│   │   ├── documcp-test.md
│   │   └── documcp-tool.md
│   ├── copilot-instructions.md
│   ├── dependabot.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── automated-changelog.md
│   │   ├── bug_report.md
│   │   ├── bug_report.yml
│   │   ├── documentation_issue.md
│   │   ├── feature_request.md
│   │   ├── feature_request.yml
│   │   ├── npm-publishing-fix.md
│   │   └── release_improvements.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── release-drafter.yml
│   └── workflows
│       ├── auto-merge.yml
│       ├── ci.yml
│       ├── codeql.yml
│       ├── dependency-review.yml
│       ├── deploy-docs.yml
│       ├── README.md
│       ├── release-drafter.yml
│       └── release.yml
├── .gitignore
├── .husky
│   ├── commit-msg
│   └── pre-commit
├── .linkcheck.config.json
├── .markdown-link-check.json
├── .nvmrc
├── .pre-commit-config.yaml
├── .versionrc.json
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── CONTRIBUTING.md
├── docker-compose.docs.yml
├── Dockerfile.docs
├── docs
│   ├── .docusaurus
│   │   ├── docusaurus-plugin-content-docs
│   │   │   └── default
│   │   │       └── __mdx-loader-dependency.json
│   │   └── docusaurus-plugin-content-pages
│   │       └── default
│   │           └── __plugin.json
│   ├── adrs
│   │   ├── 001-mcp-server-architecture.md
│   │   ├── 002-repository-analysis-engine.md
│   │   ├── 003-static-site-generator-recommendation-engine.md
│   │   ├── 004-diataxis-framework-integration.md
│   │   ├── 005-github-pages-deployment-automation.md
│   │   ├── 006-mcp-tools-api-design.md
│   │   ├── 007-mcp-prompts-and-resources-integration.md
│   │   ├── 008-intelligent-content-population-engine.md
│   │   ├── 009-content-accuracy-validation-framework.md
│   │   ├── 010-mcp-resource-pattern-redesign.md
│   │   └── README.md
│   ├── api
│   │   ├── .nojekyll
│   │   ├── assets
│   │   │   ├── hierarchy.js
│   │   │   ├── highlight.css
│   │   │   ├── icons.js
│   │   │   ├── icons.svg
│   │   │   ├── main.js
│   │   │   ├── navigation.js
│   │   │   ├── search.js
│   │   │   └── style.css
│   │   ├── hierarchy.html
│   │   ├── index.html
│   │   ├── modules.html
│   │   └── variables
│   │       └── TOOLS.html
│   ├── assets
│   │   └── logo.svg
│   ├── development
│   │   └── MCP_INSPECTOR_TESTING.md
│   ├── docusaurus.config.js
│   ├── explanation
│   │   ├── architecture.md
│   │   └── index.md
│   ├── guides
│   │   ├── link-validation.md
│   │   ├── playwright-integration.md
│   │   └── playwright-testing-workflow.md
│   ├── how-to
│   │   ├── analytics-setup.md
│   │   ├── custom-domains.md
│   │   ├── documentation-freshness-tracking.md
│   │   ├── github-pages-deployment.md
│   │   ├── index.md
│   │   ├── local-testing.md
│   │   ├── performance-optimization.md
│   │   ├── prompting-guide.md
│   │   ├── repository-analysis.md
│   │   ├── seo-optimization.md
│   │   ├── site-monitoring.md
│   │   ├── troubleshooting.md
│   │   └── usage-examples.md
│   ├── index.md
│   ├── knowledge-graph.md
│   ├── package-lock.json
│   ├── package.json
│   ├── phase-2-intelligence.md
│   ├── reference
│   │   ├── api-overview.md
│   │   ├── cli.md
│   │   ├── configuration.md
│   │   ├── deploy-pages.md
│   │   ├── index.md
│   │   ├── mcp-tools.md
│   │   └── prompt-templates.md
│   ├── research
│   │   ├── cross-domain-integration
│   │   │   └── README.md
│   │   ├── domain-1-mcp-architecture
│   │   │   ├── index.md
│   │   │   └── mcp-performance-research.md
│   │   ├── domain-2-repository-analysis
│   │   │   └── README.md
│   │   ├── domain-3-ssg-recommendation
│   │   │   ├── index.md
│   │   │   └── ssg-performance-analysis.md
│   │   ├── domain-4-diataxis-integration
│   │   │   └── README.md
│   │   ├── domain-5-github-deployment
│   │   │   ├── github-pages-security-analysis.md
│   │   │   └── index.md
│   │   ├── domain-6-api-design
│   │   │   └── README.md
│   │   ├── README.md
│   │   ├── research-integration-summary-2025-01-14.md
│   │   ├── research-progress-template.md
│   │   └── research-questions-2025-01-14.md
│   ├── robots.txt
│   ├── sidebars.js
│   ├── sitemap.xml
│   ├── src
│   │   └── css
│   │       └── custom.css
│   └── tutorials
│       ├── development-setup.md
│       ├── environment-setup.md
│       ├── first-deployment.md
│       ├── getting-started.md
│       ├── index.md
│       ├── memory-workflows.md
│       └── user-onboarding.md
├── jest.config.js
├── LICENSE
├── Makefile
├── MCP_PHASE2_IMPLEMENTATION.md
├── mcp-config-example.json
├── mcp.json
├── package-lock.json
├── package.json
├── README.md
├── release.sh
├── scripts
│   └── check-package-structure.cjs
├── SECURITY.md
├── setup-precommit.sh
├── src
│   ├── benchmarks
│   │   └── performance.ts
│   ├── index.ts
│   ├── memory
│   │   ├── contextual-retrieval.ts
│   │   ├── deployment-analytics.ts
│   │   ├── enhanced-manager.ts
│   │   ├── export-import.ts
│   │   ├── freshness-kg-integration.ts
│   │   ├── index.ts
│   │   ├── integration.ts
│   │   ├── kg-code-integration.ts
│   │   ├── kg-health.ts
│   │   ├── kg-integration.ts
│   │   ├── kg-link-validator.ts
│   │   ├── kg-storage.ts
│   │   ├── knowledge-graph.ts
│   │   ├── learning.ts
│   │   ├── manager.ts
│   │   ├── multi-agent-sharing.ts
│   │   ├── pruning.ts
│   │   ├── schemas.ts
│   │   ├── storage.ts
│   │   ├── temporal-analysis.ts
│   │   ├── user-preferences.ts
│   │   └── visualization.ts
│   ├── prompts
│   │   └── technical-writer-prompts.ts
│   ├── scripts
│   │   └── benchmark.ts
│   ├── templates
│   │   └── playwright
│   │       ├── accessibility.spec.template.ts
│   │       ├── Dockerfile.template
│   │       ├── docs-e2e.workflow.template.yml
│   │       ├── link-validation.spec.template.ts
│   │       └── playwright.config.template.ts
│   ├── tools
│   │   ├── analyze-deployments.ts
│   │   ├── analyze-readme.ts
│   │   ├── analyze-repository.ts
│   │   ├── check-documentation-links.ts
│   │   ├── deploy-pages.ts
│   │   ├── detect-gaps.ts
│   │   ├── evaluate-readme-health.ts
│   │   ├── generate-config.ts
│   │   ├── generate-contextual-content.ts
│   │   ├── generate-llm-context.ts
│   │   ├── generate-readme-template.ts
│   │   ├── generate-technical-writer-prompts.ts
│   │   ├── kg-health-check.ts
│   │   ├── manage-preferences.ts
│   │   ├── manage-sitemap.ts
│   │   ├── optimize-readme.ts
│   │   ├── populate-content.ts
│   │   ├── readme-best-practices.ts
│   │   ├── recommend-ssg.ts
│   │   ├── setup-playwright-tests.ts
│   │   ├── setup-structure.ts
│   │   ├── sync-code-to-docs.ts
│   │   ├── test-local-deployment.ts
│   │   ├── track-documentation-freshness.ts
│   │   ├── update-existing-documentation.ts
│   │   ├── validate-content.ts
│   │   ├── validate-documentation-freshness.ts
│   │   ├── validate-readme-checklist.ts
│   │   └── verify-deployment.ts
│   ├── types
│   │   └── api.ts
│   ├── utils
│   │   ├── ast-analyzer.ts
│   │   ├── code-scanner.ts
│   │   ├── content-extractor.ts
│   │   ├── drift-detector.ts
│   │   ├── freshness-tracker.ts
│   │   ├── language-parsers-simple.ts
│   │   ├── permission-checker.ts
│   │   └── sitemap-generator.ts
│   └── workflows
│       └── documentation-workflow.ts
├── test-docs-local.sh
├── tests
│   ├── api
│   │   └── mcp-responses.test.ts
│   ├── benchmarks
│   │   └── performance.test.ts
│   ├── edge-cases
│   │   └── error-handling.test.ts
│   ├── functional
│   │   └── tools.test.ts
│   ├── integration
│   │   ├── kg-documentation-workflow.test.ts
│   │   ├── knowledge-graph-workflow.test.ts
│   │   ├── mcp-readme-tools.test.ts
│   │   ├── memory-mcp-tools.test.ts
│   │   ├── readme-technical-writer.test.ts
│   │   └── workflow.test.ts
│   ├── memory
│   │   ├── contextual-retrieval.test.ts
│   │   ├── enhanced-manager.test.ts
│   │   ├── export-import.test.ts
│   │   ├── freshness-kg-integration.test.ts
│   │   ├── kg-code-integration.test.ts
│   │   ├── kg-health.test.ts
│   │   ├── kg-link-validator.test.ts
│   │   ├── kg-storage-validation.test.ts
│   │   ├── kg-storage.test.ts
│   │   ├── knowledge-graph-enhanced.test.ts
│   │   ├── knowledge-graph.test.ts
│   │   ├── learning.test.ts
│   │   ├── manager-advanced.test.ts
│   │   ├── manager.test.ts
│   │   ├── mcp-resource-integration.test.ts
│   │   ├── mcp-tool-persistence.test.ts
│   │   ├── schemas.test.ts
│   │   ├── storage.test.ts
│   │   ├── temporal-analysis.test.ts
│   │   └── user-preferences.test.ts
│   ├── performance
│   │   ├── memory-load-testing.test.ts
│   │   └── memory-stress-testing.test.ts
│   ├── prompts
│   │   ├── guided-workflow-prompts.test.ts
│   │   └── technical-writer-prompts.test.ts
│   ├── server.test.ts
│   ├── setup.ts
│   ├── tools
│   │   ├── all-tools.test.ts
│   │   ├── analyze-coverage.test.ts
│   │   ├── analyze-deployments.test.ts
│   │   ├── analyze-readme.test.ts
│   │   ├── analyze-repository.test.ts
│   │   ├── check-documentation-links.test.ts
│   │   ├── deploy-pages-kg-retrieval.test.ts
│   │   ├── deploy-pages-tracking.test.ts
│   │   ├── deploy-pages.test.ts
│   │   ├── detect-gaps.test.ts
│   │   ├── evaluate-readme-health.test.ts
│   │   ├── generate-contextual-content.test.ts
│   │   ├── generate-llm-context.test.ts
│   │   ├── generate-readme-template.test.ts
│   │   ├── generate-technical-writer-prompts.test.ts
│   │   ├── kg-health-check.test.ts
│   │   ├── manage-sitemap.test.ts
│   │   ├── optimize-readme.test.ts
│   │   ├── readme-best-practices.test.ts
│   │   ├── recommend-ssg-historical.test.ts
│   │   ├── recommend-ssg-preferences.test.ts
│   │   ├── recommend-ssg.test.ts
│   │   ├── simple-coverage.test.ts
│   │   ├── sync-code-to-docs.test.ts
│   │   ├── test-local-deployment.test.ts
│   │   ├── tool-error-handling.test.ts
│   │   ├── track-documentation-freshness.test.ts
│   │   ├── validate-content.test.ts
│   │   ├── validate-documentation-freshness.test.ts
│   │   └── validate-readme-checklist.test.ts
│   ├── types
│   │   └── type-safety.test.ts
│   └── utils
│       ├── ast-analyzer.test.ts
│       ├── content-extractor.test.ts
│       ├── drift-detector.test.ts
│       ├── freshness-tracker.test.ts
│       └── sitemap-generator.test.ts
├── tsconfig.json
└── typedoc.json
```
# Files
--------------------------------------------------------------------------------
/docs/reference/cli.md:
--------------------------------------------------------------------------------
```markdown
---
documcp:
last_updated: "2025-11-20T00:46:21.960Z"
last_validated: "2025-11-20T00:46:21.960Z"
auto_updated: false
update_frequency: monthly
---
# Command Line Interface
DocuMCP primarily operates as an MCP server integrated with AI assistants, but it also provides command-line utilities for direct usage and debugging.
## MCP Server Usage
The primary way to use DocuMCP is through MCP-compatible clients:
### Starting the MCP Server
```bash
# Using npx (recommended)
npx documcp
# Using global installation
documcp
# Using Node.js directly
node dist/index.js
```
### Server Information
```bash
# Check version
documcp --version
# Show help
documcp --help
# Debug mode
DEBUG=* documcp
```
## MCP Client Integration
### Claude Desktop Configuration
Add to `claude_desktop_config.json`:
```json
{
"mcpServers": {
"documcp": {
"command": "npx",
"args": ["documcp"],
"env": {
"DOCUMCP_STORAGE_DIR": "/path/to/storage"
}
}
}
}
```
### Environment Variables
| Variable | Description | Default |
| --------------------- | ------------------------ | ----------------- |
| `DOCUMCP_STORAGE_DIR` | Memory storage directory | `.documcp/memory` |
| `DEBUG` | Enable debug logging | `false` |
| `NODE_ENV` | Node.js environment | `development` |
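For reference, a minimal sketch of how the storage directory could be resolved from these variables (the actual resolution logic lives in `src/memory` and may differ):
```typescript
import path from "path";

// Hypothetical helper: falls back to the documented default when unset.
function resolveStorageDir(): string {
  return (
    process.env.DOCUMCP_STORAGE_DIR ??
    path.join(process.cwd(), ".documcp/memory")
  );
}
```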
## Development Commands
For development and testing:
### Build Commands
```bash
# Build TypeScript
npm run build
# Build in watch mode
npm run dev
# Type checking
npm run typecheck
```
### Testing Commands
```bash
# Run all tests
npm test
# Run tests with coverage
npm run test:coverage
# Run performance benchmarks
npm run test:performance
# CI test run
npm run test:ci
```
### Code Quality Commands
```bash
# Lint code
npm run lint
# Fix linting issues
npm run lint:fix
# Format code
npm run format
# Check formatting
npm run format:check
# Full validation
npm run validate:rules
```
### Documentation Commands
```bash
# Check documentation links
npm run docs:check-links
# Check external links
npm run docs:check-links:external
# Check internal links only
npm run docs:check-links:internal
# Validate documentation structure
npm run docs:validate
# Complete documentation test
npm run docs:test
```
### Security Commands
```bash
# Check for vulnerabilities
npm run security:check
# Audit dependencies
npm audit
# Fix security issues
npm audit fix
```
### Benchmark Commands
```bash
# Run performance benchmarks
npm run benchmark:run
# Show current performance metrics
npm run benchmark:current
# Create benchmark configuration
npm run benchmark:create-config
# Show benchmark help
npm run benchmark:help
```
## Tool Invocation via CLI
While DocuMCP is designed for MCP integration, you can test tools via Node.js:
### Direct Tool Testing
```javascript
// test-tool.js
import { analyzeRepository } from "./dist/tools/analyze-repository.js";
async function test() {
const result = await analyzeRepository({
path: process.cwd(),
depth: "standard",
});
console.log(JSON.stringify(result, null, 2));
}
test().catch(console.error);
```
```bash
# Run test
node test-tool.js
```
### Tool-Specific Examples
**Repository Analysis:**
```javascript
import { analyzeRepository } from "./dist/tools/analyze-repository.js";
const analysis = await analyzeRepository({
path: "/path/to/repository",
depth: "deep",
});
```
**SSG Recommendation:**
```javascript
import { recommendSSG } from "./dist/tools/recommend-ssg.js";
const recommendation = await recommendSSG({
analysisId: "analysis_12345",
preferences: {
ecosystem: "javascript",
priority: "features",
},
});
```
**Configuration Generation:**
```javascript
import { generateConfig } from "./dist/tools/generate-config.js";
const config = await generateConfig({
ssg: "docusaurus",
projectName: "My Project",
outputPath: "./docs",
});
```
## Debugging
### Debug Modes
Enable detailed logging:
```bash
# All debug info
DEBUG=* documcp
# Specific modules
DEBUG=documcp:* documcp
DEBUG=documcp:analysis documcp
DEBUG=documcp:memory documcp
```
### Log Levels
DocuMCP supports different log levels:
```bash
# Error only
NODE_ENV=production documcp
# Development (verbose)
NODE_ENV=development documcp
# Custom logging
DEBUG=documcp:error,documcp:warn documcp
```
### Performance Debugging
```bash
# Enable performance tracking
DEBUG=documcp:perf documcp
# Memory usage tracking
DEBUG=documcp:memory documcp
# Network requests
DEBUG=documcp:http documcp
```
## Configuration Files
### Project-level Configuration
Create `.documcprc.json` in your project:
```json
{
"storage": {
"directory": ".documcp/memory",
"maxEntries": 1000,
"cleanupDays": 30
},
"analysis": {
"defaultDepth": "standard",
"excludePatterns": ["node_modules", ".git", "dist"]
},
"deployment": {
"defaultBranch": "gh-pages",
"verifyDeployment": true
}
}
```
### Global Configuration
Create `~/.documcp/config.json`:
```json
{
"defaultPreferences": {
"ecosystem": "any",
"priority": "simplicity"
},
"github": {
"defaultOrg": "your-username"
},
"memory": {
"enableLearning": true,
"shareAnonymousData": false
}
}
```
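A sketch of how these two files could be merged, with project-level settings overriding global ones (file names follow the examples above; the actual loader, if any, may differ):
```typescript
import { promises as fs } from "fs";
import os from "os";
import path from "path";

async function readJsonIfExists(
  file: string,
): Promise<Record<string, unknown>> {
  try {
    return JSON.parse(await fs.readFile(file, "utf-8"));
  } catch {
    return {}; // missing or unreadable file: treat as empty config
  }
}

async function loadConfig(projectDir: string) {
  const globalConfig = await readJsonIfExists(
    path.join(os.homedir(), ".documcp/config.json"),
  );
  const projectConfig = await readJsonIfExists(
    path.join(projectDir, ".documcprc.json"),
  );
  return { ...globalConfig, ...projectConfig }; // project-level keys win
}
```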
## Exit Codes
DocuMCP uses standard exit codes:
| Code | Meaning |
| ---- | -------------------- |
| 0 | Success |
| 1 | General error |
| 2 | Invalid arguments |
| 3 | File system error |
| 4 | Network error |
| 5 | Configuration error |
| 6 | Tool execution error |
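Wrapper scripts can branch on these codes. A small illustrative example (not part of DocuMCP itself):
```typescript
import { spawnSync } from "node:child_process";

// Messages mirror the exit-code table above, which is the source of truth.
const EXIT_MESSAGES: Record<number, string> = {
  0: "Success",
  1: "General error",
  2: "Invalid arguments",
  3: "File system error",
  4: "Network error",
  5: "Configuration error",
  6: "Tool execution error",
};

const { status } = spawnSync("documcp", ["--version"], { stdio: "inherit" });
console.log(EXIT_MESSAGES[status ?? 1] ?? `Unknown exit code: ${status}`);
```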
## Scripting and Automation
### Batch Operations
Create scripts for common workflows:
```bash
#!/bin/bash
# deploy-docs.sh
set -e
echo "Starting documentation deployment..."
# Test locally first
echo "Testing local build..."
npm run docs:validate
# Deploy via DocuMCP
echo "Analyzing repository..."
# Trigger MCP analysis through your client
echo "Deployment complete!"
```
### CI/CD Integration
DocuMCP can be used in CI/CD pipelines:
```yaml
# .github/workflows/docs.yml
name: Documentation
on:
push:
branches: [main]
paths: ["docs/**", "*.md"]
jobs:
deploy-docs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: "20"
- name: Install DocuMCP
run: npm install -g documcp
- name: Validate documentation
run: |
# Use DocuMCP validation tools
npm run docs:validate
```
### Programmatic Usage
For advanced integration:
```javascript
// integration.js
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { analyzeRepository } from "./dist/tools/analyze-repository.js";
import { recommendSSG } from "./dist/tools/recommend-ssg.js";
import { deployPages } from "./dist/tools/deploy-pages.js";
class DocuMCPIntegration {
async deployDocumentation(repoPath) {
// Analyze
const analysis = await analyzeRepository({
path: repoPath,
depth: "standard",
});
// Get recommendation
const recommendation = await recommendSSG({
analysisId: analysis.id,
});
// Deploy
const deployment = await deployPages({
repository: repoPath,
ssg: recommendation.recommended,
});
return { analysis, recommendation, deployment };
}
}
```
## Troubleshooting CLI Issues
### Common Problems
**Command not found:**
```bash
# Check installation
which documcp
npm list -g documcp
# Reinstall if needed
npm uninstall -g documcp
npm install -g documcp
```
**Permission errors:**
```bash
# Check permissions
ls -la $(which documcp)
# Fix permissions
chmod +x $(which documcp)
```
**Module resolution errors:**
```bash
# Clear npm cache
npm cache clean --force
# Rebuild
npm run build
```
### Getting Help
```bash
# Show help
documcp --help
# Show version
documcp --version
# Contact support
echo "Report issues: https://github.com/tosin2013/documcp/issues"
```
For more detailed troubleshooting, see the [Troubleshooting Guide](../how-to/troubleshooting.md).
```
--------------------------------------------------------------------------------
/docs/adrs/007-mcp-prompts-and-resources-integration.md:
--------------------------------------------------------------------------------
```markdown
---
id: 007-mcp-prompts-and-resources-integration
title: "ADR-007: MCP Prompts and Resources Integration"
sidebar_label: "ADR-007: MCP Prompts and Resources Integration"
sidebar_position: 7
documcp:
last_updated: "2025-11-20T00:46:21.941Z"
last_validated: "2025-11-20T00:46:21.941Z"
auto_updated: false
update_frequency: monthly
---
# ADR-007: MCP Prompts and Resources Integration for AI Assistance
## Status
Proposed
## Context
DocuMCP needs AI assistance capabilities, and the Model Context Protocol provides native support for exactly this use case through **Prompts** and **Resources**. Rather than extending the protocol, we should leverage MCP's built-in capabilities:
- **MCP Prompts**: Pre-written templates that help users accomplish specific tasks
- **MCP Resources**: File-like data that can be read by clients (like API responses, file contents, or generated documentation)
Current MCP Core Concepts that we can utilize:
1. **Tools**: Interactive functions (already implemented - analyze_repository, recommend_ssg, etc.)
2. **Prompts**: Template-based assistance for common workflows
3. **Resources**: Readable data and content that clients can access
This approach maintains full MCP compliance while providing rich AI assistance through the protocol's intended mechanisms.
## Decision
We will implement AI assistance using MCP's native **Prompts** and **Resources** capabilities, providing pre-written prompt templates for documentation workflows and exposing generated content through the MCP resource system.
### Core Implementation Strategy:
#### 1. MCP Prompts for Documentation Workflows
```typescript
// Implement MCP ListPromptsRequestSchema and GetPromptRequestSchema
const DOCUMENTATION_PROMPTS = [
{
name: "analyze-and-recommend",
description: "Complete repository analysis and SSG recommendation workflow",
arguments: [
{
name: "repository_path",
description: "Path to repository",
required: true,
},
{
name: "priority",
description: "Priority: simplicity, features, performance",
},
],
},
{
name: "setup-documentation",
description:
"Create comprehensive documentation structure with best practices",
arguments: [
{ name: "project_name", description: "Project name", required: true },
{ name: "ssg_type", description: "Static site generator type" },
],
},
{
name: "troubleshoot-deployment",
description: "Diagnose and fix GitHub Pages deployment issues",
arguments: [
{
name: "repository_url",
description: "GitHub repository URL",
required: true,
},
{ name: "error_message", description: "Deployment error message" },
],
},
];
```
#### 2. MCP Resources for Generated Content
```typescript
// Implement ListResourcesRequestSchema and ReadResourceRequestSchema
interface DocuMCPResource {
uri: string; // e.g., "documcp://analysis/repo-123"
name: string; // Human-readable name
description: string; // What this resource contains
mimeType: string; // Content type
}
// Resource types we'll expose:
const RESOURCE_TYPES = [
"documcp://analysis/{analysisId}", // Repository analysis results
"documcp://config/{ssgType}/{projectId}", // Generated configuration files
"documcp://structure/{projectId}", // Documentation structure templates
"documcp://deployment/{workflowId}", // GitHub Actions workflows
"documcp://templates/{templateType}", // Reusable templates
];
```
#### 3. Integration with Existing Tools
- **Tools remain unchanged**: analyze_repository, recommend_ssg, generate_config, etc.
- **Prompts provide workflows**: Chain multiple tool calls with guided prompts
- **Resources expose results**: Make tool outputs accessible as MCP resources
### Example Workflow Integration:
```typescript
// MCP Prompt: "analyze-and-recommend"
// Generated prompt text that guides the user through:
// 1. Call analyze_repository tool
// 2. Review analysis results via documcp://analysis/{id} resource
// 3. Call recommend_ssg tool with analysis results
// 4. Access recommendations via documcp://recommendations/{id} resource
// 5. Call generate_config with selected SSG
```
## Alternatives Considered
### Alternative 1: Custom Protocol Extensions (Previous Approach)
- **Pros**: Maximum flexibility, custom AI features
- **Cons**: Protocol complexity, compatibility issues, non-standard
- **Decision**: Rejected in favor of MCP-native approach
### Alternative 2: Tools-Only Approach
- **Pros**: Simple, already implemented
- **Cons**: No guided workflows, no template assistance, harder user experience
- **Decision**: Insufficient for comprehensive AI assistance
### Alternative 3: External AI Service Integration
- **Pros**: Leverage existing AI platforms
- **Cons**: Breaks MCP cohesion, additional dependencies, latency
- **Decision**: Conflicts with MCP server simplicity
## Consequences
### Positive Consequences
- **MCP Compliance**: Uses protocol as designed, no custom extensions needed
- **Client Compatibility**: Works with all MCP clients (Claude Desktop, GitHub Copilot, etc.)
- **Guided Workflows**: Prompts provide step-by-step assistance for complex tasks
- **Rich Content Access**: Resources make generated content easily accessible
- **Template Reusability**: Prompts can be customized and reused across projects
- **Simplified Architecture**: No need for custom protocol handling or AI-specific interfaces
### Negative Consequences
- **Prompt Complexity**: Complex workflows require sophisticated prompt engineering
- **Resource Management**: Need efficient resource caching and lifecycle management
- **Limited AI Features**: Constrained to MCP's prompt/resource model
- **Template Maintenance**: Prompts need regular updates as tools evolve
## Implementation Plan
### Phase 1: Core MCP Integration (Week 1-2)
1. Implement `ListPromptsRequestSchema` and `GetPromptRequestSchema` handlers
2. Implement `ListResourcesRequestSchema` and `ReadResourceRequestSchema` handlers
3. Create resource URI schema and routing system
4. Add MCP capabilities registration for prompts and resources
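A minimal sketch of the Phase 1 handlers above, using the MCP TypeScript SDK (`renderPromptTemplate` is a hypothetical helper, not an SDK function):
```typescript
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import {
  ListPromptsRequestSchema,
  GetPromptRequestSchema,
} from "@modelcontextprotocol/sdk/types.js";

const server = new Server(
  { name: "documcp", version: "0.2.0" },
  { capabilities: { tools: {}, prompts: {}, resources: {} } },
);

server.setRequestHandler(ListPromptsRequestSchema, async () => ({
  prompts: DOCUMENTATION_PROMPTS,
}));

server.setRequestHandler(GetPromptRequestSchema, async (request) => {
  const prompt = DOCUMENTATION_PROMPTS.find(
    (p) => p.name === request.params.name,
  );
  if (!prompt) {
    throw new Error(`Unknown prompt: ${request.params.name}`);
  }
  return {
    messages: [
      {
        role: "user" as const,
        content: {
          type: "text" as const,
          // Hypothetical: fill the template with caller-supplied arguments
          text: renderPromptTemplate(prompt.name, request.params.arguments),
        },
      },
    ],
  };
});
```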
### Phase 2: Documentation Prompts (Week 3-4)
1. Create "analyze-and-recommend" workflow prompt
2. Create "setup-documentation" structure prompt
3. Create "troubleshoot-deployment" diagnostic prompt
4. Add prompt argument validation and help text
### Phase 3: Resource Management (Week 5-6)
1. Implement resource caching for analysis results
2. Add generated configuration file resources
3. Create template library resources
4. Add resource cleanup and lifecycle management
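A minimal sketch of the resource caching and lifecycle management described above, assuming URI keys like `documcp://analysis/{analysisId}` (the TTL value is illustrative):
```typescript
interface CachedResource {
  content: string;
  mimeType: string;
  expiresAt: number;
}

class ResourceCache {
  private entries = new Map<string, CachedResource>();

  constructor(private ttlMs: number = 60 * 60 * 1000) {}

  set(uri: string, content: string, mimeType = "application/json"): void {
    this.entries.set(uri, {
      content,
      mimeType,
      expiresAt: Date.now() + this.ttlMs,
    });
  }

  get(uri: string): CachedResource | undefined {
    const entry = this.entries.get(uri);
    if (!entry) return undefined;
    if (Date.now() > entry.expiresAt) {
      this.entries.delete(uri); // lifecycle: drop stale entries on read
      return undefined;
    }
    return entry;
  }
}
```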
### Phase 4: Advanced Features (Week 7-8)
1. Dynamic prompt generation based on project characteristics
2. Contextual resource recommendations
3. Prompt composition for complex workflows
4. Integration testing with major MCP clients
## Integration with Existing Architecture
### ADR-001 (MCP Server Architecture)
- Extends the TypeScript MCP SDK usage to include prompts and resources
- Maintains stateless operation model
- Leverages existing modular design
### ADR-006 (MCP Tools API Design)
- Tools remain the primary interface for actions
- Prompts provide guided workflows using existing tools
- Resources expose tool outputs in structured format
### ADR-007 (Pluggable Prompt Tool Architecture)
- **Modified Approach**: Instead of custom prompt engines, use MCP prompts
- Template system becomes MCP prompt templates
- Configuration-driven approach still applies for prompt customization
## MCP Server Capabilities Declaration
```typescript
server.setRequestHandler(InitializeRequestSchema, async () => ({
protocolVersion: "2024-11-05",
capabilities: {
tools: {}, // Existing tool capabilities
prompts: {}, // NEW: Prompt template capabilities
resources: {}, // NEW: Resource access capabilities
},
serverInfo: {
name: "documcp",
version: "0.2.0",
},
}));
```
## Future Considerations
- Integration with MCP sampling for AI-powered responses
- Advanced prompt chaining and conditional workflows
- Resource subscriptions for real-time updates
- Community prompt template sharing and marketplace
```
--------------------------------------------------------------------------------
/docs/tutorials/first-deployment.md:
--------------------------------------------------------------------------------
```markdown
---
documcp:
last_updated: "2025-11-20T00:46:21.971Z"
last_validated: "2025-11-20T00:46:21.971Z"
auto_updated: false
update_frequency: monthly
---
# Your First Documentation Deployment
This tutorial walks you through deploying your first documentation site using DocuMCP, from analysis to live GitHub Pages deployment.
## What You'll Build
By the end of this tutorial, you'll have:
- A live documentation site on GitHub Pages
- Automated deployment workflow
- Professional Diataxis-structured content
- Understanding of DocuMCP's deployment process
## Prerequisites
- Completed the [Getting Started](getting-started.md) tutorial
- GitHub repository with your code
- Write access to the repository
- GitHub Pages enabled in repository settings
## Step-by-Step Deployment
### Step 1: Complete Repository Analysis
If you haven't already, analyze your repository:
```bash
# Prompt to DocuMCP:
"analyze my repository for documentation deployment"
```
The output includes an analysis ID (e.g., `analysis_xyz789`) that we'll use throughout the deployment.
### Step 2: Get Deployment-Optimized Recommendations
Request recommendations specifically for deployment:
```bash
# Prompt:
"recommend the best static site generator for GitHub Pages deployment based on analysis_xyz789"
```
DocuMCP will consider:
- **GitHub Pages compatibility** (native Jekyll support vs. Actions required)
- **Build time** (Hugo's speed vs. Docusaurus features)
- **Maintenance overhead** (MkDocs simplicity vs. Eleventy flexibility)
### Step 3: Generate Deployment Configuration
Create production-ready configuration:
```bash
# For example, if Docusaurus was recommended:
"generate Docusaurus configuration for production deployment to GitHub Pages"
```
This creates:
- **docusaurus.config.js**: Optimized for GitHub Pages
- **package.json updates**: Required dependencies
- **Build scripts**: Production build configuration
### Step 4: Set Up Documentation Structure
Create comprehensive documentation structure:
```bash
# Prompt:
"set up Diataxis documentation structure for Docusaurus deployment"
```
Creates organized folders:
```
docs/
├── tutorials/ # Learning-oriented
├── how-to-guides/ # Problem-solving
├── reference/ # Information-oriented
├── explanation/ # Understanding-oriented
└── index.md # Landing page
```
### Step 5: Populate with Initial Content
Generate starter content based on your project:
```bash
# Prompt:
"populate the documentation structure with content based on my project analysis"
```
DocuMCP creates:
- **Project-specific tutorials** based on your codebase
- **API documentation** extracted from your code
- **Installation guides** tailored to your tech stack
- **Configuration examples** using your actual project structure
### Step 6: Deploy to GitHub Pages
Set up automated deployment:
```bash
# Prompt:
"deploy my Docusaurus documentation to GitHub Pages with automated workflow"
```
This generates:
- **.github/workflows/deploy.yml**: GitHub Actions workflow
- **Optimized build process**: Cached dependencies, parallel builds
- **Security configuration**: OIDC tokens, minimal permissions
### Step 7: Verify Deployment
Check that everything is working:
```bash
# Prompt:
"verify my GitHub Pages deployment is working correctly"
```
DocuMCP checks:
- **Workflow status**: Build and deployment success
- **Site accessibility**: Homepage loads correctly
- **Navigation**: All sections are reachable
- **Asset loading**: CSS, JS, images work properly
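You can also script a similar smoke check yourself. A minimal sketch (the URL is a placeholder; requires Node 18+ for the built-in `fetch`):
```typescript
// Hypothetical post-deployment smoke check; not a DocuMCP API.
const siteUrl = "https://yourusername.github.io/your-repo-name/";

const response = await fetch(siteUrl);
if (!response.ok) {
  throw new Error(`Site returned HTTP ${response.status}`);
}
const html = await response.text();
if (!html.includes("<title>")) {
  throw new Error("Homepage HTML looks incomplete");
}
console.log("Deployment verified:", siteUrl);
```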
## Example: Complete TypeScript Library Deployment
Here's a real example for a TypeScript library:
### 1. Analysis Results
```json
{
"id": "analysis_ts_lib_001",
"structure": {
"totalFiles": 47,
"languages": { ".ts": 32, ".md": 5, ".json": 3 },
"hasTests": true,
"hasCI": true
},
"recommendations": {
"primaryLanguage": "typescript",
"projectType": "library"
}
}
```
### 2. Recommendation
```json
{
"recommended": "docusaurus",
"confidence": 0.88,
"reasoning": [
"TypeScript ecosystem alignment",
"Excellent API documentation support",
"React component integration for examples"
]
}
```
### 3. Generated Configuration
**docusaurus.config.js**:
```javascript
const config = {
title: "TypeScript Library Docs",
tagline: "Comprehensive API documentation",
url: "https://yourusername.github.io",
baseUrl: "/your-repo-name/",
// GitHub Pages deployment config
organizationName: "yourusername",
projectName: "your-repo-name",
deploymentBranch: "gh-pages",
trailingSlash: false,
presets: [
[
"classic",
{
docs: {
routeBasePath: "/",
sidebarPath: require.resolve("./sidebars.js"),
},
theme: {
customCss: require.resolve("./src/css/custom.css"),
},
},
],
],
};

module.exports = config;
```
### 4. GitHub Actions Workflow
**.github/workflows/deploy.yml**:
```yaml
name: Deploy Documentation
on:
push:
branches: [main]
paths: ["docs/**", "docusaurus.config.js"]
permissions:
contents: read
pages: write
id-token: write
jobs:
deploy:
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: "20"
cache: "npm"
- name: Install dependencies
run: npm ci
- name: Build documentation
run: npm run build
- name: Setup Pages
uses: actions/configure-pages@v5
- name: Upload artifact
uses: actions/upload-pages-artifact@v4
with:
path: "./build"
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v4
```
## Verification Checklist
After deployment, verify:
- [ ] **Site is live** at `https://yourusername.github.io/repository-name`
- [ ] **All sections load** (Tutorials, How-to, Reference, Explanation)
- [ ] **Search works** (if enabled)
- [ ] **Mobile responsive** design
- [ ] **Fast loading** (check Core Web Vitals)
- [ ] **SEO optimized** (meta tags, sitemap)
## Common Deployment Issues
### Build Failures
**Problem**: Workflow fails during build
**Solution**:
- Check Node.js version compatibility
- Verify all dependencies are in package.json
- Review build logs in Actions tab
### Page Not Found (404)
**Problem**: Site shows 404 error
**Solution**:
- Verify `baseUrl` in config matches repository name
- Check GitHub Pages source is set to GitHub Actions
- Confirm deployment branch exists
### Assets Not Loading
**Problem**: CSS/JS files return 404
**Solution**:
- Ensure `publicPath` is configured correctly
- Check trailing slash configuration
- Verify asset paths are relative
## Performance Optimization
### Build Speed
- **Caching**: Enable npm cache in GitHub Actions
- **Parallel builds**: Use appropriate number of workers
- **Incremental builds**: Only rebuild changed files
### Site Performance
- **Image optimization**: Compress and use modern formats
- **Code splitting**: Load only necessary JavaScript
- **CDN integration**: Use GitHub's CDN for assets
## Next Steps
Now that you have a deployed documentation site:
1. **[Set up development workflow](development-setup.md)** for ongoing maintenance
2. **[Configure custom domain](../how-to/custom-domains.md)** (optional)
3. **[Set up monitoring](../how-to/site-monitoring.md)** for uptime tracking
4. **[Optimize for search engines](../how-to/seo-optimization.md)**
## Summary
You've successfully:
✅ Analyzed your repository for deployment
✅ Generated production-ready configuration
✅ Set up professional documentation structure
✅ Deployed to GitHub Pages with automation
✅ Verified your live documentation site
Your documentation is now live and will automatically update with each commit!
## Troubleshooting
If you encounter issues:
1. Check the [troubleshooting guide](../how-to/troubleshooting.md)
2. Review GitHub Actions logs
3. Verify repository permissions
4. Confirm GitHub Pages settings
Need help? Open an issue on the [DocuMCP repository](https://github.com/tosin2013/documcp/issues).
```
--------------------------------------------------------------------------------
/docs/adrs/001-mcp-server-architecture.md:
--------------------------------------------------------------------------------
```markdown
---
id: 001-mcp-server-architecture
title: "ADR-001: MCP Server Architecture using TypeScript SDK"
sidebar_label: "ADR-001: MCP Server Architecture"
sidebar_position: 1
documcp:
last_updated: "2025-11-20T00:46:21.934Z"
last_validated: "2025-11-20T00:46:21.934Z"
auto_updated: false
update_frequency: monthly
---
# ADR-001: MCP Server Architecture using TypeScript SDK
## Status
Accepted
## Context
DocuMCP requires a robust server architecture that can integrate seamlessly with development environments like GitHub Copilot, Claude Desktop, and other MCP-enabled tools. The server needs to provide intelligent repository analysis, static site generator recommendations, and automated documentation deployment workflows.
Key requirements:
- Standards-compliant MCP protocol implementation
- Stateless operation for consistency and reliability
- Modular design separating concerns
- Integration with existing developer workflows
- Scalable architecture supporting complex multi-step operations
## Decision
We will implement the DocuMCP server using the TypeScript Model Context Protocol SDK, following a modular, stateless architecture pattern.
### Core Architectural Components:
1. **MCP Server Foundation**: TypeScript-based implementation using official MCP SDK
2. **Repository Analysis Engine**: Multi-layered analysis of project characteristics
3. **Static Site Generator Recommendation Engine**: Algorithmic decision framework
4. **File Generation and Template System**: Template-based configuration generation
5. **GitHub Integration Layer**: Automated deployment orchestration
### Design Principles:
- **Stateless Operation**: Each invocation analyzes current repository state
- **Modular Design**: Clear separation between analysis, recommendation, generation, and deployment
- **Standards Compliance**: Full adherence to MCP specification requirements
- **Session Context**: Temporary context preservation within single sessions for complex workflows
## Alternatives Considered
### Python-based Implementation
- **Pros**: Rich ecosystem for NLP and analysis, familiar to many developers
- **Cons**: Less mature MCP SDK, deployment complexity, slower startup times
- **Decision**: Rejected due to MCP ecosystem maturity in TypeScript
### Go-based Implementation
- **Pros**: High performance, excellent concurrency, small binary size
- **Cons**: Limited MCP SDK support, smaller ecosystem for documentation tools
- **Decision**: Rejected due to limited MCP tooling and development velocity concerns
### Stateful Server with Database
- **Pros**: Could cache analysis results, maintain user preferences
- **Cons**: Deployment complexity, synchronization issues, potential staleness
- **Decision**: Rejected to maintain simplicity and ensure consistency
## Consequences
### Positive
- **Developer Familiarity**: TypeScript is widely known in the target developer community
- **MCP Ecosystem**: Mature tooling and extensive documentation available
- **Rapid Development**: Rich ecosystem accelerates feature development
- **Integration**: Seamless integration with existing JavaScript/TypeScript tooling
- **Consistency**: Stateless design eliminates synchronization issues
- **Reliability**: Reduces complexity and potential failure modes
### Negative
- **Runtime Overhead**: Node.js runtime may have higher memory usage than compiled alternatives
- **Startup Time**: Node.js startup may be slower than Go or Rust alternatives
- **Dependency Management**: npm ecosystem can introduce supply chain complexity
### Risks and Mitigations
- **Supply Chain Security**: Use npm audit and dependency scanning in CI/CD
- **Performance**: Implement intelligent caching and optimize hot paths
- **Memory Usage**: Monitor and optimize memory allocation patterns
## Implementation Details
### Project Structure
```
src/
├── server/ # MCP server implementation
├── analysis/ # Repository analysis engine
├── recommendation/ # SSG recommendation logic
├── generation/ # File and template generation
├── deployment/ # GitHub integration
└── types/ # TypeScript type definitions
```
### Key Dependencies
- `@modelcontextprotocol/sdk`: MCP protocol implementation (TypeScript SDK)
- `typescript`: Type safety and development experience
- `zod`: Runtime type validation for MCP tools
- `yaml`: Configuration file parsing and generation
- `mustache`: Template rendering engine
- `simple-git`: Git repository interaction
### Error Handling Strategy
- Comprehensive input validation using Zod schemas
- Structured error responses with actionable guidance
- Graceful degradation for partial analysis failures
- Detailed logging for debugging and monitoring
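A brief sketch of this strategy (the schema and error shape are illustrative, not the exact production types):
```typescript
import { z } from "zod";

// Illustrative input schema; each tool defines its own.
const analyzeInput = z.object({
  path: z.string().min(1),
  depth: z.enum(["quick", "standard", "deep"]).default("standard"),
});

function parseToolInput(args: unknown) {
  const result = analyzeInput.safeParse(args);
  if (!result.success) {
    // Structured, actionable error instead of a raw stack trace
    return {
      ok: false as const,
      error: {
        code: "INVALID_INPUT",
        message: result.error.issues
          .map((issue) => `${issue.path.join(".")}: ${issue.message}`)
          .join("; "),
        resolution: "Check the tool's parameter documentation and retry.",
      },
    };
  }
  return { ok: true as const, data: result.data };
}
```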
## Compliance and Standards
- Full MCP specification compliance for protocol interactions
- JSON-RPC message handling with proper error codes
- Standardized tool parameter validation and responses
- Security best practices for file system access and Git operations
## Research Integration (2025-01-14)
### Performance Validation
**Research Findings Incorporated**: Comprehensive analysis validates our architectural decisions:
1. **TypeScript MCP SDK Performance**:
- ✅ JSON-RPC 2.0 protocol provides minimal communication overhead
- ✅ Native WebSocket/stdio transport layers optimize performance
- ✅ Type safety adds compile-time benefits without runtime performance cost
2. **Node.js Memory Optimization** (Critical for Repository Analysis):
- **Streaming Implementation**: 10x memory reduction for files >100MB
- **Worker Thread Pool**: 3-4x performance improvement for parallel processing
- **Memory-Mapped Files**: 5x speed improvement for large directory traversal
### Updated Implementation Strategy
Based on research validation, the architecture will implement:
```typescript
// Enhanced streaming approach for large repositories
class RepositoryAnalyzer {
private workerPool: WorkerPool;
private streamThreshold = 10 * 1024 * 1024; // 10MB
async analyzeRepository(repoPath: string): Promise<AnalysisResult> {
try {
const files = await this.scanDirectory(repoPath);
// Parallel processing with worker threads
const chunks = this.chunkFiles(files, this.workerPool.size);
const results = await Promise.allSettled(
chunks.map((chunk) => this.workerPool.execute("analyzeChunk", chunk)),
);
// Handle partial failures gracefully
const successfulResults = results
.filter(
(result): result is PromiseFulfilledResult<any> =>
result.status === "fulfilled",
)
.map((result) => result.value);
if (successfulResults.length === 0) {
throw new Error("All analysis chunks failed");
}
return this.aggregateResults(successfulResults);
} catch (error) {
throw new Error(`Repository analysis failed: ${error.message}`);
}
}
private async analyzeFile(filePath: string): Promise<FileAnalysis> {
try {
const stats = await fs.stat(filePath);
// Use streaming for large files
if (stats.size > this.streamThreshold) {
return await this.analyzeFileStream(filePath);
}
return await this.analyzeFileStandard(filePath);
} catch (error) {
throw new Error(`File analysis failed for ${filePath}: ${error.message}`);
}
}
}
```
### Performance Benchmarks
Research-validated performance targets:
- **Small Repositories** (<100 files): <1 second analysis time
- **Medium Repositories** (100-1000 files): <10 seconds analysis time
- **Large Repositories** (1000+ files): <60 seconds analysis time
- **Memory Usage**: Constant memory profile regardless of repository size
## Future Considerations
- Potential migration to WebAssembly for performance-critical components
- Plugin architecture for extensible SSG support
- Distributed analysis for large repository handling (validated by research)
- Machine learning integration for improved recommendations
## References
- [MCP TypeScript SDK Documentation](https://github.com/modelcontextprotocol/typescript-sdk)
- [Model Context Protocol Specification](https://spec.modelcontextprotocol.io/)
- [TypeScript Performance Best Practices](https://github.com/microsoft/TypeScript/wiki/Performance)
```
--------------------------------------------------------------------------------
/docs/adrs/002-repository-analysis-engine.md:
--------------------------------------------------------------------------------
```markdown
---
id: 002-repository-analysis-engine
title: "ADR-002: Repository Analysis Engine Design"
sidebar_label: "ADR-002: Repository Analysis Engine Design"
sidebar_position: 2
documcp:
last_updated: "2025-11-20T00:46:21.936Z"
last_validated: "2025-11-20T00:46:21.936Z"
auto_updated: false
update_frequency: monthly
---
# ADR-002: Multi-Layered Repository Analysis Engine Design
## Status
Accepted
## Context
DocuMCP needs to understand repository characteristics to make intelligent recommendations about static site generators and documentation structure. The analysis must go beyond simple file counting to provide deep insights into project complexity, language ecosystems, existing documentation patterns, and development practices.
Key requirements:
- Comprehensive project characterization
- Language ecosystem detection
- Documentation quality assessment
- Project complexity evaluation
- Performance optimization for large repositories
- Extensible architecture for new analysis types
## Decision
We will implement a multi-layered repository analysis engine that examines repositories from multiple perspectives to build comprehensive project profiles.
### Analysis Layers:
#### 1. File System Analysis Layer
- **Recursive directory traversal** with intelligent filtering
- **File categorization** by extension and content patterns
- **Metrics calculation**: file counts, lines of code, directory depth, size distributions
- **Ignore pattern handling**: .gitignore, common build artifacts, node_modules
#### 2. Language Ecosystem Analysis Layer
- **Package manager detection**: package.json, requirements.txt, Cargo.toml, go.mod, etc.
- **Dependency analysis**: direct and transitive dependencies
- **Build tool identification**: webpack, vite, gradle, maven, cargo, etc.
- **Version constraint analysis**: compatibility requirements
#### 3. Content Analysis Layer
- **Documentation quality assessment**: README analysis, existing docs
- **Code comment analysis**: inline documentation patterns
- **API surface detection**: public interfaces, exported functions
- **Content gap identification**: missing documentation areas
#### 4. Project Metadata Analysis Layer
- **Git history patterns**: commit frequency, contributor activity
- **Release management**: tagging patterns, version schemes
- **Issue tracking**: GitHub issues, project management indicators
- **Community engagement**: contributor count, activity patterns
#### 5. Complexity Assessment Layer
- **Architectural complexity**: microservices, modular design patterns
- **Technical complexity**: multi-language projects, advanced configurations
- **Maintenance indicators**: test coverage, CI/CD presence, code quality metrics
- **Documentation sophistication needs**: API complexity, user journey complexity
## Alternatives Considered
### Single-Pass Analysis
- **Pros**: Simpler implementation, faster for small repositories
- **Cons**: Limited depth, cannot build sophisticated project profiles
- **Decision**: Rejected due to insufficient intelligence for quality recommendations
### External Tool Integration (e.g., GitHub API, CodeClimate)
- **Pros**: Rich metadata, established metrics
- **Cons**: External dependencies, rate limiting, requires authentication
- **Decision**: Rejected for core analysis; may integrate as optional enhancement
### Machine Learning-Based Analysis
- **Pros**: Could learn patterns from successful documentation projects
- **Cons**: Training data requirements, model maintenance, unpredictable results
- **Decision**: Deferred to future versions; start with rule-based analysis
### Database-Backed Caching
- **Pros**: Faster repeat analysis, could store learning patterns
- **Cons**: Deployment complexity, staleness issues, synchronization problems
- **Decision**: Rejected for initial version; implement in-memory caching only
## Consequences
### Positive
- **Intelligent Recommendations**: Deep analysis enables sophisticated SSG matching
- **Extensible Architecture**: Easy to add new analysis dimensions
- **Performance Optimization**: Layered approach allows selective analysis depth
- **Quality Assessment**: Can identify and improve existing documentation
- **Future-Proof**: Architecture supports ML integration and advanced analytics
### Negative
- **Analysis Time**: Comprehensive analysis may be slower for large repositories
- **Complexity**: Multi-layered architecture requires careful coordination
- **Memory Usage**: Full repository analysis requires significant memory for large projects
### Risks and Mitigations
- **Performance**: Implement streaming analysis and configurable depth limits
- **Accuracy**: Validate analysis results against known project types
- **Maintenance**: Regular testing against diverse repository types
## Implementation Details
### Analysis Engine Structure
```typescript
interface RepositoryAnalysis {
fileSystem: FileSystemAnalysis;
languageEcosystem: LanguageEcosystemAnalysis;
content: ContentAnalysis;
metadata: ProjectMetadataAnalysis;
complexity: ComplexityAssessment;
}
interface AnalysisLayer {
analyze(repositoryPath: string): Promise<LayerResult>;
getMetrics(): AnalysisMetrics;
validate(): ValidationResult;
}
```
### Performance Optimizations
- **Parallel Analysis**: Independent layers run concurrently
- **Intelligent Filtering**: Skip irrelevant files and directories early
- **Progressive Analysis**: Start with lightweight analysis, deepen as needed
- **Caching Strategy**: Cache analysis results within session scope
- **Size Limits**: Configurable limits for very large repositories
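A minimal sketch of the parallel, gracefully degrading execution described above, built on the `AnalysisLayer` interface (concrete layer implementations are assumptions):
```typescript
async function runLayers(
  repositoryPath: string,
  layers: AnalysisLayer[],
): Promise<LayerResult[]> {
  // Independent layers run concurrently; one failing layer does not
  // abort the whole analysis.
  const settled = await Promise.allSettled(
    layers.map((layer) => layer.analyze(repositoryPath)),
  );
  return settled
    .filter(
      (r): r is PromiseFulfilledResult<LayerResult> =>
        r.status === "fulfilled",
    )
    .map((r) => r.value);
}
```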
### File Pattern Recognition
```typescript
const FILE_PATTERNS = {
documentation: [".md", ".rst", ".adoc", "docs/", "documentation/"],
configuration: ["config/", ".config/", "*.json", "*.yaml", "*.toml"],
source: ["src/", "lib/", "*.js", "*.ts", "*.py", "*.go", "*.rs"],
tests: ["test/", "tests/", "__tests__/", "*.test.*", "*.spec.*"],
build: ["build/", "dist/", "target/", "bin/", "*.lock"],
};
```
### Language Ecosystem Detection
```typescript
const ECOSYSTEM_INDICATORS = {
javascript: ["package.json", "node_modules/", "yarn.lock", "pnpm-lock.yaml"],
python: ["requirements.txt", "setup.py", "pyproject.toml", "Pipfile"],
rust: ["Cargo.toml", "Cargo.lock", "src/main.rs"],
go: ["go.mod", "go.sum", "main.go"],
java: ["pom.xml", "build.gradle", "gradlew"],
};
```
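A sketch of how the indicator table could drive detection (simplified: nested entries like `src/main.rs` are checked as literal paths, with no glob handling):
```typescript
import { promises as fs } from "fs";
import path from "path";

async function detectEcosystems(repoPath: string): Promise<string[]> {
  const detected: string[] = [];
  for (const [ecosystem, indicators] of Object.entries(
    ECOSYSTEM_INDICATORS,
  )) {
    for (const indicator of indicators) {
      try {
        await fs.access(path.join(repoPath, indicator));
        detected.push(ecosystem); // one matching indicator is enough
        break;
      } catch {
        // Indicator absent; try the next one
      }
    }
  }
  return detected;
}
```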
### Complexity Scoring Algorithm
```typescript
interface ComplexityFactors {
fileCount: number;
languageCount: number;
dependencyCount: number;
directoryDepth: number;
contributorCount: number;
apiSurfaceSize: number;
}
type ComplexityScore = "simple" | "moderate" | "complex" | "enterprise";

function calculateComplexityScore(factors: ComplexityFactors): ComplexityScore {
  // Weighted scoring balancing multiple factors; these weights and
  // thresholds are illustrative, not tuned production values.
  const raw =
    Math.log10(factors.fileCount + 1) * 0.25 +
    factors.languageCount * 0.2 +
    Math.log10(factors.dependencyCount + 1) * 0.2 +
    factors.directoryDepth * 0.1 +
    Math.log10(factors.contributorCount + 1) * 0.1 +
    Math.log10(factors.apiSurfaceSize + 1) * 0.15;

  if (raw < 1.5) return "simple";
  if (raw < 3) return "moderate";
  if (raw < 4.5) return "complex";
  return "enterprise";
}
```
## Quality Assurance
### Testing Strategy
- **Unit Tests**: Each analysis layer tested independently
- **Integration Tests**: Full analysis pipeline validation
- **Repository Fixtures**: Test suite with diverse project types
- **Performance Tests**: Analysis time benchmarks for various repository sizes
- **Accuracy Validation**: Manual verification against known project characteristics
### Monitoring and Metrics
- Analysis execution time by repository size
- Accuracy of complexity assessments
- Cache hit rates and memory usage
- Error rates and failure modes
## Future Enhancements
### Machine Learning Integration
- Pattern recognition for project types
- Automated documentation quality scoring
- Predictive analysis for maintenance needs
### Advanced Analytics
- Historical trend analysis
- Comparative analysis across similar projects
- Community best practice identification
### Performance Optimizations
- WebAssembly modules for intensive analysis
- Distributed analysis for very large repositories
- Incremental analysis for updated repositories
## Security Considerations
- **File System Access**: Restricted to repository boundaries
- **Content Scanning**: No sensitive data extraction or storage
- **Resource Limits**: Prevent resource exhaustion attacks
- **Input Validation**: Sanitize all repository paths and content
## References
- [Git Repository Analysis Best Practices](https://git-scm.com/docs)
- [Static Analysis Tools Comparison](https://analysis-tools.dev/)
- [Repository Metrics Standards](https://chaoss.community/)
```
--------------------------------------------------------------------------------
/src/tools/setup-playwright-tests.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Setup Playwright E2E Tests Tool
* Generates Playwright test configuration and files for user's documentation site
*/
import { promises as fs } from "fs";
import path from "path";
import { z } from "zod";
// Return type matches MCP tool response format
type ToolResponse = {
content: Array<{ type: "text"; text: string }>;
isError?: boolean;
};
import { fileURLToPath } from "url";
import { dirname } from "path";
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const inputSchema = z.object({
repositoryPath: z.string().describe("Path to the documentation repository"),
ssg: z.enum(["jekyll", "hugo", "docusaurus", "mkdocs", "eleventy"]),
projectName: z.string().describe("Project name for tests"),
mainBranch: z.string().optional().default("main"),
includeAccessibilityTests: z.boolean().optional().default(true),
includeDockerfile: z.boolean().optional().default(true),
includeGitHubActions: z.boolean().optional().default(true),
});
interface SSGConfig {
buildCommand: string;
buildDir: string;
port: number;
packageDeps: Record<string, string>;
}
const SSG_CONFIGS: Record<string, SSGConfig> = {
jekyll: {
buildCommand: "bundle exec jekyll build",
buildDir: "_site",
port: 4000,
packageDeps: {},
},
hugo: {
buildCommand: "hugo",
buildDir: "public",
port: 1313,
packageDeps: {},
},
docusaurus: {
buildCommand: "npm run build",
buildDir: "build",
port: 3000,
packageDeps: {
"@docusaurus/core": "^3.0.0",
"@docusaurus/preset-classic": "^3.0.0",
},
},
mkdocs: {
buildCommand: "mkdocs build",
buildDir: "site",
port: 8000,
packageDeps: {},
},
eleventy: {
buildCommand: "npx @11ty/eleventy",
buildDir: "_site",
port: 8080,
packageDeps: {
"@11ty/eleventy": "^2.0.0",
},
},
};
export async function setupPlaywrightTests(
args: unknown,
): Promise<ToolResponse> {
const {
repositoryPath,
ssg,
projectName,
mainBranch,
includeAccessibilityTests,
includeDockerfile,
includeGitHubActions,
} = inputSchema.parse(args);
try {
const config = SSG_CONFIGS[ssg];
const templatesDir = path.join(__dirname, "../templates/playwright");
// Create directories
const testsDir = path.join(repositoryPath, "tests/e2e");
await fs.mkdir(testsDir, { recursive: true });
if (includeGitHubActions) {
const workflowsDir = path.join(repositoryPath, ".github/workflows");
await fs.mkdir(workflowsDir, { recursive: true });
}
// Read and process templates
const filesCreated: string[] = [];
// 1. Playwright config
const configTemplate = await fs.readFile(
path.join(templatesDir, "playwright.config.template.ts"),
"utf-8",
);
const playwrightConfig = configTemplate.replace(
/{{port}}/g,
config.port.toString(),
);
await fs.writeFile(
path.join(repositoryPath, "playwright.config.ts"),
playwrightConfig,
);
filesCreated.push("playwright.config.ts");
// 2. Link validation tests
const linkTestTemplate = await fs.readFile(
path.join(templatesDir, "link-validation.spec.template.ts"),
"utf-8",
);
const linkTest = linkTestTemplate.replace(/{{projectName}}/g, projectName);
await fs.writeFile(
path.join(testsDir, "link-validation.spec.ts"),
linkTest,
);
filesCreated.push("tests/e2e/link-validation.spec.ts");
// 3. Accessibility tests (if enabled)
if (includeAccessibilityTests) {
const a11yTemplate = await fs.readFile(
path.join(templatesDir, "accessibility.spec.template.ts"),
"utf-8",
);
await fs.writeFile(
path.join(testsDir, "accessibility.spec.ts"),
a11yTemplate,
);
filesCreated.push("tests/e2e/accessibility.spec.ts");
}
// 4. Dockerfile (if enabled)
if (includeDockerfile) {
const dockerTemplate = await fs.readFile(
path.join(templatesDir, "Dockerfile.template"),
"utf-8",
);
const dockerfile = dockerTemplate
.replace(/{{ssg}}/g, ssg)
.replace(/{{buildCommand}}/g, config.buildCommand)
.replace(/{{buildDir}}/g, config.buildDir);
await fs.writeFile(
path.join(repositoryPath, "Dockerfile.playwright"),
dockerfile,
);
filesCreated.push("Dockerfile.playwright");
}
// 5. GitHub Actions workflow (if enabled)
if (includeGitHubActions) {
const workflowTemplate = await fs.readFile(
path.join(templatesDir, "docs-e2e.workflow.template.yml"),
"utf-8",
);
const workflow = workflowTemplate
.replace(/{{mainBranch}}/g, mainBranch)
.replace(/{{buildCommand}}/g, config.buildCommand)
.replace(/{{buildDir}}/g, config.buildDir)
.replace(/{{port}}/g, config.port.toString());
await fs.writeFile(
path.join(repositoryPath, ".github/workflows/docs-e2e-tests.yml"),
workflow,
);
filesCreated.push(".github/workflows/docs-e2e-tests.yml");
}
// 6. Update package.json
const packageJsonPath = path.join(repositoryPath, "package.json");
let packageJson: any = {};
try {
const existing = await fs.readFile(packageJsonPath, "utf-8");
packageJson = JSON.parse(existing);
} catch {
// Create new package.json
packageJson = {
name: projectName.toLowerCase().replace(/\s+/g, "-"),
version: "1.0.0",
private: true,
scripts: {},
dependencies: {},
devDependencies: {},
};
}
// Add Playwright dependencies
packageJson.devDependencies = {
...packageJson.devDependencies,
"@playwright/test": "^1.55.1",
...(includeAccessibilityTests
? { "@axe-core/playwright": "^4.10.2" }
: {}),
};
// Add test scripts
packageJson.scripts = {
...packageJson.scripts,
"test:e2e": "playwright test",
"test:e2e:ui": "playwright test --ui",
"test:e2e:report": "playwright show-report",
"test:e2e:docker":
"docker build -t docs-test -f Dockerfile.playwright . && docker run --rm docs-test",
};
await fs.writeFile(packageJsonPath, JSON.stringify(packageJson, null, 2));
filesCreated.push("package.json (updated)");
// 7. Create .gitignore entries
const gitignorePath = path.join(repositoryPath, ".gitignore");
const gitignoreEntries = [
"test-results/",
"playwright-report/",
"playwright-results.json",
"playwright/.cache/",
].join("\n");
try {
const existing = await fs.readFile(gitignorePath, "utf-8");
if (!existing.includes("test-results/")) {
await fs.writeFile(
gitignorePath,
`${existing}\n\n# Playwright\n${gitignoreEntries}\n`,
);
filesCreated.push(".gitignore (updated)");
}
} catch {
await fs.writeFile(gitignorePath, `# Playwright\n${gitignoreEntries}\n`);
filesCreated.push(".gitignore");
}
return {
content: [
{
type: "text" as const,
text: JSON.stringify(
{
success: true,
filesCreated,
nextSteps: [
"Run `npm install` to install Playwright dependencies",
"Run `npx playwright install` to download browser binaries",
"Test locally: `npm run test:e2e`",
includeDockerfile
? "Build container: `docker build -t docs-test -f Dockerfile.playwright .`"
: "",
includeGitHubActions
? "Push to trigger GitHub Actions workflow"
: "",
].filter(Boolean),
configuration: {
ssg,
buildCommand: config.buildCommand,
buildDir: config.buildDir,
port: config.port,
testsIncluded: {
linkValidation: true,
accessibility: includeAccessibilityTests,
},
integrations: {
docker: includeDockerfile,
githubActions: includeGitHubActions,
},
},
},
null,
2,
),
},
],
};
} catch (error: any) {
return {
content: [
{
type: "text" as const,
text: JSON.stringify(
{
success: false,
error: error.message,
},
null,
2,
),
},
],
isError: true,
};
}
}
```
--------------------------------------------------------------------------------
/src/tools/kg-health-check.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Knowledge Graph Health Check Tool
* MCP tool for checking knowledge graph health and getting recommendations
*/
import { z } from "zod";
import { MCPToolResponse, formatMCPResponse } from "../types/api.js";
import { getKnowledgeGraph, getKGStorage } from "../memory/kg-integration.js";
import { KGHealthMonitor, KGHealthMetrics } from "../memory/kg-health.js";
const inputSchema = z.object({
includeHistory: z.boolean().optional().default(false),
generateReport: z.boolean().optional().default(true),
days: z.number().min(1).max(90).optional().default(7),
});
/**
* Check the health of the knowledge graph
*
* Performs comprehensive health analysis including data quality, structure health,
* performance metrics, issue detection, and trend analysis.
*
* @param args - The input arguments
* @param args.includeHistory - Include historical health trend data
* @param args.generateReport - Generate a formatted health report
* @param args.days - Number of days of history to include (1-90)
*
* @returns Health metrics with recommendations
*
* @example
* ```typescript
* const result = await checkKGHealth({
* includeHistory: true,
* generateReport: true,
* days: 7
* });
* ```
*/
export async function checkKGHealth(
args: unknown,
): Promise<{ content: any[]; isError?: boolean }> {
const startTime = Date.now();
try {
    const { includeHistory, generateReport, days } = inputSchema.parse(args);
// Get KG instances
const kg = await getKnowledgeGraph();
const storage = await getKGStorage();
// Create health monitor
const monitor = new KGHealthMonitor();
// Calculate health
const health = await monitor.calculateHealth(kg, storage);
// Generate report if requested
let report = "";
    if (generateReport) {
      report = generateHealthReport(health, includeHistory, days);
    }
const response: MCPToolResponse<KGHealthMetrics> = {
success: true,
data: health,
metadata: {
toolVersion: "1.0.0",
executionTime: Date.now() - startTime,
timestamp: new Date().toISOString(),
},
recommendations: health.recommendations.map((rec) => ({
type: rec.priority === "high" ? "warning" : "info",
title: rec.action,
description: `Expected impact: +${rec.expectedImpact} health score | Effort: ${rec.effort}`,
})),
nextSteps: [
{
action: "Apply Recommendations",
toolRequired: "manual",
description:
"Implement high-priority recommendations to improve health",
priority: "high",
},
...(health.issues.filter((i) => i.severity === "critical").length > 0
? [
{
action: "Fix Critical Issues",
toolRequired: "manual" as const,
description: "Address critical issues immediately",
priority: "high" as const,
},
]
: []),
],
};
if (generateReport) {
// Add report as additional content
return {
content: [
...formatMCPResponse(response).content,
{
type: "text",
text: report,
},
],
};
}
return formatMCPResponse(response);
} catch (error) {
const errorResponse: MCPToolResponse = {
success: false,
error: {
code: "HEALTH_CHECK_FAILED",
message: `Failed to check KG health: ${error}`,
resolution: "Ensure the knowledge graph is properly initialized",
},
metadata: {
toolVersion: "1.0.0",
executionTime: Date.now() - startTime,
timestamp: new Date().toISOString(),
},
};
return formatMCPResponse(errorResponse);
}
}
/**
* Generate a human-readable health report
*/
function generateHealthReport(
  health: KGHealthMetrics,
  includeHistory: boolean,
  days: number,
): string {
const lines: string[] = [];
// Header
lines.push("═══════════════════════════════════════════════════════");
lines.push(" KNOWLEDGE GRAPH HEALTH REPORT");
lines.push("═══════════════════════════════════════════════════════");
lines.push("");
// Overall Health
lines.push(
`📊 OVERALL HEALTH: ${health.overallHealth}/100 ${getHealthEmoji(
health.overallHealth,
)}`,
);
lines.push(
` Trend: ${health.trends.healthTrend.toUpperCase()} ${getTrendEmoji(
health.trends.healthTrend,
)}`,
);
lines.push("");
// Component Scores
lines.push("Component Scores:");
lines.push(
` • Data Quality: ${health.dataQuality.score}/100 ${getHealthEmoji(
health.dataQuality.score,
)}`,
);
lines.push(
` • Structure Health: ${health.structureHealth.score}/100 ${getHealthEmoji(
health.structureHealth.score,
)}`,
);
lines.push(
` • Performance: ${health.performance.score}/100 ${getHealthEmoji(
health.performance.score,
)}`,
);
lines.push("");
// Graph Statistics
lines.push("Graph Statistics:");
lines.push(` • Total Nodes: ${health.dataQuality.totalNodes}`);
lines.push(` • Total Edges: ${health.dataQuality.totalEdges}`);
lines.push(
` • Avg Connectivity: ${health.structureHealth.densityScore.toFixed(3)}`,
);
lines.push(
` • Storage Size: ${formatBytes(health.performance.storageSize)}`,
);
lines.push("");
// Data Quality Details
if (health.dataQuality.score < 90) {
lines.push("⚠️ Data Quality Issues:");
if (health.dataQuality.staleNodeCount > 0) {
lines.push(
` • ${health.dataQuality.staleNodeCount} stale nodes (>30 days old)`,
);
}
if (health.dataQuality.orphanedEdgeCount > 0) {
lines.push(` • ${health.dataQuality.orphanedEdgeCount} orphaned edges`);
}
if (health.dataQuality.duplicateCount > 0) {
lines.push(` • ${health.dataQuality.duplicateCount} duplicate entities`);
}
if (health.dataQuality.completenessScore < 0.8) {
lines.push(
` • Completeness: ${Math.round(
health.dataQuality.completenessScore * 100,
)}%`,
);
}
lines.push("");
}
// Critical Issues
const criticalIssues = health.issues.filter((i) => i.severity === "critical");
const highIssues = health.issues.filter((i) => i.severity === "high");
if (criticalIssues.length > 0 || highIssues.length > 0) {
lines.push("🚨 CRITICAL & HIGH PRIORITY ISSUES:");
for (const issue of [...criticalIssues, ...highIssues].slice(0, 5)) {
lines.push(` [${issue.severity.toUpperCase()}] ${issue.description}`);
lines.push(` → ${issue.remediation}`);
}
lines.push("");
}
// Top Recommendations
if (health.recommendations.length > 0) {
lines.push("💡 TOP RECOMMENDATIONS:");
for (const rec of health.recommendations.slice(0, 5)) {
lines.push(` ${getPriorityIcon(rec.priority)} ${rec.action}`);
lines.push(` Impact: +${rec.expectedImpact} | Effort: ${rec.effort}`);
}
lines.push("");
}
// Trends
if (includeHistory) {
lines.push("📈 TRENDS (Last 7 Days):");
lines.push(
` • Health: ${health.trends.healthTrend} ${getTrendEmoji(
health.trends.healthTrend,
)}`,
);
lines.push(
` • Quality: ${health.trends.qualityTrend} ${getTrendEmoji(
health.trends.qualityTrend,
)}`,
);
lines.push(
` • Node Growth: ${health.trends.nodeGrowthRate.toFixed(1)} nodes/day`,
);
lines.push(
` • Edge Growth: ${health.trends.edgeGrowthRate.toFixed(1)} edges/day`,
);
lines.push("");
}
// Footer
lines.push("═══════════════════════════════════════════════════════");
lines.push(
`Report generated: ${new Date(health.timestamp).toLocaleString()}`,
);
lines.push("═══════════════════════════════════════════════════════");
return lines.join("\n");
}
// Helper functions
function getHealthEmoji(score: number): string {
if (score >= 90) return "🟢 Excellent";
if (score >= 75) return "🟡 Good";
if (score >= 60) return "🟠 Fair";
return "🔴 Poor";
}
function getTrendEmoji(trend: string): string {
if (trend === "improving") return "📈";
if (trend === "degrading") return "📉";
return "➡️";
}
function getPriorityIcon(priority: string): string {
if (priority === "high") return "🔴";
if (priority === "medium") return "🟡";
return "🟢";
}
function formatBytes(bytes: number): string {
if (bytes < 1024) return `${bytes} B`;
if (bytes < 1024 * 1024) return `${(bytes / 1024).toFixed(1)} KB`;
if (bytes < 1024 * 1024 * 1024)
return `${(bytes / (1024 * 1024)).toFixed(1)} MB`;
return `${(bytes / (1024 * 1024 * 1024)).toFixed(1)} GB`;
}
```
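A minimal calling sketch for the tool above (the import path is an assumption; adjust it to your layout). `checkKGHealth` returns MCP-style text content items; when `generateReport` is true, the formatted report is appended as the final item:

```typescript
// Usage sketch (assumed import path).
import { checkKGHealth } from "./src/tools/kg-health-check.js";

async function main(): Promise<void> {
  const result = await checkKGHealth({ generateReport: true });
  // Each content item is a text entry (see the tests below); the
  // human-readable report, when requested, is the last one.
  for (const item of result.content) {
    console.log(item.text);
  }
}

main().catch(console.error);
```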
--------------------------------------------------------------------------------
/tests/tools/kg-health-check.test.ts:
--------------------------------------------------------------------------------
```typescript
import { promises as fs } from "fs";
import path from "path";
import os from "os";
import { checkKGHealth } from "../../src/tools/kg-health-check";
import {
getKnowledgeGraph,
getKGStorage,
createOrUpdateProject,
} from "../../src/memory/kg-integration";
describe("KG Health Check Tool", () => {
let tempDir: string;
const originalCwd = process.cwd();
beforeEach(async () => {
tempDir = path.join(os.tmpdir(), `kg-health-${Date.now()}`);
await fs.mkdir(tempDir, { recursive: true });
process.chdir(tempDir);
});
afterEach(async () => {
process.chdir(originalCwd);
try {
await fs.rm(tempDir, { recursive: true, force: true });
} catch {
// Ignore cleanup errors
}
});
it("should perform basic health check", async () => {
const result = await checkKGHealth({
includeHistory: false,
generateReport: false,
});
expect(result.content).toBeDefined();
expect(result.content.length).toBeGreaterThan(0);
// Should contain health metrics
const text = result.content.map((c) => c.text).join(" ");
expect(text).toContain("Health");
});
it("should include historical data when requested", async () => {
const result = await checkKGHealth({
includeHistory: true,
generateReport: false,
days: 7,
});
expect(result.content).toBeDefined();
const text = result.content.map((c) => c.text).join(" ");
expect(text).toContain("Health");
});
it("should generate detailed report", async () => {
const result = await checkKGHealth({
includeHistory: false,
generateReport: true,
});
expect(result.content).toBeDefined();
expect(result.content.length).toBeGreaterThan(0);
// Report should contain detailed metrics
const text = result.content.map((c) => c.text).join(" ");
expect(text).toContain("Health");
});
it("should generate report with history included", async () => {
const result = await checkKGHealth({
includeHistory: true,
generateReport: true,
days: 14,
});
expect(result.content).toBeDefined();
expect(result.content.length).toBeGreaterThan(1); // Should have formatted response + report
const text = result.content.map((c) => c.text).join(" ");
expect(text).toContain("Health");
expect(text).toContain("TRENDS");
});
it("should handle errors gracefully", async () => {
// Test with invalid parameters
const result = await checkKGHealth({
includeHistory: true,
generateReport: true,
days: -1, // Invalid
});
// Should either handle gracefully or return error
expect(result.content).toBeDefined();
});
it("should calculate health score", async () => {
const result = await checkKGHealth({
includeHistory: false,
generateReport: true,
});
expect(result.content).toBeDefined();
const text = result.content.map((c) => c.text).join(" ");
// Should contain some health indicator
expect(text.length).toBeGreaterThan(0);
});
it("should include critical issues in next steps", async () => {
// Create a project with some data to trigger health calculation
const kg = await getKnowledgeGraph();
// Add some nodes and edges to test health calculation
kg.addNode({
id: "test-node-1",
type: "project",
label: "Test Project",
properties: { name: "test" },
weight: 1.0,
});
const result = await checkKGHealth({
includeHistory: false,
generateReport: true,
});
expect(result.content).toBeDefined();
// Check that the response structure is correct
const text = result.content.map((c) => c.text).join(" ");
expect(text).toBeTruthy();
});
it("should handle graph with high data quality score", async () => {
const result = await checkKGHealth({
includeHistory: false,
generateReport: true,
});
expect(result.content).toBeDefined();
const text = result.content.map((c) => c.text).join(" ");
// Should complete without errors
expect(text.length).toBeGreaterThan(0);
});
it("should use default values when parameters not provided", async () => {
const result = await checkKGHealth({});
expect(result.content).toBeDefined();
expect(result.content.length).toBeGreaterThan(0);
});
it("should handle various health score ranges in report", async () => {
// Test the helper functions indirectly through the report
const result = await checkKGHealth({
includeHistory: false,
generateReport: true,
});
expect(result.content).toBeDefined();
const text = result.content.map((c) => c.text).join(" ");
// Should contain health indicators (emojis or text)
expect(text.length).toBeGreaterThan(0);
});
it("should handle different trend directions in report", async () => {
const result = await checkKGHealth({
includeHistory: true,
generateReport: true,
});
expect(result.content).toBeDefined();
const text = result.content.map((c) => c.text).join(" ");
// Report should include trend information
expect(text).toContain("TRENDS");
});
it("should handle different priority levels in recommendations", async () => {
const result = await checkKGHealth({
includeHistory: false,
generateReport: true,
});
expect(result.content).toBeDefined();
const text = result.content.map((c) => c.text).join(" ");
// Should complete without errors
expect(text.length).toBeGreaterThan(0);
});
it("should handle different byte sizes in formatBytes", async () => {
const result = await checkKGHealth({
includeHistory: false,
generateReport: true,
});
expect(result.content).toBeDefined();
const text = result.content.map((c) => c.text).join(" ");
// Report should include storage size
expect(text.length).toBeGreaterThan(0);
});
it("should handle validation errors", async () => {
const result = await checkKGHealth({
days: 150, // Exceeds max of 90
});
expect(result.content).toBeDefined();
// Should return error response
const text = result.content.map((c) => c.text).join(" ");
expect(text).toBeTruthy();
});
it("should handle recommendations with different priorities", async () => {
const result = await checkKGHealth({
includeHistory: false,
generateReport: true,
});
expect(result.content).toBeDefined();
// Check response structure
const text = result.content.map((c) => c.text).join(" ");
expect(text.length).toBeGreaterThan(0);
});
it("should detect and report data quality issues", async () => {
const kg = await getKnowledgeGraph();
// Create nodes
kg.addNode({
id: "test-project-1",
type: "project",
label: "Test Project 1",
properties: { name: "test-project-1" },
weight: 1.0,
});
kg.addNode({
id: "test-tech-1",
type: "technology",
label: "TypeScript",
properties: { name: "typescript" },
weight: 1.0,
});
// Create an orphaned edge (edge pointing to non-existent node)
kg.addEdge({
source: "test-tech-1",
target: "non-existent-node-id",
type: "uses",
weight: 1.0,
confidence: 0.9,
properties: {},
});
const result = await checkKGHealth({
includeHistory: false,
generateReport: true,
});
expect(result.content).toBeDefined();
const text = result.content.map((c) => c.text).join(" ");
// Should report data quality issues with score < 90
expect(text).toContain("Health");
// The report should show details about stale nodes, orphaned edges, etc.
expect(text.length).toBeGreaterThan(100); // Detailed report
});
it("should test all priority icon levels", async () => {
// This test indirectly tests getPriorityIcon for "high", "medium", and "low"
const result = await checkKGHealth({
includeHistory: false,
generateReport: true,
});
expect(result.content).toBeDefined();
const text = result.content.map((c) => c.text).join(" ");
// The report should include priority indicators (emojis)
expect(text.length).toBeGreaterThan(0);
});
it("should test formatBytes for different size ranges", async () => {
// The tool will calculate storage size which triggers formatBytes
// This covers: bytes, KB, MB ranges
const result = await checkKGHealth({
includeHistory: false,
generateReport: true,
});
expect(result.content).toBeDefined();
const text = result.content.map((c) => c.text).join(" ");
// Storage size should be included in the report
expect(text.length).toBeGreaterThan(0);
});
});
```
--------------------------------------------------------------------------------
/docs/development/MCP_INSPECTOR_TESTING.md:
--------------------------------------------------------------------------------
```markdown
---
documcp:
last_updated: "2025-11-20T00:46:21.945Z"
last_validated: "2025-11-20T00:46:21.945Z"
auto_updated: false
update_frequency: monthly
---
# MCP Inspector Testing Guide
The MCP Inspector is a browser-based debugging tool for exercising MCP servers without connecting them to a real client application. This guide explains how to use it for DocuMCP development.
## Prerequisites
- Node.js 20+ installed
- DocuMCP repository cloned
- Dependencies installed (`npm install`)
## Quick Start
### Option 1: Build and Launch Inspector
```bash
npm run build:inspect
```
This command:
1. Compiles TypeScript to `dist/`
2. Launches MCP Inspector
3. Opens browser at `http://localhost:5173` (or similar)
### Option 2: Launch Inspector with Existing Build
```bash
npm run build # First build (if needed)
npm run dev:inspect # Then launch inspector
```
## Using the Inspector
### 1. Connect to Server
1. Open the browser URL provided by the inspector
2. Click the "Connect" button in the left sidebar
3. Wait for connection confirmation
### 2. Test Tools
The Tools section lists all available MCP tools:
**Example: Testing `analyze_repository`**
1. Click "Tools" in the top navigation
2. Select "analyze_repository" from the list
3. In the right panel, enter parameters:
```json
{
"path": "./",
"depth": "standard"
}
```
4. Click "Run Tool"
5. Verify the output includes:
- File counts
- Language detection
- Dependency analysis
- Memory insights
**Example: Testing `recommend_ssg`**
1. First run `analyze_repository` (as above) to get an `analysisId`
2. Select "recommend_ssg"
3. Enter parameters:
```json
{
"analysisId": "<id-from-previous-analysis>",
"userId": "test-user",
"preferences": {
"priority": "simplicity",
"ecosystem": "javascript"
}
}
```
4. Click "Run Tool"
5. Verify recommendation includes:
- Recommended SSG
- Confidence score
- Reasoning
- Alternative options
### 3. Test Resources
Resources provide static data for application UIs:
**Example: Testing SSG List**
1. Click "Resources" in the top navigation
2. Select "documcp://ssgs/available"
3. Verify output shows all 5 SSGs:
- Jekyll
- Hugo
- Docusaurus
- MkDocs
- Eleventy
4. Check each SSG includes:
- ID, name, description
- Language, complexity, build speed
- Best use cases
**Example: Testing Configuration Templates**
1. Select "documcp://templates/jekyll-config"
2. Verify YAML template is returned
3. Test other templates:
- `documcp://templates/hugo-config`
- `documcp://templates/docusaurus-config`
- `documcp://templates/mkdocs-config`
- `documcp://templates/eleventy-config`
- `documcp://templates/diataxis-structure`
### 4. Test Prompts
Prompts provide pre-written instructions for specialized tasks:
**Example: Testing `tutorial-writer`**
1. Click "Prompts" in the top navigation
2. Select "tutorial-writer"
3. Provide arguments:
```json
{
"project_path": "./",
"target_audience": "beginners",
"learning_goal": "deploy first documentation site"
}
```
4. Click "Get Prompt"
5. Verify prompt messages include:
- Project context (languages, frameworks)
- Diataxis tutorial requirements
- Step-by-step structure guidance
**Example: Testing `analyze-and-recommend` workflow**
1. Select "analyze-and-recommend"
2. Provide arguments:
```json
{
"project_path": "./",
"analysis_depth": "standard",
"preferences": "good community support"
}
```
3. Verify workflow prompt includes:
- Complete analysis workflow
- SSG recommendation guidance
- Implementation steps
## Common Test Cases
### Tool Testing Checklist
- [ ] **analyze_repository**
- [ ] Test with current directory (`./`)
- [ ] Test with different depth levels
- [ ] Verify memory integration works
- [ ] Check similar projects are found
- [ ] **recommend_ssg**
- [ ] Test with valid analysisId
- [ ] Test different preference combinations
- [ ] Verify confidence scores
- [ ] Check historical data integration
- [ ] **generate_config**
- [ ] Test each SSG type
- [ ] Verify output format
- [ ] Check template variables
- [ ] **setup_structure**
- [ ] Test Diataxis structure creation
- [ ] Verify all categories included
- [ ] Check example content
- [ ] **deploy_pages**
- [ ] Test workflow generation
- [ ] Verify GitHub Actions YAML
- [ ] Check custom domain support
- [ ] **validate_content**
- [ ] Test with documentation path
- [ ] Verify link checking
- [ ] Check code block validation
### Resource Testing Checklist
- [ ] **documcp://ssgs/available**
- [ ] All 5 SSGs listed
- [ ] Complete metadata for each
- [ ] **Templates**
- [ ] Jekyll config valid YAML
- [ ] Hugo config valid YAML
- [ ] Docusaurus config valid JS
- [ ] MkDocs config valid YAML
- [ ] Eleventy config valid JS
- [ ] Diataxis structure valid JSON
- [ ] **Workflows**
- [ ] All workflows listed
- [ ] Quick setup available
- [ ] Full setup available
- [ ] Guidance provided
### Prompt Testing Checklist
- [ ] **Technical Writer Prompts**
- [ ] tutorial-writer
- [ ] howto-guide-writer
- [ ] reference-writer
- [ ] explanation-writer
- [ ] diataxis-organizer
- [ ] readme-optimizer
- [ ] **Workflow Prompts**
- [ ] analyze-and-recommend
- [ ] setup-documentation
- [ ] troubleshoot-deployment
## Troubleshooting
### Inspector Won't Connect
**Problem:** Connection fails or times out
**Solutions:**
1. Ensure server is built: `npm run build`
2. Check no other process is using the port
3. Try restarting: `Ctrl+C` and re-run `npm run dev:inspect`
### Tool Returns Error
**Problem:** Tool execution fails with error message
**Solutions:**
1. Check parameter format (must be valid JSON)
2. Verify required parameters are provided
3. Ensure file paths exist (for file-based tools)
4. Check server logs for detailed error messages
### Resource Not Found
**Problem:** Resource URI returns "Resource not found" error
**Solutions:**
1. Verify URI spelling matches exactly (case-sensitive)
2. Check resource list for available URIs
3. Ensure server version matches documentation
### Prompt Arguments Missing
**Problem:** Prompt doesn't use provided arguments
**Solutions:**
1. Check argument names match prompt definition
2. Verify JSON format is correct
3. Ensure all required arguments are provided
## Best Practices
### During Development
1. **Keep Inspector Open:** Launch inspector at start of development session
2. **Test After Changes:** Run tool tests after modifying tool implementation
3. **Verify All Paths:** Test both success and error paths
4. **Check Edge Cases:** Test with unusual inputs, empty values, etc.
### Before Committing
1. **Full Tool Test:** Test at least one example from each tool
2. **Resource Validation:** Verify all resources return valid data
3. **Prompt Verification:** Check prompts generate correct messages
4. **Error Handling:** Test with invalid inputs to verify error messages
### For Bug Fixing
1. **Reproduce in Inspector:** Use inspector to reproduce bug consistently
2. **Test Fix:** Verify fix works in inspector before integration testing
3. **Regression Test:** Test related tools to ensure no regressions
4. **Document:** Add test case to this guide if bug was subtle
## Integration with Development Workflow
### Daily Development
```bash
# Morning startup
npm run build:inspect
# Keep inspector tab open
# Make code changes in editor
# Test changes in inspector
# Iterate until working
# Before lunch/end of day
npm run build && npm test
```
### Pre-Commit Workflow
```bash
# Run full validation
npm run ci
# Test in inspector
npm run build:inspect
# Manual spot checks on key tools
# Commit when all checks pass
```
### CI/CD Integration
While MCP Inspector is primarily for local development, you can add automated checks:
```bash
# In CI pipeline (future enhancement)
npm run build
npx @modelcontextprotocol/inspector dist/index.js --test automated-tests.json
```
## Additional Resources
- **MCP Inspector GitHub:** https://github.com/modelcontextprotocol/inspector
- **MCP Specification:** https://modelcontextprotocol.io/docs
- **MCP TypeScript SDK:** https://github.com/modelcontextprotocol/typescript-sdk
- **DocuMCP Architecture:** See `docs/adrs/` for detailed architectural decisions
## Feedback
If you encounter issues with MCP Inspector or this guide:
1. Check for known issues: https://github.com/modelcontextprotocol/inspector/issues
2. Report DocuMCP-specific issues: https://github.com/anthropics/documcp/issues
3. Suggest improvements to this guide via pull request
---
**Last Updated:** 2025-10-09
**Version:** 1.0.0
```
--------------------------------------------------------------------------------
/tests/prompts/technical-writer-prompts.test.ts:
--------------------------------------------------------------------------------
```typescript
import {
generateTechnicalWriterPrompts,
analyzeProjectContext,
} from "../../src/prompts/technical-writer-prompts.js";
import { promises as fs } from "fs";
import { join } from "path";
import { tmpdir } from "os";
describe("Technical Writer Diataxis Prompts", () => {
let testProjectPath: string;
beforeEach(async () => {
// Create a temporary test project
testProjectPath = join(tmpdir(), `test-project-${Date.now()}`);
await fs.mkdir(testProjectPath, { recursive: true });
// Create a basic package.json
const packageJson = {
name: "test-project",
version: "1.0.0",
dependencies: {
react: "^18.0.0",
typescript: "^5.0.0",
},
scripts: {
test: "jest",
},
};
await fs.writeFile(
join(testProjectPath, "package.json"),
JSON.stringify(packageJson, null, 2),
);
// Create a basic README.md
await fs.writeFile(
join(testProjectPath, "README.md"),
"# Test Project\n\nA test project for testing.",
);
// Create a test directory
await fs.mkdir(join(testProjectPath, "tests"), { recursive: true });
await fs.writeFile(
join(testProjectPath, "tests", "example.test.js"),
'test("example", () => { expect(true).toBe(true); });',
);
// Create a CI file
await fs.mkdir(join(testProjectPath, ".github", "workflows"), {
recursive: true,
});
await fs.writeFile(
join(testProjectPath, ".github", "workflows", "ci.yml"),
"name: CI\non: [push, pull_request]",
);
});
afterEach(async () => {
// Clean up test project
try {
await fs.rm(testProjectPath, { recursive: true, force: true });
} catch (error) {
// Ignore cleanup errors
}
});
describe("generateTechnicalWriterPrompts", () => {
it("should generate tutorial writer prompts", async () => {
const prompts = await generateTechnicalWriterPrompts(
"tutorial-writer",
testProjectPath,
);
expect(prompts.length).toBeGreaterThan(0);
expect(prompts[0]).toHaveProperty("role");
expect(prompts[0]).toHaveProperty("content");
expect(prompts[0].content).toHaveProperty("type", "text");
expect(prompts[0].content).toHaveProperty("text");
expect(prompts[0].content.text).toContain("tutorial");
expect(prompts[0].content.text).toContain("Diataxis");
});
it("should generate how-to guide writer prompts", async () => {
const prompts = await generateTechnicalWriterPrompts(
"howto-guide-writer",
testProjectPath,
);
expect(prompts.length).toBeGreaterThan(0);
expect(prompts[0].content.text).toContain("how-to guide");
expect(prompts[0].content.text).toContain("Problem-oriented");
});
it("should generate reference writer prompts", async () => {
const prompts = await generateTechnicalWriterPrompts(
"reference-writer",
testProjectPath,
);
expect(prompts.length).toBeGreaterThan(0);
expect(prompts[0].content.text).toContain("reference documentation");
expect(prompts[0].content.text).toContain("Information-oriented");
});
it("should generate explanation writer prompts", async () => {
const prompts = await generateTechnicalWriterPrompts(
"explanation-writer",
testProjectPath,
);
expect(prompts.length).toBeGreaterThan(0);
expect(prompts[0].content.text).toContain("explanation documentation");
expect(prompts[0].content.text).toContain("Understanding-oriented");
});
it("should generate diataxis organizer prompts", async () => {
const prompts = await generateTechnicalWriterPrompts(
"diataxis-organizer",
testProjectPath,
);
expect(prompts.length).toBeGreaterThan(0);
expect(prompts[0].content.text).toContain("Diataxis framework");
expect(prompts[0].content.text).toContain("organize");
});
it("should generate readme optimizer prompts", async () => {
const prompts = await generateTechnicalWriterPrompts(
"readme-optimizer",
testProjectPath,
);
expect(prompts.length).toBeGreaterThan(0);
expect(prompts[0].content.text).toContain("README");
expect(prompts[0].content.text).toContain("Diataxis-aware");
});
it("should generate analyze-and-recommend prompts", async () => {
const prompts = await generateTechnicalWriterPrompts(
"analyze-and-recommend",
testProjectPath,
);
expect(prompts.length).toBeGreaterThan(0);
expect(prompts[0].content.text.toLowerCase()).toContain("analyz");
expect(prompts[0].content.text.toLowerCase()).toContain("recommend");
});
it("should generate setup-documentation prompts", async () => {
const prompts = await generateTechnicalWriterPrompts(
"setup-documentation",
testProjectPath,
);
expect(prompts.length).toBeGreaterThan(0);
expect(prompts[0].content.text).toContain("documentation");
});
it("should generate troubleshoot-deployment prompts", async () => {
const prompts = await generateTechnicalWriterPrompts(
"troubleshoot-deployment",
testProjectPath,
);
expect(prompts.length).toBeGreaterThan(0);
expect(prompts[0].content.text).toContain("troubleshoot");
expect(prompts[0].content.text).toContain("deployment");
});
it("should generate maintain-documentation-freshness prompts", async () => {
const prompts = await generateTechnicalWriterPrompts(
"maintain-documentation-freshness",
testProjectPath,
{ action: "track", preset: "monthly" },
);
expect(prompts.length).toBeGreaterThan(0);
expect(prompts[0].content.text).toContain("freshness");
expect(prompts[0].content.text).toContain("track");
});
it("should throw error for unknown prompt type", async () => {
await expect(
generateTechnicalWriterPrompts("unknown-type", testProjectPath),
).rejects.toThrow("Unknown prompt type: unknown-type");
});
it("should include project context in prompts", async () => {
const prompts = await generateTechnicalWriterPrompts(
"tutorial-writer",
testProjectPath,
);
const promptText = prompts[0].content.text;
expect(promptText).toContain("React"); // Should detect React from package.json
expect(promptText).toContain("TypeScript"); // Should detect TypeScript
});
});
describe("analyzeProjectContext", () => {
it("should analyze project context correctly", async () => {
const context = await analyzeProjectContext(testProjectPath);
expect(context).toHaveProperty("projectType");
expect(context).toHaveProperty("languages");
expect(context).toHaveProperty("frameworks");
expect(context).toHaveProperty("hasTests");
expect(context).toHaveProperty("hasCI");
expect(context).toHaveProperty("readmeExists");
expect(context).toHaveProperty("documentationGaps");
// Check specific values based on our test setup
expect(context.projectType).toBe("node_application");
expect(context.languages).toContain("TypeScript");
expect(context.frameworks).toContain("React");
expect(context.hasTests).toBe(true);
expect(context.hasCI).toBe(true);
expect(context.readmeExists).toBe(true);
expect(context.packageManager).toBe("npm");
});
it("should detect documentation gaps", async () => {
const context = await analyzeProjectContext(testProjectPath);
expect(Array.isArray(context.documentationGaps)).toBe(true);
// Should detect missing documentation since we only have a basic README
expect(context.documentationGaps.length).toBeGreaterThan(0);
});
it("should handle projects without package.json", async () => {
// Create a project without package.json
const simpleProjectPath = join(tmpdir(), `simple-project-${Date.now()}`);
await fs.mkdir(simpleProjectPath, { recursive: true });
try {
const context = await analyzeProjectContext(simpleProjectPath);
expect(context.projectType).toBe("unknown");
expect(context.languages).toEqual([]);
expect(context.frameworks).toEqual([]);
expect(context.readmeExists).toBe(false);
} finally {
await fs.rm(simpleProjectPath, { recursive: true, force: true });
}
});
it("should detect yarn package manager", async () => {
// Create yarn.lock to simulate yarn project
await fs.writeFile(join(testProjectPath, "yarn.lock"), "# Yarn lockfile");
const context = await analyzeProjectContext(testProjectPath);
expect(context.packageManager).toBe("yarn");
});
it("should detect pnpm package manager", async () => {
// Create pnpm-lock.yaml to simulate pnpm project
await fs.writeFile(
join(testProjectPath, "pnpm-lock.yaml"),
"lockfileVersion: 5.4",
);
const context = await analyzeProjectContext(testProjectPath);
expect(context.packageManager).toBe("pnpm");
});
});
});
```
--------------------------------------------------------------------------------
/src/utils/freshness-tracker.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Documentation Freshness Tracking Utilities
*
* Tracks when documentation files were last updated and validated,
* supporting both short-term (minutes/hours) and long-term (days) staleness detection.
*/
import fs from "fs/promises";
import path from "path";
import matter from "gray-matter";
/**
* Time unit for staleness threshold
*/
export type TimeUnit = "minutes" | "hours" | "days";
/**
* Staleness threshold configuration
*/
export interface StalenessThreshold {
value: number;
unit: TimeUnit;
}
/**
* Predefined staleness levels
*/
export const STALENESS_PRESETS = {
realtime: { value: 30, unit: "minutes" as TimeUnit },
active: { value: 1, unit: "hours" as TimeUnit },
recent: { value: 24, unit: "hours" as TimeUnit },
weekly: { value: 7, unit: "days" as TimeUnit },
monthly: { value: 30, unit: "days" as TimeUnit },
quarterly: { value: 90, unit: "days" as TimeUnit },
} as const;
/**
* Documentation metadata tracked in frontmatter
*/
export interface DocFreshnessMetadata {
last_updated?: string; // ISO 8601 timestamp
last_validated?: string; // ISO 8601 timestamp
validated_against_commit?: string;
auto_updated?: boolean;
staleness_threshold?: StalenessThreshold;
update_frequency?: keyof typeof STALENESS_PRESETS;
}
/**
* Full frontmatter structure
*/
export interface DocFrontmatter {
title?: string;
description?: string;
documcp?: DocFreshnessMetadata;
[key: string]: unknown;
}
/**
* File freshness status
*/
export interface FileFreshnessStatus {
filePath: string;
relativePath: string;
hasMetadata: boolean;
metadata?: DocFreshnessMetadata;
lastUpdated?: Date;
lastValidated?: Date;
ageInMs?: number;
ageFormatted?: string;
isStale: boolean;
stalenessLevel: "fresh" | "warning" | "stale" | "critical" | "unknown";
staleDays?: number;
}
/**
* Freshness scan report
*/
export interface FreshnessScanReport {
scannedAt: string;
docsPath: string;
totalFiles: number;
filesWithMetadata: number;
filesWithoutMetadata: number;
freshFiles: number;
warningFiles: number;
staleFiles: number;
criticalFiles: number;
files: FileFreshnessStatus[];
thresholds: {
warning: StalenessThreshold;
stale: StalenessThreshold;
critical: StalenessThreshold;
};
}
/**
* Convert time threshold to milliseconds
*/
export function thresholdToMs(threshold: StalenessThreshold): number {
const { value, unit } = threshold;
switch (unit) {
case "minutes":
return value * 60 * 1000;
case "hours":
return value * 60 * 60 * 1000;
case "days":
return value * 24 * 60 * 60 * 1000;
default:
throw new Error(`Unknown time unit: ${unit}`);
}
}
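// Worked example (sketch): a 7-day threshold is
// 7 * 24 * 60 * 60 * 1000 = 604,800,000 ms, i.e.
// thresholdToMs({ value: 7, unit: "days" }) === 604_800_000.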
/**
* Format age in human-readable format
*/
export function formatAge(ageMs: number): string {
const seconds = Math.floor(ageMs / 1000);
const minutes = Math.floor(seconds / 60);
const hours = Math.floor(minutes / 60);
const days = Math.floor(hours / 24);
if (days > 0) {
return `${days} day${days !== 1 ? "s" : ""}`;
} else if (hours > 0) {
return `${hours} hour${hours !== 1 ? "s" : ""}`;
} else if (minutes > 0) {
return `${minutes} minute${minutes !== 1 ? "s" : ""}`;
} else {
return `${seconds} second${seconds !== 1 ? "s" : ""}`;
}
}
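// Worked examples (sketch): formatAge(90_000) returns "1 minute";
// formatAge(26 * 60 * 60 * 1000) returns "1 day" (largest whole unit wins).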
/**
* Parse frontmatter from markdown file
*/
export async function parseDocFrontmatter(
filePath: string,
): Promise<DocFrontmatter> {
try {
const content = await fs.readFile(filePath, "utf-8");
const { data } = matter(content);
return data as DocFrontmatter;
} catch (error) {
return {};
}
}
/**
* Update frontmatter in markdown file
*/
export async function updateDocFrontmatter(
filePath: string,
metadata: Partial<DocFreshnessMetadata>,
): Promise<void> {
const content = await fs.readFile(filePath, "utf-8");
const { data, content: body } = matter(content);
const existingDocuMCP = (data.documcp as DocFreshnessMetadata) || {};
const updatedData = {
...data,
documcp: {
...existingDocuMCP,
...metadata,
},
};
const newContent = matter.stringify(body, updatedData);
await fs.writeFile(filePath, newContent, "utf-8");
}
/**
* Calculate file freshness status
*/
export function calculateFreshnessStatus(
filePath: string,
relativePath: string,
frontmatter: DocFrontmatter,
thresholds: {
warning: StalenessThreshold;
stale: StalenessThreshold;
critical: StalenessThreshold;
},
): FileFreshnessStatus {
const metadata = frontmatter.documcp;
const hasMetadata = !!metadata?.last_updated;
if (!hasMetadata) {
return {
filePath,
relativePath,
hasMetadata: false,
isStale: true,
stalenessLevel: "unknown",
};
}
const lastUpdated = new Date(metadata.last_updated!);
const lastValidated = metadata.last_validated
? new Date(metadata.last_validated)
: undefined;
const now = new Date();
const ageInMs = now.getTime() - lastUpdated.getTime();
const ageFormatted = formatAge(ageInMs);
const staleDays = Math.floor(ageInMs / (24 * 60 * 60 * 1000));
// Determine staleness level
let stalenessLevel: FileFreshnessStatus["stalenessLevel"];
let isStale: boolean;
const warningMs = thresholdToMs(thresholds.warning);
const staleMs = thresholdToMs(thresholds.stale);
const criticalMs = thresholdToMs(thresholds.critical);
if (ageInMs >= criticalMs) {
stalenessLevel = "critical";
isStale = true;
} else if (ageInMs >= staleMs) {
stalenessLevel = "stale";
isStale = true;
} else if (ageInMs >= warningMs) {
stalenessLevel = "warning";
isStale = false;
} else {
stalenessLevel = "fresh";
isStale = false;
}
return {
filePath,
relativePath,
hasMetadata: true,
metadata,
lastUpdated,
lastValidated,
ageInMs,
ageFormatted,
isStale,
stalenessLevel,
staleDays,
};
}
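// Worked example (sketch): with the defaults applied by
// scanDocumentationFreshness below (warning = 7 days, stale = 30 days,
// critical = 90 days), a file whose last_updated is 10 days old gets
// stalenessLevel "warning" with isStale = false; at 45 days it becomes
// "stale" with isStale = true, and at 100 days "critical".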
/**
* Find all markdown files in directory recursively
*/
export async function findMarkdownFiles(dir: string): Promise<string[]> {
const files: string[] = [];
async function scan(currentDir: string): Promise<void> {
const entries = await fs.readdir(currentDir, { withFileTypes: true });
for (const entry of entries) {
const fullPath = path.join(currentDir, entry.name);
// Skip common directories
if (entry.isDirectory()) {
if (
!["node_modules", ".git", "dist", "build", ".documcp"].includes(
entry.name,
)
) {
await scan(fullPath);
}
continue;
}
// Include markdown files
if (entry.isFile() && /\.(md|mdx)$/i.test(entry.name)) {
files.push(fullPath);
}
}
}
await scan(dir);
return files;
}
/**
* Scan directory for documentation freshness
*/
export async function scanDocumentationFreshness(
docsPath: string,
thresholds: {
warning?: StalenessThreshold;
stale?: StalenessThreshold;
critical?: StalenessThreshold;
} = {},
): Promise<FreshnessScanReport> {
// Default thresholds
const finalThresholds = {
warning: thresholds.warning || STALENESS_PRESETS.weekly,
stale: thresholds.stale || STALENESS_PRESETS.monthly,
critical: thresholds.critical || STALENESS_PRESETS.quarterly,
};
// Find all markdown files
const markdownFiles = await findMarkdownFiles(docsPath);
// Analyze each file
const files: FileFreshnessStatus[] = [];
for (const filePath of markdownFiles) {
const relativePath = path.relative(docsPath, filePath);
const frontmatter = await parseDocFrontmatter(filePath);
const status = calculateFreshnessStatus(
filePath,
relativePath,
frontmatter,
finalThresholds,
);
files.push(status);
}
// Calculate summary statistics
const totalFiles = files.length;
const filesWithMetadata = files.filter((f) => f.hasMetadata).length;
const filesWithoutMetadata = totalFiles - filesWithMetadata;
const freshFiles = files.filter((f) => f.stalenessLevel === "fresh").length;
const warningFiles = files.filter(
(f) => f.stalenessLevel === "warning",
).length;
const staleFiles = files.filter((f) => f.stalenessLevel === "stale").length;
const criticalFiles = files.filter(
(f) => f.stalenessLevel === "critical",
).length;
return {
scannedAt: new Date().toISOString(),
docsPath,
totalFiles,
filesWithMetadata,
filesWithoutMetadata,
freshFiles,
warningFiles,
staleFiles,
criticalFiles,
files,
thresholds: finalThresholds,
};
}
/**
* Initialize frontmatter for files without metadata
*/
export async function initializeFreshnessMetadata(
filePath: string,
options: {
updateFrequency?: keyof typeof STALENESS_PRESETS;
autoUpdated?: boolean;
} = {},
): Promise<void> {
const frontmatter = await parseDocFrontmatter(filePath);
if (!frontmatter.documcp?.last_updated) {
const metadata: DocFreshnessMetadata = {
last_updated: new Date().toISOString(),
last_validated: new Date().toISOString(),
auto_updated: options.autoUpdated ?? false,
update_frequency: options.updateFrequency || "monthly",
};
if (options.updateFrequency) {
metadata.staleness_threshold = STALENESS_PRESETS[options.updateFrequency];
}
await updateDocFrontmatter(filePath, metadata);
}
}
```
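An end-to-end sketch of the utilities above (the docs path is illustrative): stamp metadata onto files that lack it, then scan with the default thresholds and print a one-line summary.

```typescript
// Freshness workflow sketch (illustrative docs path).
import {
  findMarkdownFiles,
  initializeFreshnessMetadata,
  scanDocumentationFreshness,
} from "./src/utils/freshness-tracker.js";

async function main(): Promise<void> {
  const docsPath = "./docs";
  // Add last_updated/last_validated to files missing documcp metadata.
  for (const file of await findMarkdownFiles(docsPath)) {
    await initializeFreshnessMetadata(file, { updateFrequency: "monthly" });
  }
  // Defaults: warning after 7 days, stale after 30, critical after 90.
  const report = await scanDocumentationFreshness(docsPath);
  console.log(
    `${report.freshFiles}/${report.totalFiles} fresh, ` +
      `${report.staleFiles} stale, ${report.criticalFiles} critical`,
  );
}

main().catch(console.error);
```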
--------------------------------------------------------------------------------
/tests/tools/readme-best-practices.test.ts:
--------------------------------------------------------------------------------
```typescript
import { readmeBestPractices } from "../../src/tools/readme-best-practices.js";
import { formatMCPResponse } from "../../src/types/api.js";
import { writeFile, mkdir, rm } from "fs/promises";
import { join } from "path";
describe("readmeBestPractices", () => {
const testDir = join(process.cwd(), "test-readme-best-practices-temp");
beforeEach(async () => {
// Create test directory
await mkdir(testDir, { recursive: true });
});
afterEach(async () => {
// Clean up test directory
try {
await rm(testDir, { recursive: true, force: true });
} catch (error) {
// Ignore cleanup errors
}
});
describe("Basic Functionality", () => {
test("should analyze README best practices with default parameters", async () => {
const readmePath = join(testDir, "README.md");
await writeFile(
readmePath,
`# Test Library
## Description
This is a test library for analyzing best practices.
## Installation
\`\`\`bash
npm install test-library
\`\`\`
## Usage
\`\`\`javascript
const lib = require('test-library');
\`\`\`
## API Reference
Function documentation here.
## Contributing
Please read CONTRIBUTING.md.
## License
MIT License
`,
);
const result = await readmeBestPractices({
readme_path: readmePath,
});
expect(result.success).toBe(true);
expect(result.data).toBeDefined();
expect(result.data!.bestPracticesReport).toBeDefined();
expect(result.metadata).toBeDefined();
});
test("should handle different project types", async () => {
const readmePath = join(testDir, "README.md");
await writeFile(readmePath, "# Application\n\nA web application.");
const result = await readmeBestPractices({
readme_path: readmePath,
project_type: "application",
});
expect(result.success).toBe(true);
expect(result.data).toBeDefined();
});
test("should generate templates when requested", async () => {
const outputDir = join(testDir, "output");
await mkdir(outputDir, { recursive: true });
const result = await readmeBestPractices({
readme_path: join(testDir, "nonexistent.md"),
generate_template: true,
output_directory: outputDir,
project_type: "library",
});
expect(result.success).toBe(true);
expect(result.data).toBeDefined();
});
test("should handle different target audiences", async () => {
const readmePath = join(testDir, "README.md");
await writeFile(readmePath, "# Advanced Tool\n\nFor expert users.");
const result = await readmeBestPractices({
readme_path: readmePath,
target_audience: "advanced",
});
expect(result.success).toBe(true);
expect(result.data).toBeDefined();
});
});
describe("Error Handling", () => {
test("should handle missing README file without template generation", async () => {
const result = await readmeBestPractices({
readme_path: join(testDir, "nonexistent.md"),
generate_template: false,
});
expect(result.success).toBe(false);
expect(result.error).toBeDefined();
expect(result.error!.code).toBe("README_NOT_FOUND");
});
test("should handle invalid project type", async () => {
const readmePath = join(testDir, "README.md");
await writeFile(readmePath, "# Test");
const result = await readmeBestPractices({
readme_path: readmePath,
project_type: "invalid_type" as any,
});
expect(result.success).toBe(false);
expect(result.error).toBeDefined();
});
test("should handle invalid target audience", async () => {
const readmePath = join(testDir, "README.md");
await writeFile(readmePath, "# Test");
const result = await readmeBestPractices({
readme_path: readmePath,
target_audience: "invalid_audience" as any,
});
expect(result.success).toBe(false);
expect(result.error).toBeDefined();
});
});
describe("Best Practices Analysis", () => {
test("should evaluate checklist items", async () => {
const readmePath = join(testDir, "README.md");
await writeFile(
readmePath,
`# Complete Library
## Table of Contents
- [Installation](#installation)
- [Usage](#usage)
## Description
Detailed description of the library.
## Installation
Installation instructions here.
## Usage
Usage examples here.
## API Reference
API documentation.
## Examples
Code examples.
## Contributing
Contributing guidelines.
## License
MIT License
## Support
Support information.
`,
);
const result = await readmeBestPractices({
readme_path: readmePath,
project_type: "library",
});
expect(result.success).toBe(true);
expect(result.data!.bestPracticesReport.checklist).toBeDefined();
expect(Array.isArray(result.data!.bestPracticesReport.checklist)).toBe(
true,
);
expect(result.data!.bestPracticesReport.checklist.length).toBeGreaterThan(
0,
);
});
test("should calculate overall score and grade", async () => {
const readmePath = join(testDir, "README.md");
await writeFile(readmePath, "# Basic Project\n\nMinimal content.");
const result = await readmeBestPractices({
readme_path: readmePath,
});
expect(result.success).toBe(true);
expect(
result.data!.bestPracticesReport.overallScore,
).toBeGreaterThanOrEqual(0);
expect(result.data!.bestPracticesReport.overallScore).toBeLessThanOrEqual(
100,
);
expect(result.data!.bestPracticesReport.grade).toBeDefined();
});
test("should provide recommendations", async () => {
const readmePath = join(testDir, "README.md");
await writeFile(readmePath, "# Incomplete Project");
const result = await readmeBestPractices({
readme_path: readmePath,
});
expect(result.success).toBe(true);
expect(result.data!.recommendations).toBeDefined();
expect(Array.isArray(result.data!.recommendations)).toBe(true);
expect(result.data!.nextSteps).toBeDefined();
expect(Array.isArray(result.data!.nextSteps)).toBe(true);
});
test("should provide summary metrics", async () => {
const readmePath = join(testDir, "README.md");
await writeFile(
readmePath,
`# Project
## Description
Basic description.
## Installation
Install steps.
`,
);
const result = await readmeBestPractices({
readme_path: readmePath,
});
expect(result.success).toBe(true);
expect(result.data!.bestPracticesReport.summary).toBeDefined();
expect(
result.data!.bestPracticesReport.summary.criticalIssues,
).toBeGreaterThanOrEqual(0);
expect(
result.data!.bestPracticesReport.summary.importantIssues,
).toBeGreaterThanOrEqual(0);
expect(
result.data!.bestPracticesReport.summary.sectionsPresent,
).toBeGreaterThanOrEqual(0);
expect(
result.data!.bestPracticesReport.summary.totalSections,
).toBeGreaterThan(0);
});
});
describe("Template Generation", () => {
test("should generate README template when file is missing", async () => {
const outputDir = join(testDir, "template-output");
await mkdir(outputDir, { recursive: true });
const result = await readmeBestPractices({
readme_path: join(testDir, "missing.md"),
generate_template: true,
output_directory: outputDir,
project_type: "tool",
include_community_files: true,
});
expect(result.success).toBe(true);
expect(result.data).toBeDefined();
});
test("should handle template generation without community files", async () => {
const outputDir = join(testDir, "no-community-output");
await mkdir(outputDir, { recursive: true });
const result = await readmeBestPractices({
readme_path: join(testDir, "missing.md"),
generate_template: true,
output_directory: outputDir,
include_community_files: false,
});
expect(result.success).toBe(true);
expect(result.data).toBeDefined();
});
});
describe("Response Format", () => {
test("should return MCPToolResponse structure", async () => {
const readmePath = join(testDir, "README.md");
await writeFile(readmePath, "# Test Project");
const result = await readmeBestPractices({
readme_path: readmePath,
});
expect(result.success).toBeDefined();
expect(result.metadata).toBeDefined();
expect(result.metadata.toolVersion).toBe("1.0.0");
expect(result.metadata.executionTime).toBeGreaterThanOrEqual(0);
expect(result.metadata.timestamp).toBeDefined();
expect(result.metadata.analysisId).toBeDefined();
});
test("should format properly with formatMCPResponse", async () => {
const readmePath = join(testDir, "README.md");
await writeFile(readmePath, "# Test Project");
const result = await readmeBestPractices({
readme_path: readmePath,
});
// Test that the result can be formatted without errors
const formatted = formatMCPResponse(result);
expect(formatted.content).toBeDefined();
expect(Array.isArray(formatted.content)).toBe(true);
expect(formatted.content.length).toBeGreaterThan(0);
expect(formatted.isError).toBe(false);
});
});
});
```
--------------------------------------------------------------------------------
/src/benchmarks/performance.ts:
--------------------------------------------------------------------------------
```typescript
// Performance benchmarking system per PERF-001 rules
import { promises as fs } from "fs";
import path from "path";
import { analyzeRepository } from "../tools/analyze-repository.js";
export interface BenchmarkResult {
repoSize: "small" | "medium" | "large";
fileCount: number;
executionTime: number;
targetTime: number;
passed: boolean;
performanceRatio: number;
details: {
startTime: number;
endTime: number;
memoryUsage: NodeJS.MemoryUsage;
};
}
export interface BenchmarkSuite {
testName: string;
results: BenchmarkResult[];
overallPassed: boolean;
averagePerformance: number;
summary: {
smallRepos: { count: number; avgTime: number; passed: number };
mediumRepos: { count: number; avgTime: number; passed: number };
largeRepos: { count: number; avgTime: number; passed: number };
};
}
// PERF-001 performance targets
const PERFORMANCE_TARGETS = {
small: 1000, // <1 second for <100 files
medium: 10000, // <10 seconds for 100-1000 files
large: 60000, // <60 seconds for 1000+ files
} as const;
export class PerformanceBenchmarker {
private results: BenchmarkResult[] = [];
async benchmarkRepository(
repoPath: string,
depth: "quick" | "standard" | "deep" = "standard",
): Promise<BenchmarkResult> {
const fileCount = await this.getFileCount(repoPath);
const repoSize = this.categorizeRepoSize(fileCount);
const targetTime = PERFORMANCE_TARGETS[repoSize];
// Capture initial memory state
const initialMemory = process.memoryUsage();
const startTime = Date.now();
try {
// Run the actual analysis
await analyzeRepository({ path: repoPath, depth });
const endTime = Date.now();
const executionTime = endTime - startTime;
const finalMemory = process.memoryUsage();
const performanceRatio = executionTime / targetTime;
const passed = executionTime <= targetTime;
const result: BenchmarkResult = {
repoSize,
fileCount,
executionTime,
targetTime,
passed,
performanceRatio,
details: {
startTime,
endTime,
memoryUsage: {
rss: finalMemory.rss - initialMemory.rss,
heapTotal: finalMemory.heapTotal - initialMemory.heapTotal,
heapUsed: finalMemory.heapUsed - initialMemory.heapUsed,
external: finalMemory.external - initialMemory.external,
arrayBuffers: finalMemory.arrayBuffers - initialMemory.arrayBuffers,
},
},
};
this.results.push(result);
return result;
} catch (error) {
const endTime = Date.now();
const executionTime = endTime - startTime;
// Even failed executions should be benchmarked
const result: BenchmarkResult = {
repoSize,
fileCount,
executionTime,
targetTime,
passed: false, // Failed execution = failed performance
performanceRatio: executionTime / targetTime,
details: {
startTime,
endTime,
memoryUsage: process.memoryUsage(),
},
};
this.results.push(result);
throw error;
}
}
async runBenchmarkSuite(
testRepos: Array<{ path: string; name: string }>,
): Promise<BenchmarkSuite> {
console.log("🚀 Starting performance benchmark suite...\n");
const results: BenchmarkResult[] = [];
for (const repo of testRepos) {
console.log(`📊 Benchmarking: ${repo.name}`);
try {
const result = await this.benchmarkRepository(repo.path);
results.push(result);
const status = result.passed ? "✅ PASS" : "❌ FAIL";
const ratio = (result.performanceRatio * 100).toFixed(1);
console.log(
` ${status} ${result.executionTime}ms (${ratio}% of target) - ${result.repoSize} repo with ${result.fileCount} files`,
);
} catch (error) {
console.log(` ❌ ERROR: ${error}`);
}
}
console.log("\n📈 Generating performance summary...\n");
return this.generateSuite("Full Benchmark Suite", results);
}
generateSuite(testName: string, results: BenchmarkResult[]): BenchmarkSuite {
const overallPassed = results.every((r) => r.passed);
const averagePerformance =
results.reduce((sum, r) => sum + r.performanceRatio, 0) / results.length;
// Categorize results
const smallRepos = results.filter((r) => r.repoSize === "small");
const mediumRepos = results.filter((r) => r.repoSize === "medium");
const largeRepos = results.filter((r) => r.repoSize === "large");
const suite: BenchmarkSuite = {
testName,
results,
overallPassed,
averagePerformance,
summary: {
smallRepos: {
count: smallRepos.length,
avgTime:
smallRepos.reduce((sum, r) => sum + r.executionTime, 0) /
smallRepos.length || 0,
passed: smallRepos.filter((r) => r.passed).length,
},
mediumRepos: {
count: mediumRepos.length,
avgTime:
mediumRepos.reduce((sum, r) => sum + r.executionTime, 0) /
mediumRepos.length || 0,
passed: mediumRepos.filter((r) => r.passed).length,
},
largeRepos: {
count: largeRepos.length,
avgTime:
largeRepos.reduce((sum, r) => sum + r.executionTime, 0) /
largeRepos.length || 0,
passed: largeRepos.filter((r) => r.passed).length,
},
},
};
return suite;
}
printDetailedReport(suite: BenchmarkSuite): void {
console.log(`📋 Performance Benchmark Report: ${suite.testName}`);
console.log("=".repeat(60));
console.log(
`Overall Status: ${suite.overallPassed ? "✅ PASSED" : "❌ FAILED"}`,
);
console.log(
`Average Performance: ${(suite.averagePerformance * 100).toFixed(
1,
)}% of target`,
);
console.log(`Total Tests: ${suite.results.length}\n`);
// Summary by repo size
console.log("📊 Performance by Repository Size:");
console.log("-".repeat(40));
const categories = [
{
name: "Small (<100 files)",
data: suite.summary.smallRepos,
target: PERFORMANCE_TARGETS.small,
},
{
name: "Medium (100-1000 files)",
data: suite.summary.mediumRepos,
target: PERFORMANCE_TARGETS.medium,
},
{
name: "Large (1000+ files)",
data: suite.summary.largeRepos,
target: PERFORMANCE_TARGETS.large,
},
];
categories.forEach((cat) => {
if (cat.data.count > 0) {
const passRate = ((cat.data.passed / cat.data.count) * 100).toFixed(1);
const avgTime = cat.data.avgTime.toFixed(0);
const targetTime = (cat.target / 1000).toFixed(1);
console.log(`${cat.name}:`);
console.log(
` Tests: ${cat.data.count} | Passed: ${cat.data.passed}/${cat.data.count} (${passRate}%)`,
);
console.log(` Avg Time: ${avgTime}ms | Target: <${targetTime}s`);
console.log("");
}
});
// Detailed results
console.log("🔍 Detailed Results:");
console.log("-".repeat(40));
suite.results.forEach((result, i) => {
const status = result.passed ? "✅" : "❌";
const ratio = (result.performanceRatio * 100).toFixed(1);
const memoryMB = (
result.details.memoryUsage.heapUsed /
1024 /
1024
).toFixed(1);
console.log(
`${status} Test ${i + 1}: ${
result.executionTime
}ms (${ratio}% of target)`,
);
console.log(
` Size: ${result.repoSize} (${result.fileCount} files) | Memory: ${memoryMB}MB heap`,
);
});
console.log("\n" + "=".repeat(60));
}
exportResults(suite: BenchmarkSuite, outputPath: string): Promise<void> {
const report = {
timestamp: new Date().toISOString(),
suite,
systemInfo: {
node: process.version,
platform: process.platform,
arch: process.arch,
memoryUsage: process.memoryUsage(),
},
performanceTargets: PERFORMANCE_TARGETS,
};
return fs.writeFile(outputPath, JSON.stringify(report, null, 2));
}
private async getFileCount(repoPath: string): Promise<number> {
let fileCount = 0;
async function countFiles(dir: string, level = 0): Promise<void> {
      if (level > 10) return; // Cap traversal depth to bound the scan
try {
const entries = await fs.readdir(dir, { withFileTypes: true });
for (const entry of entries) {
if (entry.name.startsWith(".") && entry.name !== ".github") continue;
if (entry.name === "node_modules" || entry.name === "vendor")
continue;
const fullPath = path.join(dir, entry.name);
if (entry.isDirectory()) {
await countFiles(fullPath, level + 1);
} else {
fileCount++;
}
}
} catch (error) {
// Skip inaccessible directories
}
}
await countFiles(repoPath);
return fileCount;
}
private categorizeRepoSize(fileCount: number): "small" | "medium" | "large" {
if (fileCount < 100) return "small";
if (fileCount < 1000) return "medium";
return "large";
}
// Utility method to clear results for fresh benchmarking
reset(): void {
this.results = [];
}
// Get current benchmark results
getResults(): BenchmarkResult[] {
return [...this.results];
}
}
// Factory function for easy usage
export function createBenchmarker(): PerformanceBenchmarker {
return new PerformanceBenchmarker();
}
```
--------------------------------------------------------------------------------
/src/tools/validate-documentation-freshness.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Validate Documentation Freshness Tool
*
* Validates documentation freshness, initializes metadata for files without it,
* and updates timestamps based on code changes.
*/
import { z } from "zod";
import path from "path";
import { simpleGit } from "simple-git";
import {
findMarkdownFiles,
parseDocFrontmatter,
updateDocFrontmatter,
initializeFreshnessMetadata,
STALENESS_PRESETS,
type DocFreshnessMetadata,
scanDocumentationFreshness,
} from "../utils/freshness-tracker.js";
import { type MCPToolResponse } from "../types/api.js";
import {
storeFreshnessEvent,
updateFreshnessEvent,
} from "../memory/freshness-kg-integration.js";
/**
* Input schema for validate_documentation_freshness tool
*/
export const ValidateDocumentationFreshnessSchema = z.object({
docsPath: z.string().describe("Path to documentation directory"),
projectPath: z
.string()
.describe("Path to project root (for git integration)"),
initializeMissing: z
.boolean()
.optional()
.default(true)
.describe("Initialize metadata for files without it"),
updateExisting: z
.boolean()
.optional()
.default(false)
.describe("Update last_validated timestamp for all files"),
updateFrequency: z
.enum(["realtime", "active", "recent", "weekly", "monthly", "quarterly"])
.optional()
.default("monthly")
.describe("Default update frequency for new metadata"),
validateAgainstGit: z
.boolean()
.optional()
.default(true)
.describe("Validate against current git commit"),
});
export type ValidateDocumentationFreshnessInput = z.input<
typeof ValidateDocumentationFreshnessSchema
>;
/**
* Validation result for a single file
*/
interface FileValidationResult {
filePath: string;
relativePath: string;
action: "initialized" | "updated" | "skipped" | "error";
metadata?: DocFreshnessMetadata;
error?: string;
}
/**
* Validation report
*/
interface ValidationReport {
validatedAt: string;
docsPath: string;
projectPath: string;
totalFiles: number;
initialized: number;
updated: number;
skipped: number;
errors: number;
currentCommit?: string;
files: FileValidationResult[];
}
/**
* Format validation report for display
*/
function formatValidationReport(report: ValidationReport): string {
let output = "# Documentation Freshness Validation Report\n\n";
output += `**Validated at**: ${new Date(
report.validatedAt,
).toLocaleString()}\n`;
output += `**Documentation path**: ${report.docsPath}\n`;
if (report.currentCommit) {
output += `**Current commit**: ${report.currentCommit.substring(0, 7)}\n`;
}
output += "\n## Summary\n\n";
output += `- **Total files**: ${report.totalFiles}\n`;
output += `- **Initialized**: ${report.initialized} files\n`;
output += `- **Updated**: ${report.updated} files\n`;
output += `- **Skipped**: ${report.skipped} files\n`;
if (report.errors > 0) {
output += `- **Errors**: ${report.errors} files\n`;
}
output += "\n## Actions Performed\n\n";
// Group by action
const grouped = {
initialized: report.files.filter((f) => f.action === "initialized"),
updated: report.files.filter((f) => f.action === "updated"),
error: report.files.filter((f) => f.action === "error"),
};
if (grouped.initialized.length > 0) {
output += `### ✨ Initialized (${grouped.initialized.length})\n\n`;
for (const file of grouped.initialized) {
output += `- ${file.relativePath}\n`;
}
output += "\n";
}
if (grouped.updated.length > 0) {
output += `### 🔄 Updated (${grouped.updated.length})\n\n`;
for (const file of grouped.updated) {
output += `- ${file.relativePath}\n`;
}
output += "\n";
}
if (grouped.error.length > 0) {
output += `### ❌ Errors (${grouped.error.length})\n\n`;
for (const file of grouped.error) {
output += `- ${file.relativePath}: ${file.error}\n`;
}
output += "\n";
}
// Recommendations
output += "## Next Steps\n\n";
if (report.initialized > 0) {
output += `→ ${report.initialized} files now have freshness tracking enabled\n`;
}
if (report.updated > 0) {
output += `→ ${report.updated} files have been marked as validated\n`;
}
output += `→ Run \`track_documentation_freshness\` to view current freshness status\n`;
return output;
}
/**
* Validate documentation freshness
*/
export async function validateDocumentationFreshness(
input: ValidateDocumentationFreshnessInput,
): Promise<MCPToolResponse> {
const startTime = Date.now();
try {
const {
docsPath,
projectPath,
initializeMissing,
updateExisting,
updateFrequency,
validateAgainstGit,
} = input;
// Get current git commit if requested
let currentCommit: string | undefined;
if (validateAgainstGit) {
try {
const git = simpleGit(projectPath);
const isRepo = await git.checkIsRepo();
if (isRepo) {
const log = await git.log({ maxCount: 1 });
currentCommit = log.latest?.hash;
}
} catch (error) {
// Git not available, continue without it
}
}
// Find all markdown files
const markdownFiles = await findMarkdownFiles(docsPath);
const results: FileValidationResult[] = [];
for (const filePath of markdownFiles) {
const relativePath = path.relative(docsPath, filePath);
try {
const frontmatter = await parseDocFrontmatter(filePath);
const hasMetadata = !!frontmatter.documcp?.last_updated;
if (!hasMetadata && initializeMissing) {
// Initialize metadata
await initializeFreshnessMetadata(filePath, {
updateFrequency,
autoUpdated: false,
});
// If git is available, set validated_against_commit
if (currentCommit) {
await updateDocFrontmatter(filePath, {
validated_against_commit: currentCommit,
});
}
const updatedFrontmatter = await parseDocFrontmatter(filePath);
results.push({
filePath,
relativePath,
action: "initialized",
metadata: updatedFrontmatter.documcp,
});
} else if (hasMetadata && updateExisting) {
// Update existing metadata
const updateData: Partial<DocFreshnessMetadata> = {
last_validated: new Date().toISOString(),
};
if (currentCommit) {
updateData.validated_against_commit = currentCommit;
}
await updateDocFrontmatter(filePath, updateData);
const updatedFrontmatter = await parseDocFrontmatter(filePath);
results.push({
filePath,
relativePath,
action: "updated",
metadata: updatedFrontmatter.documcp,
});
} else {
results.push({
filePath,
relativePath,
action: "skipped",
metadata: frontmatter.documcp,
});
}
} catch (error) {
results.push({
filePath,
relativePath,
action: "error",
error: error instanceof Error ? error.message : "Unknown error",
});
}
}
// Generate report
const report: ValidationReport = {
validatedAt: new Date().toISOString(),
docsPath,
projectPath,
totalFiles: markdownFiles.length,
initialized: results.filter((r) => r.action === "initialized").length,
updated: results.filter((r) => r.action === "updated").length,
skipped: results.filter((r) => r.action === "skipped").length,
errors: results.filter((r) => r.action === "error").length,
currentCommit,
files: results,
};
const formattedReport = formatValidationReport(report);
// Store validation event in knowledge graph
let eventId: string | undefined;
if (report.initialized > 0 || report.updated > 0) {
try {
// Scan current state to get freshness metrics
const scanReport = await scanDocumentationFreshness(docsPath, {
warning: STALENESS_PRESETS.monthly,
stale: {
value: STALENESS_PRESETS.monthly.value * 2,
unit: STALENESS_PRESETS.monthly.unit,
},
critical: {
value: STALENESS_PRESETS.monthly.value * 3,
unit: STALENESS_PRESETS.monthly.unit,
},
});
// Determine event type
const eventType = report.initialized > 0 ? "initialization" : "update";
// Store in KG
eventId = await storeFreshnessEvent(
projectPath,
docsPath,
scanReport,
eventType,
);
// Update event with validation details
await updateFreshnessEvent(eventId, {
filesInitialized: report.initialized,
filesUpdated: report.updated,
eventType,
});
} catch (error) {
// KG storage failed, but continue with the response
console.warn(
"Failed to store validation event in knowledge graph:",
error,
);
}
}
const response: MCPToolResponse = {
success: true,
data: {
summary: `Validated ${report.totalFiles} files: ${report.initialized} initialized, ${report.updated} updated`,
report,
formattedReport,
kgEventId: eventId,
},
metadata: {
toolVersion: "1.0.0",
executionTime: Date.now() - startTime,
timestamp: new Date().toISOString(),
},
recommendations: [],
};
return response;
} catch (error) {
return {
success: false,
error: {
code: "FRESHNESS_VALIDATION_FAILED",
message:
error instanceof Error
? error.message
: "Unknown error validating documentation freshness",
resolution:
"Check that the documentation and project paths exist and are readable",
},
metadata: {
toolVersion: "1.0.0",
executionTime: Date.now() - startTime,
timestamp: new Date().toISOString(),
},
};
}
}
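// Example invocation (illustrative paths; the response shape follows the
// MCPToolResponse constructed above):
//
//   const result = await validateDocumentationFreshness({
//     docsPath: "./docs",
//     projectPath: ".",
//     updateFrequency: "monthly",
//   });
//   if (result.success) {
//     console.log(result.data?.formattedReport);
//   }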
```
--------------------------------------------------------------------------------
/tests/tools/generate-llm-context.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
import { promises as fs } from "fs";
import path from "path";
import os from "os";
import {
generateLLMContext,
setToolDefinitions,
GenerateLLMContextInputSchema,
} from "../../src/tools/generate-llm-context.js";
import { z } from "zod";
describe("generate_llm_context", () => {
let tmpDir: string;
beforeEach(async () => {
tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "generate-llm-context-"));
// Set up mock tool definitions
const mockTools = [
{
name: "analyze_repository",
description: "Analyze repository structure and dependencies",
inputSchema: z.object({
path: z.string(),
depth: z.enum(["quick", "standard", "deep"]).optional(),
}),
},
{
name: "recommend_ssg",
description: "Recommend static site generator",
inputSchema: z.object({
analysisId: z.string(),
userId: z.string().optional(),
}),
},
{
name: "sync_code_to_docs",
description: "Synchronize code with documentation",
inputSchema: z.object({
projectPath: z.string(),
docsPath: z.string(),
mode: z.enum(["detect", "preview", "apply", "auto"]).optional(),
}),
},
];
setToolDefinitions(mockTools);
});
afterEach(async () => {
await fs.rm(tmpDir, { recursive: true, force: true });
});
describe("Basic Generation", () => {
it("should generate LLM context file with default options", async () => {
const result = await generateLLMContext({
projectPath: tmpDir,
});
// Check result structure
expect(result.content).toBeDefined();
expect(result.content[0].text).toContain("path");
// Check file exists
const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
const fileExists = await fs
.access(outputPath)
.then(() => true)
.catch(() => false);
expect(fileExists).toBe(true);
// Check file content
const content = await fs.readFile(outputPath, "utf-8");
expect(content).toContain("# DocuMCP LLM Context Reference");
expect(content).toContain("analyze_repository");
expect(content).toContain("recommend_ssg");
expect(content).toContain("sync_code_to_docs");
});
it("should include examples when requested", async () => {
await generateLLMContext({
projectPath: tmpDir,
includeExamples: true,
});
const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
const content = await fs.readFile(outputPath, "utf-8");
expect(content).toContain("**Example**:");
expect(content).toContain("```typescript");
});
it("should generate concise format", async () => {
await generateLLMContext({
projectPath: tmpDir,
format: "concise",
includeExamples: false,
});
const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
const content = await fs.readFile(outputPath, "utf-8");
expect(content).toContain("# DocuMCP LLM Context Reference");
expect(content).not.toContain("**Parameters**:");
});
it("should generate detailed format with parameters", async () => {
await generateLLMContext({
projectPath: tmpDir,
format: "detailed",
});
const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
const content = await fs.readFile(outputPath, "utf-8");
expect(content).toContain("# DocuMCP LLM Context Reference");
expect(content).toContain("**Parameters**:");
});
});
describe("Content Sections", () => {
it("should include overview section", async () => {
const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
await generateLLMContext({ projectPath: tmpDir });
const content = await fs.readFile(outputPath, "utf-8");
expect(content).toContain("## Overview");
expect(content).toContain("DocuMCP is an intelligent MCP server");
});
it("should include core tools section", async () => {
const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
await generateLLMContext({ projectPath: tmpDir });
const content = await fs.readFile(outputPath, "utf-8");
expect(content).toContain("## Core Documentation Tools");
});
it("should include Phase 3 tools section", async () => {
const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
await generateLLMContext({ projectPath: tmpDir });
const content = await fs.readFile(outputPath, "utf-8");
expect(content).toContain(
"## Phase 3: Code-to-Docs Synchronization Tools",
);
expect(content).toContain("sync_code_to_docs");
});
it("should include memory system section", async () => {
const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
await generateLLMContext({ projectPath: tmpDir });
const content = await fs.readFile(outputPath, "utf-8");
expect(content).toContain("## Memory Knowledge Graph System");
expect(content).toContain("### Entity Types");
expect(content).toContain("### Relationship Types");
});
it("should include workflows section", async () => {
const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
await generateLLMContext({ projectPath: tmpDir });
const content = await fs.readFile(outputPath, "utf-8");
expect(content).toContain("## Common Workflows");
expect(content).toContain("### 1. New Documentation Site Setup");
});
it("should include quick reference table", async () => {
const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
await generateLLMContext({ projectPath: tmpDir });
const content = await fs.readFile(outputPath, "utf-8");
expect(content).toContain("## Quick Reference Table");
expect(content).toContain("| Tool | Primary Use |");
});
});
describe("Input Validation", () => {
it("should validate input schema", () => {
expect(() => {
GenerateLLMContextInputSchema.parse({
projectPath: "/test/path",
includeExamples: true,
format: "detailed",
});
}).not.toThrow();
});
it("should use default values for optional fields", () => {
const result = GenerateLLMContextInputSchema.parse({
projectPath: "/test/path",
});
expect(result.projectPath).toBe("/test/path");
expect(result.includeExamples).toBe(true);
expect(result.format).toBe("detailed");
});
it("should require projectPath", () => {
expect(() => {
GenerateLLMContextInputSchema.parse({});
}).toThrow();
});
it("should reject invalid format", () => {
expect(() => {
GenerateLLMContextInputSchema.parse({
projectPath: "/test/path",
format: "invalid",
});
}).toThrow();
});
});
describe("Error Handling", () => {
it("should handle write errors gracefully", async () => {
const invalidPath = "/invalid/path/that/does/not/exist";
const result = await generateLLMContext({
projectPath: invalidPath,
});
expect(result.content[0].text).toContain("GENERATION_ERROR");
expect(result.isError).toBe(true);
});
});
describe("File Output", () => {
it("should create LLM_CONTEXT.md in project root", async () => {
const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
await generateLLMContext({ projectPath: tmpDir });
const fileExists = await fs
.access(outputPath)
.then(() => true)
.catch(() => false);
expect(fileExists).toBe(true);
});
it("should overwrite existing file", async () => {
const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
// Write first time
await generateLLMContext({ projectPath: tmpDir });
const firstContent = await fs.readFile(outputPath, "utf-8");
// Wait a moment to ensure timestamp changes
await new Promise((resolve) => setTimeout(resolve, 10));
// Write second time
await generateLLMContext({ projectPath: tmpDir });
const secondContent = await fs.readFile(outputPath, "utf-8");
// Content should be different (timestamp changed)
expect(firstContent).not.toEqual(secondContent);
});
it("should report correct file stats", async () => {
const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
const result = await generateLLMContext({ projectPath: tmpDir });
const data = JSON.parse(result.content[0].text);
expect(data.stats).toBeDefined();
expect(data.stats.totalTools).toBe(3);
expect(data.stats.fileSize).toBeGreaterThan(0);
expect(data.stats.sections).toBeInstanceOf(Array);
});
});
describe("Tool Extraction", () => {
it("should extract tool names correctly", async () => {
const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
await generateLLMContext({ projectPath: tmpDir });
const content = await fs.readFile(outputPath, "utf-8");
expect(content).toContain("`analyze_repository`");
expect(content).toContain("`recommend_ssg`");
expect(content).toContain("`sync_code_to_docs`");
});
it("should extract tool descriptions", async () => {
const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
await generateLLMContext({ projectPath: tmpDir });
const content = await fs.readFile(outputPath, "utf-8");
expect(content).toContain(
"Analyze repository structure and dependencies",
);
expect(content).toContain("Recommend static site generator");
});
it("should handle tools with no examples", async () => {
const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
await generateLLMContext({ projectPath: tmpDir, includeExamples: true });
const content = await fs.readFile(outputPath, "utf-8");
// recommend_ssg doesn't have an example defined
const ssgSection = content.match(
/### `recommend_ssg`[\s\S]*?(?=###|$)/,
)?.[0];
expect(ssgSection).toBeDefined();
});
});
});
```
--------------------------------------------------------------------------------
/tests/tools/analyze-coverage.test.ts:
--------------------------------------------------------------------------------
```typescript
// Additional tests to improve analyze-repository coverage
import { promises as fs } from "fs";
import path from "path";
import os from "os";
import { analyzeRepository } from "../../src/tools/analyze-repository";
describe("Analyze Repository Additional Coverage", () => {
let tempDir: string;
beforeAll(async () => {
tempDir = path.join(os.tmpdir(), "analyze-coverage");
await fs.mkdir(tempDir, { recursive: true });
});
afterAll(async () => {
try {
await fs.rm(tempDir, { recursive: true, force: true });
} catch (error) {
// Cleanup errors are okay
}
});
describe("Different Repository Types", () => {
it("should analyze Ruby project", async () => {
const rubyDir = path.join(tempDir, "ruby-project");
await fs.mkdir(rubyDir, { recursive: true });
await fs.writeFile(
path.join(rubyDir, "Gemfile"),
`
source 'https://rubygems.org'
gem 'rails', '~> 7.0'
gem 'puma'
gem 'redis'
`,
);
await fs.writeFile(path.join(rubyDir, "app.rb"), 'puts "Hello Ruby"');
await fs.writeFile(path.join(rubyDir, "README.md"), "# Ruby Project");
const result = await analyzeRepository({
path: rubyDir,
depth: "standard",
});
expect(result.content).toBeDefined();
const analysis = JSON.parse(result.content[0].text);
expect(analysis.dependencies.ecosystem).toBe("ruby");
});
it("should analyze Go project", async () => {
const goDir = path.join(tempDir, "go-project");
await fs.mkdir(goDir, { recursive: true });
await fs.writeFile(
path.join(goDir, "go.mod"),
`
module example.com/myapp
go 1.21
require (
github.com/gin-gonic/gin v1.9.0
github.com/stretchr/testify v1.8.0
)
`,
);
await fs.writeFile(path.join(goDir, "main.go"), "package main");
await fs.writeFile(path.join(goDir, "README.md"), "# Go Project");
const result = await analyzeRepository({
path: goDir,
depth: "standard",
});
expect(result.content).toBeDefined();
const analysis = JSON.parse(result.content[0].text);
expect(analysis.dependencies.ecosystem).toBe("go");
});
it("should analyze Java project", async () => {
const javaDir = path.join(tempDir, "java-project");
await fs.mkdir(javaDir, { recursive: true });
await fs.writeFile(
path.join(javaDir, "pom.xml"),
`
<?xml version="1.0" encoding="UTF-8"?>
<project>
<modelVersion>4.0.0</modelVersion>
<groupId>com.example</groupId>
<artifactId>myapp</artifactId>
<version>1.0.0</version>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter</artifactId>
</dependency>
</dependencies>
</project>
`,
);
await fs.writeFile(path.join(javaDir, "App.java"), "public class App {}");
const result = await analyzeRepository({
path: javaDir,
depth: "standard",
});
expect(result.content).toBeDefined();
const analysis = JSON.parse(result.content[0].text);
expect(analysis.dependencies.ecosystem).toBeDefined(); // May be 'java' or 'unknown' depending on detection
});
it("should analyze project with Docker", async () => {
const dockerDir = path.join(tempDir, "docker-project");
await fs.mkdir(dockerDir, { recursive: true });
await fs.writeFile(
path.join(dockerDir, "Dockerfile"),
`
FROM node:20
WORKDIR /app
COPY . .
RUN npm install
CMD ["npm", "start"]
`,
);
await fs.writeFile(
path.join(dockerDir, "docker-compose.yml"),
`
version: '3'
services:
app:
build: .
ports:
- "3000:3000"
`,
);
await fs.writeFile(
path.join(dockerDir, "package.json"),
'{"name": "docker-app"}',
);
const result = await analyzeRepository({
path: dockerDir,
depth: "standard",
});
expect(result.content).toBeDefined();
const analysis = JSON.parse(result.content[0].text);
// Verify basic analysis works - Docker detection not implemented
expect(analysis.structure).toBeDefined();
expect(analysis.structure.totalFiles).toBe(3);
expect(analysis.dependencies.ecosystem).toBe("javascript");
});
it("should analyze project with existing docs", async () => {
const docsDir = path.join(tempDir, "docs-project");
await fs.mkdir(path.join(docsDir, "docs"), { recursive: true });
await fs.mkdir(path.join(docsDir, "documentation"), { recursive: true });
await fs.writeFile(
path.join(docsDir, "docs", "index.md"),
"# Documentation",
);
await fs.writeFile(
path.join(docsDir, "docs", "api.md"),
"# API Reference",
);
await fs.writeFile(
path.join(docsDir, "documentation", "guide.md"),
"# User Guide",
);
await fs.writeFile(
path.join(docsDir, "README.md"),
"# Project with Docs",
);
const result = await analyzeRepository({
path: docsDir,
depth: "standard",
});
expect(result.content).toBeDefined();
const analysis = JSON.parse(result.content[0].text);
expect(analysis.structure.hasDocs).toBe(true);
});
});
describe("Edge Cases and Error Handling", () => {
it("should handle empty repository", async () => {
const emptyDir = path.join(tempDir, "empty-repo");
await fs.mkdir(emptyDir, { recursive: true });
const result = await analyzeRepository({
path: emptyDir,
depth: "quick",
});
expect(result.content).toBeDefined();
const analysis = JSON.parse(result.content[0].text);
expect(analysis.dependencies.ecosystem).toBe("unknown");
});
it("should handle repository with only config files", async () => {
const configDir = path.join(tempDir, "config-only");
await fs.mkdir(configDir, { recursive: true });
await fs.writeFile(path.join(configDir, ".gitignore"), "node_modules/");
await fs.writeFile(
path.join(configDir, ".editorconfig"),
"indent_style = space",
);
await fs.writeFile(path.join(configDir, "LICENSE"), "MIT License");
const result = await analyzeRepository({
path: configDir,
depth: "standard",
});
expect(result.content).toBeDefined();
expect(result.content.length).toBeGreaterThan(0);
});
it("should handle deep analysis depth", async () => {
const deepDir = path.join(tempDir, "deep-analysis");
await fs.mkdir(deepDir, { recursive: true });
// Create nested structure
await fs.mkdir(path.join(deepDir, "src", "components", "ui"), {
recursive: true,
});
await fs.mkdir(path.join(deepDir, "src", "utils", "helpers"), {
recursive: true,
});
await fs.mkdir(path.join(deepDir, "tests", "unit"), { recursive: true });
await fs.writeFile(
path.join(deepDir, "package.json"),
JSON.stringify({
name: "deep-project",
scripts: {
test: "jest",
build: "webpack",
lint: "eslint .",
},
}),
);
await fs.writeFile(
path.join(deepDir, "src", "index.js"),
'console.log("app");',
);
await fs.writeFile(
path.join(deepDir, "src", "components", "ui", "Button.js"),
"export default Button;",
);
await fs.writeFile(
path.join(deepDir, "tests", "unit", "test.js"),
'test("sample", () => {});',
);
const result = await analyzeRepository({ path: deepDir, depth: "deep" });
expect(result.content).toBeDefined();
const analysis = JSON.parse(result.content[0].text);
expect(analysis.structure.hasTests).toBe(true);
});
it("should analyze repository with multiple ecosystems", async () => {
const multiDir = path.join(tempDir, "multi-ecosystem");
await fs.mkdir(multiDir, { recursive: true });
// JavaScript
await fs.writeFile(
path.join(multiDir, "package.json"),
'{"name": "frontend"}',
);
// Python
await fs.writeFile(
path.join(multiDir, "requirements.txt"),
"flask==2.0.0",
);
// Ruby
await fs.writeFile(path.join(multiDir, "Gemfile"), 'gem "rails"');
const result = await analyzeRepository({
path: multiDir,
depth: "standard",
});
expect(result.content).toBeDefined();
// Should detect the primary ecosystem (usually the one with most files/config)
const analysis = JSON.parse(result.content[0].text);
expect(["javascript", "python", "ruby"]).toContain(
analysis.dependencies.ecosystem,
);
});
});
describe("Repository Complexity Analysis", () => {
it("should calculate complexity metrics", async () => {
const complexDir = path.join(tempDir, "complex-repo");
await fs.mkdir(path.join(complexDir, ".github", "workflows"), {
recursive: true,
});
// Create various files to test complexity
await fs.writeFile(
path.join(complexDir, "package.json"),
JSON.stringify({
name: "complex-app",
dependencies: {
react: "^18.0.0",
express: "^4.0.0",
webpack: "^5.0.0",
},
devDependencies: {
jest: "^29.0.0",
eslint: "^8.0.0",
},
}),
);
await fs.writeFile(
path.join(complexDir, ".github", "workflows", "ci.yml"),
`
name: CI
on: push
jobs:
test:
runs-on: ubuntu-latest
`,
);
await fs.writeFile(
path.join(complexDir, "README.md"),
"# Complex Project\n\nWith detailed documentation",
);
await fs.writeFile(
path.join(complexDir, "CONTRIBUTING.md"),
"# Contributing Guide",
);
const result = await analyzeRepository({
path: complexDir,
depth: "deep",
});
expect(result.content).toBeDefined();
const analysis = JSON.parse(result.content[0].text);
expect(analysis.structure.hasCI).toBe(true);
expect(analysis.documentation.hasReadme).toBe(true);
});
});
});
```
--------------------------------------------------------------------------------
/docs/reference/api-overview.md:
--------------------------------------------------------------------------------
```markdown
---
sidebar_position: 1
documcp:
last_updated: "2025-11-20T00:46:21.959Z"
last_validated: "2025-11-20T00:46:21.959Z"
auto_updated: false
update_frequency: monthly
---
# API Overview
DocuMCP provides **45 specialized tools** organized into functional categories for intelligent documentation deployment via the Model Context Protocol (MCP).
## 🎯 Quick Reference: LLM_CONTEXT.md
For AI assistants and LLMs, reference the **comprehensive context file**:
**File**: `/LLM_CONTEXT.md` (in project root)
This auto-generated file provides:
- All 45 tool descriptions with parameters
- Usage examples and code snippets
- Common workflow patterns
- Memory system documentation
- Phase 3 code-to-docs sync features
**Usage in AI assistants**:
```
@LLM_CONTEXT.md help me deploy documentation to GitHub Pages
```
## 📚 Tool Categories
### Core Documentation Tools (9 tools)
Essential tools for repository analysis, recommendations, and deployment:
| Tool | Purpose | Key Parameters |
| ------------------------------- | ---------------------------------------- | ---------------------------------- |
| `analyze_repository` | Analyze project structure & dependencies | `path`, `depth` |
| `recommend_ssg` | Recommend static site generator | `analysisId`, `preferences` |
| `generate_config` | Generate SSG configuration files | `ssg`, `projectName`, `outputPath` |
| `setup_structure` | Create Diataxis documentation structure | `path`, `ssg` |
| `deploy_pages` | Deploy to GitHub Pages with tracking | `repository`, `ssg`, `userId` |
| `verify_deployment` | Verify deployment status | `repository`, `url` |
| `populate_diataxis_content` | Generate project-specific content | `analysisId`, `docsPath` |
| `update_existing_documentation` | Update existing docs intelligently | `analysisId`, `docsPath` |
| `validate_diataxis_content` | Validate documentation quality | `contentPath`, `validationType` |
### README Analysis & Generation (6 tools)
Specialized tools for README creation and optimization:
| Tool | Purpose | Key Parameters |
| --------------------------- | ----------------------------------------- | -------------------------------------------- |
| `evaluate_readme_health` | Assess README quality & onboarding | `readme_path`, `project_type` |
| `readme_best_practices` | Analyze against best practices | `readme_path`, `generate_template` |
| `generate_readme_template` | Create standardized README | `projectName`, `description`, `templateType` |
| `validate_readme_checklist` | Validate against community standards | `readmePath`, `strict` |
| `analyze_readme` | Comprehensive length & structure analysis | `project_path`, `optimization_level` |
| `optimize_readme` | Restructure and condense content | `readme_path`, `strategy`, `max_length` |
### Phase 3: Code-to-Docs Synchronization (2 tools)
Advanced AST-based code analysis and drift detection:
| Tool | Purpose | Key Parameters |
| ----------------------------- | ---------------------------------- | --------------------------------- |
| `sync_code_to_docs` | Detect and fix documentation drift | `projectPath`, `docsPath`, `mode` |
| `generate_contextual_content` | Generate docs from code analysis | `filePath`, `documentationType` |
**Supported Languages**: TypeScript, JavaScript, Python, Go, Rust, Java, Ruby, Bash
**Drift Types Detected**: Outdated, Incorrect, Missing, Breaking
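A typical detect-then-apply cycle, sketched with an illustrative `callTool` helper (the helper and response handling are assumptions; parameter names follow the table above):

```typescript
// Hypothetical MCP client helper; parameters match the tool table above.
const drift = await callTool("sync_code_to_docs", {
  projectPath: ".",
  docsPath: "./docs",
  mode: "detect", // review reported drift, then re-run with mode: "apply"
});
```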
### Memory & Analytics Tools (2 tools)
User preferences and deployment pattern analysis:
| Tool | Purpose | Key Parameters |
| --------------------- | -------------------------------------- | ----------------------------------- |
| `manage_preferences` | Manage user preferences & SSG history | `action`, `userId`, `preferences` |
| `analyze_deployments` | Analyze deployment patterns & insights | `analysisType`, `ssg`, `periodDays` |
### Validation & Testing Tools (4 tools)
Quality assurance and deployment testing:
| Tool | Purpose | Key Parameters |
| --------------------------- | ------------------------------------ | -------------------------------------------- |
| `validate_content` | Validate links, code, and references | `contentPath`, `validationType` |
| `check_documentation_links` | Comprehensive link validation | `documentation_path`, `check_external_links` |
| `test_local_deployment` | Test build and local server | `repositoryPath`, `ssg`, `port` |
| `setup_playwright_tests` | Generate E2E test infrastructure | `repositoryPath`, `ssg`, `projectName` |
### Utility Tools (3 tools)
Additional functionality and management:
| Tool | Purpose | Key Parameters |
| --------------------------- | --------------------------------- | ------------------------------------- |
| `detect_documentation_gaps` | Identify missing content | `repositoryPath`, `documentationPath` |
| `manage_sitemap` | Generate and validate sitemap.xml | `action`, `docsPath`, `baseUrl` |
| `read_directory` | List files within allowed roots | `path` |
### Advanced Memory Tools (19 tools)
Sophisticated memory, learning, and knowledge graph operations:
| Tool Category | Tools | Purpose |
| ------------------- | ---------------------------------------------------------------------- | ----------------------------- |
| **Memory Recall** | `memory_recall`, `memory_contextual_search` | Retrieve and search memories |
| **Intelligence** | `memory_intelligent_analysis`, `memory_enhanced_recommendation` | AI-powered insights |
| **Knowledge Graph** | `memory_knowledge_graph`, `memory_learning_stats` | Graph queries and statistics |
| **Collaboration** | `memory_agent_network` | Multi-agent memory sharing |
| **Insights** | `memory_insights`, `memory_similar`, `memory_temporal_analysis` | Pattern analysis |
| **Data Management** | `memory_export`, `memory_cleanup`, `memory_pruning` | Export, cleanup, optimization |
| **Visualization** | `memory_visualization` | Visual representations |
| **Advanced I/O** | `memory_export_advanced`, `memory_import_advanced`, `memory_migration` | Complex data operations |
| **Metrics** | `memory_optimization_metrics` | Performance analysis |
## 🔗 Detailed Documentation
### Full API Reference
- **[MCP Tools API](./mcp-tools.md)** - Complete tool descriptions with examples
- **[TypeDoc API](../api/)** - Auto-generated API documentation for all classes, interfaces, and functions
- **[LLM Context Reference](../../LLM_CONTEXT.md)** - Comprehensive tool reference for AI assistants
### Configuration & Usage
- **[Configuration Options](./configuration.md)** - All configuration settings
- **[CLI Commands](./cli.md)** - Command-line interface reference
- **[Prompt Templates](./prompt-templates.md)** - Pre-built prompt examples
## 🚀 Common Workflows
### 1. New Documentation Site
```
analyze_repository → recommend_ssg → generate_config →
setup_structure → populate_diataxis_content → deploy_pages
```
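Sketched as chained calls with the same illustrative `callTool` helper used elsewhere in this reference (the `analysisId` and `recommended` response fields are assumptions about the response shapes):

```typescript
// Illustrative end-to-end chain; response field names are assumptions.
const analysis = await callTool("analyze_repository", {
  path: ".",
  depth: "standard",
});
const rec = await callTool("recommend_ssg", {
  analysisId: analysis.analysisId,
});
await callTool("generate_config", {
  ssg: rec.recommended,
  projectName: "My Docs",
  outputPath: ".",
});
await callTool("setup_structure", { path: "./docs", ssg: rec.recommended });
await callTool("populate_diataxis_content", {
  analysisId: analysis.analysisId,
  docsPath: "./docs",
});
await callTool("deploy_pages", { repository: ".", ssg: rec.recommended });
```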
### 2. Documentation Sync (Phase 3)
```
sync_code_to_docs (detect) → review drift →
sync_code_to_docs (apply) → manual review
```
### 3. Existing Docs Improvement
```
analyze_repository → update_existing_documentation →
validate_diataxis_content → check_documentation_links
```
### 4. README Enhancement
```
analyze_readme → evaluate_readme_health →
readme_best_practices → optimize_readme
```
## 📦 Memory Knowledge Graph
DocuMCP includes a persistent memory system that learns from every analysis:
### Entity Types
- **Project**: Software projects with analysis history
- **User**: User preferences and SSG patterns
- **Configuration**: SSG deployment configs with success rates
- **Documentation**: Documentation structures and patterns
- **CodeFile**: Source code files with change tracking
- **DocumentationSection**: Docs sections linked to code
- **Technology**: Languages, frameworks, and tools
### Relationship Types
- `project_uses_technology`: Links projects to tech stack
- `user_prefers_ssg`: Tracks user SSG preferences
- `project_deployed_with`: Records deployment outcomes
- `similar_to`: Identifies similar projects
- `documents`: Links code files to documentation
- `outdated_for`: Flags out-of-sync documentation
- `depends_on`: Tracks technology dependencies
### Storage Location
- **Default**: `.documcp/memory/`
- **Entities**: `.documcp/memory/knowledge-graph-entities.jsonl`
- **Relationships**: `.documcp/memory/knowledge-graph-relationships.jsonl`
- **Backups**: `.documcp/memory/backups/`
- **Snapshots**: `.documcp/snapshots/` (for drift detection)
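Because both stores are JSONL (one JSON object per line), they are easy to inspect programmatically; a minimal sketch, assuming each entity record carries a `type` field (the exact record shape is an assumption):

```typescript
import { readFileSync } from "fs";

// Count knowledge-graph entities by type (record shape assumed).
const lines = readFileSync(
  ".documcp/memory/knowledge-graph-entities.jsonl",
  "utf-8",
)
  .split("\n")
  .filter(Boolean);

const byType = new Map<string, number>();
for (const line of lines) {
  const entity = JSON.parse(line);
  byType.set(entity.type, (byType.get(entity.type) ?? 0) + 1);
}
console.log(byType);
```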
## 🎓 Getting Started
1. **Start with tutorials**: [Getting Started Guide](../tutorials/getting-started.md)
2. **Learn effective prompting**: [Prompting Guide](../how-to/prompting-guide.md)
3. **Reference LLM_CONTEXT.md**: Use `@LLM_CONTEXT.md` in AI assistants
4. **Explore workflows**: [Common Workflows](#-common-workflows)
## 📊 Tool Statistics
- **Total Tools**: 45
- **Core Documentation**: 9 tools
- **README Management**: 6 tools
- **Phase 3 Sync**: 2 tools
- **Memory & Analytics**: 2 tools
- **Validation**: 4 tools
- **Utilities**: 3 tools
- **Advanced Memory**: 19 tools
## 🔍 Search & Discovery
- **By functionality**: Use the category tables above
- **By name**: See [MCP Tools API](./mcp-tools.md)
- **By code**: Browse [TypeDoc API](../api/)
- **For AI assistants**: Reference [LLM_CONTEXT.md](../../LLM_CONTEXT.md)
---
_Documentation auto-generated from DocuMCP v0.3.2_
```
--------------------------------------------------------------------------------
/docs/reference/deploy-pages.md:
--------------------------------------------------------------------------------
```markdown
---
documcp:
last_updated: "2025-11-20T00:46:21.961Z"
last_validated: "2025-11-20T00:46:21.961Z"
auto_updated: false
update_frequency: monthly
---
# Deploy Pages Tool Documentation
## Overview
The `deploy_pages` tool provides automated GitHub Pages deployment setup with intelligent SSG (Static Site Generator) detection, optimized workflow generation, and comprehensive deployment tracking through the Knowledge Graph system.
## Features
- **SSG Auto-Detection**: Automatically retrieves SSG recommendations from Knowledge Graph using analysisId
- **Optimized Workflows**: Generates SSG-specific GitHub Actions workflows with best practices
- **Package Manager Detection**: Supports npm, yarn, and pnpm with automatic lockfile detection
- **Documentation Folder Detection**: Intelligently detects docs folders (docs/, website/, documentation/)
- **Custom Domain Support**: Automatic CNAME file generation
- **Deployment Tracking**: Integrates with Knowledge Graph to track deployment success/failure
- **User Preference Learning**: Tracks SSG usage patterns for personalized recommendations
## Usage
### Basic Usage
```javascript
// Deploy with explicit SSG
const result = await callTool("deploy_pages", {
repository: "/path/to/project",
ssg: "docusaurus",
});
```
### Advanced Usage with Knowledge Graph Integration
```javascript
// Deploy using SSG from previous analysis
const result = await callTool("deploy_pages", {
repository: "https://github.com/user/repo.git",
analysisId: "repo-analysis-123", // SSG retrieved from KG
projectPath: "/local/path",
projectName: "My Documentation Site",
customDomain: "docs.example.com",
userId: "developer-1",
});
```
## Parameters
| Parameter | Type | Required | Description |
| -------------- | -------- | -------- | --------------------------------------------------------------------------- |
| `repository` | `string` | ✅ | Repository path (local) or URL (remote) |
| `ssg` | `enum` | ⚠️\* | Static site generator: `jekyll`, `hugo`, `docusaurus`, `mkdocs`, `eleventy` |
| `branch` | `string` | ❌ | Target branch for deployment (default: `gh-pages`) |
| `customDomain` | `string` | ❌ | Custom domain for GitHub Pages |
| `projectPath` | `string` | ❌ | Local project path for tracking |
| `projectName` | `string` | ❌ | Project name for tracking |
| `analysisId` | `string` | ❌ | Repository analysis ID for SSG retrieval |
| `userId` | `string` | ❌ | User ID for preference tracking (default: `default`) |
\*Required unless `analysisId` is provided for SSG retrieval from Knowledge Graph
## SSG-Specific Workflows
### Docusaurus
- Node.js setup with configurable version
- Package manager auto-detection (npm/yarn/pnpm)
- Build caching optimization
- Working directory support for monorepos
### Hugo
- Extended Hugo version with latest releases
- Asset optimization and minification
- Submodule support for themes
- Custom build command detection
### Jekyll
- Ruby environment with Bundler
- Gemfile dependency management
- Production environment variables
- Custom plugin support
### MkDocs
- Python environment setup
- Requirements.txt dependency installation
- Direct GitHub Pages deployment
- Custom branch targeting
### Eleventy (11ty)
- Node.js with flexible configuration
- Custom output directory detection
- Plugin ecosystem support
- Development server integration
## Generated Workflow Features
### Security Best Practices
- **Minimal Permissions**: Only required `pages:write` and `id-token:write` permissions
- **OIDC Token Authentication**: JWT-based deployment validation
- **Environment Protection**: Production deployment safeguards
- **Dependency Scanning**: Automated security vulnerability checks
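For reference, the permissions block in a Pages deployment workflow typically looks like the snippet below (the generated workflow may differ in detail):

```yaml
permissions:
  contents: read # checkout
  pages: write # publish to GitHub Pages
  id-token: write # OIDC deployment validation
```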
### Performance Optimizations
- **Build Caching**: Package manager and dependency caching
- **Incremental Builds**: Only rebuild changed content when possible
- **Asset Optimization**: Minification and compression
- **Parallel Processing**: Multi-stage builds where applicable
### Error Handling
- **Graceful Failures**: Comprehensive error reporting and recovery
- **Debug Information**: Detailed logging for troubleshooting
- **Health Checks**: Post-deployment validation
- **Rollback Support**: Automated rollback on deployment failures
## Knowledge Graph Integration
### Deployment Tracking
```typescript
// Successful deployment tracking
await trackDeployment(projectId, ssg, true, {
buildTime: executionTime,
branch: targetBranch,
customDomain: domain,
});
// Failed deployment tracking
await trackDeployment(projectId, ssg, false, {
errorMessage: error.message,
failureStage: "build|deploy|verification",
});
```
### SSG Retrieval Logic
1. **Check Analysis ID**: Query project node in Knowledge Graph
2. **Get Recommendations**: Retrieve SSG recommendations sorted by confidence
3. **Fallback to History**: Use most recent successful deployment
4. **Smart Filtering**: Only consider successful deployments
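The fallback order above can be condensed into a sketch (the data-access functions are hypothetical stand-ins for Knowledge Graph queries, not the actual internals):

```typescript
type Recommendation = { ssg: string; confidence: number };
type Deployment = { ssg: string; success: boolean };

// Illustrative fallback order; the query functions are hypothetical.
async function resolveSSG(
  explicitSSG: string | undefined,
  analysisId: string | undefined,
  getRecommendations: (id: string) => Promise<Recommendation[]>, // sorted by confidence
  getDeploymentHistory: (id: string) => Promise<Deployment[]>, // most recent first
): Promise<string> {
  if (explicitSSG) return explicitSSG;
  if (analysisId) {
    const recs = await getRecommendations(analysisId);
    if (recs.length > 0) return recs[0].ssg; // highest-confidence recommendation
    const lastSuccess = (await getDeploymentHistory(analysisId)).find(
      (d) => d.success, // smart filtering: successful deployments only
    );
    if (lastSuccess) return lastSuccess.ssg;
  }
  throw new Error("SSG_NOT_SPECIFIED");
}
```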
### User Preference Learning
- **Success Rate Tracking**: Monitor SSG deployment success rates
- **Usage Pattern Analysis**: Track frequency of SSG selections
- **Personalized Recommendations**: Weight future suggestions based on history
- **Multi-User Support**: Separate preference tracking per userId
## Examples
### Complete Workflow Integration
```javascript
try {
// 1. Analyze repository
const analysis = await callTool("analyze_repository", {
path: "/path/to/project",
});
// 2. Get SSG recommendation
const recommendation = await callTool("recommend_ssg", {
analysisId: analysis.analysisId,
});
// 3. Deploy with recommended SSG
const deployment = await callTool("deploy_pages", {
repository: "/path/to/project",
analysisId: analysis.analysisId,
projectName: "My Project",
userId: "developer-1",
});
console.log(`Deployed ${deployment.ssg} to ${deployment.branch}`);
} catch (error) {
console.error("Deployment workflow failed:", error);
}
```
### Custom Domain Setup
```javascript
const result = await callTool("deploy_pages", {
repository: "/path/to/docs",
ssg: "hugo",
customDomain: "docs.mycompany.com",
branch: "main", // Deploy from main branch
});
// CNAME file automatically created
console.log(`CNAME created: ${result.cnameCreated}`);
```
### Monorepo Documentation
```javascript
const result = await callTool("deploy_pages", {
repository: "/path/to/monorepo",
ssg: "docusaurus",
// Will detect docs/ folder automatically
projectPath: "/path/to/monorepo/packages/docs",
});
console.log(`Docs folder: ${result.detectedConfig.docsFolder}`);
console.log(`Build command: ${result.detectedConfig.buildCommand}`);
```
## Response Format
### Success Response
```javascript
{
repository: "/path/to/project",
ssg: "docusaurus",
branch: "gh-pages",
customDomain: "docs.example.com",
workflowPath: "deploy-docs.yml",
cnameCreated: true,
repoPath: "/path/to/project",
detectedConfig: {
docsFolder: "docs",
buildCommand: "npm run build",
outputPath: "./build",
packageManager: "npm",
workingDirectory: "docs"
}
}
```
### Error Response
```javascript
{
success: false,
error: {
code: "SSG_NOT_SPECIFIED",
message: "SSG parameter is required. Either provide it directly or ensure analysisId points to a project with SSG recommendations.",
resolution: "Run analyze_repository and recommend_ssg first, or specify the SSG parameter explicitly."
}
}
```
## Error Codes
| Code | Description | Resolution |
| ---------------------------- | ------------------------------------------------- | --------------------------------------------------- |
| `SSG_NOT_SPECIFIED` | No SSG provided and none found in Knowledge Graph | Provide SSG parameter or run analysis first |
| `DEPLOYMENT_SETUP_FAILED` | Failed to create workflow files | Check repository permissions and path accessibility |
| `INVALID_REPOSITORY` | Repository path or URL invalid | Verify repository exists and is accessible |
| `WORKFLOW_GENERATION_FAILED` | Failed to generate SSG-specific workflow | Check SSG parameter and project structure |
## Best Practices
### Repository Structure
- Place documentation in standard folders (`docs/`, `website/`, `documentation/`)
- Include `package.json` for Node.js projects with proper scripts
- Use lockfiles (`package-lock.json`, `yarn.lock`, `pnpm-lock.yaml`) for dependency consistency
### Workflow Optimization
- Enable GitHub Pages in repository settings before first deployment
- Use semantic versioning for documentation releases
- Configure branch protection rules for production deployments
- Monitor deployment logs for performance bottlenecks
### Knowledge Graph Benefits
- Run `analyze_repository` before deployment for optimal SSG selection
- Use consistent `userId` for personalized recommendations
- Provide `projectName` and `projectPath` for deployment tracking
- Review deployment history through Knowledge Graph queries
## Troubleshooting
### Common Issues
**Build Failures**
- Verify all dependencies are listed in `package.json` or `requirements.txt`
- Check Node.js/Python version compatibility
- Ensure build scripts are properly configured
**Permission Errors**
- Enable GitHub Actions in repository settings
- Check workflow file permissions (should be automatically handled)
- Verify GitHub Pages is enabled for the target branch
**Custom Domain Issues**
- Verify DNS configuration points to GitHub Pages
- Allow 24-48 hours for DNS propagation
- Check CNAME file is created in repository root
### Debug Workflow
1. Check GitHub Actions logs in repository
2. Verify workflow file syntax using GitHub workflow validator
3. Test build locally using same commands as workflow
4. Review Knowledge Graph deployment history for patterns
## Related Tools
- [`analyze_repository`](../how-to/repository-analysis.md) - Repository analysis for SSG recommendations
- [`recommend_ssg`](./mcp-tools.md#recommend_ssg) - SSG recommendation engine
- [`verify_deployment`](./mcp-tools.md#verify_deployment) - Deployment verification and health checks
- [`manage_preferences`](./mcp-tools.md#manage_preferences) - User preference management
```
--------------------------------------------------------------------------------
/docs/tutorials/development-setup.md:
--------------------------------------------------------------------------------
```markdown
---
documcp:
last_updated: "2025-11-20T00:46:21.970Z"
last_validated: "2025-11-20T00:46:21.970Z"
auto_updated: false
update_frequency: monthly
---
# Setting Up Your Development Environment
This tutorial covers setting up a development environment for ongoing documentation work with DocuMCP, including local testing, content workflows, and maintenance automation.
## What You'll Set Up
By the end of this tutorial, you'll have:
- Local documentation development environment
- Live reload and preview capabilities
- Content validation and testing workflow
- Automated quality checks
- Integration with your existing development tools
## Prerequisites
- Completed [Getting Started](getting-started.md) and [First Deployment](first-deployment.md)
- Node.js 20.0.0+ installed
- Your preferred code editor (VS Code recommended)
- Git and GitHub CLI (optional but recommended)
## Development Environment Setup
### Step 1: Local Development Server
Set up local development with live reload:
```bash
# Test local deployment before pushing to GitHub
"test my documentation build locally with live reload"
```
This will:
- Install development dependencies
- Start local server (typically on http://localhost:3000)
- Enable live reload for instant preview
- Validate build process
**For different SSGs:**
**Docusaurus:**
```bash
npm run start
# Opens http://localhost:3000 with live reload
```
**MkDocs:**
```bash
mkdocs serve
# Opens http://127.0.0.1:8000 with auto-reload
```
**Hugo:**
```bash
hugo server -D
# Opens http://localhost:1313 with live reload
```
**Jekyll:**
```bash
bundle exec jekyll serve --livereload
# Opens http://localhost:4000 with live reload
```
### Step 2: Content Validation Workflow
Set up automated content validation:
```bash
# Validate all documentation content
"validate my documentation content for accuracy and completeness"
```
This checks:
- **Link validation**: Internal and external links
- **Code syntax**: All code blocks and examples
- **Image references**: Missing or broken images
- **Content structure**: Diataxis compliance
- **SEO optimization**: Meta tags, headings
### Step 3: Quality Assurance Integration
Integrate quality checks into your workflow:
```bash
# Set up comprehensive documentation quality checks
"check all documentation links and validate content quality"
```
**Available validation levels:**
- **Basic**: Link checking and syntax validation
- **Comprehensive**: Full content analysis with Diataxis compliance
- **Advanced**: Performance testing and SEO analysis
### Step 4: Development Scripts Setup
Add these scripts to your `package.json`:
```json
{
"scripts": {
"docs:dev": "docusaurus start",
"docs:build": "docusaurus build",
"docs:serve": "docusaurus serve",
"docs:validate": "npm run docs:check-links && npm run docs:test-build",
"docs:check-links": "markdown-link-check docs/**/*.md",
"docs:test-build": "npm run docs:build && npm run docs:serve -- --no-open",
"docs:deploy": "npm run docs:validate && npm run docs:build"
}
}
```
## Editor Configuration
### VS Code Setup
Create `.vscode/settings.json`:
```json
{
"markdownlint.config": {
"MD013": false,
"MD033": false
},
"files.associations": {
"*.mdx": "mdx"
},
"editor.wordWrap": "on",
"editor.quickSuggestions": {
"strings": true
},
"[markdown]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.quickSuggestions": {
"comments": "off",
"strings": "off",
"other": "off"
}
}
}
```
**Recommended VS Code Extensions:**
- Markdown All in One
- markdownlint
- Prettier - Code formatter
- GitLens
- Live Server (for static preview)
### Content Writing Workflow
Establish a content creation workflow:
1. **Create branch** for documentation changes
2. **Write content** using Diataxis principles
3. **Test locally** with live server
4. **Validate content** using DocuMCP tools
5. **Review and refine** based on validation feedback
6. **Commit and push** to trigger deployment
## Automated Quality Checks
### Pre-commit Hooks
Set up automated checks before commits:
```bash
# Install husky for git hooks
npm install --save-dev husky
# Set up pre-commit hook
npx husky add .husky/pre-commit "npm run docs:validate"
```
Then replace the generated `.husky/pre-commit` with a fuller script:
```bash
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"
echo "🔍 Validating documentation..."
npm run docs:validate
echo "📝 Checking markdown formatting..."
npx prettier --check "docs/**/*.md"
echo "🔗 Validating links..."
npm run docs:check-links
```
### GitHub Actions Integration
Enhance your deployment workflow with quality gates:
```yaml
# .github/workflows/docs-quality.yml
name: Documentation Quality
on:
pull_request:
paths: ["docs/**", "*.md"]
jobs:
quality-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: "20"
cache: "npm"
- name: Install dependencies
run: npm ci
- name: Validate documentation
run: |
npm run docs:validate
npm run docs:check-links
- name: Test build
run: npm run docs:build
- name: Comment PR
uses: actions/github-script@v7
with:
script: |
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: '✅ Documentation quality checks passed!'
});
```
## Content Management Strategies
### Diataxis Organization
Organize content following Diataxis principles:
**Directory Structure:**
```
docs/
├── tutorials/ # Learning-oriented (beginner-friendly)
│ ├── getting-started.md
│ ├── first-project.md
│ └── advanced-concepts.md
├── how-to-guides/ # Problem-solving (practical steps)
│ ├── troubleshooting.md
│ ├── configuration.md
│ └── deployment.md
├── reference/ # Information-oriented (comprehensive)
│ ├── api-reference.md
│ ├── cli-commands.md
│ └── configuration-options.md
└── explanation/ # Understanding-oriented (concepts)
├── architecture.md
├── design-decisions.md
└── best-practices.md
```
### Content Templates
Create content templates for consistency:
**Tutorial Template:**
```markdown
# [Action] Tutorial
## What You'll Learn
- Objective 1
- Objective 2
## Prerequisites
- Requirement 1
- Requirement 2
## Step-by-Step Instructions
### Step 1: [Action]
Instructions...
### Step 2: [Action]
Instructions...
## Verification
How to confirm success...
## Next Steps
Where to go next...
```
**How-to Guide Template:**
```markdown
# How to [Solve Problem]
## Problem
Clear problem statement...
## Solution
Step-by-step solution...
## Alternative Approaches
Other ways to solve this...
## Troubleshooting
Common issues and fixes...
```
## Performance Optimization
### Build Performance
Optimize build times:
```bash
# Gatsby-only cache locations (not needed for Docusaurus/Hugo/Jekyll/MkDocs)
export GATSBY_CACHE_DIR=.cache
export GATSBY_PUBLIC_DIR=public
# Give Node.js more headroom for large builds
export NODE_OPTIONS="--max-old-space-size=8192"
```
**For large sites:**
- Enable incremental builds
- Use build caching
- Optimize image processing
- Minimize plugin usage
### Development Server Performance
Speed up local development:
```bash
# Fast refresh is enabled by default in Docusaurus
# Use polling when file watching is unreliable (network drives, containers)
npm run start -- --poll
# Bind the dev server to a specific host and port
npm run start -- --host 0.0.0.0 --port 3001
```
## Maintenance Automation
### Scheduled Content Validation
Set up scheduled validation:
```yaml
# .github/workflows/scheduled-validation.yml
name: Scheduled Documentation Validation
on:
schedule:
- cron: "0 2 * * 1" # Every Monday at 2 AM
jobs:
validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: "20"
      - name: Full validation
        run: |
          # These DocuMCP prompts are issued through your MCP client,
          # not the shell:
          #   "check all documentation links with external validation"
          #   "validate all content for accuracy and completeness"
          # In CI, run the equivalent local scripts instead:
          npm run docs:validate
- name: Create issue on failure
if: failure()
uses: actions/github-script@v7
with:
script: |
github.rest.issues.create({
owner: context.repo.owner,
repo: context.repo.repo,
title: 'Scheduled Documentation Validation Failed',
body: 'The weekly documentation validation found issues. Check the workflow logs.',
labels: ['documentation', 'maintenance']
});
```
### Dependency Updates
Automate dependency maintenance:
```yaml
# .github/dependabot.yml
version: 2
updates:
- package-ecosystem: "npm"
directory: "/"
schedule:
interval: "weekly"
open-pull-requests-limit: 5
labels:
- "dependencies"
- "documentation"
```
## Collaboration Workflow
### Team Development
For team documentation:
1. **Branching strategy**: Feature branches for documentation changes
2. **Review process**: PR reviews for all documentation updates
3. **Style guide**: Consistent writing and formatting standards
4. **Content ownership**: Assign sections to team members
### Review Checklist
Documentation PR review checklist:
- [ ] Content follows Diataxis principles
- [ ] All links work (internal and external)
- [ ] Code examples are tested and accurate
- [ ] Images are optimized and accessible
- [ ] SEO metadata is complete
- [ ] Mobile responsiveness verified
- [ ] Build succeeds locally and in CI
## Next Steps
Your development environment is now ready! Next:
1. **[Learn advanced prompting](../how-to/prompting-guide.md)** for DocuMCP
2. **[Set up monitoring](../how-to/site-monitoring.md)** for your live site
3. **[Optimize for performance](../how-to/performance-optimization.md)**
4. **[Configure custom domains](../how-to/custom-domains.md)** (optional)
## Troubleshooting
**Common development issues:**
**Port conflicts:**
```bash
# Change default port
npm run start -- --port 3001
```
**Memory issues:**
```bash
# Increase Node.js memory limit
export NODE_OPTIONS="--max-old-space-size=8192"
```
**File watching problems:**
```bash
# Enable polling for file changes
npm run start -- --poll
```
**Cache issues:**
```bash
# Clear build cache
rm -rf .docusaurus .cache public
npm run start
```
## Summary
You now have:

- ✅ Local development environment with live reload
- ✅ Content validation and quality checking
- ✅ Automated pre-commit hooks
- ✅ CI/CD integration for quality gates
- ✅ Performance optimization
- ✅ Maintenance automation
- ✅ Team collaboration workflow
Your documentation development environment is production-ready!
```
--------------------------------------------------------------------------------
/src/tools/track-documentation-freshness.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Track Documentation Freshness Tool
*
 * Scans a documentation directory for staleness markers and
 * identifies files needing updates based on configurable time thresholds.
*/
import { z } from "zod";
import {
scanDocumentationFreshness,
STALENESS_PRESETS,
type StalenessThreshold,
type FreshnessScanReport,
} from "../utils/freshness-tracker.js";
import { type MCPToolResponse } from "../types/api.js";
import {
storeFreshnessEvent,
getStalenessInsights,
} from "../memory/freshness-kg-integration.js";
/**
* Input schema for track_documentation_freshness tool
*/
export const TrackDocumentationFreshnessSchema = z.object({
docsPath: z.string().describe("Path to documentation directory"),
projectPath: z
.string()
.optional()
.describe("Path to project root (for knowledge graph tracking)"),
warningThreshold: z
.object({
value: z.number().positive(),
unit: z.enum(["minutes", "hours", "days"]),
})
.optional()
.describe("Warning threshold (yellow flag)"),
staleThreshold: z
.object({
value: z.number().positive(),
unit: z.enum(["minutes", "hours", "days"]),
})
.optional()
.describe("Stale threshold (orange flag)"),
criticalThreshold: z
.object({
value: z.number().positive(),
unit: z.enum(["minutes", "hours", "days"]),
})
.optional()
.describe("Critical threshold (red flag)"),
preset: z
.enum(["realtime", "active", "recent", "weekly", "monthly", "quarterly"])
.optional()
.describe("Use predefined threshold preset"),
includeFileList: z
.boolean()
.optional()
.default(true)
.describe("Include detailed file list in response"),
sortBy: z
.enum(["age", "path", "staleness"])
.optional()
.default("staleness")
.describe("Sort order for file list"),
storeInKG: z
.boolean()
.optional()
.default(true)
.describe(
"Store tracking event in knowledge graph for historical analysis",
),
});
export type TrackDocumentationFreshnessInput = z.input<
typeof TrackDocumentationFreshnessSchema
>;
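
// Illustrative call (paths and preset are hypothetical):
//
//   const response = await trackDocumentationFreshness({
//     docsPath: "./docs",
//     projectPath: process.cwd(),
//     preset: "monthly",
//   });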
/**
* Format freshness report for display
*/
function formatFreshnessReport(
report: FreshnessScanReport,
includeFileList: boolean,
sortBy: "age" | "path" | "staleness",
): string {
const {
totalFiles,
filesWithMetadata,
filesWithoutMetadata,
freshFiles,
warningFiles,
staleFiles,
criticalFiles,
files,
thresholds,
} = report;
let output = "# Documentation Freshness Report\n\n";
output += `**Scanned at**: ${new Date(report.scannedAt).toLocaleString()}\n`;
output += `**Documentation path**: ${report.docsPath}\n\n`;
// Summary statistics
output += "## Summary Statistics\n\n";
output += `- **Total files**: ${totalFiles}\n`;
  output += `- **With metadata**: ${filesWithMetadata} (${
    totalFiles > 0 ? Math.round((filesWithMetadata / totalFiles) * 100) : 0
  }%)\n`;
output += `- **Without metadata**: ${filesWithoutMetadata}\n\n`;
// Freshness breakdown
output += "## Freshness Breakdown\n\n";
output += `- ✅ **Fresh**: ${freshFiles} files\n`;
output += `- 🟡 **Warning**: ${warningFiles} files (older than ${thresholds.warning.value} ${thresholds.warning.unit})\n`;
output += `- 🟠 **Stale**: ${staleFiles} files (older than ${thresholds.stale.value} ${thresholds.stale.unit})\n`;
output += `- 🔴 **Critical**: ${criticalFiles} files (older than ${thresholds.critical.value} ${thresholds.critical.unit})\n`;
output += `- ❓ **Unknown**: ${filesWithoutMetadata} files (no metadata)\n\n`;
// Recommendations
if (filesWithoutMetadata > 0 || criticalFiles > 0 || staleFiles > 0) {
output += "## Recommendations\n\n";
if (filesWithoutMetadata > 0) {
output += `⚠️ **${filesWithoutMetadata} files lack freshness metadata**. Run \`validate_documentation_freshness\` to initialize metadata.\n\n`;
}
if (criticalFiles > 0) {
output += `🔴 **${criticalFiles} files are critically stale**. Immediate review and update recommended.\n\n`;
} else if (staleFiles > 0) {
output += `🟠 **${staleFiles} files are stale**. Consider reviewing and updating soon.\n\n`;
}
}
// File list
if (includeFileList && files.length > 0) {
output += "## File Details\n\n";
// Sort files
const sortedFiles = [...files];
switch (sortBy) {
case "age":
sortedFiles.sort((a, b) => (b.ageInMs || 0) - (a.ageInMs || 0));
break;
case "path":
sortedFiles.sort((a, b) =>
a.relativePath.localeCompare(b.relativePath),
);
break;
case "staleness": {
const order = {
critical: 0,
stale: 1,
warning: 2,
fresh: 3,
unknown: 4,
};
sortedFiles.sort(
(a, b) => order[a.stalenessLevel] - order[b.stalenessLevel],
);
break;
}
}
// Group by staleness level
const grouped = {
critical: sortedFiles.filter((f) => f.stalenessLevel === "critical"),
stale: sortedFiles.filter((f) => f.stalenessLevel === "stale"),
warning: sortedFiles.filter((f) => f.stalenessLevel === "warning"),
fresh: sortedFiles.filter((f) => f.stalenessLevel === "fresh"),
unknown: sortedFiles.filter((f) => f.stalenessLevel === "unknown"),
};
for (const [level, levelFiles] of Object.entries(grouped)) {
if (levelFiles.length === 0) continue;
const icon = {
critical: "🔴",
stale: "🟠",
warning: "🟡",
fresh: "✅",
unknown: "❓",
}[level];
output += `### ${icon} ${
level.charAt(0).toUpperCase() + level.slice(1)
} (${levelFiles.length})\n\n`;
for (const file of levelFiles) {
output += `- **${file.relativePath}**`;
if (file.ageFormatted) {
output += ` - Last updated ${file.ageFormatted} ago`;
}
if (file.metadata?.validated_against_commit) {
output += ` (commit: ${file.metadata.validated_against_commit.substring(
0,
7,
)})`;
}
if (!file.hasMetadata) {
output += " - ⚠️ No metadata";
}
output += "\n";
}
output += "\n";
}
}
return output;
}
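
// Example of the rendered report shape (counts are illustrative):
//
//   # Documentation Freshness Report
//   ## Freshness Breakdown
//   - ✅ **Fresh**: 12 files
//   - 🟡 **Warning**: 3 files (older than 7 days)
//   - 🔴 **Critical**: 1 files (older than 30 days)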
/**
* Track documentation freshness
*/
export async function trackDocumentationFreshness(
input: TrackDocumentationFreshnessInput,
): Promise<MCPToolResponse> {
const startTime = Date.now();
try {
const {
docsPath,
projectPath,
warningThreshold,
staleThreshold,
criticalThreshold,
preset,
includeFileList,
sortBy,
storeInKG,
} = input;
// Determine thresholds
let thresholds: {
warning?: StalenessThreshold;
stale?: StalenessThreshold;
critical?: StalenessThreshold;
} = {};
if (preset) {
// Use preset thresholds
const presetThreshold = STALENESS_PRESETS[preset];
thresholds = {
warning: presetThreshold,
stale: { value: presetThreshold.value * 2, unit: presetThreshold.unit },
critical: {
value: presetThreshold.value * 3,
unit: presetThreshold.unit,
},
};
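      // Scaling rule: stale = 2x and critical = 3x the preset's warning value.
      // For example, if the "weekly" preset is 7 days (defined in
      // STALENESS_PRESETS), the effective thresholds are 7/14/21 days.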
} else {
// Use custom thresholds
if (warningThreshold) thresholds.warning = warningThreshold;
if (staleThreshold) thresholds.stale = staleThreshold;
if (criticalThreshold) thresholds.critical = criticalThreshold;
}
// Scan documentation
const report = await scanDocumentationFreshness(docsPath, thresholds);
// Store in knowledge graph if requested and projectPath provided
let kgInsights:
| Awaited<ReturnType<typeof getStalenessInsights>>
| undefined;
if (storeInKG !== false && projectPath) {
try {
await storeFreshnessEvent(projectPath, docsPath, report, "scan");
kgInsights = await getStalenessInsights(projectPath);
} catch (error) {
// KG storage failed, but continue with the response
console.warn(
"Failed to store freshness event in knowledge graph:",
error,
);
}
}
// Format response
const formattedReport = formatFreshnessReport(
report,
includeFileList ?? true,
sortBy ?? "staleness",
);
// Add KG insights to formatted report if available
let enhancedReport = formattedReport;
if (kgInsights && kgInsights.totalEvents > 0) {
enhancedReport += "\n## Historical Insights\n\n";
enhancedReport += `- **Total tracking events**: ${kgInsights.totalEvents}\n`;
enhancedReport += `- **Average improvement score**: ${(
kgInsights.averageImprovementScore * 100
).toFixed(1)}%\n`;
enhancedReport += `- **Trend**: ${
kgInsights.trend === "improving"
? "📈 Improving"
: kgInsights.trend === "declining"
? "📉 Declining"
: "➡️ Stable"
}\n\n`;
if (kgInsights.recommendations.length > 0) {
enhancedReport += "### Knowledge Graph Insights\n\n";
for (const rec of kgInsights.recommendations) {
enhancedReport += `${rec}\n\n`;
}
}
}
// Convert KG insights to Recommendation objects
const recommendations =
kgInsights?.recommendations.map((rec) => {
// Determine type based on content
let type: "info" | "warning" | "critical" = "info";
if (rec.includes("🔴") || rec.includes("critical")) {
type = "critical";
} else if (
rec.includes("🟠") ||
rec.includes("⚠️") ||
rec.includes("warning")
) {
type = "warning";
}
return {
type,
title: "Documentation Freshness Insight",
description: rec,
};
}) || [];
const response: MCPToolResponse = {
success: true,
data: {
summary: `Scanned ${report.totalFiles} files: ${report.criticalFiles} critical, ${report.staleFiles} stale, ${report.warningFiles} warnings, ${report.freshFiles} fresh`,
report,
thresholds: thresholds,
formattedReport: enhancedReport,
kgInsights,
},
metadata: {
toolVersion: "1.0.0",
executionTime: Date.now() - startTime,
timestamp: new Date().toISOString(),
},
recommendations,
};
return response;
} catch (error) {
return {
success: false,
error: {
code: "FRESHNESS_TRACKING_FAILED",
message:
error instanceof Error
? error.message
: "Unknown error tracking documentation freshness",
resolution: "Check that the documentation path exists and is readable",
},
metadata: {
toolVersion: "1.0.0",
executionTime: Date.now() - startTime,
timestamp: new Date().toISOString(),
},
};
}
}
```
--------------------------------------------------------------------------------
/tests/tools/recommend-ssg-preferences.test.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Tests for Phase 2.2: User Preference Integration
* Tests recommend_ssg tool with user preference learning and application
*/
import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
import { promises as fs } from "fs";
import { join } from "path";
import { tmpdir } from "os";
import {
initializeKnowledgeGraph,
createOrUpdateProject,
} from "../../src/memory/kg-integration.js";
import { recommendSSG } from "../../src/tools/recommend-ssg.js";
import { MemoryManager } from "../../src/memory/manager.js";
import {
getUserPreferenceManager,
clearPreferenceManagerCache,
} from "../../src/memory/user-preferences.js";
describe("recommendSSG with User Preferences (Phase 2.2)", () => {
let testDir: string;
let originalEnv: string | undefined;
let memoryManager: MemoryManager;
// Helper to create analysis memory entry in correct format
const createAnalysisMemory = async (analysisData: any) => {
return await memoryManager.remember("analysis", analysisData);
};
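
  // Analysis payloads mirror what the analyze step stores, e.g.:
  //   { path: "/test/project",
  //     dependencies: { ecosystem: "javascript", languages: ["javascript"] },
  //     structure: { totalFiles: 50 } }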
beforeEach(async () => {
// Create temporary test directory
testDir = join(tmpdir(), `recommend-ssg-preferences-test-${Date.now()}`);
await fs.mkdir(testDir, { recursive: true });
// Set environment variable for storage
originalEnv = process.env.DOCUMCP_STORAGE_DIR;
process.env.DOCUMCP_STORAGE_DIR = testDir;
// Initialize KG and memory
await initializeKnowledgeGraph(testDir);
memoryManager = new MemoryManager(testDir);
await memoryManager.initialize();
// Clear preference manager cache
clearPreferenceManagerCache();
});
afterEach(async () => {
// Restore environment
if (originalEnv) {
process.env.DOCUMCP_STORAGE_DIR = originalEnv;
} else {
delete process.env.DOCUMCP_STORAGE_DIR;
}
// Clean up test directory
try {
await fs.rm(testDir, { recursive: true, force: true });
} catch (error) {
console.warn("Failed to clean up test directory:", error);
}
// Clear preference manager cache
clearPreferenceManagerCache();
});
describe("User Preference Application", () => {
it("should apply user preferences when auto-apply is enabled", async () => {
// Set up user preferences
const userId = "test-user-1";
const manager = await getUserPreferenceManager(userId);
await manager.updatePreferences({
preferredSSGs: ["hugo", "eleventy"],
autoApplyPreferences: true,
});
// Create analysis that would normally recommend Docusaurus
const memoryEntry = await createAnalysisMemory({
path: "/test/js-project",
dependencies: {
ecosystem: "javascript",
languages: ["javascript", "typescript"],
},
structure: { totalFiles: 60 },
});
// Get recommendation
const result = await recommendSSG({
analysisId: memoryEntry.id,
userId,
});
const content = result.content[0];
expect(content.type).toBe("text");
const data = JSON.parse(content.text);
// Should recommend Hugo (user's top preference)
expect(data.recommended).toBe("hugo");
expect(data.reasoning[0]).toContain("Switched to hugo");
expect(data.reasoning[0]).toContain("usage history");
});
it("should not apply preferences when auto-apply is disabled", async () => {
const userId = "test-user-2";
const manager = await getUserPreferenceManager(userId);
await manager.updatePreferences({
preferredSSGs: ["jekyll"],
autoApplyPreferences: false,
});
const memoryEntry = await createAnalysisMemory({
path: "/test/js-project",
dependencies: {
ecosystem: "javascript",
languages: ["javascript"],
},
structure: { totalFiles: 60 },
});
const result = await recommendSSG({
analysisId: memoryEntry.id,
userId,
});
const content = result.content[0];
const data = JSON.parse(content.text);
// Should use default recommendation, not user preference
expect(data.recommended).toBe("docusaurus");
expect(data.reasoning[0]).not.toContain("Switched");
});
it("should keep recommendation if it matches user preference", async () => {
const userId = "test-user-3";
const manager = await getUserPreferenceManager(userId);
await manager.updatePreferences({
preferredSSGs: ["mkdocs"],
autoApplyPreferences: true,
});
const memoryEntry = await createAnalysisMemory({
path: "/test/python-project",
dependencies: {
ecosystem: "python",
languages: ["python"],
},
structure: { totalFiles: 40 },
});
const result = await recommendSSG({
analysisId: memoryEntry.id,
userId,
});
const content = result.content[0];
const data = JSON.parse(content.text);
// Should recommend mkdocs (matches both analysis and preference)
expect(data.recommended).toBe("mkdocs");
// Either "Matches" or "Switched to" is acceptable - both indicate preference was applied
expect(data.reasoning[0]).toMatch(
/Matches your preferred SSG|Switched to mkdocs/,
);
});
it("should switch to user preference even if not ideal for ecosystem", async () => {
const userId = "test-user-4";
const manager = await getUserPreferenceManager(userId);
await manager.updatePreferences({
preferredSSGs: ["mkdocs", "jekyll"], // Python/Ruby SSGs
autoApplyPreferences: true,
});
const memoryEntry = await createAnalysisMemory({
path: "/test/js-project",
dependencies: {
ecosystem: "javascript",
languages: ["javascript"],
},
structure: { totalFiles: 60 },
});
const result = await recommendSSG({
analysisId: memoryEntry.id,
userId,
});
const content = result.content[0];
const data = JSON.parse(content.text);
// Should switch to mkdocs (user's top preference)
// User preferences override ecosystem recommendations
expect(data.recommended).toBe("mkdocs");
expect(data.reasoning[0]).toContain("Switched to mkdocs");
expect(data.reasoning[0]).toContain("usage history");
});
});
describe("Preference Tracking Integration", () => {
it("should use default user when no userId provided", async () => {
const memoryEntry = await createAnalysisMemory({
path: "/test/project",
dependencies: {
ecosystem: "javascript",
languages: ["javascript"],
},
structure: { totalFiles: 50 },
});
// Should not throw error with no userId
const result = await recommendSSG({
analysisId: memoryEntry.id,
});
const content = result.content[0];
expect(content.type).toBe("text");
const data = JSON.parse(content.text);
expect(data.recommended).toBeDefined();
});
it("should work with multiple users independently", async () => {
const user1 = "user1";
const user2 = "user2";
// Set different preferences for each user
const manager1 = await getUserPreferenceManager(user1);
await manager1.updatePreferences({
preferredSSGs: ["hugo"],
autoApplyPreferences: true,
});
const manager2 = await getUserPreferenceManager(user2);
await manager2.updatePreferences({
preferredSSGs: ["eleventy"],
autoApplyPreferences: true,
});
const memoryEntry = await createAnalysisMemory({
path: "/test/project",
dependencies: {
ecosystem: "javascript",
languages: ["javascript"],
},
structure: { totalFiles: 50 },
});
// Get recommendations for both users
const result1 = await recommendSSG({
analysisId: memoryEntry.id,
userId: user1,
});
const result2 = await recommendSSG({
analysisId: memoryEntry.id,
userId: user2,
});
const data1 = JSON.parse(result1.content[0].text);
const data2 = JSON.parse(result2.content[0].text);
// Each user should get their preferred SSG
expect(data1.recommended).toBe("hugo");
expect(data2.recommended).toBe("eleventy");
});
});
describe("Confidence Adjustment", () => {
it("should boost confidence when preference is applied", async () => {
const userId = "test-user-5";
const manager = await getUserPreferenceManager(userId);
await manager.updatePreferences({
preferredSSGs: ["eleventy"],
autoApplyPreferences: true,
});
const memoryEntry = await createAnalysisMemory({
path: "/test/js-project",
dependencies: {
ecosystem: "javascript",
languages: ["javascript"],
},
structure: { totalFiles: 60 },
});
const result = await recommendSSG({
analysisId: memoryEntry.id,
userId,
});
const content = result.content[0];
const data = JSON.parse(content.text);
// Confidence should be boosted when preference is applied
// Base confidence varies by SSG, but preference adds +0.05 boost
expect(data.confidence).toBeGreaterThan(0.7);
expect(data.reasoning[0]).toContain("🎯");
});
});
describe("Edge Cases", () => {
it("should handle empty preferred SSGs list", async () => {
const userId = "test-user-6";
const manager = await getUserPreferenceManager(userId);
await manager.updatePreferences({
preferredSSGs: [],
autoApplyPreferences: true,
});
const memoryEntry = await createAnalysisMemory({
path: "/test/project",
dependencies: {
ecosystem: "javascript",
languages: ["javascript"],
},
structure: { totalFiles: 50 },
});
const result = await recommendSSG({
analysisId: memoryEntry.id,
userId,
});
const content = result.content[0];
const data = JSON.parse(content.text);
// Should use default recommendation
expect(data.recommended).toBe("docusaurus");
expect(data.reasoning[0]).not.toContain("Switched");
});
it("should handle preference manager initialization failure gracefully", async () => {
const memoryEntry = await createAnalysisMemory({
path: "/test/project",
dependencies: {
ecosystem: "javascript",
languages: ["javascript"],
},
structure: { totalFiles: 50 },
});
// Should not throw even with invalid userId
const result = await recommendSSG({
analysisId: memoryEntry.id,
userId: "any-user-id",
});
const content = result.content[0];
expect(content.type).toBe("text");
const data = JSON.parse(content.text);
expect(data.recommended).toBeDefined();
});
});
});
```