This is page 5 of 23. Use http://codebase.md/tosin2013/documcp?lines=false&page={x} to view the full context.
# Directory Structure
```
├── .dockerignore
├── .eslintignore
├── .eslintrc.json
├── .github
│ ├── agents
│ │ ├── documcp-ast.md
│ │ ├── documcp-deploy.md
│ │ ├── documcp-memory.md
│ │ ├── documcp-test.md
│ │ └── documcp-tool.md
│ ├── copilot-instructions.md
│ ├── dependabot.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── automated-changelog.md
│ │ ├── bug_report.md
│ │ ├── bug_report.yml
│ │ ├── documentation_issue.md
│ │ ├── feature_request.md
│ │ ├── feature_request.yml
│ │ ├── npm-publishing-fix.md
│ │ └── release_improvements.md
│ ├── PULL_REQUEST_TEMPLATE.md
│ ├── release-drafter.yml
│ └── workflows
│ ├── auto-merge.yml
│ ├── ci.yml
│ ├── codeql.yml
│ ├── dependency-review.yml
│ ├── deploy-docs.yml
│ ├── README.md
│ ├── release-drafter.yml
│ └── release.yml
├── .gitignore
├── .husky
│ ├── commit-msg
│ └── pre-commit
├── .linkcheck.config.json
├── .markdown-link-check.json
├── .nvmrc
├── .pre-commit-config.yaml
├── .versionrc.json
├── ARCHITECTURAL_CHANGES_SUMMARY.md
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── CONTRIBUTING.md
├── docker-compose.docs.yml
├── Dockerfile.docs
├── docs
│ ├── .docusaurus
│ │ ├── docusaurus-plugin-content-docs
│ │ │ └── default
│ │ │ └── __mdx-loader-dependency.json
│ │ └── docusaurus-plugin-content-pages
│ │ └── default
│ │ └── __plugin.json
│ ├── adrs
│ │ ├── adr-0001-mcp-server-architecture.md
│ │ ├── adr-0002-repository-analysis-engine.md
│ │ ├── adr-0003-static-site-generator-recommendation-engine.md
│ │ ├── adr-0004-diataxis-framework-integration.md
│ │ ├── adr-0005-github-pages-deployment-automation.md
│ │ ├── adr-0006-mcp-tools-api-design.md
│ │ ├── adr-0007-mcp-prompts-and-resources-integration.md
│ │ ├── adr-0008-intelligent-content-population-engine.md
│ │ ├── adr-0009-content-accuracy-validation-framework.md
│ │ ├── adr-0010-mcp-resource-pattern-redesign.md
│ │ ├── adr-0011-ce-mcp-compatibility.md
│ │ ├── adr-0012-priority-scoring-system-for-documentation-drift.md
│ │ ├── adr-0013-release-pipeline-and-package-distribution.md
│ │ └── README.md
│ ├── api
│ │ ├── .nojekyll
│ │ ├── assets
│ │ │ ├── hierarchy.js
│ │ │ ├── highlight.css
│ │ │ ├── icons.js
│ │ │ ├── icons.svg
│ │ │ ├── main.js
│ │ │ ├── navigation.js
│ │ │ ├── search.js
│ │ │ └── style.css
│ │ ├── hierarchy.html
│ │ ├── index.html
│ │ ├── modules.html
│ │ └── variables
│ │ └── TOOLS.html
│ ├── assets
│ │ └── logo.svg
│ ├── CE-MCP-FINDINGS.md
│ ├── development
│ │ └── MCP_INSPECTOR_TESTING.md
│ ├── docusaurus.config.js
│ ├── explanation
│ │ ├── architecture.md
│ │ └── index.md
│ ├── guides
│ │ ├── link-validation.md
│ │ ├── playwright-integration.md
│ │ └── playwright-testing-workflow.md
│ ├── how-to
│ │ ├── analytics-setup.md
│ │ ├── change-watcher.md
│ │ ├── custom-domains.md
│ │ ├── documentation-freshness-tracking.md
│ │ ├── drift-priority-scoring.md
│ │ ├── github-pages-deployment.md
│ │ ├── index.md
│ │ ├── llm-integration.md
│ │ ├── local-testing.md
│ │ ├── performance-optimization.md
│ │ ├── prompting-guide.md
│ │ ├── repository-analysis.md
│ │ ├── seo-optimization.md
│ │ ├── site-monitoring.md
│ │ ├── troubleshooting.md
│ │ └── usage-examples.md
│ ├── index.md
│ ├── knowledge-graph.md
│ ├── package-lock.json
│ ├── package.json
│ ├── phase-2-intelligence.md
│ ├── reference
│ │ ├── api-overview.md
│ │ ├── cli.md
│ │ ├── configuration.md
│ │ ├── deploy-pages.md
│ │ ├── index.md
│ │ ├── mcp-tools.md
│ │ └── prompt-templates.md
│ ├── research
│ │ ├── cross-domain-integration
│ │ │ └── README.md
│ │ ├── domain-1-mcp-architecture
│ │ │ ├── index.md
│ │ │ └── mcp-performance-research.md
│ │ ├── domain-2-repository-analysis
│ │ │ └── README.md
│ │ ├── domain-3-ssg-recommendation
│ │ │ ├── index.md
│ │ │ └── ssg-performance-analysis.md
│ │ ├── domain-4-diataxis-integration
│ │ │ └── README.md
│ │ ├── domain-5-github-deployment
│ │ │ ├── github-pages-security-analysis.md
│ │ │ └── index.md
│ │ ├── domain-6-api-design
│ │ │ └── README.md
│ │ ├── README.md
│ │ ├── research-integration-summary-2025-01-14.md
│ │ ├── research-progress-template.md
│ │ └── research-questions-2025-01-14.md
│ ├── robots.txt
│ ├── sidebars.js
│ ├── sitemap.xml
│ ├── src
│ │ └── css
│ │ └── custom.css
│ └── tutorials
│ ├── development-setup.md
│ ├── environment-setup.md
│ ├── first-deployment.md
│ ├── getting-started.md
│ ├── index.md
│ ├── memory-workflows.md
│ └── user-onboarding.md
├── ISSUE_IMPLEMENTATION_SUMMARY.md
├── jest.config.js
├── LICENSE
├── Makefile
├── MCP_PHASE2_IMPLEMENTATION.md
├── mcp-config-example.json
├── mcp.json
├── package-lock.json
├── package.json
├── README.md
├── release.sh
├── scripts
│ └── check-package-structure.cjs
├── SECURITY.md
├── setup-precommit.sh
├── src
│ ├── benchmarks
│ │ └── performance.ts
│ ├── index.ts
│ ├── memory
│ │ ├── contextual-retrieval.ts
│ │ ├── deployment-analytics.ts
│ │ ├── enhanced-manager.ts
│ │ ├── export-import.ts
│ │ ├── freshness-kg-integration.ts
│ │ ├── index.ts
│ │ ├── integration.ts
│ │ ├── kg-code-integration.ts
│ │ ├── kg-health.ts
│ │ ├── kg-integration.ts
│ │ ├── kg-link-validator.ts
│ │ ├── kg-storage.ts
│ │ ├── knowledge-graph.ts
│ │ ├── learning.ts
│ │ ├── manager.ts
│ │ ├── multi-agent-sharing.ts
│ │ ├── pruning.ts
│ │ ├── schemas.ts
│ │ ├── storage.ts
│ │ ├── temporal-analysis.ts
│ │ ├── user-preferences.ts
│ │ └── visualization.ts
│ ├── prompts
│ │ └── technical-writer-prompts.ts
│ ├── scripts
│ │ └── benchmark.ts
│ ├── templates
│ │ └── playwright
│ │ ├── accessibility.spec.template.ts
│ │ ├── Dockerfile.template
│ │ ├── docs-e2e.workflow.template.yml
│ │ ├── link-validation.spec.template.ts
│ │ └── playwright.config.template.ts
│ ├── tools
│ │ ├── analyze-deployments.ts
│ │ ├── analyze-readme.ts
│ │ ├── analyze-repository.ts
│ │ ├── change-watcher.ts
│ │ ├── check-documentation-links.ts
│ │ ├── cleanup-agent-artifacts.ts
│ │ ├── deploy-pages.ts
│ │ ├── detect-gaps.ts
│ │ ├── evaluate-readme-health.ts
│ │ ├── generate-config.ts
│ │ ├── generate-contextual-content.ts
│ │ ├── generate-llm-context.ts
│ │ ├── generate-readme-template.ts
│ │ ├── generate-technical-writer-prompts.ts
│ │ ├── kg-health-check.ts
│ │ ├── manage-preferences.ts
│ │ ├── manage-sitemap.ts
│ │ ├── optimize-readme.ts
│ │ ├── populate-content.ts
│ │ ├── readme-best-practices.ts
│ │ ├── recommend-ssg.ts
│ │ ├── setup-playwright-tests.ts
│ │ ├── setup-structure.ts
│ │ ├── simulate-execution.ts
│ │ ├── sync-code-to-docs.ts
│ │ ├── test-local-deployment.ts
│ │ ├── track-documentation-freshness.ts
│ │ ├── update-existing-documentation.ts
│ │ ├── validate-content.ts
│ │ ├── validate-documentation-freshness.ts
│ │ ├── validate-readme-checklist.ts
│ │ └── verify-deployment.ts
│ ├── types
│ │ └── api.ts
│ ├── utils
│ │ ├── artifact-detector.ts
│ │ ├── ast-analyzer.ts
│ │ ├── change-watcher.ts
│ │ ├── code-scanner.ts
│ │ ├── content-extractor.ts
│ │ ├── drift-detector.ts
│ │ ├── execution-simulator.ts
│ │ ├── freshness-tracker.ts
│ │ ├── language-parsers-simple.ts
│ │ ├── llm-client.ts
│ │ ├── permission-checker.ts
│ │ ├── semantic-analyzer.ts
│ │ ├── sitemap-generator.ts
│ │ ├── usage-metadata.ts
│ │ └── user-feedback-integration.ts
│ └── workflows
│ └── documentation-workflow.ts
├── test-docs-local.sh
├── tests
│ ├── api
│ │ └── mcp-responses.test.ts
│ ├── benchmarks
│ │ └── performance.test.ts
│ ├── call-graph-builder.test.ts
│ ├── change-watcher-priority.integration.test.ts
│ ├── change-watcher.test.ts
│ ├── edge-cases
│ │ └── error-handling.test.ts
│ ├── execution-simulator.test.ts
│ ├── functional
│ │ └── tools.test.ts
│ ├── integration
│ │ ├── kg-documentation-workflow.test.ts
│ │ ├── knowledge-graph-workflow.test.ts
│ │ ├── mcp-readme-tools.test.ts
│ │ ├── memory-mcp-tools.test.ts
│ │ ├── readme-technical-writer.test.ts
│ │ └── workflow.test.ts
│ ├── memory
│ │ ├── contextual-retrieval.test.ts
│ │ ├── enhanced-manager.test.ts
│ │ ├── export-import.test.ts
│ │ ├── freshness-kg-integration.test.ts
│ │ ├── kg-code-integration.test.ts
│ │ ├── kg-health.test.ts
│ │ ├── kg-link-validator.test.ts
│ │ ├── kg-storage-validation.test.ts
│ │ ├── kg-storage.test.ts
│ │ ├── knowledge-graph-documentation-examples.test.ts
│ │ ├── knowledge-graph-enhanced.test.ts
│ │ ├── knowledge-graph.test.ts
│ │ ├── learning.test.ts
│ │ ├── manager-advanced.test.ts
│ │ ├── manager.test.ts
│ │ ├── mcp-resource-integration.test.ts
│ │ ├── mcp-tool-persistence.test.ts
│ │ ├── schemas-documentation-examples.test.ts
│ │ ├── schemas.test.ts
│ │ ├── storage.test.ts
│ │ ├── temporal-analysis.test.ts
│ │ └── user-preferences.test.ts
│ ├── performance
│ │ ├── memory-load-testing.test.ts
│ │ └── memory-stress-testing.test.ts
│ ├── prompts
│ │ ├── guided-workflow-prompts.test.ts
│ │ └── technical-writer-prompts.test.ts
│ ├── server.test.ts
│ ├── setup.ts
│ ├── tools
│ │ ├── all-tools.test.ts
│ │ ├── analyze-coverage.test.ts
│ │ ├── analyze-deployments.test.ts
│ │ ├── analyze-readme.test.ts
│ │ ├── analyze-repository.test.ts
│ │ ├── check-documentation-links.test.ts
│ │ ├── cleanup-agent-artifacts.test.ts
│ │ ├── deploy-pages-kg-retrieval.test.ts
│ │ ├── deploy-pages-tracking.test.ts
│ │ ├── deploy-pages.test.ts
│ │ ├── detect-gaps.test.ts
│ │ ├── evaluate-readme-health.test.ts
│ │ ├── generate-contextual-content.test.ts
│ │ ├── generate-llm-context.test.ts
│ │ ├── generate-readme-template.test.ts
│ │ ├── generate-technical-writer-prompts.test.ts
│ │ ├── kg-health-check.test.ts
│ │ ├── manage-sitemap.test.ts
│ │ ├── optimize-readme.test.ts
│ │ ├── readme-best-practices.test.ts
│ │ ├── recommend-ssg-historical.test.ts
│ │ ├── recommend-ssg-preferences.test.ts
│ │ ├── recommend-ssg.test.ts
│ │ ├── simple-coverage.test.ts
│ │ ├── sync-code-to-docs.test.ts
│ │ ├── test-local-deployment.test.ts
│ │ ├── tool-error-handling.test.ts
│ │ ├── track-documentation-freshness.test.ts
│ │ ├── validate-content.test.ts
│ │ ├── validate-documentation-freshness.test.ts
│ │ └── validate-readme-checklist.test.ts
│ ├── types
│ │ └── type-safety.test.ts
│ └── utils
│ ├── artifact-detector.test.ts
│ ├── ast-analyzer.test.ts
│ ├── content-extractor.test.ts
│ ├── drift-detector-diataxis.test.ts
│ ├── drift-detector-priority.test.ts
│ ├── drift-detector.test.ts
│ ├── freshness-tracker.test.ts
│ ├── llm-client.test.ts
│ ├── semantic-analyzer.test.ts
│ ├── sitemap-generator.test.ts
│ ├── usage-metadata.test.ts
│ └── user-feedback-integration.test.ts
├── tsconfig.json
└── typedoc.json
```
# Files
--------------------------------------------------------------------------------
/tests/memory/kg-storage.test.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Tests for Knowledge Graph Storage
* Phase 1: Core Knowledge Graph Integration
*/
import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
import { promises as fs } from "fs";
import { join } from "path";
import { KGStorage } from "../../src/memory/kg-storage.js";
import { GraphNode, GraphEdge } from "../../src/memory/knowledge-graph.js";
import { tmpdir } from "os";
describe("KGStorage", () => {
let storage: KGStorage;
let testDir: string;
beforeEach(async () => {
// Create temporary test directory
testDir = join(tmpdir(), `kg-storage-test-${Date.now()}`);
await fs.mkdir(testDir, { recursive: true });
storage = new KGStorage({
storageDir: testDir,
backupOnWrite: true,
validateOnRead: true,
});
await storage.initialize();
});
afterEach(async () => {
// Clean up test directory
try {
await fs.rm(testDir, { recursive: true, force: true });
} catch (error) {
console.warn("Failed to clean up test directory:", error);
}
});
describe("Initialization", () => {
it("should create storage directory", async () => {
const stats = await fs.stat(testDir);
expect(stats.isDirectory()).toBe(true);
});
it("should create entity and relationship files", async () => {
const entityFile = join(testDir, "knowledge-graph-entities.jsonl");
const relationshipFile = join(
testDir,
"knowledge-graph-relationships.jsonl",
);
await fs.access(entityFile);
await fs.access(relationshipFile);
// Files should exist (no error thrown)
expect(true).toBe(true);
});
it("should write file markers", async () => {
const entityFile = join(testDir, "knowledge-graph-entities.jsonl");
const content = await fs.readFile(entityFile, "utf-8");
expect(content).toContain("# DOCUMCP_KNOWLEDGE_GRAPH_ENTITIES");
});
it("should reject non-DocuMCP files", async () => {
// Create a non-DocuMCP file
const fakeFile = join(testDir, "knowledge-graph-entities.jsonl");
await fs.writeFile(fakeFile, "not a documcp file\n", "utf-8");
const newStorage = new KGStorage({ storageDir: testDir });
await expect(newStorage.initialize()).rejects.toThrow(
"is not a DocuMCP knowledge graph file",
);
});
});
describe("Entity Storage", () => {
it("should save and load entities", async () => {
const entities: GraphNode[] = [
{
id: "project:test",
type: "project",
label: "Test Project",
properties: { name: "Test" },
weight: 1.0,
lastUpdated: new Date().toISOString(),
},
{
id: "tech:typescript",
type: "technology",
label: "TypeScript",
properties: { name: "TypeScript" },
weight: 1.0,
lastUpdated: new Date().toISOString(),
},
];
await storage.saveEntities(entities);
const loaded = await storage.loadEntities();
expect(loaded).toHaveLength(2);
expect(loaded[0].id).toBe("project:test");
expect(loaded[1].id).toBe("tech:typescript");
});
it("should handle empty entity list", async () => {
await storage.saveEntities([]);
const loaded = await storage.loadEntities();
expect(loaded).toHaveLength(0);
});
it("should preserve entity properties", async () => {
const entity: GraphNode = {
id: "project:complex",
type: "project",
label: "Complex Project",
properties: {
name: "Complex",
technologies: ["typescript", "react"],
metadata: { nested: { value: 123 } },
},
weight: 0.85,
lastUpdated: new Date().toISOString(),
};
await storage.saveEntities([entity]);
const loaded = await storage.loadEntities();
expect(loaded[0].properties.technologies).toEqual([
"typescript",
"react",
]);
expect(loaded[0].properties.metadata.nested.value).toBe(123);
});
});
describe("Relationship Storage", () => {
it("should save and load relationships", async () => {
const relationships: GraphEdge[] = [
{
id: "project:test-uses-tech:typescript",
source: "project:test",
target: "tech:typescript",
type: "uses",
weight: 1.0,
confidence: 0.9,
properties: {},
lastUpdated: new Date().toISOString(),
},
];
await storage.saveRelationships(relationships);
const loaded = await storage.loadRelationships();
expect(loaded).toHaveLength(1);
expect(loaded[0].source).toBe("project:test");
expect(loaded[0].target).toBe("tech:typescript");
});
it("should handle empty relationship list", async () => {
await storage.saveRelationships([]);
const loaded = await storage.loadRelationships();
expect(loaded).toHaveLength(0);
});
it("should preserve relationship properties", async () => {
const relationship: GraphEdge = {
id: "test-edge",
source: "node1",
target: "node2",
type: "similar_to",
weight: 0.75,
confidence: 0.8,
properties: {
similarityScore: 0.75,
sharedTechnologies: ["typescript"],
},
lastUpdated: new Date().toISOString(),
};
await storage.saveRelationships([relationship]);
const loaded = await storage.loadRelationships();
expect(loaded[0].properties.similarityScore).toBe(0.75);
expect(loaded[0].properties.sharedTechnologies).toEqual(["typescript"]);
});
});
describe("Complete Graph Storage", () => {
it("should save and load complete graph", async () => {
const entities: GraphNode[] = [
{
id: "project:test",
type: "project",
label: "Test",
properties: {},
weight: 1.0,
lastUpdated: new Date().toISOString(),
},
];
const relationships: GraphEdge[] = [
{
id: "test-edge",
source: "project:test",
target: "tech:ts",
type: "uses",
weight: 1.0,
confidence: 1.0,
properties: {},
lastUpdated: new Date().toISOString(),
},
];
await storage.saveGraph(entities, relationships);
const loaded = await storage.loadGraph();
expect(loaded.entities).toHaveLength(1);
expect(loaded.relationships).toHaveLength(1);
});
});
describe("Backup System", () => {
it("should create backups on write", async () => {
const entities: GraphNode[] = [
{
id: "test",
type: "project",
label: "Test",
properties: {},
weight: 1.0,
lastUpdated: new Date().toISOString(),
},
];
await storage.saveEntities(entities);
await storage.saveEntities(entities); // Second save should create backup
const backupDir = join(testDir, "backups");
const files = await fs.readdir(backupDir);
const backupFiles = files.filter((f) => f.startsWith("entities-"));
expect(backupFiles.length).toBeGreaterThan(0);
});
it("should restore from backup", async () => {
const entities1: GraphNode[] = [
{
id: "version1",
type: "project",
label: "V1",
properties: {},
weight: 1.0,
lastUpdated: new Date().toISOString(),
},
];
const entities2: GraphNode[] = [
{
id: "version2",
type: "project",
label: "V2",
properties: {},
weight: 1.0,
lastUpdated: new Date().toISOString(),
},
];
// Save first version
await storage.saveEntities(entities1);
// Small delay to ensure different timestamps
await new Promise((resolve) => setTimeout(resolve, 10));
// Save second version (creates backup of first)
await storage.saveEntities(entities2);
// Verify we have second version
let loaded = await storage.loadEntities();
expect(loaded).toHaveLength(1);
expect(loaded[0].id).toBe("version2");
// Restore from backup
await storage.restoreFromBackup("entities");
// Verify we have first version back
loaded = await storage.loadEntities();
expect(loaded).toHaveLength(1);
expect(loaded[0].id).toBe("version1");
});
});
describe("Statistics", () => {
it("should return accurate statistics", async () => {
const entities: GraphNode[] = [
{
id: "e1",
type: "project",
label: "E1",
properties: {},
weight: 1.0,
lastUpdated: new Date().toISOString(),
},
{
id: "e2",
type: "technology",
label: "E2",
properties: {},
weight: 1.0,
lastUpdated: new Date().toISOString(),
},
];
const relationships: GraphEdge[] = [
{
id: "r1",
source: "e1",
target: "e2",
type: "uses",
weight: 1.0,
confidence: 1.0,
properties: {},
lastUpdated: new Date().toISOString(),
},
];
await storage.saveGraph(entities, relationships);
const stats = await storage.getStatistics();
expect(stats.entityCount).toBe(2);
expect(stats.relationshipCount).toBe(1);
expect(stats.schemaVersion).toBe("1.1.0");
expect(stats.fileSize.entities).toBeGreaterThan(0);
});
});
describe("Integrity Verification", () => {
it("should detect orphaned relationships", async () => {
const entities: GraphNode[] = [
{
id: "e1",
type: "project",
label: "E1",
properties: {},
weight: 1.0,
lastUpdated: new Date().toISOString(),
},
];
const relationships: GraphEdge[] = [
{
id: "r1",
source: "e1",
target: "missing", // References non-existent entity
type: "uses",
weight: 1.0,
confidence: 1.0,
properties: {},
lastUpdated: new Date().toISOString(),
},
];
await storage.saveGraph(entities, relationships);
const result = await storage.verifyIntegrity();
expect(result.valid).toBe(true); // No errors, just warnings
expect(result.warnings.length).toBeGreaterThan(0);
expect(result.warnings[0]).toContain("missing");
});
it("should detect duplicate entities", async () => {
const entities: GraphNode[] = [
{
id: "duplicate",
type: "project",
label: "E1",
properties: {},
weight: 1.0,
lastUpdated: new Date().toISOString(),
},
{
id: "duplicate",
type: "project",
label: "E2",
properties: {},
weight: 1.0,
lastUpdated: new Date().toISOString(),
},
];
await storage.saveEntities(entities);
const result = await storage.verifyIntegrity();
expect(result.valid).toBe(false);
expect(result.errors.length).toBeGreaterThan(0);
expect(result.errors[0]).toContain("Duplicate entity ID");
});
});
describe("Export", () => {
it("should export graph as JSON", async () => {
const entities: GraphNode[] = [
{
id: "test",
type: "project",
label: "Test",
properties: {},
weight: 1.0,
lastUpdated: new Date().toISOString(),
},
];
await storage.saveEntities(entities);
const json = await storage.exportAsJSON();
const parsed = JSON.parse(json);
expect(parsed.metadata).toBeDefined();
expect(parsed.metadata.version).toBe("1.1.0");
expect(parsed.entities).toHaveLength(1);
expect(parsed.relationships).toHaveLength(0);
});
});
});
```
--------------------------------------------------------------------------------
/docs/adrs/adr-0010-mcp-resource-pattern-redesign.md:
--------------------------------------------------------------------------------
```markdown
---
id: adr-10-mcp-resource-pattern-redesign
documcp:
last_updated: "2025-11-20T00:46:21.944Z"
last_validated: "2025-12-09T19:41:38.574Z"
auto_updated: false
update_frequency: monthly
validated_against_commit: 306567b32114502c606244ad6c2930360bcd4201
---
# ADR-010: MCP Resource Pattern Redesign
**Status:** Accepted
**Date:** 2025-10-09
**Deciders:** Development Team
**Context:** MCP Best Practices Review
---
## Context and Problem Statement
During an MCP best practices review (2025-10-09), a critical architectural misalignment was identified: DocuMCP was using MCP resources as a **persistence layer** to store tool execution results, violating the fundamental MCP control pattern philosophy.
**The Problem:**
- Resources were storing tool outputs via `storeResourceFromToolResult()`
- A `resourceStore` Map held dynamic tool results
- Resource URIs were generated at runtime (e.g., `documcp://analysis/{timestamp}-{random}`)
- This violated MCP's core principle that resources should **serve applications**, not store tool results
**Why This Matters:**
According to MCP best practices, the three primitives have distinct control patterns:
- **Tools** = Model-controlled (Claude decides when to execute) → Serve the **model**
- **Resources** = App-controlled (application decides when to fetch) → Serve the **app**
- **Prompts** = User-controlled (user triggers via actions) → Serve **users**
Using resources for tool result storage conflates model operations with app operations, creating architectural confusion and misusing the MCP protocol.
---
## Decision Drivers
### Technical Requirements
- Align with MCP specification and best practices
- Follow proper control pattern separation
- Maintain backward compatibility where possible
- Preserve existing tool functionality
### Architectural Principles
- **Separation of Concerns:** Tools handle execution, resources provide app data
- **Statelessness:** MCP servers should be stateless; persistence belongs elsewhere
- **Clear Purpose:** Each primitive serves its intended audience
### Developer Experience
- Simplify resource implementation
- Make resource purpose obvious
- Enable proper MCP Inspector testing
---
## Considered Options
### Option 1: Keep Current Pattern (Status Quo) ❌
**Description:** Continue using resources to store tool results.
**Pros:**
- No code changes required
- Existing URIs remain functional
- No migration needed
**Cons:**
- ❌ Violates MCP best practices
- ❌ Confuses model operations with app operations
- ❌ Makes MCP Inspector testing unclear
- ❌ Creates unnecessary complexity
- ❌ Misrepresents resource purpose
**Decision:** Rejected due to architectural misalignment
---
### Option 2: Remove All Resources ❌
**Description:** Eliminate resources entirely, return all data via tools only.
**Pros:**
- Simplifies implementation
- Eliminates resource confusion
- Focuses on tools as primary interface
**Cons:**
- ❌ Removes legitimate use cases for app-controlled data
- ❌ Loses template access for UI
- ❌ Prevents SSG list for dropdowns
- ❌ Underutilizes MCP capabilities
**Decision:** Rejected — eliminating resources entirely would discard their legitimate app-controlled use cases along with the problematic pattern
---
### Option 3: Redesign Resources for App Needs ✅ (CHOSEN)
**Description:** Remove tool result storage, create static resources that serve application UI needs.
**Pros:**
- ✅ Aligns with MCP best practices
- ✅ Clear separation: tools execute, resources provide app data
- ✅ Enables proper MCP Inspector testing
- ✅ Provides legitimate value to applications
- ✅ Follows control pattern philosophy
**Cons:**
- Requires code refactoring
- Changes resource URIs (but tools remain compatible)
**Decision:** **ACCEPTED** - Best aligns with MCP architecture
---
## Decision Outcome
**Chosen Option:** Option 3 - Redesign Resources for App Needs
### Implementation Details
#### 1. Remove Tool Result Storage
**Before:**
```typescript
const resourceStore = new Map<string, { content: string; mimeType: string }>();
function storeResourceFromToolResult(
toolName: string,
args: any,
result: any,
id?: string,
): string {
const uri = `documcp://analysis/${id}`;
resourceStore.set(uri, {
content: JSON.stringify(result),
mimeType: "application/json",
});
return uri;
}
// In tool handler:
const result = await analyzeRepository(args);
const resourceUri = storeResourceFromToolResult(
"analyze_repository",
args,
result,
);
(result as any).resourceUri = resourceUri;
return result;
```
**After:**
```typescript
// No resource storage! Tools return results directly
const result = await analyzeRepository(args);
return wrapToolResult(result, "analyze_repository");
```
#### 2. Create Static App-Serving Resources
**New Resource Categories:**
**A. SSG List Resource** (for UI dropdowns)
```typescript
{
uri: "documcp://ssgs/available",
name: "Available Static Site Generators",
description: "List of supported SSGs with capabilities for UI selection",
mimeType: "application/json"
}
```
Returns:
```json
{
"ssgs": [
{
"id": "jekyll",
"name": "Jekyll",
"description": "Ruby-based SSG, great for GitHub Pages",
"language": "ruby",
"complexity": "low",
"buildSpeed": "medium",
"ecosystem": "mature",
"bestFor": ["blogs", "documentation", "simple-sites"]
}
// ... 4 more SSGs
]
}
```
**B. Configuration Templates** (for SSG setup)
```typescript
{
uri: "documcp://templates/jekyll-config",
name: "Jekyll Configuration Template",
description: "Template for Jekyll _config.yml",
mimeType: "text/yaml"
}
```
Returns actual YAML template for Jekyll configuration.
**C. Workflow Resources** (for UI workflow display)
```typescript
{
uri: "documcp://workflows/all",
name: "All Documentation Workflows",
description: "Complete list of available documentation workflows",
mimeType: "application/json"
}
```
#### 3. Resource Handler Implementation
```typescript
server.setRequestHandler(ReadResourceRequestSchema, async (request) => {
const { uri } = request.params;
// Handle SSG list (for UI)
if (uri === "documcp://ssgs/available") {
return {
contents: [{
uri,
mimeType: "application/json",
text: JSON.stringify({ ssgs: [...] })
}]
};
}
// Handle templates (static content)
if (uri.startsWith("documcp://templates/")) {
const templateType = uri.split("/").pop();
return {
contents: [{
uri,
mimeType: getTemplateMimeType(templateType),
text: getTemplateContent(templateType)
}]
};
}
throw new Error(`Resource not found: ${uri}`);
});
```
### Resource Design Principles
1. **Static Content Only:** Resources return pre-defined, static data
2. **App-Controlled:** Applications fetch resources when needed for UI
3. **Predictable URIs:** Fixed URIs (no timestamps or random IDs)
4. **Clear Purpose:** Each resource serves a specific app UI need
---
## Consequences
### Positive Consequences ✅
1. **Architectural Alignment**
- Resources now properly serve applications
- Clear separation between tools and resources
- Follows MCP control pattern philosophy
2. **Improved Developer Experience**
- Resource purpose is obvious
- MCP Inspector testing is clear
- No confusion about resource lifecycle
3. **Better Testability**
- Resources return predictable content
- Can test resources independently
- MCP Inspector works correctly
4. **Simplified Implementation**
- Removed `resourceStore` Map
- Removed `storeResourceFromToolResult()` function
- Removed 50+ lines of resource storage code
- Tools are simpler (no resource URI tracking)
5. **Legitimate App Value**
- SSG list enables UI dropdowns
- Templates provide boilerplate content
- Workflows guide user actions
### Negative Consequences ⚠️
1. **Breaking Change for Resource URIs**
- Old dynamic URIs (`documcp://analysis/{timestamp}`) no longer work
- Applications relying on these URIs need updates
- **Mitigation:** Tools return data directly; URIs were internal implementation detail
2. **No Tool Result Persistence**
- Tool results are not stored between executions
- Applications must handle result storage if needed
- **Mitigation:** MCP servers should be stateless; persistence is app responsibility
3. **Migration Effort**
- Required updating all tool handlers
- Updated resource definitions
- **Time Cost:** ~4 hours
---
## Implementation Results
### Code Changes
**Files Modified:**
- `src/index.ts` (main server file)
- Removed `resourceStore` Map (10 lines)
- Removed `storeResourceFromToolResult()` (50 lines)
- Redesigned `RESOURCES` array (12 new resources)
- Updated `ReadResourceRequestSchema` handler (150 lines)
- Removed resource storage from all tools (30+ locations)
**Lines of Code:**
- **Removed:** ~120 lines (resource storage logic)
- **Added:** ~200 lines (static resource handlers)
- **Net Change:** +80 lines (but much clearer purpose)
### Test Results
**Before Implementation:**
- Tests: 122/122 passing ✅
- TypeScript: Compiles ✅
**After Implementation:**
- Tests: 122/122 passing ✅
- TypeScript: Compiles ✅
- No broken tests
- No regression issues
### Performance Impact
**Before:**
- Resource storage: O(1) Map insertion per tool
- Memory: Growing Map of all tool results
**After:**
- Resource retrieval: O(1) static content lookup
- Memory: Fixed size (no growth)
**Improvement:** Reduced memory usage, no performance degradation
---
## Compliance with MCP Best Practices
### Before Redesign
- **Resource Implementation:** 3/10 ❌
- **Control Patterns:** 4/10 ❌
### After Redesign
- **Resource Implementation:** 9/10 ✅
- **Control Patterns:** 9/10 ✅
---
## Migration Guide
### For Client Applications
**Old Pattern (No Longer Works):**
```javascript
// Execute tool
const result = await callTool("analyze_repository", { path: "./" });
// WRONG: Try to fetch from resource URI
const resourceUri = result.resourceUri;
const resource = await readResource(resourceUri); // ❌ Will fail
```
**New Pattern (Recommended):**
```javascript
// Execute tool - result contains all data
const result = await callTool("analyze_repository", { path: "./" });
// Use result directly (no need for resources)
console.log(result.data); // ✅ All data is here
// Use resources for app UI needs
const ssgList = await readResource("documcp://ssgs/available"); // ✅ For dropdowns
const template = await readResource("documcp://templates/jekyll-config"); // ✅ For setup
```
### For Tool Developers
**Old Pattern:**
```typescript
const result = await analyzeRepository(args);
const resourceUri = storeResourceFromToolResult(
"analyze_repository",
args,
result,
);
(result as any).resourceUri = resourceUri;
return result;
```
**New Pattern:**
```typescript
const result = await analyzeRepository(args);
return wrapToolResult(result, "analyze_repository"); // Standardized wrapper
```
---
## References
- **MCP Specification:** https://modelcontextprotocol.io/docs
- **MCP Best Practices Review:** `MCP_BEST_PRACTICES_REVIEW.md`
- **MCP Inspector Guide:** `docs/development/MCP_INSPECTOR_TESTING.md`
- **Related ADRs:**
- ADR-006: MCP Tools API Design
- ADR-007: MCP Prompts and Resources Integration
---
## Notes
### Design Philosophy
The resource redesign embodies a core MCP principle: **each primitive serves its audience**.
- **Tools** answer the question: _"What can Claude do?"_
- **Resources** answer the question: _"What data does my app need?"_
- **Prompts** answer the question: _"What workflows can users trigger?"_
Mixing these purposes creates architectural debt and violates separation of concerns.
### Future Enhancements
**Potential Additional Resources:**
- `documcp://themes/available` - UI theme list
- `documcp://validators/rules` - Validation rule catalog
- `documcp://examples/{category}` - Example content library
These should all follow the same principle: **serve the application's UI needs**, not store execution results.
---
**Last Updated:** 2025-10-09
**Status:** Implemented and Verified ✅
```
--------------------------------------------------------------------------------
/docs/adrs/adr-0007-mcp-prompts-and-resources-integration.md:
--------------------------------------------------------------------------------
```markdown
---
id: adr-7-mcp-prompts-and-resources-integration
title: "ADR-007: MCP Prompts and Resources Integration"
sidebar_label: "ADR-007: MCP Prompts and Resources Integration"
sidebar_position: 7
documcp:
last_updated: "2025-12-12T18:24:24.459Z"
last_validated: "2025-12-12T18:24:24.459Z"
auto_updated: false
update_frequency: monthly
validated_against_commit: c4b07aaf8802a2b359d483114fa21f7cabb85d34
---
# ADR-007: MCP Prompts and Resources Integration for AI Assistance
## Status
Accepted
## Context
DocuMCP needs AI assistance capabilities, and the Model Context Protocol provides native support for exactly this use case through **Prompts** and **Resources**. Rather than extending the protocol, we should leverage MCP's built-in capabilities:
- **MCP Prompts**: Pre-written templates that help users accomplish specific tasks
- **MCP Resources**: File-like data that can be read by clients (like API responses, file contents, or generated documentation)
Current MCP Core Concepts that we can utilize:
1. **Tools**: Interactive functions (already implemented - analyze_repository, recommend_ssg, etc.)
2. **Prompts**: Template-based assistance for common workflows
3. **Resources**: Readable data and content that clients can access
This approach maintains full MCP compliance while providing rich AI assistance through the protocol's intended mechanisms.
## Decision
We will implement AI assistance using MCP's native **Prompts** and **Resources** capabilities, providing pre-written prompt templates for documentation workflows and exposing generated content through the MCP resource system.
### Core Implementation Strategy:
#### 1. MCP Prompts for Documentation Workflows
```typescript
// Implement MCP ListPromptsRequestSchema and GetPromptRequestSchema
const DOCUMENTATION_PROMPTS = [
{
name: "analyze-and-recommend",
description: "Complete repository analysis and SSG recommendation workflow",
arguments: [
{
name: "repository_path",
description: "Path to repository",
required: true,
},
{
name: "priority",
description: "Priority: simplicity, features, performance",
},
],
},
{
name: "setup-documentation",
description:
"Create comprehensive documentation structure with best practices",
arguments: [
{ name: "project_name", description: "Project name", required: true },
{ name: "ssg_type", description: "Static site generator type" },
],
},
{
name: "troubleshoot-deployment",
description: "Diagnose and fix GitHub Pages deployment issues",
arguments: [
{
name: "repository_url",
description: "GitHub repository URL",
required: true,
},
{ name: "error_message", description: "Deployment error message" },
],
},
];
```
#### 2. MCP Resources for Generated Content
```typescript
// Implement ListResourcesRequestSchema and ReadResourceRequestSchema
interface DocuMCPResource {
uri: string; // e.g., "documcp://analysis/repo-123"
name: string; // Human-readable name
description: string; // What this resource contains
mimeType: string; // Content type
}
// Resource types we'll expose:
const RESOURCE_TYPES = [
"documcp://analysis/{analysisId}", // Repository analysis results
"documcp://config/{ssgType}/{projectId}", // Generated configuration files
"documcp://structure/{projectId}", // Documentation structure templates
"documcp://deployment/{workflowId}", // GitHub Actions workflows
"documcp://templates/{templateType}", // Reusable templates
];
```
#### 3. Integration with Existing Tools
- **Tools remain unchanged**: analyze_repository, recommend_ssg, generate_config, etc.
- **Prompts provide workflows**: Chain multiple tool calls with guided prompts
- **Resources expose results**: Make tool outputs accessible as MCP resources
### Example Workflow Integration:
```typescript
// MCP Prompt: "analyze-and-recommend"
// Generated prompt text that guides the user through:
// 1. Call analyze_repository tool
// 2. Review analysis results via documcp://analysis/{id} resource
// 3. Call recommend_ssg tool with analysis results
// 4. Access recommendations via documcp://recommendations/{id} resource
// 5. Call generate_config with selected SSG
```
## Alternatives Considered
### Alternative 1: Custom Protocol Extensions (Previous Approach)
- **Pros**: Maximum flexibility, custom AI features
- **Cons**: Protocol complexity, compatibility issues, non-standard
- **Decision**: Rejected in favor of MCP-native approach
### Alternative 2: Tools-Only Approach
- **Pros**: Simple, already implemented
- **Cons**: No guided workflows, no template assistance, harder user experience
- **Decision**: Insufficient for comprehensive AI assistance
### Alternative 3: External AI Service Integration
- **Pros**: Leverage existing AI platforms
- **Cons**: Breaks MCP cohesion, additional dependencies, latency
- **Decision**: Conflicts with MCP server simplicity
## Consequences
### Positive Consequences
- **MCP Compliance**: Uses protocol as designed, no custom extensions needed
- **Client Compatibility**: Works with all MCP clients (Claude Desktop, GitHub Copilot, etc.)
- **Guided Workflows**: Prompts provide step-by-step assistance for complex tasks
- **Rich Content Access**: Resources make generated content easily accessible
- **Template Reusability**: Prompts can be customized and reused across projects
- **Simplified Architecture**: No need for custom protocol handling or AI-specific interfaces
### Negative Consequences
- **Prompt Complexity**: Complex workflows require sophisticated prompt engineering
- **Resource Management**: Need efficient resource caching and lifecycle management
- **Limited AI Features**: Constrained to MCP's prompt/resource model
- **Template Maintenance**: Prompts need regular updates as tools evolve
## Implementation Plan
### Phase 1: Core MCP Integration (Week 1-2)
1. Implement `ListPromptsRequestSchema` and `GetPromptRequestSchema` handlers
2. Implement `ListResourcesRequestSchema` and `ReadResourceRequestSchema` handlers
3. Create resource URI schema and routing system
4. Add MCP capabilities registration for prompts and resources
### Phase 2: Documentation Prompts (Week 3-4)
1. Create "analyze-and-recommend" workflow prompt
2. Create "setup-documentation" structure prompt
3. Create "troubleshoot-deployment" diagnostic prompt
4. Add prompt argument validation and help text
### Phase 3: Resource Management (Week 5-6)
1. Implement resource caching for analysis results
2. Add generated configuration file resources
3. Create template library resources
4. Add resource cleanup and lifecycle management
### Phase 4: Advanced Features (Week 7-8)
1. Dynamic prompt generation based on project characteristics
2. Contextual resource recommendations
3. Prompt composition for complex workflows
4. Integration testing with major MCP clients
## Integration with Existing Architecture
### ADR-001 (MCP Server Architecture)
- Extends the TypeScript MCP SDK usage to include prompts and resources
- Maintains stateless operation model
- Leverages existing modular design
### ADR-006 (MCP Tools API Design)
- Tools remain the primary interface for actions
- Prompts provide guided workflows using existing tools
- Resources expose tool outputs in structured format
### ADR-007 (Pluggable Prompt Tool Architecture)
- **Modified Approach**: Instead of custom prompt engines, use MCP prompts
- Template system becomes MCP prompt templates
- Configuration-driven approach still applies for prompt customization
## MCP Server Capabilities Declaration
```typescript
server.setRequestHandler(InitializeRequestSchema, async () => ({
protocolVersion: "2024-11-05",
capabilities: {
tools: {}, // Existing tool capabilities
prompts: {}, // NEW: Prompt template capabilities
resources: {}, // NEW: Resource access capabilities
},
serverInfo: {
name: "documcp",
version: "0.2.0",
},
}));
```
## Code Execution with MCP (CE-MCP) Integration (2025-12-09)
### Resources are Perfect for Code Mode
**Critical Insight**: MCP Resources are the ideal mechanism for preventing context pollution in Code Mode workflows:
```typescript
// ✅ GOOD: Summary-only result with resource URI
async function handleAnalyzeRepository(params) {
const fullAnalysis = await analyzeRepo(params.path);
// Store complete result as MCP resource
const resourceUri = await storeResource({
type: "analysis",
data: fullAnalysis,
});
// Return only summary to LLM context (not 50,000 tokens of full data!)
return {
summary: {
fileCount: fullAnalysis.fileCount,
primaryLanguage: fullAnalysis.primaryLanguage,
complexity: fullAnalysis.complexityScore,
},
resourceUri, // Client can access full data when needed
nextSteps: [
/* guidance */
],
};
}
```
### Prompts for Code Mode Workflows
MCP Prompts provide guided workflows for Code Mode clients:
```typescript
// Prompt guides LLM to generate orchestration code
{
name: "complete-documentation-setup",
description: "Complete workflow from analysis to deployment",
prompt: `
You will set up documentation for a project using these steps:
1. Call analyze_repository tool and store result
2. Access analysis via resource URI
3. Call recommend_ssg with analysis data
4. Generate configuration files
5. Create Diataxis structure
6. Set up GitHub Actions deployment
Write TypeScript code to orchestrate these tools efficiently.
`
}
```
### Resource Lifecycle in Code Mode
```typescript
// Code Mode execution pattern
async function codeModeWorkflow(repoPath: string) {
// Step 1: Analysis (returns resource URI)
const analysisResult = await callTool("analyze_repository", {
path: repoPath,
});
const analysis = await readResource(analysisResult.resourceUri);
// Step 2: Recommendation (uses cached analysis)
const recommendation = await callTool("recommend_ssg", { analysis });
// Step 3: Configuration (parallel execution possible!)
const [config, structure] = await Promise.all([
callTool("generate_config", { ssg: recommendation.primary }),
callTool("setup_structure", { ssg: recommendation.primary }),
]);
// Resources prevent intermediate data from polluting LLM context
return { config, structure };
}
```
### Performance Benefits
**Token Savings**:
- Traditional: Full analysis result (50,000 tokens) → LLM context
- With Resources: Summary (500 tokens) + resource URI → LLM context
- **99% token reduction** for large results
**Cost Savings**:
- Complex workflow: $2.50 → $0.03 (~80x reduction)
- Achieved through resource-based intermediate storage
For detailed analysis, see [ADR-011: CE-MCP Compatibility](adr-0011-ce-mcp-compatibility.md).
## Implementation Status Review (2025-12-12)
**Status Update**: Changed from "Proposed" to "Accepted" based on comprehensive ADR compliance review.
**Review Findings**:
- ✅ **Implementation Confirmed**: Comprehensive code review validates full implementation of MCP Prompts and Resources integration
- ✅ **Compliance Score**: 9/10 - Well implemented with strong architectural consistency
- ✅ **Code Evidence**: Smart Code Linking identified 25 related files confirming implementation
- ✅ **Integration Verified**: Successfully integrated with existing tools and architecture (ADR-001, ADR-006)
**Implementation Evidence**:
- MCP Prompts handlers implemented and registered
- MCP Resources system operational with URI schema
- Resource caching and lifecycle management in place
- CE-MCP compatibility validated (see ADR-011)
- Integration testing completed with major MCP clients
**Validation**: ADR compliance review conducted 2025-12-12, commit c4b07aaf8802a2b359d483114fa21f7cabb85d34
## Future Considerations
- Integration with MCP sampling for AI-powered responses
- Advanced prompt chaining and conditional workflows
- Resource subscriptions for real-time updates
- Community prompt template sharing and marketplace
- Resource caching strategies for Code Mode optimization
- Streaming resources for real-time progress updates
```
--------------------------------------------------------------------------------
/docs/tutorials/user-onboarding.md:
--------------------------------------------------------------------------------
```markdown
---
documcp:
last_updated: "2025-11-20T00:46:21.973Z"
last_validated: "2025-12-09T19:41:38.605Z"
auto_updated: false
update_frequency: monthly
validated_against_commit: 306567b32114502c606244ad6c2930360bcd4201
---
# DocuMCP User Onboarding Guide
Welcome to DocuMCP! This comprehensive guide will help you get started with DocuMCP in your own environment, from initial setup to advanced usage patterns.
## 🚀 Quick Start
### Prerequisites
- **Node.js**: Version 20.0.0 or higher
- **npm**: Version 8.0.0 or higher
- **Git**: For repository analysis
- **GitHub Account**: For GitHub Pages deployment
### Installation
```bash
# Install DocuMCP globally
npm install -g documcp
# Or install locally in your project
npm install documcp --save-dev
```
### Verify Installation
```bash
# Check if DocuMCP is installed correctly
documcp --version
# Should output: DocuMCP v0.5.0
```
## 📋 Basic Usage Patterns
### Pattern 1: Repository Analysis
Start by analyzing your repository to understand its structure and documentation needs.
```bash
# Basic repository analysis
documcp analyze-repository --path ./my-project --depth standard
# Quick analysis for large repositories
documcp analyze-repository --path ./large-project --depth quick
# Deep analysis for comprehensive documentation
documcp analyze-repository --path ./complex-project --depth deep
```
**Example Output:**
```json
{
"success": true,
"data": {
"id": "analysis_abc123_def456",
"structure": {
"totalFiles": 150,
"totalDirectories": 25,
"languages": { ".ts": 100, ".md": 20, ".json": 10 },
"hasTests": true,
"hasCI": true,
"hasDocs": false
},
"recommendations": {
"primaryLanguage": "TypeScript",
"projectType": "Library",
"teamSize": "small"
}
}
}
```
### Pattern 2: SSG Recommendation
Get intelligent recommendations for the best static site generator for your project.
```bash
# Get SSG recommendation based on analysis
documcp recommend-ssg --analysis-id analysis_abc123_def456
# With user preferences
documcp recommend-ssg --analysis-id analysis_abc123_def456 --priority performance --ecosystem javascript
# For enterprise users
documcp recommend-ssg --analysis-id analysis_abc123_def456 --priority simplicity
```
**Example Output:**
```json
{
"success": true,
"data": {
"recommended": "docusaurus",
"confidence": 0.92,
"reasoning": [
"React-based project detected",
"Documentation focus identified",
"Team size suitable for Docusaurus"
],
"alternatives": [
{
"name": "hugo",
"score": 0.85,
"pros": ["Performance", "Fast builds"],
"cons": ["Learning curve", "Go templates"]
}
]
}
}
```
### Pattern 3: Documentation Structure Setup
Create a Diataxis-compliant documentation structure.
```bash
# Set up documentation structure
documcp setup-structure --path ./docs --ssg docusaurus --include-examples
# Minimal structure for existing projects
documcp setup-structure --path ./site --ssg hugo --include-examples false
```
### Pattern 4: Configuration Generation
Generate configuration files for your chosen SSG.
```bash
# Generate Docusaurus configuration
documcp generate-config --ssg docusaurus --project-name "My Project" --output-path ./docs
# Generate Hugo configuration
documcp generate-config --ssg hugo --project-name "My Site" --output-path ./site
```
### Pattern 5: Content Population
Populate your documentation with intelligent content based on your repository.
```bash
# Populate documentation content
documcp populate-content --analysis-id analysis_abc123_def456 --docs-path ./docs
# With specific focus areas
documcp populate-content --analysis-id analysis_abc123_def456 --docs-path ./docs --focus-areas api,examples
```
### Pattern 6: GitHub Pages Deployment
Deploy your documentation to GitHub Pages.
```bash
# Deploy to GitHub Pages
documcp deploy-pages --repository "user/repo" --ssg docusaurus
# With custom domain
documcp deploy-pages --repository "user/repo" --ssg docusaurus --custom-domain "docs.example.com"
```
## 🎯 Common Use Cases
### Use Case 1: New Open Source Project
For a new open source project, follow this workflow:
```bash
# 1. Analyze your repository
ANALYSIS_ID=$(documcp analyze-repository --path . --depth standard | jq -r '.data.id')
# 2. Get SSG recommendation
documcp recommend-ssg --analysis-id $ANALYSIS_ID --priority community_focused
# 3. Set up documentation structure
documcp setup-structure --path ./docs --ssg docusaurus --include-examples
# 4. Generate configuration
documcp generate-config --ssg docusaurus --project-name "My Open Source Project" --output-path ./docs
# 5. Populate content
documcp populate-content --analysis-id $ANALYSIS_ID --docs-path ./docs
# 6. Deploy to GitHub Pages
documcp deploy-pages --repository "$(git remote get-url origin | sed 's/.*github.com[:/]\([^.]*\).*/\1/')" --ssg docusaurus
```
### Use Case 2: Enterprise Documentation
For enterprise documentation with specific requirements:
```bash
# 1. Analyze with enterprise focus
ANALYSIS_ID=$(documcp analyze-repository --path . --depth deep | jq -r '.data.id')
# 2. Get enterprise-focused recommendation
documcp recommend-ssg --analysis-id $ANALYSIS_ID --priority enterprise_focused
# 3. Set up minimal structure
documcp setup-structure --path ./enterprise-docs --ssg hugo --include-examples false
# 4. Generate enterprise configuration
documcp generate-config --ssg hugo --project-name "Enterprise Documentation" --output-path ./enterprise-docs
# 5. Populate with enterprise focus
documcp populate-content --analysis-id $ANALYSIS_ID --docs-path ./enterprise-docs --focus-areas security,compliance,api
```
### Use Case 3: API Documentation
For API-focused projects:
```bash
# 1. Analyze API project
ANALYSIS_ID=$(documcp analyze-repository --path . --depth standard | jq -r '.data.id')
# 2. Get API-focused recommendation
documcp recommend-ssg --analysis-id $ANALYSIS_ID --priority features
# 3. Set up API documentation structure
documcp setup-structure --path ./api-docs --ssg docusaurus --include-examples
# 4. Generate API documentation configuration
documcp generate-config --ssg docusaurus --project-name "API Documentation" --output-path ./api-docs
# 5. Populate with API focus
documcp populate-content --analysis-id $ANALYSIS_ID --docs-path ./api-docs --focus-areas api,examples,integration
```
## 🔧 Advanced Configuration
### Environment Variables
Set up environment variables for advanced configuration:
```bash
# GitHub token for deployment
export GITHUB_TOKEN="your_github_token"
# Custom storage directory for memory
export DOCUMCP_STORAGE_DIR="./.documcp"
# Development mode for debugging
export NODE_ENV="development"
```
### Memory System Configuration
Configure the memory system for learning and pattern recognition:
```bash
# Initialize memory system
documcp memory initialize --storage-dir ./.documcp
# Export memories for backup
documcp memory export --format json --output ./documcp-memories.json
# Import memories from backup
documcp memory import --format json --input ./documcp-memories.json
```
### User Preferences
Set up user preferences for personalized recommendations:
```bash
# Set user preferences
documcp preferences set --user-id "developer123" --priority performance --ecosystem javascript
# Get personalized recommendations
documcp recommend-ssg --analysis-id $ANALYSIS_ID --user-id "developer123"
# Export preferences
documcp preferences export --user-id "developer123" --output ./preferences.json
```
## 🚨 Troubleshooting
### Common Issues
#### Issue 1: Repository Analysis Fails
**Problem:** `Permission denied: Cannot read directory`
**Solution:**
```bash
# Check directory permissions
ls -la /path/to/repository
# Fix permissions if needed
chmod -R 755 /path/to/repository
# Run analysis again
documcp analyze-repository --path /path/to/repository --depth standard
```
#### Issue 2: SSG Recommendation Returns Low Confidence
**Problem:** Low confidence scores in recommendations
**Solution:**
```bash
# Try deeper analysis
documcp analyze-repository --path . --depth deep
# Use specific preferences
documcp recommend-ssg --analysis-id $ANALYSIS_ID --priority simplicity --ecosystem any
# Check for similar projects in memory
documcp memory similar --analysis-id $ANALYSIS_ID
```
#### Issue 3: GitHub Pages Deployment Fails
**Problem:** Deployment fails with permission errors
**Solution:**
```bash
# Check GitHub token permissions
curl -H "Authorization: token $GITHUB_TOKEN" https://api.github.com/user
# Ensure token has repo and pages permissions
# Regenerate token with correct permissions if needed
# Try deployment again
documcp deploy-pages --repository "user/repo" --ssg docusaurus
```
#### Issue 4: Content Population Generates Empty Content
**Problem:** No content is generated during population
**Solution:**
```bash
# Check if repository has sufficient content
documcp analyze-repository --path . --depth deep
# Ensure documentation structure exists
documcp setup-structure --path ./docs --ssg docusaurus
# Try with different population level
documcp populate-content --analysis-id $ANALYSIS_ID --docs-path ./docs --population-level comprehensive
```
## 📚 Best Practices
### 1. Repository Organization
- Keep your repository well-organized with clear directory structure
- Include a comprehensive README.md file
- Use consistent naming conventions
- Include package.json or equivalent dependency files
### 2. Documentation Structure
- Follow Diataxis framework principles
- Use clear, descriptive headings
- Include code examples and use cases
- Keep documentation up-to-date with code changes
### 3. Memory System Usage
- Regularly export memories for backup
- Use consistent user IDs for preference tracking
- Clean up old memories periodically
- Share memories across team members for better recommendations
### 4. Deployment Strategy
- Test documentation locally before deployment
- Use staging environments for testing
- Monitor deployment success rates
- Keep deployment configurations in version control
## 🔗 Integration Examples
### GitHub Actions Integration
```yaml
name: Deploy Documentation
on:
push:
branches: [main]
paths: ["docs/**", "src/**"]
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: "20"
- name: Install DocuMCP
run: npm install -g documcp
- name: Analyze Repository
id: analyze
run: |
ANALYSIS_ID=$(documcp analyze-repository --path . --depth standard | jq -r '.data.id')
echo "analysis_id=$ANALYSIS_ID" >> $GITHUB_OUTPUT
- name: Deploy Documentation
run: |
documcp deploy-pages --repository ${{ github.repository }} --ssg docusaurus
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
### Docker Integration
```dockerfile
FROM node:20-alpine
# Install DocuMCP
RUN npm install -g documcp
# Set working directory
WORKDIR /app
# Copy project files
COPY . .
# Analyze and deploy documentation
RUN documcp analyze-repository --path . --depth standard && \
documcp recommend-ssg --analysis-id $(documcp analyze-repository --path . | jq -r '.data.id') && \
documcp deploy-pages --repository $REPOSITORY --ssg docusaurus
EXPOSE 3000
CMD ["documcp", "serve", "--port", "3000"]
```
## 📖 Additional Resources
- [API Reference](../api/) - Complete API documentation
- [Configuration Guide](../reference/configuration.md) - Detailed configuration options
- [MCP Tools Reference](../reference/mcp-tools.md) - MCP tool specifications
- [GitHub Pages Deployment](../how-to/github-pages-deployment.md) - Deployment guide
- [Troubleshooting Guide](../how-to/troubleshooting.md) - Common issues and solutions
## 🤝 Getting Help
- **GitHub Issues**: Report bugs and request features
- **GitHub Discussions**: Ask questions and share ideas
- **Documentation**: Check the comprehensive documentation
- **API Reference**: Explore the complete API documentation
Welcome to the DocuMCP community! 🎉
```
--------------------------------------------------------------------------------
/src/tools/cleanup-agent-artifacts.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Agent Artifact Cleanup Tool
*
* Detects, classifies, and cleans up artifacts generated by AI coding agents.
* Supports scan, clean, and archive operations with configurable behavior.
*/
import { promises as fs } from "fs";
import path from "path";
import { z } from "zod";
import { formatMCPResponse } from "../types/api.js";
import {
ArtifactDetector,
ArtifactScanResult,
ArtifactCleanupConfig,
AgentArtifact,
DEFAULT_CONFIG,
} from "../utils/artifact-detector.js";
// Input schema for the cleanup_agent_artifacts tool.
// All arguments are validated (and defaulted) here before any filesystem work happens.
const inputSchema = z.object({
  path: z.string().describe("Path to the project directory to scan"),
  operation: z
    .enum(["scan", "clean", "archive"])
    .describe(
      "Operation to perform: scan (detect only), clean (remove), or archive (move to .agent-archive/)",
    ),
  dryRun: z
    .boolean()
    .optional()
    .default(false)
    .describe("Show what would be changed without making changes"),
  interactive: z
    .boolean()
    .optional()
    .default(false)
    .describe(
      "Prompt for confirmation before each action (not supported in MCP, treated as dryRun)",
    ),
  // Artifacts below this confidence are never auto-deleted (see performCleanup).
  autoDeleteThreshold: z
    .number()
    .min(0)
    .max(1)
    .optional()
    .default(0.9)
    .describe("Confidence threshold for automatic deletion (0-1)"),
  includeGitIgnored: z
    .boolean()
    .optional()
    .default(false)
    .describe("Include artifacts that are already in .gitignore"),
  // Merged on top of DEFAULT_CONFIG.patterns rather than replacing them.
  customPatterns: z
    .object({
      files: z.array(z.string()).optional(),
      directories: z.array(z.string()).optional(),
      inlineMarkers: z.array(z.string()).optional(),
    })
    .optional()
    .describe("Custom patterns to detect in addition to defaults"),
});
// Parsed input type derived from the schema above.
export type CleanupAgentArtifactsInput = z.infer<typeof inputSchema>;

// Shape of the tool's `data` payload in a successful MCP response.
export interface CleanupAgentArtifactsOutput {
  // Operation that ran; suffixed with " (dry-run)" when no changes were made.
  operation: string;
  // Full scan result, including detected artifacts and summary counts.
  result: ArtifactScanResult;
  // Present only for non-dry-run clean/archive operations.
  actionsPerformed?: {
    deleted: string[];
    archived: string[];
    skipped: string[];
  };
}
/**
 * Cleanup agent artifacts tool.
 *
 * Scans a project directory for artifacts generated by AI coding agents and,
 * depending on `operation`, either reports them (scan), deletes them (clean),
 * or moves them into a timestamped `.agent-archive/` directory (archive).
 * `dryRun` and `interactive` both suppress filesystem changes.
 *
 * @param args - Raw tool arguments; validated against `inputSchema`.
 * @returns An MCP response object produced by `formatMCPResponse`; failure
 *          paths return a response with an `error` payload.
 */
export async function cleanupAgentArtifacts(
  args: unknown,
): Promise<{ content: any[]; isError?: boolean }> {
  const startTime = Date.now();
  try {
    const input = inputSchema.parse(args);

    // Validate path exists and is a directory before doing any work.
    try {
      const stats = await fs.stat(input.path);
      if (!stats.isDirectory()) {
        return formatMCPResponse({
          success: false,
          error: {
            code: "INVALID_PATH",
            message: `Path is not a directory: ${input.path}`,
            resolution: "Provide a valid directory path",
          },
          metadata: {
            toolVersion: "1.0.0",
            executionTime: Date.now() - startTime,
            timestamp: new Date().toISOString(),
          },
        });
      }
    } catch (error) {
      // fs.stat throws when the path does not exist (or is unreadable).
      return formatMCPResponse({
        success: false,
        error: {
          code: "PATH_NOT_FOUND",
          message: `Path does not exist: ${input.path}`,
          resolution: "Provide a valid directory path",
        },
        metadata: {
          toolVersion: "1.0.0",
          executionTime: Date.now() - startTime,
          timestamp: new Date().toISOString(),
        },
      });
    }

    // Build detector configuration from the validated input.
    const config: Partial<ArtifactCleanupConfig> = {
      autoDeleteThreshold: input.autoDeleteThreshold,
      preserveGitIgnored: !input.includeGitIgnored,
    };

    // Merge custom patterns if provided (additive on top of the defaults,
    // so built-in detection is never disabled by custom patterns).
    if (input.customPatterns) {
      config.patterns = {
        files: [
          ...DEFAULT_CONFIG.patterns.files,
          ...(input.customPatterns.files || []),
        ],
        directories: [
          ...DEFAULT_CONFIG.patterns.directories,
          ...(input.customPatterns.directories || []),
        ],
        inlineMarkers: [
          ...DEFAULT_CONFIG.patterns.inlineMarkers,
          ...(input.customPatterns.inlineMarkers || []),
        ],
        blockPatterns: DEFAULT_CONFIG.patterns.blockPatterns,
      };
    }

    // Create detector and scan (the scan itself never mutates the project).
    const detector = new ArtifactDetector(input.path, config);
    const scanResult = await detector.scan();

    // Handle operations
    let actionsPerformed:
      | { deleted: string[]; archived: string[]; skipped: string[] }
      | undefined;

    // Interactive mode is treated as dry-run in MCP context
    // (there is no way to prompt the user mid-tool-call).
    const isDryRun = input.dryRun || input.interactive;

    if (input.operation === "clean" && !isDryRun) {
      actionsPerformed = await performCleanup(
        input.path,
        scanResult.artifacts,
        input.autoDeleteThreshold,
      );
    } else if (input.operation === "archive" && !isDryRun) {
      actionsPerformed = await performArchive(
        input.path,
        scanResult.artifacts,
        input.autoDeleteThreshold,
      );
    }

    // Build output
    const output: CleanupAgentArtifactsOutput = {
      operation: isDryRun ? `${input.operation} (dry-run)` : input.operation,
      result: scanResult,
      actionsPerformed,
    };

    // Build recommendations based on what the scan found, keyed off the
    // per-recommendation counts in the scan summary.
    const recommendations = [];
    if (input.operation === "scan" && scanResult.artifacts.length > 0) {
      const deleteCount = scanResult.summary.byRecommendation["delete"] || 0;
      const archiveCount = scanResult.summary.byRecommendation["archive"] || 0;
      const reviewCount = scanResult.summary.byRecommendation["review"] || 0;
      if (deleteCount > 0) {
        recommendations.push({
          type: "info" as const,
          title: "High-confidence artifacts found",
          description: `Found ${deleteCount} artifacts recommended for deletion. Run with operation='clean' to remove them.`,
          action: "cleanup_agent_artifacts with operation='clean'",
        });
      }
      if (archiveCount > 0) {
        recommendations.push({
          type: "info" as const,
          title: "Archivable artifacts found",
          description: `Found ${archiveCount} artifacts recommended for archiving. Run with operation='archive' to preserve them.`,
          action: "cleanup_agent_artifacts with operation='archive'",
        });
      }
      if (reviewCount > 0) {
        recommendations.push({
          type: "warning" as const,
          title: "Manual review recommended",
          description: `Found ${reviewCount} artifacts that require manual review before action.`,
        });
      }
    }

    if (isDryRun && input.operation !== "scan") {
      recommendations.push({
        type: "info" as const,
        title: "Dry-run mode",
        description:
          "No changes were made. Remove dryRun=true to apply changes.",
      });
    }

    // Build next steps to guide the caller's follow-up actions.
    const nextSteps = [];
    if (input.operation === "scan" && scanResult.artifacts.length > 0) {
      nextSteps.push({
        action: "Review detected artifacts",
        description:
          "Examine the artifacts list to understand what was detected",
        priority: "high" as const,
      });
      nextSteps.push({
        action: "Run cleanup with dry-run",
        toolRequired: "cleanup_agent_artifacts",
        description: "Test cleanup operation before making changes",
        priority: "medium" as const,
      });
    }

    if (
      actionsPerformed &&
      (actionsPerformed.deleted.length > 0 ||
        actionsPerformed.archived.length > 0)
    ) {
      nextSteps.push({
        action: "Review changes and commit",
        description: "Verify the cleanup results and commit to version control",
        priority: "high" as const,
      });
    }

    return formatMCPResponse(
      {
        success: true,
        data: output,
        metadata: {
          toolVersion: "1.0.0",
          executionTime: Date.now() - startTime,
          timestamp: new Date().toISOString(),
        },
        recommendations:
          recommendations.length > 0 ? recommendations : undefined,
        nextSteps: nextSteps.length > 0 ? nextSteps : undefined,
      },
      { fullResponse: true },
    );
  } catch (error) {
    // Distinguish schema-validation failures from unexpected runtime errors.
    if (error instanceof z.ZodError) {
      return formatMCPResponse({
        success: false,
        error: {
          code: "INVALID_INPUT",
          message: "Invalid input parameters",
          details: error.errors,
          resolution: "Check the input schema and provide valid parameters",
        },
        metadata: {
          toolVersion: "1.0.0",
          executionTime: Date.now() - startTime,
          timestamp: new Date().toISOString(),
        },
      });
    }
    return formatMCPResponse({
      success: false,
      error: {
        code: "TOOL_ERROR",
        message: error instanceof Error ? error.message : "Unknown error",
        resolution: "Check the error message and try again",
      },
      metadata: {
        toolVersion: "1.0.0",
        executionTime: Date.now() - startTime,
        timestamp: new Date().toISOString(),
      },
    });
  }
}
/**
* Perform cleanup operation (delete artifacts)
*/
/**
 * Perform cleanup operation (delete artifacts)
 *
 * Permanently removes artifacts that meet the confidence threshold and
 * carry a "delete" recommendation. Inline/block comment artifacts are
 * never removed automatically (they require manual editing) and land in
 * the skipped list, as do below-threshold artifacts and any whose
 * deletion fails on disk.
 *
 * @param projectPath - Absolute path of the project root
 * @param artifacts - Candidate artifacts from a prior scan
 * @param threshold - Minimum confidence (inclusive) required to delete
 * @returns Artifact paths grouped by outcome; `archived` is always empty here
 */
async function performCleanup(
  projectPath: string,
  artifacts: AgentArtifact[],
  threshold: number,
): Promise<{ deleted: string[]; archived: string[]; skipped: string[] }> {
  const deleted: string[] = [];
  const skipped: string[] = [];

  for (const artifact of artifacts) {
    // Only high-confidence artifacts explicitly marked "delete" are eligible.
    const eligible =
      artifact.confidence >= threshold && artifact.recommendation === "delete";
    if (!eligible) {
      skipped.push(artifact.path);
      continue;
    }

    // artifact.path may carry a ":line" suffix; strip it for the on-disk path.
    const target = path.join(projectPath, artifact.path.split(":")[0]);
    try {
      switch (artifact.type) {
        case "file":
          await fs.unlink(target);
          deleted.push(artifact.path);
          break;
        case "directory":
          await fs.rm(target, { recursive: true, force: true });
          deleted.push(artifact.path);
          break;
        default:
          // Inline and block comments require manual editing
          skipped.push(artifact.path);
      }
    } catch (error) {
      console.error(`Error deleting ${artifact.path}: ${error}`);
      skipped.push(artifact.path);
    }
  }

  return { deleted, archived: [], skipped };
}
/**
* Perform archive operation (move artifacts to .agent-archive/)
*/
/**
 * Perform archive operation (move artifacts to .agent-archive/<timestamp>/)
 *
 * Moves file/directory artifacts into a timestamped archive directory
 * instead of deleting them. Eligibility is more lenient than deletion:
 * confidence only has to reach 70% of the configured threshold, and both
 * "archive" and "delete" recommendations qualify. Inline/block comment
 * artifacts are skipped (manual editing required), as are ineligible
 * artifacts and any whose move fails.
 *
 * @param projectPath - Absolute path of the project root
 * @param artifacts - Candidate artifacts from a prior scan
 * @param threshold - Base confidence threshold (archiving uses threshold * 0.7)
 * @returns Artifact paths grouped by outcome; `deleted` is always empty here
 */
async function performArchive(
  projectPath: string,
  artifacts: AgentArtifact[],
  threshold: number,
): Promise<{ deleted: string[]; archived: string[]; skipped: string[] }> {
  const archived: string[] = [];
  const skipped: string[] = [];
  // Create a unique, filesystem-safe archive directory for this run
  // (ISO timestamp with ":" and "." replaced, which are unsafe on Windows).
  const archiveDir = path.join(projectPath, ".agent-archive");
  const timestamp = new Date().toISOString().replace(/[:.]/g, "-");
  const archivePath = path.join(archiveDir, timestamp);
  try {
    await fs.mkdir(archivePath, { recursive: true });
  } catch (error) {
    console.error(`Error creating archive directory: ${error}`);
    return { deleted: [], archived: [], skipped: artifacts.map((a) => a.path) };
  }
  for (const artifact of artifacts) {
    // Archive medium to high confidence artifacts
    if (
      artifact.confidence >= threshold * 0.7 &&
      (artifact.recommendation === "archive" ||
        artifact.recommendation === "delete")
    ) {
      // artifact.path may carry a ":line" suffix; strip it once so source
      // and destination agree. (Fix: previously the destination used the
      // raw path while the source stripped the suffix, so a suffixed path
      // would be archived under a bogus name.)
      const relativePath = artifact.path.split(":")[0];
      const sourcePath = path.join(projectPath, relativePath);
      try {
        if (artifact.type === "file" || artifact.type === "directory") {
          const destPath = path.join(archivePath, relativePath);
          const destDir = path.dirname(destPath);
          // Create destination directory
          await fs.mkdir(destDir, { recursive: true });
          // Move via copy + remove (rename() can fail across devices)
          if (artifact.type === "file") {
            await fs.copyFile(sourcePath, destPath);
            await fs.unlink(sourcePath);
          } else {
            await fs.cp(sourcePath, destPath, { recursive: true });
            await fs.rm(sourcePath, { recursive: true, force: true });
          }
          archived.push(artifact.path);
        } else {
          // Inline and block comments require manual editing
          skipped.push(artifact.path);
        }
      } catch (error) {
        console.error(`Error archiving ${artifact.path}: ${error}`);
        skipped.push(artifact.path);
      }
    } else {
      skipped.push(artifact.path);
    }
  }
  return { deleted: [], archived, skipped };
}
```
--------------------------------------------------------------------------------
/docs/api/assets/icons.svg:
--------------------------------------------------------------------------------
```
<svg xmlns="http://www.w3.org/2000/svg"><g id="icon-1" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-module)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">M</text></g><g id="icon-2" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-module)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">M</text></g><g id="icon-4" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-namespace)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">N</text></g><g id="icon-8" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-enum)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">E</text></g><g id="icon-16" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-property)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">P</text></g><g id="icon-32" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-variable)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">V</text></g><g id="icon-64" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-function)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text 
fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">F</text></g><g id="icon-128" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-class)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">C</text></g><g id="icon-256" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-interface)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">I</text></g><g id="icon-512" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-constructor)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">C</text></g><g id="icon-1024" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-property)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">P</text></g><g id="icon-2048" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-method)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">M</text></g><g id="icon-4096" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-function)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">F</text></g><g id="icon-8192" class="tsd-no-select"><rect fill="var(--color-icon-background)" 
stroke="var(--color-ts-property)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">P</text></g><g id="icon-16384" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-constructor)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">C</text></g><g id="icon-32768" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-property)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">P</text></g><g id="icon-65536" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-type-alias)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">T</text></g><g id="icon-131072" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-type-alias)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">T</text></g><g id="icon-262144" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-accessor)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">A</text></g><g id="icon-524288" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-accessor)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" 
text-anchor="middle">A</text></g><g id="icon-1048576" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-accessor)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">A</text></g><g id="icon-2097152" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-type-alias)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">T</text></g><g id="icon-4194304" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-ts-reference)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="12"></rect><text fill="var(--color-icon-text)" x="50%" y="50%" dominant-baseline="central" text-anchor="middle">R</text></g><g id="icon-8388608" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-document)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><g stroke="var(--color-icon-text)" fill="none" stroke-width="1.5"><polygon points="6,5 6,19 18,19, 18,10 13,5"></polygon><line x1="9" y1="9" x2="13" y2="9"></line><line x1="9" y1="12" x2="15" y2="12"></line><line x1="9" y1="15" x2="15" y2="15"></line></g></g><g id="icon-folder" class="tsd-no-select"><rect fill="var(--color-icon-background)" stroke="var(--color-document)" stroke-width="1.5" x="1" y="1" width="22" height="22" rx="6"></rect><g stroke="var(--color-icon-text)" fill="none" stroke-width="1.5"><polygon points="5,5 10,5 12,8 19,8 19,18 5,18"></polygon></g></g><g id="icon-chevronDown" class="tsd-no-select"><path d="M4.93896 8.531L12 15.591L19.061 8.531L16.939 6.409L12 11.349L7.06098 6.409L4.93896 8.531Z" fill="var(--color-icon-text)"></path></g><g id="icon-chevronSmall" class="tsd-no-select"><path d="M1.5 5.50969L8 11.6609L14.5 5.50969L12.5466 3.66086L8 
7.96494L3.45341 3.66086L1.5 5.50969Z" fill="var(--color-icon-text)"></path></g><g id="icon-checkbox" class="tsd-no-select"><rect class="tsd-checkbox-background" width="30" height="30" x="1" y="1" rx="6" fill="none"></rect><path class="tsd-checkbox-checkmark" d="M8.35422 16.8214L13.2143 21.75L24.6458 10.25" stroke="none" stroke-width="3.5" stroke-linejoin="round" fill="none"></path></g><g id="icon-menu" class="tsd-no-select"><rect x="1" y="3" width="14" height="2" fill="var(--color-icon-text)"></rect><rect x="1" y="7" width="14" height="2" fill="var(--color-icon-text)"></rect><rect x="1" y="11" width="14" height="2" fill="var(--color-icon-text)"></rect></g><g id="icon-search" class="tsd-no-select"><path d="M15.7824 13.833L12.6666 10.7177C12.5259 10.5771 12.3353 10.499 12.1353 10.499H11.6259C12.4884 9.39596 13.001 8.00859 13.001 6.49937C13.001 2.90909 10.0914 0 6.50048 0C2.90959 0 0 2.90909 0 6.49937C0 10.0896 2.90959 12.9987 6.50048 12.9987C8.00996 12.9987 9.39756 12.4863 10.5008 11.6239V12.1332C10.5008 12.3332 10.5789 12.5238 10.7195 12.6644L13.8354 15.7797C14.1292 16.0734 14.6042 16.0734 14.8948 15.7797L15.7793 14.8954C16.0731 14.6017 16.0731 14.1267 15.7824 13.833ZM6.50048 10.499C4.29094 10.499 2.50018 8.71165 2.50018 6.49937C2.50018 4.29021 4.28781 2.49976 6.50048 2.49976C8.71001 2.49976 10.5008 4.28708 10.5008 6.49937C10.5008 8.70852 8.71314 10.499 6.50048 10.499Z" fill="var(--color-icon-text)"></path></g><g id="icon-anchor" class="tsd-no-select"><g stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"></path><path d="M10 14a3.5 3.5 0 0 0 5 0l4 -4a3.5 3.5 0 0 0 -5 -5l-.5 .5"></path><path d="M14 10a3.5 3.5 0 0 0 -5 0l-4 4a3.5 3.5 0 0 0 5 5l.5 -.5"></path></g></g><g id="icon-alertNote" class="tsd-no-select"><path fill="var(--color-alert-note)" d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8Zm8-6.5a6.5 6.5 0 1 0 0 13 6.5 6.5 0 0 0 0-13ZM6.5 7.75A.75.75 0 0 1 7.25 7h1a.75.75 0 0 1 
.75.75v2.75h.25a.75.75 0 0 1 0 1.5h-2a.75.75 0 0 1 0-1.5h.25v-2h-.25a.75.75 0 0 1-.75-.75ZM8 6a1 1 0 1 1 0-2 1 1 0 0 1 0 2Z"></path></g><g id="icon-alertTip" class="tsd-no-select"><path fill="var(--color-alert-tip)" d="M8 1.5c-2.363 0-4 1.69-4 3.75 0 .984.424 1.625.984 2.304l.214.253c.223.264.47.556.673.848.284.411.537.896.621 1.49a.75.75 0 0 1-1.484.211c-.04-.282-.163-.547-.37-.847a8.456 8.456 0 0 0-.542-.68c-.084-.1-.173-.205-.268-.32C3.201 7.75 2.5 6.766 2.5 5.25 2.5 2.31 4.863 0 8 0s5.5 2.31 5.5 5.25c0 1.516-.701 2.5-1.328 3.259-.095.115-.184.22-.268.319-.207.245-.383.453-.541.681-.208.3-.33.565-.37.847a.751.751 0 0 1-1.485-.212c.084-.593.337-1.078.621-1.489.203-.292.45-.584.673-.848.075-.088.147-.173.213-.253.561-.679.985-1.32.985-2.304 0-2.06-1.637-3.75-4-3.75ZM5.75 12h4.5a.75.75 0 0 1 0 1.5h-4.5a.75.75 0 0 1 0-1.5ZM6 15.25a.75.75 0 0 1 .75-.75h2.5a.75.75 0 0 1 0 1.5h-2.5a.75.75 0 0 1-.75-.75Z"></path></g><g id="icon-alertImportant" class="tsd-no-select"><path fill="var(--color-alert-important)" d="M0 1.75C0 .784.784 0 1.75 0h12.5C15.216 0 16 .784 16 1.75v9.5A1.75 1.75 0 0 1 14.25 13H8.06l-2.573 2.573A1.458 1.458 0 0 1 3 14.543V13H1.75A1.75 1.75 0 0 1 0 11.25Zm1.75-.25a.25.25 0 0 0-.25.25v9.5c0 .138.112.25.25.25h2a.75.75 0 0 1 .75.75v2.19l2.72-2.72a.749.749 0 0 1 .53-.22h6.5a.25.25 0 0 0 .25-.25v-9.5a.25.25 0 0 0-.25-.25Zm7 2.25v2.5a.75.75 0 0 1-1.5 0v-2.5a.75.75 0 0 1 1.5 0ZM9 9a1 1 0 1 1-2 0 1 1 0 0 1 2 0Z"></path></g><g id="icon-alertWarning" class="tsd-no-select"><path fill="var(--color-alert-warning)" d="M6.457 1.047c.659-1.234 2.427-1.234 3.086 0l6.082 11.378A1.75 1.75 0 0 1 14.082 15H1.918a1.75 1.75 0 0 1-1.543-2.575Zm1.763.707a.25.25 0 0 0-.44 0L1.698 13.132a.25.25 0 0 0 .22.368h12.164a.25.25 0 0 0 .22-.368Zm.53 3.996v2.5a.75.75 0 0 1-1.5 0v-2.5a.75.75 0 0 1 1.5 0ZM9 11a1 1 0 1 1-2 0 1 1 0 0 1 2 0Z"></path></g><g id="icon-alertCaution" class="tsd-no-select"><path fill="var(--color-alert-caution)" d="M4.47.22A.749.749 0 0 1 5 0h6c.199 0 
.389.079.53.22l4.25 4.25c.141.14.22.331.22.53v6a.749.749 0 0 1-.22.53l-4.25 4.25A.749.749 0 0 1 11 16H5a.749.749 0 0 1-.53-.22L.22 11.53A.749.749 0 0 1 0 11V5c0-.199.079-.389.22-.53Zm.84 1.28L1.5 5.31v5.38l3.81 3.81h5.38l3.81-3.81V5.31L10.69 1.5ZM8 4a.75.75 0 0 1 .75.75v3.5a.75.75 0 0 1-1.5 0v-3.5A.75.75 0 0 1 8 4Zm0 8a1 1 0 1 1 0-2 1 1 0 0 1 0 2Z"></path></g></svg>
```
--------------------------------------------------------------------------------
/docs/adrs/adr-0011-ce-mcp-compatibility.md:
--------------------------------------------------------------------------------
```markdown
---
id: adr-11-ce-mcp-compatibility
title: "ADR-011: Code Execution with MCP (CE-MCP) Compatibility"
sidebar_label: "ADR-011: CE-MCP Compatibility"
sidebar_position: 11
documcp:
last_updated: "2025-12-09T18:50:00.000Z"
last_validated: "2025-12-09T19:41:38.575Z"
auto_updated: false
update_frequency: monthly
validated_against_commit: 306567b32114502c606244ad6c2930360bcd4201
---
# ADR-011: Code Execution with MCP (CE-MCP) Compatibility
## Status
Accepted
## Context
The Model Context Protocol (MCP) ecosystem has evolved to support a new paradigm called **Code Execution with MCP (CE-MCP)** or **Code Mode**. This paradigm addresses the scalability crisis of traditional direct tool-calling by enabling LLMs to generate orchestration code that executes in secure sandboxes, achieving:
- **98.7% token reduction** for complex workflows
- **75x cost reduction** in API expenses
- **60% faster execution** through parallel operations
- **19.2% fewer API calls** via direct orchestration
### The Scalability Crisis of Direct Tool-Calling
Traditional MCP implementations suffer from:
1. **Tool Definition Overload**: Loading all tool definitions into context upfront (20,000+ tokens)
2. **Intermediate Result Bloat**: Full tool outputs fed back into context (50,000+ tokens for large files)
3. **Sequential Latency**: High-latency roundtrips for each tool call
4. **Cost Explosion**: ~27.5% cost increase to achieve 100% reliability
### The CE-MCP Solution
Code Mode transforms the LLM's role from sequential planner to code generator:
1. **Dynamic Tool Discovery**: Tools discovered on-demand via filesystem navigation
2. **Code Generation**: LLM writes complete orchestration scripts (TypeScript/Python)
3. **Sandboxed Execution**: Code runs in isolated environment (Docker, isolates)
4. **Summary Return**: Only final results return to context (not intermediate data)
**Critical Insight**: This is **client-side functionality**. MCP servers provide tools; clients handle code generation, sandboxing, and execution.
## Decision
**documcp is already CE-MCP compatible** without requiring architectural changes. Our existing stateless, tool-based architecture aligns perfectly with Code Mode requirements.
### Compatibility Validation
| CE-MCP Requirement | documcp Implementation | Status |
| ----------------------------- | ----------------------------------- | ------------- |
| Standard MCP protocol | TypeScript SDK, JSON-RPC | ✅ Compatible |
| Tool definitions with schemas | Zod-validated, comprehensive docs | ✅ Compatible |
| Stateless operation | No session state (ADR-001) | ✅ Compatible |
| Composable tools | 25+ independent tools (ADR-006) | ✅ Compatible |
| Resource system | MCP resources for results (ADR-007) | ✅ Compatible |
### What documcp Provides (Server-Side)
```typescript
// documcp exposes tools via standard MCP protocol
const tools = [
{
name: "analyze_repository",
description: "Comprehensive repository analysis...",
inputSchema: {
/* Zod-validated schema */
},
},
{
name: "recommend_ssg",
description: "Intelligent SSG recommendation...",
inputSchema: {
/* Zod-validated schema */
},
},
// ... 23+ more tools
];
```
### What Code Mode Clients Handle (Client-Side)
- **Tool Discovery**: Client converts tool definitions → filesystem structure
- **Code Generation**: LLM writes orchestration code using tool APIs
- **Sandboxing**: Client executes code in secure isolates/containers
- **Security**: AgentBound-style frameworks enforce least-privilege
- **Summary Filtering**: Client returns only final results to LLM
## Alternatives Considered
### Alternative 1: Implement Server-Side Code Generation
- **Pros**: Full control over code generation and execution
- **Cons**: Duplicates client functionality, security complexity, not standard
- **Decision**: Rejected - CE-MCP is a client-side pattern
### Alternative 2: Custom Tool Organization System
- **Pros**: Could optimize for specific client implementations
- **Cons**: Breaks MCP compatibility, client-specific customizations
- **Decision**: Rejected - standard MCP protocol works universally
### Alternative 3: Embedded Sandbox in Server
- **Pros**: Control over execution environment
- **Cons**: Massive security risk, deployment complexity, violates separation of concerns
- **Decision**: Rejected - sandboxing belongs in the client
## Consequences
### Positive
- **Zero Migration Cost**: No architectural changes required
- **Universal Compatibility**: Works with all CE-MCP clients (Claude Code, pctx, Cloudflare)
- **Future-Proof**: Architecture naturally supports Code Mode evolution
- **Validated Design**: ADR-001, ADR-006, ADR-007 decisions proven correct
- **Performance Gains**: Users automatically benefit from client-side optimizations
### Negative
- **Optimization Opportunities**: Could enhance UX with optional improvements
- **Client Dependency**: Performance relies on client implementation quality
- **Documentation Gap**: Need to document Code Mode best practices
### Risks and Mitigations
| Risk | Mitigation |
| ---------------------------- | ----------------------------------------------------- |
| Poor client implementations | Document best practices, provide examples |
| Tool description bloat | Optimize descriptions for token efficiency (optional) |
| Resource management overhead | Implement efficient caching and cleanup |
## Implementation Details
### SDK Upgrade
PR #69 upgraded MCP SDK from v0.6.0 → v1.24.0, bringing:
- **Tasks API (SEP-1686)**: Long-running agent operations
- **Better SSE handling**: Improved streaming
- **OAuth enhancements**: Client credentials flow
- **Type safety**: Zod V4 compatibility
### Testing Validation
```bash
npm run ci
# ✅ All tests pass: 91.67% coverage
# ✅ TypeScript compilation successful
# ✅ No breaking changes detected
```
### Tool Definition Best Practices for Code Mode
```typescript
// ✅ GOOD: Concise, focused tool descriptions
const goodTool = {
name: "analyze_repository",
description: "Analyze project structure, languages, and documentation",
inputSchema: analyzeRepositorySchema,
};
// ❌ AVOID: Overly verbose descriptions that bloat tokens
const verboseTool = {
name: "analyze_repository",
description:
"This tool performs a comprehensive multi-layered analysis of your repository including but not limited to project structure evaluation, language ecosystem detection, existing documentation assessment, complexity scoring, and detailed metadata extraction for the purpose of providing intelligent recommendations...",
// ... excessive verbosity
};
```
### Resource Usage for Summary-Only Results
```typescript
// Leverage MCP resources to prevent context pollution
async function handleAnalyzeRepository(params) {
const analysis = await analyzeRepo(params.path);
// Store full result as MCP resource
const resourceId = await storeResource(analysis);
// Return only summary to LLM context
return {
summary: `Analysis complete: ${analysis.fileCount} files, ${analysis.primaryLanguage}`,
resourceUri: `documcp://analysis/${resourceId}`,
// Full analysis accessible via resource, not in context
};
}
```
## Optional Optimizations
While not required, these enhancements improve Code Mode UX:
### 1. Tool Categorization Metadata
```typescript
interface ToolMetadata {
category: "analysis" | "generation" | "deployment" | "validation";
complexity: "simple" | "moderate" | "complex";
estimatedTokens: number;
suggestedUse: string;
}
```
### 2. Concise Descriptions
Audit and optimize tool descriptions for token efficiency while maintaining clarity.
### 3. Result Summarization
Implement smart summarization for large outputs:
```typescript
function summarizeResult(result: LargeResult): Summary {
if (result.size > 10_000) {
return {
summary: extractKeyMetrics(result),
details: "Full result available via resource URI",
resourceUri: storeAsResource(result),
};
}
return result; // Small results returned directly
}
```
### 4. MCP Tasks Integration
For long-running operations (e.g., full repository analysis):
```typescript
// Use new Tasks API from MCP SDK 1.24.0
server.setRequestHandler(CreateTaskRequestSchema, async (request) => {
const taskId = generateTaskId();
// Start long-running analysis
executeInBackground(async () => {
const result = await deepAnalysis(request.params);
await completeTask(taskId, result);
});
return { taskId };
});
```
## Integration with Existing ADRs
### ADR-001 (MCP Server Architecture)
**Validation**: Stateless design is perfect for Code Mode workflows.
**Update**: Add note about CE-MCP compatibility validation.
### ADR-006 (MCP Tools API Design)
**Validation**: Modular, composable tools align with code orchestration needs.
**Update**: Add recommendations for tool description optimization.
### ADR-007 (MCP Prompts and Resources)
**Validation**: Resources are ideal for summary-only result filtering.
**Update**: Emphasize resource usage for Code Mode efficiency.
## Testing Strategy
### Compatibility Testing
```typescript
describe("CE-MCP Compatibility", () => {
it("should provide standard MCP tool definitions", () => {
const tools = server.listTools();
expect(tools).toMatchSnapshot();
});
it("should support resource-based result access", async () => {
const result = await server.callTool("analyze_repository", params);
expect(result).toHaveProperty("resourceUri");
});
it("should return concise summaries for large results", async () => {
const result = await server.callTool("detect_gaps", params);
expect(result.summary.length).toBeLessThan(1000);
});
});
```
### Client Integration Testing
Test with actual Code Mode clients:
- **Claude Code**: Anthropic's CLI with built-in Code Mode
- **pctx**: Open-source self-hostable Code Mode framework
- **Cloudflare Workers AI**: Production Code Mode implementation
## Documentation Requirements
### For Users
- **CE-MCP Usage Guide**: How to use documcp with Code Mode clients
- **Example Workflows**: TypeScript/Python code examples
- **Performance Benchmarks**: Token savings and cost comparisons
### For Developers
- **Tool Design Guidelines**: Best practices for Code Mode compatibility
- **Resource Management**: Efficient resource lifecycle patterns
- **Testing Patterns**: Validating Code Mode compatibility
## Future Considerations
### Monitoring and Observability
- Track token usage patterns in Code Mode vs direct tool-calling
- Measure performance improvements in real-world workflows
- Identify optimization opportunities based on usage data
### Community Feedback
- Gather feedback from Code Mode client developers
- Iterate on tool descriptions based on real-world usage
- Contribute improvements back to MCP ecosystem
### Advanced Features
- **Streaming Results**: For real-time progress updates
- **Parallel Tool Execution**: Coordinate multi-tool workflows
- **Result Caching**: Intelligent caching strategies for repeated operations
## References
- [Anthropic: Code Execution with MCP](https://www.anthropic.com/engineering/code-execution-with-mcp)
- [Cloudflare: Code Mode - The Better Way to Use MCP](https://blog.cloudflare.com/code-mode/)
- [MCP Specification (2025-06-18)](https://modelcontextprotocol.io/specification/2025-06-18)
- [MCP SDK 1.24.0 Release Notes](https://github.com/modelcontextprotocol/typescript-sdk/releases/tag/1.24.0)
- [CE-MCP Research Findings](../CE-MCP-FINDINGS.md)
## Conclusion
documcp's architecture is inherently compatible with Code Execution with MCP (CE-MCP). The stateless, tool-based design aligns perfectly with Code Mode requirements, requiring no architectural changes. Our focus should shift to:
1. **Testing** with Code Mode clients to validate real-world usage
2. **Documentation** to guide users in Code Mode workflows
3. **Optional optimizations** to enhance user experience
The CE-MCP paradigm validates our architectural decisions and positions documcp as a best-in-class MCP server for documentation automation workflows.
```
--------------------------------------------------------------------------------
/src/memory/manager.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Memory Management Module for DocuMCP
* Implements Issue #46: Memory Management Module
*/
import { JSONLStorage, MemoryEntry } from "./storage.js";
import { EventEmitter } from "events";
/**
 * Identifies the project/repository scope that subsequent memory
 * operations are attributed to (set via MemoryManager.setContext).
 */
export interface MemoryContext {
  projectId: string; // Required project identifier; stamped onto new entries
  repository?: string; // Optional repository identifier
  branch?: string; // Optional branch name
  user?: string; // Optional user identifier
  session?: string; // Optional session identifier
}
/**
 * Options controlling how MemoryManager.search() post-processes results.
 *
 * NOTE(review): `semantic` and `fuzzy` are not read anywhere in the visible
 * code — presumably reserved for future search modes; confirm before relying.
 */
export interface MemorySearchOptions {
  semantic?: boolean; // Reserved: semantic search mode (unused here)
  fuzzy?: boolean; // Reserved: fuzzy matching mode (unused here)
  sortBy?: "relevance" | "timestamp" | "type"; // Sort order for results
  groupBy?: "type" | "project" | "date"; // Grouping applied after sorting
}
export class MemoryManager extends EventEmitter {
private storage: JSONLStorage;
private context: MemoryContext | null = null;
private cache: Map<string, MemoryEntry>;
private readonly maxCacheSize = 200; // Reduced cache size for better memory efficiency
  /**
   * @param storageDir - Optional directory for the JSONL backing store;
   *   passed straight through to JSONLStorage.
   */
  constructor(storageDir?: string) {
    super();
    this.storage = new JSONLStorage(storageDir);
    this.cache = new Map();
  }
  /** Initialize the backing storage and emit "initialized" when ready. */
  async initialize(): Promise<void> {
    await this.storage.initialize();
    this.emit("initialized");
  }
  /**
   * Set the active project context. Subsequent remember()/search() calls
   * use it to stamp and scope entries. Emits "context-changed".
   */
  setContext(context: MemoryContext): void {
    this.context = context;
    this.emit("context-changed", context);
  }
async remember(
type: MemoryEntry["type"],
data: Record<string, any>,
metadata?: Partial<MemoryEntry["metadata"]>,
): Promise<MemoryEntry> {
const entry = await this.storage.append({
type,
timestamp: new Date().toISOString(),
data,
metadata: {
...metadata,
projectId: this.context?.projectId,
repository: this.context?.repository || metadata?.repository,
},
});
this.addToCache(entry);
this.emit("memory-created", entry);
return entry;
}
async recall(id: string): Promise<MemoryEntry | null> {
if (this.cache.has(id)) {
return this.cache.get(id)!;
}
const entry = await this.storage.get(id);
if (entry) {
this.addToCache(entry);
}
return entry;
}
async search(
query: string | Partial<MemoryEntry["metadata"]>,
options?: MemorySearchOptions,
): Promise<MemoryEntry[]> {
let filter: any = {};
if (typeof query === "string") {
// Text-based search - search in multiple fields
// Try to match projectId first, then tags
const results: MemoryEntry[] = [];
// Search by projectId
const projectResults = await this.storage.query({ projectId: query });
results.push(...projectResults);
// Search by tags (excluding already found entries)
const tagResults = await this.storage.query({ tags: [query] });
const existingIds = new Set(results.map((r) => r.id));
results.push(...tagResults.filter((r) => !existingIds.has(r.id)));
// Apply sorting and grouping if requested
let finalResults = results;
if (options?.sortBy) {
finalResults = this.sortResults(finalResults, options.sortBy);
}
if (options?.groupBy) {
return this.groupResults(finalResults, options.groupBy);
}
return finalResults;
} else {
filter = { ...query };
}
if (this.context) {
filter.projectId = filter.projectId || this.context.projectId;
filter.repository = filter.repository || this.context.repository;
}
let results = await this.storage.query(filter);
if (options?.sortBy) {
results = this.sortResults(results, options.sortBy);
}
if (options?.groupBy) {
return this.groupResults(results, options.groupBy);
}
return results;
}
  /**
   * Replace an existing entry's fields, keeping its id and refreshing its
   * timestamp. Implemented as delete + re-append against the JSONL store.
   *
   * NOTE(review): the delete and append are not atomic — if append fails
   * after the delete succeeds, the entry is lost. Confirm whether
   * JSONLStorage offers a safer in-place update.
   *
   * @param id - Id of the entry to update
   * @param updates - Partial fields to merge over the existing entry
   * @returns The re-appended entry, or null if `id` was not found
   */
  async update(
    id: string,
    updates: Partial<MemoryEntry>,
  ): Promise<MemoryEntry | null> {
    const existing = await this.recall(id);
    if (!existing) return null;
    const updated: MemoryEntry = {
      ...existing,
      ...updates,
      id: existing.id, // id is immutable even if caller included one in updates
      timestamp: new Date().toISOString(), // always refreshed, overriding updates
    };
    await this.storage.delete(id);
    const newEntry = await this.storage.append(updated);
    this.cache.delete(id);
    this.addToCache(newEntry);
    this.emit("memory-updated", newEntry);
    return newEntry;
  }
async forget(id: string): Promise<boolean> {
const result = await this.storage.delete(id);
if (result) {
this.cache.delete(id);
this.emit("memory-deleted", id);
}
return result;
}
/**
 * Collect entries related to the given one along three axes:
 * shared project, same entry type, and overlapping tags.
 *
 * Candidates are deduplicated by id (first occurrence wins positionally)
 * and the combined list is capped at `limit`.
 *
 * @param entry - the reference entry; it is never included in the result
 * @param limit - maximum number of related entries to return
 */
async getRelated(
  entry: MemoryEntry,
  limit: number = 10,
): Promise<MemoryEntry[]> {
  const byId = new Map<string, MemoryEntry>();
  const collect = (candidates: MemoryEntry[]): void => {
    for (const candidate of candidates) {
      if (candidate.id !== entry.id) {
        byId.set(candidate.id, candidate);
      }
    }
  };
  // Axis 1: entries from the same project
  if (entry.metadata.projectId) {
    collect(await this.search({ projectId: entry.metadata.projectId }));
  }
  // Axis 2: entries of the same type
  collect(await this.storage.query({ type: entry.type, limit: limit * 2 }));
  // Axis 3: entries sharing at least one tag
  if (entry.metadata.tags && entry.metadata.tags.length > 0) {
    collect(
      await this.storage.query({ tags: entry.metadata.tags, limit: limit * 2 }),
    );
  }
  return Array.from(byId.values()).slice(0, limit);
}
/**
 * Summarize stored memories: usage patterns, derived insight strings,
 * and raw storage statistics, optionally restricted to a time range.
 *
 * @param timeRange - optional ISO start/end bounds for the query
 */
async analyze(timeRange?: { start: string; end: string }): Promise<{
  patterns: Record<string, any>;
  insights: string[];
  statistics: any;
}> {
  const statistics = await this.storage.getStatistics();
  const entries = await this.storage.query({
    startDate: timeRange?.start,
    endDate: timeRange?.end,
  });
  const patterns = this.extractPatterns(entries);
  return {
    patterns,
    insights: this.generateInsights(patterns, statistics),
    statistics,
  };
}
/**
 * Derive aggregate usage patterns from a set of memory entries:
 * SSG frequency, deployment success/failure counts, and activity
 * distribution by hour of day. (`projectTypes` is reserved and
 * currently left empty, matching the original contract.)
 */
private extractPatterns(memories: MemoryEntry[]): Record<string, any> {
  const tally = (bucket: Record<string, number>, key: string | number) => {
    bucket[key] = (bucket[key] || 0) + 1;
  };
  const mostCommonSSG: Record<string, number> = {};
  const timeDistribution: Record<string, number> = {};
  const deploymentSuccess = { success: 0, failed: 0 };
  for (const memory of memories) {
    // SSG usage frequency
    if (memory.metadata.ssg) {
      tally(mostCommonSSG, memory.metadata.ssg);
    }
    // Deployment outcomes (only "success"/"failed" are counted)
    if (memory.type === "deployment") {
      if (memory.data.status === "success") {
        deploymentSuccess.success += 1;
      } else if (memory.data.status === "failed") {
        deploymentSuccess.failed += 1;
      }
    }
    // Activity by local hour of day
    tally(timeDistribution, new Date(memory.timestamp).getHours());
  }
  return {
    mostCommonSSG,
    projectTypes: {},
    deploymentSuccess,
    timeDistribution,
  };
}
/**
 * Turn extracted patterns plus storage statistics into human-readable
 * insight strings (top SSG, deployment success rate, peak activity
 * hour, and storage footprint).
 */
private generateInsights(patterns: any, stats: any): string[] {
  const insights: string[] = [];
  // Highest-count [key, value] pair of a tally object.
  const topEntry = (counts: Record<string, number>) =>
    Object.entries(counts).sort(([, a]: any, [, b]: any) => b - a)[0];
  // Which SSG shows up most often across remembered projects
  if (Object.keys(patterns.mostCommonSSG).length > 0) {
    const [name, count] = topEntry(patterns.mostCommonSSG);
    insights.push(`Most frequently used SSG: ${name} (${count} projects)`);
  }
  // Overall deployment success rate
  const { success, failed } = patterns.deploymentSuccess;
  const totalDeployments = success + failed;
  if (totalDeployments > 0) {
    const successRate = ((success / totalDeployments) * 100).toFixed(1);
    insights.push(`Deployment success rate: ${successRate}%`);
  }
  // Hour of day with the most recorded activity
  if (Object.keys(patterns.timeDistribution).length > 0) {
    const [hour] = topEntry(patterns.timeDistribution);
    insights.push(`Peak activity hour: ${hour}:00`);
  }
  // Storage footprint summary
  const sizeMB = (stats.totalSize / 1024 / 1024).toFixed(2);
  insights.push(
    `Total memory storage: ${sizeMB} MB across ${stats.totalEntries} entries`,
  );
  return insights;
}
/**
 * Return a sorted copy of the results.
 *
 * "timestamp" sorts newest first; "type" sorts alphabetically by entry
 * type; "relevance" (and any other value) currently returns the results
 * unchanged, since no relevance score is computed at this layer.
 *
 * Fix: previously this sorted the caller's array in place via
 * Array.prototype.sort; it now sorts a shallow copy so callers'
 * arrays are never mutated as a side effect.
 */
private sortResults(
  results: MemoryEntry[],
  sortBy: "relevance" | "timestamp" | "type",
): MemoryEntry[] {
  switch (sortBy) {
    case "timestamp":
      return [...results].sort(
        (a, b) =>
          new Date(b.timestamp).getTime() - new Date(a.timestamp).getTime(),
      );
    case "type":
      return [...results].sort((a, b) => a.type.localeCompare(b.type));
    default:
      // "relevance" is accepted but not yet implemented.
      return results;
  }
}
/**
 * Bucket results by type, owning project, or calendar date.
 *
 * @param groupBy - "type" uses the entry type, "project" uses
 *   metadata.projectId (or "unknown"), "date" uses the YYYY-MM-DD
 *   portion of the ISO timestamp.
 * @returns a map of bucket key to the entries in that bucket
 */
private groupResults(
  results: MemoryEntry[],
  groupBy: "type" | "project" | "date",
): any {
  // Maps an entry to its bucket key for the requested grouping.
  const keyOf = (entry: MemoryEntry): string => {
    switch (groupBy) {
      case "type":
        return entry.type;
      case "project":
        return entry.metadata.projectId || "unknown";
      case "date":
        return entry.timestamp.split("T")[0];
      default:
        return "all";
    }
  };
  const grouped: Record<string, MemoryEntry[]> = {};
  for (const entry of results) {
    const key = keyOf(entry);
    if (!grouped[key]) {
      grouped[key] = [];
    }
    grouped[key].push(entry);
  }
  return grouped;
}
/**
 * Insert an entry into the bounded in-memory cache, evicting until
 * there is room.
 *
 * NOTE(review): the "shallow copy" below copies only top-level fields —
 * `data` and `metadata` still reference the same nested objects as the
 * original entry, so large payloads are not actually released; confirm
 * whether that is intended.
 * NOTE(review): `entry.tags` is read here while other methods use
 * `entry.metadata.tags` — confirm both exist on MemoryEntry.
 */
private addToCache(entry: MemoryEntry): void {
  // More aggressive cache eviction to prevent memory growth
  while (this.cache.size >= this.maxCacheSize) {
    // Maps iterate in insertion order, so this evicts the oldest entry.
    const firstKey = this.cache.keys().next().value;
    if (firstKey) {
      this.cache.delete(firstKey);
    }
  }
  // Store a shallow copy to avoid retaining large objects
  const cacheEntry = {
    id: entry.id,
    timestamp: entry.timestamp,
    type: entry.type,
    data: entry.data,
    metadata: entry.metadata,
    tags: entry.tags,
  };
  this.cache.set(entry.id, cacheEntry as MemoryEntry);
}
/**
 * Export memories as a JSON or CSV string, optionally filtered to a
 * single project.
 *
 * JSON export serializes the full entries; CSV export emits only the
 * id/timestamp/type/projectId/repository/ssg columns.
 *
 * Fix: CSV fields are now escaped per RFC 4180 — values containing a
 * comma, double quote, or newline are wrapped in quotes (with embedded
 * quotes doubled), so such values no longer corrupt the row structure.
 *
 * @param format - "json" (default) or "csv"
 * @param projectId - restrict the export to one project when provided
 */
async export(
  format: "json" | "csv" = "json",
  projectId?: string,
): Promise<string> {
  const filter = projectId ? { projectId } : {};
  const allMemories = await this.storage.query(filter);
  if (format === "json") {
    return JSON.stringify(allMemories, null, 2);
  }
  // CSV export
  const escapeCsv = (value: string): string =>
    /[",\n\r]/.test(value) ? `"${value.replace(/"/g, '""')}"` : value;
  const headers = ["id", "timestamp", "type", "projectId", "repository", "ssg"];
  const rows = allMemories.map((m: any) => [
    m.id,
    m.timestamp,
    m.type,
    m.metadata?.projectId || "",
    m.metadata?.repository || "",
    m.metadata?.ssg || "",
  ]);
  return [headers, ...rows]
    .map((row: any[]) =>
      row.map((field) => escapeCsv(String(field))).join(","),
    )
    .join("\n");
}
/**
 * Import memory entries from a JSON or CSV string.
 *
 * JSON input is parsed as an array of MemoryEntry objects; CSV input is
 * parsed with a naive comma split (no support for quoted fields, so
 * values containing commas will be mis-parsed) and reconstructed with
 * an empty `data` payload.
 *
 * NOTE(review): the JSON.parse result is not validated against the
 * MemoryEntry shape — malformed arrays are stored as-is; confirm
 * whether upstream callers guarantee well-formed input.
 *
 * @returns the number of entries written to storage
 */
async import(data: string, format: "json" | "csv" = "json"): Promise<number> {
  let entries: MemoryEntry[] = [];
  if (format === "json") {
    entries = JSON.parse(data);
  } else {
    // CSV import - simplified for now
    const lines = data.split("\n");
    const headers = lines[0].split(",");
    for (let i = 1; i < lines.length; i++) {
      const values = lines[i].split(",");
      // Skip rows whose column count doesn't match the header row
      // (this also skips a trailing empty line).
      if (values.length === headers.length) {
        entries.push({
          id: values[0],
          timestamp: values[1],
          type: values[2] as MemoryEntry["type"],
          data: {},
          metadata: {
            projectId: values[3],
            repository: values[4],
            ssg: values[5],
          },
        });
      }
    }
  }
  let imported = 0;
  for (const entry of entries) {
    // Use store to preserve the original ID when importing
    await this.storage.store(entry);
    imported++;
  }
  this.emit("import-complete", imported);
  return imported;
}
/**
 * Delete memories older than the cutoff, then compact storage.
 *
 * @param olderThan - cutoff date; defaults to 30 days before now
 * @returns the number of entries actually deleted
 */
async cleanup(olderThan?: Date): Promise<number> {
  // Default retention window: 30 days.
  const cutoff = olderThan ?? new Date(Date.now() - 30 * 24 * 60 * 60 * 1000);
  const expired = await this.storage.query({ endDate: cutoff.toISOString() });
  let removed = 0;
  for (const entry of expired) {
    const wasDeleted = await this.storage.delete(entry.id);
    if (wasDeleted) {
      removed++;
    }
  }
  // Reclaim space left behind by the deletions.
  await this.storage.compact();
  this.emit("cleanup-complete", removed);
  return removed;
}
/**
 * Shut down the manager: close the underlying storage, clear the
 * in-memory cache, and emit "closed". The instance should not be
 * used afterwards.
 */
async close(): Promise<void> {
  await this.storage.close();
  this.cache.clear();
  this.emit("closed");
}
/**
 * Get the storage instance for use with other systems
 * (e.g. export pipelines or analytics). Callers receive the same live
 * JSONLStorage instance this manager uses — mutations made through it
 * are visible to the manager.
 */
getStorage(): JSONLStorage {
  return this.storage;
}
}
export default MemoryManager;
```
--------------------------------------------------------------------------------
/tests/tools/analyze-readme.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
import { promises as fs } from "fs";
import { join } from "path";
import { analyzeReadme } from "../../src/tools/analyze-readme.js";
import { tmpdir } from "os";
describe("analyze_readme", () => {
let testDir: string;
let readmePath: string;
// Create a fresh temp project directory (with a README path) for each
// test, and remove it afterwards so tests stay fully isolated.
beforeEach(async () => {
  // Create temporary test directory
  testDir = join(tmpdir(), `test-readme-${Date.now()}`);
  await fs.mkdir(testDir, { recursive: true });
  readmePath = join(testDir, "README.md");
});
afterEach(async () => {
  // Cleanup test directory
  try {
    await fs.rm(testDir, { recursive: true, force: true });
  } catch {
    // Ignore cleanup errors
  }
});
// Required-parameter validation and missing-directory handling.
describe("input validation", () => {
  it("should require project_path parameter", async () => {
    // Missing project_path should surface as a failed result, not a throw.
    const result = await analyzeReadme({});
    expect(result.success).toBe(false);
    expect(result.error?.code).toBe("ANALYSIS_FAILED");
  });
  it("should handle non-existent project directory", async () => {
    const result = await analyzeReadme({
      project_path: "/non/existent/path",
    });
    expect(result.success).toBe(false);
    expect(result.error?.code).toBe("README_NOT_FOUND");
  });
});
// Discovery of README files under the project root.
describe("README detection", () => {
  it("should find README.md file", async () => {
    const readmeContent = `# Test Project\n\n> A simple test project\n\n## Installation\n\n\`\`\`bash\nnpm install\n\`\`\`\n\n## Usage\n\nExample usage here.`;
    await fs.writeFile(readmePath, readmeContent);
    const result = await analyzeReadme({
      project_path: testDir,
    });
    expect(result.success).toBe(true);
    expect(result.data?.analysis).toBeDefined();
  });
  it("should find alternative README file names", async () => {
    // Lowercase "readme.md" should also be discovered.
    const readmeContent = `# Test Project\n\nBasic content`;
    await fs.writeFile(join(testDir, "readme.md"), readmeContent);
    const result = await analyzeReadme({
      project_path: testDir,
    });
    expect(result.success).toBe(true);
  });
});
// Length analysis against a configurable line-count target.
describe("length analysis", () => {
  it("should analyze README length correctly", async () => {
    // 400 repeated sections comfortably exceed the 300-line target.
    const longReadme = Array(400)
      .fill("# Section\n\nContent here.\n")
      .join("\n");
    await fs.writeFile(readmePath, longReadme);
    const result = await analyzeReadme({
      project_path: testDir,
      max_length_target: 300,
    });
    expect(result.success).toBe(true);
    expect(result.data?.analysis.lengthAnalysis.exceedsTarget).toBe(true);
    expect(
      result.data?.analysis.lengthAnalysis.reductionNeeded,
    ).toBeGreaterThan(0);
  });
  it("should handle README within target length", async () => {
    // A short README should report no reduction needed.
    const shortReadme = `# Project\n\n## Quick Start\n\nInstall and use.`;
    await fs.writeFile(readmePath, shortReadme);
    const result = await analyzeReadme({
      project_path: testDir,
      max_length_target: 300,
    });
    expect(result.success).toBe(true);
    expect(result.data?.analysis.lengthAnalysis.exceedsTarget).toBe(false);
    expect(result.data?.analysis.lengthAnalysis.reductionNeeded).toBe(0);
  });
});
// Scannability scoring based on headings, lists, and spacing.
describe("structure analysis", () => {
  it("should evaluate scannability score", async () => {
    const wellStructuredReadme = `# Project Title
> Clear description
## Installation
\`\`\`bash
npm install
\`\`\`
## Usage
- Feature 1
- Feature 2
- Feature 3
### Advanced Usage
More details here.
## Contributing
Guidelines here.`;
    await fs.writeFile(readmePath, wellStructuredReadme);
    const result = await analyzeReadme({
      project_path: testDir,
    });
    expect(result.success).toBe(true);
    expect(
      result.data?.analysis.structureAnalysis.scannabilityScore,
    ).toBeGreaterThan(50);
    expect(
      result.data?.analysis.structureAnalysis.headingHierarchy.length,
    ).toBeGreaterThan(0);
  });
  it("should detect poor structure", async () => {
    // No headings and no paragraph breaks should score below 50.
    const poorStructure = `ProjectTitle\nSome text without proper headings or spacing.More text.Even more text without breaks.`;
    await fs.writeFile(readmePath, poorStructure);
    const result = await analyzeReadme({
      project_path: testDir,
    });
    expect(result.success).toBe(true);
    expect(
      result.data?.analysis.structureAnalysis.scannabilityScore,
    ).toBeLessThan(50);
  });
});
// Detection of key content sections and element counts.
describe("content analysis", () => {
  it("should detect TL;DR section", async () => {
    const readmeWithTldr = `# Project\n\n## TL;DR\n\nQuick overview here.\n\n## Details\n\nMore info.`;
    await fs.writeFile(readmePath, readmeWithTldr);
    const result = await analyzeReadme({
      project_path: testDir,
    });
    expect(result.success).toBe(true);
    expect(result.data?.analysis.contentAnalysis.hasTldr).toBe(true);
  });
  it("should detect quick start section", async () => {
    const readmeWithQuickStart = `# Project\n\n## Quick Start\n\nGet started quickly.\n\n## Installation\n\nDetailed setup.`;
    await fs.writeFile(readmePath, readmeWithQuickStart);
    const result = await analyzeReadme({
      project_path: testDir,
    });
    expect(result.success).toBe(true);
    expect(result.data?.analysis.contentAnalysis.hasQuickStart).toBe(true);
  });
  it("should count code blocks and links", async () => {
    // Fixture has two fenced code blocks and two markdown links.
    const readmeWithCodeAndLinks = `# Project
## Installation
\`\`\`bash
npm install
\`\`\`
## Usage
\`\`\`javascript
const lib = require('lib');
\`\`\`
See [documentation](https://example.com) and [API reference](https://api.example.com).`;
    await fs.writeFile(readmePath, readmeWithCodeAndLinks);
    const result = await analyzeReadme({
      project_path: testDir,
    });
    expect(result.success).toBe(true);
    expect(result.data?.analysis.contentAnalysis.codeBlockCount).toBe(2);
    expect(result.data?.analysis.contentAnalysis.linkCount).toBe(2);
  });
});
// Community health signals: contributing docs, code of conduct, badges.
describe("community readiness", () => {
  it("should detect community files", async () => {
    const readmeContent = `# Project\n\nSee [CONTRIBUTING.md](CONTRIBUTING.md) and [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md).`;
    await fs.writeFile(readmePath, readmeContent);
    // Create the referenced community files on disk.
    await fs.writeFile(
      join(testDir, "CONTRIBUTING.md"),
      "Contributing guidelines",
    );
    await fs.writeFile(
      join(testDir, "CODE_OF_CONDUCT.md"),
      "Code of conduct",
    );
    const result = await analyzeReadme({
      project_path: testDir,
    });
    expect(result.success).toBe(true);
    expect(result.data?.analysis.communityReadiness.hasContributing).toBe(
      true,
    );
    expect(result.data?.analysis.communityReadiness.hasCodeOfConduct).toBe(
      true,
    );
  });
  it("should count badges", async () => {
    // Two shield-style image links should be counted as badges.
    const readmeWithBadges = `# Project
[](https://travis-ci.org/user/repo)
[](https://badge.fury.io/js/package)
Description here.`;
    await fs.writeFile(readmePath, readmeWithBadges);
    const result = await analyzeReadme({
      project_path: testDir,
    });
    expect(result.success).toBe(true);
    expect(result.data?.analysis.communityReadiness.badgeCount).toBe(2);
  });
});
// Identification of actionable optimization opportunities.
describe("optimization opportunities", () => {
  it("should identify length reduction opportunities", async () => {
    const longReadme = Array(500)
      .fill("# Section\n\nLong content here that exceeds target length.\n")
      .join("\n");
    await fs.writeFile(readmePath, longReadme);
    const result = await analyzeReadme({
      project_path: testDir,
      max_length_target: 200,
      optimization_level: "aggressive",
    });
    expect(result.success).toBe(true);
    expect(
      result.data?.analysis.optimizationOpportunities.length,
    ).toBeGreaterThan(0);
    // An oversized README must yield at least one length_reduction item.
    expect(
      result.data?.analysis.optimizationOpportunities.some(
        (op) => op.type === "length_reduction",
      ),
    ).toBe(true);
  });
  it("should identify content enhancement opportunities", async () => {
    const basicReadme = `# Project\n\nBasic description.\n\n## Installation\n\nnpm install`;
    await fs.writeFile(readmePath, basicReadme);
    const result = await analyzeReadme({
      project_path: testDir,
      target_audience: "community_contributors",
    });
    expect(result.success).toBe(true);
    expect(
      result.data?.analysis.optimizationOpportunities.some(
        (op) => op.type === "content_enhancement",
      ),
    ).toBe(true);
  });
});
// Overall quality score: rich READMEs score high, bare ones score low.
describe("scoring system", () => {
  it("should calculate overall score", async () => {
    // Badges, TL;DR, quick start, prerequisites, usage, contributing
    // link, and license together should score above 70.
    const goodReadme = `# Excellent Project
> Clear, concise description of what this project does
[](https://travis-ci.org/user/repo)
[](https://opensource.org/licenses/MIT)
## TL;DR
This project solves X problem for Y users. Perfect for Z use cases.
## Quick Start
\`\`\`bash
npm install excellent-project
\`\`\`
\`\`\`javascript
const project = require('excellent-project');
project.doSomething();
\`\`\`
## Prerequisites
- Node.js 16+
- npm or yarn
## Usage
Detailed usage examples here.
## Contributing
See [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
## License
MIT © Author`;
    await fs.writeFile(readmePath, goodReadme);
    await fs.writeFile(join(testDir, "CONTRIBUTING.md"), "Guidelines");
    const result = await analyzeReadme({
      project_path: testDir,
    });
    expect(result.success).toBe(true);
    expect(result.data?.analysis.overallScore).toBeGreaterThan(70);
  });
  it("should provide lower score for poor README", async () => {
    const poorReadme = `ProjectName\nSome description\nInstall it\nUse it`;
    await fs.writeFile(readmePath, poorReadme);
    const result = await analyzeReadme({
      project_path: testDir,
    });
    expect(result.success).toBe(true);
    expect(result.data?.analysis.overallScore).toBeLessThan(50);
  });
});
// Recommendation output and audience-specific tailoring.
describe("recommendations and next steps", () => {
  it("should provide relevant recommendations", async () => {
    const basicReadme = `# Project\n\nDescription`;
    await fs.writeFile(readmePath, basicReadme);
    const result = await analyzeReadme({
      project_path: testDir,
      target_audience: "community_contributors",
      optimization_level: "moderate",
    });
    expect(result.success).toBe(true);
    expect(result.data?.analysis.recommendations.length).toBeGreaterThan(0);
    expect(result.data?.nextSteps.length).toBeGreaterThan(0);
  });
  it("should tailor recommendations to target audience", async () => {
    const readmeContent = `# Enterprise Tool\n\nBasic description`;
    await fs.writeFile(readmePath, readmeContent);
    const result = await analyzeReadme({
      project_path: testDir,
      target_audience: "enterprise_users",
    });
    expect(result.success).toBe(true);
    // At least one recommendation should mention enterprise concerns.
    expect(
      result.data?.analysis.recommendations.some(
        (rec) =>
          rec.includes("enterprise") ||
          rec.includes("security") ||
          rec.includes("support"),
      ),
    ).toBe(true);
  });
});
// Analysis should succeed with or without project-type indicator files.
describe("project context detection", () => {
  it("should detect JavaScript project", async () => {
    const readmeContent = `# JS Project\n\nA JavaScript project`;
    await fs.writeFile(readmePath, readmeContent);
    // package.json marks this as a JavaScript/Node project.
    await fs.writeFile(join(testDir, "package.json"), '{"name": "test"}');
    const result = await analyzeReadme({
      project_path: testDir,
    });
    expect(result.success).toBe(true);
    // Should analyze successfully with project context
    expect(result.data?.analysis).toBeDefined();
  });
  it("should handle projects without specific type indicators", async () => {
    const readmeContent = `# Generic Project\n\nSome project`;
    await fs.writeFile(readmePath, readmeContent);
    const result = await analyzeReadme({
      project_path: testDir,
    });
    expect(result.success).toBe(true);
    expect(result.data?.analysis).toBeDefined();
  });
});
});
```
--------------------------------------------------------------------------------
/tests/memory/enhanced-manager.test.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Advanced unit tests for Enhanced Memory Manager
* Tests intelligent memory management with learning and knowledge graph integration
* Part of Issue #55 - Advanced Memory Components Unit Tests
*/
import { promises as fs } from "fs";
import path from "path";
import os from "os";
import {
EnhancedMemoryManager,
EnhancedRecommendation,
IntelligentAnalysis,
} from "../../src/memory/enhanced-manager.js";
import { ProjectFeatures } from "../../src/memory/learning.js";
describe("EnhancedMemoryManager", () => {
let tempDir: string;
let enhancedManager: EnhancedMemoryManager;
// Fresh isolated temp workspace and manager instance per test.
beforeEach(async () => {
  // Create unique temp directory for each test
  tempDir = path.join(
    os.tmpdir(),
    `enhanced-memory-test-${Date.now()}-${Math.random()
      .toString(36)
      .substr(2, 9)}`,
  );
  await fs.mkdir(tempDir, { recursive: true });
  enhancedManager = new EnhancedMemoryManager(tempDir);
  await enhancedManager.initialize();
});
afterEach(async () => {
  // Cleanup temp directory
  try {
    await fs.rm(tempDir, { recursive: true, force: true });
  } catch (error) {
    // Ignore cleanup errors
  }
});
// Construction and subsystem initialization smoke tests.
describe("Enhanced Manager Initialization", () => {
  test("should create enhanced manager instance", () => {
    expect(enhancedManager).toBeDefined();
    expect(enhancedManager).toBeInstanceOf(EnhancedMemoryManager);
  });
  test("should initialize all subsystems", async () => {
    // Test that the enhanced manager properly initializes
    // The initialize() method should complete without throwing
    // (it is also expected to be safe to call a second time).
    await enhancedManager.initialize();
    expect(true).toBe(true);
  });
  test("should have learning and knowledge graph capabilities", async () => {
    // Test that we can get learning statistics (indicating learning system exists)
    const learningStats = await enhancedManager.getLearningStatistics();
    expect(learningStats).toBeDefined();
    expect(learningStats.learning).toBeDefined();
    expect(learningStats.knowledgeGraph).toBeDefined();
  });
});
// Shape and bounds of getEnhancedRecommendation() results.
describe("Enhanced Recommendations", () => {
  test("should provide enhanced recommendations with multiple data sources", async () => {
    // Set up test context
    enhancedManager.setContext({ projectId: "enhanced-rec-test" });
    // Add some historical data
    await enhancedManager.remember("analysis", {
      language: { primary: "typescript" },
      framework: { name: "react" },
      stats: { files: 150 },
    });
    await enhancedManager.remember("recommendation", {
      recommended: "docusaurus",
      confidence: 0.9,
    });
    await enhancedManager.remember("deployment", {
      status: "success",
      ssg: "docusaurus",
    });
    // Test enhanced recommendation
    const projectFeatures: ProjectFeatures = {
      language: "typescript",
      framework: "react",
      size: "medium",
      complexity: "moderate",
      hasTests: true,
      hasCI: true,
      hasDocs: false,
      isOpenSource: true,
    };
    const baseRecommendation = {
      recommended: "gatsby",
      confidence: 0.7,
      score: 0.75,
    };
    const enhanced = await enhancedManager.getEnhancedRecommendation(
      "/test/project",
      baseRecommendation,
      projectFeatures,
    );
    // Shape assertions: the result wraps the base recommendation with
    // learning, graph, insight, and provenance metadata fields.
    expect(enhanced).toBeDefined();
    expect(enhanced.baseRecommendation).toEqual(baseRecommendation);
    expect(enhanced.learningEnhanced).toBeDefined();
    expect(Array.isArray(enhanced.graphBased)).toBe(true);
    expect(Array.isArray(enhanced.insights)).toBe(true);
    expect(typeof enhanced.confidence).toBe("number");
    expect(Array.isArray(enhanced.reasoning)).toBe(true);
    expect(enhanced.metadata).toBeDefined();
    expect(typeof enhanced.metadata.usedLearning).toBe("boolean");
    expect(typeof enhanced.metadata.usedKnowledgeGraph).toBe("boolean");
  });
  test("should handle recommendations with insufficient data", async () => {
    const projectFeatures: ProjectFeatures = {
      language: "unknown",
      size: "small",
      complexity: "simple",
      hasTests: false,
      hasCI: false,
      hasDocs: false,
      isOpenSource: false,
    };
    const baseRecommendation = {
      recommended: "jekyll",
      confidence: 0.5,
    };
    const enhanced = await enhancedManager.getEnhancedRecommendation(
      "/test/project",
      baseRecommendation,
      projectFeatures,
    );
    expect(enhanced).toBeDefined();
    // Confidence must remain a valid probability even with no history.
    expect(enhanced.confidence).toBeGreaterThanOrEqual(0);
    expect(enhanced.confidence).toBeLessThanOrEqual(1);
  });
});
// getIntelligentAnalysis(): structure of patterns/predictions and
// adaptation to accumulated history.
describe("Intelligent Analysis", () => {
  test("should provide intelligent analysis with patterns and predictions", async () => {
    enhancedManager.setContext({ projectId: "intelligent-analysis-test" });
    // Add analysis data
    await enhancedManager.remember("analysis", {
      language: { primary: "python" },
      framework: { name: "flask" },
      dependencies: { count: 25 },
      testing: { hasTests: true },
      ci: { hasCI: true },
    });
    const analysisData = {
      language: "python",
      framework: "flask",
      size: "medium",
      hasTests: true,
      hasCI: true,
    };
    const intelligentAnalysis = await enhancedManager.getIntelligentAnalysis(
      "/test/project",
      analysisData,
    );
    expect(intelligentAnalysis).toBeDefined();
    expect(intelligentAnalysis.analysis).toBeDefined();
    expect(Array.isArray(intelligentAnalysis.patterns)).toBe(true);
    expect(Array.isArray(intelligentAnalysis.predictions)).toBe(true);
    expect(Array.isArray(intelligentAnalysis.recommendations)).toBe(true);
    expect(intelligentAnalysis.learningData).toBeDefined();
    expect(typeof intelligentAnalysis.learningData.similarProjects).toBe(
      "number",
    );
    expect(typeof intelligentAnalysis.learningData.confidenceLevel).toBe(
      "number",
    );
    expect(["low", "medium", "high"]).toContain(
      intelligentAnalysis.learningData.dataQuality,
    );
    // Check prediction structure
    if (intelligentAnalysis.predictions.length > 0) {
      const prediction = intelligentAnalysis.predictions[0];
      expect(["success_rate", "optimal_ssg", "potential_issues"]).toContain(
        prediction.type,
      );
      expect(typeof prediction.prediction).toBe("string");
      expect(typeof prediction.confidence).toBe("number");
    }
  });
  test("should adapt analysis based on historical patterns", async () => {
    enhancedManager.setContext({ projectId: "adaptive-analysis-test" });
    // Create pattern with multiple similar projects
    for (let i = 0; i < 3; i++) {
      await enhancedManager.remember("analysis", {
        language: { primary: "javascript" },
        framework: { name: "vue" },
      });
      await enhancedManager.remember("recommendation", {
        recommended: "vuepress",
        confidence: 0.8 + i * 0.05,
      });
      await enhancedManager.remember("deployment", {
        status: "success",
        ssg: "vuepress",
      });
    }
    const analysisData = {
      language: "javascript",
      framework: "vue",
      size: "small",
    };
    const analysis = await enhancedManager.getIntelligentAnalysis(
      "/test/project",
      analysisData,
    );
    // Three similar projects should lift data quality to "medium".
    expect(analysis.learningData.similarProjects).toBeGreaterThan(0);
    expect(analysis.learningData.dataQuality).toBe("medium");
  });
});
// Cross-subsystem propagation of remembered data and feedback.
describe("Memory Integration", () => {
  test("should integrate learning feedback into knowledge graph", async () => {
    enhancedManager.setContext({ projectId: "integration-test" });
    // Create initial recommendation
    const memoryEntry = await enhancedManager.remember("recommendation", {
      recommended: "hugo",
      confidence: 0.8,
      language: { primary: "go" },
    });
    // Simulate feedback by creating a deployment success record
    await enhancedManager.remember("deployment", {
      status: "success",
      ssg: "hugo",
      feedback: {
        rating: 5,
        helpful: true,
        comments: "Worked perfectly",
      },
    });
    // Verify feedback was processed
    const stats = await enhancedManager.getLearningStatistics();
    expect(stats).toBeDefined();
    expect(stats.learning).toBeDefined();
  });
  test("should synchronize data between subsystems", async () => {
    enhancedManager.setContext({ projectId: "sync-test" });
    // Add data that should propagate between systems
    await enhancedManager.remember("analysis", {
      language: { primary: "rust" },
      framework: { name: "actix" },
    });
    await enhancedManager.remember("deployment", {
      status: "success",
      ssg: "mdbook",
    });
    // The subsystems should automatically sync through the enhanced manager
    // Verify data exists in both systems
    const learningStats = await enhancedManager.getLearningStatistics();
    expect(learningStats).toBeDefined();
    expect(learningStats.learning).toBeDefined();
    expect(learningStats.knowledgeGraph).toBeDefined();
    expect(learningStats.combined).toBeDefined();
  });
});
// Concurrency behavior and system-maturity reporting.
describe("Performance and Optimization", () => {
  test("should handle concurrent enhanced operations", async () => {
    enhancedManager.setContext({ projectId: "concurrent-enhanced-test" });
    // Five recommendation requests issued in parallel must all resolve.
    const operations = Array.from({ length: 5 }, async (_, i) => {
      const projectFeatures: ProjectFeatures = {
        language: "go",
        size: "medium",
        complexity: "moderate",
        hasTests: true,
        hasCI: true,
        hasDocs: true,
        isOpenSource: true,
      };
      const baseRecommendation = {
        recommended: "hugo",
        confidence: 0.8 + i * 0.02,
      };
      return enhancedManager.getEnhancedRecommendation(
        "/test/project",
        baseRecommendation,
        projectFeatures,
      );
    });
    const results = await Promise.all(operations);
    expect(results.length).toBe(5);
    results.forEach((result) => {
      expect(result).toBeDefined();
      expect(result.confidence).toBeGreaterThanOrEqual(0);
    });
  });
  test("should provide optimization insights", async () => {
    enhancedManager.setContext({ projectId: "optimization-test" });
    // Add some data
    await enhancedManager.remember("analysis", { performanceTest: true });
    // Test learning statistics as a proxy for optimization insights
    const stats = await enhancedManager.getLearningStatistics();
    expect(stats).toBeDefined();
    expect(stats.combined).toBeDefined();
    expect(typeof stats.combined.systemMaturity).toBe("string");
    expect(["nascent", "developing", "mature"]).toContain(
      stats.combined.systemMaturity,
    );
  });
});
// Graceful degradation on malformed input and partial subsystem failure.
describe("Error Handling and Edge Cases", () => {
  test("should handle malformed input gracefully", async () => {
    const malformedFeatures = {
      language: null,
      size: "invalid" as any,
      complexity: undefined as any,
    };
    const malformedRecommendation = {
      recommended: "",
      confidence: -1,
    };
    // Should not throw, but handle gracefully
    const result = await enhancedManager.getEnhancedRecommendation(
      "/test/project",
      malformedRecommendation,
      malformedFeatures as any,
    );
    expect(result).toBeDefined();
    // An out-of-range input confidence must still yield a [0, 1] result.
    expect(result.confidence).toBeGreaterThanOrEqual(0);
    expect(result.confidence).toBeLessThanOrEqual(1);
  });
  test("should handle subsystem failures gracefully", async () => {
    // Test with partial system availability
    const projectFeatures: ProjectFeatures = {
      language: "javascript",
      size: "small",
      complexity: "simple",
      hasTests: false,
      hasCI: false,
      hasDocs: false,
      isOpenSource: true,
    };
    const baseRecommendation = {
      recommended: "gatsby",
      confidence: 0.6,
    };
    // Should work even if some subsystems have issues
    const result = await enhancedManager.getEnhancedRecommendation(
      "/test/project",
      baseRecommendation,
      projectFeatures,
    );
    expect(result).toBeDefined();
    expect(result.baseRecommendation).toEqual(baseRecommendation);
  });
});
});
```
--------------------------------------------------------------------------------
/src/utils/semantic-analyzer.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Semantic Code Analyzer (Phase 3)
*
* Provides semantic analysis of code changes using LLM integration,
* with fallback to AST-based analysis when LLM is unavailable.
*/
import { ASTAnalyzer, CodeDiff } from "./ast-analyzer.js";
import {
createLLMClient,
LLMClient,
SemanticAnalysis,
SimulationResult,
} from "./llm-client.js";
/** Options controlling how semantic analysis is performed. */
export interface SemanticAnalysisOptions {
  useLLM?: boolean; // set false to force AST-only analysis (default: true)
  confidenceThreshold?: number; // minimum LLM confidence accepted as-is (default: 0.7)
  includeASTFallback?: boolean;
  llmConfig?: {
    provider?: "deepseek" | "openai" | "anthropic" | "ollama";
    apiKey?: string;
    model?: string;
  };
}
/** Semantic analysis result enriched with provenance metadata. */
export interface EnhancedSemanticAnalysis extends SemanticAnalysis {
  analysisMode: "llm" | "ast" | "hybrid"; // which engine(s) produced the result
  astDiffs?: CodeDiff[]; // raw diffs when AST analysis was performed
  llmAvailable: boolean; // whether an LLM client existed at analysis time
  timestamp: string; // ISO timestamp of when the analysis ran
}
/** Aggregate result of validating documentation code examples. */
export interface CodeValidationResult {
  isValid: boolean;
  examples: ExampleValidation[];
  overallConfidence: number;
  requiresManualReview: boolean;
  suggestions: string[];
}
/** Validation outcome for a single documentation code example. */
export interface ExampleValidation {
  exampleCode: string;
  simulationResult: SimulationResult;
  isValid: boolean;
  issues: string[];
}
// Type alias for AST analysis result to improve readability
type ASTAnalysisOutput = {
  hasSignificantChanges: boolean;
  hasBreakingChanges: boolean;
  description: string;
  affectedSections: string[];
  confidence: number;
  diffs: CodeDiff[];
};
/**
* Semantic Analyzer with LLM integration and AST fallback
*/
export class SemanticAnalyzer {
private astAnalyzer: ASTAnalyzer;
private llmClient: LLMClient | null;
private confidenceThreshold: number;
private initialized: boolean = false;
/**
 * @param options - analysis configuration; LLM use defaults to enabled
 *   and the confidence threshold defaults to 0.7.
 */
constructor(options: SemanticAnalysisOptions = {}) {
  this.astAnalyzer = new ASTAnalyzer();
  this.confidenceThreshold = options.confidenceThreshold || 0.7;
  // Try to create LLM client if enabled (default: true)
  const useLLM = options.useLLM !== false;
  // llmClient may end up null (type allows it), in which case the
  // analyzer falls back to AST-based heuristics.
  this.llmClient = useLLM ? createLLMClient(options.llmConfig) : null;
}
/**
 * Initialize the analyzer by preparing the underlying AST analyzer.
 * analyzeSemanticImpact() calls this lazily if it has not been run.
 */
async initialize(): Promise<void> {
  await this.astAnalyzer.initialize();
  this.initialized = true;
}
/**
 * Check if LLM is available for semantic analysis.
 * True when an LLM client was successfully created in the constructor.
 */
isLLMAvailable(): boolean {
  return this.llmClient !== null;
}
/**
 * Analyze semantic impact of code changes.
 *
 * Strategy: prefer LLM analysis; if its confidence is below the
 * configured threshold, blend it with AST heuristics via
 * combineAnalyses(); if the LLM is unavailable or throws, fall back
 * to AST-only analysis.
 *
 * @param codeBefore - source text prior to the change
 * @param codeAfter - source text after the change
 * @param functionName - optional name used to label detected diffs
 * @returns analysis annotated with the mode used and a timestamp
 */
async analyzeSemanticImpact(
  codeBefore: string,
  codeAfter: string,
  functionName?: string,
): Promise<EnhancedSemanticAnalysis> {
  // Ensure analyzer is initialized before use
  if (!this.initialized) {
    await this.initialize();
  }
  const timestamp = new Date().toISOString();
  // Try LLM-based analysis first
  if (this.llmClient) {
    try {
      const llmAnalysis = await this.llmClient.analyzeCodeChange(
        codeBefore,
        codeAfter,
      );
      // If confidence is high enough, return LLM result
      if (llmAnalysis.confidence >= this.confidenceThreshold) {
        return {
          ...llmAnalysis,
          analysisMode: "llm",
          llmAvailable: true,
          timestamp,
        };
      }
      // Low confidence: combine with AST analysis
      const astAnalysis = await this.performASTAnalysis(
        codeBefore,
        codeAfter,
        functionName,
      );
      return this.combineAnalyses(llmAnalysis, astAnalysis, timestamp);
    } catch (error) {
      // LLM failed, fall back to AST (deliberately non-fatal)
      console.warn("LLM analysis failed, falling back to AST:", error);
    }
  }
  // Fallback to AST-only analysis
  const astAnalysis = await this.performASTAnalysis(
    codeBefore,
    codeAfter,
    functionName,
  );
  return {
    hasBehavioralChange: astAnalysis.hasSignificantChanges,
    breakingForExamples: astAnalysis.hasBreakingChanges,
    changeDescription: astAnalysis.description,
    affectedDocSections: astAnalysis.affectedSections,
    confidence: astAnalysis.confidence,
    analysisMode: "ast",
    astDiffs: astAnalysis.diffs,
    llmAvailable: false,
    timestamp,
  };
}
/**
 * Perform AST-based analysis (fallback mode)
 * Note: This is a simplified heuristic analysis for quick fallback.
 * For full AST parsing, use the astAnalyzer.compareASTs() method directly.
 *
 * Heuristics applied, in order: async-modifier change (major),
 * parameter-list change (breaking), return-type change (breaking),
 * and otherwise any textual difference (minor implementation change).
 * Confidence is fixed at 0.6 to reflect the heuristic nature.
 */
private async performASTAnalysis(
  codeBefore: string,
  codeAfter: string,
  functionName?: string,
): Promise<ASTAnalysisOutput> {
  // Simplified heuristic analysis for quick fallback
  // Full AST analysis would use this.astAnalyzer.analyzeFile() and compareASTs()
  const diffs: CodeDiff[] = [];
  let hasBreakingChanges = false;
  let hasSignificantChanges = false;
  // Detect function signature changes
  // NOTE(review): substring check — "async" anywhere in the snippet
  // (e.g. in a comment) also triggers this; assumes single-function
  // snippet inputs — confirm with callers.
  const beforeHasAsync = codeBefore.includes("async");
  const afterHasAsync = codeAfter.includes("async");
  if (beforeHasAsync !== afterHasAsync) {
    diffs.push({
      type: "modified",
      category: "function",
      name: functionName || "unknown",
      details: "Async modifier changed",
      impactLevel: "major",
    });
    hasSignificantChanges = true;
  }
  // Detect parameter changes (simplified)
  const beforeParams = this.extractParameters(codeBefore);
  const afterParams = this.extractParameters(codeAfter);
  if (beforeParams !== afterParams) {
    diffs.push({
      type: "modified",
      category: "function",
      name: functionName || "unknown",
      details: "Function parameters changed",
      oldSignature: beforeParams,
      newSignature: afterParams,
      impactLevel: "breaking",
    });
    hasBreakingChanges = true;
    hasSignificantChanges = true;
  }
  // Detect return type changes
  const beforeReturn = this.extractReturnType(codeBefore);
  const afterReturn = this.extractReturnType(codeAfter);
  if (beforeReturn !== afterReturn) {
    diffs.push({
      type: "modified",
      category: "function",
      name: functionName || "unknown",
      details: "Return type changed",
      impactLevel: "breaking",
    });
    hasBreakingChanges = true;
    hasSignificantChanges = true;
  }
  // Detect implementation changes
  // Any remaining textual difference is treated as a minor body edit.
  if (codeBefore !== codeAfter && diffs.length === 0) {
    diffs.push({
      type: "modified",
      category: "function",
      name: functionName || "unknown",
      details: "Implementation changed",
      impactLevel: "minor",
    });
    hasSignificantChanges = true;
  }
  const description = this.generateChangeDescription(diffs);
  const affectedSections = this.determineAffectedSections(diffs);
  return {
    hasSignificantChanges,
    hasBreakingChanges,
    description,
    affectedSections,
    confidence: 0.6, // AST analysis has moderate confidence
    diffs,
  };
}
/**
 * Extract function parameters (simplified)
 *
 * Returns the trimmed text inside the first pair of parentheses found in the
 * snippet, or an empty string when no parenthesized group exists.
 */
private extractParameters(code: string): string {
  const parenMatch = /\(([^)]*)\)/.exec(code);
  if (!parenMatch) {
    return "";
  }
  return parenMatch[1].trim();
}
/**
 * Extract return type (simplified)
 *
 * Looks for a `): Type` annotation immediately after the parameter list so
 * that parameter type annotations (e.g. `(x: number)`) are not mistaken for
 * the return type. Falls back to "void" when no annotation is found.
 */
private extractReturnType(code: string): string {
  // Anchor on the closing paren of the parameter list. The previous pattern
  // (/:\s*([^{=>\s]+)/) matched the FIRST colon in the snippet, which for a
  // typed parameter list returned the first parameter's type instead of the
  // function's return type.
  const match = code.match(/\)\s*:\s*([^{=>\s]+)/);
  return match ? match[1].trim() : "void";
}
/**
 * Generate human-readable change description
 *
 * Reports the most severe impact tier present in the diffs: breaking, then
 * major, then minor; an empty list yields a "no changes" message.
 */
private generateChangeDescription(diffs: CodeDiff[]): string {
  if (diffs.length === 0) {
    return "No significant changes detected";
  }
  // Join the details of a subset of diffs into a comma-separated list.
  const detailsOf = (items: CodeDiff[]): string =>
    items.map((d) => d.details).join(", ");

  const breaking = diffs.filter((d) => d.impactLevel === "breaking");
  if (breaking.length > 0) {
    return `Breaking changes detected: ${detailsOf(breaking)}`;
  }
  const major = diffs.filter((d) => d.impactLevel === "major");
  if (major.length > 0) {
    return `Major changes detected: ${detailsOf(major)}`;
  }
  return `Minor changes detected: ${detailsOf(diffs)}`;
}
/**
 * Determine which documentation sections are affected
 *
 * Maps diff severity and category onto documentation section names;
 * duplicates are collapsed via a Set.
 */
private determineAffectedSections(diffs: CodeDiff[]): string[] {
  const affected = new Set<string>();
  diffs.forEach((diff) => {
    // Breaking changes always touch the API reference and migration docs.
    if (diff.impactLevel === "breaking") {
      affected.add("API Reference");
      affected.add("Migration Guide");
    }
    if (diff.category === "function") {
      affected.add("API Reference");
      affected.add("Code Examples");
    }
    if (diff.category === "interface" || diff.category === "type") {
      affected.add("Type Definitions");
      affected.add("API Reference");
    }
  });
  return [...affected];
}
/**
 * Combine LLM and AST analyses for hybrid approach
 *
 * Used when the LLM result alone fell below the confidence threshold: the
 * two results are merged conservatively (a change flagged by either source
 * is reported) and their confidences are blended.
 */
private combineAnalyses(
  llmAnalysis: SemanticAnalysis,
  astAnalysis: ASTAnalysisOutput,
  timestamp: string,
): EnhancedSemanticAnalysis {
  // Merge affected sections from both analyses (deduplicated)
  const allSections = new Set([
    ...llmAnalysis.affectedDocSections,
    ...astAnalysis.affectedSections,
  ]);
  // Take the more conservative assessment: either source flagging a change
  // is enough to report it.
  const hasBehavioralChange =
    llmAnalysis.hasBehavioralChange || astAnalysis.hasSignificantChanges;
  const breakingForExamples =
    llmAnalysis.breakingForExamples || astAnalysis.hasBreakingChanges;
  // Combine descriptions
  const description = `${llmAnalysis.changeDescription}. AST analysis: ${astAnalysis.description}`;
  // Weighted average of confidences: 0.6 LLM / 0.4 AST, i.e. weighted
  // toward the LLM result (an earlier comment here incorrectly said AST).
  const confidence =
    llmAnalysis.confidence * 0.6 + astAnalysis.confidence * 0.4;
  return {
    hasBehavioralChange,
    breakingForExamples,
    changeDescription: description,
    affectedDocSections: Array.from(allSections),
    confidence,
    analysisMode: "hybrid",
    astDiffs: astAnalysis.diffs,
    llmAvailable: true,
    timestamp,
  };
}
/**
 * Validate code examples against implementation
 *
 * Simulates each example against the implementation via the LLM client and
 * aggregates the per-example results. When no LLM client is configured the
 * result flags manual review instead of guessing.
 *
 * @param examples - Code example snippets to validate
 * @param implementation - Current implementation source to validate against
 * @returns Aggregated validation result with per-example details
 */
async validateExamples(
  examples: string[],
  implementation: string,
): Promise<CodeValidationResult> {
  if (!this.llmClient) {
    return {
      isValid: true,
      examples: [],
      overallConfidence: 0,
      requiresManualReview: true,
      suggestions: ["LLM not available - manual validation required"],
    };
  }
  const validations: ExampleValidation[] = [];
  for (const example of examples) {
    try {
      const simulation = await this.llmClient.simulateExecution(
        example,
        implementation,
      );
      validations.push({
        exampleCode: example,
        simulationResult: simulation,
        isValid: simulation.matches,
        issues: simulation.matches ? [] : simulation.differences,
      });
    } catch (error) {
      // A failed simulation is recorded as an invalid example rather than
      // aborting the whole batch.
      validations.push({
        exampleCode: example,
        simulationResult: {
          success: false,
          expectedOutput: "",
          actualOutput: "",
          matches: false,
          differences: [
            `Validation failed: ${
              error instanceof Error ? error.message : "Unknown error"
            }`,
          ],
          confidence: 0,
        },
        isValid: false,
        issues: ["Validation failed"],
      });
    }
  }
  const validExamples = validations.filter((v) => v.isValid).length;
  // Guard against division by zero: an empty example list previously
  // produced NaN confidence, and `NaN < threshold` is false, which silently
  // disabled the manual-review flag.
  const overallConfidence =
    validations.length > 0
      ? validations.reduce(
          (sum, v) => sum + v.simulationResult.confidence,
          0,
        ) / validations.length
      : 0;
  const isValid = validExamples === examples.length;
  const requiresManualReview = overallConfidence < this.confidenceThreshold;
  const suggestions: string[] = [];
  if (!isValid) {
    suggestions.push(
      `${examples.length - validExamples} example(s) may be invalid`,
    );
  }
  if (requiresManualReview) {
    suggestions.push("Low confidence - manual review recommended");
  }
  return {
    isValid,
    examples: validations,
    overallConfidence,
    requiresManualReview,
    suggestions,
  };
}
/**
 * Batch analyze multiple code changes
 *
 * Changes are analyzed one at a time (not in parallel) so each underlying
 * analysis completes before the next begins; results preserve input order.
 */
async analyzeBatch(
  changes: Array<{ before: string; after: string; name?: string }>,
): Promise<EnhancedSemanticAnalysis[]> {
  const results: EnhancedSemanticAnalysis[] = [];
  for (const { before, after, name } of changes) {
    results.push(await this.analyzeSemanticImpact(before, after, name));
  }
  return results;
}
}
/**
 * Utility function to create a semantic analyzer with default configuration
 *
 * @param options - Optional analyzer configuration overrides
 * @returns A freshly constructed SemanticAnalyzer instance
 */
export function createSemanticAnalyzer(
  options?: SemanticAnalysisOptions,
): SemanticAnalyzer {
  const analyzer = new SemanticAnalyzer(options);
  return analyzer;
}
```
--------------------------------------------------------------------------------
/src/tools/setup-structure.ts:
--------------------------------------------------------------------------------
```typescript
import { promises as fs } from "fs";
import path from "path";
import { z } from "zod";
import { MCPToolResponse, formatMCPResponse } from "../types/api.js";
// Input contract for setupStructure: documentation root path, target static
// site generator, and whether to scaffold example content (defaults to true).
const inputSchema = z.object({
  path: z.string(),
  ssg: z.enum(["jekyll", "hugo", "docusaurus", "mkdocs", "eleventy"]),
  includeExamples: z.boolean().optional().default(true),
});
// Diataxis structure based on ADR-004
// Maps each Diataxis category (directory name) to its human-readable
// description and the example file created when includeExamples is enabled.
const DIATAXIS_STRUCTURE = {
  tutorials: {
    description: "Learning-oriented guides for newcomers",
    example: "getting-started.md",
  },
  "how-to": {
    description: "Task-oriented guides for specific goals",
    example: "deploy-to-production.md",
  },
  reference: {
    description: "Information-oriented technical descriptions",
    example: "api-documentation.md",
  },
  explanation: {
    description: "Understanding-oriented conceptual discussions",
    example: "architecture-overview.md",
  },
};
/**
 * Sets up the Diataxis-compliant documentation structure for a project.
 *
 * Creates a comprehensive documentation structure following the Diataxis framework,
 * organizing content into four categories: tutorials (learning-oriented), how-to
 * guides (problem-oriented), reference (information-oriented), and explanation
 * (understanding-oriented). Includes support for different static site generators
 * and optional example content.
 *
 * @param args - The input arguments for structure setup
 * @param args.path - The root path where documentation structure should be created
 * @param args.ssg - The static site generator type for structure optimization
 * @param args.includeExamples - Whether to include example content (default: true)
 *
 * @returns Promise resolving to structure setup results
 * @returns content - Array containing the setup results in MCP tool response format
 *
 * @throws {Error} When the output path is inaccessible or invalid
 * @throws {Error} When the SSG type is unsupported
 * @throws {Error} When directory structure creation fails
 *
 * @example
 * ```typescript
 * // Set up Docusaurus structure
 * const result = await setupStructure({
 *   path: "./docs",
 *   ssg: "docusaurus",
 *   includeExamples: true
 * });
 *
 * // Set up minimal Hugo structure
 * const minimal = await setupStructure({
 *   path: "./site/content",
 *   ssg: "hugo",
 *   includeExamples: false
 * });
 * ```
 *
 * @since 1.0.0
 */
export async function setupStructure(
  args: unknown,
): Promise<{ content: any[] }> {
  const startTime = Date.now();
  const { path: docsPath, ssg, includeExamples } = inputSchema.parse(args);
  try {
    const createdDirs: string[] = [];
    const createdFiles: string[] = [];
    // Create base docs directory
    await fs.mkdir(docsPath, { recursive: true });
    // Create Diataxis structure: one directory + index per category, plus an
    // optional example file.
    for (const [category, info] of Object.entries(DIATAXIS_STRUCTURE)) {
      const categoryPath = path.join(docsPath, category);
      await fs.mkdir(categoryPath, { recursive: true });
      createdDirs.push(categoryPath);
      // Create index file for category
      const indexPath = path.join(categoryPath, "index.md");
      const indexContent = generateCategoryIndex(
        category,
        info.description,
        ssg,
        includeExamples,
      );
      await fs.writeFile(indexPath, indexContent);
      createdFiles.push(indexPath);
      // Create example content if requested
      if (includeExamples) {
        const examplePath = path.join(categoryPath, info.example);
        const exampleContent = generateExampleContent(
          category,
          info.example,
          ssg,
        );
        await fs.writeFile(examplePath, exampleContent);
        createdFiles.push(examplePath);
      }
    }
    // Create root index (landing page linking the four categories)
    const rootIndexPath = path.join(docsPath, "index.md");
    const rootIndexContent = generateRootIndex(ssg);
    await fs.writeFile(rootIndexPath, rootIndexContent);
    createdFiles.push(rootIndexPath);
    const structureResult = {
      docsPath,
      ssg,
      includeExamples,
      directoriesCreated: createdDirs,
      filesCreated: createdFiles,
      diataxisCategories: Object.keys(DIATAXIS_STRUCTURE),
      totalDirectories: createdDirs.length,
      totalFiles: createdFiles.length,
    };
    const response: MCPToolResponse<typeof structureResult> = {
      success: true,
      data: structureResult,
      metadata: {
        toolVersion: "1.0.0",
        executionTime: Date.now() - startTime,
        timestamp: new Date().toISOString(),
      },
      recommendations: [
        {
          type: "info",
          title: "Diataxis Structure Created",
          description: `Successfully created ${createdDirs.length} directories and ${createdFiles.length} files`,
        },
      ],
      nextSteps: [
        {
          action: "Generate Sitemap",
          toolRequired: "manage_sitemap",
          description:
            "Create sitemap.xml as source of truth for documentation links (required for SEO)",
          priority: "high",
        },
        {
          action: "Setup GitHub Pages Deployment",
          toolRequired: "deploy_pages",
          description: "Create automated deployment workflow",
          priority: "medium",
        },
      ],
    };
    return formatMCPResponse(response);
  } catch (error) {
    const errorResponse: MCPToolResponse = {
      success: false,
      error: {
        code: "STRUCTURE_SETUP_FAILED",
        // Extract a clean message (consistent with error handling elsewhere
        // in this module) instead of relying on default Error stringification.
        message: `Failed to setup structure: ${
          error instanceof Error ? error.message : String(error)
        }`,
        resolution: "Ensure the documentation path is writable and accessible",
      },
      metadata: {
        toolVersion: "1.0.0",
        executionTime: Date.now() - startTime,
        timestamp: new Date().toISOString(),
      },
    };
    return formatMCPResponse(errorResponse);
  }
}
// Builds the index.md content for a single Diataxis category directory,
// including SSG-appropriate frontmatter and (optionally) a link to the
// category's example file.
function generateCategoryIndex(
  category: string,
  description: string,
  ssg: string,
  includeExamples: boolean = true,
): string {
  // "how-to" -> "How to": capitalize the first letter and replace the hyphen
  const title =
    category.charAt(0).toUpperCase() + category.slice(1).replace("-", " ");
  let frontmatter = "";
  switch (ssg) {
    case "docusaurus":
      // Docusaurus frontmatter uses id/sidebar_label keys
      frontmatter = `---
id: ${category}-index
title: ${title}
sidebar_label: ${title}
---\n\n`;
      break;
    case "mkdocs":
    case "jekyll":
    case "hugo":
      frontmatter = `---
title: ${title}
description: ${description}
---\n\n`;
      break;
    // eleventy (and any other SSG) gets no frontmatter
  }
  return `${frontmatter}# ${title}
${description}
## Available Guides
This section contains ${category} documentation following the Diataxis framework.
${generateDiataxisExplanation(category)}
## Contents
${
  includeExamples
    ? `- [Example: ${
        DIATAXIS_STRUCTURE[category as keyof typeof DIATAXIS_STRUCTURE].example
      }](./${
        DIATAXIS_STRUCTURE[category as keyof typeof DIATAXIS_STRUCTURE].example
      })`
    : "- Coming soon..."
}
`;
}
// Builds the body of a category's example file (e.g. getting-started.md),
// selecting a category-specific markdown template and SSG-specific frontmatter.
function generateExampleContent(
  category: string,
  filename: string,
  ssg: string,
): string {
  // "getting-started.md" -> "Getting Started": strip extension, title-case words
  const title = filename
    .replace(".md", "")
    .replace(/-/g, " ")
    .split(" ")
    .map((word) => word.charAt(0).toUpperCase() + word.slice(1))
    .join(" ");
  let frontmatter = "";
  switch (ssg) {
    case "docusaurus":
      frontmatter = `---
id: ${filename.replace(".md", "")}
title: ${title}
sidebar_label: ${title}
---\n\n`;
      break;
    default:
      // All other SSGs share a minimal title-only frontmatter
      frontmatter = `---
title: ${title}
---\n\n`;
      break;
  }
  let content = "";
  switch (category) {
    // Learning-oriented, step-by-step template
    case "tutorials":
      content = `# ${title}
This tutorial will guide you through the process step by step.
## Prerequisites
Before you begin, ensure you have:
- Requirement 1
- Requirement 2
## Step 1: Initial Setup
Start by...
## Step 2: Configuration
Next, configure...
## Step 3: Verification
Finally, verify...
## Summary
In this tutorial, you learned how to:
- Achievement 1
- Achievement 2
- Achievement 3
## Next Steps
- Explore [How-To Guides](../how-to/)
- Read the [API Reference](../reference/)`;
      break;
    // Task-oriented template with commands and troubleshooting
    case "how-to":
      content = `# ${title}
This guide shows you how to accomplish a specific task.
## Prerequisites
- Prerequisite 1
- Prerequisite 2
## Steps
### 1. Prepare your environment
\`\`\`bash
# Example command
echo "Setup environment"
\`\`\`
### 2. Execute the task
\`\`\`bash
# Main command
echo "Execute task"
\`\`\`
### 3. Verify results
\`\`\`bash
# Verification command
echo "Verify success"
\`\`\`
## Troubleshooting
If you encounter issues:
- Check condition 1
- Verify setting 2
## Related Guides
- [Another How-To Guide](./another-guide.md)
- [Reference Documentation](../reference/)`;
      break;
    // Information-oriented template: API endpoints, options, error codes
    case "reference":
      content = `# ${title}
Technical reference documentation.
## Overview
This document provides complete reference information for...
## API Endpoints
### GET /api/resource
Retrieves...
**Parameters:**
- \`param1\` (string, required): Description
- \`param2\` (number, optional): Description
**Response:**
\`\`\`json
{
"field1": "value",
"field2": 123
}
\`\`\`
### POST /api/resource
Creates...
## Configuration Options
| Option | Type | Default | Description |
|--------|------|---------|-------------|
| option1 | string | "default" | Description of option1 |
| option2 | boolean | false | Description of option2 |
## Error Codes
| Code | Description | Resolution |
|------|-------------|------------|
| E001 | Error description | How to fix |
| E002 | Error description | How to fix |`;
      break;
    // Understanding-oriented template: concepts, design decisions, trade-offs
    case "explanation":
      content = `# ${title}
This document explains the concepts and reasoning behind...
## Introduction
Understanding the architecture requires knowledge of...
## Core Concepts
### Concept 1
Explanation of the first core concept...
### Concept 2
Explanation of the second core concept...
## Design Decisions
### Why This Approach?
We chose this approach because...
### Trade-offs
The main trade-offs include:
- Trade-off 1: Benefit vs Cost
- Trade-off 2: Benefit vs Cost
## Comparison with Alternatives
| Approach | Pros | Cons |
|----------|------|------|
| Our Approach | Pro 1, Pro 2 | Con 1 |
| Alternative 1 | Pro 1 | Con 1, Con 2 |
| Alternative 2 | Pro 1, Pro 2 | Con 1 |
## Further Reading
- [Related Tutorial](../tutorials/)
- [Implementation Guide](../how-to/)`;
      break;
    // Unknown categories produce frontmatter only (content stays empty)
  }
  return `${frontmatter}${content}`;
}
// Builds the root index.md landing page that links the four Diataxis
// sections; only the frontmatter varies by SSG.
function generateRootIndex(ssg: string): string {
  let frontmatter = "";
  switch (ssg) {
    case "docusaurus":
      // sidebar_position pins the landing page to the top of the sidebar
      frontmatter = `---
id: intro
title: Documentation
sidebar_position: 1
---\n\n`;
      break;
    default:
      frontmatter = `---
title: Documentation
---\n\n`;
      break;
  }
  return `${frontmatter}# Documentation
Welcome to our documentation! This site follows the [Diataxis](https://diataxis.fr/) framework to provide clear, well-organized documentation.
## Documentation Structure
Our documentation is organized into four distinct sections:
### 📚 [Tutorials](./tutorials/)
Learning-oriented guides that take you through a process step by step. Perfect for newcomers who want to get started.
### 🔧 [How-To Guides](./how-to/)
Task-oriented recipes that help you accomplish specific goals. Ideal when you know what you want to do.
### 📖 [Reference](./reference/)
Information-oriented technical descriptions of the system. Essential when you need to look up specific details.
### 💡 [Explanation](./explanation/)
Understanding-oriented discussions that clarify and illuminate topics. Great for deepening your knowledge.
## Quick Start
New to this project? Start with our [Getting Started Tutorial](./tutorials/getting-started.md).
## Contributing
We welcome contributions to our documentation! Please see our [Contributing Guide](./how-to/contribute.md) for details.
`;
}
// Returns a short markdown blurb explaining the purpose of one Diataxis
// category (embedded in each category's index page); unknown categories
// yield an empty string.
function generateDiataxisExplanation(category: string): string {
  const explanations: Record<string, string> = {
    tutorials: `
**Tutorials** are learning-oriented and help newcomers get started:
- Take the reader through a process step by step
- Focus on learning by doing
- Ensure the reader succeeds in accomplishing something
- Build confidence through success`,
    "how-to": `
**How-To Guides** are task-oriented and help users accomplish specific goals:
- Solve specific problems
- Assume some knowledge and experience
- Provide a series of steps
- Focus on results`,
    reference: `
**Reference** documentation is information-oriented:
- Describe the machinery
- Be accurate and complete
- Focus on describing, not explaining
- Structure content for finding information`,
    explanation: `
**Explanation** documentation is understanding-oriented:
- Clarify and illuminate a topic
- Provide context and background
- Discuss alternatives and opinions
- Focus on understanding, not instruction`,
  };
  return explanations[category] || "";
}
```
--------------------------------------------------------------------------------
/tests/memory/schemas.test.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Tests for Knowledge Graph Schemas
* Phase 1: Core Knowledge Graph Integration
*/
import { describe, it, expect } from "@jest/globals";
import {
ProjectEntitySchema,
UserEntitySchema,
ConfigurationEntitySchema,
CodeFileEntitySchema,
DocumentationSectionEntitySchema,
TechnologyEntitySchema,
ProjectUsesTechnologySchema,
UserPrefersSSGSchema,
ProjectDeployedWithSchema,
SimilarToSchema,
DocumentsSchema,
ReferencesSchema,
OutdatedForSchema,
validateEntity,
validateRelationship,
isProjectEntity,
isUserEntity,
SCHEMA_METADATA,
} from "../../src/memory/schemas.js";
// Validation tests for the core knowledge-graph entity schemas: project,
// user, configuration, code file, and documentation section.
describe("Entity Schemas", () => {
  describe("ProjectEntitySchema", () => {
    it("should validate a valid project entity", () => {
      const validProject = {
        name: "test-project",
        path: "/path/to/project",
        technologies: ["typescript", "javascript"],
        size: "medium" as const,
        lastAnalyzed: new Date().toISOString(),
        analysisCount: 1,
        hasTests: true,
        hasCI: true,
        hasDocs: false,
        totalFiles: 100,
      };
      const result = ProjectEntitySchema.parse(validProject);
      expect(result).toBeDefined();
      expect(result.name).toBe("test-project");
      expect(result.technologies).toHaveLength(2);
    });
    it("should apply defaults for optional fields", () => {
      // Only required fields supplied; schema defaults fill in the rest
      const minimalProject = {
        name: "minimal-project",
        path: "/path/to/minimal",
        lastAnalyzed: new Date().toISOString(),
      };
      const result = ProjectEntitySchema.parse(minimalProject);
      expect(result.technologies).toEqual([]);
      expect(result.size).toBe("medium");
      expect(result.analysisCount).toBe(0);
      expect(result.hasTests).toBe(false);
    });
    it("should reject invalid size values", () => {
      const invalidProject = {
        name: "test-project",
        path: "/path/to/project",
        size: "huge", // Invalid: not in the small/medium/large enum
        lastAnalyzed: new Date().toISOString(),
      };
      expect(() => ProjectEntitySchema.parse(invalidProject)).toThrow();
    });
    it("should require name and path", () => {
      const missingName = {
        path: "/path/to/project",
        lastAnalyzed: new Date().toISOString(),
      };
      expect(() => ProjectEntitySchema.parse(missingName)).toThrow();
    });
  });
  describe("UserEntitySchema", () => {
    it("should validate a valid user entity", () => {
      const validUser = {
        userId: "user123",
        expertiseLevel: "intermediate" as const,
        preferredTechnologies: ["react", "typescript"],
        preferredSSGs: ["docusaurus"],
        lastActive: new Date().toISOString(),
        createdAt: new Date().toISOString(),
      };
      const result = UserEntitySchema.parse(validUser);
      expect(result.userId).toBe("user123");
      expect(result.expertiseLevel).toBe("intermediate");
    });
    it("should apply defaults", () => {
      const minimalUser = {
        userId: "user456",
        lastActive: new Date().toISOString(),
        createdAt: new Date().toISOString(),
      };
      const result = UserEntitySchema.parse(minimalUser);
      expect(result.expertiseLevel).toBe("intermediate");
      expect(result.preferredTechnologies).toEqual([]);
      expect(result.documentationStyle).toBe("comprehensive");
    });
  });
  describe("ConfigurationEntitySchema", () => {
    it("should validate a valid configuration entity", () => {
      const validConfig = {
        ssg: "docusaurus" as const,
        settings: { theme: "classic" },
        deploymentSuccessRate: 0.95,
        usageCount: 10,
        lastUsed: new Date().toISOString(),
      };
      const result = ConfigurationEntitySchema.parse(validConfig);
      expect(result.ssg).toBe("docusaurus");
      expect(result.deploymentSuccessRate).toBe(0.95);
    });
    it("should reject invalid SSG values", () => {
      const invalidConfig = {
        ssg: "gatsby", // Not in enum
        lastUsed: new Date().toISOString(),
      };
      expect(() => ConfigurationEntitySchema.parse(invalidConfig)).toThrow();
    });
    it("should validate success rate bounds", () => {
      // Success rate must lie in [0, 1]
      const invalidRate = {
        ssg: "jekyll" as const,
        deploymentSuccessRate: 1.5, // > 1.0
        lastUsed: new Date().toISOString(),
      };
      expect(() => ConfigurationEntitySchema.parse(invalidRate)).toThrow();
    });
  });
  describe("CodeFileEntitySchema", () => {
    it("should validate a valid code file entity", () => {
      const validCodeFile = {
        path: "/src/index.ts",
        language: "typescript",
        functions: ["main", "helper"],
        classes: ["App"],
        dependencies: ["express", "zod"],
        lastModified: new Date().toISOString(),
        contentHash: "abc123",
        linesOfCode: 150,
      };
      const result = CodeFileEntitySchema.parse(validCodeFile);
      expect(result.language).toBe("typescript");
      expect(result.functions).toHaveLength(2);
    });
  });
  describe("DocumentationSectionEntitySchema", () => {
    it("should validate a valid documentation section", () => {
      const validSection = {
        filePath: "/docs/api.md",
        sectionTitle: "API Reference",
        contentHash: "def456",
        referencedCodeFiles: ["/src/api.ts"],
        lastUpdated: new Date().toISOString(),
        category: "reference" as const,
      };
      const result = DocumentationSectionEntitySchema.parse(validSection);
      expect(result.category).toBe("reference");
      expect(result.referencedCodeFiles).toHaveLength(1);
    });
  });
});
// Validation tests for relationship schemas connecting entities in the
// knowledge graph (technology usage, deployments, outdated documentation).
describe("Relationship Schemas", () => {
  describe("ProjectUsesTechnologySchema", () => {
    it("should validate a valid project-technology relationship", () => {
      const validRelationship = {
        type: "project_uses_technology" as const,
        weight: 0.8,
        confidence: 1.0,
        createdAt: new Date().toISOString(),
        lastUpdated: new Date().toISOString(),
        fileCount: 50,
        percentage: 80,
        isPrimary: true,
        metadata: {},
      };
      const result = ProjectUsesTechnologySchema.parse(validRelationship);
      expect(result.type).toBe("project_uses_technology");
      expect(result.isPrimary).toBe(true);
    });
  });
  describe("ProjectDeployedWithSchema", () => {
    it("should validate a successful deployment relationship", () => {
      const validDeployment = {
        type: "project_deployed_with" as const,
        weight: 1.0,
        confidence: 1.0,
        createdAt: new Date().toISOString(),
        lastUpdated: new Date().toISOString(),
        success: true,
        timestamp: new Date().toISOString(),
        buildTime: 45,
        deploymentUrl: "https://example.com",
        metadata: {},
      };
      const result = ProjectDeployedWithSchema.parse(validDeployment);
      expect(result.success).toBe(true);
      expect(result.buildTime).toBe(45);
    });
    it("should validate a failed deployment relationship", () => {
      // Failed deployments carry an errorMessage instead of buildTime/URL
      const failedDeployment = {
        type: "project_deployed_with" as const,
        weight: 0.5,
        confidence: 1.0,
        createdAt: new Date().toISOString(),
        lastUpdated: new Date().toISOString(),
        success: false,
        timestamp: new Date().toISOString(),
        errorMessage: "Build failed",
        metadata: {},
      };
      const result = ProjectDeployedWithSchema.parse(failedDeployment);
      expect(result.success).toBe(false);
      expect(result.errorMessage).toBe("Build failed");
    });
  });
  describe("OutdatedForSchema", () => {
    it("should validate an outdated documentation relationship", () => {
      const validOutdated = {
        type: "outdated_for" as const,
        weight: 1.0,
        confidence: 0.9,
        createdAt: new Date().toISOString(),
        lastUpdated: new Date().toISOString(),
        detectedAt: new Date().toISOString(),
        changeType: "function_signature" as const,
        severity: "high" as const,
        autoFixable: false,
        metadata: {},
      };
      const result = OutdatedForSchema.parse(validOutdated);
      expect(result.changeType).toBe("function_signature");
      expect(result.severity).toBe("high");
    });
  });
});
// Tests for the generic validateEntity/validateRelationship helpers, which
// dispatch on the discriminating `type` field.
describe("Validation Functions", () => {
  describe("validateEntity", () => {
    it("should validate a complete entity", () => {
      const entity = {
        type: "project",
        name: "test-project",
        path: "/test",
        lastAnalyzed: new Date().toISOString(),
      };
      const result = validateEntity(entity);
      expect(result).toBeDefined();
      expect(result.type).toBe("project");
    });
    it("should throw on invalid entity", () => {
      // Unknown discriminator value must be rejected
      const invalidEntity = {
        type: "invalid_type",
        name: "test",
      };
      expect(() => validateEntity(invalidEntity)).toThrow();
    });
  });
  describe("validateRelationship", () => {
    it("should validate a complete relationship", () => {
      const relationship = {
        type: "similar_to",
        weight: 0.85,
        confidence: 0.9,
        createdAt: new Date().toISOString(),
        lastUpdated: new Date().toISOString(),
        similarityScore: 0.85,
        sharedTechnologies: ["typescript"],
        metadata: {},
      };
      const result = validateRelationship(relationship);
      expect(result).toBeDefined();
    });
  });
});
// Tests for the runtime type-guard helpers that narrow entity unions by
// their `type` discriminator.
describe("Type Guards", () => {
  describe("isProjectEntity", () => {
    it("should return true for project entities", () => {
      const entity = {
        type: "project" as const,
        name: "test",
        path: "/test",
        technologies: ["typescript"],
        size: "medium" as const,
        lastAnalyzed: new Date().toISOString(),
        analysisCount: 1,
        hasTests: false,
        hasCI: false,
        hasDocs: false,
        totalFiles: 10,
      };
      expect(isProjectEntity(entity)).toBe(true);
    });
    it("should return false for non-project entities", () => {
      // A fully-formed user entity must not be classified as a project
      const entity = {
        type: "user" as const,
        userId: "user123",
        expertiseLevel: "intermediate" as const,
        preferredTechnologies: [],
        preferredSSGs: [],
        documentationStyle: "comprehensive" as const,
        preferredDiataxisCategories: [],
        projectCount: 0,
        lastActive: new Date().toISOString(),
        createdAt: new Date().toISOString(),
      };
      expect(isProjectEntity(entity as any)).toBe(false);
    });
  });
  describe("isUserEntity", () => {
    it("should return true for user entities", () => {
      const entity = {
        type: "user" as const,
        userId: "user123",
        expertiseLevel: "intermediate" as const,
        preferredTechnologies: [],
        preferredSSGs: [],
        documentationStyle: "comprehensive" as const,
        preferredDiataxisCategories: [],
        projectCount: 0,
        lastActive: new Date().toISOString(),
        createdAt: new Date().toISOString(),
      };
      expect(isUserEntity(entity)).toBe(true);
    });
  });
});
// Tests pinning the SCHEMA_METADATA registry: version string plus the
// exhaustive lists of entity and relationship type names.
describe("Schema Metadata", () => {
  it("should have correct version", () => {
    expect(SCHEMA_METADATA.version).toBe("1.1.0");
  });
  it("should list all entity types", () => {
    expect(SCHEMA_METADATA.entityTypes).toContain("project");
    expect(SCHEMA_METADATA.entityTypes).toContain("user");
    expect(SCHEMA_METADATA.entityTypes).toContain("configuration");
    expect(SCHEMA_METADATA.entityTypes).toContain("code_file");
    expect(SCHEMA_METADATA.entityTypes).toContain(
      "documentation_freshness_event",
    );
    // New entity types added in v1.1.0 for documentation example tracking
    expect(SCHEMA_METADATA.entityTypes).toContain("documentation_example");
    expect(SCHEMA_METADATA.entityTypes).toContain("example_validation");
    expect(SCHEMA_METADATA.entityTypes).toContain("call_graph");
    // Length check guards against silently adding/removing types
    expect(SCHEMA_METADATA.entityTypes).toHaveLength(11);
  });
  it("should list all relationship types", () => {
    expect(SCHEMA_METADATA.relationshipTypes).toContain(
      "project_uses_technology",
    );
    expect(SCHEMA_METADATA.relationshipTypes).toContain("outdated_for");
    expect(SCHEMA_METADATA.relationshipTypes).toContain("project_has_sitemap");
    expect(SCHEMA_METADATA.relationshipTypes).toContain(
      "project_has_freshness_event",
    );
    // New relationship types added in v1.1.0 for documentation example tracking
    expect(SCHEMA_METADATA.relationshipTypes).toContain("has_example");
    expect(SCHEMA_METADATA.relationshipTypes).toContain("validates");
    expect(SCHEMA_METADATA.relationshipTypes).toContain("has_call_graph");
    expect(SCHEMA_METADATA.relationshipTypes).toHaveLength(16);
  });
});
```
--------------------------------------------------------------------------------
/docs/reference/configuration.md:
--------------------------------------------------------------------------------
```markdown
---
documcp:
last_updated: "2025-11-20T00:46:21.960Z"
last_validated: "2025-12-09T19:41:38.591Z"
auto_updated: false
update_frequency: monthly
validated_against_commit: 306567b32114502c606244ad6c2930360bcd4201
---
# Configuration Options
This reference guide covers all configuration options available in DocuMCP and the static site generators it supports.
## DocuMCP Configuration
### Environment Variables
DocuMCP supports the following environment variables:
| Variable | Default | Description |
| --------------------- | ----------------- | ----------------------------------- |
| `DOCUMCP_STORAGE_DIR` | `.documcp/memory` | Directory for memory system storage |
| `DEBUG` | `false` | Enable debug logging |
| `NODE_ENV` | `development` | Node.js environment |
### Memory System Configuration
The memory system stores analysis results and learning patterns:
```bash
# Default storage location (relative to project)
.documcp/memory/
├── analysis/ # Repository analysis results
├── recommendations/ # SSG recommendations
├── patterns/ # Learning patterns
└── metadata.json # System metadata
```
#### Memory Cleanup Options
```javascript
// Cleanup configuration
{
"daysToKeep": 30, // Days to retain memories
"maxEntries": 1000, // Maximum memory entries
"compressionEnabled": true
}
```
## Static Site Generator Configurations
### Jekyll Configuration
**\_config.yml:**
```yaml
title: "Your Documentation Site"
description: "Project documentation"
baseurl: "/repository-name"
url: "https://username.github.io"
markdown: kramdown
highlighter: rouge
theme: minima
plugins:
- jekyll-feed
- jekyll-sitemap
- jekyll-seo-tag
collections:
tutorials:
output: true
permalink: /:collection/:name/
how-to-guides:
output: true
permalink: /:collection/:name/
defaults:
- scope:
path: ""
values:
layout: "default"
- scope:
path: "_tutorials"
values:
layout: "tutorial"
```
**Gemfile:**
```ruby
source 'https://rubygems.org'
gem 'jekyll', '~> 4.3.0'
gem 'jekyll-feed', '~> 0.17'
gem 'jekyll-sitemap', '~> 1.4'
gem 'jekyll-seo-tag', '~> 2.8'
gem 'minima', '~> 2.5'
group :jekyll_plugins do
gem 'jekyll-timeago', '~> 0.13.1'
end
```
### Hugo Configuration
**config.yml:**
```yaml
baseURL: "https://username.github.io/repository-name"
languageCode: "en-us"
title: "Documentation Site"
theme: "docsy"
params:
github_repo: "https://github.com/username/repository"
github_branch: "main"
edit_page: true
search:
enabled: true
menu:
main:
- name: "Tutorials"
url: "/tutorials/"
weight: 10
- name: "How-to Guides"
url: "/how-to/"
weight: 20
- name: "Reference"
url: "/reference/"
weight: 30
- name: "Explanation"
url: "/explanation/"
weight: 40
markup:
goldmark:
renderer:
unsafe: true
highlight:
style: github
lineNos: true
codeFences: true
security:
funcs:
getenv:
- ^HUGO_
- ^CI$
```
**go.mod:**
```go
module github.com/username/repository
go 1.19
require (
github.com/google/docsy v0.6.0 // indirect
github.com/google/docsy/dependencies v0.6.0 // indirect
)
```
### Docusaurus Configuration
**docusaurus.config.js:**
For GitHub Pages deployment, ensure you configure `organizationName`, `projectName`, and `deploymentBranch`:
```javascript
const config = {
title: "Documentation Site",
tagline: "Comprehensive project documentation",
url: "https://yourusername.github.io", // Your GitHub Pages URL
baseUrl: "/repository-name/", // Repository name (or "/" for user/organization pages)
organizationName: "yourusername", // GitHub username or organization
projectName: "repository-name", // Repository name
deploymentBranch: "gh-pages", // Branch for deployment (default: gh-pages)
trailingSlash: false, // Set to true if using trailing slashes
onBrokenLinks: "throw",
onBrokenMarkdownLinks: "warn",
i18n: {
defaultLocale: "en",
locales: ["en"],
},
presets: [
[
"classic",
{
docs: {
routeBasePath: "/",
sidebarPath: require.resolve("./sidebars.js"),
editUrl: "https://github.com/username/repository/tree/main/",
},
theme: {
customCss: require.resolve("./src/css/custom.css"),
},
gtag: {
trackingID: "G-XXXXXXXXXX",
anonymizeIP: true,
},
},
],
],
themeConfig: {
navbar: {
title: "Documentation",
items: [
{
type: "doc",
docId: "tutorials/index",
position: "left",
label: "Tutorials",
},
{
type: "doc",
docId: "how-to/index",
position: "left",
label: "How-to",
},
{
type: "doc",
docId: "reference/index",
position: "left",
label: "Reference",
},
{
href: "https://github.com/username/repository",
label: "GitHub",
position: "right",
},
],
},
footer: {
style: "dark",
copyright: `Copyright © ${new Date().getFullYear()} Your Project Name.`,
},
prism: {
theme: require("prism-react-renderer/themes/github"),
darkTheme: require("prism-react-renderer/themes/dracula"),
},
},
};
module.exports = config;
```
**sidebars.js:**
```javascript
const sidebars = {
tutorialSidebar: [
"index",
{
type: "category",
label: "Tutorials",
items: [
"tutorials/getting-started",
"tutorials/first-deployment",
"tutorials/development-setup",
],
},
{
type: "category",
label: "How-to Guides",
items: [
"how-to/prompting-guide",
"how-to/repository-analysis",
"how-to/github-pages-deployment",
"how-to/troubleshooting",
],
},
{
type: "category",
label: "Reference",
items: [
"reference/mcp-tools",
"reference/configuration",
"reference/cli",
],
},
],
};
module.exports = sidebars;
```
### MkDocs Configuration
**mkdocs.yml:**
```yaml
site_name: Documentation Site
site_url: https://username.github.io/repository-name
site_description: Comprehensive project documentation
repo_name: username/repository
repo_url: https://github.com/username/repository
edit_uri: edit/main/docs/
theme:
name: material
palette:
- scheme: default
primary: blue
accent: blue
toggle:
icon: material/brightness-7
name: Switch to dark mode
- scheme: slate
primary: blue
accent: blue
toggle:
icon: material/brightness-4
name: Switch to light mode
features:
- navigation.tabs
- navigation.sections
- navigation.expand
- navigation.top
- search.highlight
- content.code.copy
nav:
- Home: index.md
- Tutorials:
- tutorials/index.md
- Getting Started: tutorials/getting-started.md
- First Deployment: tutorials/first-deployment.md
- How-to Guides:
- how-to/index.md
- Prompting Guide: how-to/prompting-guide.md
- Repository Analysis: how-to/repository-analysis.md
- Reference:
- reference/index.md
- MCP Tools: reference/mcp-tools.md
- Configuration: reference/configuration.md
- Explanation:
- explanation/index.md
- Architecture: explanation/architecture.md
plugins:
- search
- git-revision-date-localized:
enable_creation_date: true
markdown_extensions:
- pymdownx.highlight:
anchor_linenums: true
- pymdownx.inlinehilite
- pymdownx.snippets
- pymdownx.superfences
- admonition
- pymdownx.details
- pymdownx.tabbed:
alternate_style: true
- attr_list
- md_in_html
extra:
social:
- icon: fontawesome/brands/github
link: https://github.com/username/repository
```
**requirements.txt:**
```txt
mkdocs>=1.5.0
mkdocs-material>=9.0.0
mkdocs-git-revision-date-localized-plugin>=1.2.0
```
### Eleventy Configuration
**.eleventy.js:**
```javascript
const { EleventyHtmlBasePlugin } = require("@11ty/eleventy");
const markdownIt = require("markdown-it");
const markdownItAnchor = require("markdown-it-anchor");
module.exports = function (eleventyConfig) {
// Add plugins
eleventyConfig.addPlugin(EleventyHtmlBasePlugin);
// Configure Markdown
let markdownLibrary = markdownIt({
html: true,
breaks: true,
linkify: true,
}).use(markdownItAnchor, {
permalink: markdownItAnchor.permalink.ariaHidden({
placement: "after",
class: "direct-link",
symbol: "#",
}),
level: [1, 2, 3, 4],
slugify: eleventyConfig.getFilter("slug"),
});
eleventyConfig.setLibrary("md", markdownLibrary);
// Copy static files
eleventyConfig.addPassthroughCopy("src/assets");
eleventyConfig.addPassthroughCopy("src/css");
// Collections for Diataxis structure
eleventyConfig.addCollection("tutorials", function (collection) {
return collection.getFilteredByGlob("src/tutorials/*.md");
});
eleventyConfig.addCollection("howto", function (collection) {
return collection.getFilteredByGlob("src/how-to/*.md");
});
eleventyConfig.addCollection("reference", function (collection) {
return collection.getFilteredByGlob("src/reference/*.md");
});
eleventyConfig.addCollection("explanation", function (collection) {
return collection.getFilteredByGlob("src/explanation/*.md");
});
return {
dir: {
input: "src",
output: "_site",
includes: "_includes",
layouts: "_layouts",
data: "_data",
},
pathPrefix: "/repository-name/",
markdownTemplateEngine: "njk",
htmlTemplateEngine: "njk",
};
};
```
**package.json additions:**
```json
{
"scripts": {
"build": "eleventy",
"serve": "eleventy --serve",
"debug": "DEBUG=Eleventy* eleventy"
},
"devDependencies": {
"@11ty/eleventy": "^2.0.0",
"markdown-it": "^13.0.0",
"markdown-it-anchor": "^8.6.0"
}
}
```
## GitHub Actions Configuration
### Common Workflow Settings
All generated workflows include these optimizations:
```yaml
permissions:
contents: read
pages: write
id-token: write
concurrency:
group: "pages"
cancel-in-progress: false
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
```
### Caching Configuration
Node.js dependencies:
```yaml
- name: Cache dependencies
uses: actions/cache@v4
with:
path: ~/.npm
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-node-
```
Ruby dependencies (Jekyll):
```yaml
- name: Cache gems
uses: actions/cache@v4
with:
path: vendor/bundle
key: ${{ runner.os }}-gems-${{ hashFiles('**/Gemfile.lock') }}
restore-keys: |
${{ runner.os }}-gems-
```
## Performance Configuration
### Build Optimization
**Docusaurus:**
```javascript
const config = {
future: {
experimental_faster: true,
},
webpack: {
jsLoader: (isServer) => ({
loader: "esbuild-loader",
options: {
loader: "tsx",
target: isServer ? "node12" : "es2017",
},
}),
},
};
```
**Hugo:**
```yaml
build:
writeStats: true
noJSConfigInAssets: true
caches:
getjson:
maxAge: "1m"
getcsv:
maxAge: "1m"
```
### SEO Configuration
All SSGs include:
- Meta tags for social sharing
- Structured data markup
- XML sitemaps
- RSS feeds
- Canonical URLs
- Open Graph tags
## Security Configuration
### Content Security Policy
Generated sites include CSP headers:
```html
<meta
http-equiv="Content-Security-Policy"
content="
default-src 'self';
script-src 'self' 'unsafe-inline' https://www.googletagmanager.com;
style-src 'self' 'unsafe-inline';
img-src 'self' data: https:;
connect-src 'self' https://www.google-analytics.com;
"
/>
```
### HTTPS Enforcement
All deployments force HTTPS and include HSTS headers.
## Troubleshooting Configuration Issues
### Common Problems
**BaseURL Mismatch:**
```yaml
# Check that your configuration matches the repository name
baseURL: "https://username.github.io/repository-name/" # Must match exactly
```
**Build Failures:**
```yaml
# Verify the Node.js version in your workflows
node-version: "20" # Should match the version your project requires
```
**Asset Loading Issues:**
```html
<!-- Use relative paths for assets -->
<img src="./images/logo.png" /> <!-- Good -->
<img src="/images/logo.png" /> <!-- May fail when the site is served from a subpath -->
```
For more troubleshooting help, see the [Troubleshooting Guide](../how-to/troubleshooting.md).