This is page 19 of 20. Use http://codebase.md/tosin2013/documcp?lines=false&page={x} to view the full context.
# Directory Structure
```
├── .dockerignore
├── .eslintignore
├── .eslintrc.json
├── .github
│ ├── agents
│ │ ├── documcp-ast.md
│ │ ├── documcp-deploy.md
│ │ ├── documcp-memory.md
│ │ ├── documcp-test.md
│ │ └── documcp-tool.md
│ ├── copilot-instructions.md
│ ├── dependabot.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── automated-changelog.md
│ │ ├── bug_report.md
│ │ ├── bug_report.yml
│ │ ├── documentation_issue.md
│ │ ├── feature_request.md
│ │ ├── feature_request.yml
│ │ ├── npm-publishing-fix.md
│ │ └── release_improvements.md
│ ├── PULL_REQUEST_TEMPLATE.md
│ ├── release-drafter.yml
│ └── workflows
│ ├── auto-merge.yml
│ ├── ci.yml
│ ├── codeql.yml
│ ├── dependency-review.yml
│ ├── deploy-docs.yml
│ ├── README.md
│ ├── release-drafter.yml
│ └── release.yml
├── .gitignore
├── .husky
│ ├── commit-msg
│ └── pre-commit
├── .linkcheck.config.json
├── .markdown-link-check.json
├── .nvmrc
├── .pre-commit-config.yaml
├── .versionrc.json
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── CONTRIBUTING.md
├── docker-compose.docs.yml
├── Dockerfile.docs
├── docs
│ ├── .docusaurus
│ │ ├── docusaurus-plugin-content-docs
│ │ │ └── default
│ │ │ └── __mdx-loader-dependency.json
│ │ └── docusaurus-plugin-content-pages
│ │ └── default
│ │ └── __plugin.json
│ ├── adrs
│ │ ├── 001-mcp-server-architecture.md
│ │ ├── 002-repository-analysis-engine.md
│ │ ├── 003-static-site-generator-recommendation-engine.md
│ │ ├── 004-diataxis-framework-integration.md
│ │ ├── 005-github-pages-deployment-automation.md
│ │ ├── 006-mcp-tools-api-design.md
│ │ ├── 007-mcp-prompts-and-resources-integration.md
│ │ ├── 008-intelligent-content-population-engine.md
│ │ ├── 009-content-accuracy-validation-framework.md
│ │ ├── 010-mcp-resource-pattern-redesign.md
│ │ └── README.md
│ ├── api
│ │ ├── .nojekyll
│ │ ├── assets
│ │ │ ├── hierarchy.js
│ │ │ ├── highlight.css
│ │ │ ├── icons.js
│ │ │ ├── icons.svg
│ │ │ ├── main.js
│ │ │ ├── navigation.js
│ │ │ ├── search.js
│ │ │ └── style.css
│ │ ├── hierarchy.html
│ │ ├── index.html
│ │ ├── modules.html
│ │ └── variables
│ │ └── TOOLS.html
│ ├── assets
│ │ └── logo.svg
│ ├── development
│ │ └── MCP_INSPECTOR_TESTING.md
│ ├── docusaurus.config.js
│ ├── explanation
│ │ ├── architecture.md
│ │ └── index.md
│ ├── guides
│ │ ├── link-validation.md
│ │ ├── playwright-integration.md
│ │ └── playwright-testing-workflow.md
│ ├── how-to
│ │ ├── analytics-setup.md
│ │ ├── custom-domains.md
│ │ ├── documentation-freshness-tracking.md
│ │ ├── github-pages-deployment.md
│ │ ├── index.md
│ │ ├── local-testing.md
│ │ ├── performance-optimization.md
│ │ ├── prompting-guide.md
│ │ ├── repository-analysis.md
│ │ ├── seo-optimization.md
│ │ ├── site-monitoring.md
│ │ ├── troubleshooting.md
│ │ └── usage-examples.md
│ ├── index.md
│ ├── knowledge-graph.md
│ ├── package-lock.json
│ ├── package.json
│ ├── phase-2-intelligence.md
│ ├── reference
│ │ ├── api-overview.md
│ │ ├── cli.md
│ │ ├── configuration.md
│ │ ├── deploy-pages.md
│ │ ├── index.md
│ │ ├── mcp-tools.md
│ │ └── prompt-templates.md
│ ├── research
│ │ ├── cross-domain-integration
│ │ │ └── README.md
│ │ ├── domain-1-mcp-architecture
│ │ │ ├── index.md
│ │ │ └── mcp-performance-research.md
│ │ ├── domain-2-repository-analysis
│ │ │ └── README.md
│ │ ├── domain-3-ssg-recommendation
│ │ │ ├── index.md
│ │ │ └── ssg-performance-analysis.md
│ │ ├── domain-4-diataxis-integration
│ │ │ └── README.md
│ │ ├── domain-5-github-deployment
│ │ │ ├── github-pages-security-analysis.md
│ │ │ └── index.md
│ │ ├── domain-6-api-design
│ │ │ └── README.md
│ │ ├── README.md
│ │ ├── research-integration-summary-2025-01-14.md
│ │ ├── research-progress-template.md
│ │ └── research-questions-2025-01-14.md
│ ├── robots.txt
│ ├── sidebars.js
│ ├── sitemap.xml
│ ├── src
│ │ └── css
│ │ └── custom.css
│ └── tutorials
│ ├── development-setup.md
│ ├── environment-setup.md
│ ├── first-deployment.md
│ ├── getting-started.md
│ ├── index.md
│ ├── memory-workflows.md
│ └── user-onboarding.md
├── jest.config.js
├── LICENSE
├── Makefile
├── MCP_PHASE2_IMPLEMENTATION.md
├── mcp-config-example.json
├── mcp.json
├── package-lock.json
├── package.json
├── README.md
├── release.sh
├── scripts
│ └── check-package-structure.cjs
├── SECURITY.md
├── setup-precommit.sh
├── src
│ ├── benchmarks
│ │ └── performance.ts
│ ├── index.ts
│ ├── memory
│ │ ├── contextual-retrieval.ts
│ │ ├── deployment-analytics.ts
│ │ ├── enhanced-manager.ts
│ │ ├── export-import.ts
│ │ ├── freshness-kg-integration.ts
│ │ ├── index.ts
│ │ ├── integration.ts
│ │ ├── kg-code-integration.ts
│ │ ├── kg-health.ts
│ │ ├── kg-integration.ts
│ │ ├── kg-link-validator.ts
│ │ ├── kg-storage.ts
│ │ ├── knowledge-graph.ts
│ │ ├── learning.ts
│ │ ├── manager.ts
│ │ ├── multi-agent-sharing.ts
│ │ ├── pruning.ts
│ │ ├── schemas.ts
│ │ ├── storage.ts
│ │ ├── temporal-analysis.ts
│ │ ├── user-preferences.ts
│ │ └── visualization.ts
│ ├── prompts
│ │ └── technical-writer-prompts.ts
│ ├── scripts
│ │ └── benchmark.ts
│ ├── templates
│ │ └── playwright
│ │ ├── accessibility.spec.template.ts
│ │ ├── Dockerfile.template
│ │ ├── docs-e2e.workflow.template.yml
│ │ ├── link-validation.spec.template.ts
│ │ └── playwright.config.template.ts
│ ├── tools
│ │ ├── analyze-deployments.ts
│ │ ├── analyze-readme.ts
│ │ ├── analyze-repository.ts
│ │ ├── check-documentation-links.ts
│ │ ├── deploy-pages.ts
│ │ ├── detect-gaps.ts
│ │ ├── evaluate-readme-health.ts
│ │ ├── generate-config.ts
│ │ ├── generate-contextual-content.ts
│ │ ├── generate-llm-context.ts
│ │ ├── generate-readme-template.ts
│ │ ├── generate-technical-writer-prompts.ts
│ │ ├── kg-health-check.ts
│ │ ├── manage-preferences.ts
│ │ ├── manage-sitemap.ts
│ │ ├── optimize-readme.ts
│ │ ├── populate-content.ts
│ │ ├── readme-best-practices.ts
│ │ ├── recommend-ssg.ts
│ │ ├── setup-playwright-tests.ts
│ │ ├── setup-structure.ts
│ │ ├── sync-code-to-docs.ts
│ │ ├── test-local-deployment.ts
│ │ ├── track-documentation-freshness.ts
│ │ ├── update-existing-documentation.ts
│ │ ├── validate-content.ts
│ │ ├── validate-documentation-freshness.ts
│ │ ├── validate-readme-checklist.ts
│ │ └── verify-deployment.ts
│ ├── types
│ │ └── api.ts
│ ├── utils
│ │ ├── ast-analyzer.ts
│ │ ├── code-scanner.ts
│ │ ├── content-extractor.ts
│ │ ├── drift-detector.ts
│ │ ├── freshness-tracker.ts
│ │ ├── language-parsers-simple.ts
│ │ ├── permission-checker.ts
│ │ └── sitemap-generator.ts
│ └── workflows
│ └── documentation-workflow.ts
├── test-docs-local.sh
├── tests
│ ├── api
│ │ └── mcp-responses.test.ts
│ ├── benchmarks
│ │ └── performance.test.ts
│ ├── edge-cases
│ │ └── error-handling.test.ts
│ ├── functional
│ │ └── tools.test.ts
│ ├── integration
│ │ ├── kg-documentation-workflow.test.ts
│ │ ├── knowledge-graph-workflow.test.ts
│ │ ├── mcp-readme-tools.test.ts
│ │ ├── memory-mcp-tools.test.ts
│ │ ├── readme-technical-writer.test.ts
│ │ └── workflow.test.ts
│ ├── memory
│ │ ├── contextual-retrieval.test.ts
│ │ ├── enhanced-manager.test.ts
│ │ ├── export-import.test.ts
│ │ ├── freshness-kg-integration.test.ts
│ │ ├── kg-code-integration.test.ts
│ │ ├── kg-health.test.ts
│ │ ├── kg-link-validator.test.ts
│ │ ├── kg-storage-validation.test.ts
│ │ ├── kg-storage.test.ts
│ │ ├── knowledge-graph-enhanced.test.ts
│ │ ├── knowledge-graph.test.ts
│ │ ├── learning.test.ts
│ │ ├── manager-advanced.test.ts
│ │ ├── manager.test.ts
│ │ ├── mcp-resource-integration.test.ts
│ │ ├── mcp-tool-persistence.test.ts
│ │ ├── schemas.test.ts
│ │ ├── storage.test.ts
│ │ ├── temporal-analysis.test.ts
│ │ └── user-preferences.test.ts
│ ├── performance
│ │ ├── memory-load-testing.test.ts
│ │ └── memory-stress-testing.test.ts
│ ├── prompts
│ │ ├── guided-workflow-prompts.test.ts
│ │ └── technical-writer-prompts.test.ts
│ ├── server.test.ts
│ ├── setup.ts
│ ├── tools
│ │ ├── all-tools.test.ts
│ │ ├── analyze-coverage.test.ts
│ │ ├── analyze-deployments.test.ts
│ │ ├── analyze-readme.test.ts
│ │ ├── analyze-repository.test.ts
│ │ ├── check-documentation-links.test.ts
│ │ ├── deploy-pages-kg-retrieval.test.ts
│ │ ├── deploy-pages-tracking.test.ts
│ │ ├── deploy-pages.test.ts
│ │ ├── detect-gaps.test.ts
│ │ ├── evaluate-readme-health.test.ts
│ │ ├── generate-contextual-content.test.ts
│ │ ├── generate-llm-context.test.ts
│ │ ├── generate-readme-template.test.ts
│ │ ├── generate-technical-writer-prompts.test.ts
│ │ ├── kg-health-check.test.ts
│ │ ├── manage-sitemap.test.ts
│ │ ├── optimize-readme.test.ts
│ │ ├── readme-best-practices.test.ts
│ │ ├── recommend-ssg-historical.test.ts
│ │ ├── recommend-ssg-preferences.test.ts
│ │ ├── recommend-ssg.test.ts
│ │ ├── simple-coverage.test.ts
│ │ ├── sync-code-to-docs.test.ts
│ │ ├── test-local-deployment.test.ts
│ │ ├── tool-error-handling.test.ts
│ │ ├── track-documentation-freshness.test.ts
│ │ ├── validate-content.test.ts
│ │ ├── validate-documentation-freshness.test.ts
│ │ └── validate-readme-checklist.test.ts
│ ├── types
│ │ └── type-safety.test.ts
│ └── utils
│ ├── ast-analyzer.test.ts
│ ├── content-extractor.test.ts
│ ├── drift-detector.test.ts
│ ├── freshness-tracker.test.ts
│ └── sitemap-generator.test.ts
├── tsconfig.json
└── typedoc.json
```
# Files
--------------------------------------------------------------------------------
/src/tools/populate-content.ts:
--------------------------------------------------------------------------------
```typescript
import { Tool } from "@modelcontextprotocol/sdk/types.js";
import * as fs from "fs/promises";
import * as path from "path";
import {
handleMemoryRecall,
handleMemoryEnhancedRecommendation,
handleMemoryIntelligentAnalysis,
} from "../memory/index.js";
/** Caller-supplied options for a single content population run. */
interface PopulationOptions {
  // ID of a prior analyze_repository result; resolved via the memory system
  // first, then from .documcp/analyses/<id>.json (see getRepositoryAnalysis).
  analysisId: string;
  // Root of the documentation tree that generated files are written into.
  docsPath: string;
  // How much content to generate.
  populationLevel: "basic" | "comprehensive" | "intelligent";
  includeProjectSpecific: boolean;
  // Forwarded to writeContentToStructure — presumably keeps pre-existing
  // files untouched; implementation not visible in this chunk.
  preserveExisting: boolean;
  // Optional list of technologies to emphasize in generated content.
  technologyFocus?: string[];
}
/** A single planned tutorial document. */
interface TutorialContent {
  title: string;
  description: string;
  // Full markdown body of the tutorial.
  content: string;
  codeExamples?: string[];
}
/** Planned documents for each Diataxis quadrant. */
interface ContentPlan {
  tutorials: TutorialContent[];
  howToGuides: any[];
  reference: any[];
  explanation: any[];
}
// ProjectContext interface - currently unused but kept for future enhancements
// interface ProjectContext {
//   primaryLanguage: string;
//   frameworks: any[];
//   testingFrameworks: any[];
//   dependencies: any;
//   devopsTools?: DevOpsToolProfile;
// }
/** DevOps tooling detected in the repository, grouped by category. */
interface DevOpsToolProfile {
  containerization: ContainerTechnology[];
  orchestration: OrchestrationTechnology[];
  cicd: CICDTechnology[];
  configuration: ConfigManagementTechnology[];
  monitoring: MonitoringTechnology[];
  security: SecurityTechnology[];
}
/** A detected containerization technology. */
interface ContainerTechnology {
  name: string;
  version?: string;
  configFiles: string[];
  usage: string;
}
/** A detected orchestration platform. */
interface OrchestrationTechnology {
  name: string;
  manifests?: string[];
  resources?: string[];
  namespaces?: string[];
}
/** A detected CI/CD system. */
interface CICDTechnology {
  name: string;
  pipelines?: string[];
  tasks?: string[];
  triggers?: string[];
}
/** A detected configuration-management tool. */
interface ConfigManagementTechnology {
  name: string;
  playbooks?: string[];
  roles?: string[];
  inventory?: string[];
  vaultFiles?: string[];
}
/** A detected monitoring tool (name only). */
interface MonitoringTechnology {
  name: string;
}
/** A detected security tool (name only). */
interface SecurityTechnology {
  name: string;
}
// Interfaces for future extensibility
// interface LanguageContentGenerator {
//   detectFrameworks(analysis: any): any[];
//   generateContent(frameworks: any[], context: ProjectContext): any;
// }
// interface DevOpsContentGenerator {
//   detectDevOpsTools(analysis: any): DevOpsToolProfile;
//   generateDevOpsContent(tools: DevOpsToolProfile, context: ProjectContext): any;
// }
/** Result summary returned by ContentPopulationEngine.populateContent(). */
interface PopulationResult {
  success: boolean;
  // Number of files written into docsPath during this run.
  filesCreated: number;
  contentPlan: ContentPlan;
  // Heuristic quality metrics computed by calculatePopulationMetrics().
  populationMetrics: {
    coverage: number;
    completeness: number;
    projectSpecificity: number;
  };
  // Suggested follow-up actions for the user.
  nextSteps: string[];
}
class ContentPopulationEngine {
private analysisCache: Map<string, any> = new Map();
private memoryInsights: any = null;
private similarProjects: any[] = [];
/**
 * Runs the end-to-end content population pipeline:
 * analysis retrieval → memory insights → content plan → per-quadrant
 * generation (tutorials / how-to / reference / explanation) → file writes →
 * navigation and cross-reference updates.
 *
 * @param options - population options (analysis ID, target docs path, level, …)
 * @param context - optional MCP tool context used for progress/info reporting;
 *   all reporting calls are optional-chained so a missing context is a no-op
 * @returns summary of the run (files created, plan, metrics, next steps)
 * @throws when the referenced analysis cannot be found (see getRepositoryAnalysis)
 */
async populateContent(
  options: PopulationOptions,
  context?: any,
): Promise<PopulationResult> {
  // Report initial progress
  if (context?.meta?.progressToken) {
    await context.meta.reportProgress?.({ progress: 0, total: 100 });
  }
  await context?.info?.("📝 Starting Diataxis content population...");
  // 1. Retrieve and validate repository analysis
  await context?.info?.("📊 Retrieving repository analysis...");
  const analysis = await this.getRepositoryAnalysis(options.analysisId);
  if (context?.meta?.progressToken) {
    await context.meta.reportProgress?.({ progress: 20, total: 100 });
  }
  // 2. Get memory-enhanced insights for intelligent content generation
  // (populates this.memoryInsights / this.similarProjects as a side effect).
  await context?.info?.(
    "🧠 Loading memory insights for intelligent generation...",
  );
  await this.loadMemoryInsights(analysis, options);
  if (context?.meta?.progressToken) {
    await context.meta.reportProgress?.({ progress: 40, total: 100 });
  }
  // 3. Generate content plan based on project characteristics AND memory insights
  await context?.info?.("🗺️ Generating intelligent content plan...");
  const contentPlan = await this.generateIntelligentContentPlan(
    analysis,
    options.populationLevel,
    this.memoryInsights,
  );
  // 4. Generate memory-informed content for each Diataxis category
  const tutorials = await this.generateMemoryInformedTutorialContent(
    contentPlan.tutorials,
    analysis,
    this.memoryInsights,
  );
  const howTos = await this.generateMemoryInformedHowToContent(
    contentPlan.howToGuides,
    analysis,
    this.memoryInsights,
  );
  const reference = await this.generateMemoryInformedReferenceContent(
    contentPlan.reference,
    analysis,
    this.memoryInsights,
  );
  const explanation = await this.generateMemoryInformedExplanationContent(
    contentPlan.explanation,
    analysis,
    this.memoryInsights,
  );
  // 5. Write content to documentation structure
  const filesCreated = await this.writeContentToStructure(
    options.docsPath,
    { tutorials, howTos, reference, explanation },
    options.preserveExisting,
  );
  // 6. Generate cross-references and navigation updates
  await context?.info?.("🔗 Generating cross-references and navigation...");
  await this.updateNavigationAndCrossReferences(
    options.docsPath,
    contentPlan,
  );
  if (context?.meta?.progressToken) {
    await context.meta.reportProgress?.({ progress: 100, total: 100 });
  }
  await context?.info?.(
    `✅ Content population complete! Generated ${filesCreated} file(s)`,
  );
  return {
    success: true,
    filesCreated,
    contentPlan,
    populationMetrics: this.calculatePopulationMetrics(
      filesCreated,
      contentPlan,
    ),
    nextSteps: this.generateMemoryInformedNextSteps(
      analysis,
      contentPlan,
      this.memoryInsights,
    ),
  };
}
/**
 * Resolves a stored repository analysis by ID.
 *
 * Resolution order: the memory system first (best-effort; failures are only
 * logged), then the on-disk cache written by analyze_repository.
 *
 * @param analysisId - ID returned by a prior analyze_repository run
 * @returns the parsed analysis object
 * @throws when neither source yields an analysis for the given ID
 */
private async getRepositoryAnalysis(analysisId: string): Promise<any> {
  let recalled: any = null;
  try {
    recalled = await this.getFromMemorySystem(analysisId);
  } catch (error) {
    console.warn("Failed to retrieve from memory system:", error);
  }
  if (recalled) {
    return recalled;
  }
  // Fallback: cached analysis file under .documcp/analyses/.
  const cachedFile = path.join(".documcp", "analyses", `${analysisId}.json`);
  try {
    return JSON.parse(await fs.readFile(cachedFile, "utf-8"));
  } catch {
    throw new Error(
      `Repository analysis with ID '${analysisId}' not found. Please run analyze_repository first.`,
    );
  }
}
/**
 * Attempts to recall a stored analysis from the memory system.
 *
 * Handles three storage shapes, in priority order:
 *   1. wrapped MCP content: memory.data.content[0] of type "text" holding JSON
 *   2. legacy direct content: memory.content
 *   3. raw data field: memory.data
 *
 * @param analysisId - ID used as the recall query
 * @returns the recalled analysis object, or null when nothing usable is found
 *   (all failures degrade to null so callers can fall back to disk)
 */
private async getFromMemorySystem(analysisId: string): Promise<any> {
  try {
    const result = await handleMemoryRecall({
      query: analysisId,
      type: "analysis",
      limit: 1,
    });
    // Handle the memory recall result structure
    if (result && result.memories && result.memories.length > 0) {
      const memory = result.memories[0];
      // Handle wrapped content structure
      if (
        memory.data &&
        memory.data.content &&
        Array.isArray(memory.data.content)
      ) {
        // Extract the JSON from the first text content
        const firstContent = memory.data.content[0];
        if (
          firstContent &&
          firstContent.type === "text" &&
          firstContent.text
        ) {
          try {
            return JSON.parse(firstContent.text);
          } catch (parseError) {
            // Unparseable text: return the raw wrapper rather than failing.
            console.warn(
              "Failed to parse analysis content from memory:",
              parseError,
            );
            return memory.data;
          }
        }
      }
      // Try direct content access (legacy format)
      if (memory.content) {
        return memory.content;
      }
      // Try data field
      if (memory.data) {
        return memory.data;
      }
    }
    return null;
  } catch (error) {
    // Best-effort: recall failures are logged and reported as "not found".
    console.warn("Memory system recall failed:", error);
    return null;
  }
}
/**
 * Loads memory-system insights for the current project and stores them on
 * this.memoryInsights / this.similarProjects. Never throws: any failure
 * falls back to empty insights.
 *
 * NOTE(review): on failure this.similarProjects is NOT reset, so a value
 * from a previous run on this engine instance may persist — confirm intent.
 *
 * @param analysis - repository analysis used to build recall queries
 * @param options - population options (populationLevel feeds the recommendation query)
 */
private async loadMemoryInsights(
  analysis: any,
  options: PopulationOptions,
): Promise<void> {
  try {
    // Get similar projects from memory system
    const similarProjectsQuery = `${
      analysis.metadata?.primaryLanguage || ""
    } ${analysis.metadata?.ecosystem || ""} documentation`;
    const similarProjects = await handleMemoryRecall({
      query: similarProjectsQuery,
      type: "recommendation",
      limit: 5,
    });
    // Get memory-enhanced analysis
    const enhancedAnalysis = await handleMemoryIntelligentAnalysis({
      projectPath: analysis.projectPath || "",
      baseAnalysis: analysis,
    });
    // Get memory-enhanced recommendations for content strategy
    const enhancedRecommendations = await handleMemoryEnhancedRecommendation({
      projectPath: analysis.projectPath || "",
      baseRecommendation: {
        contentStrategy: "diataxis",
        populationLevel: options.populationLevel,
      },
      projectFeatures: {
        ecosystem: analysis.metadata?.ecosystem || "unknown",
        primaryLanguage: analysis.metadata?.primaryLanguage || "unknown",
        complexity: analysis.complexity || "medium",
        hasTests: analysis.structure?.hasTests || false,
        hasCI: analysis.structure?.hasCI || false,
      },
    });
    this.memoryInsights = {
      similarProjects: similarProjects.memories || [],
      enhancedAnalysis: enhancedAnalysis,
      enhancedRecommendations: enhancedRecommendations,
      patterns: this.extractPatternsFromSimilarProjects(
        similarProjects.memories || [],
      ),
    };
    this.similarProjects = similarProjects.memories || [];
  } catch (error) {
    console.warn("Failed to load memory insights:", error);
    // Fallback to minimal insights
    this.memoryInsights = {
      similarProjects: [],
      enhancedAnalysis: null,
      enhancedRecommendations: null,
      patterns: {},
    };
  }
}
/**
 * Aggregates frequency counts of frameworks, SSGs, documentation structures,
 * and deployment strategies observed across similar-project memories.
 *
 * @param projects - similar-project memory entries (each may carry a content object)
 * @returns frequency tables keyed by the observed value
 */
private extractPatternsFromSimilarProjects(projects: any[]): any {
  const commonFrameworks: Record<string, number> = {};
  const successfulSSGs: Record<string, number> = {};
  const documentationStructures: Record<string, number> = {};
  const deploymentStrategies: Record<string, number> = {};

  const bump = (table: Record<string, number>, key: string) => {
    table[key] = (table[key] || 0) + 1;
  };

  for (const project of projects) {
    const content = project.content || {};
    // Frameworks are a list; the remaining fields are one value per project.
    for (const framework of content.frameworks || []) {
      bump(commonFrameworks, framework);
    }
    if (content.recommendedSSG) {
      bump(successfulSSGs, content.recommendedSSG);
    }
    if (content.documentationApproach) {
      bump(documentationStructures, content.documentationApproach);
    }
    if (content.deploymentStrategy) {
      bump(deploymentStrategies, content.deploymentStrategy);
    }
  }

  return {
    commonFrameworks,
    successfulSSGs,
    documentationStructures,
    deploymentStrategies,
  };
}
/**
 * Builds the full Diataxis content plan, one quadrant at a time, feeding the
 * same analysis/level/memory-insight inputs to each planner.
 *
 * @param analysis - repository analysis object
 * @param level - population level ("basic" | "comprehensive" | "intelligent")
 * @param _memoryInsights - insights from loadMemoryInsights(); may be null
 * @returns the assembled ContentPlan
 */
private async generateIntelligentContentPlan(
  analysis: any,
  level: string,
  _memoryInsights: any,
): Promise<ContentPlan> {
  // Quadrants are planned in a fixed order: tutorials, how-to guides,
  // reference, explanation — each informed by memory patterns.
  return {
    tutorials: this.generateMemoryInformedTutorialPlan(
      analysis,
      level,
      _memoryInsights,
    ),
    howToGuides: this.generateMemoryInformedHowToPlan(
      analysis,
      level,
      _memoryInsights,
    ),
    reference: this.generateMemoryInformedReferencePlan(
      analysis,
      level,
      _memoryInsights,
    ),
    explanation: this.generateMemoryInformedExplanationPlan(
      analysis,
      level,
      _memoryInsights,
    ),
  };
}
/**
 * Plans the tutorial set: always a getting-started guide, plus
 * technology-specific tutorials driven by what the analysis detected
 * (Node/JS/TS ecosystem, test suite, DevOps tooling, Python frameworks).
 *
 * Fix: `analysis.structure` and `analysis.metadata` are optional-chained
 * everywhere else in this class; the unguarded accesses here (hasTests,
 * projectName, primaryLanguage) threw on partial analyses.
 *
 * @param analysis - repository analysis (metadata/structure may be partial)
 * @param _level - population level (currently unused here)
 * @param _memoryInsights - insights from loadMemoryInsights(); may be null
 * @returns ordered list of tutorials to generate
 */
private generateMemoryInformedTutorialPlan(
  analysis: any,
  _level: string,
  _memoryInsights: any,
): TutorialContent[] {
  const tutorials: TutorialContent[] = [];
  const similarProjects = _memoryInsights?.similarProjects || [];
  tutorials.push({
    title: `Getting Started with ${
      analysis.metadata?.projectName || "the Project"
    }`,
    description: this.generateMemoryInformedDescription(
      analysis,
      similarProjects,
      "getting-started",
    ),
    content: this.generateMemoryInformedGettingStartedContent(
      analysis,
      _memoryInsights,
    ),
    codeExamples: this.generateMemoryInformedGettingStartedExamples(
      analysis,
      _memoryInsights,
    ),
  });
  // Add technology-specific tutorials based on what worked for similar projects
  const ecosystem =
    analysis.metadata?.ecosystem || analysis.technologies?.runtime;
  if (
    ecosystem === "Node.js" ||
    ecosystem === "javascript" ||
    ecosystem === "typescript"
  ) {
    const nodeSuccessPatterns =
      this.extractNodeSuccessPatterns(similarProjects);
    tutorials.push({
      title: this.generateMemoryInformedTutorialTitle(
        "environment-setup",
        nodeSuccessPatterns,
        analysis,
      ),
      description:
        "Configure your development environment based on proven successful patterns",
      content: this.generateMemoryInformedNodeSetupContent(
        analysis,
        nodeSuccessPatterns,
      ),
      codeExamples: this.generateMemoryInformedNodeSetupExamples(
        analysis,
        nodeSuccessPatterns,
      ),
    });
  }
  // Add testing tutorial if tests detected
  // Fix: optional-chain `structure` (was analysis.structure.hasTests).
  if (analysis.structure?.hasTests) {
    tutorials.push({
      title: "Writing and Running Tests",
      description: "Learn how to test your code effectively",
      content: this.generateTestingTutorialContent(analysis),
      codeExamples: this.generateTestingExamples(analysis),
    });
  }
  // Add DevOps tutorials based on detected tools
  const devopsTools = this.detectDevOpsTools(analysis);
  if (devopsTools.containerization.length > 0) {
    const containerTech = devopsTools.containerization[0];
    // Fix: guard `metadata` and reuse the same fallback name as the
    // getting-started title instead of rendering "undefined".
    const containerProjectName =
      analysis.metadata?.projectName || "the Project";
    tutorials.push({
      title: `Containerizing ${containerProjectName} with ${containerTech.name}`,
      description: `Learn how to containerize your application using ${containerTech.name}`,
      content: this.generateContainerTutorialContent(analysis, containerTech),
      codeExamples: this.generateContainerExamples(analysis, containerTech),
    });
  }
  if (devopsTools.orchestration.length > 0) {
    const orchestrationTech = devopsTools.orchestration[0];
    tutorials.push({
      title: `Deploying to ${orchestrationTech.name}`,
      description: `Deploy your application to ${orchestrationTech.name}`,
      content: this.generateOrchestrationTutorialContent(
        analysis,
        orchestrationTech,
      ),
      codeExamples: this.generateOrchestrationExamples(
        analysis,
        orchestrationTech,
      ),
    });
  }
  // Python-specific tutorials
  // Fix: optional-chain `metadata` (was analysis.metadata.primaryLanguage).
  if (analysis.metadata?.primaryLanguage === "Python") {
    tutorials.push({
      title: "Python Virtual Environment Setup",
      description: "Set up isolated Python development environment",
      content: this.generatePythonEnvironmentContent(analysis),
      codeExamples: this.generatePythonEnvironmentExamples(),
    });
    // Python framework-specific tutorials
    const pythonFrameworks = this.detectPythonFrameworks(analysis);
    pythonFrameworks.forEach((framework) => {
      tutorials.push({
        title: `Building Applications with ${framework.name}`,
        description: `Complete guide to ${framework.name} development`,
        content: this.generatePythonFrameworkTutorialContent(
          analysis,
          framework,
        ),
        codeExamples: this.generatePythonFrameworkExamples(framework),
      });
    });
  }
  return tutorials;
}
/**
 * Plans the how-to guides: feature-addition and debugging guides always,
 * plus a deployment guide when CI was detected.
 *
 * Fix: `analysis.structure` is optional-chained everywhere else in this
 * class (e.g. loadMemoryInsights); the unguarded `analysis.structure.hasCI`
 * threw when the analysis carried no structure section.
 *
 * @param analysis - repository analysis (structure may be absent)
 * @param _level - population level (currently unused here)
 * @returns list of planned how-to guide entries
 */
private generateHowToPlan(analysis: any, _level: string): any[] {
  const howTos: any[] = [];
  // Common development tasks
  howTos.push({
    title: "How to Add a New Feature",
    content: this.generateFeatureGuideContent(analysis),
  });
  howTos.push({
    title: "How to Debug Common Issues",
    content: this.generateDebuggingGuideContent(analysis),
  });
  // Deployment guides if CI detected
  if (analysis.structure?.hasCI) {
    howTos.push({
      title: "How to Deploy Your Application",
      content: this.generateDeploymentGuideContent(analysis),
    });
  }
  return howTos;
}
/**
 * Plans the reference documentation. The set is fixed: API reference,
 * configuration options, and CLI reference, always in that order.
 *
 * @param analysis - repository analysis object
 * @param _level - population level (currently unused here)
 * @returns list of planned reference entries
 */
private generateReferencePlan(analysis: any, _level: string): any[] {
  return [
    {
      title: "API Reference",
      content: this.generateAPIReference(analysis),
    },
    {
      title: "Configuration Options",
      content: this.generateConfigReference(analysis),
    },
    {
      title: "Command Line Interface",
      content: this.generateCLIReference(analysis),
    },
  ];
}
/**
 * Plans the explanation documents. The set is fixed: architecture overview,
 * design decisions, and technology stack, always in that order.
 *
 * @param analysis - repository analysis object
 * @param _level - population level (currently unused here)
 * @returns list of planned explanation entries
 */
private generateExplanationPlan(analysis: any, _level: string): any[] {
  return [
    {
      title: "Architecture Overview",
      content: this.generateArchitectureContent(analysis),
    },
    {
      title: "Design Decisions",
      content: this.generateDesignDecisionsContent(analysis),
    },
    {
      title: "Technology Stack",
      content: this.generateTechnologyStackContent(analysis),
    },
  ];
}
// Memory-informed helper methods
/**
 * Picks a getting-started approach: the first one recorded by a similar
 * project, or a type-based default when no similar project recorded one.
 *
 * @param similarProjects - similar-project memory entries
 * @param analysis - repository analysis used for the default approach
 * @returns an approach descriptor object
 */
private getSuccessfulGettingStartedApproach(
  similarProjects: any[],
  analysis: any,
): any {
  for (const project of similarProjects) {
    const approach = project.content?.gettingStartedApproach;
    if (approach) {
      // First recorded approach wins, matching the original ordering.
      return approach;
    }
  }
  return this.getDefaultApproachForProjectType(analysis);
}
/**
 * Builds a tutorial description, mentioning proven patterns when similar
 * projects carry a tutorial of the requested type.
 *
 * @param analysis - repository analysis (metadata may be partial)
 * @param similarProjects - similar-project memory entries
 * @param tutorialType - key into each project's content.tutorials map
 * @returns a one-sentence description string
 */
private generateMemoryInformedDescription(
  analysis: any,
  similarProjects: any[],
  tutorialType: string,
): string {
  const language = analysis.metadata?.primaryLanguage || "development";
  const project = analysis.metadata?.projectName || "this project";
  const matching = similarProjects.filter(
    (p) => p.content?.tutorials?.[tutorialType],
  );
  if (matching.length === 0) {
    return `Learn ${language} development with ${project}`;
  }
  // Adapt the successful pattern for this project.
  return `Learn ${language} with ${project} using proven patterns from ${matching.length} similar successful projects`;
}
/**
 * Distills success patterns (tools, commands, versions, practices) from the
 * Node.js/TypeScript/JavaScript subset of the similar projects.
 *
 * @param similarProjects - similar-project memory entries
 * @returns aggregated pattern object for the Node ecosystem
 */
private extractNodeSuccessPatterns(similarProjects: any[]): any {
  const isNodeProject = (p: any) =>
    p.content?.ecosystem === "Node.js" ||
    p.content?.primaryLanguage === "TypeScript" ||
    p.content?.primaryLanguage === "JavaScript";
  const nodeProjects = similarProjects.filter(isNodeProject);
  return {
    commonTools: this.extractCommonTools(nodeProjects),
    successfulCommands: this.extractSuccessfulCommands(nodeProjects),
    recommendedVersions: this.extractRecommendedVersions(nodeProjects),
    bestPractices: this.extractBestPractices(nodeProjects),
  };
}
/**
 * Returns the tools used by at least half of the given projects.
 *
 * @param projects - project memory entries (content.tools is a string list)
 * @returns tool names meeting the >=50% usage threshold
 */
private extractCommonTools(projects: any[]): string[] {
  const counts = new Map<string, number>();
  for (const project of projects) {
    for (const tool of project.content?.tools || []) {
      counts.set(tool, (counts.get(tool) || 0) + 1);
    }
  }
  // A tool is "common" when at least half of the projects use it.
  const threshold = Math.ceil(projects.length * 0.5);
  const common: string[] = [];
  for (const [tool, count] of counts) {
    if (count >= threshold) {
      common.push(tool);
    }
  }
  return common;
}
/**
 * Merges per-category command lists across projects, de-duplicating while
 * preserving first-seen order within each category.
 *
 * @param projects - project memory entries (content.commands maps category → string[])
 * @returns deduplicated command lists keyed by category
 */
private extractSuccessfulCommands(projects: any[]): Record<string, string[]> {
  const merged: Record<string, Set<string>> = {};
  for (const project of projects) {
    const projectCommands = project.content?.commands;
    if (!projectCommands) continue;
    for (const [category, cmds] of Object.entries(projectCommands)) {
      if (!merged[category]) {
        merged[category] = new Set<string>();
      }
      for (const cmd of cmds as string[]) {
        merged[category].add(cmd);
      }
    }
  }
  const result: Record<string, string[]> = {};
  for (const [category, cmdSet] of Object.entries(merged)) {
    result[category] = [...cmdSet];
  }
  return result;
}
/**
 * For each tool, returns the version most frequently recorded across
 * projects (ties resolved in favor of the first-seen version, matching the
 * original stable-sort behavior).
 *
 * @param projects - project memory entries (content.versions maps tool → version)
 * @returns most common version string per tool
 */
private extractRecommendedVersions(projects: any[]): Record<string, string> {
  const tally: Record<string, Record<string, number>> = {};
  for (const project of projects) {
    const versions = project.content?.versions;
    if (!versions) continue;
    for (const [tool, version] of Object.entries(versions)) {
      const key = version as string;
      if (!tally[tool]) {
        tally[tool] = {};
      }
      tally[tool][key] = (tally[tool][key] || 0) + 1;
    }
  }
  const recommended: Record<string, string> = {};
  for (const [tool, counts] of Object.entries(tally)) {
    let bestVersion: string | undefined;
    let bestCount = -1;
    for (const [version, count] of Object.entries(counts)) {
      // Strict > keeps the first-seen version on ties.
      if ((count as number) > bestCount) {
        bestVersion = version;
        bestCount = count as number;
      }
    }
    if (bestVersion !== undefined) {
      recommended[tool] = bestVersion;
    }
  }
  return recommended;
}
/**
 * Returns the best practices mentioned by at least 30% of the projects.
 *
 * @param projects - project memory entries (content.bestPractices is a string list)
 * @returns practice names meeting the >=30% mention threshold
 */
private extractBestPractices(projects: any[]): string[] {
  const counts = new Map<string, number>();
  for (const project of projects) {
    for (const practice of project.content?.bestPractices || []) {
      counts.set(practice, (counts.get(practice) || 0) + 1);
    }
  }
  // Keep practices mentioned by at least 30% of the projects.
  const threshold = Math.ceil(projects.length * 0.3);
  return [...counts.entries()]
    .filter(([, count]) => count >= threshold)
    .map(([practice]) => practice);
}
/**
 * Builds the default getting-started approach for a project, falling back
 * to "library"/"JavaScript" when metadata is absent.
 *
 * @param analysis - repository analysis (metadata may be partial)
 * @returns approach descriptor { type, language, approach, complexity }
 */
private getDefaultApproachForProjectType(analysis: any): any {
  return {
    type: analysis.metadata?.projectType || "library",
    language: analysis.metadata?.primaryLanguage || "JavaScript",
    approach: "hands-on",
    complexity: "progressive",
  };
}
/**
 * Picks a tutorial title. For "environment-setup", the language-centric
 * title is used when proven tooling patterns exist; otherwise a
 * project-centric fallback. Unknown tutorial types get a generic title.
 *
 * @param tutorialType - kind of tutorial (e.g. "environment-setup")
 * @param patterns - success patterns from extractNodeSuccessPatterns()
 * @param analysis - repository analysis (metadata may be partial)
 * @returns the tutorial title string
 */
private generateMemoryInformedTutorialTitle(
  tutorialType: string,
  patterns: any,
  analysis: any,
): string {
  const projectName = analysis.metadata?.projectName || "Project";
  const language = analysis.metadata?.primaryLanguage || "Development";
  if (tutorialType === "environment-setup") {
    const hasProvenTooling = (patterns.commonTools?.length ?? 0) > 0;
    return hasProvenTooling
      ? `Setting Up Your ${language} Development Environment`
      : `Development Environment Setup for ${projectName}`;
  }
  return `${tutorialType} Tutorial for ${projectName}`;
}
// Enhanced content generation methods with memory insights
/**
 * Renders the "Getting Started" tutorial as markdown, tailoring sections to
 * what the analysis actually detected (ecosystem, dependencies, env files,
 * tests, CI) and to patterns from similar projects in memory.
 *
 * @param analysis - repository analysis (metadata/structure/dependencies may be partial)
 * @param _memoryInsights - insights from loadMemoryInsights(); may be null
 * @returns the complete tutorial body as a markdown string
 */
private generateMemoryInformedGettingStartedContent(
  analysis: any,
  _memoryInsights: any,
): string {
  const projectName = analysis.metadata?.projectName || "the project";
  const language = analysis.metadata?.primaryLanguage || "development";
  const patterns = _memoryInsights?.patterns || {};
  const similarProjects = _memoryInsights?.similarProjects || [];
  // Extract real project structure and dependencies
  const realDependencies = analysis.dependencies?.packages || [];
  const hasTests = analysis.structure?.hasTests;
  const hasCI = analysis.structure?.hasCI;
  // NOTE(review): ecosystem defaults to "Node.js", so non-Node projects with
  // missing metadata get Node prerequisites — confirm this is intended.
  const ecosystem = analysis.metadata?.ecosystem || "Node.js";
  // Build getting started content based on actual project characteristics
  let content = `# Getting Started with ${projectName}\n\n`;
  if (similarProjects.length > 0) {
    content += `Welcome to ${projectName}! This tutorial leverages successful patterns from ${similarProjects.length} similar ${language} projects to get you up and running quickly.\n\n`;
  } else {
    content += `Welcome to ${projectName}! This tutorial will guide you through setting up and running the project.\n\n`;
  }
  // Prerequisites based on actual analysis
  content += `## Prerequisites\n\n`;
  content += `Based on the project analysis, you'll need:\n\n`;
  if (
    ecosystem === "Node.js" ||
    language === "TypeScript" ||
    language === "JavaScript"
  ) {
    const recommendedVersion =
      this.getRecommendedNodeVersion(similarProjects);
    content += `- Node.js (version ${recommendedVersion} or higher)\n`;
    content += `- npm or yarn package manager\n`;
  }
  if (language === "TypeScript") {
    content += `- TypeScript (globally installed or via npx)\n`;
  }
  content += `- Git for version control\n\n`;
  // Installation based on real dependencies
  content += `## Installation\n\n`;
  content += `1. Clone the repository:\n`;
  content += ` \`\`\`bash\n`;
  content += ` git clone <repository-url>\n`;
  // NOTE(review): assumes the cloned directory name equals projectName — verify.
  content += ` cd ${projectName}\n`;
  content += ` \`\`\`\n\n`;
  content += `2. Install dependencies:\n`;
  content += ` \`\`\`bash\n`;
  // Prefer yarn only when it appears in the analyzed dependency list.
  if (realDependencies.includes("yarn")) {
    content += ` yarn install\n`;
  } else {
    content += ` npm install\n`;
  }
  content += ` \`\`\`\n\n`;
  // Real environment setup
  if (this.projectHasEnvFile(analysis)) {
    content += `3. Set up environment variables:\n`;
    content += ` \`\`\`bash\n`;
    content += ` cp .env.example .env\n`;
    content += ` \`\`\`\n\n`;
  }
  // Running based on actual project setup
  content += `## Running the Project\n\n`;
  const runCommands = this.extractActualRunCommands(analysis);
  if (runCommands.length > 0) {
    runCommands.forEach((cmd) => {
      content += `\`\`\`bash\n${cmd}\n\`\`\`\n\n`;
    });
  } else {
    // Generic fallback when no run commands could be extracted.
    content += `\`\`\`bash\nnpm start\n\`\`\`\n\n`;
  }
  // Testing section based on actual test setup
  if (hasTests) {
    content += `## Verifying Your Setup\n\n`;
    content += `Run the test suite to ensure everything is working:\n`;
    content += `\`\`\`bash\n`;
    const testFramework = this.detectTestFramework(analysis);
    if (testFramework) {
      content += `npm test # Uses ${testFramework}\n`;
    } else {
      content += `npm test\n`;
    }
    content += `\`\`\`\n\n`;
  }
  // Next steps based on memory insights
  // NOTE(review): patterns.documentationStructures is an object and therefore
  // truthy even when empty, so these links appear whenever insights loaded —
  // confirm this is the intended condition.
  content += `## Next Steps\n\n`;
  if (patterns.documentationStructures) {
    content += `- Explore the [Architecture Overview](../explanation/architecture.md)\n`;
    content += `- Learn about [Adding New Features](../how-to/add-feature.md)\n`;
  }
  if (hasCI) {
    content += `- Check the [Deployment Guide](../how-to/deploy.md)\n`;
  }
  content += `- Review the [API Reference](../reference/api.md)\n`;
  return content;
}
/**
 * Builds code examples for the getting-started tutorial from the actual
 * repository analysis: entry-point usage, tsconfig, and package.json scripts.
 *
 * @param analysis - Repository analysis (files, metadata, dependencies)
 * @param _memoryInsights - Memory insights (currently unused here)
 * @returns Array of example code snippets
 */
private generateMemoryInformedGettingStartedExamples(
  analysis: any,
  _memoryInsights: any,
): string[] {
  const examples: string[] = [];
  // Generate real usage example based on actual entry points
  const entryPoint = this.findProjectEntryPoint(analysis);
  if (entryPoint) {
    examples.push(
      `// Example: Basic usage\nimport { initialize } from './${entryPoint}';\n\nconst app = initialize({\n // Configuration options\n});\n\napp.start();`,
    );
  }
  // TypeScript config example if project uses TypeScript
  if (analysis.metadata?.primaryLanguage === "TypeScript") {
    const actualTsConfig = this.extractTsConfigPatterns(analysis);
    // Only emit the snippet when a config was actually extracted; otherwise
    // JSON.stringify(null) would render a meaningless "null" example.
    if (actualTsConfig) {
      examples.push(
        `// TypeScript configuration\n${JSON.stringify(
          actualTsConfig,
          null,
          2,
        )}`,
      );
    }
  }
  // Real package.json scripts
  const scripts = this.extractPackageScripts(analysis);
  if (scripts && Object.keys(scripts).length > 0) {
    examples.push(
      `// Available scripts\n${JSON.stringify({ scripts }, null, 2)}`,
    );
  }
  return examples;
}
/**
 * Picks the Node.js version to recommend: the version most often declared
 * by similar projects, falling back to "18" when none are known.
 *
 * @param similarProjects - Memory records of similar projects
 * @returns Node.js version string
 */
private getRecommendedNodeVersion(similarProjects: any[]): string {
  // Tally how often each declared Node version appears.
  const tally = new Map<string, number>();
  for (const project of similarProjects) {
    const version = project.content?.versions?.node;
    if (version) {
      tally.set(version, (tally.get(version) ?? 0) + 1);
    }
  }
  // Take the first-seen version with the highest count.
  let best: string | null = null;
  let bestCount = 0;
  for (const [version, count] of tally) {
    if (count > bestCount) {
      best = version;
      bestCount = count;
    }
  }
  return best ?? "18"; // Default modern version
}
/**
 * Checks whether the analyzed repository ships an environment-variable
 * template file (.env.example, .env.template, or env.example).
 *
 * @param analysis - Repository analysis containing a `files` list
 * @returns True when a template env file is present
 */
private projectHasEnvFile(analysis: any): boolean {
  const envTemplates = new Set([
    ".env.example",
    ".env.template",
    "env.example",
  ]);
  const files = analysis.files || [];
  return files.some((f: any) => envTemplates.has(f.name));
}
/**
 * Derives the command used to run the project from its package.json
 * scripts, preferring `dev` over `start` over `serve`.
 *
 * @param analysis - Repository analysis containing the file list
 * @returns At most one npm command, or an empty array when unknown
 */
private extractActualRunCommands(analysis: any): string[] {
  const scripts = this.findPackageJson(analysis)?.scripts;
  if (!scripts) {
    return [];
  }
  if (scripts.dev) {
    return ["npm run dev"];
  }
  if (scripts.start) {
    return ["npm start"];
  }
  if (scripts.serve) {
    return ["npm run serve"];
  }
  return [];
}
/**
 * Identifies the test framework from the dependency list, checked in a
 * fixed priority order (Jest, Mocha, Vitest, Jasmine).
 *
 * @param analysis - Repository analysis with dependencies.packages
 * @returns Framework display name, or null when none is detected
 */
private detectTestFramework(analysis: any): string | null {
  const packages = analysis.dependencies?.packages || [];
  const frameworks: Array<[string, string]> = [
    ["jest", "Jest"],
    ["mocha", "Mocha"],
    ["vitest", "Vitest"],
    ["jasmine", "Jasmine"],
  ];
  for (const [pkg, label] of frameworks) {
    if (packages.includes(pkg)) {
      return label;
    }
  }
  return null;
}
/**
 * Locates the project's entry module: prefers package.json `main` (with a
 * trailing .js/.ts stripped so the name works in import statements), then
 * falls back to conventional file names.
 *
 * @param analysis - Repository analysis containing the file list
 * @returns Entry module name without extension, or null when unknown
 */
private findProjectEntryPoint(analysis: any): string | null {
  const packageJson = this.findPackageJson(analysis);
  if (packageJson?.main) {
    return packageJson.main.replace(/\.(js|ts)$/, "");
  }
  // Fall back to conventional entry-point file names.
  const files = analysis.files || [];
  for (const candidate of ["index", "main", "app", "server"]) {
    const found = files.some((f: any) =>
      f.name === `${candidate}.ts` || f.name === `${candidate}.js`,
    );
    if (found) {
      return candidate;
    }
  }
  return null;
}
/**
 * Finds and parses package.json from the analyzed file list.
 *
 * @param analysis - Repository analysis containing a `files` list
 * @returns Parsed package.json object, or null when missing/unparseable
 */
private findPackageJson(analysis: any): any {
  const packageFile = (analysis.files || []).find(
    (f: any) => f.name === "package.json",
  );
  if (!packageFile?.content) {
    return null;
  }
  try {
    return JSON.parse(packageFile.content);
  } catch {
    // Malformed JSON is treated the same as a missing file.
    return null;
  }
}
/**
 * Reads tsconfig.json from the analyzed files. When the file exists but
 * cannot be parsed (e.g. JSONC comments), sensible default compiler
 * options are returned instead.
 *
 * @param analysis - Repository analysis containing a `files` list
 * @returns Parsed tsconfig, defaults on parse failure, or null when absent
 */
private extractTsConfigPatterns(analysis: any): any {
  const tsConfigFile = (analysis.files || []).find(
    (f: any) => f.name === "tsconfig.json",
  );
  if (!tsConfigFile?.content) {
    return null;
  }
  try {
    return JSON.parse(tsConfigFile.content);
  } catch {
    // Unparseable config: fall back to sensible defaults.
    return {
      compilerOptions: {
        target: "ES2020",
        module: "commonjs",
        strict: true,
        esModuleInterop: true,
        skipLibCheck: true,
        forceConsistentCasingInFileNames: true,
      },
    };
  }
}
/**
 * Returns the scripts map from package.json, or null when package.json is
 * missing or declares no scripts.
 *
 * @param analysis - Repository analysis containing the file list
 */
private extractPackageScripts(analysis: any): any {
  const packageJson = this.findPackageJson(analysis);
  if (!packageJson?.scripts) {
    return null;
  }
  return packageJson.scripts;
}
// Content generation methods (keeping original structure but with memory enhancement)
/**
 * Generates the generic "Getting Started" tutorial page from a fixed
 * template. Only metadata.projectName is interpolated; for the
 * analysis-driven variant see the memory-informed generator.
 *
 * @param _analysis - Repository analysis; only metadata.projectName is read
 * @returns Markdown content for the tutorial page
 */
private generateGettingStartedContent(_analysis: any): string {
return `# Getting Started with ${_analysis.metadata.projectName}
Welcome to ${_analysis.metadata.projectName}! This tutorial will guide you through setting up and running the project for the first time.
## Prerequisites
Before you begin, ensure you have the following installed:
- Node.js (version 18 or higher)
- npm or yarn package manager
- Git for version control
## Installation
1. Clone the repository:
\`\`\`bash
git clone <repository-url>
cd ${_analysis.metadata.projectName}
\`\`\`
2. Install dependencies:
\`\`\`bash
npm install
\`\`\`
3. Set up environment variables:
\`\`\`bash
cp .env.example .env
\`\`\`
## Running the Project
Start the development server:
\`\`\`bash
npm run dev
\`\`\`
## Verifying Your Setup
Run the test suite to ensure everything is working:
\`\`\`bash
npm test
\`\`\`
## Next Steps
- Explore the [Architecture Overview](../explanation/architecture.md)
- Learn about [Adding New Features](../how-to/add-feature.md)
- Check the [API Reference](../reference/api.md)
`;
}
/**
 * Returns fixed code examples for the getting-started tutorial: a basic
 * usage snippet (import named after the project) and a stock tsconfig.
 *
 * @param _analysis - Repository analysis; only metadata.projectName is read
 * @returns Array of example snippets
 */
private generateGettingStartedExamples(_analysis: any): string[] {
return [
`// Example: Basic usage
import { initialize } from './${_analysis.metadata.projectName}';
const app = initialize({
// Configuration options
});
app.start();`,
`// Example: TypeScript configuration
{
"compilerOptions": {
"target": "ES2020",
"module": "commonjs",
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"forceConsistentCasingInFileNames": true
}
}`,
];
}
/**
 * Generates the "Setting Up Your Development Environment" tutorial page
 * (static template: nvm install, TypeScript setup, VS Code tooling).
 *
 * @param _analysis - Repository analysis (unused; template is fixed)
 * @returns Markdown content for the environment-setup page
 */
private generateNodeSetupContent(_analysis: any): string {
return `# Setting Up Your Development Environment
This guide will help you configure a complete Node.js and TypeScript development environment.
## Installing Node.js
### Using Node Version Manager (nvm)
1. Install nvm:
\`\`\`bash
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.0/install.sh | bash
\`\`\`
2. Install and use the correct Node.js version:
\`\`\`bash
nvm install 18
nvm use 18
\`\`\`
## TypeScript Setup
1. Install TypeScript globally:
\`\`\`bash
npm install -g typescript
\`\`\`
2. Initialize TypeScript configuration:
\`\`\`bash
npx tsc --init
\`\`\`
## Development Tools
### Recommended VS Code Extensions
- ESLint
- Prettier
- TypeScript and JavaScript Language Features
- GitLens
### Debugging Configuration
Create a \`.vscode/launch.json\` file:
\`\`\`json
{
"version": "0.2.0",
"configurations": [
{
"type": "node",
"request": "launch",
"name": "Debug TypeScript",
"skipFiles": ["<node_internals>/**"],
"program": "\${workspaceFolder}/src/index.ts",
"preLaunchTask": "tsc: build - tsconfig.json",
"outFiles": ["\${workspaceFolder}/dist/**/*.js"]
}
]
}
\`\`\`
`;
}
/**
 * Returns a fixed example of common package.json scripts for a
 * TypeScript/Node project (dev, build, start, test, lint).
 *
 * @returns Array with a single example snippet
 */
private generateNodeSetupExamples(): string[] {
return [
`// package.json scripts
{
"scripts": {
"dev": "ts-node-dev --respawn src/index.ts",
"build": "tsc",
"start": "node dist/index.js",
"test": "jest",
"lint": "eslint src --ext .ts"
}
}`,
];
}
/**
 * Generates the "Writing and Running Tests" tutorial page. The framework
 * name is taken from the first detected testing technology, defaulting
 * to Jest.
 *
 * @param _analysis - Repository analysis (projectName, technologies.testing)
 * @returns Markdown content for the testing tutorial
 */
private generateTestingTutorialContent(_analysis: any): string {
// First detected testing tech wins; "Jest" when none was detected.
const testFramework = _analysis.technologies.testing?.[0] || "Jest";
return `# Writing and Running Tests
Learn how to write effective tests for your ${_analysis.metadata.projectName} code using ${testFramework}.
## Test Structure
Tests should follow the AAA pattern:
- **Arrange**: Set up test data and conditions
- **Act**: Execute the code being tested
- **Assert**: Verify the results
## Writing Your First Test
Create a test file with the \`.test.ts\` extension:
\`\`\`typescript
// example.test.ts
describe('Example Module', () => {
it('should perform expected behavior', () => {
// Arrange
const input = 'test';
// Act
const result = exampleFunction(input);
// Assert
expect(result).toBe('expected output');
});
});
\`\`\`
## Running Tests
Execute all tests:
\`\`\`bash
npm test
\`\`\`
Run tests in watch mode:
\`\`\`bash
npm test -- --watch
\`\`\`
## Test Coverage
Generate a coverage report:
\`\`\`bash
npm test -- --coverage
\`\`\`
## Best Practices
1. **Test behavior, not implementation**: Focus on what the code does, not how
2. **Keep tests simple**: Each test should verify one thing
3. **Use descriptive names**: Test names should explain what is being tested
4. **Maintain test independence**: Tests should not depend on each other
`;
}
/**
 * Returns fixed example test snippets: one Jest unit test and one
 * supertest-based integration test.
 *
 * @param _analysis - Repository analysis (unused; examples are fixed)
 * @returns Array of example snippets
 */
private generateTestingExamples(_analysis: any): string[] {
return [
`// Unit test example
import { calculateTotal } from './calculator';
describe('Calculator', () => {
describe('calculateTotal', () => {
it('should sum all numbers correctly', () => {
const numbers = [1, 2, 3, 4, 5];
const result = calculateTotal(numbers);
expect(result).toBe(15);
});
it('should handle empty arrays', () => {
const result = calculateTotal([]);
expect(result).toBe(0);
});
it('should handle negative numbers', () => {
const numbers = [-1, -2, 3];
const result = calculateTotal(numbers);
expect(result).toBe(0);
});
});
});`,
`// Integration test example
import request from 'supertest';
import { app } from './app';
describe('API Endpoints', () => {
describe('GET /api/health', () => {
it('should return health status', async () => {
const response = await request(app)
.get('/api/health')
.expect(200);
expect(response.body).toEqual({
status: 'healthy',
timestamp: expect.any(String)
});
});
});
});`,
];
}
/**
 * Generates the "How to Add a New Feature" how-to page (static template:
 * plan, branch, implement, test, document, review).
 *
 * @param _analysis - Repository analysis; only metadata.projectName is read
 * @returns Markdown content for the feature guide
 */
private generateFeatureGuideContent(_analysis: any): string {
return `# How to Add a New Feature
This guide walks you through the process of adding a new feature to ${_analysis.metadata.projectName}.
## Step 1: Plan Your Feature
Before writing code:
1. Define the feature requirements
2. Consider the impact on existing functionality
3. Plan the implementation approach
## Step 2: Create a Feature Branch
\`\`\`bash
git checkout -b feature/your-feature-name
\`\`\`
## Step 3: Implement the Feature
1. Write the core functionality
2. Add appropriate error handling
3. Include logging for debugging
## Step 4: Write Tests
Create tests for your new feature:
- Unit tests for individual functions
- Integration tests for feature workflows
- Edge case testing
## Step 5: Update Documentation
- Add API documentation if applicable
- Update user guides
- Include code examples
## Step 6: Submit for Review
1. Push your branch:
\`\`\`bash
git push origin feature/your-feature-name
\`\`\`
2. Create a pull request
3. Address review feedback
## Best Practices
- Keep changes focused and atomic
- Follow existing code patterns
- Maintain backward compatibility
- Consider performance implications
`;
}
/**
 * Generates the "How to Debug Common Issues" how-to page (static template:
 * debugger/logging setup, common issues, performance debugging).
 *
 * @param _analysis - Repository analysis; only metadata.projectName is read
 * @returns Markdown content for the debugging guide
 */
private generateDebuggingGuideContent(_analysis: any): string {
return `# How to Debug Common Issues
This guide helps you troubleshoot and debug common issues in ${_analysis.metadata.projectName}.
## Debugging Tools
### Using the Built-in Debugger
1. Set breakpoints in your code
2. Run with debugging enabled:
\`\`\`bash
node --inspect src/index.js
\`\`\`
3. Connect your debugger (VS Code, Chrome DevTools, etc.)
### Logging
Enable verbose logging:
\`\`\`bash
DEBUG=* npm start
\`\`\`
## Common Issues and Solutions
### Issue: Module Not Found
**Symptoms**: Error message "Cannot find module"
**Solutions**:
1. Check if dependencies are installed: \`npm install\`
2. Verify import paths are correct
3. Check TypeScript path mappings in tsconfig.json
### Issue: Type Errors
**Symptoms**: TypeScript compilation errors
**Solutions**:
1. Run type checking: \`npm run typecheck\`
2. Update type definitions: \`npm install @types/package-name\`
3. Check for version mismatches
### Issue: Test Failures
**Symptoms**: Tests failing unexpectedly
**Solutions**:
1. Run tests in isolation
2. Check for race conditions
3. Verify test data setup
## Performance Debugging
### Memory Leaks
Use heap snapshots:
\`\`\`bash
node --expose-gc --inspect src/index.js
\`\`\`
### Slow Performance
Profile your application:
\`\`\`bash
node --prof src/index.js
\`\`\`
## Getting Help
If you're still stuck:
1. Check the [FAQ](../reference/faq.md)
2. Search existing issues on GitHub
3. Ask in the community forum
`;
}
/**
 * Generates the "How to Deploy Your Application" how-to page (static
 * template: checklist, GitHub Pages / cloud / Docker options, CI example).
 * The project name is interpolated into the Docker commands.
 *
 * @param _analysis - Repository analysis; only metadata.projectName is read
 * @returns Markdown content for the deployment guide
 */
private generateDeploymentGuideContent(_analysis: any): string {
return `# How to Deploy Your Application
This guide covers deployment options and best practices for ${_analysis.metadata.projectName}.
## Pre-Deployment Checklist
- [ ] All tests passing
- [ ] Environment variables configured
- [ ] Production dependencies installed
- [ ] Build process successful
- [ ] Security vulnerabilities addressed
## Deployment Options
### Option 1: GitHub Pages (Static Sites)
1. Build your application:
\`\`\`bash
npm run build
\`\`\`
2. Deploy to GitHub Pages:
\`\`\`bash
npm run deploy
\`\`\`
### Option 2: Cloud Platforms
#### Vercel
\`\`\`bash
vercel --prod
\`\`\`
#### Netlify
\`\`\`bash
netlify deploy --prod
\`\`\`
#### Heroku
\`\`\`bash
git push heroku main
\`\`\`
### Option 3: Docker Container
1. Build the Docker image:
\`\`\`bash
docker build -t ${_analysis.metadata.projectName} .
\`\`\`
2. Run the container:
\`\`\`bash
docker run -p 3000:3000 ${_analysis.metadata.projectName}
\`\`\`
## Continuous Deployment
### GitHub Actions
Create \`.github/workflows/deploy.yml\`:
\`\`\`yaml
name: Deploy
on:
push:
branches: [main]
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-node@v2
- run: npm ci
- run: npm run build
- run: npm run deploy
\`\`\`
## Post-Deployment
1. Verify deployment success
2. Run smoke tests
3. Monitor application logs
4. Check performance metrics
`;
}
/**
 * Generates the API reference page (static template documenting a generic
 * initialize/start/stop lifecycle, events, and error codes).
 *
 * @param analysis - Repository analysis; only metadata.projectName is read
 * @returns Markdown content for the API reference
 */
private generateAPIReference(analysis: any): string {
return `# API Reference
Complete reference for ${analysis.metadata.projectName} APIs.
## Core APIs
### initialize(options)
Initialize the application with the given options.
**Parameters:**
- \`options\` (Object): Configuration options
- \`port\` (number): Server port (default: 3000)
- \`host\` (string): Server host (default: 'localhost')
- \`debug\` (boolean): Enable debug mode (default: false)
**Returns:**
- \`Application\`: Application instance
**Example:**
\`\`\`typescript
const app = initialize({
port: 8080,
debug: true
});
\`\`\`
### start()
Start the application server.
**Returns:**
- \`Promise<void>\`: Resolves when server is running
**Example:**
\`\`\`typescript
await app.start();
console.log('Server running');
\`\`\`
### stop()
Stop the application server.
**Returns:**
- \`Promise<void>\`: Resolves when server is stopped
**Example:**
\`\`\`typescript
await app.stop();
console.log('Server stopped');
\`\`\`
## Events
### 'ready'
Emitted when the application is ready to accept connections.
\`\`\`typescript
app.on('ready', () => {
console.log('Application ready');
});
\`\`\`
### 'error'
Emitted when an error occurs.
\`\`\`typescript
app.on('error', (error) => {
console.error('Application error:', error);
});
\`\`\`
## Error Codes
| Code | Description |
|------|-------------|
| ERR_INVALID_CONFIG | Invalid configuration provided |
| ERR_PORT_IN_USE | Specified port is already in use |
| ERR_STARTUP_FAILED | Application failed to start |
`;
}
/**
 * Generates the configuration reference page (static template: option
 * tables for server/database/logging, env vars, example config file).
 *
 * @param analysis - Repository analysis; only metadata.projectName is read
 * @returns Markdown content for the configuration reference
 */
private generateConfigReference(analysis: any): string {
return `# Configuration Reference
Complete guide to configuring ${analysis.metadata.projectName}.
## Configuration File
Configuration can be provided via:
1. Environment variables
2. Configuration file (config.json)
3. Command-line arguments
## Configuration Options
### Server Configuration
| Option | Type | Default | Description |
|--------|------|---------|-------------|
| \`server.port\` | number | 3000 | Server port |
| \`server.host\` | string | 'localhost' | Server host |
| \`server.timeout\` | number | 30000 | Request timeout (ms) |
### Database Configuration
| Option | Type | Default | Description |
|--------|------|---------|-------------|
| \`database.host\` | string | 'localhost' | Database host |
| \`database.port\` | number | 5432 | Database port |
| \`database.name\` | string | 'myapp' | Database name |
| \`database.pool.min\` | number | 2 | Minimum pool connections |
| \`database.pool.max\` | number | 10 | Maximum pool connections |
### Logging Configuration
| Option | Type | Default | Description |
|--------|------|---------|-------------|
| \`logging.level\` | string | 'info' | Log level (debug, info, warn, error) |
| \`logging.format\` | string | 'json' | Log format (json, text) |
| \`logging.destination\` | string | 'stdout' | Log destination |
## Environment Variables
All configuration options can be set via environment variables:
\`\`\`bash
# Server
PORT=8080
HOST=0.0.0.0
# Database
DATABASE_URL=postgresql://user:pass@localhost:5432/myapp
# Logging
LOG_LEVEL=debug
\`\`\`
## Configuration File Example
\`\`\`json
{
"server": {
"port": 3000,
"host": "localhost"
},
"database": {
"host": "localhost",
"port": 5432,
"name": "myapp"
},
"logging": {
"level": "info",
"format": "json"
}
}
\`\`\`
`;
}
/**
 * Generates the CLI reference page (static template: global options plus
 * start/build/test/lint commands). The project name is used as the CLI
 * binary name in all examples.
 *
 * @param analysis - Repository analysis; only metadata.projectName is read
 * @returns Markdown content for the CLI reference
 */
private generateCLIReference(analysis: any): string {
return `# Command Line Interface
Reference for ${analysis.metadata.projectName} CLI commands.
## Global Options
| Option | Description |
|--------|-------------|
| \`--help, -h\` | Show help information |
| \`--version, -v\` | Show version number |
| \`--verbose\` | Enable verbose output |
| \`--quiet\` | Suppress non-error output |
## Commands
### start
Start the application.
\`\`\`bash
${analysis.metadata.projectName} start [options]
\`\`\`
**Options:**
- \`--port, -p <port>\`: Server port (default: 3000)
- \`--host, -h <host>\`: Server host (default: localhost)
- \`--config, -c <file>\`: Configuration file path
### build
Build the application for production.
\`\`\`bash
${analysis.metadata.projectName} build [options]
\`\`\`
**Options:**
- \`--output, -o <dir>\`: Output directory (default: dist)
- \`--minify\`: Minify output
- \`--sourcemap\`: Generate source maps
### test
Run tests.
\`\`\`bash
${analysis.metadata.projectName} test [options]
\`\`\`
**Options:**
- \`--watch, -w\`: Run in watch mode
- \`--coverage\`: Generate coverage report
- \`--bail\`: Stop on first test failure
### lint
Run linting checks.
\`\`\`bash
${analysis.metadata.projectName} lint [options]
\`\`\`
**Options:**
- \`--fix\`: Automatically fix issues
- \`--format <format>\`: Output format (stylish, json, compact)
## Examples
### Starting with custom configuration
\`\`\`bash
${analysis.metadata.projectName} start --config production.json --port 8080
\`\`\`
### Running tests with coverage
\`\`\`bash
${analysis.metadata.projectName} test --coverage --watch
\`\`\`
### Building for production
\`\`\`bash
${analysis.metadata.projectName} build --minify --output ./production
\`\`\`
`;
}
/**
 * Generates the "Architecture Overview" explanation page (static template:
 * components, design principles, directory layout, data flow, scaling,
 * security). Only the project name is interpolated.
 *
 * @param _analysis - Repository analysis; only metadata.projectName is read
 * @returns Markdown content for the architecture page
 */
private generateArchitectureContent(_analysis: any): string {
return `# Architecture Overview
Understanding the architecture of ${_analysis.metadata.projectName}.
## System Architecture
${_analysis.metadata.projectName} follows a modular architecture designed for scalability and maintainability.
### Core Components
1. **Core Engine**: The main processing engine that handles all operations
2. **Plugin System**: Extensible plugin architecture for custom functionality
3. **API Layer**: RESTful API for external integrations
4. **Data Layer**: Abstracted data access layer
## Design Principles
### Separation of Concerns
Each module has a single, well-defined responsibility:
- Business logic is separated from presentation
- Data access is abstracted from business logic
- External dependencies are isolated
### Dependency Injection
Dependencies are injected rather than hard-coded:
- Improves testability
- Enables easier mocking
- Supports multiple implementations
### Event-Driven Architecture
Components communicate through events:
- Loose coupling between modules
- Asynchronous processing capabilities
- Scalable message handling
## Directory Structure
\`\`\`
${_analysis.metadata.projectName}/
├── src/
│ ├── core/ # Core functionality
│ ├── plugins/ # Plugin implementations
│ ├── api/ # API endpoints
│ ├── services/ # Business services
│ ├── models/ # Data models
│ └── utils/ # Utility functions
├── tests/ # Test files
├── docs/ # Documentation
└── config/ # Configuration files
\`\`\`
## Data Flow
1. **Request Reception**: API receives incoming requests
2. **Validation**: Input validation and sanitization
3. **Processing**: Business logic execution
4. **Data Access**: Database operations if needed
5. **Response Generation**: Format and return response
## Scalability Considerations
### Horizontal Scaling
The application supports horizontal scaling through:
- Stateless design
- Load balancer compatibility
- Distributed caching support
### Performance Optimization
- Lazy loading of modules
- Caching strategies
- Database connection pooling
- Asynchronous operations
## Security Architecture
### Authentication & Authorization
- JWT-based authentication
- Role-based access control (RBAC)
- API key management
### Data Protection
- Encryption at rest and in transit
- Input validation and sanitization
- SQL injection prevention
- XSS protection
`;
}
/**
 * Generates the "Design Decisions" explanation page (static template:
 * stack choice, architectural patterns, API/database/testing/performance/
 * security decisions). Interpolates project name and primary language.
 *
 * @param _analysis - Repository analysis; reads metadata.projectName and
 *   metadata.primaryLanguage
 * @returns Markdown content for the design-decisions page
 */
private generateDesignDecisionsContent(_analysis: any): string {
return `# Design Decisions
Key architectural and design decisions made in ${_analysis.metadata.projectName}.
## Technology Stack
### Why ${_analysis.metadata.primaryLanguage}?
We chose ${_analysis.metadata.primaryLanguage} for:
- Strong type safety
- Excellent tooling support
- Large ecosystem of libraries
- Good performance characteristics
- Team familiarity
### Framework Selection
After evaluating multiple options, we selected our current stack based on:
- Community support and documentation
- Performance benchmarks
- Learning curve for new developers
- Long-term maintenance considerations
## Architectural Patterns
### Repository Pattern
We implement the repository pattern for data access:
- **Benefit**: Abstracts data source details
- **Trade-off**: Additional abstraction layer
- **Rationale**: Enables easy switching between data sources
### Service Layer
Business logic is encapsulated in services:
- **Benefit**: Reusable business logic
- **Trade-off**: More files and complexity
- **Rationale**: Clear separation of concerns
### Dependency Injection
We use dependency injection throughout:
- **Benefit**: Improved testability and flexibility
- **Trade-off**: Initial setup complexity
- **Rationale**: Essential for large-scale applications
## API Design
### RESTful vs GraphQL
We chose REST because:
- Simpler to implement and understand
- Better caching strategies
- Fits our use case well
- Lower operational complexity
### Versioning Strategy
API versioning through URL paths:
- **Format**: /api/v1/resource
- **Benefit**: Clear version boundaries
- **Trade-off**: URL complexity
- **Rationale**: Industry standard approach
## Database Decisions
### SQL vs NoSQL
We use SQL for:
- ACID compliance requirements
- Complex relational data
- Mature tooling and expertise
- Predictable performance
### Migration Strategy
Database migrations are managed through:
- Version-controlled migration files
- Automated migration on deployment
- Rollback capabilities
- Data validation steps
## Testing Strategy
### Test Pyramid
Our testing approach follows the test pyramid:
- Many unit tests (fast, isolated)
- Some integration tests (component interaction)
- Few E2E tests (full system validation)
### Coverage Goals
- Unit test coverage: 80% minimum
- Critical path coverage: 100%
- Integration test coverage: Key workflows
## Performance Decisions
### Caching Strategy
Multi-level caching approach:
- Application-level caching
- Database query caching
- CDN for static assets
- Redis for session data
### Async Processing
Background jobs for:
- Email sending
- Report generation
- Data processing
- Third-party integrations
## Security Decisions
### Authentication Method
JWT tokens because:
- Stateless authentication
- Scalable across services
- Standard implementation
- Good library support
### Data Encryption
- Passwords: bcrypt with salt rounds
- Sensitive data: AES-256 encryption
- Communications: TLS 1.3
- Secrets: Environment variables
## Future Considerations
### Microservices
Currently monolithic, but designed for potential splitting:
- Clear module boundaries
- Service-oriented architecture
- Database per service capability
- API gateway ready
### Cloud Native
Prepared for cloud deployment:
- 12-factor app principles
- Container-ready architecture
- Environment-based configuration
- Stateless design
`;
}
/**
 * Generates the "Technology Stack" explanation page. Testing, deployment
 * and database sections are filled from the analysis when available, with
 * generic defaults otherwise.
 *
 * Fix: list items were joined with the literal two-character string "\\n"
 * (backslash + n) instead of a newline, which rendered broken markdown;
 * joins now use "\n".
 *
 * @param _analysis - Repository analysis (metadata, technologies, dependencies)
 * @returns Markdown content for the technology-stack page
 */
private generateTechnologyStackContent(_analysis: any): string {
return `# Technology Stack
Complete overview of technologies used in ${_analysis.metadata.projectName}.
## Core Technologies
### Runtime & Language
- **${_analysis.metadata.primaryLanguage}**: Primary development language
- **${_analysis.metadata.ecosystem}**: Runtime environment
- **TypeScript**: Type-safe JavaScript development
### Package Management
- **npm/yarn**: Dependency management
- **npx**: Package execution
- **nvm**: Node version management
## Development Tools
### Build Tools
- **TypeScript Compiler**: Transpilation to JavaScript
- **Webpack/Rollup**: Module bundling
- **Babel**: JavaScript transformation
### Code Quality
- **ESLint**: Code linting
- **Prettier**: Code formatting
- **Husky**: Git hooks
- **lint-staged**: Pre-commit checks
### Testing
${
_analysis.technologies.testing
?.map((t: string) => `- **${t}**: Testing framework`)
.join("\n") || "- **Jest**: Testing framework"
}
- **Supertest**: API testing
- **Coverage tools**: Code coverage reporting
## Infrastructure
### Version Control
- **Git**: Source control
- **GitHub**: Repository hosting
- **GitHub Actions**: CI/CD pipelines
### Deployment
${
_analysis.technologies.deployment
?.map((t: string) => `- **${t}**: Deployment platform`)
.join("\n") || "- **Docker**: Containerization"
}
- **GitHub Pages**: Documentation hosting
### Monitoring
- **Application logs**: Custom logging
- **Error tracking**: Error monitoring
- **Performance monitoring**: APM tools
## Dependencies
### Core Dependencies
\`\`\`json
${JSON.stringify(_analysis.dependencies.packages?.slice(0, 5) || [], null, 2)}
\`\`\`
### Development Dependencies
- Testing frameworks
- Build tools
- Linting tools
- Type definitions
## Database & Storage
${
_analysis.technologies.database
? `### Database
- **${_analysis.technologies.database}**: Primary database
- **Migration tools**: Database versioning
- **ORMs/Query builders**: Data access layer`
: `### Storage
- File system for local development
- Cloud storage for production
- Caching layers for performance`
}
## External Services
### Third-party APIs
- Authentication services
- Payment processing
- Email services
- Analytics
### Cloud Services
- Hosting platforms
- CDN services
- Backup solutions
- Monitoring services
## Security Tools
### Development Security
- **Dependency scanning**: npm audit
- **Secret management**: Environment variables
- **Security headers**: Helmet.js
- **Input validation**: Sanitization libraries
### Production Security
- **TLS/SSL**: Encrypted communications
- **WAF**: Web application firewall
- **DDoS protection**: Rate limiting
- **Access control**: Authentication/authorization
## Documentation Tools
### Documentation Generation
- **Markdown**: Documentation format
- **Static site generators**: Documentation hosting
- **API documentation**: OpenAPI/Swagger
- **Code documentation**: JSDoc/TypeDoc
## Development Environment
### Recommended IDE
- **VS Code**: Primary development environment
- **Extensions**: Language support, debugging
- **Configuration**: Shared team settings
### Local Development
- **Hot reloading**: Development server
- **Debugging tools**: Chrome DevTools, VS Code debugger
- **Database tools**: Local database management
- **API testing**: Postman/Insomnia
## Upgrade Path
### Version Management
- Regular dependency updates
- Security patch monitoring
- Breaking change management
- Deprecation handling
### Future Technologies
- Considering adoption of:
- New framework versions
- Performance improvements
- Developer experience enhancements
- Security improvements
`;
}
/**
 * Placeholder pass-through: returns the tutorial plans unchanged.
 * NOTE(review): intended to transform plans into full content eventually —
 * no transformation is implemented yet.
 *
 * @param tutorials - Planned tutorial entries
 * @param _analysis - Repository analysis (unused)
 * @returns The tutorials array as-is
 */
private async generateTutorialContent(
tutorials: any[],
_analysis: any,
): Promise<any[]> {
// Transform tutorial plans into actual content
return tutorials;
}
/**
 * Placeholder pass-through: returns the how-to guide plans unchanged.
 *
 * @param howTos - Planned how-to entries
 * @param _analysis - Repository analysis (unused)
 * @returns The howTos array as-is
 */
private async generateHowToContent(
howTos: any[],
_analysis: any,
): Promise<any[]> {
return howTos;
}
/**
 * Placeholder pass-through: returns the reference plans unchanged.
 *
 * @param reference - Planned reference entries
 * @param _analysis - Repository analysis (unused)
 * @returns The reference array as-is
 */
private async generateReferenceContent(
reference: any[],
_analysis: any,
): Promise<any[]> {
return reference;
}
/**
 * Placeholder pass-through: returns the explanation plans unchanged.
 *
 * @param explanation - Planned explanation entries
 * @param _analysis - Repository analysis (unused)
 * @returns The explanation array as-is
 */
private async generateExplanationContent(
explanation: any[],
_analysis: any,
): Promise<any[]> {
return explanation;
}
private async writeContentToStructure(
docsPath: string,
content: any,
preserveExisting: boolean,
): Promise<number> {
let filesCreated = 0;
// Create directory structure if it doesn't exist
const dirs = ["tutorials", "how-to", "reference", "explanation"];
for (const dir of dirs) {
const dirPath = path.join(docsPath, dir);
await fs.mkdir(dirPath, { recursive: true });
}
// Write tutorial content
for (const tutorial of content.tutorials) {
const fileName = this.slugify(tutorial.title) + ".md";
const filePath = path.join(docsPath, "tutorials", fileName);
if (preserveExisting) {
try {
await fs.access(filePath);
continue; // Skip if file exists
} catch {
// File doesn't exist, proceed to write
}
}
await fs.writeFile(filePath, tutorial.content, "utf-8");
filesCreated++;
}
// Write how-to guides
for (const howTo of content.howTos) {
const fileName = this.slugify(howTo.title) + ".md";
const filePath = path.join(docsPath, "how-to", fileName);
if (preserveExisting) {
try {
await fs.access(filePath);
continue;
} catch {
// File doesn't exist, proceed with creation
}
}
await fs.writeFile(filePath, howTo.content, "utf-8");
filesCreated++;
}
// Write reference documentation
for (const ref of content.reference) {
const fileName = this.slugify(ref.title) + ".md";
const filePath = path.join(docsPath, "reference", fileName);
if (preserveExisting) {
try {
await fs.access(filePath);
continue;
} catch {
// File doesn't exist, proceed with creation
}
}
await fs.writeFile(filePath, ref.content, "utf-8");
filesCreated++;
}
// Write explanation content
for (const exp of content.explanation) {
const fileName = this.slugify(exp.title) + ".md";
const filePath = path.join(docsPath, "explanation", fileName);
if (preserveExisting) {
try {
await fs.access(filePath);
continue;
} catch {
// File doesn't exist, proceed with creation
}
}
await fs.writeFile(filePath, exp.content, "utf-8");
filesCreated++;
}
return filesCreated;
}
/**
 * Writes the top-level docs index.md with Diataxis-organized navigation
 * links to every planned page.
 *
 * Fix: navigation lists were joined with the literal two-character string
 * "\\n" (backslash + n) instead of a newline, so all links in a section
 * rendered on one broken line; joins now use "\n".
 *
 * @param docsPath - Root docs directory to write index.md into
 * @param contentPlan - Planned content across all four categories
 */
private async updateNavigationAndCrossReferences(
  docsPath: string,
  contentPlan: ContentPlan,
): Promise<void> {
  // Create main index file with navigation
  const indexContent = `# Documentation
Welcome to the documentation! This comprehensive guide is organized following the Diataxis framework.
## 📚 Learning-Oriented: Tutorials
Start here if you're new to the project:
${contentPlan.tutorials
.map((t) => `- [${t.title}](tutorials/${this.slugify(t.title)}.md)`)
.join("\n")}
## 🔧 Task-Oriented: How-To Guides
Practical guides for specific tasks:
${contentPlan.howToGuides
.map((h) => `- [${h.title}](how-to/${this.slugify(h.title)}.md)`)
.join("\n")}
## 📖 Information-Oriented: Reference
Detailed technical reference:
${contentPlan.reference
.map((r) => `- [${r.title}](reference/${this.slugify(r.title)}.md)`)
.join("\n")}
## 💡 Understanding-Oriented: Explanation
Conceptual documentation and background:
${contentPlan.explanation
.map((e) => `- [${e.title}](explanation/${this.slugify(e.title)}.md)`)
.join("\n")}
`;
  await fs.writeFile(path.join(docsPath, "index.md"), indexContent, "utf-8");
}
/**
 * Computes summary metrics for a content-population run.
 *
 * @param filesCreated - Number of documentation files actually written.
 * @param contentPlan - The plan whose item counts define the denominator.
 * @returns Object with `coverage` (percent of planned items created) plus
 *          placeholder `completeness` and `projectSpecificity` scores.
 *
 * Fix: an empty plan previously produced 0/0 === NaN coverage; it now
 * reports 0 instead.
 */
private calculatePopulationMetrics(
  filesCreated: number,
  contentPlan: ContentPlan,
): any {
  const totalPlanned =
    contentPlan.tutorials.length +
    contentPlan.howToGuides.length +
    contentPlan.reference.length +
    contentPlan.explanation.length;
  return {
    // Guard the division: a zero-item plan must not yield NaN.
    coverage: totalPlanned === 0 ? 0 : (filesCreated / totalPlanned) * 100,
    completeness: 85, // Example metric
    projectSpecificity: 75, // Example metric
  };
}
/**
 * Builds an ordered list of recommended follow-up actions, enriched with
 * insights mined from memory (patterns and similar projects).
 *
 * @param analysis - Repository analysis (reads structure.hasTests / hasCI).
 * @param _contentPlan - Unused; kept for signature compatibility.
 * @param _memoryInsights - Optional memory data with patterns/similarProjects.
 * @returns Next-step strings, always ending with sitemap + validation steps.
 */
private generateMemoryInformedNextSteps(
  analysis: any,
  _contentPlan: ContentPlan,
  _memoryInsights: any,
): string[] {
  const patterns = _memoryInsights?.patterns || {};
  const similarProjects = _memoryInsights?.similarProjects || [];
  const steps: string[] = [];

  // Lead with the review step, annotated when memory had comparable projects.
  steps.push(
    similarProjects.length > 0
      ? `Review and customize the generated content (based on ${similarProjects.length} similar project patterns)`
      : "Review and customize the generated content",
  );

  // Conditional steps driven by project analysis and memory patterns.
  const conditionalSteps: Array<[boolean, string]> = [
    [
      Boolean(analysis.structure?.hasTests),
      "Run and validate all code examples and commands",
    ],
    [
      Boolean(analysis.structure?.hasCI),
      "Set up automated documentation deployment using successful CI patterns",
    ],
    [
      Boolean(patterns.documentationStructures),
      "Enhance documentation structure based on successful similar projects",
    ],
    [
      Boolean(patterns.deploymentStrategies),
      "Implement proven deployment strategies from similar projects",
    ],
  ];
  for (const [applies, step] of conditionalSteps) {
    if (applies) {
      steps.push(step);
    }
  }

  // Always finish with sitemap maintenance and accuracy validation.
  steps.push(
    "Update sitemap.xml using manage_sitemap tool (action: update) to include new pages",
  );
  steps.push("Validate technical accuracy using project-specific analysis");
  return steps;
}
// Add the missing memory-informed content generation methods
private async generateMemoryInformedTutorialContent(
tutorials: any[],
analysis: any,
_memoryInsights: any,
): Promise<any[]> {
return tutorials.map((tutorial) => ({
...tutorial,
content: this.enhanceContentWithMemoryInsights(
tutorial.content,
analysis,
_memoryInsights,
),
codeExamples: this.enhanceExamplesWithRealCode(
tutorial.codeExamples || [],
analysis,
_memoryInsights,
),
}));
}
/**
 * Enhances each planned how-to guide's prose with memory-informed,
 * project-specific substitutions.
 */
private async generateMemoryInformedHowToContent(
  howTos: any[],
  analysis: any,
  _memoryInsights: any,
): Promise<any[]> {
  const enhance = (guide: any) => ({
    ...guide,
    content: this.enhanceContentWithMemoryInsights(
      guide.content,
      analysis,
      _memoryInsights,
    ),
  });
  return howTos.map(enhance);
}
// Replaces each reference entry's content with a generated API reference.
// NOTE(review): every entry receives the SAME generated content and the
// planned ref.content is discarded entirely — confirm this is intentional
// rather than an enhancement of the existing ref.content.
private async generateMemoryInformedReferenceContent(
reference: any[],
analysis: any,
_memoryInsights: any,
): Promise<any[]> {
return reference.map((ref) => ({
...ref,
content: this.generateMemoryInformedAPIReference(
analysis,
_memoryInsights,
),
}));
}
private async generateMemoryInformedExplanationContent(
explanation: any[],
analysis: any,
_memoryInsights: any,
): Promise<any[]> {
return explanation.map((exp) => ({
...exp,
content: this.enhanceContentWithMemoryInsights(
exp.content,
analysis,
_memoryInsights,
),
}));
}
/**
 * Injects memory-derived context into generated prose and swaps generic
 * placeholders for real project details.
 *
 * @param content - Raw generated markdown.
 * @param analysis - Repository analysis (reads metadata.primaryLanguage).
 * @param _memoryInsights - Optional memory data; similarProjects count is used.
 * @returns Content with "This guide" annotated (when similar projects exist)
 *          and generic examples replaced via replaceGenericExamplesWithReal.
 */
private enhanceContentWithMemoryInsights(
  content: string,
  analysis: any,
  _memoryInsights: any,
): string {
  const language = analysis.metadata?.primaryLanguage || "development";
  const similarCount = _memoryInsights?.similarProjects?.length || 0;

  let enhanced = content;
  if (similarCount > 0) {
    // Annotate the intro with how many similar projects informed the guide.
    const annotation = `This guide (based on patterns from ${similarCount} similar ${language} projects)`;
    enhanced = enhanced.replace(/This guide/g, annotation);
  }
  // Finally, swap generic placeholders for real project values.
  return this.replaceGenericExamplesWithReal(enhanced, analysis);
}
/**
 * Rewrites generic project-name placeholders in code examples with the
 * actual project name from the analysis.
 */
private enhanceExamplesWithRealCode(
  examples: string[],
  analysis: any,
  _memoryInsights: any,
): string[] {
  const projectName = analysis.metadata?.projectName || "project";
  return examples.map((example) =>
    example.replace(/your-project|myproject|example-project/g, projectName),
  );
}
/**
 * Builds an API-reference markdown document from the project's actual
 * structure: entry point, package.json main module, and npm scripts.
 */
private generateMemoryInformedAPIReference(
  analysis: any,
  _memoryInsights: any,
): string {
  const entryPoint = this.findProjectEntryPoint(analysis);
  const packageJson = this.findPackageJson(analysis);
  const projectName = analysis.metadata?.projectName || "the project";

  const parts: string[] = [
    `# API Reference\n\n`,
    `Complete reference for ${projectName} APIs.\n\n`,
  ];
  if (entryPoint) {
    parts.push(`## Core APIs\n\n`);
    parts.push(`### Entry Point: ${entryPoint}\n\n`);
    parts.push(`The main entry point for ${projectName}.\n\n`);
  }
  // Document the real main module when package.json declares one.
  if (packageJson?.main) {
    parts.push(`**Main Module:** \`${packageJson.main}\`\n\n`);
  }
  // Document the real npm scripts when any exist.
  const scripts = packageJson?.scripts;
  if (scripts && Object.keys(scripts).length > 0) {
    parts.push(`## Available Scripts\n\n`);
    for (const [script, command] of Object.entries(scripts)) {
      parts.push(`### ${script}\n\n`);
      parts.push(`\`\`\`bash\nnpm run ${script}\n\`\`\`\n\n`);
      parts.push(`Runs: \`${command}\`\n\n`);
    }
  }
  return parts.join("");
}
/**
 * Substitutes generic placeholders (project name, language, default port)
 * with values derived from the repository analysis.
 *
 * Fix: the port substitution previously used /3000/g, which also rewrote
 * "3000" occurring inside larger numbers such as "30000" or "13000"; it now
 * anchors with word boundaries. The parameter is no longer reassigned.
 */
private replaceGenericExamplesWithReal(
  content: string,
  analysis: any,
): string {
  const projectName = analysis.metadata?.projectName || "project";
  const language = analysis.metadata?.primaryLanguage || "JavaScript";

  // Replace generic project references
  let result = content;
  result = result.replace(/your-project-name/g, projectName);
  result = result.replace(/YourProject/g, projectName);
  result = result.replace(/your-language/g, language);

  // Replace the generic port with the project's conventional default.
  const defaultPort = this.getDefaultPortForProject(analysis);
  result = result.replace(/\b3000\b/g, defaultPort.toString());
  return result;
}
/**
 * Determines the project's conventional HTTP port: first from an explicit
 * port in the package.json start script, then from the detected framework,
 * falling back to 3000.
 */
private getDefaultPortForProject(analysis: any): number {
  const packageJson = this.findPackageJson(analysis);
  const startScript = packageJson?.scripts?.start;

  // Prefer an explicit port declared in the start script, e.g. "--port=4000".
  if (startScript && startScript.includes("port")) {
    const portMatch = startScript.match(/port[:\s=](\d+)/i);
    if (portMatch) {
      return parseInt(portMatch[1], 10);
    }
  }

  // Otherwise use the conventional port for the detected framework.
  const dependencies = analysis.dependencies?.packages || [];
  const frameworkPorts: Record<string, number> = {
    express: 3000,
    fastify: 3000,
    next: 3000,
    gatsby: 8000,
    nuxt: 3000,
  };
  for (const [framework, port] of Object.entries(frameworkPorts)) {
    if (dependencies.includes(framework)) {
      return port;
    }
  }
  return 3000; // Generic default
}
// Stub methods for the missing plan generation (to be implemented if needed)
// Stub: delegates to the generic how-to plan; memory insights are accepted
// for interface symmetry but not yet used.
private generateMemoryInformedHowToPlan(
analysis: any,
level: string,
_memoryInsights: any,
): any[] {
return this.generateHowToPlan(analysis, level);
}
// Stub: delegates to the generic reference plan; memory insights are accepted
// for interface symmetry but not yet used.
private generateMemoryInformedReferencePlan(
analysis: any,
level: string,
_memoryInsights: any,
): any[] {
return this.generateReferencePlan(analysis, level);
}
// Stub: delegates to the generic explanation plan; memory insights are
// accepted for interface symmetry but not yet used.
private generateMemoryInformedExplanationPlan(
analysis: any,
level: string,
_memoryInsights: any,
): any[] {
return this.generateExplanationPlan(analysis, level);
}
// Stub: delegates to the generic Node setup content; deployment patterns are
// accepted for interface symmetry but not yet used.
private generateMemoryInformedNodeSetupContent(
_analysis: any,
_patterns: any,
): string {
return this.generateNodeSetupContent(_analysis);
}
// Stub: delegates to the generic Node setup examples; both parameters are
// accepted for interface symmetry but not yet used.
private generateMemoryInformedNodeSetupExamples(
_analysis: any,
_patterns: any,
): string[] {
return this.generateNodeSetupExamples();
}
/**
 * Returns the default, analysis-independent checklist of follow-up actions
 * after documentation has been populated.
 */
private generateNextSteps(
  _analysis: any,
  _contentPlan: ContentPlan,
): string[] {
  const defaultSteps: string[] = [
    "Review and customize the generated content",
    "Add project-specific examples and use cases",
    "Validate technical accuracy of code examples",
    "Add screenshots and diagrams where helpful",
    "Test all commands and code snippets",
    "Set up automated documentation deployment",
  ];
  return defaultSteps;
}
/**
 * Converts a title into a URL-safe slug: lowercase, punctuation stripped,
 * whitespace runs collapsed to single hyphens.
 *
 * Fix: the old trailing .trim() only removed whitespace, which cannot exist
 * after \s+ was already replaced with "-", so inputs with leading/trailing
 * spaces produced slugs like "-hello-world-". Leading/trailing hyphens are
 * now stripped explicitly.
 */
private slugify(text: string): string {
  return text
    .toLowerCase()
    .replace(/[^\w\s-]/g, "")
    .replace(/\s+/g, "-")
    .replace(/--+/g, "-")
    .replace(/^-+|-+$/g, "");
}
// DevOps Detection Methods
// Aggregates every DevOps-tooling detection over the repository analysis into
// a single profile: containers, orchestration, CI/CD, configuration
// management, monitoring, and security tooling.
private detectDevOpsTools(analysis: any): DevOpsToolProfile {
return {
containerization: this.detectContainerization(analysis),
orchestration: this.detectOrchestration(analysis),
cicd: this.detectCICD(analysis),
configuration: this.detectConfigManagement(analysis),
monitoring: this.detectMonitoring(analysis),
security: this.detectSecurity(analysis),
};
}
/**
 * Detects container tooling (Docker / Podman) by looking for its well-known
 * configuration files among the analyzed files.
 */
private detectContainerization(analysis: any): ContainerTechnology[] {
  const files = analysis.files || [];
  const fileNames = new Set(files.map((f: any) => f.name));
  const detected: ContainerTechnology[] = [];

  // Docker: Dockerfile or either docker-compose spelling.
  const dockerMarkers = [
    "Dockerfile",
    "docker-compose.yml",
    "docker-compose.yaml",
  ];
  if (dockerMarkers.some((marker) => fileNames.has(marker))) {
    detected.push({
      name: "docker",
      version: this.extractDockerVersion(analysis),
      configFiles: this.getDockerFiles(analysis),
      usage: "containerization",
    });
  }

  // Podman: Containerfile or podman-compose.
  const podmanMarkers = ["Containerfile", "podman-compose.yml"];
  if (podmanMarkers.some((marker) => fileNames.has(marker))) {
    detected.push({
      name: "podman",
      configFiles: this.getPodmanFiles(analysis),
      usage: "containerization",
    });
  }
  return detected;
}
/**
 * Detects orchestration platforms: Kubernetes (k8s/ or kubernetes/ manifest
 * directories) and OpenShift (.s2i/ directory or a DeploymentConfig manifest).
 */
private detectOrchestration(analysis: any): OrchestrationTechnology[] {
  const files = analysis.files || [];
  const detected: OrchestrationTechnology[] = [];
  const hasPathFragment = (fragment: string) =>
    files.some((f: any) => f.path?.includes(fragment));

  if (hasPathFragment("k8s/") || hasPathFragment("kubernetes/")) {
    detected.push({
      name: "kubernetes",
      manifests: this.getKubernetesManifests(analysis),
      resources: this.analyzeKubernetesResources(analysis),
      namespaces: this.extractNamespaces(analysis),
    });
  }

  if (
    hasPathFragment(".s2i/") ||
    this.hasFileContent(analysis, "kind: DeploymentConfig")
  ) {
    detected.push({ name: "openshift" });
  }
  return detected;
}
/**
 * Detects CI/CD systems: GitHub Actions (.github/workflows/) and Tekton
 * (.tekton/ directory or tekton.dev manifests).
 */
private detectCICD(analysis: any): CICDTechnology[] {
  const files = analysis.files || [];
  const detected: CICDTechnology[] = [];
  const hasPathFragment = (fragment: string) =>
    files.some((f: any) => f.path?.includes(fragment));

  if (hasPathFragment(".github/workflows/")) {
    detected.push({ name: "github-actions" });
  }
  if (
    hasPathFragment(".tekton/") ||
    this.hasFileContent(analysis, "apiVersion: tekton.dev")
  ) {
    detected.push({ name: "tekton" });
  }
  return detected;
}
/**
 * Detects configuration-management tooling: Ansible (ansible.cfg, playbooks/
 * or roles/ directories) and Terraform (*.tf files).
 */
private detectConfigManagement(analysis: any): ConfigManagementTechnology[] {
  const files = analysis.files || [];
  const detected: ConfigManagementTechnology[] = [];

  const isAnsibleMarker = (f: any) =>
    f.name === "ansible.cfg" ||
    f.path?.includes("playbooks/") ||
    f.path?.includes("roles/");
  if (files.some(isAnsibleMarker)) {
    detected.push({
      name: "ansible",
      playbooks: this.getAnsiblePlaybooks(analysis),
      roles: this.getAnsibleRoles(analysis),
    });
  }

  if (files.some((f: any) => f.name?.endsWith(".tf"))) {
    detected.push({ name: "terraform" });
  }
  return detected;
}
/**
 * Detects monitoring tooling by scanning captured file contents for
 * "prometheus" and "grafana" markers.
 */
private detectMonitoring(analysis: any): MonitoringTechnology[] {
  const detected: MonitoringTechnology[] = [];
  const addIfMentioned = (marker: string) => {
    if (this.hasFileContent(analysis, marker)) {
      detected.push({ name: marker } as MonitoringTechnology);
    }
  };
  addIfMentioned("prometheus");
  addIfMentioned("grafana");
  return detected;
}
// Detects security tooling; currently only Falco is recognized, by scanning
// captured file contents for the substring "falco".
private detectSecurity(analysis: any): SecurityTechnology[] {
const detected: SecurityTechnology[] = [];
if (this.hasFileContent(analysis, "falco")) {
detected.push({ name: "falco" });
}
return detected;
}
// Python Framework Detection
/**
 * Detects Python web frameworks (django, fastapi, flask) from the
 * dependency package list.
 */
private detectPythonFrameworks(analysis: any): any[] {
  const dependencies = analysis.dependencies?.packages || [];
  // Order matters only for output ordering; matches the original sequence.
  const knownWebFrameworks = ["django", "fastapi", "flask"];
  return knownWebFrameworks
    .filter((name) => dependencies.includes(name))
    .map((name) => ({ name, type: "web-framework" }));
}
// Helper methods for file detection
// Returns true if any analyzed file's captured content contains the given
// substring. Only files whose content was captured (f.content present) can
// match; others are skipped via optional chaining.
private hasFileContent(analysis: any, content: string): boolean {
const files = analysis.files || [];
return files.some((f: any) => f.content?.includes(content));
}
// Placeholder: Docker version extraction is not implemented, so callers
// always receive undefined.
private extractDockerVersion(_analysis: any): string | undefined {
return undefined; // Could be implemented to parse Dockerfile
}
/**
 * Lists the names of Docker-related files (Dockerfile and docker-compose
 * variants) found in the analysis.
 *
 * Fix: f.name.includes(...) threw a TypeError for file entries without a
 * name; now guarded with optional chaining, consistent with the other
 * file-matching helpers in this class.
 */
private getDockerFiles(analysis: any): string[] {
  const files = analysis.files || [];
  return files
    .filter(
      (f: any) =>
        f.name === "Dockerfile" || f.name?.includes("docker-compose"),
    )
    .map((f: any) => f.name);
}
/**
 * Lists the names of Podman-related files (Containerfile and podman-compose
 * variants) found in the analysis.
 *
 * Fix: f.name.includes(...) threw a TypeError for file entries without a
 * name; now guarded with optional chaining, consistent with the other
 * file-matching helpers in this class.
 */
private getPodmanFiles(analysis: any): string[] {
  const files = analysis.files || [];
  return files
    .filter(
      (f: any) =>
        f.name === "Containerfile" || f.name?.includes("podman-compose"),
    )
    .map((f: any) => f.name);
}
/**
 * Lists names of files located under k8s/ or kubernetes/ directories.
 */
private getKubernetesManifests(analysis: any): string[] {
  const files = analysis.files || [];
  const inManifestDirectory = (f: any) =>
    f.path?.includes("k8s/") || f.path?.includes("kubernetes/");
  return files.filter(inManifestDirectory).map((f: any) => f.name);
}
// Placeholder: returns a fixed set of common resource kinds rather than
// parsing the actual manifests.
private analyzeKubernetesResources(_analysis: any): string[] {
return ["Deployment", "Service", "ConfigMap"]; // Simplified
}
// Placeholder: returns the default namespace only; manifests are not parsed.
private extractNamespaces(_analysis: any): string[] {
return ["default"]; // Simplified
}
/**
 * Lists names of YAML playbooks found under a playbooks/ directory.
 * Note: only the .yml extension is matched, mirroring the original behavior.
 */
private getAnsiblePlaybooks(analysis: any): string[] {
  const files = analysis.files || [];
  const isPlaybook = (f: any) =>
    f.path?.includes("playbooks/") && f.name?.endsWith(".yml");
  return files.filter(isPlaybook).map((f: any) => f.name);
}
/**
 * Lists names of all files located under a roles/ directory.
 */
private getAnsibleRoles(analysis: any): string[] {
  const files = analysis.files || [];
  const inRolesDirectory = (f: any) => f.path?.includes("roles/");
  return files.filter(inRolesDirectory).map((f: any) => f.name);
}
// Content generation methods for new features
// Renders a containerization tutorial (markdown) tailored to the project's
// name, primary language, and detected container tool. The file name in the
// instructions switches between Dockerfile (docker) and Containerfile
// (podman/other); the actual file content comes from
// generateContainerFileContent.
private generateContainerTutorialContent(
_analysis: any,
_containerTech: ContainerTechnology,
): string {
return `# Containerizing ${_analysis.metadata.projectName} with ${
_containerTech.name
}
Learn how to package your ${
_analysis.metadata.primaryLanguage
} application into a container for consistent deployment across environments.
## Prerequisites
- ${_containerTech.name} installed on your system
- Basic understanding of containerization concepts
- Your application running locally
## Understanding Containerization
Containers provide a lightweight, portable way to package applications with all their dependencies. This ensures your application runs consistently across different environments.
## Creating a ${
_containerTech.name === "docker" ? "Dockerfile" : "Containerfile"
}
1. Create a ${
_containerTech.name === "docker" ? "Dockerfile" : "Containerfile"
} in your project root:
\`\`\`dockerfile
${this.generateContainerFileContent(_analysis, _containerTech)}
\`\`\`
## Building Your Container Image
\`\`\`bash
${_containerTech.name} build -t ${_analysis.metadata.projectName}:latest .
\`\`\`
## Running Your Container
\`\`\`bash
${_containerTech.name} run -p 3000:3000 ${_analysis.metadata.projectName}:latest
\`\`\`
## Best Practices
- Use multi-stage builds to reduce image size
- Don't run containers as root user
- Use .dockerignore to exclude unnecessary files
- Pin base image versions for reproducibility
## Next Steps
- Learn about container orchestration with Kubernetes
- Set up automated builds in CI/CD pipeline
- Implement health checks for production deployments
`;
}
// Renders an orchestration-deployment tutorial (markdown) for the detected
// platform. The kubectl commands are emitted regardless of platform name;
// manifest bodies come from generateKubernetesManifest.
private generateOrchestrationTutorialContent(
_analysis: any,
_orchestrationTech: OrchestrationTechnology,
): string {
return `# Deploying ${_analysis.metadata.projectName} to ${
_orchestrationTech.name
}
Deploy your containerized application to ${
_orchestrationTech.name
} for scalable, production-ready hosting.
## Prerequisites
- ${_orchestrationTech.name} cluster access
- kubectl CLI tool installed
- Container image built and pushed to registry
## Understanding ${_orchestrationTech.name}
${
_orchestrationTech.name
} is a container orchestration platform that automates deployment, scaling, and management of containerized applications.
## Creating Deployment Manifests
1. Create a deployment configuration:
\`\`\`yaml
${this.generateKubernetesManifest(_analysis, "deployment")}
\`\`\`
2. Create a service configuration:
\`\`\`yaml
${this.generateKubernetesManifest(_analysis, "service")}
\`\`\`
## Deploying to ${_orchestrationTech.name}
\`\`\`bash
kubectl apply -f deployment.yaml
kubectl apply -f service.yaml
\`\`\`
## Monitoring Your Deployment
\`\`\`bash
kubectl get pods
kubectl get services
kubectl logs -f deployment/${_analysis.metadata.projectName}
\`\`\`
## Scaling Your Application
\`\`\`bash
kubectl scale deployment ${_analysis.metadata.projectName} --replicas=3
\`\`\`
## Next Steps
- Set up ingress for external access
- Configure persistent storage
- Implement monitoring and logging
`;
}
// Renders a Python virtual-environment setup guide (markdown); the project
// name is interpolated into the conda environment name.
private generatePythonEnvironmentContent(_analysis: any): string {
return `# Python Virtual Environment Setup
Set up an isolated Python development environment for ${_analysis.metadata.projectName}.
## Why Virtual Environments?
Virtual environments isolate project dependencies, preventing conflicts between different projects and ensuring reproducible builds.
## Creating a Virtual Environment
### Using venv (Python 3.3+)
\`\`\`bash
python -m venv venv
\`\`\`
### Using virtualenv
\`\`\`bash
pip install virtualenv
virtualenv venv
\`\`\`
### Using conda
\`\`\`bash
conda create --name ${_analysis.metadata.projectName} python=3.11
\`\`\`
## Activating the Environment
### Linux/macOS
\`\`\`bash
source venv/bin/activate
\`\`\`
### Windows
\`\`\`bash
venv\\Scripts\\activate
\`\`\`
### Conda
\`\`\`bash
conda activate ${_analysis.metadata.projectName}
\`\`\`
## Installing Dependencies
\`\`\`bash
pip install -r requirements.txt
\`\`\`
## Development Tools
Install useful development tools:
\`\`\`bash
pip install black flake8 pytest mypy
\`\`\`
## Deactivating the Environment
\`\`\`bash
deactivate
\`\`\`
## Best Practices
- Always use virtual environments for Python projects
- Keep requirements.txt updated
- Use requirements-dev.txt for development dependencies
- Consider using poetry or pipenv for advanced dependency management
`;
}
/**
 * Dispatches to a framework-specific tutorial generator (django, fastapi,
 * flask) or falls back to a generic framework tutorial template.
 */
private generatePythonFrameworkTutorialContent(
  _analysis: any,
  framework: any,
): string {
  switch (framework.name) {
    case "django":
      return this.generateDjangoTutorialContent(_analysis);
    case "fastapi":
      return this.generateFastAPITutorialContent(_analysis);
    case "flask":
      return this.generateFlaskTutorialContent(_analysis);
    default:
      // Generic fallback for frameworks without a dedicated template.
      return `# Building Applications with ${framework.name}
Learn how to build applications using ${framework.name}.
## Getting Started
Install ${framework.name}:
\`\`\`bash
pip install ${framework.name}
\`\`\`
## Basic Application Structure
Create your first ${framework.name} application and explore the framework's core concepts.
`;
  }
}
// Renders a Django getting-started tutorial (markdown); the project name is
// interpolated into the startproject commands.
private generateDjangoTutorialContent(_analysis: any): string {
return `# Building Applications with Django
Create robust web applications using Django's Model-View-Template architecture.
## Project Setup
1. Install Django:
\`\`\`bash
pip install django
\`\`\`
2. Create a new Django project:
\`\`\`bash
django-admin startproject ${_analysis.metadata.projectName}
cd ${_analysis.metadata.projectName}
\`\`\`
3. Create your first app:
\`\`\`bash
python manage.py startapp core
\`\`\`
## Understanding Django Architecture
Django follows the MTV (Model-View-Template) pattern:
- **Models**: Define your data structure
- **Views**: Handle business logic and user interactions
- **Templates**: Control presentation layer
## Creating Your First Model
\`\`\`python
# core/models.py
from django.db import models
class Item(models.Model):
name = models.CharField(max_length=100)
description = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
\`\`\`
## Running Migrations
\`\`\`bash
python manage.py makemigrations
python manage.py migrate
\`\`\`
## Creating Views
\`\`\`python
# core/views.py
from django.shortcuts import render
from .models import Item
def item_list(request):
items = Item.objects.all()
return render(request, 'core/item_list.html', {'items': items})
\`\`\`
## URL Configuration
\`\`\`python
# core/urls.py
from django.urls import path
from . import views
urlpatterns = [
path('', views.item_list, name='item_list'),
]
\`\`\`
## Running the Development Server
\`\`\`bash
python manage.py runserver
\`\`\`
## Next Steps
- Learn about Django REST Framework for API development
- Explore Django's admin interface
- Implement user authentication
- Deploy with Gunicorn and PostgreSQL
`;
}
// Renders a FastAPI getting-started tutorial (markdown); the analysis
// parameter is currently unused (underscore-prefixed by convention).
private generateFastAPITutorialContent(_analysis: any): string {
return `# Building APIs with FastAPI
Create modern, fast APIs with automatic documentation using FastAPI.
## Installation
\`\`\`bash
pip install fastapi uvicorn
\`\`\`
## Basic Application
\`\`\`python
# main.py
from fastapi import FastAPI
from pydantic import BaseModel
app = FastAPI()
class Item(BaseModel):
name: str
description: str = None
price: float
tax: float = None
@app.get("/")
async def read_root():
return {"Hello": "World"}
@app.post("/items/")
async def create_item(item: Item):
return item
@app.get("/items/{item_id}")
async def read_item(item_id: int, q: str = None):
return {"item_id": item_id, "q": q}
\`\`\`
## Running the Server
\`\`\`bash
uvicorn main:app --reload
\`\`\`
## Interactive Documentation
FastAPI automatically generates interactive API documentation:
- Swagger UI: http://127.0.0.1:8000/docs
- ReDoc: http://127.0.0.1:8000/redoc
## Key Features
- **Type Hints**: Python type hints for automatic validation
- **Async Support**: Native async/await support
- **Dependency Injection**: Powerful dependency injection system
- **Security**: Built-in security utilities
## Next Steps
- Add database integration with SQLAlchemy
- Implement authentication with JWT
- Add background tasks
- Deploy with Docker
`;
}
// Renders a Flask getting-started tutorial (markdown); the analysis
// parameter is currently unused (underscore-prefixed by convention).
private generateFlaskTutorialContent(_analysis: any): string {
return `# Building Applications with Flask
Create lightweight web applications and APIs using Flask's minimalist approach.
## Installation
\`\`\`bash
pip install flask
\`\`\`
## Basic Application
\`\`\`python
# app.py
from flask import Flask, request, jsonify
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Hello, World!'
@app.route('/api/items', methods=['GET', 'POST'])
def handle_items():
if request.method == 'POST':
data = request.get_json()
return jsonify(data), 201
else:
return jsonify([{"name": "item1"}, {"name": "item2"}])
if __name__ == '__main__':
app.run(debug=True)
\`\`\`
## Running the Application
\`\`\`bash
python app.py
\`\`\`
## Application Factory Pattern
For larger applications, use the application factory pattern:
\`\`\`python
# app/__init__.py
from flask import Flask
def create_app():
app = Flask(__name__)
app.config.from_object('config.Config')
from app.main import bp as main_bp
app.register_blueprint(main_bp)
return app
\`\`\`
## Flask Extensions
Popular Flask extensions:
- **Flask-SQLAlchemy**: Database ORM
- **Flask-Login**: User session management
- **Flask-WTF**: Form handling and validation
- **Flask-Migrate**: Database migrations
## Next Steps
- Structure larger applications with Blueprints
- Add database integration
- Implement user authentication
- Deploy with Gunicorn
`;
}
// Helper methods for container content generation
// Returns a Dockerfile/Containerfile body appropriate for the project's
// primary language: Python (Django-style runserver), Node (npm start), or a
// generic Ubuntu image expecting a start.sh script.
private generateContainerFileContent(
_analysis: any,
_containerTech: ContainerTechnology,
): string {
const language = _analysis.metadata.primaryLanguage?.toLowerCase();
if (language === "python") {
return `FROM python:3.11-slim
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
EXPOSE 8000
CMD ["python", "manage.py", "runserver", "0.0.0.0:8000"]`;
} else if (language === "javascript" || language === "typescript") {
return `FROM node:18-alpine
WORKDIR /app
COPY package*.json ./
RUN npm ci --only=production
COPY . .
EXPOSE 3000
USER node
CMD ["npm", "start"]`;
} else {
return `FROM ubuntu:22.04
WORKDIR /app
COPY . .
EXPOSE 8080
CMD ["./start.sh"]`;
}
}
// Returns a YAML manifest for the requested kind: "deployment" (3 replicas,
// container port 3000) or "service" (LoadBalancer, port 80 -> 3000).
// Unknown kinds return an empty string.
private generateKubernetesManifest(analysis: any, type: string): string {
if (type === "deployment") {
return `apiVersion: apps/v1
kind: Deployment
metadata:
name: ${analysis.metadata.projectName}
labels:
app: ${analysis.metadata.projectName}
spec:
replicas: 3
selector:
matchLabels:
app: ${analysis.metadata.projectName}
template:
metadata:
labels:
app: ${analysis.metadata.projectName}
spec:
containers:
- name: ${analysis.metadata.projectName}
image: ${analysis.metadata.projectName}:latest
ports:
- containerPort: 3000
env:
- name: NODE_ENV
value: "production"`;
} else if (type === "service") {
return `apiVersion: v1
kind: Service
metadata:
name: ${analysis.metadata.projectName}-service
spec:
selector:
app: ${analysis.metadata.projectName}
ports:
- protocol: TCP
port: 80
targetPort: 3000
type: LoadBalancer`;
}
return "";
}
/**
 * Returns shell-command example snippets (build, run, ps) for the detected
 * container tool and project image name.
 */
private generateContainerExamples(
  _analysis: any,
  _containerTech: ContainerTechnology,
): string[] {
  const tool = _containerTech.name;
  const image = `${_analysis.metadata.projectName}:latest`;
  return [
    `# Build the container image
${tool} build -t ${image} .`,
    `# Run the container locally
${tool} run -p 3000:3000 -d ${image}`,
    `# View running containers
${tool} ps`,
  ];
}
/**
 * Returns kubectl example snippets (apply, status, logs) for the project's
 * deployment name.
 */
private generateOrchestrationExamples(
  _analysis: any,
  _orchestrationTech: OrchestrationTechnology,
): string[] {
  const deploymentName = _analysis.metadata.projectName;
  const commands: string[] = [
    `# Deploy the application
kubectl apply -f k8s/`,
    `# Check deployment status
kubectl get deployments`,
    `# View application logs
kubectl logs -f deployment/${deploymentName}`,
  ];
  return commands;
}
/**
 * Returns shell-command example snippets for creating, activating, and
 * installing into a Python virtual environment.
 */
private generatePythonEnvironmentExamples(): string[] {
  const createEnv = `# Create virtual environment
python -m venv venv`;
  const activateEnv = `# Activate environment (Linux/macOS)
source venv/bin/activate`;
  const installDeps = `# Install dependencies
pip install -r requirements.txt`;
  return [createEnv, activateEnv, installDeps];
}
/**
 * Returns example commands for a Python framework. Only Django currently
 * has examples; all other frameworks yield an empty list.
 */
private generatePythonFrameworkExamples(framework: any): string[] {
  if (framework.name !== "django") {
    return [];
  }
  return [
    `# Create Django project
django-admin startproject myproject`,
    `# Run development server
python manage.py runserver`,
    `# Create superuser
python manage.py createsuperuser`,
  ];
}
}
// Export the tool implementation
// MCP tool definition for populate_diataxis_content. The JSON schema mirrors
// the arguments consumed by ContentPopulationEngine.populateContent;
// analysisId and docsPath are the only required inputs.
export const populateDiataxisContent: Tool = {
name: "populate_diataxis_content",
description:
"Intelligently populate Diataxis documentation with project-specific content",
inputSchema: {
type: "object",
properties: {
analysisId: {
type: "string",
description: "Repository analysis ID from analyze_repository tool",
},
docsPath: {
type: "string",
description: "Path to documentation directory",
},
populationLevel: {
type: "string",
enum: ["basic", "comprehensive", "intelligent"],
default: "comprehensive",
description: "Level of content generation detail",
},
includeProjectSpecific: {
type: "boolean",
default: true,
description: "Generate project-specific examples and code",
},
preserveExisting: {
type: "boolean",
default: true,
description: "Preserve any existing content",
},
technologyFocus: {
type: "array",
items: { type: "string" },
description: "Specific technologies to emphasize in content",
},
},
required: ["analysisId", "docsPath"],
},
};
/**
 * MCP handler for the populate_diataxis_content tool. A fresh
 * ContentPopulationEngine is created per invocation and delegated to.
 *
 * @param args - Tool arguments (see populateDiataxisContent.inputSchema).
 * @param context - Optional MCP execution context, forwarded to the engine.
 * @returns The population result produced by the engine.
 */
export async function handlePopulateDiataxisContent(
  args: any,
  context?: any,
): Promise<PopulationResult> {
  const engine = new ContentPopulationEngine();
  return engine.populateContent(args, context);
}
```
--------------------------------------------------------------------------------
/src/index.ts:
--------------------------------------------------------------------------------
```typescript
#!/usr/bin/env node
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import {
CallToolRequestSchema,
ListToolsRequestSchema,
ListPromptsRequestSchema,
GetPromptRequestSchema,
ListResourcesRequestSchema,
ReadResourceRequestSchema,
ListRootsRequestSchema,
} from "@modelcontextprotocol/sdk/types.js";
import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
import { readFileSync } from "fs";
import { fileURLToPath } from "url";
import path, { dirname, join } from "path";
import { analyzeRepository } from "./tools/analyze-repository.js";
import { recommendSSG } from "./tools/recommend-ssg.js";
import { generateConfig } from "./tools/generate-config.js";
import { setupStructure } from "./tools/setup-structure.js";
import { deployPages } from "./tools/deploy-pages.js";
import { verifyDeployment } from "./tools/verify-deployment.js";
import { setupPlaywrightTests } from "./tools/setup-playwright-tests.js";
import { handlePopulateDiataxisContent } from "./tools/populate-content.js";
import {
handleValidateDiataxisContent,
validateGeneralContent,
} from "./tools/validate-content.js";
import { handleUpdateExistingDocumentation } from "./tools/update-existing-documentation.js";
import { detectDocumentationGaps } from "./tools/detect-gaps.js";
import { testLocalDeployment } from "./tools/test-local-deployment.js";
import { evaluateReadmeHealth } from "./tools/evaluate-readme-health.js";
import { readmeBestPractices } from "./tools/readme-best-practices.js";
import { checkDocumentationLinks } from "./tools/check-documentation-links.js";
import { generateReadmeTemplate } from "./tools/generate-readme-template.js";
import { validateReadmeChecklist } from "./tools/validate-readme-checklist.js";
import { analyzeReadme } from "./tools/analyze-readme.js";
import { optimizeReadme } from "./tools/optimize-readme.js";
import { managePreferences } from "./tools/manage-preferences.js";
import { analyzeDeployments } from "./tools/analyze-deployments.js";
import { handleSyncCodeToDocs } from "./tools/sync-code-to-docs.js";
import { handleGenerateContextualContent } from "./tools/generate-contextual-content.js";
import { trackDocumentationFreshness } from "./tools/track-documentation-freshness.js";
import { validateDocumentationFreshness } from "./tools/validate-documentation-freshness.js";
import {
manageSitemap,
ManageSitemapInputSchema,
} from "./tools/manage-sitemap.js";
import {
generateLLMContext,
GenerateLLMContextInputSchema,
setToolDefinitions,
} from "./tools/generate-llm-context.js";
import { formatMCPResponse } from "./types/api.js";
import {
isPathAllowed,
getPermissionDeniedMessage,
} from "./utils/permission-checker.js";
import { promises as fs } from "fs";
import { generateTechnicalWriterPrompts } from "./prompts/technical-writer-prompts.js";
import {
DOCUMENTATION_WORKFLOWS,
WORKFLOW_EXECUTION_GUIDANCE,
WORKFLOW_METADATA,
} from "./workflows/documentation-workflow.js";
import {
initializeMemory,
rememberAnalysis,
rememberRecommendation,
getProjectInsights,
getSimilarProjects,
getMemoryStatistics,
exportMemories,
cleanupOldMemories,
memoryTools,
} from "./memory/index.js";
// Get version from package.json
// Resolve this module's directory (ESM has no __dirname) and read the
// package.json one level up to report the server's own version.
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const packageJson = JSON.parse(
readFileSync(join(__dirname, "..", "package.json"), "utf-8"),
);
// Parse allowed roots from command line arguments
// Each "--root <path>" pair adds one allowed root; "~" is expanded to the
// user's home directory before resolving to an absolute path.
const allowedRoots: string[] = [];
process.argv.forEach((arg, index) => {
if (arg === "--root" && process.argv[index + 1]) {
const rootPath = process.argv[index + 1];
// Resolve to absolute path and expand ~ for home directory
const expandedPath = rootPath.startsWith("~")
? join(
process.env.HOME || process.env.USERPROFILE || "",
rootPath.slice(1),
)
: rootPath;
allowedRoots.push(path.resolve(expandedPath));
}
});
// If no roots specified, allow current working directory by default
if (allowedRoots.length === 0) {
allowedRoots.push(process.cwd());
}
// Construct the MCP server, advertising tool, prompt, resource, and roots
// capabilities; the version is taken from this package's package.json.
const server = new Server(
{
name: "documcp",
version: packageJson.version,
},
{
capabilities: {
tools: {},
prompts: {
listChanged: true,
},
resources: {
subscribe: true,
listChanged: true,
},
roots: {
listChanged: true,
},
},
},
);
// Tool definitions following ADR-006
const TOOLS = [
{
name: "analyze_repository",
description:
"Analyze repository structure, dependencies, and documentation needs",
inputSchema: z.object({
path: z.string().describe("Path to the repository to analyze"),
depth: z
.enum(["quick", "standard", "deep"])
.optional()
.default("standard"),
}),
},
{
name: "recommend_ssg",
description:
"Recommend the best static site generator based on project analysis and user preferences",
inputSchema: z.object({
analysisId: z.string().describe("ID from previous repository analysis"),
userId: z
.string()
.optional()
.default("default")
.describe(
"User ID for personalized recommendations based on usage history",
),
preferences: z
.object({
priority: z
.enum(["simplicity", "features", "performance"])
.optional(),
ecosystem: z
.enum(["javascript", "python", "ruby", "go", "any"])
.optional(),
})
.optional(),
}),
},
{
name: "generate_config",
description:
"Generate configuration files for the selected static site generator",
inputSchema: z.object({
ssg: z.enum(["jekyll", "hugo", "docusaurus", "mkdocs", "eleventy"]),
projectName: z.string(),
projectDescription: z.string().optional(),
outputPath: z.string().describe("Where to generate config files"),
}),
},
{
name: "setup_structure",
description: "Create Diataxis-compliant documentation structure",
inputSchema: z.object({
path: z.string().describe("Root path for documentation"),
ssg: z.enum(["jekyll", "hugo", "docusaurus", "mkdocs", "eleventy"]),
includeExamples: z.boolean().optional().default(true),
}),
},
{
name: "setup_playwright_tests",
description:
"Generate Playwright E2E test setup for documentation site (containers + CI/CD)",
inputSchema: z.object({
repositoryPath: z.string().describe("Path to documentation repository"),
ssg: z.enum(["jekyll", "hugo", "docusaurus", "mkdocs", "eleventy"]),
projectName: z.string().describe("Project name for tests"),
mainBranch: z.string().optional().default("main"),
includeAccessibilityTests: z.boolean().optional().default(true),
includeDockerfile: z.boolean().optional().default(true),
includeGitHubActions: z.boolean().optional().default(true),
}),
},
{
name: "deploy_pages",
description:
"Set up GitHub Pages deployment workflow with deployment tracking and preference learning",
inputSchema: z.object({
repository: z.string().describe("Repository path or URL"),
ssg: z.enum(["jekyll", "hugo", "docusaurus", "mkdocs", "eleventy"]),
branch: z.string().optional().default("gh-pages"),
customDomain: z.string().optional(),
projectPath: z
.string()
.optional()
.describe("Local path to the project for tracking"),
projectName: z.string().optional().describe("Project name for tracking"),
analysisId: z
.string()
.optional()
.describe("ID from repository analysis for linking"),
userId: z
.string()
.optional()
.default("default")
.describe("User ID for preference tracking"),
}),
},
{
name: "verify_deployment",
description: "Verify and troubleshoot GitHub Pages deployment",
inputSchema: z.object({
repository: z.string().describe("Repository path or URL"),
url: z.string().optional().describe("Expected deployment URL"),
}),
},
{
name: "populate_diataxis_content",
description:
"Intelligently populate Diataxis documentation with project-specific content",
inputSchema: z.object({
analysisId: z
.string()
.describe("Repository analysis ID from analyze_repository tool"),
docsPath: z.string().describe("Path to documentation directory"),
populationLevel: z
.enum(["basic", "comprehensive", "intelligent"])
.optional()
.default("comprehensive"),
includeProjectSpecific: z.boolean().optional().default(true),
preserveExisting: z.boolean().optional().default(true),
technologyFocus: z
.array(z.string())
.optional()
.describe("Specific technologies to emphasize"),
}),
},
{
name: "update_existing_documentation",
description:
"Intelligently analyze and update existing documentation using memory insights and code comparison",
inputSchema: z.object({
analysisId: z
.string()
.describe("Repository analysis ID from analyze_repository tool"),
docsPath: z.string().describe("Path to existing documentation directory"),
compareMode: z
.enum(["comprehensive", "gap-detection", "accuracy-check"])
.optional()
.default("comprehensive")
.describe("Mode of comparison between code and documentation"),
updateStrategy: z
.enum(["conservative", "moderate", "aggressive"])
.optional()
.default("moderate")
.describe("How aggressively to suggest updates"),
preserveStyle: z
.boolean()
.optional()
.default(true)
.describe("Preserve existing documentation style and formatting"),
focusAreas: z
.array(z.string())
.optional()
.describe(
'Specific areas to focus updates on (e.g., "dependencies", "scripts", "api")',
),
}),
},
{
name: "validate_diataxis_content",
description:
"Validate the accuracy, completeness, and compliance of generated Diataxis documentation",
inputSchema: z.object({
contentPath: z
.string()
.describe("Path to the documentation directory to validate"),
analysisId: z
.string()
.optional()
.describe(
"Optional repository analysis ID for context-aware validation",
),
validationType: z
.enum(["accuracy", "completeness", "compliance", "all"])
.optional()
.default("all")
.describe(
"Type of validation: accuracy, completeness, compliance, or all",
),
includeCodeValidation: z
.boolean()
.optional()
.default(true)
.describe("Whether to validate code examples"),
confidence: z
.enum(["strict", "moderate", "permissive"])
.optional()
.default("moderate")
.describe(
"Validation confidence level: strict, moderate, or permissive",
),
}),
},
{
name: "validate_content",
description:
"Validate general content quality: broken links, code syntax, references, and basic accuracy",
inputSchema: z.object({
contentPath: z
.string()
.describe("Path to the content directory to validate"),
validationType: z
.string()
.optional()
.default("all")
.describe("Type of validation: links, code, references, or all"),
includeCodeValidation: z
.boolean()
.optional()
.default(true)
.describe("Whether to validate code blocks"),
followExternalLinks: z
.boolean()
.optional()
.default(false)
.describe("Whether to validate external URLs (slower)"),
}),
},
{
name: "detect_documentation_gaps",
description:
"Analyze repository and existing documentation to identify missing content and gaps",
inputSchema: z.object({
repositoryPath: z.string().describe("Path to the repository to analyze"),
documentationPath: z
.string()
.optional()
.describe("Path to existing documentation (if any)"),
analysisId: z
.string()
.optional()
.describe("Optional existing analysis ID to reuse"),
depth: z
.enum(["quick", "standard", "comprehensive"])
.optional()
.default("standard"),
}),
},
{
name: "test_local_deployment",
description:
"Test documentation build and local server before deploying to GitHub Pages",
inputSchema: z.object({
repositoryPath: z.string().describe("Path to the repository"),
ssg: z.enum(["jekyll", "hugo", "docusaurus", "mkdocs", "eleventy"]),
port: z
.number()
.optional()
.default(3000)
.describe("Port for local server"),
timeout: z
.number()
.optional()
.default(60)
.describe("Timeout in seconds for build process"),
skipBuild: z
.boolean()
.optional()
.default(false)
.describe("Skip build step and only start server"),
}),
},
{
name: "evaluate_readme_health",
description:
"Evaluate README files for community health, accessibility, and onboarding effectiveness",
inputSchema: z.object({
readme_path: z.string().describe("Path to the README file to evaluate"),
project_type: z
.enum([
"community_library",
"enterprise_tool",
"personal_project",
"documentation",
])
.optional()
.default("community_library")
.describe("Type of project for tailored evaluation"),
repository_path: z
.string()
.optional()
.describe("Optional path to repository for additional context"),
}),
},
{
name: "readme_best_practices",
description:
"Analyze README files against best practices checklist and generate templates for improvement",
inputSchema: z.object({
readme_path: z.string().describe("Path to the README file to analyze"),
project_type: z
.enum(["library", "application", "tool", "documentation", "framework"])
.optional()
.default("library")
.describe("Type of project for tailored analysis"),
generate_template: z
.boolean()
.optional()
.default(false)
.describe("Generate README templates and community files"),
output_directory: z
.string()
.optional()
.describe("Directory to write generated templates and community files"),
include_community_files: z
.boolean()
.optional()
.default(true)
.describe(
"Generate community health files (CONTRIBUTING.md, CODE_OF_CONDUCT.md, etc.)",
),
target_audience: z
.enum(["beginner", "intermediate", "advanced", "mixed"])
.optional()
.default("mixed")
.describe("Target audience for recommendations"),
}),
},
{
name: "check_documentation_links",
description:
"Comprehensive link checking for documentation deployment with external, internal, and anchor link validation",
inputSchema: z.object({
documentation_path: z
.string()
.optional()
.default("./docs")
.describe("Path to the documentation directory to check"),
check_external_links: z
.boolean()
.optional()
.default(true)
.describe("Validate external URLs (slower but comprehensive)"),
check_internal_links: z
.boolean()
.optional()
.default(true)
.describe("Validate internal file references"),
check_anchor_links: z
.boolean()
.optional()
.default(true)
.describe("Validate anchor links within documents"),
timeout_ms: z
.number()
.min(1000)
.max(30000)
.optional()
.default(5000)
.describe("Timeout for external link requests in milliseconds"),
max_concurrent_checks: z
.number()
.min(1)
.max(20)
.optional()
.default(5)
.describe("Maximum concurrent link checks"),
allowed_domains: z
.array(z.string())
.optional()
.default([])
.describe(
"Whitelist of allowed external domains (empty = all allowed)",
),
ignore_patterns: z
.array(z.string())
.optional()
.default([])
.describe("URL patterns to ignore during checking"),
fail_on_broken_links: z
.boolean()
.optional()
.default(false)
.describe("Fail the check if broken links are found"),
output_format: z
.enum(["summary", "detailed", "json"])
.optional()
.default("detailed")
.describe("Output format for results"),
}),
},
{
name: "generate_readme_template",
description:
"Generate standardized README templates for different project types with best practices",
inputSchema: z.object({
projectName: z.string().min(1).describe("Name of the project"),
description: z
.string()
.min(1)
.describe("Brief description of what the project does"),
templateType: z
.enum(["library", "application", "cli-tool", "api", "documentation"])
.describe("Type of project template to generate"),
author: z
.string()
.optional()
.describe("Project author/organization name"),
license: z.string().optional().default("MIT").describe("Project license"),
includeScreenshots: z
.boolean()
.optional()
.default(false)
.describe("Include screenshot placeholders for applications"),
includeBadges: z
.boolean()
.optional()
.default(true)
.describe("Include status badges"),
includeContributing: z
.boolean()
.optional()
.default(true)
.describe("Include contributing section"),
outputPath: z
.string()
.optional()
.describe("Path to write the generated README.md file"),
}),
},
{
name: "validate_readme_checklist",
description:
"Validate README files against community best practices checklist with detailed scoring",
inputSchema: z.object({
readmePath: z
.string()
.min(1)
.describe("Path to the README file to validate"),
projectPath: z
.string()
.optional()
.describe("Path to project directory for additional context"),
strict: z
.boolean()
.optional()
.default(false)
.describe("Use strict validation rules"),
outputFormat: z
.enum(["json", "markdown", "console"])
.optional()
.default("console")
.describe("Output format for the validation report"),
}),
},
{
name: "analyze_readme",
description:
"Comprehensive README analysis with length assessment, structure evaluation, and optimization opportunities",
inputSchema: z.object({
project_path: z
.string()
.min(1)
.describe("Path to the project directory containing README"),
target_audience: z
.enum([
"community_contributors",
"enterprise_users",
"developers",
"general",
])
.optional()
.default("community_contributors")
.describe("Target audience for analysis"),
optimization_level: z
.enum(["light", "moderate", "aggressive"])
.optional()
.default("moderate")
.describe("Level of optimization suggestions"),
max_length_target: z
.number()
.min(50)
.max(1000)
.optional()
.default(300)
.describe("Target maximum length in lines"),
}),
},
{
name: "optimize_readme",
description:
"Optimize README content by restructuring, condensing, and extracting detailed sections to separate documentation",
inputSchema: z.object({
readme_path: z
.string()
.min(1)
.describe("Path to the README file to optimize"),
strategy: z
.enum([
"community_focused",
"enterprise_focused",
"developer_focused",
"general",
])
.optional()
.default("community_focused")
.describe("Optimization strategy"),
max_length: z
.number()
.min(50)
.max(1000)
.optional()
.default(300)
.describe("Target maximum length in lines"),
include_tldr: z
.boolean()
.optional()
.default(true)
.describe("Generate and include TL;DR section"),
preserve_existing: z
.boolean()
.optional()
.default(true)
.describe("Preserve existing content structure where possible"),
output_path: z
.string()
.optional()
.describe(
"Path to write optimized README (if not specified, returns content only)",
),
create_docs_directory: z
.boolean()
.optional()
.default(true)
.describe("Create docs/ directory for extracted content"),
}),
},
{
name: "manage_preferences",
description:
"Manage user preferences for documentation generation and SSG recommendations",
inputSchema: z.object({
action: z
.enum(["get", "update", "reset", "export", "import", "recommendations"])
.describe("Action to perform on preferences"),
userId: z
.string()
.optional()
.default("default")
.describe("User ID for multi-user setups"),
preferences: z
.object({
preferredSSGs: z
.array(z.string())
.optional()
.describe("List of preferred static site generators"),
documentationStyle: z
.enum(["minimal", "comprehensive", "tutorial-heavy"])
.optional()
.describe("Preferred documentation style"),
expertiseLevel: z
.enum(["beginner", "intermediate", "advanced"])
.optional()
.describe("User's technical expertise level"),
preferredTechnologies: z
.array(z.string())
.optional()
.describe("Preferred technologies and frameworks"),
preferredDiataxisCategories: z
.array(z.enum(["tutorials", "how-to", "reference", "explanation"]))
.optional()
.describe("Preferred Diataxis documentation categories"),
autoApplyPreferences: z
.boolean()
.optional()
.describe("Automatically apply preferences to recommendations"),
})
.optional()
.describe("Preference updates (for update action)"),
json: z.string().optional().describe("JSON string for import action"),
}),
},
{
name: "analyze_deployments",
description:
"Analyze deployment patterns and generate insights from historical deployment data",
inputSchema: z.object({
analysisType: z
.enum(["full_report", "ssg_stats", "compare", "health", "trends"])
.optional()
.default("full_report")
.describe(
"Type of analysis: full_report (comprehensive), ssg_stats (per-SSG), compare (compare SSGs), health (deployment health score), trends (temporal analysis)",
),
ssg: z.string().optional().describe("SSG name for ssg_stats analysis"),
ssgs: z
.array(z.string())
.optional()
.describe("Array of SSG names for comparison"),
periodDays: z
.number()
.optional()
.default(30)
.describe("Period in days for trend analysis"),
}),
},
{
name: "read_directory",
description:
"List files and directories within allowed roots. Use this to discover files without requiring full absolute paths from the user.",
inputSchema: z.object({
path: z
.string()
.describe(
"Path to directory (relative to root or absolute within root)",
),
}),
},
// Phase 3: Code-to-Documentation Synchronization
{
name: "sync_code_to_docs",
description:
"Automatically synchronize documentation with code changes using AST-based drift detection (Phase 3)",
inputSchema: z.object({
projectPath: z.string().describe("Path to the project root directory"),
docsPath: z.string().describe("Path to the documentation directory"),
mode: z
.enum(["detect", "preview", "apply", "auto"])
.default("detect")
.describe(
"Sync mode: detect=analyze only, preview=show changes, apply=apply safe changes, auto=apply all",
),
autoApplyThreshold: z
.number()
.min(0)
.max(1)
.default(0.8)
.describe(
"Confidence threshold (0-1) for automatic application of changes",
),
createSnapshot: z
.boolean()
.default(true)
.describe("Create a snapshot before making changes (recommended)"),
}),
},
{
name: "generate_contextual_content",
description:
"Generate context-aware documentation using AST analysis and knowledge graph insights (Phase 3)",
inputSchema: z.object({
filePath: z.string().describe("Path to the source code file to document"),
documentationType: z
.enum(["tutorial", "how-to", "reference", "explanation", "all"])
.default("reference")
.describe("Type of Diataxis documentation to generate"),
includeExamples: z
.boolean()
.default(true)
.describe("Include code examples in generated documentation"),
style: z
.enum(["concise", "detailed", "verbose"])
.default("detailed")
.describe("Documentation detail level"),
outputFormat: z
.enum(["markdown", "mdx", "html"])
.default("markdown")
.describe("Output format for generated content"),
}),
},
// Documentation Freshness Tracking
{
name: "track_documentation_freshness",
description:
"Scan documentation directory for staleness markers and identify files needing updates based on configurable time thresholds (minutes, hours, days)",
inputSchema: z.object({
docsPath: z.string().describe("Path to documentation directory"),
projectPath: z
.string()
.optional()
.describe("Path to project root (for knowledge graph tracking)"),
warningThreshold: z
.object({
value: z.number().positive(),
unit: z.enum(["minutes", "hours", "days"]),
})
.optional()
.describe("Warning threshold (yellow flag)"),
staleThreshold: z
.object({
value: z.number().positive(),
unit: z.enum(["minutes", "hours", "days"]),
})
.optional()
.describe("Stale threshold (orange flag)"),
criticalThreshold: z
.object({
value: z.number().positive(),
unit: z.enum(["minutes", "hours", "days"]),
})
.optional()
.describe("Critical threshold (red flag)"),
preset: z
.enum([
"realtime",
"active",
"recent",
"weekly",
"monthly",
"quarterly",
])
.optional()
.describe("Use predefined threshold preset"),
includeFileList: z
.boolean()
.optional()
.default(true)
.describe("Include detailed file list in response"),
sortBy: z
.enum(["age", "path", "staleness"])
.optional()
.default("staleness")
.describe("Sort order for file list"),
storeInKG: z
.boolean()
.optional()
.default(true)
.describe(
"Store tracking event in knowledge graph for historical analysis",
),
}),
},
{
name: "validate_documentation_freshness",
description:
"Validate documentation freshness, initialize metadata for files without it, and update timestamps based on code changes",
inputSchema: z.object({
docsPath: z.string().describe("Path to documentation directory"),
projectPath: z
.string()
.describe("Path to project root (for git integration)"),
initializeMissing: z
.boolean()
.optional()
.default(true)
.describe("Initialize metadata for files without it"),
updateExisting: z
.boolean()
.optional()
.default(false)
.describe("Update last_validated timestamp for all files"),
updateFrequency: z
.enum([
"realtime",
"active",
"recent",
"weekly",
"monthly",
"quarterly",
])
.optional()
.default("monthly")
.describe("Default update frequency for new metadata"),
validateAgainstGit: z
.boolean()
.optional()
.default(true)
.describe("Validate against current git commit"),
}),
},
{
name: "manage_sitemap",
description:
"Generate, validate, and manage sitemap.xml as the source of truth for documentation links. Sitemap.xml is used for SEO, search engine submission, and deployment tracking.",
inputSchema: ManageSitemapInputSchema,
},
{
name: "generate_llm_context",
description:
"Generate a comprehensive LLM context reference file documenting all tools, memory system, and workflows for easy @ reference",
inputSchema: GenerateLLMContextInputSchema,
},
// Memory system tools
...memoryTools.map((tool) => ({
...tool,
inputSchema: z.object(
Object.entries(tool.inputSchema.properties || {}).reduce(
(acc: any, [key, value]: [string, any]) => {
if (value.type === "string") {
acc[key] = value.enum ? z.enum(value.enum) : z.string();
} else if (value.type === "number") {
acc[key] = z.number();
} else if (value.type === "boolean") {
acc[key] = z.boolean();
} else if (value.type === "object") {
acc[key] = z.object({});
}
if (value.description) {
acc[key] = acc[key].describe(value.description);
}
if (!tool.inputSchema.required?.includes(key)) {
acc[key] = acc[key].optional();
}
if (value.default !== undefined) {
acc[key] = acc[key].default(value.default);
}
return acc;
},
{},
),
),
})),
];
// Export TOOLS for use in generate_llm_context tool
export { TOOLS };
// Set tool definitions for generate_llm_context tool
setToolDefinitions(TOOLS);
// Native MCP Prompts for technical writing assistance
// Static catalog served via prompts/list; messages are generated dynamically
// per request by generateTechnicalWriterPrompts (see GetPrompt handler).
const PROMPTS = [
  // Diataxis content-type prompts: tutorial / how-to / reference / explanation
  {
    name: "tutorial-writer",
    description:
      "Generate learning-oriented tutorial content following Diataxis principles",
    arguments: [
      {
        name: "project_path",
        description:
          "Path to the project directory (used to analyze project context)",
        required: true,
      },
      {
        name: "target_audience",
        description:
          "Target audience for the tutorial (default: 'beginners'). Options: 'beginners', 'intermediate', 'advanced'",
        required: false,
      },
      {
        name: "learning_goal",
        description:
          "What users should learn (default: 'get started with the project'). Examples: 'deploy first app', 'understand core concepts'",
        required: false,
      },
    ],
  },
  {
    name: "howto-guide-writer",
    description:
      "Generate problem-oriented how-to guide content following Diataxis principles",
    arguments: [
      {
        name: "project_path",
        description:
          "Path to the project directory (used to analyze project context)",
        required: true,
      },
      {
        name: "problem",
        description:
          "Problem to solve (default: 'common development task'). Example: 'deploy to production', 'add authentication'",
        required: false,
      },
      {
        name: "user_experience",
        description:
          "User experience level (default: 'intermediate'). Options: 'beginner', 'intermediate', 'advanced'",
        required: false,
      },
    ],
  },
  {
    name: "reference-writer",
    description:
      "Generate information-oriented reference documentation following Diataxis principles",
    arguments: [
      {
        name: "project_path",
        description:
          "Path to the project directory (used to analyze project context)",
        required: true,
      },
      {
        name: "reference_type",
        description:
          "Type of reference (default: 'API'). Options: 'API', 'CLI', 'Configuration', 'Architecture'",
        required: false,
      },
      {
        name: "completeness",
        description:
          "Level of completeness required (default: 'comprehensive'). Options: 'basic', 'comprehensive', 'exhaustive'",
        required: false,
      },
    ],
  },
  {
    name: "explanation-writer",
    description:
      "Generate understanding-oriented explanation content following Diataxis principles",
    arguments: [
      {
        name: "project_path",
        description:
          "Path to the project directory (used to analyze project context)",
        required: true,
      },
      {
        name: "concept",
        description:
          "Concept to explain (default: 'system architecture'). Examples: 'data flow', 'design patterns', 'security model'",
        required: false,
      },
      {
        name: "depth",
        description:
          "Depth of explanation (default: 'detailed'). Options: 'overview', 'detailed', 'deep-dive'",
        required: false,
      },
    ],
  },
  // Documentation organization / optimization prompts
  {
    name: "diataxis-organizer",
    description:
      "Organize existing documentation using Diataxis framework principles",
    arguments: [
      {
        name: "project_path",
        description:
          "Path to the project directory (used to analyze project context)",
        required: true,
      },
      {
        name: "current_docs",
        description:
          "Description of current documentation (default: 'mixed documentation'). Example: 'single README with everything', 'scattered wiki pages'",
        required: false,
      },
      {
        name: "priority",
        description:
          "Organization priority (default: 'user needs'). Options: 'user needs', 'completeness', 'maintainability'",
        required: false,
      },
    ],
  },
  {
    name: "readme-optimizer",
    description: "Optimize README content using Diataxis-aware principles",
    arguments: [
      {
        name: "project_path",
        description:
          "Path to the project directory (used to analyze README and project context)",
        required: true,
      },
      {
        name: "optimization_focus",
        description:
          "Focus area for optimization (default: 'general'). Options: 'length', 'clarity', 'structure', 'onboarding'",
        required: false,
      },
    ],
  },
  // Guided workflow prompts (ADR-007)
  {
    name: "analyze-and-recommend",
    description: "Complete repository analysis and SSG recommendation workflow",
    arguments: [
      {
        name: "project_path",
        description: "Path to the project directory (used for analysis)",
        required: true,
      },
      {
        name: "analysis_depth",
        description:
          "Analysis depth (default: 'standard'). Options: 'quick' (basic scan), 'standard' (comprehensive), 'deep' (detailed with dependencies)",
        required: false,
      },
      {
        name: "preferences",
        description:
          "SSG preferences as text (default: 'balanced approach'). Examples: 'prefer JavaScript ecosystem', 'prioritize simplicity', 'need fast builds'",
        required: false,
      },
    ],
  },
  {
    name: "setup-documentation",
    description:
      "Create comprehensive documentation structure with best practices",
    arguments: [
      {
        name: "project_path",
        description:
          "Path to the project directory (where docs will be created)",
        required: true,
      },
      {
        name: "ssg_type",
        description:
          "Static site generator type (default: 'recommended based on analysis'). Options: 'jekyll', 'hugo', 'docusaurus', 'mkdocs', 'eleventy'",
        required: false,
      },
      {
        name: "include_examples",
        description:
          "Include example content (default: 'true'). Set to 'false' for templates only, 'true' for populated examples",
        required: false,
      },
    ],
  },
  {
    name: "troubleshoot-deployment",
    description: "Diagnose and fix GitHub Pages deployment issues",
    arguments: [
      {
        name: "repository",
        description:
          "Repository path or URL (GitHub repository to troubleshoot)",
        required: true,
      },
      {
        name: "deployment_url",
        description:
          "Expected deployment URL (default: derived from repository). Example: 'https://username.github.io/repo'",
        required: false,
      },
      {
        name: "issue_description",
        description:
          "Description of the issue (default: 'deployment not working'). Examples: 'builds fail', '404 errors', 'outdated content'",
        required: false,
      },
    ],
  },
  {
    name: "maintain-documentation-freshness",
    description:
      "Track and maintain documentation freshness with automated staleness detection",
    arguments: [
      {
        name: "project_path",
        description:
          "Path to the project directory (used for knowledge graph tracking)",
        required: true,
      },
      {
        name: "docs_path",
        description:
          "Path to documentation directory (default: derived from project). Example: './docs', './documentation'",
        required: false,
      },
      {
        name: "freshness_preset",
        description:
          "Staleness threshold preset (default: 'monthly'). Options: 'realtime' (minutes), 'active' (hours), 'recent' (days), 'weekly' (7 days), 'monthly' (30 days), 'quarterly' (90 days)",
        required: false,
      },
      {
        name: "action",
        description:
          "Action to perform (default: 'track'). Options: 'validate' (initialize metadata), 'track' (scan staleness), 'insights' (view trends)",
        required: false,
      },
    ],
  },
];
// MCP resources should serve APPLICATION needs, not store tool results
// Resources are app-controlled and used for UI display, autocomplete, etc.
// Resource definitions following ADR-007 and MCP best practices
// Resources serve APPLICATIONS (UI needs) not tool result storage
// Each entry is advertised via resources/list; the ReadResource handler below
// resolves the documcp:// URIs to static content.
const RESOURCES = [
  // Static Site Generators - for UI selection dropdowns
  {
    uri: "documcp://ssgs/available",
    name: "Available Static Site Generators",
    description: "List of supported SSGs with capabilities for UI selection",
    mimeType: "application/json",
  },
  // Templates - static templates for documentation setup
  {
    uri: "documcp://templates/jekyll-config",
    name: "Jekyll Configuration Template",
    description: "Template for Jekyll _config.yml",
    mimeType: "text/yaml",
  },
  {
    uri: "documcp://templates/hugo-config",
    name: "Hugo Configuration Template",
    description: "Template for Hugo config.yaml",
    mimeType: "text/yaml",
  },
  {
    uri: "documcp://templates/docusaurus-config",
    name: "Docusaurus Configuration Template",
    description: "Template for Docusaurus docusaurus.config.js",
    mimeType: "text/javascript",
  },
  {
    uri: "documcp://templates/mkdocs-config",
    name: "MkDocs Configuration Template",
    description: "Template for MkDocs mkdocs.yml",
    mimeType: "text/yaml",
  },
  {
    uri: "documcp://templates/eleventy-config",
    name: "Eleventy Configuration Template",
    description: "Template for Eleventy .eleventy.js",
    mimeType: "text/javascript",
  },
  {
    uri: "documcp://templates/diataxis-structure",
    name: "Diataxis Structure Template",
    description: "Diataxis documentation structure blueprint",
    mimeType: "application/json",
  },
  // Workflows - for UI to display available workflows
  {
    uri: "documcp://workflows/all",
    name: "All Documentation Workflows",
    description: "Complete list of available documentation workflows",
    mimeType: "application/json",
  },
  {
    uri: "documcp://workflows/quick-setup",
    name: "Quick Documentation Setup Workflow",
    description: "Fast-track workflow for basic documentation",
    mimeType: "application/json",
  },
  {
    uri: "documcp://workflows/full-setup",
    name: "Full Documentation Setup Workflow",
    description: "Comprehensive workflow for complete documentation",
    mimeType: "application/json",
  },
  {
    uri: "documcp://workflows/guidance",
    name: "Workflow Execution Guidance",
    description: "Guidelines for executing documentation workflows",
    mimeType: "application/json",
  },
  // Freshness tracking - for UI selection and configuration
  {
    uri: "documcp://freshness/presets",
    name: "Documentation Freshness Presets",
    description:
      "Available staleness threshold presets for UI selection (realtime, active, recent, weekly, monthly, quarterly)",
    mimeType: "application/json",
  },
  {
    uri: "documcp://freshness/metadata-schema",
    name: "Freshness Metadata Schema",
    description:
      "Schema for documentation frontmatter freshness metadata fields",
    mimeType: "application/json",
  },
];
// List available tools
server.setRequestHandler(ListToolsRequestSchema, async () => ({
tools: TOOLS.map((tool) => ({
name: tool.name,
description: tool.description,
inputSchema: zodToJsonSchema(tool.inputSchema),
})),
}));
/**
 * Scan a project root for conventional documentation directories.
 *
 * Probes a fixed list of well-known directory names (docs, wiki,
 * website/docs, .vitepress, book, ...) and returns the ones that exist
 * and are directories, in the same fixed candidate order.
 *
 * @param projectRoot - Absolute path of the project to scan.
 * @returns Entries of { path: absolute path, name: candidate name }.
 */
async function detectDocsDirectories(
  projectRoot: string,
): Promise<Array<{ path: string; name: string }>> {
  const candidates = [
    "docs",
    "documentation",
    "doc",
    "wiki",
    "website/docs", // Docusaurus pattern
    ".vitepress", // VitePress
    "book", // mdBook
  ];
  const found: Array<{ path: string; name: string }> = [];
  for (const name of candidates) {
    const candidatePath = path.join(projectRoot, name);
    // fs.stat rejects when the path does not exist; treat any failure as
    // "not a documentation directory" and move on.
    const isDirectory = await fs
      .stat(candidatePath)
      .then((stats) => stats.isDirectory())
      .catch(() => false);
    if (isDirectory) {
      found.push({ path: candidatePath, name });
    }
  }
  return found;
}
// Expose the allowed filesystem roots to clients. Each project root is
// listed first, followed by any documentation directories auto-detected
// inside it (marked type "documentation" with a parent link).
server.setRequestHandler(ListRootsRequestSchema, async () => {
  const roots: Array<{
    uri: string;
    name: string;
    type?: string;
    description?: string;
    parent?: string;
  }> = [];
  for (const root of allowedRoots) {
    const rootUri = `file://${root}`;
    const rootName = path.basename(root);
    roots.push({
      uri: rootUri,
      name: rootName,
      type: "project",
      description: "Project root containing source code and documentation",
    });
    // Surface detected docs directories (docs/, wiki/, website/docs, ...) as
    // child roots of this project root.
    const docsDirectories = await detectDocsDirectories(root);
    for (const docsDir of docsDirectories) {
      roots.push({
        uri: `file://${docsDir.path}`,
        name: docsDir.name,
        type: "documentation",
        description: `Documentation directory within ${rootName}`,
        parent: rootUri,
      });
    }
  }
  return { roots };
});
// Serve the static prompt catalog defined above.
server.setRequestHandler(ListPromptsRequestSchema, async () => {
  return { prompts: PROMPTS };
});
// Resolve a single prompt by name, generating its messages on demand via the
// Diataxis-aligned technical-writer prompt system.
server.setRequestHandler(GetPromptRequestSchema, async (request) => {
  const promptName = request.params.name;
  const promptArgs = request.params.arguments ?? {};
  // Default to the server's working directory when no project_path argument
  // was supplied by the client.
  const projectPath = promptArgs.project_path || process.cwd();
  const messages = await generateTechnicalWriterPrompts(
    promptName,
    projectPath,
    promptArgs,
  );
  return {
    description: `Technical writing assistance for ${promptName}`,
    messages,
  };
});
// List available resources (static RESOURCES registry)
server.setRequestHandler(ListResourcesRequestSchema, async () => {
  return { resources: RESOURCES };
});
// Read specific resource
// Resources serve APPLICATION needs - static content for UI display
// Routes documcp:// URIs to static content. Namespaces handled below:
//   documcp://ssgs/available   - SSG catalog (for UI dropdowns/selection)
//   documcp://templates/<id>   - static configuration templates per SSG
//   documcp://workflows/<id>   - documentation workflow definitions
//   documcp://freshness/<id>   - freshness presets and metadata schema
// Any unrecognized URI throws, which the MCP server reports as an error.
server.setRequestHandler(ReadResourceRequestSchema, async (request) => {
const { uri } = request.params;
// Handle SSG list resource (for UI dropdowns/selection)
if (uri === "documcp://ssgs/available") {
return {
contents: [
{
uri,
mimeType: "application/json",
text: JSON.stringify(
{
ssgs: [
{
id: "jekyll",
name: "Jekyll",
description: "Ruby-based SSG, great for GitHub Pages",
language: "ruby",
complexity: "low",
buildSpeed: "medium",
ecosystem: "mature",
bestFor: ["blogs", "documentation", "simple-sites"],
},
{
id: "hugo",
name: "Hugo",
description: "Go-based SSG, extremely fast builds",
language: "go",
complexity: "medium",
buildSpeed: "very-fast",
ecosystem: "mature",
bestFor: ["documentation", "blogs", "large-sites"],
},
{
id: "docusaurus",
name: "Docusaurus",
description:
"React-based, optimized for technical documentation",
language: "javascript",
complexity: "medium",
buildSpeed: "medium",
ecosystem: "growing",
bestFor: [
"technical-documentation",
"api-docs",
"versioned-docs",
],
},
{
id: "mkdocs",
name: "MkDocs",
description: "Python-based, simple and fast documentation",
language: "python",
complexity: "low",
buildSpeed: "fast",
ecosystem: "mature",
bestFor: ["documentation", "technical-docs", "simple-setup"],
},
{
id: "eleventy",
name: "Eleventy",
description: "JavaScript-based, simple and flexible",
language: "javascript",
complexity: "low",
buildSpeed: "fast",
ecosystem: "growing",
bestFor: ["blogs", "documentation", "flexible-sites"],
},
],
},
null,
2,
),
},
],
};
}
// Handle template resources (static content)
if (uri.startsWith("documcp://templates/")) {
const templateType = uri.split("/").pop(); // last URI segment, e.g. "jekyll-config"
switch (templateType) {
case "jekyll-config":
return {
contents: [
{
uri,
mimeType: "text/yaml",
text: `# Jekyll Configuration Template
title: "Documentation Site"
description: "Project documentation"
baseurl: ""
url: ""
markdown: kramdown
highlighter: rouge
theme: minima
plugins:
- jekyll-feed
- jekyll-sitemap
exclude:
- Gemfile
- Gemfile.lock
- node_modules
- vendor
`,
},
],
};
case "hugo-config":
return {
contents: [
{
uri,
mimeType: "text/yaml",
text: `# Hugo Configuration Template
baseURL: "https://username.github.io/repository"
languageCode: "en-us"
title: "Documentation Site"
theme: "docsy"
params:
github_repo: "https://github.com/username/repository"
github_branch: "main"
markup:
goldmark:
renderer:
unsafe: true
highlight:
style: github
lineNos: true
`,
},
],
};
case "docusaurus-config":
return {
contents: [
{
uri,
mimeType: "text/javascript",
text: `// Docusaurus Configuration Template
// @ts-check
/** @type {import('@docusaurus/types').Config} */
const config = {
title: 'Documentation Site',
tagline: 'Project documentation',
url: 'https://username.github.io',
baseUrl: '/repository/',
onBrokenLinks: 'throw',
onBrokenMarkdownLinks: 'warn',
favicon: 'img/favicon.ico',
organizationName: 'username',
projectName: 'repository',
i18n: {
defaultLocale: 'en',
locales: ['en'],
},
presets: [
[
'classic',
/** @type {import('@docusaurus/preset-classic').Options} */
({
docs: {
sidebarPath: require.resolve('./sidebars.js'),
editUrl: 'https://github.com/username/repository/tree/main/',
},
blog: false,
theme: {
customCss: require.resolve('./src/css/custom.css'),
},
}),
],
],
themeConfig:
/** @type {import('@docusaurus/preset-classic').ThemeConfig} */
({
navbar: {
title: 'Documentation',
items: [
{
type: 'doc',
docId: 'intro',
position: 'left',
label: 'Tutorial',
},
{
href: 'https://github.com/username/repository',
label: 'GitHub',
position: 'right',
},
],
},
footer: {
style: 'dark',
copyright: \`Copyright © \${new Date().getFullYear()} Project Name\`,
},
}),
};
module.exports = config;
`,
},
],
};
case "mkdocs-config":
return {
contents: [
{
uri,
mimeType: "text/yaml",
text: `# MkDocs Configuration Template
site_name: Documentation Site
site_url: https://username.github.io/repository
repo_url: https://github.com/username/repository
repo_name: username/repository
theme:
name: material
palette:
- scheme: default
primary: indigo
accent: indigo
toggle:
icon: material/brightness-7
name: Switch to dark mode
- scheme: slate
primary: indigo
accent: indigo
toggle:
icon: material/brightness-4
name: Switch to light mode
features:
- navigation.tabs
- navigation.sections
- toc.integrate
- navigation.top
- search.suggest
- search.highlight
- content.tabs.link
plugins:
- search
- awesome-pages
markdown_extensions:
- pymdownx.highlight
- pymdownx.superfences
- pymdownx.tabbed
- admonition
- pymdownx.details
nav:
- Home: index.md
- Tutorials: tutorials/
- How-To Guides: how-to/
- Reference: reference/
- Explanation: explanation/
`,
},
],
};
case "eleventy-config":
return {
contents: [
{
uri,
mimeType: "text/javascript",
text: `// Eleventy Configuration Template
module.exports = function(eleventyConfig) {
// Copy static assets
eleventyConfig.addPassthroughCopy("src/css");
eleventyConfig.addPassthroughCopy("src/js");
eleventyConfig.addPassthroughCopy("src/images");
// Add plugins
// eleventyConfig.addPlugin(require("@11ty/eleventy-plugin-syntaxhighlight"));
// Add filters
eleventyConfig.addFilter("readableDate", dateObj => {
return new Date(dateObj).toLocaleDateString();
});
// Add shortcodes
eleventyConfig.addShortcode("year", () => \`\${new Date().getFullYear()}\`);
// Markdown configuration
let markdownIt = require("markdown-it");
let markdownItAnchor = require("markdown-it-anchor");
let options = {
html: true,
breaks: true,
linkify: true
};
eleventyConfig.setLibrary("md", markdownIt(options)
.use(markdownItAnchor)
);
return {
dir: {
input: "src",
output: "_site",
includes: "_includes",
layouts: "_layouts",
data: "_data"
},
templateFormats: ["md", "njk", "html"],
markdownTemplateEngine: "njk",
htmlTemplateEngine: "njk",
dataTemplateEngine: "njk"
};
};
`,
},
],
};
case "diataxis-structure":
return {
contents: [
{
uri,
mimeType: "application/json",
text: JSON.stringify(
{
structure: {
tutorials: {
description: "Learning-oriented guides",
files: ["getting-started.md", "your-first-project.md"],
},
"how-to-guides": {
description: "Problem-oriented step-by-step guides",
files: ["common-tasks.md", "troubleshooting.md"],
},
reference: {
description: "Information-oriented technical reference",
files: ["api-reference.md", "configuration.md"],
},
explanation: {
description: "Understanding-oriented background material",
files: ["architecture.md", "design-decisions.md"],
},
},
},
null,
2,
),
},
],
};
default:
throw new Error(`Unknown template: ${templateType}`);
}
}
// Handle workflow resources
if (uri.startsWith("documcp://workflows/")) {
const workflowType = uri.split("/").pop(); // last URI segment
switch (workflowType) {
case "all":
return {
contents: [
{
uri,
mimeType: "application/json",
text: JSON.stringify(
{
workflows: DOCUMENTATION_WORKFLOWS,
executionGuidance: WORKFLOW_EXECUTION_GUIDANCE,
metadata: WORKFLOW_METADATA,
},
null,
2,
),
},
],
};
case "quick-setup":
return {
contents: [
{
uri,
mimeType: "application/json",
text: JSON.stringify(
DOCUMENTATION_WORKFLOWS["quick-documentation-setup"],
null,
2,
),
},
],
};
case "full-setup":
return {
contents: [
{
uri,
mimeType: "application/json",
text: JSON.stringify(
DOCUMENTATION_WORKFLOWS["full-documentation-setup"],
null,
2,
),
},
],
};
case "guidance":
return {
contents: [
{
uri,
mimeType: "application/json",
text: JSON.stringify(
{
executionGuidance: WORKFLOW_EXECUTION_GUIDANCE,
recommendationEngine:
"Use recommendWorkflow() function with project status and requirements",
},
null,
2,
),
},
],
};
default: {
// Try to find specific workflow by its key in DOCUMENTATION_WORKFLOWS
const workflow = DOCUMENTATION_WORKFLOWS[workflowType || ""];
if (workflow) {
return {
contents: [
{
uri,
mimeType: "application/json",
text: JSON.stringify(workflow, null, 2),
},
],
};
}
throw new Error(`Unknown workflow: ${workflowType}`);
}
}
}
// Handle freshness tracking resources
if (uri.startsWith("documcp://freshness/")) {
const freshnessType = uri.split("/").pop(); // last URI segment
switch (freshnessType) {
case "presets":
return {
contents: [
{
uri,
mimeType: "application/json",
text: JSON.stringify(
{
presets: [
{
id: "realtime",
name: "Realtime",
description:
"For frequently updated documentation (minutes)",
thresholds: {
warning: { value: 5, unit: "minutes" },
stale: { value: 15, unit: "minutes" },
critical: { value: 30, unit: "minutes" },
},
bestFor: ["api-docs", "status-pages", "live-updates"],
},
{
id: "active",
name: "Active",
description:
"For actively maintained documentation (hours)",
thresholds: {
warning: { value: 2, unit: "hours" },
stale: { value: 6, unit: "hours" },
critical: { value: 12, unit: "hours" },
},
bestFor: [
"development-docs",
"feature-guides",
"release-notes",
],
},
{
id: "recent",
name: "Recent",
description: "For regularly updated documentation (days)",
thresholds: {
warning: { value: 1, unit: "days" },
stale: { value: 3, unit: "days" },
critical: { value: 7, unit: "days" },
},
bestFor: [
"tutorials",
"getting-started",
"project-updates",
],
},
{
id: "weekly",
name: "Weekly",
description: "For weekly maintenance cycle (7 days)",
thresholds: {
warning: { value: 7, unit: "days" },
stale: { value: 14, unit: "days" },
critical: { value: 30, unit: "days" },
},
bestFor: ["how-to-guides", "examples", "best-practices"],
},
{
id: "monthly",
name: "Monthly",
description:
"For monthly maintenance cycle (30 days) - DEFAULT",
thresholds: {
warning: { value: 30, unit: "days" },
stale: { value: 60, unit: "days" },
critical: { value: 90, unit: "days" },
},
bestFor: [
"reference-docs",
"architecture",
"stable-features",
],
default: true,
},
{
id: "quarterly",
name: "Quarterly",
description: "For quarterly maintenance cycle (90 days)",
thresholds: {
warning: { value: 90, unit: "days" },
stale: { value: 180, unit: "days" },
critical: { value: 365, unit: "days" },
},
bestFor: [
"explanations",
"background",
"rarely-changing-docs",
],
},
],
},
null,
2,
),
},
],
};
case "metadata-schema":
return {
contents: [
{
uri,
mimeType: "application/json",
text: JSON.stringify(
{
schema: {
documcp: {
description: "DocuMCP metadata block in YAML frontmatter",
type: "object",
properties: {
last_updated: {
type: "string",
format: "date-time",
description:
"ISO 8601 timestamp of last content update",
example: "2025-01-19T10:30:00Z",
},
last_validated: {
type: "string",
format: "date-time",
description:
"ISO 8601 timestamp of last validation check",
example: "2025-01-19T10:30:00Z",
},
update_frequency: {
type: "string",
enum: [
"realtime",
"active",
"recent",
"weekly",
"monthly",
"quarterly",
],
description: "Expected update frequency preset",
default: "monthly",
},
validated_against_commit: {
type: "string",
description:
"Git commit hash the documentation was validated against",
example: "a1b2c3d",
},
auto_updated: {
type: "boolean",
description:
"Whether timestamps are automatically updated",
default: false,
},
},
required: ["last_updated"],
},
},
example: {
yaml: `---
title: "API Reference"
description: "Complete API documentation"
documcp:
last_updated: "2025-01-19T10:30:00Z"
last_validated: "2025-01-19T10:30:00Z"
update_frequency: "monthly"
validated_against_commit: "a1b2c3d"
auto_updated: false
---`,
},
},
null,
2,
),
},
],
};
default:
throw new Error(`Unknown freshness resource: ${freshnessType}`);
}
}
throw new Error(`Resource not found: ${uri}`);
});
// Helper to wrap tool results in standard MCP format
/**
 * Normalize a tool's return value into the standard MCP response shape.
 * Values that already carry a `content` array are passed through untouched;
 * anything else is wrapped via formatMCPResponse as a successful result.
 */
function wrapToolResult<T>(result: T, _toolName: string) {
  const isMcpShaped =
    typeof result === "object" &&
    result !== null &&
    "content" in result &&
    Array.isArray((result as any).content);
  // Already in MCP format (has a 'content' array) — return as-is
  if (isMcpShaped) {
    return result;
  }
  // Otherwise, wrap in formatMCPResponse with standard metadata
  return formatMCPResponse({
    success: true,
    data: result,
    metadata: {
      toolVersion: packageJson.version,
      // NOTE(review): Date.now() records the current epoch time, not an
      // elapsed duration; this matches the pattern used elsewhere in this
      // file — confirm whether a true duration was intended.
      executionTime: Date.now(),
      timestamp: new Date().toISOString(),
    },
  });
}
// Handle tool execution
server.setRequestHandler(CallToolRequestSchema, async (request, extra) => {
const { name, arguments: args } = request.params;
try {
switch (name) {
case "analyze_repository": {
// Check if path is allowed
const repoPath = (args as any)?.path;
if (repoPath && !isPathAllowed(repoPath, allowedRoots)) {
return formatMCPResponse({
success: false,
error: {
code: "PERMISSION_DENIED",
message: getPermissionDeniedMessage(repoPath, allowedRoots),
resolution:
"Request access to this directory by starting the server with --root argument, or use a path within allowed roots.",
},
metadata: {
toolVersion: packageJson.version,
executionTime: 0,
timestamp: new Date().toISOString(),
},
});
}
const result = await analyzeRepository(args, extra);
// Remember in persistent memory
if (args?.path && typeof args.path === "string") {
const memoryId = await rememberAnalysis(args.path, result);
(result as any).memoryId = memoryId;
// Get insights from similar projects
const similarProjects = await getSimilarProjects(result, 3);
if (similarProjects.length > 0) {
(result as any).insights = {
similarProjects,
message: `Found ${similarProjects.length} similar projects in memory`,
};
}
}
return wrapToolResult(result, "analyze_repository");
}
case "recommend_ssg": {
const result = await recommendSSG(args, extra);
// Remember recommendation
if (args?.analysisId && typeof args.analysisId === "string") {
const memoryId = await rememberRecommendation(
args.analysisId,
result,
);
(result as any).memoryId = memoryId;
// Get project history if available
const projectInsights = await getProjectInsights(args.analysisId);
if (projectInsights.length > 0) {
(result as any).projectHistory = projectInsights;
}
}
return wrapToolResult(result, "recommend_ssg");
}
case "generate_config": {
const result = await generateConfig(args);
return wrapToolResult(result, "generate_config");
}
case "setup_structure": {
// Check if basePath is allowed
const basePath = (args as any)?.basePath;
if (basePath && !isPathAllowed(basePath, allowedRoots)) {
return formatMCPResponse({
success: false,
error: {
code: "PERMISSION_DENIED",
message: getPermissionDeniedMessage(basePath, allowedRoots),
resolution:
"Request access to this directory by starting the server with --root argument.",
},
metadata: {
toolVersion: packageJson.version,
executionTime: 0,
timestamp: new Date().toISOString(),
},
});
}
const result = await setupStructure(args);
return wrapToolResult(result, "setup_structure");
}
case "setup_playwright_tests": {
const result = await setupPlaywrightTests(args);
return wrapToolResult(result, "setup_playwright_tests");
}
case "deploy_pages": {
const result = await deployPages(args, extra);
return wrapToolResult(result, "deploy_pages");
}
case "verify_deployment": {
const result = await verifyDeployment(args);
return wrapToolResult(result, "verify_deployment");
}
case "populate_diataxis_content": {
// Check if docsPath is allowed
const docsPath = (args as any)?.docsPath;
if (docsPath && !isPathAllowed(docsPath, allowedRoots)) {
return formatMCPResponse({
success: false,
error: {
code: "PERMISSION_DENIED",
message: getPermissionDeniedMessage(docsPath, allowedRoots),
resolution:
"Request access to this directory by starting the server with --root argument.",
},
metadata: {
toolVersion: packageJson.version,
executionTime: 0,
timestamp: new Date().toISOString(),
},
});
}
const result = await handlePopulateDiataxisContent(args, extra);
return {
content: [
{
type: "text",
text: `Content population completed successfully. Generated ${
result.filesCreated
} files with ${Math.round(
result.populationMetrics.coverage,
)}% coverage.`,
},
{
type: "text",
text: `Population metrics: Coverage: ${result.populationMetrics.coverage}%, Completeness: ${result.populationMetrics.completeness}%, Project Specificity: ${result.populationMetrics.projectSpecificity}%`,
},
{
type: "text",
text: `Next steps:\n${result.nextSteps
.map((step) => `- ${step}`)
.join("\n")}`,
},
],
};
}
case "update_existing_documentation": {
const result = await handleUpdateExistingDocumentation(args);
return {
content: [
{
type: "text",
text: `Documentation analysis completed. Found ${result.updateMetrics.gapsDetected} gaps and generated ${result.updateMetrics.recommendationsGenerated} recommendations.`,
},
{
type: "text",
text: `Update metrics: Confidence Score: ${result.updateMetrics.confidenceScore}, Estimated Effort: ${result.updateMetrics.estimatedEffort}`,
},
{
type: "text",
text: `Memory insights: ${result.memoryInsights.similarProjects.length} similar projects analyzed, ${result.memoryInsights.successfulUpdatePatterns.length} successful update patterns found`,
},
{
type: "text",
text: `Top recommendations:\n${result.recommendations
.slice(0, 5)
.map(
(rec, i) =>
`${i + 1}. ${rec.reasoning} (confidence: ${Math.round(
rec.confidence * 100,
)}%)`,
)
.join("\n")}`,
},
{
type: "text",
text: `Next steps:\n${result.nextSteps
.map((step) => `- ${step}`)
.join("\n")}`,
},
],
};
}
case "validate_diataxis_content": {
// Check if contentPath is allowed
const contentPath = (args as any)?.contentPath;
if (contentPath && !isPathAllowed(contentPath, allowedRoots)) {
return formatMCPResponse({
success: false,
error: {
code: "PERMISSION_DENIED",
message: getPermissionDeniedMessage(contentPath, allowedRoots),
resolution:
"Request access to this directory by starting the server with --root argument.",
},
metadata: {
toolVersion: packageJson.version,
executionTime: 0,
timestamp: new Date().toISOString(),
},
});
}
const result = await handleValidateDiataxisContent(args, extra);
// Return structured validation results as JSON
const validationSummary = {
status: result.success ? "PASSED" : "ISSUES FOUND",
confidence: `${result.confidence.overall}%`,
issuesFound: result.issues.length,
breakdown: {
errors: result.issues.filter((i) => i.type === "error").length,
warnings: result.issues.filter((i) => i.type === "warning").length,
info: result.issues.filter((i) => i.type === "info").length,
},
topIssues: result.issues.slice(0, 5).map((issue) => ({
type: issue.type.toUpperCase(),
category: issue.category,
file: issue.location.file,
description: issue.description,
})),
recommendations: result.recommendations,
nextSteps: result.nextSteps,
confidenceBreakdown: result.confidence.breakdown,
};
return {
content: [
{
type: "text",
text: `Content validation ${
result.success ? "passed" : "found issues"
}. Overall confidence: ${result.confidence.overall}%.`,
},
{
type: "text",
text: `Issues found: ${result.issues.length} (${
result.issues.filter((i) => i.type === "error").length
} errors, ${
result.issues.filter((i) => i.type === "warning").length
} warnings)`,
},
{
type: "text",
text: JSON.stringify(validationSummary, null, 2),
},
],
};
}
case "validate_content": {
const result = await validateGeneralContent(args);
// Return structured validation results as JSON
const contentSummary = {
status: result.success ? "PASSED" : "ISSUES FOUND",
summary: result.summary,
linksChecked: result.linksChecked || 0,
codeBlocksValidated: result.codeBlocksValidated || 0,
brokenLinks: result.brokenLinks || [],
codeErrors: (result.codeErrors || []).slice(0, 10), // Limit to first 10 errors
recommendations: result.recommendations || [],
};
return {
content: [
{
type: "text",
text: `Content validation completed. Status: ${
result.success ? "PASSED" : "ISSUES FOUND"
}`,
},
{
type: "text",
text: `Results: ${result.linksChecked || 0} links checked, ${
result.codeBlocksValidated || 0
} code blocks validated`,
},
{
type: "text",
text: JSON.stringify(contentSummary, null, 2),
},
],
};
}
case "detect_documentation_gaps": {
const result = await detectDocumentationGaps(args);
return wrapToolResult(result, "detect_documentation_gaps");
}
case "test_local_deployment": {
const result = await testLocalDeployment(args);
return wrapToolResult(result, "test_local_deployment");
}
case "evaluate_readme_health": {
const result = await evaluateReadmeHealth(args as any);
return wrapToolResult(result, "evaluate_readme_health");
}
case "readme_best_practices": {
const result = await readmeBestPractices(args as any);
return formatMCPResponse(result);
}
case "check_documentation_links": {
// Check if documentation_path is allowed
const docLinksPath = (args as any)?.documentation_path;
if (docLinksPath && !isPathAllowed(docLinksPath, allowedRoots)) {
return formatMCPResponse({
success: false,
error: {
code: "PERMISSION_DENIED",
message: getPermissionDeniedMessage(docLinksPath, allowedRoots),
resolution:
"Request access to this directory by starting the server with --root argument.",
},
metadata: {
toolVersion: packageJson.version,
executionTime: 0,
timestamp: new Date().toISOString(),
},
});
}
const result = await checkDocumentationLinks(args as any);
return formatMCPResponse(result);
}
case "generate_readme_template": {
const result = await generateReadmeTemplate(args as any);
return formatMCPResponse({
success: true,
data: result,
metadata: {
toolVersion: packageJson.version,
executionTime: Date.now(),
timestamp: new Date().toISOString(),
},
});
}
case "validate_readme_checklist": {
const result = await validateReadmeChecklist(args as any);
return formatMCPResponse({
success: true,
data: result,
metadata: {
toolVersion: packageJson.version,
executionTime: Date.now(),
timestamp: new Date().toISOString(),
},
});
}
case "analyze_readme": {
const result = await analyzeReadme(args as any);
return formatMCPResponse(result);
}
case "manage_preferences": {
const result = await managePreferences(args);
return wrapToolResult(result, "manage_preferences");
}
case "analyze_deployments": {
const result = await analyzeDeployments(args);
return wrapToolResult(result, "analyze_deployments");
}
// Phase 3: Code-to-Documentation Synchronization
case "sync_code_to_docs": {
const projectPath = (args as any)?.projectPath;
const docsPath = (args as any)?.docsPath;
// Check if paths are allowed
if (projectPath && !isPathAllowed(projectPath, allowedRoots)) {
return formatMCPResponse({
success: false,
error: {
code: "PERMISSION_DENIED",
message: getPermissionDeniedMessage(projectPath, allowedRoots),
resolution:
"Request access to this directory by starting the server with --root argument",
},
metadata: {
toolVersion: packageJson.version,
executionTime: 0,
timestamp: new Date().toISOString(),
},
});
}
if (docsPath && !isPathAllowed(docsPath, allowedRoots)) {
return formatMCPResponse({
success: false,
error: {
code: "PERMISSION_DENIED",
message: getPermissionDeniedMessage(docsPath, allowedRoots),
resolution:
"Request access to this directory by starting the server with --root argument",
},
metadata: {
toolVersion: packageJson.version,
executionTime: 0,
timestamp: new Date().toISOString(),
},
});
}
const result = await handleSyncCodeToDocs(args, extra);
return wrapToolResult(result, "sync_code_to_docs");
}
case "generate_contextual_content": {
const filePath = (args as any)?.filePath;
// Check if file path is allowed
if (filePath && !isPathAllowed(filePath, allowedRoots)) {
return formatMCPResponse({
success: false,
error: {
code: "PERMISSION_DENIED",
message: getPermissionDeniedMessage(filePath, allowedRoots),
resolution:
"Request access to this file by starting the server with --root argument",
},
metadata: {
toolVersion: packageJson.version,
executionTime: 0,
timestamp: new Date().toISOString(),
},
});
}
const result = await handleGenerateContextualContent(args, extra);
return wrapToolResult(result, "generate_contextual_content");
}
// Documentation Freshness Tracking
case "track_documentation_freshness": {
const docsPath = (args as any)?.docsPath;
// Check if docs path is allowed
if (docsPath && !isPathAllowed(docsPath, allowedRoots)) {
return formatMCPResponse({
success: false,
error: {
code: "PERMISSION_DENIED",
message: getPermissionDeniedMessage(docsPath, allowedRoots),
resolution:
"Request access to this directory by starting the server with --root argument",
},
metadata: {
toolVersion: packageJson.version,
executionTime: 0,
timestamp: new Date().toISOString(),
},
});
}
const result = await trackDocumentationFreshness(args as any);
return wrapToolResult(result, "track_documentation_freshness");
}
case "validate_documentation_freshness": {
const docsPath = (args as any)?.docsPath;
const projectPath = (args as any)?.projectPath;
// Check if paths are allowed
if (docsPath && !isPathAllowed(docsPath, allowedRoots)) {
return formatMCPResponse({
success: false,
error: {
code: "PERMISSION_DENIED",
message: getPermissionDeniedMessage(docsPath, allowedRoots),
resolution:
"Request access to this directory by starting the server with --root argument",
},
metadata: {
toolVersion: packageJson.version,
executionTime: 0,
timestamp: new Date().toISOString(),
},
});
}
if (projectPath && !isPathAllowed(projectPath, allowedRoots)) {
return formatMCPResponse({
success: false,
error: {
code: "PERMISSION_DENIED",
message: getPermissionDeniedMessage(projectPath, allowedRoots),
resolution:
"Request access to this directory by starting the server with --root argument",
},
metadata: {
toolVersion: packageJson.version,
executionTime: 0,
timestamp: new Date().toISOString(),
},
});
}
const result = await validateDocumentationFreshness(args as any);
return wrapToolResult(result, "validate_documentation_freshness");
}
case "manage_sitemap": {
const docsPath = (args as any)?.docsPath;
// Check if docs path is allowed
if (docsPath && !isPathAllowed(docsPath, allowedRoots)) {
return formatMCPResponse({
success: false,
error: {
code: "PERMISSION_DENIED",
message: getPermissionDeniedMessage(docsPath, allowedRoots),
resolution:
"Request access to this directory by starting the server with --root argument",
},
metadata: {
toolVersion: packageJson.version,
executionTime: 0,
timestamp: new Date().toISOString(),
},
});
}
const result = await manageSitemap(args as any);
return wrapToolResult(result, "manage_sitemap");
}
case "generate_llm_context": {
const projectPath = (args as any)?.projectPath;
// Check if project path is allowed
if (projectPath && !isPathAllowed(projectPath, allowedRoots)) {
return formatMCPResponse({
success: false,
error: {
code: "PERMISSION_DENIED",
message: getPermissionDeniedMessage(projectPath, allowedRoots),
resolution:
"Request access to this directory by starting the server with --root argument",
},
metadata: {
toolVersion: packageJson.version,
executionTime: 0,
timestamp: new Date().toISOString(),
},
});
}
const result = await generateLLMContext(args as any);
return wrapToolResult(result, "generate_llm_context");
}
case "read_directory": {
const { path: dirPath } = args as { path: string };
// Check if path is allowed
if (!isPathAllowed(dirPath, allowedRoots)) {
return formatMCPResponse({
success: false,
error: {
code: "PERMISSION_DENIED",
message: getPermissionDeniedMessage(dirPath, allowedRoots),
resolution:
"Request access to this directory by starting the server with --root argument, or use a path within allowed roots.",
},
metadata: {
toolVersion: packageJson.version,
executionTime: 0,
timestamp: new Date().toISOString(),
},
});
}
try {
const entries = await fs.readdir(dirPath, { withFileTypes: true });
const files = [];
const directories = [];
for (const entry of entries) {
if (entry.isDirectory()) {
directories.push(entry.name);
} else if (entry.isFile()) {
files.push(entry.name);
}
}
return formatMCPResponse({
success: true,
data: {
path: dirPath,
files,
directories,
totalFiles: files.length,
totalDirectories: directories.length,
},
metadata: {
toolVersion: packageJson.version,
executionTime: 0,
timestamp: new Date().toISOString(),
},
});
} catch (error: any) {
return formatMCPResponse({
success: false,
error: {
code: "READ_DIRECTORY_FAILED",
message: `Failed to read directory: ${error.message}`,
resolution: "Ensure the directory exists and is accessible.",
},
metadata: {
toolVersion: packageJson.version,
executionTime: 0,
timestamp: new Date().toISOString(),
},
});
}
}
case "optimize_readme": {
const result = await optimizeReadme(args as any);
return formatMCPResponse(result);
}
// Memory system tools
case "memory_recall": {
await initializeMemory(); // Ensure memory is initialized
const manager = (await import("./memory/index.js")).getMemoryManager();
if (!manager) throw new Error("Memory system not initialized");
let results;
if (args?.type === "all") {
results = await manager.search(args?.query || "", {
sortBy: "timestamp",
});
} else {
results = await manager.search(args?.type || "analysis", {
sortBy: "timestamp",
});
}
if (args?.limit && typeof args.limit === "number") {
results = results.slice(0, args.limit);
}
return {
content: [
{
type: "text",
text: `Found ${results.length} memories`,
},
{
type: "text",
text: JSON.stringify(results, null, 2),
},
],
};
}
case "memory_insights": {
const insights = await getMemoryStatistics();
if (args?.projectId && typeof args.projectId === "string") {
const projectInsights = await getProjectInsights(args.projectId);
(insights as any).projectSpecific = projectInsights;
}
return {
content: [
{
type: "text",
text: "Memory system insights and patterns",
},
{
type: "text",
text: JSON.stringify(insights, null, 2),
},
],
};
}
case "memory_similar": {
await initializeMemory();
const manager = (await import("./memory/index.js")).getMemoryManager();
if (!manager) throw new Error("Memory system not initialized");
if (!args?.analysisId || typeof args.analysisId !== "string") {
throw new Error("analysisId is required");
}
const analysis = await manager.recall(args.analysisId);
if (!analysis) {
throw new Error(`Analysis ${args.analysisId} not found in memory`);
}
const limitValue = typeof args?.limit === "number" ? args.limit : 5;
const similar = await getSimilarProjects(analysis.data, limitValue);
return {
content: [
{
type: "text",
text: `Found ${similar.length} similar projects`,
},
{
type: "text",
text: JSON.stringify(similar, null, 2),
},
],
};
}
case "memory_export": {
const format =
args?.format === "json" || args?.format === "csv"
? args.format
: "json";
const exported = await exportMemories(format);
return {
content: [
{
type: "text",
text: `Exported memories in ${format} format`,
},
{
type: "text",
text: exported,
},
],
};
}
case "memory_cleanup": {
const daysToKeep =
typeof args?.daysToKeep === "number" ? args.daysToKeep : 30;
if (args?.dryRun) {
const stats = await getMemoryStatistics();
const cutoff = new Date(
Date.now() - daysToKeep * 24 * 60 * 60 * 1000,
);
const oldCount = Object.entries(
(stats as any).statistics?.byMonth || {},
)
.filter(([month]) => new Date(month + "-01") < cutoff)
.reduce((sum, [_, count]) => sum + (count as number), 0);
return {
content: [
{
type: "text",
text: `Dry run: Would delete approximately ${oldCount} memories older than ${daysToKeep} days`,
},
],
};
} else {
const deleted = await cleanupOldMemories(daysToKeep);
return {
content: [
{
type: "text",
text: `Cleaned up ${deleted} old memories`,
},
],
};
}
}
case "memory_intelligent_analysis": {
  // Require projectPath up front (consistent with the other memory_*
  // handlers that validate their required args) so a missing argument
  // yields a clear error instead of an opaque failure inside the helpers.
  if (!args?.projectPath || typeof args.projectPath !== "string") {
    throw new Error("projectPath is required");
  }
  const projectPath = args.projectPath;
  const baseAnalysis = args?.baseAnalysis as any;
  // Gather contextual data: stored insights plus the 5 most similar projects.
  const insights = await getProjectInsights(projectPath);
  const similar = await getSimilarProjects(baseAnalysis, 5);
  // Combine insights, similar-project summaries, and simple heuristics
  // derived from the base analysis into one report object.
  const intelligentAnalysis = {
    projectPath,
    contextualInsights: {
      insights: insights,
      similarProjects: similar.map((p: any) => ({
        name: p.projectPath,
        similarity: p.similarity,
        technologies: p.technologies,
        hasTests: p.hasTests,
        hasDocs: p.hasDocs,
      })),
      documentationHealth: {
        hasDocumentation: baseAnalysis?.documentation?.hasDocs || false,
        coverage: baseAnalysis?.documentation?.coverage || "unknown",
        recommendedImprovement: baseAnalysis?.documentation?.hasDocs
          ? "Add missing documentation categories"
          : "Create initial documentation structure",
      },
    },
    patterns: {
      technologyStack:
        baseAnalysis?.technologies?.primaryLanguage || "unknown",
      projectSize: baseAnalysis?.structure?.size || "unknown",
      testingMaturity: baseAnalysis?.structure?.hasTests
        ? "has tests"
        : "no tests",
      cicdMaturity: baseAnalysis?.structure?.hasCI
        ? "has CI/CD"
        : "no CI/CD",
    },
    predictions: {
      recommendedSSG:
        similar.length > 0
          ? `Based on ${similar.length} similar projects`
          : "Insufficient data",
      // Effort scales with declared project size; unknown sizes map to "low".
      estimatedEffort:
        baseAnalysis?.structure?.size === "large"
          ? "high"
          : baseAnalysis?.structure?.size === "medium"
            ? "medium"
            : "low",
    },
    // One actionable recommendation per missing capability.
    recommendations: [
      ...(baseAnalysis?.documentation?.hasDocs
        ? []
        : ["Create documentation structure using Diataxis framework"]),
      ...(baseAnalysis?.structure?.hasTests
        ? []
        : ["Add test coverage to improve reliability"]),
      ...(baseAnalysis?.structure?.hasCI
        ? []
        : ["Set up CI/CD pipeline for automated deployment"]),
    ],
  };
  return {
    content: [
      {
        type: "text",
        text: JSON.stringify(intelligentAnalysis, null, 2),
      },
    ],
  };
}
case "memory_enhanced_recommendation": {
  const projectPath = args?.projectPath as string;
  const baseRecommendation = args?.baseRecommendation as any;
  // FIX: default to an empty object — projectFeatures comes from an
  // optional arg, but the `considerations` section below reads
  // `projectFeatures.hasTests` etc. without guards and would throw a
  // TypeError when the caller omits it.
  const projectFeatures = (args?.projectFeatures as any) ?? {};
  // Fetch historical insights for this project (result unused here —
  // presumably invoked for its side effects; TODO confirm).
  await getProjectInsights(projectPath);
  const similar = await getSimilarProjects(projectFeatures, 10);
  // Success rate among similar projects that recorded a successful deploy.
  const successfulDeployments = similar.filter(
    (p: any) => p.deploymentSuccess === true,
  );
  // Tally how often each SSG was recommended across similar projects.
  const ssgUsage: Record<string, number> = {};
  similar.forEach((p: any) => {
    if (p.recommendedSSG) {
      ssgUsage[p.recommendedSSG] = (ssgUsage[p.recommendedSSG] || 0) + 1;
    }
  });
  const enhancedRecommendation = {
    baseRecommendation: baseRecommendation?.ssg || "unknown",
    confidence: baseRecommendation?.confidence || 0,
    historicalContext: {
      similarProjectsAnalyzed: similar.length,
      successfulDeployments: successfulDeployments.length,
      successRate:
        similar.length > 0
          ? (
              (successfulDeployments.length / similar.length) *
              100
            ).toFixed(1) + "%"
          : "N/A",
    },
    // Top 3 SSGs by usage among similar projects.
    popularChoices: Object.entries(ssgUsage)
      .sort(([, a], [, b]) => (b as number) - (a as number))
      .slice(0, 3)
      .map(([ssg, count]) => ({
        ssg,
        usage: count,
        percentage:
          similar.length > 0
            ? (((count as number) / similar.length) * 100).toFixed(1) +
              "%"
            : "N/A",
      })),
    // Base recommendation first, then up to 2 alternatives seen in history.
    enhancedRecommendations: [
      {
        ssg: baseRecommendation?.ssg || "Jekyll",
        reason: "Base recommendation from analysis",
        confidence: baseRecommendation?.confidence || 0.7,
      },
      ...Object.entries(ssgUsage)
        .filter(([ssg]) => ssg !== baseRecommendation?.ssg)
        .slice(0, 2)
        .map(([ssg, count]) => ({
          ssg,
          reason: `Used by ${count} similar project(s)`,
          confidence: similar.length > 0 ? count / similar.length : 0.5,
        })),
    ],
    // Feature-driven advisory notes (safe now that projectFeatures is {}).
    considerations: [
      ...(projectFeatures.hasTests
        ? ["Project has tests - consider SSG with good test integration"]
        : []),
      ...(projectFeatures.hasCI
        ? ["Project has CI/CD - ensure SSG supports automated builds"]
        : []),
      ...(projectFeatures.complexity === "complex"
        ? ["Complex project - consider robust SSG with plugin ecosystem"]
        : []),
      ...(projectFeatures.isOpenSource
        ? ["Open source project - community support is important"]
        : []),
    ],
  };
  return {
    content: [
      {
        type: "text",
        text: JSON.stringify(enhancedRecommendation, null, 2),
      },
    ],
  };
}
case "memory_learning_stats": {
  // Surface the raw statistics snapshot as the learning-stats payload.
  const stats = await getMemoryStatistics();
  const payload = {
    status: "active",
    learningStats: stats,
    message: "Learning stats from current memory system",
  };
  return {
    content: [{ type: "text", text: JSON.stringify(payload, null, 2) }],
  };
}
case "memory_knowledge_graph": {
  // Placeholder: not implemented yet; echo the query back to the caller.
  const payload = {
    status: "development",
    message: "Knowledge graph feature is being developed",
    query: args?.query,
  };
  return {
    content: [{ type: "text", text: JSON.stringify(payload, null, 2) }],
  };
}
case "memory_contextual_search": {
  // Placeholder: not implemented yet; echo query and context back.
  const payload = {
    status: "development",
    message: "Contextual search feature is being developed",
    query: args?.query,
    context: args?.context,
  };
  return {
    content: [{ type: "text", text: JSON.stringify(payload, null, 2) }],
  };
}
case "memory_agent_network": {
  // Placeholder: not implemented yet; echo the requested action back.
  const payload = {
    status: "development",
    message: "Agent network feature is being developed",
    action: args?.action,
  };
  return {
    content: [{ type: "text", text: JSON.stringify(payload, null, 2) }],
  };
}
case "memory_pruning": {
  // Placeholder: not implemented yet; echo the dryRun flag back.
  const payload = {
    status: "development",
    message: "Memory pruning feature is being developed",
    dryRun: args?.dryRun,
  };
  return {
    content: [{ type: "text", text: JSON.stringify(payload, null, 2) }],
  };
}
case "memory_temporal_analysis": {
  // Placeholder: not implemented yet; echo the query back.
  const payload = {
    status: "development",
    message: "Temporal analysis feature is being developed",
    query: args?.query,
  };
  return {
    content: [{ type: "text", text: JSON.stringify(payload, null, 2) }],
  };
}
case "memory_visualization": {
  // Placeholder: not implemented yet; echo the requested type back.
  const payload = {
    status: "development",
    message: "Memory visualization feature is being developed",
    visualizationType: args?.visualizationType,
  };
  return {
    content: [{ type: "text", text: JSON.stringify(payload, null, 2) }],
  };
}
case "memory_export_advanced": {
  await initializeMemory();
  const memoryModule = await import("./memory/index.js");
  const manager = memoryModule.getMemoryManager();
  if (!manager) throw new Error("Memory system not initialized");
  const result = await manager.export("json");
  // NOTE: `exported` is the character length of the serialized export string.
  const payload = {
    status: "success",
    exported: result.length,
    data: result,
  };
  return {
    content: [{ type: "text", text: JSON.stringify(payload, null, 2) }],
  };
}
case "memory_import_advanced": {
  await initializeMemory();
  const memoryModule = await import("./memory/index.js");
  const manager = memoryModule.getMemoryManager();
  if (!manager) throw new Error("Memory system not initialized");
  // inputPath is mandatory — fail early with a clear message.
  if (!args?.inputPath || typeof args.inputPath !== "string") {
    throw new Error("inputPath is required");
  }
  // Read the file as UTF-8 and hand the JSON payload to the manager.
  const fs = await import("fs/promises");
  const raw = await fs.readFile(args.inputPath, "utf-8");
  const imported = await manager.import(raw, "json");
  const payload = {
    status: "success",
    imported: imported,
  };
  return {
    content: [{ type: "text", text: JSON.stringify(payload, null, 2) }],
  };
}
case "memory_migration": {
  // Placeholder: not implemented yet; echo the requested action back.
  const payload = {
    status: "development",
    message: "Migration functionality not yet implemented",
    action: args?.action,
  };
  return {
    content: [{ type: "text", text: JSON.stringify(payload, null, 2) }],
  };
}
case "memory_optimization_metrics": {
  // Reuse the statistics snapshot as the optimization-metrics payload.
  const stats = await getMemoryStatistics();
  const payload = {
    status: "active",
    optimizationMetrics: stats,
    message: "Optimization metrics from current memory system",
  };
  return {
    content: [{ type: "text", text: JSON.stringify(payload, null, 2) }],
  };
}
default:
// Unrecognized tool name — rejected here, then wrapped by the catch below.
throw new Error(`Unknown tool: ${name}`);
}
} catch (error) {
// Normalize any thrown value into the standard MCP error envelope so
// callers always receive a structured failure rather than a raw throw.
const errorMessage =
error instanceof Error ? error.message : "Unknown error occurred";
return formatMCPResponse({
success: false,
error: {
code: "TOOL_EXECUTION_ERROR",
message: errorMessage,
// Stack trace only available for real Error instances.
details: error instanceof Error ? error.stack : undefined,
resolution:
"Check tool parameters and try again. If the issue persists, review server logs for details.",
},
metadata: {
toolVersion: packageJson.version,
// NOTE(review): this is a wall-clock timestamp, not an elapsed
// duration — confirm whether a start-to-finish time was intended.
executionTime: Date.now(),
timestamp: new Date().toISOString(),
},
});
}
});
// Start the server
/**
 * Entry point: connect the MCP server over stdio, then report where
 * memory storage lives. Messages use console.error so they go to stderr
 * (stdout carries the stdio transport — presumably; confirm with SDK docs).
 */
async function main() {
  await server.connect(new StdioServerTransport());
  const defaultStorageDir = `${process.cwd()}/.documcp/memory`;
  const storageDir = process.env.DOCUMCP_STORAGE_DIR || defaultStorageDir;
  console.error("DocuMCP server started successfully");
  console.error(`Storage location: ${storageDir}`);
}
// Boot the server; any startup failure is fatal and exits non-zero.
main().catch((err) => {
  console.error("Failed to start DocuMCP server:", err);
  process.exit(1);
});
```