This is page 7 of 20. Use http://codebase.md/tosin2013/documcp?lines=false&page={x} to view the full context.
# Directory Structure
```
├── .dockerignore
├── .eslintignore
├── .eslintrc.json
├── .github
│ ├── agents
│ │ ├── documcp-ast.md
│ │ ├── documcp-deploy.md
│ │ ├── documcp-memory.md
│ │ ├── documcp-test.md
│ │ └── documcp-tool.md
│ ├── copilot-instructions.md
│ ├── dependabot.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── automated-changelog.md
│ │ ├── bug_report.md
│ │ ├── bug_report.yml
│ │ ├── documentation_issue.md
│ │ ├── feature_request.md
│ │ ├── feature_request.yml
│ │ ├── npm-publishing-fix.md
│ │ └── release_improvements.md
│ ├── PULL_REQUEST_TEMPLATE.md
│ ├── release-drafter.yml
│ └── workflows
│ ├── auto-merge.yml
│ ├── ci.yml
│ ├── codeql.yml
│ ├── dependency-review.yml
│ ├── deploy-docs.yml
│ ├── README.md
│ ├── release-drafter.yml
│ └── release.yml
├── .gitignore
├── .husky
│ ├── commit-msg
│ └── pre-commit
├── .linkcheck.config.json
├── .markdown-link-check.json
├── .nvmrc
├── .pre-commit-config.yaml
├── .versionrc.json
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── CONTRIBUTING.md
├── docker-compose.docs.yml
├── Dockerfile.docs
├── docs
│ ├── .docusaurus
│ │ ├── docusaurus-plugin-content-docs
│ │ │ └── default
│ │ │ └── __mdx-loader-dependency.json
│ │ └── docusaurus-plugin-content-pages
│ │ └── default
│ │ └── __plugin.json
│ ├── adrs
│ │ ├── 001-mcp-server-architecture.md
│ │ ├── 002-repository-analysis-engine.md
│ │ ├── 003-static-site-generator-recommendation-engine.md
│ │ ├── 004-diataxis-framework-integration.md
│ │ ├── 005-github-pages-deployment-automation.md
│ │ ├── 006-mcp-tools-api-design.md
│ │ ├── 007-mcp-prompts-and-resources-integration.md
│ │ ├── 008-intelligent-content-population-engine.md
│ │ ├── 009-content-accuracy-validation-framework.md
│ │ ├── 010-mcp-resource-pattern-redesign.md
│ │ └── README.md
│ ├── api
│ │ ├── .nojekyll
│ │ ├── assets
│ │ │ ├── hierarchy.js
│ │ │ ├── highlight.css
│ │ │ ├── icons.js
│ │ │ ├── icons.svg
│ │ │ ├── main.js
│ │ │ ├── navigation.js
│ │ │ ├── search.js
│ │ │ └── style.css
│ │ ├── hierarchy.html
│ │ ├── index.html
│ │ ├── modules.html
│ │ └── variables
│ │ └── TOOLS.html
│ ├── assets
│ │ └── logo.svg
│ ├── development
│ │ └── MCP_INSPECTOR_TESTING.md
│ ├── docusaurus.config.js
│ ├── explanation
│ │ ├── architecture.md
│ │ └── index.md
│ ├── guides
│ │ ├── link-validation.md
│ │ ├── playwright-integration.md
│ │ └── playwright-testing-workflow.md
│ ├── how-to
│ │ ├── analytics-setup.md
│ │ ├── custom-domains.md
│ │ ├── documentation-freshness-tracking.md
│ │ ├── github-pages-deployment.md
│ │ ├── index.md
│ │ ├── local-testing.md
│ │ ├── performance-optimization.md
│ │ ├── prompting-guide.md
│ │ ├── repository-analysis.md
│ │ ├── seo-optimization.md
│ │ ├── site-monitoring.md
│ │ ├── troubleshooting.md
│ │ └── usage-examples.md
│ ├── index.md
│ ├── knowledge-graph.md
│ ├── package-lock.json
│ ├── package.json
│ ├── phase-2-intelligence.md
│ ├── reference
│ │ ├── api-overview.md
│ │ ├── cli.md
│ │ ├── configuration.md
│ │ ├── deploy-pages.md
│ │ ├── index.md
│ │ ├── mcp-tools.md
│ │ └── prompt-templates.md
│ ├── research
│ │ ├── cross-domain-integration
│ │ │ └── README.md
│ │ ├── domain-1-mcp-architecture
│ │ │ ├── index.md
│ │ │ └── mcp-performance-research.md
│ │ ├── domain-2-repository-analysis
│ │ │ └── README.md
│ │ ├── domain-3-ssg-recommendation
│ │ │ ├── index.md
│ │ │ └── ssg-performance-analysis.md
│ │ ├── domain-4-diataxis-integration
│ │ │ └── README.md
│ │ ├── domain-5-github-deployment
│ │ │ ├── github-pages-security-analysis.md
│ │ │ └── index.md
│ │ ├── domain-6-api-design
│ │ │ └── README.md
│ │ ├── README.md
│ │ ├── research-integration-summary-2025-01-14.md
│ │ ├── research-progress-template.md
│ │ └── research-questions-2025-01-14.md
│ ├── robots.txt
│ ├── sidebars.js
│ ├── sitemap.xml
│ ├── src
│ │ └── css
│ │ └── custom.css
│ └── tutorials
│ ├── development-setup.md
│ ├── environment-setup.md
│ ├── first-deployment.md
│ ├── getting-started.md
│ ├── index.md
│ ├── memory-workflows.md
│ └── user-onboarding.md
├── jest.config.js
├── LICENSE
├── Makefile
├── MCP_PHASE2_IMPLEMENTATION.md
├── mcp-config-example.json
├── mcp.json
├── package-lock.json
├── package.json
├── README.md
├── release.sh
├── scripts
│ └── check-package-structure.cjs
├── SECURITY.md
├── setup-precommit.sh
├── src
│ ├── benchmarks
│ │ └── performance.ts
│ ├── index.ts
│ ├── memory
│ │ ├── contextual-retrieval.ts
│ │ ├── deployment-analytics.ts
│ │ ├── enhanced-manager.ts
│ │ ├── export-import.ts
│ │ ├── freshness-kg-integration.ts
│ │ ├── index.ts
│ │ ├── integration.ts
│ │ ├── kg-code-integration.ts
│ │ ├── kg-health.ts
│ │ ├── kg-integration.ts
│ │ ├── kg-link-validator.ts
│ │ ├── kg-storage.ts
│ │ ├── knowledge-graph.ts
│ │ ├── learning.ts
│ │ ├── manager.ts
│ │ ├── multi-agent-sharing.ts
│ │ ├── pruning.ts
│ │ ├── schemas.ts
│ │ ├── storage.ts
│ │ ├── temporal-analysis.ts
│ │ ├── user-preferences.ts
│ │ └── visualization.ts
│ ├── prompts
│ │ └── technical-writer-prompts.ts
│ ├── scripts
│ │ └── benchmark.ts
│ ├── templates
│ │ └── playwright
│ │ ├── accessibility.spec.template.ts
│ │ ├── Dockerfile.template
│ │ ├── docs-e2e.workflow.template.yml
│ │ ├── link-validation.spec.template.ts
│ │ └── playwright.config.template.ts
│ ├── tools
│ │ ├── analyze-deployments.ts
│ │ ├── analyze-readme.ts
│ │ ├── analyze-repository.ts
│ │ ├── check-documentation-links.ts
│ │ ├── deploy-pages.ts
│ │ ├── detect-gaps.ts
│ │ ├── evaluate-readme-health.ts
│ │ ├── generate-config.ts
│ │ ├── generate-contextual-content.ts
│ │ ├── generate-llm-context.ts
│ │ ├── generate-readme-template.ts
│ │ ├── generate-technical-writer-prompts.ts
│ │ ├── kg-health-check.ts
│ │ ├── manage-preferences.ts
│ │ ├── manage-sitemap.ts
│ │ ├── optimize-readme.ts
│ │ ├── populate-content.ts
│ │ ├── readme-best-practices.ts
│ │ ├── recommend-ssg.ts
│ │ ├── setup-playwright-tests.ts
│ │ ├── setup-structure.ts
│ │ ├── sync-code-to-docs.ts
│ │ ├── test-local-deployment.ts
│ │ ├── track-documentation-freshness.ts
│ │ ├── update-existing-documentation.ts
│ │ ├── validate-content.ts
│ │ ├── validate-documentation-freshness.ts
│ │ ├── validate-readme-checklist.ts
│ │ └── verify-deployment.ts
│ ├── types
│ │ └── api.ts
│ ├── utils
│ │ ├── ast-analyzer.ts
│ │ ├── code-scanner.ts
│ │ ├── content-extractor.ts
│ │ ├── drift-detector.ts
│ │ ├── freshness-tracker.ts
│ │ ├── language-parsers-simple.ts
│ │ ├── permission-checker.ts
│ │ └── sitemap-generator.ts
│ └── workflows
│ └── documentation-workflow.ts
├── test-docs-local.sh
├── tests
│ ├── api
│ │ └── mcp-responses.test.ts
│ ├── benchmarks
│ │ └── performance.test.ts
│ ├── edge-cases
│ │ └── error-handling.test.ts
│ ├── functional
│ │ └── tools.test.ts
│ ├── integration
│ │ ├── kg-documentation-workflow.test.ts
│ │ ├── knowledge-graph-workflow.test.ts
│ │ ├── mcp-readme-tools.test.ts
│ │ ├── memory-mcp-tools.test.ts
│ │ ├── readme-technical-writer.test.ts
│ │ └── workflow.test.ts
│ ├── memory
│ │ ├── contextual-retrieval.test.ts
│ │ ├── enhanced-manager.test.ts
│ │ ├── export-import.test.ts
│ │ ├── freshness-kg-integration.test.ts
│ │ ├── kg-code-integration.test.ts
│ │ ├── kg-health.test.ts
│ │ ├── kg-link-validator.test.ts
│ │ ├── kg-storage-validation.test.ts
│ │ ├── kg-storage.test.ts
│ │ ├── knowledge-graph-enhanced.test.ts
│ │ ├── knowledge-graph.test.ts
│ │ ├── learning.test.ts
│ │ ├── manager-advanced.test.ts
│ │ ├── manager.test.ts
│ │ ├── mcp-resource-integration.test.ts
│ │ ├── mcp-tool-persistence.test.ts
│ │ ├── schemas.test.ts
│ │ ├── storage.test.ts
│ │ ├── temporal-analysis.test.ts
│ │ └── user-preferences.test.ts
│ ├── performance
│ │ ├── memory-load-testing.test.ts
│ │ └── memory-stress-testing.test.ts
│ ├── prompts
│ │ ├── guided-workflow-prompts.test.ts
│ │ └── technical-writer-prompts.test.ts
│ ├── server.test.ts
│ ├── setup.ts
│ ├── tools
│ │ ├── all-tools.test.ts
│ │ ├── analyze-coverage.test.ts
│ │ ├── analyze-deployments.test.ts
│ │ ├── analyze-readme.test.ts
│ │ ├── analyze-repository.test.ts
│ │ ├── check-documentation-links.test.ts
│ │ ├── deploy-pages-kg-retrieval.test.ts
│ │ ├── deploy-pages-tracking.test.ts
│ │ ├── deploy-pages.test.ts
│ │ ├── detect-gaps.test.ts
│ │ ├── evaluate-readme-health.test.ts
│ │ ├── generate-contextual-content.test.ts
│ │ ├── generate-llm-context.test.ts
│ │ ├── generate-readme-template.test.ts
│ │ ├── generate-technical-writer-prompts.test.ts
│ │ ├── kg-health-check.test.ts
│ │ ├── manage-sitemap.test.ts
│ │ ├── optimize-readme.test.ts
│ │ ├── readme-best-practices.test.ts
│ │ ├── recommend-ssg-historical.test.ts
│ │ ├── recommend-ssg-preferences.test.ts
│ │ ├── recommend-ssg.test.ts
│ │ ├── simple-coverage.test.ts
│ │ ├── sync-code-to-docs.test.ts
│ │ ├── test-local-deployment.test.ts
│ │ ├── tool-error-handling.test.ts
│ │ ├── track-documentation-freshness.test.ts
│ │ ├── validate-content.test.ts
│ │ ├── validate-documentation-freshness.test.ts
│ │ └── validate-readme-checklist.test.ts
│ ├── types
│ │ └── type-safety.test.ts
│ └── utils
│ ├── ast-analyzer.test.ts
│ ├── content-extractor.test.ts
│ ├── drift-detector.test.ts
│ ├── freshness-tracker.test.ts
│ └── sitemap-generator.test.ts
├── tsconfig.json
└── typedoc.json
```
# Files
--------------------------------------------------------------------------------
/tests/tools/analyze-deployments.test.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Tests for Phase 2.4: Deployment Analytics and Insights
* Tests the analyze_deployments tool with comprehensive pattern analysis
*/
import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
import { promises as fs } from "fs";
import { join } from "path";
import { tmpdir } from "os";
import {
initializeKnowledgeGraph,
getKnowledgeGraph,
createOrUpdateProject,
trackDeployment,
} from "../../src/memory/kg-integration.js";
import { analyzeDeployments } from "../../src/tools/analyze-deployments.js";
describe("analyzeDeployments (Phase 2.4)", () => {
let testDir: string;
let originalEnv: string | undefined;
beforeEach(async () => {
  // Build an isolated, uniquely-named scratch directory for this test.
  const scratch = join(tmpdir(), `analyze-deployments-test-${Date.now()}`);
  await fs.mkdir(scratch, { recursive: true });
  testDir = scratch;

  // Redirect DocuMCP storage at the scratch dir, remembering the previous
  // value so afterEach can put it back.
  originalEnv = process.env.DOCUMCP_STORAGE_DIR;
  process.env.DOCUMCP_STORAGE_DIR = scratch;

  // Bring up the knowledge graph against the isolated storage location.
  await initializeKnowledgeGraph(scratch);
});
afterEach(async () => {
  // Restore the env var exactly as it was before the test. This must be an
  // explicit undefined check: the previous truthiness test (`if (originalEnv)`)
  // would treat an originally-empty-string value as "unset" and delete the
  // variable instead of restoring "".
  if (originalEnv !== undefined) {
    process.env.DOCUMCP_STORAGE_DIR = originalEnv;
  } else {
    delete process.env.DOCUMCP_STORAGE_DIR;
  }

  // Best-effort cleanup of the per-test temp directory; a cleanup failure
  // should not fail the test run, so only warn.
  try {
    await fs.rm(testDir, { recursive: true, force: true });
  } catch (error) {
    console.warn("Failed to clean up test directory:", error);
  }
});
/**
 * Seeds the knowledge graph with three projects and a mix of deployment
 * outcomes: docusaurus (2 successes), hugo (3 successes), and mkdocs
 * (1 success, 1 failure). Returns the created project entities.
 */
const createSampleDeployments = async () => {
  const timestamp = new Date().toISOString();

  // Three projects with distinct language mixes; ids double as path suffixes.
  const specs = [
    {
      id: "project1",
      projectName: "Docusaurus Site",
      totalFiles: 50,
      languages: { typescript: 30, javascript: 20 },
      hasTests: true,
    },
    {
      id: "project2",
      projectName: "Hugo Blog",
      totalFiles: 30,
      languages: { go: 15, html: 15 },
      hasTests: false,
    },
    {
      id: "project3",
      projectName: "MkDocs Docs",
      totalFiles: 40,
      languages: { python: 25, markdown: 15 },
      hasTests: true,
    },
  ];

  const created = [];
  for (const spec of specs) {
    created.push(
      await createOrUpdateProject({
        id: spec.id,
        timestamp,
        path: `/test/${spec.id}`,
        projectName: spec.projectName,
        structure: {
          totalFiles: spec.totalFiles,
          languages: spec.languages,
          hasTests: spec.hasTests,
          hasCI: true,
          hasDocs: true,
        },
      }),
    );
  }
  const [project1, project2, project3] = created;

  // Record deployment history in the same order as before: docusaurus and
  // hugo always succeed; mkdocs has one success followed by one failure.
  await trackDeployment(project1.id, "docusaurus", true, { buildTime: 25000 });
  await trackDeployment(project1.id, "docusaurus", true, { buildTime: 23000 });
  await trackDeployment(project2.id, "hugo", true, { buildTime: 15000 });
  await trackDeployment(project2.id, "hugo", true, { buildTime: 14000 });
  await trackDeployment(project2.id, "hugo", true, { buildTime: 16000 });
  await trackDeployment(project3.id, "mkdocs", true, { buildTime: 30000 });
  await trackDeployment(project3.id, "mkdocs", false, {
    errorMessage: "Build failed",
  });

  return { project1, project2, project3 };
};
// Tests for the default "full_report" analysis: summary, per-SSG patterns,
// insights, and recommendations returned as a JSON text payload.
describe("Full Report Analysis", () => {
  it("should generate comprehensive analytics report with no data", async () => {
    // No fixtures created: the tool must still return a well-formed report.
    const result = await analyzeDeployments({});
    const content = result.content[0];
    expect(content.type).toBe("text");
    const data = JSON.parse(content.text);
    // Empty-graph report: zero counts and no SSG patterns.
    expect(data.summary).toBeDefined();
    expect(data.summary.totalProjects).toBe(0);
    expect(data.summary.totalDeployments).toBe(0);
    expect(data.patterns).toEqual([]);
    // With 0 deployments, we get a warning insight about low success rate
    expect(Array.isArray(data.insights)).toBe(true);
    expect(data.recommendations).toBeDefined();
  });

  it("should generate comprehensive analytics report with sample data", async () => {
    await createSampleDeployments();
    // Explicit analysisType; same default as the empty-args call above.
    const result = await analyzeDeployments({
      analysisType: "full_report",
    });
    const content = result.content[0];
    expect(content.type).toBe("text");
    const data = JSON.parse(content.text);
    // Verify summary reflects the three seeded projects.
    expect(data.summary).toBeDefined();
    expect(data.summary.totalProjects).toBe(3);
    // Each project has 1 configuration node, so 3 total deployments tracked
    expect(data.summary.totalDeployments).toBeGreaterThanOrEqual(3);
    expect(data.summary.overallSuccessRate).toBeGreaterThan(0);
    expect(data.summary.mostUsedSSG).toBeDefined();
    // Verify per-SSG pattern entries carry the expected shape.
    expect(data.patterns).toBeDefined();
    expect(data.patterns.length).toBeGreaterThan(0);
    expect(data.patterns[0]).toHaveProperty("ssg");
    expect(data.patterns[0]).toHaveProperty("totalDeployments");
    expect(data.patterns[0]).toHaveProperty("successRate");
    // Verify insights and recommendations are present (contents checked elsewhere).
    expect(data.insights).toBeDefined();
    expect(data.recommendations).toBeDefined();
  });

  it("should include insights about high success rates", async () => {
    await createSampleDeployments();
    const result = await analyzeDeployments({
      analysisType: "full_report",
    });
    const content = result.content[0];
    const data = JSON.parse(content.text);
    // Should have success insights for docusaurus and hugo
    // (both have a 100% success rate in the sample data).
    const successInsights = data.insights.filter(
      (i: any) => i.type === "success",
    );
    expect(successInsights.length).toBeGreaterThan(0);
  });
});
// Tests for "ssg_stats": per-SSG deployment counts, success rate, and
// average build time for a single named SSG.
describe("SSG Statistics Analysis", () => {
  it("should return error for non-existent SSG", async () => {
    const result = await analyzeDeployments({
      analysisType: "ssg_stats",
      ssg: "nonexistent",
    });
    const content = result.content[0];
    const data = JSON.parse(content.text);
    // Should return error response when SSG has no data
    expect(data.success).toBe(false);
    expect(data.error).toBeDefined();
  });

  it("should return statistics for specific SSG", async () => {
    await createSampleDeployments();
    const result = await analyzeDeployments({
      analysisType: "ssg_stats",
      ssg: "docusaurus",
    });
    const content = result.content[0];
    expect(content.type).toBe("text");
    const data = JSON.parse(content.text);
    expect(data.ssg).toBe("docusaurus");
    // project1 has 2 deployments with docusaurus
    expect(data.totalDeployments).toBeGreaterThanOrEqual(1);
    expect(data.successfulDeployments).toBeGreaterThanOrEqual(1);
    expect(data.successRate).toBeGreaterThan(0);
    expect(data.averageBuildTime).toBeDefined();
    expect(data.projectCount).toBeGreaterThan(0);
  });

  it("should calculate average build time correctly", async () => {
    await createSampleDeployments();
    const result = await analyzeDeployments({
      analysisType: "ssg_stats",
      ssg: "hugo",
    });
    const content = result.content[0];
    const data = JSON.parse(content.text);
    expect(data.averageBuildTime).toBeDefined();
    // Hugo has 3 deployments with build times
    // (14s, 15s, 16s — mean must land strictly under 20s).
    expect(data.averageBuildTime).toBeGreaterThan(0);
    expect(data.averageBuildTime).toBeLessThan(20000);
  });

  it("should show success rate less than 100% for failed deployments", async () => {
    await createSampleDeployments();
    // mkdocs is seeded with one success and one failure.
    const result = await analyzeDeployments({
      analysisType: "ssg_stats",
      ssg: "mkdocs",
    });
    const content = result.content[0];
    const data = JSON.parse(content.text);
    expect(data.totalDeployments).toBeGreaterThanOrEqual(1);
    expect(data.failedDeployments).toBeGreaterThanOrEqual(1);
    expect(data.successRate).toBeLessThan(1.0);
  });
});
// Tests for "compare": side-by-side comparison of two or more SSGs,
// sorted by success rate, limited to SSGs that have deployment data.
describe("SSG Comparison Analysis", () => {
  it("should fail without enough SSGs", async () => {
    // Only one SSG supplied — comparison requires at least two.
    const result = await analyzeDeployments({
      analysisType: "compare",
      ssgs: ["docusaurus"],
    });
    const content = result.content[0];
    expect(content.type).toBe("text");
    const data = JSON.parse(content.text);
    // Should be an error response
    expect(data.success).toBe(false);
    expect(data.error).toBeDefined();
    expect(data.error.code).toBe("ANALYTICS_FAILED");
  });

  it("should compare multiple SSGs by success rate", async () => {
    await createSampleDeployments();
    const result = await analyzeDeployments({
      analysisType: "compare",
      ssgs: ["docusaurus", "hugo", "mkdocs"],
    });
    const content = result.content[0];
    expect(content.type).toBe("text");
    const data = JSON.parse(content.text);
    expect(Array.isArray(data)).toBe(true);
    expect(data.length).toBeGreaterThan(0);
    // Should be sorted by success rate (descending)
    for (let i = 0; i < data.length - 1; i++) {
      expect(data[i].pattern.successRate).toBeGreaterThanOrEqual(
        data[i + 1].pattern.successRate,
      );
    }
  });

  it("should include only SSGs with deployment data", async () => {
    await createSampleDeployments();
    // "nonexistent" has no deployments and must be silently dropped.
    const result = await analyzeDeployments({
      analysisType: "compare",
      ssgs: ["docusaurus", "nonexistent", "hugo"],
    });
    const content = result.content[0];
    const data = JSON.parse(content.text);
    // Should only include docusaurus and hugo
    expect(data.length).toBe(2);
    const ssgs = data.map((d: any) => d.ssg);
    expect(ssgs).toContain("docusaurus");
    expect(ssgs).toContain("hugo");
    expect(ssgs).not.toContain("nonexistent");
  });
});
// Tests for "health": a 0-100 score built from four named factors, each
// carrying an impact value and a good/warning/critical status.
describe("Health Score Analysis", () => {
  it("should calculate health score with no data", async () => {
    // Even with an empty graph the score must be a bounded number with
    // all four factors present.
    const result = await analyzeDeployments({
      analysisType: "health",
    });
    const content = result.content[0];
    expect(content.type).toBe("text");
    const data = JSON.parse(content.text);
    expect(data.score).toBeDefined();
    expect(data.score).toBeGreaterThanOrEqual(0);
    expect(data.score).toBeLessThanOrEqual(100);
    expect(data.factors).toBeDefined();
    expect(Array.isArray(data.factors)).toBe(true);
    expect(data.factors.length).toBe(4); // 4 factors
  });

  it("should calculate health score with sample data", async () => {
    await createSampleDeployments();
    const result = await analyzeDeployments({
      analysisType: "health",
    });
    const content = result.content[0];
    const data = JSON.parse(content.text);
    expect(data.score).toBeGreaterThan(0);
    expect(data.factors.length).toBe(4);
    // Check all factors present
    const factorNames = data.factors.map((f: any) => f.name);
    expect(factorNames).toContain("Overall Success Rate");
    expect(factorNames).toContain("Active Projects");
    expect(factorNames).toContain("Deployment Activity");
    expect(factorNames).toContain("SSG Diversity");
    // Each factor should have impact and status
    data.factors.forEach((factor: any) => {
      expect(factor.impact).toBeDefined();
      expect(factor.status).toMatch(/^(good|warning|critical)$/);
    });
  });

  it("should have good health with high success rate", async () => {
    await createSampleDeployments();
    const result = await analyzeDeployments({
      analysisType: "health",
    });
    const content = result.content[0];
    const data = JSON.parse(content.text);
    // Should have decent health with our sample data
    expect(data.score).toBeGreaterThan(30);
    // NOTE(review): find() may return undefined if the factor name ever
    // changes; the .status access below would then throw rather than
    // produce a clear assertion failure.
    const successRateFactor = data.factors.find(
      (f: any) => f.name === "Overall Success Rate",
    );
    expect(successRateFactor.status).toMatch(/^(good|warning)$/);
  });
});
// Tests for "trends": deployment activity bucketed over time, with an
// optional periodDays window.
describe("Trend Analysis", () => {
  it("should analyze trends with default period", async () => {
    await createSampleDeployments();

    const result = await analyzeDeployments({ analysisType: "trends" });
    const [content] = result.content;

    expect(content.type).toBe("text");
    // Trends come back as an array of per-period buckets.
    const data = JSON.parse(content.text);
    expect(Array.isArray(data)).toBe(true);
  });

  it("should analyze trends with custom period", async () => {
    await createSampleDeployments();

    // Same analysis, restricted to a 7-day window.
    const result = await analyzeDeployments({
      analysisType: "trends",
      periodDays: 7,
    });
    const [content] = result.content;
    const data = JSON.parse(content.text);
    expect(Array.isArray(data)).toBe(true);
  });
});
// Tests for error paths: missing required parameters and bad analysis types.
describe("Error Handling", () => {
  it("should handle missing SSG parameter for ssg_stats", async () => {
    // ssg_stats requires an `ssg` name; omitting it must yield a structured
    // error payload, not a throw.
    const result = await analyzeDeployments({
      analysisType: "ssg_stats",
    });
    const content = result.content[0];
    const data = JSON.parse(content.text);
    expect(data.success).toBe(false);
    expect(data.error).toBeDefined();
    expect(data.error.code).toBe("ANALYTICS_FAILED");
    expect(data.error.message).toContain("SSG name required");
  });

  it("should handle invalid analysis type gracefully", async () => {
    // NOTE(review): despite the test name, "full_report" is a VALID analysis
    // type — this never exercises an invalid value. Consider passing an
    // out-of-enum string here; behavior of the tool for that case is
    // unverified from this file.
    const result = await analyzeDeployments({
      analysisType: "full_report",
    });
    const content = result.content[0];
    expect(content.type).toBe("text");
    // Should not throw, should return valid response
  });
});
// Tests that the full report carries a non-empty recommendations list
// derived from the observed deployment patterns.
describe("Recommendations Generation", () => {
  it("should generate recommendations based on patterns", async () => {
    await createSampleDeployments();
    const result = await analyzeDeployments({
      analysisType: "full_report",
    });
    const content = result.content[0];
    const data = JSON.parse(content.text);
    expect(data.recommendations).toBeDefined();
    expect(Array.isArray(data.recommendations)).toBe(true);
    expect(data.recommendations.length).toBeGreaterThan(0);
  });

  it("should recommend best performing SSG", async () => {
    await createSampleDeployments();
    const result = await analyzeDeployments({
      analysisType: "full_report",
    });
    const content = result.content[0];
    const data = JSON.parse(content.text);
    // Should have recommendations
    expect(data.recommendations.length).toBeGreaterThan(0);
    // At least one recommendation should mention an SSG or general advice
    // NOTE(review): this only asserts the joined text is non-empty — it does
    // not actually check that a specific SSG is named, so the test is weaker
    // than its title suggests.
    const allText = data.recommendations.join(" ").toLowerCase();
    expect(allText.length).toBeGreaterThan(0);
  });
});
// Tests that fast build times surface as dedicated insights in the report.
describe("Build Time Analysis", () => {
  it("should identify fast builds in insights", async () => {
    await createSampleDeployments();

    const report = await analyzeDeployments({ analysisType: "full_report" });
    const data = JSON.parse(report.content[0].text);

    // Hugo's ~15s builds should surface at least one "Fast Builds" insight.
    const fastBuilds = data.insights.filter(
      (insight: any) =>
        insight.title && insight.title.includes("Fast Builds"),
    );
    expect(fastBuilds.length).toBeGreaterThan(0);
  });
});
});
```
--------------------------------------------------------------------------------
/tests/memory/kg-code-integration.test.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Tests for Knowledge Graph Code Integration
*/
import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
import { promises as fs } from "fs";
import path from "path";
import { tmpdir } from "os";
import {
createCodeFileEntities,
createDocumentationEntities,
linkCodeToDocs,
} from "../../src/memory/kg-code-integration.js";
import { ExtractedContent } from "../../src/utils/content-extractor.js";
import {
initializeKnowledgeGraph,
getKnowledgeGraph,
} from "../../src/memory/kg-integration.js";
describe("KG Code Integration", () => {
let testDir: string;
let projectId: string;
beforeEach(async () => {
  // Unique scratch workspace for this test's generated source files.
  testDir = path.join(tmpdir(), `documcp-test-${Date.now()}`);
  await fs.mkdir(testDir, { recursive: true });

  projectId = `project:test_${Date.now()}`;

  // Point the knowledge graph at storage nested inside the scratch dir.
  await initializeKnowledgeGraph(path.join(testDir, ".documcp/memory"));
});
afterEach(async () => {
  // Best-effort removal of the scratch directory; failures are swallowed
  // deliberately so a cleanup problem never fails the test itself.
  await fs.rm(testDir, { recursive: true, force: true }).catch(() => {});
});
// Tests for createCodeFileEntities: scanning a project tree and emitting one
// code_file entity per recognized source file, with language, path, extracted
// classes/functions, a content hash, line count, and a complexity estimate.
describe("createCodeFileEntities", () => {
  it("should create code file entities from TypeScript files", async () => {
    // Create a test TypeScript file
    const srcDir = path.join(testDir, "src");
    await fs.mkdir(srcDir, { recursive: true });
    const tsContent = `
export class UserService {
async getUser(id: string) {
return { id, name: "Test User" };
}
async createUser(data: any) {
return { ...data, id: "123" };
}
}
export async function validateUser(user: any) {
return user.name && user.id;
}
`;
    await fs.writeFile(path.join(srcDir, "user.ts"), tsContent, "utf-8");
    // Create entities
    const entities = await createCodeFileEntities(projectId, testDir);
    // Assertions: one entity, with the class and top-level function extracted.
    expect(entities.length).toBe(1);
    expect(entities[0].type).toBe("code_file");
    expect(entities[0].properties.language).toBe("typescript");
    expect(entities[0].properties.path).toBe("src/user.ts");
    expect(entities[0].properties.classes).toContain("UserService");
    expect(entities[0].properties.functions).toContain("validateUser");
    expect(entities[0].properties.contentHash).toBeDefined();
    expect(entities[0].properties.linesOfCode).toBeGreaterThan(0);
  });

  it("should create code file entities from Python files", async () => {
    const srcDir = path.join(testDir, "src");
    await fs.mkdir(srcDir, { recursive: true });
    const pyContent = `
class Database:
def connect(self):
pass
def query(self, sql):
return []
def initialize_db():
return Database()
`;
    await fs.writeFile(path.join(srcDir, "database.py"), pyContent, "utf-8");
    const entities = await createCodeFileEntities(projectId, testDir);
    expect(entities.length).toBe(1);
    expect(entities[0].properties.language).toBe("python");
    expect(entities[0].properties.classes).toContain("Database");
    expect(entities[0].properties.functions).toContain("initialize_db");
  });

  it("should handle nested directories", async () => {
    // Paths must be reported relative to the project root, with forward slashes.
    const nestedDir = path.join(testDir, "src", "services", "auth");
    await fs.mkdir(nestedDir, { recursive: true });
    await fs.writeFile(
      path.join(nestedDir, "login.ts"),
      "export function login() {}",
      "utf-8",
    );
    const entities = await createCodeFileEntities(projectId, testDir);
    expect(entities.length).toBe(1);
    expect(entities[0].properties.path).toBe("src/services/auth/login.ts");
  });

  it("should skip non-code files", async () => {
    // Markdown and JSON are not source files and must produce no entities.
    const srcDir = path.join(testDir, "src");
    await fs.mkdir(srcDir, { recursive: true });
    await fs.writeFile(path.join(srcDir, "README.md"), "# Readme", "utf-8");
    await fs.writeFile(path.join(srcDir, "config.json"), "{}", "utf-8");
    const entities = await createCodeFileEntities(projectId, testDir);
    expect(entities.length).toBe(0);
  });

  it("should estimate complexity correctly", async () => {
    const srcDir = path.join(testDir, "src");
    await fs.mkdir(srcDir, { recursive: true });
    // Small file - low complexity
    const smallFile = "export function simple() { return 1; }";
    await fs.writeFile(path.join(srcDir, "small.ts"), smallFile, "utf-8");
    // Large file - high complexity (200 repeated one-line functions)
    const largeFile = Array(200)
      .fill("function test() { return 1; }")
      .join("\n");
    await fs.writeFile(path.join(srcDir, "large.ts"), largeFile, "utf-8");
    const entities = await createCodeFileEntities(projectId, testDir);
    const smallEntity = entities.find((e) =>
      e.properties.path.includes("small.ts"),
    );
    const largeEntity = entities.find((e) =>
      e.properties.path.includes("large.ts"),
    );
    expect(smallEntity?.properties.complexity).toBe("low");
    expect(largeEntity?.properties.complexity).toBe("high");
  });

  it("should create relationships with project", async () => {
    // Scanning should also write project -> code_file edges into the KG.
    const srcDir = path.join(testDir, "src");
    await fs.mkdir(srcDir, { recursive: true });
    await fs.writeFile(
      path.join(srcDir, "test.ts"),
      "export function test() {}",
      "utf-8",
    );
    await createCodeFileEntities(projectId, testDir);
    const kg = await getKnowledgeGraph();
    const edges = await kg.findEdges({ source: projectId });
    expect(edges.some((e) => e.type === "depends_on")).toBe(true);
  });
});
// Tests for createDocumentationEntities: converting extracted documentation
// (README sections, existing docs, ADRs) into documentation_section entities
// with Diataxis categories, content hashes, and extracted code references.
describe("createDocumentationEntities", () => {
  it("should create documentation section entities from README", async () => {
    // One entity per README section; category defaults to "reference" here.
    const extractedContent: ExtractedContent = {
      readme: {
        content: "# My Project\n\nThis is a test project.",
        sections: [
          {
            title: "My Project",
            content: "This is a test project.",
            level: 1,
          },
          { title: "Installation", content: "npm install", level: 2 },
        ],
      },
      existingDocs: [],
      adrs: [],
      codeExamples: [],
      apiDocs: [],
    };
    const entities = await createDocumentationEntities(
      projectId,
      extractedContent,
    );
    expect(entities.length).toBe(2);
    expect(entities[0].type).toBe("documentation_section");
    expect(entities[0].properties.sectionTitle).toBe("My Project");
    expect(entities[0].properties.contentHash).toBeDefined();
    expect(entities[0].properties.category).toBe("reference");
  });

  it("should categorize documentation correctly", async () => {
    // Pre-labelled docs keep their Diataxis categories on the entities.
    const extractedContent: ExtractedContent = {
      existingDocs: [
        {
          path: "docs/tutorials/getting-started.md",
          title: "Getting Started",
          content: "# Tutorial",
          category: "tutorial",
        },
        {
          path: "docs/how-to/deploy.md",
          title: "Deploy Guide",
          content: "# How to Deploy",
          category: "how-to",
        },
        {
          path: "docs/api/reference.md",
          title: "API Reference",
          content: "# API",
          category: "reference",
        },
      ],
      adrs: [],
      codeExamples: [],
      apiDocs: [],
    };
    const entities = await createDocumentationEntities(
      projectId,
      extractedContent,
    );
    expect(entities.length).toBe(3);
    expect(
      entities.find((e) => e.properties.category === "tutorial"),
    ).toBeDefined();
    expect(
      entities.find((e) => e.properties.category === "how-to"),
    ).toBeDefined();
    expect(
      entities.find((e) => e.properties.category === "reference"),
    ).toBeDefined();
  });

  it("should extract code references from content", async () => {
    // Backtick-quoted file paths, function calls, and class names in the doc
    // body should be pulled into the referenced* properties.
    const extractedContent: ExtractedContent = {
      existingDocs: [
        {
          path: "docs/guide.md",
          title: "Guide",
          content:
            "Call `getUserById()` from `src/user.ts` using `UserService` class",
          category: "how-to",
        },
      ],
      adrs: [],
      codeExamples: [],
      apiDocs: [],
    };
    const entities = await createDocumentationEntities(
      projectId,
      extractedContent,
    );
    expect(entities[0].properties.referencedCodeFiles).toContain(
      "src/user.ts",
    );
    expect(entities[0].properties.referencedFunctions).toContain(
      "getUserById",
    );
    expect(entities[0].properties.referencedClasses).toContain("UserService");
  });

  it("should detect code examples in documentation", async () => {
    // A fenced code block should flip hasCodeExamples and raise the
    // effectiveness score above the 0.5 baseline.
    const extractedContent: ExtractedContent = {
      existingDocs: [
        {
          path: "docs/example.md",
          title: "Example",
          content: "# Example\n\n```typescript\nconst x = 1;\n```",
        },
      ],
      adrs: [],
      codeExamples: [],
      apiDocs: [],
    };
    const entities = await createDocumentationEntities(
      projectId,
      extractedContent,
    );
    expect(entities[0].properties.hasCodeExamples).toBe(true);
    expect(entities[0].properties.effectivenessScore).toBeGreaterThan(0.5);
  });

  it("should process ADRs as explanation category", async () => {
    // ADRs map to the Diataxis "explanation" category, titled by the ADR title.
    const extractedContent: ExtractedContent = {
      existingDocs: [],
      adrs: [
        {
          number: "001",
          title: "Use TypeScript",
          status: "Accepted",
          content: "We will use TypeScript for type safety",
          decision: "Use TypeScript",
          consequences: "Better IDE support",
        },
      ],
      codeExamples: [],
      apiDocs: [],
    };
    const entities = await createDocumentationEntities(
      projectId,
      extractedContent,
    );
    expect(entities.length).toBe(1);
    expect(entities[0].properties.category).toBe("explanation");
    expect(entities[0].properties.sectionTitle).toBe("Use TypeScript");
  });
});
describe("linkCodeToDocs", () => {
it("should create references edges when docs reference code", async () => {
  // Create a code entity on disk for the linker to match against.
  const srcDir = path.join(testDir, "src");
  await fs.mkdir(srcDir, { recursive: true });
  await fs.writeFile(
    path.join(srcDir, "user.ts"),
    "export function getUser() {}",
    "utf-8",
  );
  const codeFiles = await createCodeFileEntities(projectId, testDir);
  // Create doc entity that references the code by backtick-quoted path/function.
  const extractedContent: ExtractedContent = {
    existingDocs: [
      {
        path: "docs/api.md",
        title: "API",
        content: "Use `getUser()` from `src/user.ts`",
        category: "reference",
      },
    ],
    adrs: [],
    codeExamples: [],
    apiDocs: [],
  };
  const docSections = await createDocumentationEntities(
    projectId,
    extractedContent,
  );
  // Link them
  const edges = await linkCodeToDocs(codeFiles, docSections);
  // Should create references edge (doc -> code)
  const referencesEdge = edges.find((e) => e.type === "references");
  expect(referencesEdge).toBeDefined();
  expect(referencesEdge?.source).toBe(docSections[0].id);
  expect(referencesEdge?.target).toBe(codeFiles[0].id);
  expect(referencesEdge?.properties.referenceType).toBe("api-reference");
  // Should create documents edge (code -> doc), the reverse direction.
  const documentsEdge = edges.find((e) => e.type === "documents");
  expect(documentsEdge).toBeDefined();
  expect(documentsEdge?.source).toBe(codeFiles[0].id);
  expect(documentsEdge?.target).toBe(docSections[0].id);
});
it("should detect outdated documentation", async () => {
// Create code entity with recent modification
const srcDir = path.join(testDir, "src");
await fs.mkdir(srcDir, { recursive: true });
await fs.writeFile(
path.join(srcDir, "user.ts"),
"export function getUser() {}",
"utf-8",
);
const codeFiles = await createCodeFileEntities(projectId, testDir);
// Simulate old documentation (modify lastUpdated)
const extractedContent: ExtractedContent = {
existingDocs: [
{
path: "docs/api.md",
title: "API",
content: "Use `getUser()` from `src/user.ts`",
category: "reference",
},
],
adrs: [],
codeExamples: [],
apiDocs: [],
};
const docSections = await createDocumentationEntities(
projectId,
extractedContent,
);
// Manually set old timestamp on doc
docSections[0].properties.lastUpdated = new Date(
Date.now() - 86400000,
).toISOString();
const edges = await linkCodeToDocs(codeFiles, docSections);
// Should create outdated_for edge
const outdatedEdge = edges.find((e) => e.type === "outdated_for");
expect(outdatedEdge).toBeDefined();
expect(outdatedEdge?.properties.severity).toBe("medium");
});
it("should determine coverage based on referenced functions", async () => {
const srcDir = path.join(testDir, "src");
await fs.mkdir(srcDir, { recursive: true });
// Code with 3 functions
await fs.writeFile(
path.join(srcDir, "user.ts"),
`
export function getUser() {}
export function createUser() {}
export function deleteUser() {}
`,
"utf-8",
);
const codeFiles = await createCodeFileEntities(projectId, testDir);
// Doc that only references 2 functions (66% coverage)
const extractedContent: ExtractedContent = {
existingDocs: [
{
path: "docs/api.md",
title: "API",
content: "Use `getUser()` and `createUser()` from `src/user.ts`",
category: "reference",
},
],
adrs: [],
codeExamples: [],
apiDocs: [],
};
const docSections = await createDocumentationEntities(
projectId,
extractedContent,
);
const edges = await linkCodeToDocs(codeFiles, docSections);
const documentsEdge = edges.find((e) => e.type === "documents");
expect(documentsEdge?.properties.coverage).toBe("complete"); // >= 50%
});
it("should handle documentation with no code references", async () => {
const srcDir = path.join(testDir, "src");
await fs.mkdir(srcDir, { recursive: true });
await fs.writeFile(
path.join(srcDir, "user.ts"),
"export function getUser() {}",
"utf-8",
);
const codeFiles = await createCodeFileEntities(projectId, testDir);
// Doc with no code references
const extractedContent: ExtractedContent = {
existingDocs: [
{
path: "docs/guide.md",
title: "Guide",
content: "This is a general guide with no code references",
category: "tutorial",
},
],
adrs: [],
codeExamples: [],
apiDocs: [],
};
const docSections = await createDocumentationEntities(
projectId,
extractedContent,
);
const edges = await linkCodeToDocs(codeFiles, docSections);
// Should not create edges between unrelated code and docs
expect(edges.length).toBe(0);
});
});
});
```
--------------------------------------------------------------------------------
/tests/tools/recommend-ssg-historical.test.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Tests for Phase 2.1: Historical Deployment Data Integration
* Tests the enhanced recommend_ssg tool with knowledge graph integration
*/
import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
import { promises as fs } from "fs";
import { join } from "path";
import { tmpdir } from "os";
import {
initializeKnowledgeGraph,
createOrUpdateProject,
trackDeployment,
getMemoryManager,
} from "../../src/memory/kg-integration.js";
import { recommendSSG } from "../../src/tools/recommend-ssg.js";
import { MemoryManager } from "../../src/memory/manager.js";
describe("recommendSSG with Historical Data (Phase 2.1)", () => {
let testDir: string;
let originalEnv: string | undefined;
let memoryManager: MemoryManager;
beforeEach(async () => {
// Create temporary test directory
testDir = join(tmpdir(), `recommend-ssg-historical-test-${Date.now()}`);
await fs.mkdir(testDir, { recursive: true });
// Set environment variable for storage
originalEnv = process.env.DOCUMCP_STORAGE_DIR;
process.env.DOCUMCP_STORAGE_DIR = testDir;
// Initialize KG and memory - this creates the global memory manager
await initializeKnowledgeGraph(testDir);
// Use the same memory manager instance that kg-integration created
memoryManager = await getMemoryManager();
});
afterEach(async () => {
// Restore environment
if (originalEnv) {
process.env.DOCUMCP_STORAGE_DIR = originalEnv;
} else {
delete process.env.DOCUMCP_STORAGE_DIR;
}
// Clean up test directory
try {
await fs.rm(testDir, { recursive: true, force: true });
} catch (error) {
console.warn("Failed to clean up test directory:", error);
}
});
describe("Historical Data Retrieval", () => {
it("should include historical data when similar projects exist", async () => {
// Create a project with successful deployments
const project1 = await createOrUpdateProject({
id: "test_project_1",
timestamp: new Date().toISOString(),
path: "/test/project1",
projectName: "Test Project 1",
structure: {
totalFiles: 50,
languages: { typescript: 30, javascript: 20 },
hasTests: true,
hasCI: false,
hasDocs: false,
},
});
// Track successful Docusaurus deployments
await trackDeployment(project1.id, "docusaurus", true, {
buildTime: 45,
});
await trackDeployment(project1.id, "docusaurus", true, {
buildTime: 42,
});
// Store analysis in memory for recommendation
const memoryEntry = await memoryManager.remember("analysis", {
path: "/test/project2",
dependencies: {
ecosystem: "javascript",
languages: ["typescript", "javascript"],
},
structure: { totalFiles: 60 },
});
// Get recommendation
const result = await recommendSSG({
analysisId: memoryEntry.id,
preferences: {},
});
const content = result.content[0];
expect(content.type).toBe("text");
const data = JSON.parse(content.text);
// Should include historical data
expect(data.historicalData).toBeDefined();
expect(data.historicalData.similarProjectCount).toBeGreaterThan(0);
expect(data.historicalData.successRates.docusaurus).toBeDefined();
expect(data.historicalData.successRates.docusaurus.rate).toBe(1.0);
expect(data.historicalData.successRates.docusaurus.sampleSize).toBe(2);
});
it("should boost confidence when historical success rate is high", async () => {
// Create multiple successful projects
for (let i = 0; i < 3; i++) {
const project = await createOrUpdateProject({
id: `project_${i}`,
timestamp: new Date().toISOString(),
path: `/test/project${i}`,
projectName: `Project ${i}`,
structure: {
totalFiles: 50,
languages: { typescript: 50 },
hasTests: true,
hasCI: false,
hasDocs: false,
},
});
// Track successful Hugo deployments
await trackDeployment(project.id, "hugo", true, { buildTime: 30 });
}
// Store analysis
const memoryEntry = await memoryManager.remember("analysis", {
path: "/test/new-project",
dependencies: {
ecosystem: "go",
languages: ["typescript"],
},
structure: { totalFiles: 60 },
});
const result = await recommendSSG({ analysisId: memoryEntry.id });
const content = result.content[0];
const data = JSON.parse(content.text);
// Should have high confidence due to historical success
expect(data.confidence).toBeGreaterThan(0.9);
expect(data.reasoning[0]).toContain("100% success rate");
});
it("should reduce confidence when historical success rate is low", async () => {
// Create project with failed deployments
const project = await createOrUpdateProject({
id: "failing_project",
timestamp: new Date().toISOString(),
path: "/test/failing",
projectName: "Failing Project",
structure: {
totalFiles: 50,
languages: { python: 50 },
hasTests: true,
hasCI: false,
hasDocs: false,
},
});
// Track mostly failed Jekyll deployments
await trackDeployment(project.id, "jekyll", false, {
errorMessage: "Build failed",
});
await trackDeployment(project.id, "jekyll", false, {
errorMessage: "Build failed",
});
await trackDeployment(project.id, "jekyll", true, { buildTime: 60 });
// Store analysis
const memoryEntry003 = await memoryManager.remember("analysis", {
path: "/test/new-python",
dependencies: {
ecosystem: "python",
languages: ["python"],
},
structure: { totalFiles: 60 },
});
const result = await recommendSSG({ analysisId: memoryEntry003.id });
const content = result.content[0];
const data = JSON.parse(content.text);
// Should have reduced confidence
expect(data.confidence).toBeLessThan(0.8);
expect(data.reasoning[0]).toContain("33% success rate");
});
it("should switch to top performer when significantly better", async () => {
// Create projects with mixed results
const project1 = await createOrUpdateProject({
id: "project_mixed_1",
timestamp: new Date().toISOString(),
path: "/test/mixed1",
projectName: "Mixed Project 1",
structure: {
totalFiles: 50,
languages: { javascript: 50 },
hasTests: true,
hasCI: false,
hasDocs: false,
},
});
// Docusaurus: 50% success rate (2 samples)
await trackDeployment(project1.id, "docusaurus", true);
await trackDeployment(project1.id, "docusaurus", false);
// Eleventy: 100% success rate (3 samples)
const project2 = await createOrUpdateProject({
id: "project_mixed_2",
timestamp: new Date().toISOString(),
path: "/test/mixed2",
projectName: "Mixed Project 2",
structure: {
totalFiles: 50,
languages: { javascript: 50 },
hasTests: true,
hasCI: false,
hasDocs: false,
},
});
await trackDeployment(project2.id, "eleventy", true);
await trackDeployment(project2.id, "eleventy", true);
await trackDeployment(project2.id, "eleventy", true);
// Store analysis preferring JavaScript
const memoryEntry004 = await memoryManager.remember("analysis", {
path: "/test/new-js",
dependencies: {
ecosystem: "javascript",
languages: ["javascript"],
},
structure: { totalFiles: 40 },
});
const result = await recommendSSG({ analysisId: memoryEntry004.id });
const content = result.content[0];
const data = JSON.parse(content.text);
// Should switch to Eleventy due to better success rate
expect(data.recommended).toBe("eleventy");
expect(data.reasoning[0]).toContain("Switching to eleventy");
expect(data.reasoning[0]).toContain("100% success rate");
});
it("should mention top performer as alternative if not switching", async () => {
// Create successful Hugo deployments
const project = await createOrUpdateProject({
id: "hugo_success",
timestamp: new Date().toISOString(),
path: "/test/hugo",
projectName: "Hugo Success",
structure: {
totalFiles: 100,
languages: { go: 80, markdown: 20 },
hasTests: true,
hasCI: false,
hasDocs: false,
},
});
await trackDeployment(project.id, "hugo", true);
await trackDeployment(project.id, "hugo", true);
// Store analysis for different ecosystem
const memoryEntry005 = await memoryManager.remember("analysis", {
path: "/test/new-python",
dependencies: {
ecosystem: "python",
languages: ["python"],
},
structure: { totalFiles: 60 },
});
const result = await recommendSSG({ analysisId: memoryEntry005.id });
const content = result.content[0];
const data = JSON.parse(content.text);
// Should keep Python recommendation but mention Hugo
expect(data.recommended).toBe("mkdocs");
const hugoMention = data.reasoning.find((r: string) =>
r.includes("hugo"),
);
expect(hugoMention).toBeDefined();
});
it("should include deployment statistics in reasoning", async () => {
// Create multiple projects with various deployments
for (let i = 0; i < 3; i++) {
const project = await createOrUpdateProject({
id: `stats_project_${i}`,
timestamp: new Date().toISOString(),
path: `/test/stats${i}`,
projectName: `Stats Project ${i}`,
structure: {
totalFiles: 50,
languages: { typescript: 50 },
hasTests: true,
hasCI: false,
hasDocs: false,
},
});
await trackDeployment(project.id, "docusaurus", true);
await trackDeployment(project.id, "docusaurus", true);
}
const memoryEntry006 = await memoryManager.remember("analysis", {
path: "/test/stats-new",
dependencies: {
ecosystem: "javascript",
languages: ["typescript"],
},
structure: { totalFiles: 50 },
});
const result = await recommendSSG({ analysisId: memoryEntry006.id });
const content = result.content[0];
const data = JSON.parse(content.text);
// Should mention deployment statistics
const statsReasoning = data.reasoning.find((r: string) =>
r.includes("deployment(s) across"),
);
expect(statsReasoning).toBeDefined();
expect(statsReasoning).toContain("6 deployment(s)");
expect(statsReasoning).toContain("3 similar project(s)");
});
});
describe("Historical Data Structure", () => {
it("should provide complete historical data structure", async () => {
const project = await createOrUpdateProject({
id: "structure_test",
timestamp: new Date().toISOString(),
path: "/test/structure",
projectName: "Structure Test",
structure: {
totalFiles: 50,
languages: { javascript: 50 },
hasTests: true,
hasCI: false,
hasDocs: false,
},
});
await trackDeployment(project.id, "jekyll", true);
await trackDeployment(project.id, "hugo", true);
await trackDeployment(project.id, "hugo", true);
const memoryEntry007 = await memoryManager.remember("analysis", {
path: "/test/structure-new",
dependencies: {
ecosystem: "javascript",
languages: ["javascript"],
},
structure: { totalFiles: 50 },
});
const result = await recommendSSG({ analysisId: memoryEntry007.id });
const content = result.content[0];
const data = JSON.parse(content.text);
expect(data.historicalData).toBeDefined();
expect(data.historicalData.similarProjectCount).toBe(1);
expect(data.historicalData.successRates).toBeDefined();
expect(data.historicalData.successRates.jekyll).toEqual({
rate: 1.0,
sampleSize: 1,
});
expect(data.historicalData.successRates.hugo).toEqual({
rate: 1.0,
sampleSize: 2,
});
expect(data.historicalData.topPerformer).toBeDefined();
expect(data.historicalData.topPerformer?.ssg).toBe("hugo");
expect(data.historicalData.topPerformer?.deploymentCount).toBe(2);
});
it("should handle no historical data gracefully", async () => {
const memoryEntry008 = await memoryManager.remember("analysis", {
path: "/test/no-history",
dependencies: {
ecosystem: "ruby",
languages: ["ruby"],
},
structure: { totalFiles: 30 },
});
const result = await recommendSSG({ analysisId: memoryEntry008.id });
const content = result.content[0];
const data = JSON.parse(content.text);
// Should still make recommendation
expect(data.recommended).toBe("jekyll");
expect(data.confidence).toBeGreaterThan(0);
// Historical data should show no similar projects
expect(data.historicalData).toBeDefined();
expect(data.historicalData.similarProjectCount).toBe(0);
expect(Object.keys(data.historicalData.successRates)).toHaveLength(0);
});
});
describe("Edge Cases", () => {
it("should handle single deployment samples cautiously", async () => {
const project = await createOrUpdateProject({
id: "single_sample",
timestamp: new Date().toISOString(),
path: "/test/single",
projectName: "Single Sample",
structure: {
totalFiles: 50,
languages: { python: 50 },
hasTests: true,
hasCI: false,
hasDocs: false,
},
});
// Single successful deployment
await trackDeployment(project.id, "mkdocs", true);
const memoryEntry009 = await memoryManager.remember("analysis", {
path: "/test/single-new",
dependencies: {
ecosystem: "python",
languages: ["python"],
},
structure: { totalFiles: 50 },
});
const result = await recommendSSG({ analysisId: memoryEntry009.id });
const content = result.content[0];
const data = JSON.parse(content.text);
// Should not be a top performer with only 1 sample
expect(data.historicalData?.topPerformer).toBeUndefined();
});
it("should handle knowledge graph initialization failure", async () => {
// Use invalid storage directory
const invalidDir = "/invalid/path/that/does/not/exist";
const memoryEntry010 = await memoryManager.remember("analysis", {
path: "/test/kg-fail",
dependencies: {
ecosystem: "javascript",
languages: ["javascript"],
},
structure: { totalFiles: 50 },
});
// Should still make recommendation despite KG failure
const result = await recommendSSG({ analysisId: memoryEntry010.id });
const content = result.content[0];
const data = JSON.parse(content.text);
expect(data.recommended).toBeDefined();
expect(data.confidence).toBeGreaterThan(0);
});
});
});
```
--------------------------------------------------------------------------------
/src/memory/deployment-analytics.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Deployment Analytics Module
* Phase 2.4: Pattern Analysis and Insights
*
* Analyzes deployment history to identify patterns, trends, and provide insights
*/
import { getKnowledgeGraph } from "./kg-integration.js";
import { GraphNode, GraphEdge } from "./knowledge-graph.js";
/** Aggregated deployment statistics for a single static site generator. */
export interface DeploymentPattern {
/** SSG identifier (e.g. "hugo", "docusaurus"). */
ssg: string;
/** Total deployments recorded for this SSG. */
totalDeployments: number;
/** Deployments whose edge recorded success. */
successfulDeployments: number;
/** Deployments whose edge did not record success. */
failedDeployments: number;
/** successfulDeployments / totalDeployments, in [0, 1]. */
successRate: number;
/** Mean build time over deployments that recorded one; absent otherwise. */
averageBuildTime?: number;
/** Union of technologies across projects using this SSG. */
commonTechnologies: string[];
/** Number of distinct projects that deployed with this SSG. */
projectCount: number;
}
/** Deployment activity within one time bucket ("N periods ago"). */
export interface DeploymentTrend {
period: string;
deployments: number;
successRate: number;
topSSG: string;
}
/** A single human-readable observation derived from the patterns. */
export interface DeploymentInsight {
type: "success" | "warning" | "recommendation";
title: string;
description: string;
ssg?: string;
metric?: number;
}
/** Full analytics report returned by DeploymentAnalytics.generateReport. */
export interface AnalyticsReport {
summary: {
totalProjects: number;
totalDeployments: number;
overallSuccessRate: number;
mostUsedSSG: string;
mostSuccessfulSSG: string;
};
patterns: DeploymentPattern[];
insights: DeploymentInsight[];
recommendations: string[];
}
/**
* Deployment Analytics Engine
*/
export class DeploymentAnalytics {
  /**
   * Generate a comprehensive analytics report.
   *
   * Aggregates every "project_deployed_with" edge in the knowledge graph,
   * groups results by SSG, and derives summary metrics, per-SSG patterns,
   * insights, and recommendations.
   */
  async generateReport(): Promise<AnalyticsReport> {
    const kg = await getKnowledgeGraph();

    // Get all projects and deployments
    const projects = await kg.findNodes({ type: "project" });
    const deploymentEdges = await kg.findEdges({
      properties: { baseType: "project_deployed_with" },
    });

    // Aggregate deployment data by SSG
    const ssgStats = await this.aggregateSSGStatistics(
      projects,
      deploymentEdges,
    );

    // Calculate summary metrics
    const summary = this.calculateSummary(ssgStats, projects.length);

    // Identify patterns
    const patterns = this.identifyPatterns(ssgStats);

    // Generate insights
    const insights = this.generateInsights(patterns, summary);

    // Generate recommendations
    const recommendations = this.generateRecommendations(patterns, insights);

    return {
      summary,
      patterns,
      insights,
      recommendations,
    };
  }

  /**
   * Get deployment statistics for a specific SSG.
   *
   * @param ssg - SSG identifier stored on configuration nodes
   * @returns Aggregated pattern, or null when no deployments use this SSG
   */
  async getSSGStatistics(ssg: string): Promise<DeploymentPattern | null> {
    const kg = await getKnowledgeGraph();
    const deployments = await kg.findEdges({
      properties: { baseType: "project_deployed_with" },
    });
    const allNodes = await kg.getAllNodes();

    // A deployment edge targets a configuration node; match on its ssg.
    const ssgDeployments = deployments.filter((edge) => {
      const configNode = allNodes.find((n) => n.id === edge.target);
      return configNode?.properties.ssg === ssg;
    });

    if (ssgDeployments.length === 0) {
      return null;
    }

    const successful = ssgDeployments.filter(
      (d) => d.properties.success,
    ).length;
    const failed = ssgDeployments.length - successful;

    // Average build time over only the deployments that recorded one.
    const buildTimes = ssgDeployments
      .filter((d) => d.properties.buildTime)
      .map((d) => d.properties.buildTime as number);
    const averageBuildTime =
      buildTimes.length > 0
        ? buildTimes.reduce((a, b) => a + b, 0) / buildTimes.length
        : undefined;

    // Get unique projects using this SSG (edge source is the project id).
    const projectIds = new Set(ssgDeployments.map((d) => d.source));

    // Union of technologies across those projects.
    const technologies = new Set<string>();
    for (const projectId of projectIds) {
      const project = allNodes.find((n) => n.id === projectId);
      if (project?.properties.technologies) {
        project.properties.technologies.forEach((tech: string) =>
          technologies.add(tech),
        );
      }
    }

    return {
      ssg,
      totalDeployments: ssgDeployments.length,
      successfulDeployments: successful,
      failedDeployments: failed,
      successRate: successful / ssgDeployments.length,
      averageBuildTime,
      commonTechnologies: Array.from(technologies),
      projectCount: projectIds.size,
    };
  }

  /**
   * Compare multiple SSGs, sorted by success rate (best first).
   * SSGs with no recorded deployments are omitted from the result.
   */
  async compareSSGs(
    ssgs: string[],
  ): Promise<{ ssg: string; pattern: DeploymentPattern }[]> {
    const comparisons: { ssg: string; pattern: DeploymentPattern }[] = [];

    for (const ssg of ssgs) {
      const pattern = await this.getSSGStatistics(ssg);
      if (pattern) {
        comparisons.push({ ssg, pattern });
      }
    }

    // Sort by success rate
    return comparisons.sort(
      (a, b) => b.pattern.successRate - a.pattern.successRate,
    );
  }

  /**
   * Identify deployment trends over time, bucketed into fixed-size periods
   * counted backwards from now (periods 0..12 "periods ago").
   *
   * Fix: topSSG was previously never populated and always stayed ""; it is
   * now set to the most frequently used SSG within each period.
   *
   * @param periodDays - Length of one period in days (default 30)
   */
  async identifyTrends(periodDays: number = 30): Promise<DeploymentTrend[]> {
    const kg = await getKnowledgeGraph();
    const deployments = await kg.findEdges({
      properties: { baseType: "project_deployed_with" },
    });
    const allNodes = await kg.getAllNodes();

    // Group deployments by time period
    const now = Date.now();
    const periodMs = periodDays * 24 * 60 * 60 * 1000;
    const trends: Map<string, DeploymentTrend> = new Map();
    // Per-period SSG usage counts, used to compute topSSG afterwards.
    const ssgCountsPerPeriod: Map<string, Map<string, number>> = new Map();

    for (const deployment of deployments) {
      const timestamp = deployment.properties.timestamp;
      if (!timestamp) continue;

      const deploymentTime = new Date(timestamp).getTime();
      const periodsAgo = Math.floor((now - deploymentTime) / periodMs);
      if (periodsAgo < 0 || periodsAgo > 12) continue; // periods 0..12 only

      const periodKey = `${periodsAgo} periods ago`;
      if (!trends.has(periodKey)) {
        trends.set(periodKey, {
          period: periodKey,
          deployments: 0,
          successRate: 0,
          topSSG: "",
        });
        ssgCountsPerPeriod.set(periodKey, new Map());
      }

      const trend = trends.get(periodKey)!;
      trend.deployments++;
      // successRate temporarily holds the raw success COUNT; it is
      // converted to a rate after the loop.
      if (deployment.properties.success) {
        trend.successRate++;
      }

      // Resolve the SSG via the configuration node the edge points at.
      const configNode = allNodes.find((n) => n.id === deployment.target);
      const ssgUsed = configNode?.properties.ssg;
      if (ssgUsed) {
        const counts = ssgCountsPerPeriod.get(periodKey)!;
        counts.set(ssgUsed, (counts.get(ssgUsed) ?? 0) + 1);
      }
    }

    // Convert success counts to rates and pick the top SSG per period.
    for (const [periodKey, trend] of trends.entries()) {
      trend.successRate = trend.successRate / trend.deployments;

      let topCount = 0;
      for (const [ssgName, count] of ssgCountsPerPeriod.get(periodKey)!) {
        if (count > topCount) {
          topCount = count;
          trend.topSSG = ssgName;
        }
      }
    }

    return Array.from(trends.values()).sort((a, b) =>
      a.period.localeCompare(b.period),
    );
  }

  /**
   * Get deployment health score (0-100), built from four weighted factors:
   * success rate (40), project count (20), deployment activity (20),
   * and SSG diversity (20).
   */
  async getHealthScore(): Promise<{
    score: number;
    factors: {
      name: string;
      impact: number;
      status: "good" | "warning" | "critical";
    }[];
  }> {
    const report = await this.generateReport();
    const factors: {
      name: string;
      impact: number;
      status: "good" | "warning" | "critical";
    }[] = [];

    let totalScore = 0;

    // Factor 1: Overall success rate (40 points)
    const successRateScore = report.summary.overallSuccessRate * 40;
    totalScore += successRateScore;
    factors.push({
      name: "Overall Success Rate",
      impact: successRateScore,
      status:
        report.summary.overallSuccessRate > 0.8
          ? "good"
          : report.summary.overallSuccessRate > 0.5
            ? "warning"
            : "critical",
    });

    // Factor 2: Number of projects (20 points)
    const projectScore = Math.min(20, report.summary.totalProjects * 2);
    totalScore += projectScore;
    factors.push({
      name: "Active Projects",
      impact: projectScore,
      status:
        report.summary.totalProjects > 5
          ? "good"
          : report.summary.totalProjects > 2
            ? "warning"
            : "critical",
    });

    // Factor 3: Deployment frequency (20 points)
    const deploymentScore = Math.min(20, report.summary.totalDeployments * 1.5);
    totalScore += deploymentScore;
    factors.push({
      name: "Deployment Activity",
      impact: deploymentScore,
      status:
        report.summary.totalDeployments > 10
          ? "good"
          : report.summary.totalDeployments > 5
            ? "warning"
            : "critical",
    });

    // Factor 4: SSG diversity (20 points)
    const ssgDiversity = report.patterns.length;
    const diversityScore = Math.min(20, ssgDiversity * 5);
    totalScore += diversityScore;
    factors.push({
      name: "SSG Diversity",
      impact: diversityScore,
      status:
        ssgDiversity > 3 ? "good" : ssgDiversity > 1 ? "warning" : "critical",
    });

    return {
      score: Math.round(totalScore),
      factors,
    };
  }

  /**
   * Private: Aggregate SSG statistics across all deployment edges.
   *
   * Fixes relative to the previous version:
   * - projectCount is now computed (was always 0);
   * - commonTechnologies is now populated from project nodes (was always []);
   * - averageBuildTime divides by the number of build-time samples, not by
   *   totalDeployments, matching getSSGStatistics.
   */
  private async aggregateSSGStatistics(
    projects: GraphNode[],
    deploymentEdges: GraphEdge[],
  ): Promise<Map<string, DeploymentPattern>> {
    const kg = await getKnowledgeGraph();
    const allNodes = await kg.getAllNodes();

    const ssgStats = new Map<string, DeploymentPattern>();
    // Auxiliary per-SSG accumulators (not part of DeploymentPattern).
    const projectsPerSSG = new Map<string, Set<string>>();
    const buildTimeSamples = new Map<string, number>();

    for (const deployment of deploymentEdges) {
      const configNode = allNodes.find((n) => n.id === deployment.target);
      if (!configNode || configNode.type !== "configuration") continue;

      const ssg = configNode.properties.ssg;
      if (!ssg) continue;

      if (!ssgStats.has(ssg)) {
        ssgStats.set(ssg, {
          ssg,
          totalDeployments: 0,
          successfulDeployments: 0,
          failedDeployments: 0,
          successRate: 0,
          commonTechnologies: [],
          projectCount: 0,
        });
        projectsPerSSG.set(ssg, new Set());
        buildTimeSamples.set(ssg, 0);
      }

      const stats = ssgStats.get(ssg)!;
      stats.totalDeployments++;

      if (deployment.properties.success) {
        stats.successfulDeployments++;
      } else {
        stats.failedDeployments++;
      }

      // Track which projects use this SSG (edge source is the project id).
      projectsPerSSG.get(ssg)!.add(deployment.source);

      // Sum build times; averaged over the sample count below.
      if (deployment.properties.buildTime) {
        stats.averageBuildTime =
          (stats.averageBuildTime ?? 0) + deployment.properties.buildTime;
        buildTimeSamples.set(ssg, buildTimeSamples.get(ssg)! + 1);
      }
    }

    // Calculate final metrics
    for (const [ssg, stats] of ssgStats.entries()) {
      stats.successRate = stats.successfulDeployments / stats.totalDeployments;

      const samples = buildTimeSamples.get(ssg)!;
      if (stats.averageBuildTime !== undefined && samples > 0) {
        stats.averageBuildTime /= samples;
      }

      const projectIds = projectsPerSSG.get(ssg)!;
      stats.projectCount = projectIds.size;

      // Union of technologies across the projects using this SSG.
      const technologies = new Set<string>();
      for (const projectId of projectIds) {
        const project = projects.find((p) => p.id === projectId);
        if (project?.properties.technologies) {
          project.properties.technologies.forEach((tech: string) =>
            technologies.add(tech),
          );
        }
      }
      stats.commonTechnologies = Array.from(technologies);
    }

    return ssgStats;
  }

  /**
   * Private: Calculate summary metrics across all SSGs.
   * mostSuccessfulSSG requires at least 2 deployments to avoid crowning
   * an SSG on a single sample.
   */
  private calculateSummary(
    ssgStats: Map<string, DeploymentPattern>,
    projectCount: number,
  ): AnalyticsReport["summary"] {
    let totalDeployments = 0;
    let totalSuccessful = 0;
    let mostUsedSSG = "";
    let mostUsedCount = 0;
    let mostSuccessfulSSG = "";
    let highestSuccessRate = 0;

    for (const [ssg, stats] of ssgStats.entries()) {
      totalDeployments += stats.totalDeployments;
      totalSuccessful += stats.successfulDeployments;

      if (stats.totalDeployments > mostUsedCount) {
        mostUsedCount = stats.totalDeployments;
        mostUsedSSG = ssg;
      }

      if (
        stats.successRate > highestSuccessRate &&
        stats.totalDeployments >= 2
      ) {
        highestSuccessRate = stats.successRate;
        mostSuccessfulSSG = ssg;
      }
    }

    return {
      totalProjects: projectCount,
      totalDeployments,
      overallSuccessRate:
        totalDeployments > 0 ? totalSuccessful / totalDeployments : 0,
      mostUsedSSG: mostUsedSSG || "none",
      mostSuccessfulSSG: mostSuccessfulSSG || mostUsedSSG || "none",
    };
  }

  /**
   * Private: Order patterns by usage (most deployments first).
   */
  private identifyPatterns(
    ssgStats: Map<string, DeploymentPattern>,
  ): DeploymentPattern[] {
    return Array.from(ssgStats.values()).sort(
      (a, b) => b.totalDeployments - a.totalDeployments,
    );
  }

  /**
   * Private: Generate insights from patterns and summary.
   * Thresholds: overall rate > 0.8 / < 0.5; per-SSG perfect record needs
   * >= 3 deployments; build times flagged fast < 30s, slow > 120s (ms).
   */
  private generateInsights(
    patterns: DeploymentPattern[],
    summary: AnalyticsReport["summary"],
  ): DeploymentInsight[] {
    const insights: DeploymentInsight[] = [];

    // Overall health insight
    if (summary.overallSuccessRate > 0.8) {
      insights.push({
        type: "success",
        title: "High Success Rate",
        description: `Excellent! ${(summary.overallSuccessRate * 100).toFixed(
          1,
        )}% of deployments succeed`,
        metric: summary.overallSuccessRate,
      });
    } else if (summary.overallSuccessRate < 0.5) {
      insights.push({
        type: "warning",
        title: "Low Success Rate",
        description: `Only ${(summary.overallSuccessRate * 100).toFixed(
          1,
        )}% of deployments succeed. Review common failure patterns.`,
        metric: summary.overallSuccessRate,
      });
    }

    // SSG-specific insights
    for (const pattern of patterns) {
      if (pattern.successRate === 1.0 && pattern.totalDeployments >= 3) {
        insights.push({
          type: "success",
          title: `${pattern.ssg} Perfect Track Record`,
          description: `All ${pattern.totalDeployments} deployments with ${pattern.ssg} succeeded`,
          ssg: pattern.ssg,
          metric: pattern.successRate,
        });
      } else if (pattern.successRate < 0.5 && pattern.totalDeployments >= 2) {
        insights.push({
          type: "warning",
          title: `${pattern.ssg} Struggling`,
          description: `Only ${(pattern.successRate * 100).toFixed(
            0,
          )}% success rate with ${pattern.ssg}`,
          ssg: pattern.ssg,
          metric: pattern.successRate,
        });
      }

      // Build time insights
      if (pattern.averageBuildTime) {
        if (pattern.averageBuildTime < 30000) {
          insights.push({
            type: "success",
            title: `${pattern.ssg} Fast Builds`,
            description: `Average build time: ${(
              pattern.averageBuildTime / 1000
            ).toFixed(1)}s`,
            ssg: pattern.ssg,
            metric: pattern.averageBuildTime,
          });
        } else if (pattern.averageBuildTime > 120000) {
          insights.push({
            type: "warning",
            title: `${pattern.ssg} Slow Builds`,
            description: `Average build time: ${(
              pattern.averageBuildTime / 1000
            ).toFixed(1)}s. Consider optimization.`,
            ssg: pattern.ssg,
            metric: pattern.averageBuildTime,
          });
        }
      }
    }

    return insights;
  }

  /**
   * Private: Generate actionable recommendations from patterns/insights.
   */
  private generateRecommendations(
    patterns: DeploymentPattern[],
    insights: DeploymentInsight[],
  ): string[] {
    const recommendations: string[] = [];

    // Find best performing SSG (>80% success over >=2 deployments)
    const bestSSG = patterns.find(
      (p) => p.successRate > 0.8 && p.totalDeployments >= 2,
    );
    if (bestSSG) {
      recommendations.push(
        `Consider using ${bestSSG.ssg} for new projects (${(
          bestSSG.successRate * 100
        ).toFixed(0)}% success rate)`,
      );
    }

    // Identify problematic SSGs (<50% success over >=3 deployments)
    const problematicSSG = patterns.find(
      (p) => p.successRate < 0.5 && p.totalDeployments >= 3,
    );
    if (problematicSSG) {
      recommendations.push(
        `Review ${problematicSSG.ssg} deployment process - ${problematicSSG.failedDeployments} recent failures`,
      );
    }

    // Diversity recommendation
    if (patterns.length < 2) {
      recommendations.push(
        "Experiment with different SSGs to find the best fit for different project types",
      );
    }

    // Activity recommendation
    const totalDeployments = patterns.reduce(
      (sum, p) => sum + p.totalDeployments,
      0,
    );
    if (totalDeployments < 5) {
      recommendations.push(
        "Deploy more projects to build a robust historical dataset for better recommendations",
      );
    }

    // Warning-based recommendations
    const warnings = insights.filter((i) => i.type === "warning");
    if (warnings.length > 2) {
      recommendations.push(
        "Multiple deployment issues detected - consider reviewing documentation setup process",
      );
    }

    return recommendations;
  }
}
/**
* Get singleton analytics instance
*/
let analyticsInstance: DeploymentAnalytics | null = null;

/** Lazily create the shared DeploymentAnalytics and return it. */
export function getDeploymentAnalytics(): DeploymentAnalytics {
  analyticsInstance ??= new DeploymentAnalytics();
  return analyticsInstance;
}
```
--------------------------------------------------------------------------------
/tests/tools/generate-readme-template.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
import { promises as fs } from "fs";
import * as path from "path";
import * as tmp from "tmp";
import {
generateReadmeTemplate,
ReadmeTemplateGenerator,
GenerateReadmeTemplateSchema,
TemplateType,
} from "../../src/tools/generate-readme-template";
describe("README Template Generator", () => {
let tempDir: string;
let generator: ReadmeTemplateGenerator;
// Fresh generator and temp directory for every test case.
beforeEach(() => {
  generator = new ReadmeTemplateGenerator();
  tempDir = tmp.dirSync({ unsafeCleanup: true }).name;
});
// Remove the temp directory; best-effort, failures are intentionally ignored.
// Fix: fs.rmdir with { recursive } is deprecated in Node — use fs.rm with
// { recursive, force }, matching the cleanup used elsewhere in this suite.
afterEach(async () => {
  try {
    await fs.rm(tempDir, { recursive: true, force: true });
  } catch {
    // Ignore cleanup errors
  }
});
describe("Input Validation", () => {
it("should validate required fields", () => {
expect(() => GenerateReadmeTemplateSchema.parse({})).toThrow();
expect(() =>
GenerateReadmeTemplateSchema.parse({
projectName: "",
description: "test",
}),
).toThrow();
expect(() =>
GenerateReadmeTemplateSchema.parse({
projectName: "test",
description: "",
}),
).toThrow();
});
it("should accept valid input with defaults", () => {
const input = GenerateReadmeTemplateSchema.parse({
projectName: "test-project",
description: "A test project",
templateType: "library",
});
expect(input.license).toBe("MIT");
expect(input.includeScreenshots).toBe(false);
expect(input.includeBadges).toBe(true);
expect(input.includeContributing).toBe(true);
});
it("should validate template types", () => {
expect(() =>
GenerateReadmeTemplateSchema.parse({
projectName: "test",
description: "test",
templateType: "invalid-type",
}),
).toThrow();
const validTypes: TemplateType[] = [
"library",
"application",
"cli-tool",
"api",
"documentation",
];
for (const type of validTypes) {
expect(() =>
GenerateReadmeTemplateSchema.parse({
projectName: "test",
description: "test",
templateType: type,
}),
).not.toThrow();
}
});
});
describe("Template Generation", () => {
it("should generate library template correctly", async () => {
const input = GenerateReadmeTemplateSchema.parse({
projectName: "awesome-lib",
description: "An awesome JavaScript library",
templateType: "library",
author: "john-doe",
});
const result = await generateReadmeTemplate(input);
expect(result.content).toContain("# awesome-lib");
expect(result.content).toContain("> An awesome JavaScript library");
expect(result.content).toContain("npm install awesome-lib");
expect(result.content).toContain(
"const awesomeLib = require('awesome-lib')",
);
expect(result.content).toContain("## TL;DR");
expect(result.content).toContain("## Quick Start");
expect(result.content).toContain("## API Documentation");
expect(result.content).toContain("MIT © john-doe");
expect(result.metadata.templateType).toBe("library");
expect(result.metadata.estimatedLength).toBe(150);
});
it("should generate application template correctly", async () => {
const input = GenerateReadmeTemplateSchema.parse({
projectName: "my-app",
description: "A web application",
templateType: "application",
author: "jane-doe",
includeScreenshots: true,
});
const result = await generateReadmeTemplate(input);
expect(result.content).toContain("# my-app");
expect(result.content).toContain("> A web application");
expect(result.content).toContain("## What This Does");
expect(result.content).toContain(
"git clone https://github.com/jane-doe/my-app.git",
);
expect(result.content).toContain("npm start");
expect(result.content).toContain("## Configuration");
expect(result.content).toContain("![my-app Screenshot]");
expect(result.metadata.templateType).toBe("application");
});
it("should generate CLI tool template correctly", async () => {
const input = GenerateReadmeTemplateSchema.parse({
projectName: "my-cli",
description: "A command line tool",
templateType: "cli-tool",
author: "dev-user",
});
const result = await generateReadmeTemplate(input);
expect(result.content).toContain("# my-cli");
expect(result.content).toContain("npm install -g my-cli");
expect(result.content).toContain("npx my-cli --help");
expect(result.content).toContain("## Usage");
expect(result.content).toContain("## Options");
expect(result.content).toContain("| Option | Description | Default |");
expect(result.metadata.templateType).toBe("cli-tool");
});
it("should handle camelCase conversion correctly", () => {
const testCases = [
{ input: "my-awesome-lib", expected: "myAwesomeLib" },
{ input: "simple_package", expected: "simplePackage" },
{ input: "Mixed-Case_Name", expected: "mixedCaseName" },
{ input: "single", expected: "single" },
];
for (const testCase of testCases) {
const generator = new ReadmeTemplateGenerator();
const input = GenerateReadmeTemplateSchema.parse({
projectName: testCase.input,
description: "test",
templateType: "library",
});
const result = generator.generateTemplate(input);
expect(result).toContain(
`const ${testCase.expected} = require('${testCase.input}')`,
);
}
});
});
describe("Badge Generation", () => {
it("should include badges when enabled", async () => {
const input = GenerateReadmeTemplateSchema.parse({
projectName: "badge-lib",
description: "Library with badges",
templateType: "library",
author: "dev",
includeBadges: true,
});
const result = await generateReadmeTemplate(input);
expect(result.content).toContain("[![npm version]");
expect(result.content).toContain("[![Build Status]");
expect(result.content).toContain("[![License: MIT]");
expect(result.content).toContain("dev/badge-lib");
});
it("should exclude badges when disabled", async () => {
const input = GenerateReadmeTemplateSchema.parse({
projectName: "no-badge-lib",
description: "Library without badges",
templateType: "library",
includeBadges: false,
});
const result = await generateReadmeTemplate(input);
expect(result.content).not.toContain("[",
);
expect(result.content).toContain("*Add a screenshot or demo GIF here*");
});
it("should exclude screenshots when disabled", async () => {
const input = GenerateReadmeTemplateSchema.parse({
projectName: "no-screenshot-app",
description: "App without screenshots",
templateType: "application",
includeScreenshots: false,
});
const result = await generateReadmeTemplate(input);
expect(result.content).not.toContain("![visual-app Screenshot]");
});
});
describe("Contributing Section", () => {
it("should include contributing section when enabled", async () => {
const input = GenerateReadmeTemplateSchema.parse({
projectName: "contrib-lib",
description: "Library with contributing section",
templateType: "library",
includeContributing: true,
});
const result = await generateReadmeTemplate(input);
expect(result.content).toContain("## Contributing");
expect(result.content).toContain("CONTRIBUTING.md");
});
it("should exclude contributing section when disabled", async () => {
const input = GenerateReadmeTemplateSchema.parse({
projectName: "no-contrib-lib",
description: "Library without contributing section",
templateType: "library",
includeContributing: false,
});
const result = await generateReadmeTemplate(input);
expect(result.content).not.toContain("## Contributing");
});
});
describe("File Output", () => {
it("should write to file when outputPath is specified", async () => {
const outputPath = path.join(tempDir, "README.md");
const input = GenerateReadmeTemplateSchema.parse({
projectName: "output-lib",
description: "Library with file output",
templateType: "library",
outputPath: outputPath,
});
const result = await generateReadmeTemplate(input);
await expect(fs.access(outputPath)).resolves.toBeUndefined();
const fileContent = await fs.readFile(outputPath, "utf-8");
expect(fileContent).toBe(result.content);
expect(fileContent).toContain("# output-lib");
});
it("should not write file when outputPath is not specified", async () => {
const input = GenerateReadmeTemplateSchema.parse({
projectName: "no-file-test",
description: "Library without file output",
templateType: "library",
});
await generateReadmeTemplate(input);
const possiblePath = path.join(tempDir, "README.md");
await expect(fs.access(possiblePath)).rejects.toThrow();
});
});
describe("Template Metadata", () => {
it("should return correct metadata for each template type", () => {
const templateTypes: TemplateType[] = [
"library",
"application",
"cli-tool",
];
for (const type of templateTypes) {
const info = generator.getTemplateInfo(type);
expect(info).toBeDefined();
expect(info!.type).toBe(type);
expect(info!.estimatedLength).toBeGreaterThan(0);
}
});
it("should return null for invalid template type", () => {
const info = generator.getTemplateInfo("invalid" as TemplateType);
expect(info).toBeNull();
});
it("should count sections correctly", async () => {
const input = GenerateReadmeTemplateSchema.parse({
projectName: "error-lib",
description: "Library that causes error",
templateType: "library",
});
const result = await generateReadmeTemplate(input);
const sectionCount = (result.content.match(/^##\s/gm) || []).length;
expect(result.metadata.sectionsIncluded).toBeGreaterThanOrEqual(
sectionCount,
);
expect(result.metadata.sectionsIncluded).toBeGreaterThan(3);
});
});
describe("Available Templates", () => {
it("should return list of available template types", () => {
const availableTypes = generator.getAvailableTemplates();
expect(availableTypes).toContain("library");
expect(availableTypes).toContain("application");
expect(availableTypes).toContain("cli-tool");
expect(availableTypes.length).toBeGreaterThan(0);
});
});
describe("Error Handling", () => {
it("should throw error for unsupported template type", async () => {
const generator = new ReadmeTemplateGenerator();
expect(() =>
generator.generateTemplate({
projectName: "test",
description: "test",
templateType: "unsupported" as TemplateType,
license: "MIT",
includeScreenshots: false,
includeBadges: true,
includeContributing: true,
}),
).toThrow('Template type "unsupported" not supported');
});
it("should handle file write errors gracefully", async () => {
const invalidPath = "/invalid/nonexistent/path/README.md";
const input = GenerateReadmeTemplateSchema.parse({
projectName: "error-test",
description: "test error handling",
templateType: "library",
outputPath: invalidPath,
});
await expect(generateReadmeTemplate(input)).rejects.toThrow();
});
});
describe("Variable Replacement", () => {
it("should replace all template variables correctly", async () => {
const input = GenerateReadmeTemplateSchema.parse({
projectName: "license-lib",
description: "Library with custom license",
templateType: "library",
author: "dev",
license: "Apache-2.0",
});
const result = await generateReadmeTemplate(input);
expect(result.content).not.toContain("{{projectName}}");
expect(result.content).not.toContain("{{description}}");
expect(result.content).not.toContain("{{author}}");
expect(result.content).not.toContain("{{license}}");
expect(result.content).toContain("license-lib");
expect(result.content).toContain("Library with custom license");
expect(result.content).toContain("dev");
expect(result.content).toContain("Apache-2.0");
});
it("should use default values for missing optional fields", async () => {
const input = GenerateReadmeTemplateSchema.parse({
projectName: "time-lib",
description: "Library with timing",
templateType: "library",
});
const result = await generateReadmeTemplate(input);
expect(result.content).toContain("your-username");
expect(result.content).toContain("MIT");
});
});
describe("Template Structure Validation", () => {
it("should generate valid markdown structure", async () => {
const input = GenerateReadmeTemplateSchema.parse({
projectName: "structure-test",
description: "test structure",
templateType: "library",
});
const result = await generateReadmeTemplate(input);
// Check for proper heading hierarchy
const lines = result.content.split("\n");
const headings = lines.filter((line) => line.startsWith("#"));
expect(headings.length).toBeGreaterThan(0);
expect(headings[0]).toMatch(/^#\s+/); // Main title
// Check for code blocks
expect(result.content).toMatch(/```[\s\S]*?```/);
// Check for proper spacing
expect(result.content).not.toMatch(/#{1,6}\s*\n\s*#{1,6}/);
});
it("should maintain consistent formatting across templates", async () => {
const templateTypes: TemplateType[] = [
"library",
"application",
"cli-tool",
];
for (const type of templateTypes) {
const input = GenerateReadmeTemplateSchema.parse({
projectName: "format-test",
description: "test format",
templateType: type,
});
const result = await generateReadmeTemplate(input);
// All templates should have main title
expect(result.content).toMatch(/^#\s+format-test/m);
// All templates should have license section
expect(result.content).toContain("## License");
// All templates should end with license info
expect(result.content.trim()).toMatch(/MIT © your-username$/);
}
});
});
});
```
--------------------------------------------------------------------------------
/tests/memory/user-preferences.test.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Tests for User Preference Management
*/
import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
import { promises as fs } from "fs";
import { join } from "path";
import { tmpdir } from "os";
import {
UserPreferenceManager,
getUserPreferenceManager,
clearPreferenceManagerCache,
} from "../../src/memory/user-preferences.js";
import {
getKnowledgeGraph,
initializeKnowledgeGraph,
} from "../../src/memory/kg-integration.js";
describe("UserPreferenceManager", () => {
let testDir: string;
beforeEach(async () => {
// Create temporary test directory
testDir = join(tmpdir(), `user-prefs-test-${Date.now()}`);
await fs.mkdir(testDir, { recursive: true });
// Initialize KG with test directory
await initializeKnowledgeGraph(testDir);
clearPreferenceManagerCache();
});
afterEach(async () => {
clearPreferenceManagerCache();
// Clean up test directory
try {
await fs.rm(testDir, { recursive: true, force: true });
} catch (error) {
// Ignore cleanup errors
}
});
describe("Initialization", () => {
it("should create default preferences for new user", async () => {
const manager = new UserPreferenceManager("test-user");
await manager.initialize();
const prefs = await manager.getPreferences();
expect(prefs.userId).toBe("test-user");
expect(prefs.preferredSSGs).toEqual([]);
expect(prefs.documentationStyle).toBe("comprehensive");
expect(prefs.expertiseLevel).toBe("intermediate");
expect(prefs.autoApplyPreferences).toBe(true);
});
it("should load existing preferences from knowledge graph", async () => {
// Create a user with preferences
const kg = await getKnowledgeGraph();
kg.addNode({
id: "user:existing-user",
type: "user",
label: "existing-user",
properties: {
userId: "existing-user",
preferredSSGs: ["jekyll", "hugo"],
documentationStyle: "minimal",
expertiseLevel: "advanced",
preferredTechnologies: ["typescript"],
preferredDiataxisCategories: ["tutorials"],
autoApplyPreferences: false,
lastActive: "2025-01-01T00:00:00.000Z",
},
weight: 1.0,
});
const manager = new UserPreferenceManager("existing-user");
await manager.initialize();
const prefs = await manager.getPreferences();
expect(prefs.userId).toBe("existing-user");
expect(prefs.preferredSSGs).toEqual(["jekyll", "hugo"]);
expect(prefs.documentationStyle).toBe("minimal");
expect(prefs.expertiseLevel).toBe("advanced");
expect(prefs.autoApplyPreferences).toBe(false);
});
it("should handle getPreferences before initialization", async () => {
const manager = new UserPreferenceManager("auto-init");
const prefs = await manager.getPreferences();
expect(prefs.userId).toBe("auto-init");
expect(prefs.preferredSSGs).toEqual([]);
});
});
describe("Update Preferences", () => {
it("should update preferences and save to knowledge graph", async () => {
const manager = new UserPreferenceManager("update-test");
await manager.initialize();
await manager.updatePreferences({
documentationStyle: "tutorial-heavy",
expertiseLevel: "beginner",
preferredTechnologies: ["python", "go"],
});
const prefs = await manager.getPreferences();
expect(prefs.documentationStyle).toBe("tutorial-heavy");
expect(prefs.expertiseLevel).toBe("beginner");
expect(prefs.preferredTechnologies).toEqual(["python", "go"]);
});
it("should initialize before update if not already initialized", async () => {
const manager = new UserPreferenceManager("lazy-init");
await manager.updatePreferences({
expertiseLevel: "advanced",
});
const prefs = await manager.getPreferences();
expect(prefs.expertiseLevel).toBe("advanced");
});
});
describe("Track SSG Usage", () => {
it("should track successful SSG usage and create preference", async () => {
const manager = new UserPreferenceManager("ssg-tracker");
await manager.initialize();
await manager.trackSSGUsage({
ssg: "jekyll",
success: true,
timestamp: "2025-01-01T00:00:00.000Z",
});
const prefs = await manager.getPreferences();
expect(prefs.preferredSSGs).toContain("jekyll");
});
it("should track failed SSG usage", async () => {
const manager = new UserPreferenceManager("fail-tracker");
await manager.initialize();
await manager.trackSSGUsage({
ssg: "hugo",
success: false,
timestamp: "2025-01-01T00:00:00.000Z",
});
const kg = await getKnowledgeGraph();
const edges = await kg.findEdges({
type: "user_prefers_ssg",
});
expect(edges.length).toBeGreaterThan(0);
const edge = edges.find((e) => e.target.includes("hugo"));
expect(edge).toBeDefined();
expect(edge!.weight).toBe(0.5); // Failed usage has lower weight
});
it("should update existing SSG preference", async () => {
const manager = new UserPreferenceManager("update-tracker");
await manager.initialize();
// First usage - success
await manager.trackSSGUsage({
ssg: "docusaurus",
success: true,
timestamp: "2025-01-01T00:00:00.000Z",
});
// Second usage - success
await manager.trackSSGUsage({
ssg: "docusaurus",
success: true,
timestamp: "2025-01-02T00:00:00.000Z",
});
const kg = await getKnowledgeGraph();
const edges = await kg.findEdges({
type: "user_prefers_ssg",
});
const docEdge = edges.find((e) => e.target.includes("docusaurus"));
expect(docEdge!.properties.usageCount).toBe(2);
expect(docEdge!.properties.successRate).toBe(1.0);
});
it("should calculate average success rate correctly", async () => {
const manager = new UserPreferenceManager("avg-tracker");
await manager.initialize();
// Success
await manager.trackSSGUsage({
ssg: "mkdocs",
success: true,
timestamp: "2025-01-01T00:00:00.000Z",
});
// Failure
await manager.trackSSGUsage({
ssg: "mkdocs",
success: false,
timestamp: "2025-01-02T00:00:00.000Z",
});
const kg = await getKnowledgeGraph();
const edges = await kg.findEdges({
type: "user_prefers_ssg",
});
const mkdocsEdge = edges.find((e) => e.target.includes("mkdocs"));
expect(mkdocsEdge!.properties.successRate).toBe(0.5);
});
it("should create user node if it doesn't exist during tracking", async () => {
const manager = new UserPreferenceManager("new-tracker");
// Don't initialize - let trackSSGUsage create it
await manager.trackSSGUsage({
ssg: "eleventy",
success: true,
timestamp: "2025-01-01T00:00:00.000Z",
});
const kg = await getKnowledgeGraph();
const userNode = await kg.findNode({
type: "user",
properties: { userId: "new-tracker" },
});
expect(userNode).toBeDefined();
});
});
describe("SSG Recommendations", () => {
it("should return recommendations sorted by score", async () => {
const manager = new UserPreferenceManager("rec-test");
await manager.initialize();
// Track multiple SSGs with different success rates
await manager.trackSSGUsage({
ssg: "jekyll",
success: true,
timestamp: "2025-01-01T00:00:00.000Z",
});
await manager.trackSSGUsage({
ssg: "jekyll",
success: true,
timestamp: "2025-01-02T00:00:00.000Z",
});
await manager.trackSSGUsage({
ssg: "hugo",
success: true,
timestamp: "2025-01-03T00:00:00.000Z",
});
const recommendations = await manager.getSSGRecommendations();
expect(recommendations.length).toBeGreaterThan(0);
expect(recommendations[0].ssg).toBe("jekyll"); // Higher usage count
expect(recommendations[0].score).toBeGreaterThan(
recommendations[1].score,
);
});
it("should include reason with high success rate", async () => {
const manager = new UserPreferenceManager("reason-test");
await manager.initialize();
await manager.trackSSGUsage({
ssg: "docusaurus",
success: true,
timestamp: "2025-01-01T00:00:00.000Z",
});
const recommendations = await manager.getSSGRecommendations();
const docRec = recommendations.find((r) => r.ssg === "docusaurus");
expect(docRec!.reason).toContain("100% success rate");
});
it("should include reason with low success rate", async () => {
const manager = new UserPreferenceManager("low-success-test");
await manager.initialize();
// Track both success and failure to get a low rate (not exactly 0)
await manager.trackSSGUsage({
ssg: "eleventy",
success: true,
timestamp: "2025-01-01T00:00:00.000Z",
});
await manager.trackSSGUsage({
ssg: "eleventy",
success: false,
timestamp: "2025-01-02T00:00:00.000Z",
});
await manager.trackSSGUsage({
ssg: "eleventy",
success: false,
timestamp: "2025-01-03T00:00:00.000Z",
});
const recommendations = await manager.getSSGRecommendations();
const eleventyRec = recommendations.find((r) => r.ssg === "eleventy");
expect(eleventyRec!.reason).toContain("only");
expect(eleventyRec!.reason).toContain("success rate");
});
it("should return empty array if no user node exists", async () => {
const manager = new UserPreferenceManager("no-user");
// Don't initialize or create user node
const recommendations = await manager.getSSGRecommendations();
expect(recommendations).toEqual([]);
});
});
describe("Apply Preferences to Recommendation", () => {
it("should return original recommendation if autoApply is false", async () => {
const manager = new UserPreferenceManager("no-auto");
await manager.updatePreferences({
autoApplyPreferences: false,
preferredSSGs: ["jekyll"],
});
const result = manager.applyPreferencesToRecommendation("hugo", [
"jekyll",
"hugo",
]);
expect(result.recommended).toBe("hugo");
expect(result.adjustmentReason).toBeUndefined();
});
it("should keep recommendation if it matches preferred SSG", async () => {
const manager = new UserPreferenceManager("match-pref");
await manager.updatePreferences({
preferredSSGs: ["jekyll", "hugo"],
});
const result = manager.applyPreferencesToRecommendation("jekyll", [
"jekyll",
"hugo",
"mkdocs",
]);
expect(result.recommended).toBe("jekyll");
expect(result.adjustmentReason).toContain("Matches your preferred SSG");
});
it("should switch to preferred SSG if in alternatives", async () => {
const manager = new UserPreferenceManager("switch-pref");
await manager.updatePreferences({
preferredSSGs: ["docusaurus"],
});
const result = manager.applyPreferencesToRecommendation("jekyll", [
"jekyll",
"docusaurus",
"hugo",
]);
expect(result.recommended).toBe("docusaurus");
expect(result.adjustmentReason).toContain(
"Switched to docusaurus based on your usage history",
);
});
it("should return original if no preferred SSGs match", async () => {
const manager = new UserPreferenceManager("no-match");
await manager.updatePreferences({
preferredSSGs: ["eleventy"],
});
const result = manager.applyPreferencesToRecommendation("jekyll", [
"jekyll",
"hugo",
]);
expect(result.recommended).toBe("jekyll");
expect(result.adjustmentReason).toBeUndefined();
});
it("should return original if no preferences set", async () => {
const manager = new UserPreferenceManager("empty-pref");
await manager.initialize();
const result = manager.applyPreferencesToRecommendation("jekyll", [
"jekyll",
"hugo",
]);
expect(result.recommended).toBe("jekyll");
expect(result.adjustmentReason).toBeUndefined();
});
});
describe("Reset Preferences", () => {
it("should reset preferences to defaults", async () => {
const manager = new UserPreferenceManager("reset-test");
await manager.updatePreferences({
documentationStyle: "minimal",
expertiseLevel: "advanced",
preferredSSGs: ["jekyll", "hugo"],
});
await manager.resetPreferences();
const prefs = await manager.getPreferences();
expect(prefs.documentationStyle).toBe("comprehensive");
expect(prefs.expertiseLevel).toBe("intermediate");
expect(prefs.preferredSSGs).toEqual([]);
});
});
describe("Export/Import Preferences", () => {
it("should export preferences as JSON", async () => {
const manager = new UserPreferenceManager("export-test");
await manager.updatePreferences({
expertiseLevel: "advanced",
preferredSSGs: ["jekyll"],
});
const exported = await manager.exportPreferences();
const parsed = JSON.parse(exported);
expect(parsed.userId).toBe("export-test");
expect(parsed.expertiseLevel).toBe("advanced");
expect(parsed.preferredSSGs).toEqual(["jekyll"]);
});
it("should import preferences from JSON", async () => {
const manager = new UserPreferenceManager("import-test");
await manager.initialize();
const importData = {
userId: "import-test",
preferredSSGs: ["hugo", "docusaurus"],
documentationStyle: "tutorial-heavy" as const,
expertiseLevel: "beginner" as const,
preferredTechnologies: ["python"],
preferredDiataxisCategories: ["tutorials" as const],
autoApplyPreferences: false,
lastUpdated: "2025-01-01T00:00:00.000Z",
};
await manager.importPreferences(JSON.stringify(importData));
const prefs = await manager.getPreferences();
expect(prefs.expertiseLevel).toBe("beginner");
expect(prefs.preferredSSGs).toEqual(["hugo", "docusaurus"]);
expect(prefs.autoApplyPreferences).toBe(false);
});
it("should throw error on userId mismatch during import", async () => {
const manager = new UserPreferenceManager("user1");
await manager.initialize();
const importData = {
userId: "user2", // Different user ID
preferredSSGs: [],
documentationStyle: "comprehensive" as const,
expertiseLevel: "intermediate" as const,
preferredTechnologies: [],
preferredDiataxisCategories: [],
autoApplyPreferences: true,
lastUpdated: "2025-01-01T00:00:00.000Z",
};
await expect(
manager.importPreferences(JSON.stringify(importData)),
).rejects.toThrow("User ID mismatch");
});
});
describe("Manager Cache", () => {
it("should cache preference managers", async () => {
const manager1 = await getUserPreferenceManager("cached-user");
const manager2 = await getUserPreferenceManager("cached-user");
expect(manager1).toBe(manager2); // Same instance
});
it("should create different managers for different users", async () => {
const manager1 = await getUserPreferenceManager("user1");
const manager2 = await getUserPreferenceManager("user2");
expect(manager1).not.toBe(manager2);
});
it("should clear cache", async () => {
const manager1 = await getUserPreferenceManager("clear-test");
clearPreferenceManagerCache();
const manager2 = await getUserPreferenceManager("clear-test");
expect(manager1).not.toBe(manager2); // Different instances after clear
});
});
});
```
--------------------------------------------------------------------------------
/src/tools/optimize-readme.ts:
--------------------------------------------------------------------------------
```typescript
import { z } from "zod";
import { promises as fs } from "fs";
import path from "path";
import { MCPToolResponse } from "../types/api.js";
// Input validation schema
// Validates and applies defaults for the optimize-readme tool's arguments.
const OptimizeReadmeInputSchema = z.object({
  readme_path: z.string().min(1, "README path is required"),
  // Target audience; shapes which sections are kept vs. extracted.
  strategy: z
    .enum([
      "community_focused",
      "enterprise_focused",
      "developer_focused",
      "general",
    ])
    .optional()
    .default("community_focused"),
  // Desired maximum README length, in lines (50-1000).
  max_length: z.number().min(50).max(1000).optional().default(300),
  // When true, a TL;DR section is generated and appended.
  include_tldr: z.boolean().optional().default(true),
  // NOTE(review): preserve_existing is validated but never read by
  // optimizeReadme below — confirm whether it is still intended.
  preserve_existing: z.boolean().optional().default(false),
  // If set, the optimized README is also written to this path.
  output_path: z.string().optional(),
  // When true, extracted sections are written under a docs/ directory.
  create_docs_directory: z.boolean().optional().default(true),
});
export type OptimizeReadmeInput = z.infer<typeof OptimizeReadmeInputSchema>;
/** Aggregate result of a single README optimization run. */
interface OptimizationResult {
  originalLength: number; // Line count of the input README.
  optimizedLength: number; // Line count of the optimized README.
  reductionPercentage: number; // Percent change in lines; negative if content grew.
  optimizedContent: string; // Full text of the optimized README.
  extractedSections: ExtractedSection[]; // Sections suggested for relocation.
  tldrGenerated: string | null; // Generated TL;DR text, or null when disabled.
  restructuringChanges: RestructuringChange[]; // Structural edits applied.
  recommendations: string[]; // Follow-up suggestions for the user.
}

/** A README section recommended for extraction into separate documentation. */
interface ExtractedSection {
  title: string; // Section heading text.
  content: string; // Raw section content.
  suggestedLocation: string; // Destination, e.g. a path under docs/.
  reason: string; // Why this section should move out of the README.
}

/** One structural change applied (or proposed) during optimization. */
interface RestructuringChange {
  type: "moved" | "condensed" | "split" | "added" | "removed";
  section: string; // Affected section title.
  description: string; // What was changed.
  impact: string; // Expected benefit of the change.
}
/**
* Optimizes README content by restructuring, condensing, and extracting detailed sections.
*
* Performs intelligent README optimization including length reduction, structure improvement,
* content extraction to separate documentation, and TL;DR generation. Uses different strategies
* based on target audience (community, enterprise, developer, general) to maximize effectiveness.
*
* @param input - The input parameters for README optimization
* @param input.readme_path - The file system path to the README file to optimize
* @param input.strategy - The optimization strategy to apply (default: "community_focused")
* @param input.max_length - Target maximum length in lines (default: 300)
* @param input.include_tldr - Whether to generate a TL;DR section (default: true)
* @param input.preserve_existing - Whether to preserve existing content structure (default: false)
* @param input.output_path - Optional output path for optimized README
* @param input.create_docs_directory - Whether to create docs/ directory for extracted content (default: true)
*
* @returns Promise resolving to README optimization results
* @returns optimization - Complete optimization results including length reduction and restructuring
* @returns nextSteps - Array of recommended next actions after optimization
*
* @throws {Error} When README file is inaccessible or invalid
* @throws {Error} When optimization processing fails
* @throws {Error} When output directory cannot be created
*
* @example
* ```typescript
* // Optimize README for community contributors
* const result = await optimizeReadme({
* readme_path: "./README.md",
* strategy: "community_focused",
* max_length: 300,
* include_tldr: true
* });
*
* console.log(`Reduced from ${result.data.optimization.originalLength} to ${result.data.optimization.optimizedLength} lines`);
* console.log(`Reduction: ${result.data.optimization.reductionPercentage}%`);
*
* // Optimize for enterprise with aggressive reduction
* const enterprise = await optimizeReadme({
* readme_path: "./README.md",
* strategy: "enterprise_focused",
* max_length: 200,
* preserve_existing: true
* });
* ```
*
* @since 1.0.0
*/
export async function optimizeReadme(
  input: Partial<OptimizeReadmeInput>,
): Promise<
  MCPToolResponse<{ optimization: OptimizationResult; nextSteps: string[] }>
> {
  const startTime = Date.now(); // Used to report executionTime in metadata.
  try {
    // Validate input; zod fills defaults for omitted optional fields.
    const validatedInput = OptimizeReadmeInputSchema.parse(input);
    const {
      readme_path,
      strategy,
      max_length,
      include_tldr,
      output_path,
      create_docs_directory,
    } = validatedInput;
    // Read original README
    const originalContent = await fs.readFile(readme_path, "utf-8");
    const originalLength = originalContent.split("\n").length; // lines, not chars
    // Parse README structure into heading-delimited sections.
    const sections = parseReadmeStructure(originalContent);
    // Generate TL;DR if requested
    const tldrGenerated = include_tldr
      ? generateTldr(originalContent, sections)
      : null;
    // Identify sections to extract (strategy- and length-driven).
    const extractedSections = identifySectionsToExtract(
      sections,
      strategy,
      max_length,
    );
    // Create basic optimization result.
    // NOTE(review): this only APPENDS a TL;DR — extracted sections are not
    // removed from the README text, so optimizedLength > originalLength and
    // reductionPercentage below comes out negative. Confirm whether actual
    // condensing is implemented elsewhere or still TODO.
    const optimizedContent =
      originalContent +
      "\n\n## TL;DR\n\n" +
      (tldrGenerated || "Quick overview of the project.");
    const restructuringChanges = [
      {
        type: "added" as const,
        section: "TL;DR",
        description: "Added concise project overview",
        impact: "Helps users quickly understand project value",
      },
    ];
    const optimizedLength = optimizedContent.split("\n").length;
    const reductionPercentage = Math.round(
      ((originalLength - optimizedLength) / originalLength) * 100,
    );
    // Create docs directory and extract detailed content if requested.
    if (create_docs_directory && extractedSections.length > 0) {
      await createDocsStructure(path.dirname(readme_path), extractedSections);
    }
    // Write optimized README if output path specified.
    if (output_path) {
      await fs.writeFile(output_path, optimizedContent, "utf-8");
    }
    const recommendations = generateOptimizationRecommendations(
      originalLength,
      optimizedLength,
      extractedSections,
      strategy,
    );
    const optimization: OptimizationResult = {
      originalLength,
      optimizedLength,
      reductionPercentage,
      optimizedContent,
      extractedSections,
      tldrGenerated,
      restructuringChanges,
      recommendations,
    };
    const nextSteps = generateOptimizationNextSteps(
      optimization,
      validatedInput,
    );
    return {
      success: true,
      data: {
        optimization,
        nextSteps,
      },
      metadata: {
        toolVersion: "1.0.0",
        executionTime: Date.now() - startTime,
        timestamp: new Date().toISOString(),
      },
    };
  } catch (error) {
    // All failures (validation, fs, extraction) are mapped to a structured
    // error response rather than thrown, per the MCPToolResponse contract.
    return {
      success: false,
      error: {
        code: "OPTIMIZATION_FAILED",
        message: "Failed to optimize README",
        details: error instanceof Error ? error.message : "Unknown error",
        resolution: "Check README file path and permissions",
      },
      metadata: {
        toolVersion: "1.0.0",
        executionTime: Date.now() - startTime,
        timestamp: new Date().toISOString(),
      },
    };
  }
}
/** A heading-delimited README section, as produced by parseReadmeStructure. */
interface ReadmeSection {
  title: string; // Heading text without the leading '#' characters.
  content: string; // Raw section text, including the heading line.
  level: number; // Heading depth (1-6, i.e. number of '#' characters).
  startLine: number; // 0-based index of the heading line.
  endLine: number; // 0-based index of the section's last line (inclusive).
  wordCount: number; // Whitespace-separated token count of `content`.
  isEssential: boolean; // True when title matches isEssentialSection keywords.
}
/**
 * Splits README content into heading-delimited sections.
 *
 * A section starts at each markdown heading (1-6 '#' characters) and runs
 * until the line before the next heading, or the end of the document for the
 * final section. Content before the first heading is not captured as a
 * section (matching the original behavior).
 *
 * @param content - Full README text.
 * @returns Sections in document order, with line ranges and word counts.
 */
function parseReadmeStructure(content: string): ReadmeSection[] {
  const lines = content.split("\n");
  const sections: ReadmeSection[] = [];
  let currentTitle = "";
  let currentLevel = 0;
  let currentStartLine = 0;

  // Builds and records a ReadmeSection for lines
  // [currentStartLine, endLine] (inclusive). Extracted to remove the
  // duplicated construction code the original had in the loop and the
  // final flush.
  const pushSection = (endLine: number): void => {
    const sectionContent = lines
      .slice(currentStartLine, endLine + 1)
      .join("\n");
    sections.push({
      title: currentTitle,
      content: sectionContent,
      level: currentLevel,
      startLine: currentStartLine,
      endLine,
      wordCount: sectionContent.split(/\s+/).length,
      isEssential: isEssentialSection(currentTitle),
    });
  };

  lines.forEach((line, index) => {
    const headingMatch = line.match(/^(#{1,6})\s+(.+)$/);
    if (headingMatch) {
      // Close out the previous section before starting a new one.
      if (currentTitle) {
        pushSection(index - 1);
      }
      currentTitle = headingMatch[2].trim();
      currentLevel = headingMatch[1].length;
      currentStartLine = index;
    }
  });

  // Flush the final section; it runs to the last line of the document.
  if (currentTitle) {
    pushSection(lines.length - 1);
  }

  return sections;
}
/**
 * Determines whether a section heading denotes must-keep README content.
 *
 * A section is essential when its title mentions (case-insensitive substring
 * match) any core topic a reader needs in the main README: install/setup,
 * getting started, quick start, usage, examples, API, license, contributing.
 */
function isEssentialSection(title: string): boolean {
  const normalizedTitle = title.toLowerCase();
  const essentialKeywords = [
    "installation",
    "install",
    "setup",
    "getting started",
    "quick start",
    "usage",
    "example",
    "api",
    "license",
    "contributing",
  ];
  for (const keyword of essentialKeywords) {
    if (normalizedTitle.includes(keyword)) {
      return true;
    }
  }
  return false;
}
/**
 * Builds a "TL;DR" Markdown block summarizing the README.
 *
 * Pulls the project name from the first level-1 heading, a description from
 * the first blockquote (falling back to a truncated first paragraph of 20+
 * characters), up to three bullet points harvested from feature/what/why
 * sections, and a quick-start pointer when an install/setup section exists.
 */
function generateTldr(content: string, sections: ReadmeSection[]): string {
  const titleMatch = content.match(/^#\s+(.+)$/m);
  const projectName = titleMatch ? titleMatch[1] : "This project";

  // Prefer the first blockquote line as the description; otherwise truncate
  // the first non-heading paragraph.
  const quoteMatch = content.match(/>\s*(.+)/);
  let description = quoteMatch ? quoteMatch[1] : "";
  if (!description) {
    const paragraphMatch = content.match(/^[^#\n].{20,200}/m);
    description = paragraphMatch
      ? paragraphMatch[0].substring(0, 100) + "..."
      : "";
  }

  // Harvest bullet points (max 3 per matching section) from feature-oriented
  // sections; only the first 3 overall are rendered below.
  const features: string[] = [];
  for (const section of sections) {
    const lowered = section.title.toLowerCase();
    const looksLikeFeatures =
      lowered.includes("feature") ||
      lowered.includes("what") ||
      lowered.includes("why");
    if (!looksLikeFeatures) continue;

    const bullets = section.content.match(/^\s*[-*+]\s+(.+)$/gm);
    if (bullets && bullets.length > 0) {
      for (const bullet of bullets.slice(0, 3)) {
        features.push(bullet.replace(/^\s*[-*+]\s+/, "").trim());
      }
    }
  }

  let tldr = `## TL;DR\n\n${projectName} ${description}\n\n`;
  if (features.length > 0) {
    tldr += `**Key features:**\n`;
    for (const feature of features.slice(0, 3)) {
      tldr += `- ${feature}\n`;
    }
    tldr += "\n";
  }

  // Point readers at the standard anchors when an install/setup section exists.
  const hasInstallSection = sections.some((section) => {
    const lowered = section.title.toLowerCase();
    return lowered.includes("install") || lowered.includes("setup");
  });
  if (hasInstallSection) {
    tldr += `**Quick start:** See [Installation](#installation) → [Usage](#usage)\n\n`;
  }
  return tldr;
}
function identifySectionsToExtract(
sections: ReadmeSection[],
strategy: string,
maxLength: number,
): ExtractedSection[] {
const extractedSections: ExtractedSection[] = [];
const currentLength = sections.reduce(
(sum, s) => sum + s.content.split("\n").length,
0,
);
if (currentLength <= maxLength) {
return extractedSections; // No extraction needed
}
// Define extraction rules based on strategy
const extractionRules = getExtractionRules(strategy);
sections.forEach((section) => {
for (const rule of extractionRules) {
if (rule.matcher(section) && !section.isEssential) {
extractedSections.push({
title: section.title,
content: section.content,
suggestedLocation: rule.suggestedLocation,
reason: rule.reason,
});
break;
}
}
});
return extractedSections;
}
/**
 * Returns the ordered extraction rules for a given optimization strategy.
 *
 * Rules are evaluated in order by callers, so earlier rules win. The base
 * set moves overly long sections, troubleshooting/FAQ, configuration, and
 * development content into docs/. The "community_focused" strategy appends
 * a rule that also moves architecture/technical sections.
 */
function getExtractionRules(strategy: string) {
  // Small factory for the common "match on title regex" rule shape.
  const makeTitleRule = (
    pattern: RegExp,
    suggestedLocation: string,
    reason: string,
  ) => ({
    matcher: (section: ReadmeSection) => pattern.test(section.title),
    suggestedLocation,
    reason,
  });

  const baseRules = [
    {
      matcher: (section: ReadmeSection) => section.wordCount > 200,
      suggestedLocation: "docs/detailed-guide.md",
      reason: "Section too long for main README",
    },
    makeTitleRule(
      /troubleshoot|faq|common issues|problems/i,
      "docs/troubleshooting.md",
      "Troubleshooting content better suited for separate document",
    ),
    makeTitleRule(
      /advanced|configuration|config/i,
      "docs/configuration.md",
      "Advanced configuration details",
    ),
    makeTitleRule(
      /development|developer|build|compile/i,
      "docs/development.md",
      "Development-specific information",
    ),
  ];

  if (strategy !== "community_focused") {
    return baseRules;
  }
  return [
    ...baseRules,
    makeTitleRule(
      /architecture|design|technical/i,
      "docs/technical.md",
      "Technical details can overwhelm community contributors",
    ),
  ];
}
/**
 * Creates the docs/ directory, writes extracted sections to their suggested
 * locations, and generates a docs/README.md index.
 *
 * Fix: multiple sections can map to the same suggested file (e.g. every
 * >200-word section maps to docs/detailed-guide.md). Previously each write
 * overwrote the last, silently losing all but the final section. Sections
 * sharing a target file are now concatenated, separated by a blank line.
 * Per-file write failures are logged and do not abort the rest.
 */
async function createDocsStructure(
  projectDir: string,
  extractedSections: ExtractedSection[],
): Promise<void> {
  const docsDir = path.join(projectDir, "docs");
  try {
    await fs.mkdir(docsDir, { recursive: true });
  } catch {
    // Directory might already exist
  }

  // Group section bodies by their resolved target path so each file is
  // written exactly once with all of its content.
  const contentByFile = new Map<string, string[]>();
  for (const section of extractedSections) {
    const filePath = path.join(projectDir, section.suggestedLocation);
    const bodies = contentByFile.get(filePath) ?? [];
    bodies.push(section.content);
    contentByFile.set(filePath, bodies);
  }

  for (const [filePath, bodies] of contentByFile) {
    try {
      await fs.mkdir(path.dirname(filePath), { recursive: true });
      await fs.writeFile(filePath, bodies.join("\n\n"), "utf-8");
    } catch (error) {
      console.warn(`Failed to create ${filePath}:`, error);
    }
  }

  // Create docs index
  const indexContent = generateDocsIndex(extractedSections);
  await fs.writeFile(path.join(docsDir, "README.md"), indexContent, "utf-8");
}
/**
 * Builds the docs/README.md index listing each extracted documentation file.
 *
 * Links use only the file's basename because the index lives inside the same
 * docs/ directory as the extracted files.
 */
function generateDocsIndex(extractedSections: ExtractedSection[]): string {
  let content = "# Documentation\n\n";
  content +=
    "This directory contains detailed documentation extracted from the main README for better organization.\n\n";
  content += "## Available Documentation\n\n";
  extractedSections.forEach((section) => {
    const filename = path.basename(section.suggestedLocation);
    // Fix: the link previously emitted a literal "$(unknown)" placeholder
    // instead of the computed filename, producing broken links.
    content += `- [${section.title}](${filename}) - ${section.reason}\n`;
  });
  return content;
}
/**
 * Summarizes what the optimization accomplished as human-readable bullets.
 *
 * Conditional bullets cover line reduction (with percentage), extracted
 * section count, and the community-focused strategy; two generic advice
 * bullets are always appended.
 */
function generateOptimizationRecommendations(
  originalLength: number,
  optimizedLength: number,
  extractedSections: ExtractedSection[],
  strategy: string,
): string[] {
  const recommendations: string[] = [];
  const linesRemoved = originalLength - optimizedLength;

  if (linesRemoved > 0) {
    const percent = Math.round((linesRemoved / originalLength) * 100);
    recommendations.push(
      `✅ Reduced README length by ${linesRemoved} lines (${percent}%)`,
    );
  }
  if (extractedSections.length > 0) {
    recommendations.push(
      `📁 Moved ${extractedSections.length} detailed sections to docs/ directory`,
    );
  }
  if (strategy === "community_focused") {
    recommendations.push(
      "👥 Optimized for community contributors - prioritized quick start and contribution info",
    );
  }

  // Always-on general advice.
  recommendations.push(
    "🔗 Added links to detailed documentation for users who need more information",
    "📊 Consider adding a table of contents for sections with 5+ headings",
  );
  return recommendations;
}
/**
 * Produces follow-up action items after a README optimization run.
 *
 * Items depend on whether a result file was written, whether sections were
 * extracted, and whether the reduction exceeded 30%; two verification steps
 * are always appended.
 */
function generateOptimizationNextSteps(
  optimization: OptimizationResult,
  input: OptimizeReadmeInput,
): string[] {
  const steps: string[] = [];

  if (!input.output_path) {
    steps.push("💾 Review optimized content and save to README.md when ready");
  }
  if (optimization.extractedSections.length > 0) {
    steps.push(
      "📝 Review extracted documentation files in docs/ directory",
      "🔗 Update any internal links that may have been affected",
    );
  }
  if (optimization.reductionPercentage > 30) {
    steps.push("👀 Have team members review the condensed content for accuracy");
  }

  // Always recommend re-verification and ongoing monitoring.
  steps.push(
    "📈 Run analyze_readme again to verify improvements",
    "🎯 Consider setting up automated README length monitoring",
  );
  return steps;
}
```
--------------------------------------------------------------------------------
/src/memory/kg-integration.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Knowledge Graph Integration Helper
* Implements Phase 1.2: Context-Aware Repository Analysis Integration
*
* Provides helper functions for integrating the Knowledge Graph
* with DocuMCP tools and workflows.
*/
import KnowledgeGraph, { GraphNode } from "./knowledge-graph.js";
import { KGStorage } from "./kg-storage.js";
import { MemoryManager } from "./manager.js";
import {
createCodeFileEntities,
createDocumentationEntities,
linkCodeToDocs,
} from "./kg-code-integration.js";
// Module-level singletons shared by all exported helpers. They are created
// lazily by initializeKnowledgeGraph() and discarded whenever the storage
// directory changes.
let globalKnowledgeGraph: KnowledgeGraph | null = null;
let globalKGStorage: KGStorage | null = null;
let globalMemoryManager: MemoryManager | null = null;
// The directory the singletons above were initialized against; used to
// detect when a different storageDir requires a full re-initialization.
let currentStorageDir: string | null = null;
/**
 * Initialize the global Knowledge Graph instance.
 *
 * The storage directory is resolved from the argument, then the
 * DOCUMCP_STORAGE_DIR environment variable, then `<cwd>/.documcp/memory`.
 * If the resolved directory differs from the one the singletons were built
 * for, all cached instances are discarded and rebuilt; the persisted graph
 * (if any) is then loaded into memory.
 */
export async function initializeKnowledgeGraph(
  storageDir?: string,
): Promise<KnowledgeGraph> {
  const resolvedDir =
    storageDir ||
    process.env.DOCUMCP_STORAGE_DIR ||
    `${process.cwd()}/.documcp/memory`;

  // A different target directory invalidates every cached singleton.
  if (currentStorageDir !== resolvedDir) {
    globalKnowledgeGraph = null;
    globalKGStorage = null;
    globalMemoryManager = null;
    currentStorageDir = resolvedDir;
  }

  if (globalKnowledgeGraph) {
    return globalKnowledgeGraph;
  }

  // Build the memory manager, storage layer, and graph in dependency order.
  globalMemoryManager = new MemoryManager(resolvedDir);
  await globalMemoryManager.initialize();

  globalKGStorage = new KGStorage({ storageDir: resolvedDir });
  await globalKGStorage.initialize();

  globalKnowledgeGraph = new KnowledgeGraph(globalMemoryManager);
  await globalKnowledgeGraph.initialize();

  // Hydrate the in-memory graph from persisted entities and relationships.
  const { entities, relationships } = await globalKGStorage.loadGraph();
  for (const entity of entities) {
    globalKnowledgeGraph.addNode(entity);
  }
  for (const relationship of relationships) {
    globalKnowledgeGraph.addEdge(relationship);
  }

  return globalKnowledgeGraph;
}
/**
 * Get the global Knowledge Graph instance, initializing it on first use.
 */
export async function getKnowledgeGraph(): Promise<KnowledgeGraph> {
  return globalKnowledgeGraph ?? (await initializeKnowledgeGraph());
}
/**
 * Get the global KG Storage instance, initializing the graph stack if needed.
 */
export async function getKGStorage(): Promise<KGStorage> {
  if (globalKGStorage === null) {
    await initializeKnowledgeGraph();
  }
  // Non-null: initializeKnowledgeGraph() always populates this singleton.
  return globalKGStorage!;
}
/**
 * Get the global Memory Manager instance, initializing the graph stack if
 * needed.
 */
export async function getMemoryManager(): Promise<MemoryManager> {
  if (globalMemoryManager === null) {
    await initializeKnowledgeGraph();
  }
  // Non-null: initializeKnowledgeGraph() always populates this singleton.
  return globalMemoryManager!;
}
/**
 * Maps a file extension (including the leading dot) to a canonical language
 * name, or null when the extension is not recognized.
 */
function convertExtToLanguage(ext: string): string | null {
  const EXTENSION_TO_LANGUAGE: Record<string, string> = {
    // JavaScript / TypeScript family
    ".js": "javascript",
    ".jsx": "javascript",
    ".ts": "typescript",
    ".tsx": "typescript",
    // General-purpose languages
    ".py": "python",
    ".rb": "ruby",
    ".go": "go",
    ".java": "java",
    ".c": "c",
    ".cpp": "cpp",
    ".cs": "csharp",
    ".php": "php",
    ".rs": "rust",
    ".kt": "kotlin",
    ".swift": "swift",
    ".dart": "dart",
    // Shells
    ".sh": "shell",
    ".bash": "shell",
    ".zsh": "shell",
    ".fish": "shell",
    // Data / markup / styling
    ".yml": "yaml",
    ".yaml": "yaml",
    ".sql": "sql",
    ".html": "html",
    ".css": "css",
    ".scss": "scss",
    ".vue": "vue",
  };
  return EXTENSION_TO_LANGUAGE[ext] ?? null;
}
/**
 * Persist the in-memory Knowledge Graph (all nodes and edges) to disk.
 *
 * @throws Error when the graph stack has not been initialized.
 */
export async function saveKnowledgeGraph(): Promise<void> {
  if (globalKnowledgeGraph === null || globalKGStorage === null) {
    throw new Error("Knowledge Graph not initialized");
  }
  const nodes = await globalKnowledgeGraph.getAllNodes();
  const edges = await globalKnowledgeGraph.getAllEdges();
  await globalKGStorage.saveGraph(nodes, edges);
}
/**
 * Create or update a Project entity in the Knowledge Graph.
 *
 * Derives project metadata (size bucket, primary language, technology list)
 * from a repository analysis result, upserts the project node, creates one
 * technology node and edge per detected language, populates Phase 1.2
 * code/documentation entities (best-effort), and persists the graph before
 * returning.
 *
 * @param analysis - repository analysis result. Typed `any`; expected shape
 *   (by convention — TODO: replace with a real type): { id, path,
 *   projectName, timestamp, structure: { totalFiles, languages, hasTests,
 *   hasCI, hasDocs }, recommendations?, documentation? }.
 * @returns the created or updated project node.
 */
export async function createOrUpdateProject(analysis: any): Promise<GraphNode> {
  const kg = await getKnowledgeGraph();
  // Check for existing project — matched by filesystem path, so re-analyzing
  // the same checkout updates one node instead of creating duplicates.
  const projectId = `project:${analysis.id}`;
  const existingProject = await kg.findNode({
    type: "project",
    properties: { path: analysis.path },
  });
  // Categorize project size: <50 files = small, <500 = medium, else large.
  const size =
    analysis.structure.totalFiles < 50
      ? "small"
      : analysis.structure.totalFiles < 500
        ? "medium"
        : "large";
  // Determine primary language: the `languages` key with the highest count.
  // Keys may be extensions (".ts") or language names ("typescript").
  const languages = analysis.structure.languages || {};
  const primaryKey = Object.keys(languages).reduce(
    (a, b) => (languages[a] > languages[b] ? a : b),
    Object.keys(languages)[0] || "unknown",
  );
  // Convert to language name if it's an extension; an explicit analyzer
  // recommendation takes precedence over inference.
  let primaryLanguage: string;
  if (analysis.recommendations?.primaryLanguage) {
    primaryLanguage = analysis.recommendations.primaryLanguage;
  } else if (primaryKey.startsWith(".")) {
    primaryLanguage = convertExtToLanguage(primaryKey) || "unknown";
  } else {
    primaryLanguage = primaryKey; // Already a language name
  }
  // Convert all extensions to language names
  // Handle both extensions (.ts) and language names (typescript)
  const technologies = Object.keys(languages)
    .map((key) => {
      // If it starts with '.', it's an extension - convert it
      if (key.startsWith(".")) {
        return convertExtToLanguage(key);
      }
      // Otherwise it's already a language name - use as is
      return key;
    })
    .filter((lang): lang is string => lang !== null && lang !== "unknown");
  // Create/update project node. The existing node id is preserved on update;
  // analysisCount increments on every re-analysis.
  const projectNode = kg.addNode({
    id: existingProject?.id || projectId,
    type: "project",
    label: analysis.projectName || "Unnamed Project",
    properties: {
      name: analysis.projectName,
      path: analysis.path,
      technologies,
      size,
      primaryLanguage,
      hasTests: analysis.structure.hasTests || false,
      hasCI: analysis.structure.hasCI || false,
      hasDocs: analysis.structure.hasDocs || false,
      totalFiles: analysis.structure.totalFiles,
      lastAnalyzed: analysis.timestamp,
      analysisCount: existingProject
        ? (existingProject.properties.analysisCount || 0) + 1
        : 1,
    },
    weight: 1.0,
  });
  // Create technology nodes and relationships. Edge weight is the share of
  // the project's files written in that language.
  for (const [key, fileCount] of Object.entries(languages) as [
    string,
    number,
  ][]) {
    // Handle both extensions (.ts) and language names (typescript)
    let langName: string;
    if (key.startsWith(".")) {
      const converted = convertExtToLanguage(key);
      if (!converted) continue; // Skip unknown extensions
      langName = converted;
    } else {
      langName = key; // Already a language name
    }
    const techNodeId = `technology:${langName.toLowerCase()}`;
    // Create technology node if it doesn't exist; otherwise bump usageCount.
    const existingTech = await kg.findNode({
      type: "technology",
      properties: { name: langName },
    });
    const techNode = kg.addNode({
      id: existingTech?.id || techNodeId,
      type: "technology",
      label: langName,
      properties: {
        name: langName,
        category: "language",
        usageCount: existingTech
          ? (existingTech.properties.usageCount || 0) + 1
          : 1,
      },
      weight: 1.0,
    });
    // Create relationship
    kg.addEdge({
      source: projectNode.id,
      target: techNode.id,
      type: "project_uses_technology",
      weight: fileCount / analysis.structure.totalFiles,
      confidence: 1.0,
      properties: {
        fileCount,
        percentage: (fileCount / analysis.structure.totalFiles) * 100,
        isPrimary: langName === primaryLanguage,
      },
    });
  }
  // Phase 1.2: Create code file and documentation entities. Best-effort:
  // a failure here is logged and does not abort the project upsert.
  try {
    const codeFiles = await createCodeFileEntities(
      projectNode.id,
      analysis.path,
    );
    if (analysis.documentation?.extractedContent) {
      const docSections = await createDocumentationEntities(
        projectNode.id,
        analysis.documentation.extractedContent,
      );
      // Link code files to documentation sections
      await linkCodeToDocs(codeFiles, docSections);
    }
  } catch (error) {
    console.warn("Failed to populate code/docs entities:", error);
    // Continue without code/docs entities - not a fatal error
  }
  // Save to persistent storage
  await saveKnowledgeGraph();
  return projectNode;
}
/**
 * Get historical context for a project.
 *
 * Looks up the project node by filesystem path and returns how many times it
 * has been analyzed, when it was last analyzed, its recorded technologies,
 * and up to five other projects that share at least one technology, ordered
 * by the number of shared technologies (most first).
 *
 * Fix: the similarity sort previously used Array.filter with an *async*
 * callback — every returned Promise is truthy, so both counts always equaled
 * techEdges.length and the list was never actually sorted. Shared-technology
 * counts are now computed synchronously while scanning and used for ranking.
 */
export async function getProjectContext(projectPath: string): Promise<{
  previousAnalyses: number;
  lastAnalyzed: string | null;
  knownTechnologies: string[];
  similarProjects: GraphNode[];
}> {
  const kg = await getKnowledgeGraph();

  // Find project node
  const projectNode = await kg.findNode({
    type: "project",
    properties: { path: projectPath },
  });

  if (!projectNode) {
    return {
      previousAnalyses: 0,
      lastAnalyzed: null,
      knownTechnologies: [],
      similarProjects: [],
    };
  }

  // Get project's technologies
  const techEdges = await kg.findEdges({
    source: projectNode.id,
    type: "project_uses_technology",
  });

  // Resolve technology names (getAllNodes fetched once, not per edge).
  const allNodes = await kg.getAllNodes();
  const technologies: string[] = [];
  for (const edge of techEdges) {
    const techNode = allNodes.find((n) => n.id === edge.target);
    if (techNode) {
      technologies.push(techNode.properties.name);
    }
  }

  // Find projects sharing at least one technology, remembering how many
  // technologies each shares so they can be ranked afterwards.
  const myTechTargets = new Set(techEdges.map((e) => e.target));
  const allProjects = await kg.findNodes({ type: "project" });
  const sharedCounts = new Map<string, number>();
  const similarProjects: GraphNode[] = [];

  for (const otherProject of allProjects) {
    if (otherProject.id === projectNode.id) continue;

    const otherTechEdges = await kg.findEdges({
      source: otherProject.id,
      type: "project_uses_technology",
    });
    const sharedCount = otherTechEdges.filter((e) =>
      myTechTargets.has(e.target),
    ).length;

    if (sharedCount > 0) {
      similarProjects.push(otherProject);
      sharedCounts.set(otherProject.id, sharedCount);
    }
  }

  // Sort by similarity (shared technology count), descending.
  similarProjects.sort(
    (a, b) => (sharedCounts.get(b.id) ?? 0) - (sharedCounts.get(a.id) ?? 0),
  );

  return {
    previousAnalyses: projectNode.properties.analysisCount || 0,
    lastAnalyzed: projectNode.properties.lastAnalyzed || null,
    knownTechnologies: technologies,
    similarProjects: similarProjects.slice(0, 5),
  };
}
// Monotonically increasing counter appended to deployment edge identifiers so
// that two deployments recorded within the same millisecond (identical ISO
// timestamps) still get unique IDs. Resets to 0 on process restart, which is
// safe because the timestamp component changes across restarts.
let deploymentCounter = 0;
/**
 * Track a deployment outcome in the Knowledge Graph.
 *
 * Upserts a per-SSG configuration node — maintaining a running success rate,
 * usage count, and average build time — and records the deployment as an
 * edge from the project to that configuration. The graph is persisted to
 * disk before returning.
 *
 * @param projectNodeId - graph node id of an existing project.
 * @param ssg - static site generator used for the deployment.
 * @param success - whether the deployment succeeded.
 * @param metadata - optional build time (presumably milliseconds — TODO
 *   confirm units), error message, and deployment URL.
 * @throws Error when the project node cannot be found.
 */
export async function trackDeployment(
  projectNodeId: string,
  ssg: string,
  success: boolean,
  metadata?: {
    buildTime?: number;
    errorMessage?: string;
    deploymentUrl?: string;
  },
): Promise<void> {
  const kg = await getKnowledgeGraph();
  // Find project node by ID
  const allNodes = await kg.getAllNodes();
  const projectNode = allNodes.find((n) => n.id === projectNodeId);
  if (!projectNode) {
    throw new Error(`Project not found: ${projectNodeId}`);
  }
  // Find or create configuration node
  const configNodeId = `configuration:${ssg}`;
  let configNode = await kg.findNode({
    type: "configuration",
    properties: { ssg },
  });
  if (!configNode) {
    // First deployment with this SSG: seed the success rate from this
    // single outcome.
    configNode = kg.addNode({
      id: configNodeId,
      type: "configuration",
      label: `${ssg} configuration`,
      properties: {
        ssg,
        settings: {},
        deploymentSuccessRate: success ? 1.0 : 0.0,
        usageCount: 1,
        lastUsed: new Date().toISOString(),
      },
      weight: 1.0,
    });
  } else {
    // Update success rate incrementally:
    // newRate = (oldRate * n + outcome) / (n + 1).
    const currentRate = configNode.properties.deploymentSuccessRate || 0.5;
    const currentCount = configNode.properties.usageCount || 1;
    const newRate =
      (currentRate * currentCount + (success ? 1.0 : 0.0)) / (currentCount + 1);
    configNode.properties.deploymentSuccessRate = newRate;
    configNode.properties.usageCount = currentCount + 1;
    configNode.properties.lastUsed = new Date().toISOString();
    if (metadata?.buildTime) {
      // Running average of build times over the same count.
      const currentAvg = configNode.properties.buildTimeAverage || 0;
      configNode.properties.buildTimeAverage =
        (currentAvg * currentCount + metadata.buildTime) / (currentCount + 1);
    }
    // Re-add the node to update it in the knowledge graph
    kg.addNode(configNode);
  }
  // Create deployment relationship with unique timestamp+counter to allow multiple deployments
  // NOTE(review): the uniqueness suffix is embedded in the edge *type*, so
  // consumers querying the bare "project_deployed_with" type only match if
  // findEdges prefix-matches or consults properties.baseType — verify in
  // KnowledgeGraph.findEdges.
  const timestamp = new Date().toISOString();
  const uniqueId = `${timestamp}:${deploymentCounter++}`;
  kg.addEdge({
    source: projectNode.id,
    target: configNode.id,
    type: `project_deployed_with:${uniqueId}`,
    weight: success ? 1.0 : 0.5,
    confidence: 1.0,
    properties: {
      success,
      timestamp,
      buildTime: metadata?.buildTime,
      errorMessage: metadata?.errorMessage,
      deploymentUrl: metadata?.deploymentUrl,
      // Store the base type for filtering
      baseType: "project_deployed_with",
    },
  });
  await saveKnowledgeGraph();
}
/**
 * Get deployment recommendations based on historical data.
 *
 * Aggregates deployment edges from every other project in the graph and
 * ranks SSGs by (mean deployment edge weight × configuration success rate),
 * descending. Returns an empty array when the project is not found.
 *
 * NOTE(review): the lookup matches on properties.id, but project nodes
 * created in this module store name/path in properties (no id property) —
 * verify that findNode also matches the node's own top-level id, otherwise
 * this lookup may always miss.
 */
export async function getDeploymentRecommendations(projectId: string): Promise<
  Array<{
    ssg: string;
    confidence: number;
    reasoning: string[];
    successRate: number;
  }>
> {
  const kg = await getKnowledgeGraph();
  // Find project
  const projectNode = await kg.findNode({
    type: "project",
    properties: { id: projectId },
  });
  if (!projectNode) {
    return [];
  }
  // Find similar projects
  // NOTE(review): this fetches *all* project nodes; no similarity filtering
  // is applied beyond skipping the project itself.
  const similarProjects = await kg.findNodes({
    type: "project",
  });
  // Per-SSG accumulator keyed by SSG name.
  const recommendations = new Map<
    string,
    {
      ssg: string;
      totalWeight: number;
      count: number;
      successRate: number;
      reasoning: string[];
    }
  >();
  // Analyze deployments from similar projects
  for (const similar of similarProjects) {
    if (similar.id === projectNode.id) continue;
    // NOTE(review): trackDeployment writes suffixed edge types
    // ("project_deployed_with:<timestamp>:<n>"); querying the bare base type
    // assumes findEdges prefix-matches or checks properties.baseType — confirm.
    const deployments = await kg.findEdges({
      source: similar.id,
      type: "project_deployed_with",
    });
    for (const deployment of deployments) {
      // NOTE(review): getAllNodes() is awaited inside this loop; consider
      // hoisting if deployment counts grow large.
      const configNode = (await kg.getAllNodes()).find(
        (n) => n.id === deployment.target,
      );
      if (!configNode) continue;
      const ssg = configNode.properties.ssg;
      const existing = recommendations.get(ssg) || {
        ssg,
        totalWeight: 0,
        count: 0,
        successRate: 0,
        reasoning: [] as string[],
      };
      existing.totalWeight += deployment.weight;
      existing.count += 1;
      // Success rate reflects the configuration node's latest running rate.
      existing.successRate = configNode.properties.deploymentSuccessRate || 0;
      if (deployment.properties.success) {
        existing.reasoning.push(
          `Successfully used by similar project ${similar.label}`,
        );
      }
      recommendations.set(ssg, existing);
    }
  }
  // Convert to array and calculate confidence:
  // confidence = (mean edge weight) * (configuration success rate).
  return Array.from(recommendations.values())
    .map((rec) => ({
      ssg: rec.ssg,
      confidence: (rec.totalWeight / rec.count) * rec.successRate,
      reasoning: rec.reasoning.slice(0, 3), // Top 3 reasons
      successRate: rec.successRate,
    }))
    .sort((a, b) => b.confidence - a.confidence);
}
/**
 * Get Knowledge Graph statistics: total node/edge counts, per-type counts
 * for the three core entity types, and underlying storage-file statistics.
 */
export async function getKGStatistics(): Promise<{
  nodeCount: number;
  edgeCount: number;
  projectCount: number;
  technologyCount: number;
  configurationCount: number;
  storageStats: any;
}> {
  const kg = await getKnowledgeGraph();
  const storage = await getKGStorage();
  const graphStats = await kg.getStatistics();
  const storageStats = await storage.getStatistics();

  // Missing types simply have no entry in nodesByType; report 0 for them.
  const countOfType = (type: string): number =>
    graphStats.nodesByType[type] || 0;

  return {
    nodeCount: graphStats.nodeCount,
    edgeCount: graphStats.edgeCount,
    projectCount: countOfType("project"),
    technologyCount: countOfType("technology"),
    configurationCount: countOfType("configuration"),
    storageStats,
  };
}
```
--------------------------------------------------------------------------------
/src/memory/kg-storage.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Knowledge Graph Storage Module
* Implements Phase 1.1: Enhanced Storage Format
*
* Provides persistent storage for knowledge graph entities and relationships
* using separate JSONL files with safety mechanisms.
*/
import { promises as fs } from "fs";
import { join, dirname } from "path";
import { GraphNode, GraphEdge } from "./knowledge-graph.js";
import { SCHEMA_METADATA } from "./schemas.js";
// File markers for safety: written as the first line of every graph file.
// initializeFile() refuses to touch files lacking a DocuMCP marker, which
// prevents accidentally overwriting unrelated user files; the schema version
// is embedded to support future migrations.
const ENTITY_FILE_MARKER = `# DOCUMCP_KNOWLEDGE_GRAPH_ENTITIES v${SCHEMA_METADATA.version}`;
const RELATIONSHIP_FILE_MARKER = `# DOCUMCP_KNOWLEDGE_GRAPH_RELATIONSHIPS v${SCHEMA_METADATA.version}`;
/** Configuration for KGStorage. */
export interface KGStorageConfig {
  // Directory where the JSONL graph files (and their backups) live.
  storageDir: string;
  // Create a timestamped backup before each write. Default: true.
  backupOnWrite?: boolean;
  // Validate each record's required fields while loading. Default: true.
  validateOnRead?: boolean;
}
/** Summary statistics about the persisted graph files. */
export interface KGStorageStats {
  entityCount: number;
  relationshipCount: number;
  // ISO timestamp of the more recently modified of the two files.
  lastModified: string;
  schemaVersion: string;
  // On-disk sizes in bytes (0 when a file does not exist yet).
  fileSize: {
    entities: number;
    relationships: number;
  };
}
export class KGStorage {
  // Fully-resolved configuration (optional flags defaulted in constructor).
  private config: Required<KGStorageConfig>;
  // Resolved paths for the two JSONL files and the backup directory.
  private entityFilePath: string;
  private relationshipFilePath: string;
  private backupDir: string;
/**
 * @param config Storage configuration; backupOnWrite and validateOnRead
 *               default to true when omitted or explicitly undefined.
 */
constructor(config: KGStorageConfig) {
  // Fix: default each flag with ?? instead of object spread. Spreading
  // `...config` after the defaults propagated an explicit `undefined`
  // (allowed by the optional property types) into the Required<> config,
  // silently disabling neither-true-nor-false behavior downstream.
  this.config = {
    storageDir: config.storageDir,
    backupOnWrite: config.backupOnWrite ?? true,
    validateOnRead: config.validateOnRead ?? true,
  };
  this.entityFilePath = join(
    config.storageDir,
    "knowledge-graph-entities.jsonl",
  );
  this.relationshipFilePath = join(
    config.storageDir,
    "knowledge-graph-relationships.jsonl",
  );
  this.backupDir = join(config.storageDir, "backups");
}
/**
 * Initialize storage: create the storage directory, the backup directory
 * (when backups are enabled), and both JSONL files with their safety
 * markers if they do not exist yet.
 *
 * @throws Error wrapping any underlying filesystem failure.
 */
async initialize(): Promise<void> {
  try {
    await fs.mkdir(this.config.storageDir, { recursive: true });
    if (this.config.backupOnWrite) {
      await fs.mkdir(this.backupDir, { recursive: true });
    }
    await this.initializeFile(this.entityFilePath, ENTITY_FILE_MARKER);
    await this.initializeFile(
      this.relationshipFilePath,
      RELATIONSHIP_FILE_MARKER,
    );
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    throw new Error(`Failed to initialize KG storage: ${message}`);
  }
}
/**
 * Ensure a JSONL file exists and carries a DocuMCP marker as its first line.
 *
 * Creates the file (marker only) when missing. When the file exists but its
 * first line is not a DocuMCP knowledge-graph marker, throws rather than
 * ever writing to what may be an unrelated user file.
 */
private async initializeFile(
  filePath: string,
  marker: string,
): Promise<void> {
  try {
    await fs.access(filePath);
    const firstLine = await this.readFirstLine(filePath);
    if (firstLine.startsWith("# DOCUMCP_KNOWLEDGE_GRAPH")) {
      return; // Existing DocuMCP file — leave untouched.
    }
    throw new Error(
      `File ${filePath} is not a DocuMCP knowledge graph file. ` +
        `Refusing to overwrite to prevent data loss.`,
    );
  } catch (error: any) {
    if (error.code !== "ENOENT") {
      throw error;
    }
    // Missing file: create it containing only the marker line.
    await fs.writeFile(filePath, marker + "\n", "utf-8");
  }
}
/**
 * Read a file's first line (used to verify the safety marker).
 * Note: reads the whole file into memory; graph files are expected to be
 * modest JSONL files.
 */
private async readFirstLine(filePath: string): Promise<string> {
  const text = await fs.readFile(filePath, "utf-8");
  const newlineIndex = text.indexOf("\n");
  return newlineIndex === -1 ? text : text.slice(0, newlineIndex);
}
/**
 * Save entities to storage atomically.
 *
 * Builds the marker line plus all entities (one JSON object per line) in
 * memory and writes them in a single call to a temp file, then renames it
 * over the live file. This replaces the previous per-entity appendFile
 * loop, which issued one syscall per entity and left the temp file
 * observable in partially-appended states mid-write. Output bytes are
 * identical to the previous format.
 *
 * @throws Error wrapping any underlying filesystem failure.
 */
async saveEntities(entities: GraphNode[]): Promise<void> {
  try {
    // Ensure parent directory exists
    await fs.mkdir(dirname(this.entityFilePath), { recursive: true });
    // Create backup if enabled
    if (this.config.backupOnWrite) {
      await this.backupFile(this.entityFilePath, "entities");
    }
    // Write the full payload to a temporary file, then atomically rename.
    const tempFile = `${this.entityFilePath}.tmp`;
    const lines = entities.map((entity) => JSON.stringify(entity));
    const payload = [ENTITY_FILE_MARKER, ...lines].join("\n") + "\n";
    await fs.writeFile(tempFile, payload, "utf-8");
    await fs.rename(tempFile, this.entityFilePath);
  } catch (error) {
    throw new Error(
      `Failed to save entities: ${
        error instanceof Error ? error.message : String(error)
      }`,
    );
  }
}
/**
 * Load all entities from storage.
 *
 * Returns [] when the file does not exist yet. Lines that fail to parse
 * (or, when validateOnRead is set, fail validation) are logged and skipped
 * rather than aborting the whole load.
 *
 * @throws Error wrapping any non-ENOENT filesystem failure.
 */
async loadEntities(): Promise<GraphNode[]> {
  let content: string;
  try {
    await fs.access(this.entityFilePath);
    content = await fs.readFile(this.entityFilePath, "utf-8");
  } catch (error: any) {
    if (error.code === "ENOENT") {
      return []; // Nothing persisted yet.
    }
    throw new Error(
      `Failed to load entities: ${
        error instanceof Error ? error.message : String(error)
      }`,
    );
  }

  // Drop blank lines, then the marker line (always first).
  const dataLines = content
    .split("\n")
    .filter((line) => line.trim())
    .slice(1);

  const entities: GraphNode[] = [];
  for (const line of dataLines) {
    try {
      const entity = JSON.parse(line) as GraphNode;
      if (this.config.validateOnRead) {
        this.validateEntity(entity);
      }
      entities.push(entity);
    } catch (error) {
      // Skip corrupt records rather than failing the whole load.
      console.error(`Failed to parse entity line: ${line}`, error);
    }
  }
  return entities;
}
/**
 * Save relationships to storage atomically.
 *
 * Builds the marker line plus all relationships (one JSON object per line)
 * in memory and writes them in a single call to a temp file, then renames
 * it over the live file. This replaces the previous per-record appendFile
 * loop (one syscall per relationship, partially-appended temp states).
 * Output bytes are identical to the previous format.
 *
 * @throws Error wrapping any underlying filesystem failure.
 */
async saveRelationships(relationships: GraphEdge[]): Promise<void> {
  try {
    // Ensure parent directory exists
    await fs.mkdir(dirname(this.relationshipFilePath), { recursive: true });
    // Create backup if enabled
    if (this.config.backupOnWrite) {
      await this.backupFile(this.relationshipFilePath, "relationships");
    }
    // Write the full payload to a temporary file, then atomically rename.
    const tempFile = `${this.relationshipFilePath}.tmp`;
    const lines = relationships.map((relationship) =>
      JSON.stringify(relationship),
    );
    const payload = [RELATIONSHIP_FILE_MARKER, ...lines].join("\n") + "\n";
    await fs.writeFile(tempFile, payload, "utf-8");
    await fs.rename(tempFile, this.relationshipFilePath);
  } catch (error) {
    throw new Error(
      `Failed to save relationships: ${
        error instanceof Error ? error.message : String(error)
      }`,
    );
  }
}
/**
 * Load all relationships from storage.
 *
 * Returns [] when the file does not exist yet. Lines that fail to parse
 * (or, when validateOnRead is set, fail validation) are logged and skipped
 * rather than aborting the whole load.
 *
 * @throws Error wrapping any non-ENOENT filesystem failure.
 */
async loadRelationships(): Promise<GraphEdge[]> {
  let content: string;
  try {
    await fs.access(this.relationshipFilePath);
    content = await fs.readFile(this.relationshipFilePath, "utf-8");
  } catch (error: any) {
    if (error.code === "ENOENT") {
      return []; // Nothing persisted yet.
    }
    throw new Error(
      `Failed to load relationships: ${
        error instanceof Error ? error.message : String(error)
      }`,
    );
  }

  // Drop blank lines, then the marker line (always first).
  const dataLines = content
    .split("\n")
    .filter((line) => line.trim())
    .slice(1);

  const relationships: GraphEdge[] = [];
  for (const line of dataLines) {
    try {
      const relationship = JSON.parse(line) as GraphEdge;
      if (this.config.validateOnRead) {
        this.validateRelationship(relationship);
      }
      relationships.push(relationship);
    } catch (error) {
      // Skip corrupt records rather than failing the whole load.
      console.error(`Failed to parse relationship line: ${line}`, error);
    }
  }
  return relationships;
}
/**
* Save complete graph (entities + relationships)
*/
async saveGraph(
entities: GraphNode[],
relationships: GraphEdge[],
): Promise<void> {
await Promise.all([
this.saveEntities(entities),
this.saveRelationships(relationships),
]);
}
/**
 * Load the complete graph (entities + relationships), reading both files
 * concurrently.
 */
async loadGraph(): Promise<{
  entities: GraphNode[];
  relationships: GraphEdge[];
}> {
  const [loadedEntities, loadedRelationships] = await Promise.all([
    this.loadEntities(),
    this.loadRelationships(),
  ]);
  return { entities: loadedEntities, relationships: loadedRelationships };
}
/**
 * Create a timestamped backup of a graph file in the backups directory.
 *
 * Best-effort by design: a missing source file is silently ignored (nothing
 * to back up), and any other failure is logged as a warning rather than
 * thrown so a backup problem never blocks the write that triggered it.
 * After a successful copy, backups beyond the retention limit are pruned.
 */
private async backupFile(
  filePath: string,
  type: "entities" | "relationships",
): Promise<void> {
  try {
    // Check if file exists
    await fs.access(filePath);
    // Ensure backup directory exists
    await fs.mkdir(this.backupDir, { recursive: true });
    // Create backup filename with timestamp; ':' and '.' are replaced
    // because they are not filename-safe on all platforms.
    const timestamp = new Date().toISOString().replace(/[:.]/g, "-");
    const backupFilename = `${type}-${timestamp}.jsonl`;
    const backupPath = join(this.backupDir, backupFilename);
    // Copy file
    await fs.copyFile(filePath, backupPath);
    // Clean up old backups (keep last 10)
    await this.cleanupOldBackups(type);
  } catch (error: any) {
    if (error.code !== "ENOENT") {
      // Only warn if it's not a "file not found" error
      console.warn(`Failed to backup file ${filePath}:`, error);
    }
  }
}
/**
 * Clean up old backup files, keeping only the `keepCount` most recent (by
 * file modification time) for the given type. Best-effort: individual
 * stat/unlink failures and a missing backup directory are tolerated.
 */
private async cleanupOldBackups(
  type: "entities" | "relationships",
  keepCount: number = 10,
): Promise<void> {
  try {
    // Ensure backup directory exists before reading
    await fs.mkdir(this.backupDir, { recursive: true });
    const files = await fs.readdir(this.backupDir);
    // Filter files by type (backup names are "<type>-<timestamp>.jsonl")
    const typeFiles = files
      .filter((file) => file.startsWith(type))
      .map((file) => join(this.backupDir, file));
    // Sort by modification time (newest first)
    const filesWithStats = await Promise.all(
      typeFiles.map(async (file) => {
        try {
          const stats = await fs.stat(file);
          return { file, mtime: stats.mtime.getTime() };
        } catch (error) {
          // File might have been deleted, return null
          return null;
        }
      }),
    );
    // Filter out null values and sort
    const validFiles = filesWithStats.filter((f) => f !== null) as Array<{
      file: string;
      mtime: number;
    }>;
    validFiles.sort((a, b) => b.mtime - a.mtime);
    // Delete old files; per-file unlink errors are deliberately swallowed.
    const filesToDelete = validFiles.slice(keepCount);
    await Promise.all(
      filesToDelete.map(({ file }) => fs.unlink(file).catch(() => {})),
    );
  } catch (error) {
    // Only log if it's not a missing directory error
    if ((error as any).code !== "ENOENT") {
      console.warn(`Failed to cleanup old backups:`, error);
    }
  }
}
/**
 * Get storage statistics.
 *
 * Record counts are obtained by fully loading (and, when validateOnRead is
 * enabled, validating) both JSONL files, so this call is O(file size).
 * Missing files contribute size 0 and the current time as their mtime.
 */
async getStatistics(): Promise<KGStorageStats> {
  const [entities, relationships] = await Promise.all([
    this.loadEntities(),
    this.loadRelationships(),
  ]);
  // stat both files concurrently, substituting a zero-size/now fallback
  // when a file does not exist yet.
  const [entitiesStats, relationshipsStats] = await Promise.all([
    fs
      .stat(this.entityFilePath)
      .catch(() => ({ size: 0, mtime: new Date() })),
    fs
      .stat(this.relationshipFilePath)
      .catch(() => ({ size: 0, mtime: new Date() })),
  ]);
  // Most recent of the two files' modification times.
  const lastModified = new Date(
    Math.max(
      entitiesStats.mtime.getTime(),
      relationshipsStats.mtime.getTime(),
    ),
  ).toISOString();
  return {
    entityCount: entities.length,
    relationshipCount: relationships.length,
    lastModified,
    schemaVersion: SCHEMA_METADATA.version,
    fileSize: {
      entities: entitiesStats.size,
      relationships: relationshipsStats.size,
    },
  };
}
/**
* Restore from backup
*/
async restoreFromBackup(
  type: "entities" | "relationships",
  timestamp?: string,
): Promise<void> {
  try {
    const allFiles = await fs.readdir(this.backupDir);
    // Only consider backups of the requested store type.
    const candidates = allFiles.filter((name) => name.startsWith(type));
    if (candidates.length === 0) {
      throw new Error(`No backups found for ${type}`);
    }

    let chosen: string;
    if (timestamp) {
      // Caller asked for a specific snapshot.
      const match = candidates.find((name) => name.includes(timestamp));
      if (!match) {
        throw new Error(`Backup with timestamp ${timestamp} not found`);
      }
      chosen = match;
    } else {
      // Otherwise pick the most recently modified backup.
      const stamped = await Promise.all(
        candidates.map(async (name) => ({
          name,
          mtime: (await fs.stat(join(this.backupDir, name))).mtime.getTime(),
        })),
      );
      stamped.sort((a, b) => b.mtime - a.mtime);
      chosen = stamped[0].name;
    }

    const target =
      type === "entities" ? this.entityFilePath : this.relationshipFilePath;
    // Overwrite the live store file with the chosen backup.
    await fs.copyFile(join(this.backupDir, chosen), target);

    // Log restoration success (can be monitored)
    if (process.env.DEBUG) {
      // eslint-disable-next-line no-console
      console.log(`Restored ${type} from backup: ${chosen}`);
    }
  } catch (error) {
    throw new Error(
      `Failed to restore from backup: ${
        error instanceof Error ? error.message : String(error)
      }`,
    );
  }
}
/**
* Validate entity structure
*/
private validateEntity(entity: GraphNode): void {
if (!entity.id || !entity.type || !entity.label) {
throw new Error(`Invalid entity structure: missing required fields`);
}
}
/**
* Validate relationship structure
*/
private validateRelationship(relationship: GraphEdge): void {
if (
!relationship.id ||
!relationship.source ||
!relationship.target ||
!relationship.type
) {
throw new Error(
`Invalid relationship structure: missing required fields`,
);
}
}
/**
* Verify integrity of stored data
*/
async verifyIntegrity(): Promise<{
  valid: boolean;
  errors: string[];
  warnings: string[];
}> {
  const errors: string[] = [];
  const warnings: string[] = [];
  try {
    const { entities, relationships } = await this.loadGraph();

    // Orphaned relationships (a missing endpoint) are warnings, not errors.
    const knownIds = new Set(entities.map((entity) => entity.id));
    for (const rel of relationships) {
      if (!knownIds.has(rel.source)) {
        warnings.push(
          `Relationship ${rel.id} references missing source entity: ${rel.source}`,
        );
      }
      if (!knownIds.has(rel.target)) {
        warnings.push(
          `Relationship ${rel.id} references missing target entity: ${rel.target}`,
        );
      }
    }

    // Duplicate entity IDs are hard errors.
    const occurrences = new Map<string, number>();
    for (const entity of entities) {
      occurrences.set(entity.id, (occurrences.get(entity.id) ?? 0) + 1);
    }
    for (const [id, count] of occurrences) {
      if (count > 1) {
        errors.push(`Duplicate entity ID found: ${id} (${count} instances)`);
      }
    }

    return { valid: errors.length === 0, errors, warnings };
  } catch (error) {
    errors.push(
      `Integrity check failed: ${
        error instanceof Error ? error.message : String(error)
      }`,
    );
    return { valid: false, errors, warnings };
  }
}
/**
* Export graph as JSON (for inspection/debugging)
*/
async exportAsJSON(): Promise<string> {
  const { entities, relationships } = await this.loadGraph();
  // Bundle a metadata header with the full graph contents.
  const snapshot = {
    metadata: {
      version: SCHEMA_METADATA.version,
      exportDate: new Date().toISOString(),
      entityCount: entities.length,
      relationshipCount: relationships.length,
    },
    entities,
    relationships,
  };
  // Pretty-print with 2-space indent for human inspection.
  return JSON.stringify(snapshot, null, 2);
}
}
```
--------------------------------------------------------------------------------
/tests/tools/tool-error-handling.test.ts:
--------------------------------------------------------------------------------
```typescript
import { promises as fs } from "fs";
import { join } from "path";
import { tmpdir } from "os";
import { analyzeRepository } from "../../src/tools/analyze-repository.js";
import { recommendSSG } from "../../src/tools/recommend-ssg.js";
import { generateConfig } from "../../src/tools/generate-config.js";
import { setupStructure } from "../../src/tools/setup-structure.js";
import { deployPages } from "../../src/tools/deploy-pages.js";
describe("Tool Error Handling and Edge Cases", () => {
let tempDir: string;
beforeEach(async () => {
tempDir = join(
tmpdir(),
`test-errors-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
);
await fs.mkdir(tempDir, { recursive: true });
});
afterEach(async () => {
try {
await fs.rm(tempDir, { recursive: true });
} catch {
// Ignore cleanup errors
}
});
describe("Repository Analysis Error Handling", () => {
it.skip("should handle non-existent directories gracefully", async () => {
const nonExistentPath = join(tempDir, "non-existent");
await expect(
analyzeRepository({ path: nonExistentPath }),
).rejects.toThrow();
});
it("should handle empty directories", async () => {
const result = await analyzeRepository({ path: tempDir });
expect(result).toBeDefined();
expect(result.content).toBeDefined();
expect(Array.isArray(result.content)).toBe(true);
});
it.skip("should handle directories with permission issues", async () => {
const restrictedDir = join(tempDir, "restricted");
await fs.mkdir(restrictedDir);
try {
// Make directory unreadable
await fs.chmod(restrictedDir, 0o000);
await expect(
analyzeRepository({ path: restrictedDir }),
).rejects.toThrow();
} finally {
// Restore permissions for cleanup
await fs.chmod(restrictedDir, 0o755);
}
});
it("should handle malformed package.json files", async () => {
await fs.writeFile(join(tempDir, "package.json"), "invalid json content");
const result = await analyzeRepository({ path: tempDir });
expect(result).toBeDefined();
expect(result.content).toBeDefined();
expect(Array.isArray(result.content)).toBe(true);
});
it("should handle very large directories efficiently", async () => {
// Create many files to test performance
const promises = [];
for (let i = 0; i < 100; i++) {
promises.push(
fs.writeFile(join(tempDir, `file-${i}.js`), `console.log(${i});`),
);
}
await Promise.all(promises);
const startTime = Date.now();
const result = await analyzeRepository({ path: tempDir, depth: "quick" });
const duration = Date.now() - startTime;
expect(result).toBeDefined();
expect(duration).toBeLessThan(2000); // Should complete within 2 seconds
});
it.skip("should handle invalid depth parameters", async () => {
await fs.writeFile(join(tempDir, "package.json"), '{"name": "test"}');
// @ts-ignore - Testing invalid parameter
const result = await analyzeRepository({
path: tempDir,
depth: "invalid" as any,
});
expect(result).toBeDefined();
// Should default to 'standard' depth
});
});
describe("SSG Recommendation Error Handling", () => {
it("should handle missing analysis data", async () => {
await expect(recommendSSG({})).rejects.toThrow();
});
it.skip("should handle invalid analysis IDs", async () => {
await expect(
recommendSSG({ analysisId: "non-existent-id" }),
).rejects.toThrow();
});
it.skip("should provide fallback recommendations for edge cases", async () => {
// Create minimal valid analysis
const minimalAnalysis = {
projectType: "unknown",
languages: [],
frameworks: [],
complexity: "low" as const,
dependencies: [],
devDependencies: [],
scripts: {},
fileCount: 0,
totalSize: 0,
};
// Test with various edge case preferences
const testCases = [
{ ecosystem: "invalid" as any },
{ priority: "unknown" as any },
{ ecosystem: "javascript", priority: "performance" },
];
for (const preferences of testCases) {
const result = await recommendSSG({
analysisId: "test-analysis-id",
preferences,
});
expect(result).toBeDefined();
expect(result.content).toBeDefined();
expect(Array.isArray(result.content)).toBe(true);
}
});
it.skip("should handle analysis with missing required fields", async () => {
const incompleteAnalysis = {
projectType: "javascript",
// Missing other required fields
};
await expect(
recommendSSG({
// @ts-ignore - Testing incomplete data
analysisId: undefined,
}),
).rejects.toThrow();
});
});
describe("Configuration Generation Error Handling", () => {
it("should handle invalid SSG types", async () => {
await expect(
generateConfig({
ssg: "invalid-ssg" as any,
projectName: "test",
outputPath: tempDir,
}),
).rejects.toThrow();
});
it("should handle missing required parameters", async () => {
await expect(generateConfig({})).rejects.toThrow();
await expect(generateConfig({ ssg: "jekyll" })).rejects.toThrow();
await expect(
generateConfig({ ssg: "jekyll", projectName: "test" }),
).rejects.toThrow();
});
it("should handle write permission issues", async () => {
const readOnlyDir = join(tempDir, "readonly");
await fs.mkdir(readOnlyDir);
try {
await fs.chmod(readOnlyDir, 0o444);
const result = await generateConfig({
ssg: "jekyll",
projectName: "test",
outputPath: readOnlyDir,
});
expect((result as any).isError).toBe(true);
expect(result.content).toBeDefined();
expect(
result.content.some(
(item: any) => item.text && item.text.includes("permission denied"),
),
).toBe(true);
} finally {
await fs.chmod(readOnlyDir, 0o755);
}
});
it("should handle extremely long project names", async () => {
const longName = "a".repeat(1000);
const result = await generateConfig({
ssg: "jekyll",
projectName: longName,
outputPath: tempDir,
});
expect(result).toBeDefined();
expect(result.content).toBeDefined();
});
it("should handle special characters in project names", async () => {
const specialChars = [
"test@project",
"test#project",
"test space project",
"test/project",
];
for (const projectName of specialChars) {
const result = await generateConfig({
ssg: "jekyll",
projectName,
outputPath: tempDir,
});
expect(result).toBeDefined();
expect(result.content).toBeDefined();
}
});
it("should validate SSG-specific configuration options", async () => {
const ssgTypes = ["jekyll", "hugo", "docusaurus", "mkdocs", "eleventy"];
for (const ssg of ssgTypes) {
const result = await generateConfig({
ssg: ssg as any,
projectName: `test-${ssg}`,
outputPath: tempDir,
projectDescription: `Test project for ${ssg}`,
});
expect(result).toBeDefined();
expect(result.content).toBeDefined();
expect(result.content).toBeDefined();
expect(result.content.length).toBeGreaterThan(0);
}
});
});
describe("Structure Setup Error Handling", () => {
it("should handle invalid output paths", async () => {
// Use a path that will definitely fail - a file path instead of directory
// First create a file, then try to use it as a directory path
const invalidPath = join(tempDir, "not-a-directory.txt");
await fs.writeFile(invalidPath, "this is a file, not a directory");
const result = await setupStructure({
path: invalidPath,
ssg: "jekyll",
});
expect((result as any).isError).toBe(true);
expect(result.content).toBeDefined();
expect(
result.content.some(
(item: any) =>
item.text &&
(item.text.includes("ENOTDIR") ||
item.text.includes("EEXIST") ||
item.text.includes("not a directory")),
),
).toBe(true);
});
it("should handle missing SSG parameter", async () => {
await expect(
setupStructure({
path: tempDir,
}),
).rejects.toThrow();
});
it("should create structure in existing directories with files", async () => {
// Create some existing files
await fs.writeFile(
join(tempDir, "existing-file.txt"),
"existing content",
);
await fs.mkdir(join(tempDir, "existing-dir"));
const result = await setupStructure({
path: tempDir,
ssg: "jekyll",
includeExamples: true,
});
expect(result).toBeDefined();
expect(result.content).toBeDefined();
expect(Array.isArray(result.content)).toBe(true);
// Should not overwrite existing files
const existingContent = await fs.readFile(
join(tempDir, "existing-file.txt"),
"utf8",
);
expect(existingContent).toBe("existing content");
});
it("should handle different Diataxis structure options", async () => {
const options = [
{ includeExamples: true },
{ includeExamples: false },
{
includeExamples: true,
customStructure: { tutorials: ["custom-tutorial"] },
},
];
for (const option of options) {
const testDir = join(
tempDir,
`test-${Math.random().toString(36).substr(2, 5)}`,
);
await fs.mkdir(testDir);
const result = await setupStructure({
path: testDir,
ssg: "docusaurus",
...option,
});
expect(result).toBeDefined();
expect(result.content).toBeDefined();
}
});
});
describe("Deployment Setup Error Handling", () => {
it("should handle repositories without proper configuration", async () => {
const result = await deployPages({
repository: "invalid/repo/format",
ssg: "jekyll",
});
// deployPages actually succeeds with invalid repo format - it just creates workflow
expect(result.content).toBeDefined();
expect(result.content[0].text).toContain("invalid/repo/format");
});
it("should handle missing repository parameter", async () => {
await expect(
deployPages({
ssg: "jekyll",
}),
).rejects.toThrow();
});
it("should handle different branch configurations", async () => {
const branchConfigs = [
{ branch: "main" },
{ branch: "master" },
{ branch: "gh-pages" },
{ branch: "custom-branch" },
];
for (const config of branchConfigs) {
const result = await deployPages({
repository: "user/test-repo",
ssg: "jekyll",
...config,
});
expect(result).toBeDefined();
expect(result.content).toBeDefined();
expect(result.content[0].text).toContain(config.branch);
}
});
it("should handle custom domain configurations", async () => {
const customDomains = [
"example.com",
"docs.example.com",
"sub.domain.example.org",
"localhost", // Edge case
];
for (const customDomain of customDomains) {
const result = await deployPages({
repository: "user/test-repo",
ssg: "jekyll",
customDomain,
});
expect(result).toBeDefined();
expect(result.content).toBeDefined();
expect(result.content[0].text).toContain(customDomain);
}
});
it("should generate workflows for all supported SSGs", async () => {
const ssgTypes = ["jekyll", "hugo", "docusaurus", "mkdocs", "eleventy"];
for (const ssg of ssgTypes) {
const result = await deployPages({
repository: "user/test-repo",
ssg: ssg as any,
});
expect(result).toBeDefined();
expect(result.content).toBeDefined();
expect(result.content[0].text).toContain(ssg);
expect(result.content).toBeDefined();
}
});
});
describe("Input Validation", () => {
it("should validate string inputs for XSS and injection attacks", async () => {
const maliciousInputs = [
'<script>alert("xss")</script>',
"${process.env}",
"../../../etc/passwd",
"test`rm -rf /`test",
"test && rm -rf /",
"test; cat /etc/passwd",
];
for (const maliciousInput of maliciousInputs) {
// Test with different tools
const result = await generateConfig({
ssg: "jekyll",
projectName: maliciousInput,
outputPath: tempDir,
projectDescription: maliciousInput,
});
expect(result).toBeDefined();
expect(result.content).toBeDefined();
// Should sanitize or escape malicious content
}
});
it("should handle Unicode and international characters", async () => {
const unicodeInputs = [
"тест", // Cyrillic
"测试", // Chinese
"🚀📊", // Emojis
"café", // Accented characters
"مشروع", // Arabic
];
for (const unicodeInput of unicodeInputs) {
const result = await generateConfig({
ssg: "jekyll",
projectName: unicodeInput,
outputPath: tempDir,
});
expect(result).toBeDefined();
expect(result.content).toBeDefined();
}
});
it("should handle extremely large parameter values", async () => {
const largeDescription = "A".repeat(10000);
const result = await generateConfig({
ssg: "jekyll",
projectName: "test",
outputPath: tempDir,
projectDescription: largeDescription,
});
expect(result).toBeDefined();
expect(result.content).toBeDefined();
});
});
describe("Concurrent Operations", () => {
it("should handle multiple simultaneous tool calls", async () => {
// Create test directories
const dirs = await Promise.all([
fs.mkdir(join(tempDir, "test1"), { recursive: true }),
fs.mkdir(join(tempDir, "test2"), { recursive: true }),
fs.mkdir(join(tempDir, "test3"), { recursive: true }),
]);
// Run multiple operations concurrently
const promises = [
generateConfig({
ssg: "jekyll",
projectName: "test1",
outputPath: join(tempDir, "test1"),
}),
generateConfig({
ssg: "hugo",
projectName: "test2",
outputPath: join(tempDir, "test2"),
}),
generateConfig({
ssg: "docusaurus",
projectName: "test3",
outputPath: join(tempDir, "test3"),
}),
];
const results = await Promise.all(promises);
results.forEach((result) => {
expect(result).toBeDefined();
expect(result.content).toBeDefined();
});
});
it("should handle resource contention gracefully", async () => {
// Multiple operations on same directory
const promises = Array(5)
.fill(null)
.map((_, i) =>
setupStructure({
path: join(tempDir, `concurrent-${i}`),
ssg: "jekyll",
includeExamples: false,
}),
);
// Create directories first
await Promise.all(
promises.map((_, i) =>
fs.mkdir(join(tempDir, `concurrent-${i}`), { recursive: true }),
),
);
const results = await Promise.allSettled(promises);
// All should succeed or fail gracefully
results.forEach((result) => {
if (result.status === "fulfilled") {
expect(result.value.content).toBeDefined();
} else {
expect(result.reason).toBeInstanceOf(Error);
}
});
});
});
});
```
--------------------------------------------------------------------------------
/tests/memory/storage.test.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Comprehensive unit tests for Memory Storage System
* Tests JSONL storage, indexing, CRUD operations, and performance
* Part of Issue #54 - Core Memory System Unit Tests
*/
import { promises as fs } from "fs";
import path from "path";
import os from "os";
import { JSONLStorage, MemoryEntry } from "../../src/memory/storage.js";
describe("JSONLStorage", () => {
let storage: JSONLStorage;
let tempDir: string;
beforeEach(async () => {
// Create unique temp directory for each test
tempDir = path.join(
os.tmpdir(),
`memory-storage-test-${Date.now()}-${Math.random()
.toString(36)
.substr(2, 9)}`,
);
await fs.mkdir(tempDir, { recursive: true });
storage = new JSONLStorage(tempDir);
await storage.initialize();
});
afterEach(async () => {
// Cleanup temp directory
try {
await fs.rm(tempDir, { recursive: true, force: true });
} catch (error) {
// Ignore cleanup errors
}
});
describe("Basic Storage Operations", () => {
test("should create storage instance and initialize", async () => {
expect(storage).toBeDefined();
expect(storage).toBeInstanceOf(JSONLStorage);
// Verify storage directory was created
const stats = await fs.stat(tempDir);
expect(stats.isDirectory()).toBe(true);
});
test("should append and retrieve memory entries", async () => {
const entry = {
timestamp: new Date().toISOString(),
type: "analysis" as const,
data: { project: "test-project", result: "success" },
metadata: { projectId: "test-proj", tags: ["test"] },
};
const stored = await storage.append(entry);
expect(stored.id).toBeDefined();
expect(stored.checksum).toBeDefined();
expect(stored.type).toBe("analysis");
expect(stored.data).toEqual(entry.data);
});
test("should handle different entry types", async () => {
const entryTypes: Array<MemoryEntry["type"]> = [
"analysis",
"recommendation",
"deployment",
"configuration",
"interaction",
];
for (const type of entryTypes) {
const entry = {
timestamp: new Date().toISOString(),
type,
data: { testType: type },
metadata: { projectId: "test-types" },
};
const stored = await storage.append(entry);
expect(stored.type).toBe(type);
expect(stored.data.testType).toBe(type);
}
});
test("should generate unique IDs for different entries", async () => {
const entry1 = {
timestamp: new Date().toISOString(),
type: "analysis" as const,
data: { project: "test-1" },
metadata: { projectId: "test-1" },
};
const entry2 = {
timestamp: new Date().toISOString(),
type: "analysis" as const,
data: { project: "test-2" },
metadata: { projectId: "test-2" },
};
const stored1 = await storage.append(entry1);
const stored2 = await storage.append(entry2);
expect(stored1.id).not.toBe(stored2.id);
expect(stored1.checksum).not.toBe(stored2.checksum);
});
test("should generate same ID for identical entries", async () => {
const entry = {
timestamp: new Date().toISOString(),
type: "analysis" as const,
data: { project: "identical-test" },
metadata: { projectId: "identical" },
};
const stored1 = await storage.append(entry);
const stored2 = await storage.append(entry);
expect(stored1.id).toBe(stored2.id);
expect(stored1.checksum).toBe(stored2.checksum);
});
});
describe("File Management", () => {
test("should create proper JSONL file structure", async () => {
const entry = {
timestamp: "2024-01-15T10:30:00.000Z",
type: "analysis" as const,
data: { fileTest: true },
metadata: { projectId: "file-proj" },
};
await storage.append(entry);
// Check that file was created with expected name pattern
const files = await fs.readdir(tempDir);
const jsonlFiles = files.filter((f) => f.endsWith(".jsonl"));
expect(jsonlFiles.length).toBeGreaterThan(0);
// Should have analysis_2024_01.jsonl
const expectedFile = "analysis_2024_01.jsonl";
expect(jsonlFiles).toContain(expectedFile);
// Verify file contains the entry
const filePath = path.join(tempDir, expectedFile);
const content = await fs.readFile(filePath, "utf-8");
const lines = content.trim().split("\n");
expect(lines.length).toBeGreaterThan(0);
const parsedEntry = JSON.parse(lines[0]);
expect(parsedEntry.data.fileTest).toBe(true);
});
test("should organize files by type and date", async () => {
const entries = [
{
timestamp: "2024-01-15T10:30:00.000Z",
type: "analysis" as const,
data: { test: "analysis-jan" },
metadata: { projectId: "date-test" },
},
{
timestamp: "2024-02-15T10:30:00.000Z",
type: "analysis" as const,
data: { test: "analysis-feb" },
metadata: { projectId: "date-test" },
},
{
timestamp: "2024-01-15T10:30:00.000Z",
type: "recommendation" as const,
data: { test: "recommendation-jan" },
metadata: { projectId: "date-test" },
},
];
for (const entry of entries) {
await storage.append(entry);
}
const files = await fs.readdir(tempDir);
const jsonlFiles = files.filter((f) => f.endsWith(".jsonl"));
expect(jsonlFiles).toContain("analysis_2024_01.jsonl");
expect(jsonlFiles).toContain("analysis_2024_02.jsonl");
expect(jsonlFiles).toContain("recommendation_2024_01.jsonl");
});
test("should handle index persistence", async () => {
const entry = {
timestamp: new Date().toISOString(),
type: "configuration" as const,
data: { indexTest: true },
metadata: { projectId: "index-test" },
};
await storage.append(entry);
// Check that index file was created
const indexPath = path.join(tempDir, ".index.json");
const indexExists = await fs
.access(indexPath)
.then(() => true)
.catch(() => false);
expect(indexExists).toBe(true);
// Index should contain entry information
const indexContent = await fs.readFile(indexPath, "utf-8");
const indexData = JSON.parse(indexContent);
expect(typeof indexData).toBe("object");
expect(Array.isArray(indexData.entries)).toBe(true);
expect(indexData.entries.length).toBeGreaterThan(0);
});
});
describe("Data Integrity", () => {
test("should generate checksums for data integrity", async () => {
const entry = {
timestamp: new Date().toISOString(),
type: "deployment" as const,
data: { integrity: "test", checkData: "important" },
metadata: { projectId: "integrity-test" },
};
const stored = await storage.append(entry);
expect(stored.checksum).toBeDefined();
expect(typeof stored.checksum).toBe("string");
expect(stored.checksum?.length).toBe(32); // MD5 hash length
});
test("should handle entry timestamps correctly", async () => {
const customTimestamp = "2024-06-15T14:30:00.000Z";
const entry = {
timestamp: customTimestamp,
type: "interaction" as const,
data: { timestampTest: true },
metadata: { projectId: "timestamp-test" },
};
const stored = await storage.append(entry);
expect(stored.timestamp).toBe(customTimestamp);
});
test("should auto-generate timestamp if not provided", async () => {
const entry = {
timestamp: "", // Will be auto-generated
type: "analysis" as const,
data: { autoTimestamp: true },
metadata: { projectId: "auto-timestamp-test" },
};
const beforeTime = new Date().toISOString();
const stored = await storage.append(entry);
const afterTime = new Date().toISOString();
expect(stored.timestamp).toBeDefined();
expect(stored.timestamp >= beforeTime).toBe(true);
expect(stored.timestamp <= afterTime).toBe(true);
});
});
describe("Metadata Handling", () => {
test("should preserve metadata structure", async () => {
const metadata = {
projectId: "metadata-test",
repository: "github.com/test/repo",
ssg: "docusaurus",
tags: ["frontend", "typescript"],
version: "1.0.0",
};
const entry = {
timestamp: new Date().toISOString(),
type: "recommendation" as const,
data: { recommendation: "use-docusaurus" },
metadata,
};
const stored = await storage.append(entry);
expect(stored.metadata).toEqual(metadata);
expect(stored.metadata.projectId).toBe("metadata-test");
expect(stored.metadata.tags).toEqual(["frontend", "typescript"]);
});
test("should handle optional metadata fields", async () => {
const entry = {
timestamp: new Date().toISOString(),
type: "analysis" as const,
data: { minimal: true },
metadata: { projectId: "minimal-test" },
};
const stored = await storage.append(entry);
expect(stored.metadata.projectId).toBe("minimal-test");
expect(stored.metadata.repository).toBeUndefined();
expect(stored.metadata.tags).toBeUndefined();
});
test("should handle compression metadata", async () => {
const metadata = {
projectId: "compression-test",
compressed: true,
compressionType: "gzip",
compressedAt: new Date().toISOString(),
originalSize: 1024,
};
const entry = {
timestamp: new Date().toISOString(),
type: "configuration" as const,
data: { compressed: "data" },
metadata,
};
const stored = await storage.append(entry);
expect(stored.metadata.compressed).toBe(true);
expect(stored.metadata.compressionType).toBe("gzip");
expect(stored.metadata.originalSize).toBe(1024);
});
});
describe("Performance and Concurrency", () => {
test("should handle concurrent writes safely", async () => {
const concurrentWrites = 10;
const promises: Promise<MemoryEntry>[] = [];
// Create multiple concurrent append operations
for (let i = 0; i < concurrentWrites; i++) {
const promise = storage.append({
timestamp: new Date().toISOString(),
type: "analysis",
data: { index: i, concurrent: true },
metadata: { projectId: "concurrent-test" },
});
promises.push(promise);
}
const results = await Promise.all(promises);
expect(results).toHaveLength(concurrentWrites);
// All IDs should be unique (since data is different)
const ids = results.map((r) => r.id);
expect(new Set(ids).size).toBe(concurrentWrites);
// All should have correct structure
results.forEach((result, index) => {
expect(result.data.index).toBe(index);
expect(result.metadata.projectId).toBe("concurrent-test");
});
});
test("should handle bulk append operations efficiently", async () => {
const startTime = Date.now();
const bulkSize = 50;
// Append bulk entries
for (let i = 0; i < bulkSize; i++) {
await storage.append({
timestamp: new Date().toISOString(),
type: i % 2 === 0 ? "analysis" : "recommendation",
data: { index: i, bulk: true },
metadata: {
projectId: "bulk-test",
},
});
}
const appendTime = Date.now() - startTime;
expect(appendTime).toBeLessThan(5000); // Should complete within 5 seconds
// Verify files were created
const files = await fs.readdir(tempDir);
const jsonlFiles = files.filter((f) => f.endsWith(".jsonl"));
expect(jsonlFiles.length).toBeGreaterThan(0);
});
test("should maintain performance with large data entries", async () => {
const largeData = {
description: "x".repeat(10000), // 10KB string
array: new Array(1000).fill(0).map((_, i) => ({
id: i,
data: `large-item-${i}`,
metadata: { processed: true },
})),
};
const entry = {
timestamp: new Date().toISOString(),
type: "analysis" as const,
data: largeData,
metadata: { projectId: "large-test" },
};
const startTime = Date.now();
const stored = await storage.append(entry);
const appendTime = Date.now() - startTime;
expect(appendTime).toBeLessThan(1000); // Should append within 1 second
expect(stored.data.description).toHaveLength(10000);
expect(stored.data.array).toHaveLength(1000);
});
});
describe("Error Handling and Edge Cases", () => {
test("should handle special characters in data", async () => {
const entry = {
timestamp: new Date().toISOString(),
type: "interaction" as const,
data: {
message: "Special chars: äöü 🚀 @#$%^&*()[]{}|\\:\";'<>?,./`~",
unicode: "测试中文字符",
emoji: "🎉🔥💯⚡🚀",
json: { nested: { deeply: { value: "test" } } },
},
metadata: {
projectId: "special-chars-项目-🏗️",
tags: ["special", "unicode", "特殊字符"],
},
};
const stored = await storage.append(entry);
expect(stored.data.message).toContain("Special chars");
expect(stored.data.unicode).toBe("测试中文字符");
expect(stored.data.emoji).toBe("🎉🔥💯⚡🚀");
expect(stored.metadata.projectId).toBe("special-chars-项目-🏗️");
});
test("should handle empty data gracefully", async () => {
const entry = {
timestamp: new Date().toISOString(),
type: "configuration" as const,
data: {},
metadata: { projectId: "empty-test" },
};
const stored = await storage.append(entry);
expect(stored.data).toEqual({});
expect(stored.id).toBeDefined();
expect(stored.checksum).toBeDefined();
});
test("should handle missing storage directory", async () => {
const nonExistentDir = path.join(
tempDir,
"non-existent",
"deeply",
"nested",
);
const newStorage = new JSONLStorage(nonExistentDir);
// Should create directory during initialization
await newStorage.initialize();
const stats = await fs.stat(nonExistentDir);
expect(stats.isDirectory()).toBe(true);
// Should be able to append entries
const entry = {
timestamp: new Date().toISOString(),
type: "analysis" as const,
data: { recovery: true },
metadata: { projectId: "recovery-test" },
};
const stored = await newStorage.append(entry);
expect(stored.data.recovery).toBe(true);
});
test("should maintain data consistency across operations", async () => {
const entries = [
{
timestamp: new Date().toISOString(),
type: "analysis" as const,
data: { step: 1, consistency: "test" },
metadata: { projectId: "consistency-test" },
},
{
timestamp: new Date().toISOString(),
type: "recommendation" as const,
data: { step: 2, consistency: "test" },
metadata: { projectId: "consistency-test" },
},
{
timestamp: new Date().toISOString(),
type: "deployment" as const,
data: { step: 3, consistency: "test" },
metadata: { projectId: "consistency-test" },
},
];
const storedEntries = [];
for (const entry of entries) {
const stored = await storage.append(entry);
storedEntries.push(stored);
}
// Verify all entries were stored correctly
expect(storedEntries).toHaveLength(3);
storedEntries.forEach((stored, index) => {
expect(stored.data.step).toBe(index + 1);
expect(stored.metadata.projectId).toBe("consistency-test");
expect(stored.id).toBeDefined();
expect(stored.checksum).toBeDefined();
});
// All IDs should be unique
const ids = storedEntries.map((s) => s.id);
expect(new Set(ids).size).toBe(3);
});
});
});
```
--------------------------------------------------------------------------------
/src/tools/generate-config.ts:
--------------------------------------------------------------------------------
```typescript
import { promises as fs } from "fs";
import path from "path";
import { z } from "zod";
import { MCPToolResponse, formatMCPResponse } from "../types/api.js";

// Zod schema validating the raw (unknown) arguments passed to generateConfig.
// `inputSchema.parse` throws a ZodError for an unsupported SSG name or a
// missing/invalid field, so generateConfig rejects bad input up front.
const inputSchema = z.object({
  ssg: z.enum(["jekyll", "hugo", "docusaurus", "mkdocs", "eleventy"]),
  projectName: z.string(),
  projectDescription: z.string().optional(),
  outputPath: z.string(),
});
/**
* Generates configuration files for a specified static site generator.
*
* Creates comprehensive configuration files tailored to the chosen SSG, including
* build configurations, theme settings, plugin configurations, and deployment
* settings. The generated configurations are optimized based on the project
* characteristics and follow best practices for each SSG.
*
* @param args - The input arguments for configuration generation
* @param args.ssg - The static site generator to generate configuration for
* @param args.projectName - The name of the project for configuration customization
* @param args.projectDescription - Optional description for the project
* @param args.outputPath - The directory path where configuration files should be written
*
* @returns Promise resolving to configuration generation results
* @returns content - Array containing the generation results in MCP tool response format
*
* @throws {Error} When the output path is inaccessible or invalid
* @throws {Error} When the SSG type is unsupported
* @throws {Error} When configuration file generation fails
*
* @example
* ```typescript
* // Generate Docusaurus configuration
* const result = await generateConfig({
* ssg: "docusaurus",
* projectName: "My Documentation",
* projectDescription: "Comprehensive project documentation",
* outputPath: "./docs"
* });
*
* // Generate Hugo configuration
* const hugoConfig = await generateConfig({
* ssg: "hugo",
* projectName: "My Site",
* outputPath: "./site"
* });
* ```
*
* @since 1.0.0
*/
export async function generateConfig(
args: unknown,
): Promise<{ content: any[] }> {
const startTime = Date.now();
const { ssg, projectName, projectDescription, outputPath } =
inputSchema.parse(args);
try {
// Ensure output directory exists
await fs.mkdir(outputPath, { recursive: true });
let configFiles: Array<{ path: string; content: string }> = [];
switch (ssg) {
case "docusaurus":
configFiles = await generateDocusaurusConfig(
projectName,
projectDescription || "",
);
break;
case "mkdocs":
configFiles = await generateMkDocsConfig(
projectName,
projectDescription || "",
);
break;
case "hugo":
configFiles = await generateHugoConfig(
projectName,
projectDescription || "",
);
break;
case "jekyll":
configFiles = await generateJekyllConfig(
projectName,
projectDescription || "",
);
break;
case "eleventy":
configFiles = await generateEleventyConfig(
projectName,
projectDescription || "",
);
break;
}
// Write all config files
for (const file of configFiles) {
const filePath = path.join(outputPath, file.path);
await fs.mkdir(path.dirname(filePath), { recursive: true });
await fs.writeFile(filePath, file.content);
}
const configResult = {
ssg,
projectName,
projectDescription,
outputPath,
filesCreated: configFiles.map((f) => f.path),
totalFiles: configFiles.length,
};
const response: MCPToolResponse<typeof configResult> = {
success: true,
data: configResult,
metadata: {
toolVersion: "1.0.0",
executionTime: Date.now() - startTime,
timestamp: new Date().toISOString(),
},
recommendations: [
{
type: "info",
title: "Configuration Complete",
description: `Generated ${configFiles.length} configuration files for ${ssg}`,
},
],
nextSteps: [
{
action: "Setup Documentation Structure",
toolRequired: "setup_structure",
description: `Create Diataxis-compliant documentation structure`,
priority: "high",
},
],
};
return formatMCPResponse(response);
} catch (error) {
const errorResponse: MCPToolResponse = {
success: false,
error: {
code: "CONFIG_GENERATION_FAILED",
message: `Failed to generate config: ${error}`,
resolution: "Ensure output path is writable and SSG type is supported",
},
metadata: {
toolVersion: "1.0.0",
executionTime: Date.now() - startTime,
timestamp: new Date().toISOString(),
},
};
return formatMCPResponse(errorResponse);
}
}
/**
 * Builds the full Docusaurus v3 scaffold: site config, package.json,
 * sidebars, custom CSS, plus containerized docs-testing helpers
 * (Dockerfile, shell script, docker-compose, .dockerignore).
 *
 * @param projectName - Human-readable project name (also slugified for npm/repo fields)
 * @param projectDescription - Tagline shown in the site header
 * @returns Relative file paths with their generated contents
 */
async function generateDocusaurusConfig(
  projectName: string,
  projectDescription: string,
): Promise<Array<{ path: string; content: string }>> {
  // Fix: was /\\s+/g, which matches a literal backslash + "s" rather than
  // whitespace, so names containing spaces were never slugified.
  const slug = projectName.toLowerCase().replace(/\s+/g, "-");
  return [
    {
      path: "docusaurus.config.js",
      content: `module.exports = {
  title: '${projectName}',
  tagline: '${projectDescription}',
  url: 'https://your-domain.com',
  baseUrl: '/your-repo/',
  onBrokenLinks: 'throw',
  onBrokenMarkdownLinks: 'warn',
  favicon: 'img/favicon.ico',
  organizationName: 'your-org',
  projectName: '${slug}',
  presets: [
    [
      'classic',
      {
        docs: {
          sidebarPath: require.resolve('./sidebars.js'),
          editUrl: 'https://github.com/your-org/your-repo/tree/main/docs/',
          path: '../docs',
          routeBasePath: '/',
        },
        theme: {
          customCss: require.resolve('./src/css/custom.css'),
        },
        blog: false,
      },
    ],
  ],
  themeConfig: {
    navbar: {
      title: '${projectName}',
      items: [
        {
          type: 'doc',
          docId: 'index',
          position: 'left',
          label: 'Documentation',
        },
        {
          href: 'https://github.com/your-org/your-repo',
          label: 'GitHub',
          position: 'right',
        },
      ],
    },
  },
};`,
    },
    {
      path: "package.json",
      content: JSON.stringify(
        {
          name: `${slug}-docs`,
          version: "0.0.0",
          private: true,
          scripts: {
            docusaurus: "docusaurus",
            start: "docusaurus start",
            build: "docusaurus build",
            swizzle: "docusaurus swizzle",
            deploy: "docusaurus deploy",
            clear: "docusaurus clear",
            serve: "docusaurus serve --port 3001",
          },
          dependencies: {
            "@docusaurus/core": "^3.0.0",
            "@docusaurus/preset-classic": "^3.0.0",
            "@mdx-js/react": "^3.0.0",
            clsx: "^2.0.0",
            "prism-react-renderer": "^2.1.0",
            react: "^18.0.0",
            "react-dom": "^18.0.0",
          },
          devDependencies: {
            "@docusaurus/types": "^3.0.0",
          },
        },
        null,
        2,
      ),
    },
    {
      path: "sidebars.js",
      content: `/**
 * Creating a sidebar enables you to:
 - create an ordered group of docs
 - render a sidebar for each doc of that group
 - provide next/previous navigation

 The sidebars can be generated from the filesystem, or explicitly defined here.

 Create as many sidebars as you want.
 */

// @ts-check

/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */
const sidebars = {
  // Main documentation sidebar
  docs: [
    'index',
    {
      type: 'category',
      label: 'Tutorials',
      items: [
        'tutorials/getting-started',
      ],
    },
    {
      type: 'category',
      label: 'How-to Guides',
      items: [
        'how-to/prompting-guide',
      ],
    },
    {
      type: 'category',
      label: 'Reference',
      items: [
        'reference/mcp-tools',
      ],
    },
    {
      type: 'category',
      label: 'Explanation',
      items: [
        'explanation/architecture',
      ],
    },
  ],
};

module.exports = sidebars;`,
    },
    {
      path: "src/css/custom.css",
      content: `/**
 * Any CSS included here will be global. The classic template
 * bundles Infima by default. Infima is a CSS framework designed to
 * work well for content-centric websites.
 */

/* You can override the default Infima variables here. */
:root {
  --ifm-color-primary: #2e8555;
  --ifm-color-primary-dark: #29784c;
  --ifm-color-primary-darker: #277148;
  --ifm-color-primary-darkest: #205d3b;
  --ifm-color-primary-light: #33925d;
  --ifm-color-primary-lighter: #359962;
  --ifm-color-primary-lightest: #3cad6e;
  --ifm-code-font-size: 95%;
  --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1);
}

/* For readability concerns, you should choose a lighter palette in dark mode. */
[data-theme='dark'] {
  --ifm-color-primary: #25c2a0;
  --ifm-color-primary-dark: #21af90;
  --ifm-color-primary-darker: #1fa588;
  --ifm-color-primary-darkest: #1a8870;
  --ifm-color-primary-light: #29d5b0;
  --ifm-color-primary-lighter: #32d8b4;
  --ifm-color-primary-lightest: #4fddbf;
  --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3);
}`,
    },
    {
      path: "Dockerfile.docs",
      content: `# Documentation testing container
# Generated by DocuMCP
FROM node:20-alpine

WORKDIR /app

# Copy package files
COPY docs-site/package*.json ./docs-site/
COPY docs-site/docusaurus.config.js ./docs-site/
COPY docs-site/sidebars.js ./docs-site/
COPY docs-site/src ./docs-site/src/

# Copy documentation source
COPY docs ./docs/

# Install dependencies
RUN cd docs-site && npm install

# Build documentation
RUN cd docs-site && npm run build

# Expose port
EXPOSE 3001

# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
  CMD curl -f http://localhost:3001/ || exit 1

# Start server
CMD ["sh", "-c", "cd docs-site && npm run serve"]`,
    },
    {
      path: "test-docs-local.sh",
      content: `#!/bin/bash
# Containerized documentation testing script
# Generated by DocuMCP

set -e

# Detect container runtime
if command -v podman &> /dev/null; then
  CONTAINER_CMD="podman"
elif command -v docker &> /dev/null; then
  CONTAINER_CMD="docker"
else
  echo "❌ Neither Podman nor Docker found. Please install one of them."
  echo "📖 Podman: https://podman.io/getting-started/installation"
  echo "📖 Docker: https://docs.docker.com/get-docker/"
  exit 1
fi

echo "🔧 Using $CONTAINER_CMD for containerized documentation testing..."

# Build the documentation container
echo "📦 Building documentation container..."
$CONTAINER_CMD build -f Dockerfile.docs -t documcp-docs .

if [ $? -ne 0 ]; then
  echo "❌ Container build failed!"
  exit 1
fi

echo "✅ Container build successful!"

# Run link checking outside container (faster)
echo "🔗 Checking for broken links..."
if command -v markdown-link-check &> /dev/null; then
  find docs -name "*.md" -exec markdown-link-check {} \\;
else
  echo "⚠️ markdown-link-check not found. Install with: npm install -g markdown-link-check"
fi

# Start the container
echo ""
echo "🚀 Starting documentation server in container..."
echo "📖 Documentation will be available at: http://localhost:3001"
echo "💡 Press Ctrl+C to stop the server"
echo ""

# Run container with port mapping and cleanup
$CONTAINER_CMD run --rm -p 3001:3001 --name documcp-docs-test documcp-docs`,
    },
    {
      path: "docker-compose.docs.yml",
      content: `# Docker Compose for documentation testing
# Generated by DocuMCP
version: '3.8'

services:
  docs:
    build:
      context: .
      dockerfile: Dockerfile.docs
    ports:
      - "3001:3001"
    container_name: documcp-docs
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3001/"]
      interval: 30s
      timeout: 10s
      retries: 3
    volumes:
      # Mount docs directory for live editing (optional)
      - ./docs:/app/docs:ro
    environment:
      - NODE_ENV=production`,
    },
    {
      path: ".dockerignore",
      content: `# Documentation container ignore file
# Generated by DocuMCP

# Node modules (will be installed in container)
node_modules/
docs-site/node_modules/
docs-site/.docusaurus/
docs-site/build/

# Git files
.git/
.gitignore

# Development files
.env*
*.log
npm-debug.log*

# OS files
.DS_Store
Thumbs.db

# IDE files
.vscode/
.idea/
*.swp
*.swo

# Build artifacts
dist/
build/
*.tgz

# Test files
coverage/
.nyc_output/

# Documentation build (will be generated)
docs-site/build/`,
    },
  ];
}
/**
 * Builds MkDocs configuration files: a Material-themed mkdocs.yml with a
 * Diataxis-shaped nav tree, and the Python requirements needed to build it.
 *
 * @param projectName - Value for `site_name`
 * @param projectDescription - Value for `site_description`
 * @returns Relative file paths with their generated contents
 */
async function generateMkDocsConfig(
  projectName: string,
  projectDescription: string,
): Promise<Array<{ path: string; content: string }>> {
  // Main site configuration: Material theme with light/dark palettes,
  // search + mermaid plugins, and common markdown extensions.
  const siteConfig = {
    path: "mkdocs.yml",
    content: `site_name: ${projectName}
site_description: ${projectDescription}
site_url: https://your-domain.com

theme:
  name: material
  features:
    - navigation.tabs
    - navigation.sections
    - navigation.expand
    - navigation.top
    - search.suggest
    - search.highlight
  palette:
    - scheme: default
      primary: indigo
      accent: indigo
      toggle:
        icon: material/brightness-7
        name: Switch to dark mode
    - scheme: slate
      primary: indigo
      accent: indigo
      toggle:
        icon: material/brightness-4
        name: Switch to light mode

plugins:
  - search
  - mermaid2

markdown_extensions:
  - pymdownx.highlight
  - pymdownx.superfences
  - pymdownx.tabbed
  - pymdownx.details
  - admonition
  - toc:
      permalink: true

nav:
  - Home: index.md
  - Tutorials:
      - Getting Started: tutorials/getting-started.md
  - How-To Guides:
      - Installation: how-to/installation.md
  - Reference:
      - API: reference/api.md
  - Explanation:
      - Architecture: explanation/architecture.md`,
  };

  // Python dependencies required to build the site.
  const pythonRequirements = {
    path: "requirements.txt",
    content: `mkdocs>=1.5.0
mkdocs-material>=9.0.0
mkdocs-mermaid2-plugin>=1.0.0`,
  };

  return [siteConfig, pythonRequirements];
}
/**
 * Builds the Hugo site configuration (hugo.toml) with a Diataxis-aligned
 * main menu (Tutorials / How-To / Reference / Explanation).
 *
 * @param projectName - Value for the TOML `title`
 * @param projectDescription - Value for `params.description`
 * @returns Relative file paths with their generated contents
 */
async function generateHugoConfig(
  projectName: string,
  projectDescription: string,
): Promise<Array<{ path: string; content: string }>> {
  // Top navigation entries in display order; weight controls ordering in Hugo.
  const menuEntries: Array<[string, string, number]> = [
    ["Tutorials", "/tutorials/", 10],
    ["How-To", "/how-to/", 20],
    ["Reference", "/reference/", 30],
    ["Explanation", "/explanation/", 40],
  ];
  const menuToml = menuEntries
    .map(
      ([name, url, weight]) =>
        `[[menu.main]]
name = '${name}'
url = '/${url.replace(/^\//, "")}'
weight = ${weight}`,
    )
    .join("\n");

  const hugoToml = `baseURL = 'https://your-domain.com/'
languageCode = 'en-us'
title = '${projectName}'

[params]
description = '${projectDescription}'

${menuToml}`;

  return [{ path: "hugo.toml", content: hugoToml }];
}
/**
 * Builds Jekyll configuration files: _config.yml (minima theme, SEO/feed/
 * sitemap plugins, Diataxis collections) and a matching Gemfile.
 *
 * @param projectName - Value for the site `title`
 * @param projectDescription - Value for the site `description`
 * @returns Relative file paths with their generated contents
 */
async function generateJekyllConfig(
  projectName: string,
  projectDescription: string,
): Promise<Array<{ path: string; content: string }>> {
  // Site configuration with one output collection per Diataxis category.
  const configYml = {
    path: "_config.yml",
    content: `title: ${projectName}
description: ${projectDescription}
baseurl: ""
url: "https://your-domain.com"

theme: minima

plugins:
  - jekyll-feed
  - jekyll-seo-tag
  - jekyll-sitemap

collections:
  tutorials:
    output: true
    permalink: /tutorials/:name
  how-to:
    output: true
    permalink: /how-to/:name
  reference:
    output: true
    permalink: /reference/:name
  explanation:
    output: true
    permalink: /explanation/:name`,
  };

  // Ruby dependencies for building the site locally.
  const gemfile = {
    path: "Gemfile",
    content: `source "https://rubygems.org"

gem "jekyll", "~> 4.3"
gem "minima", "~> 2.5"

group :jekyll_plugins do
  gem "jekyll-feed", "~> 0.12"
  gem "jekyll-seo-tag", "~> 2.8"
  gem "jekyll-sitemap", "~> 1.4"
end`,
  };

  return [configYml, gemfile];
}
/**
 * Builds Eleventy (11ty) configuration files: .eleventy.js and package.json.
 *
 * @param projectName - Slugified for the npm package name
 * @param projectDescription - Value for package.json `description`
 * @returns Relative file paths with their generated contents
 */
async function generateEleventyConfig(
  projectName: string,
  projectDescription: string,
): Promise<Array<{ path: string; content: string }>> {
  return [
    {
      path: ".eleventy.js",
      content: `module.exports = function(eleventyConfig) {
  eleventyConfig.addPassthroughCopy("css");

  return {
    dir: {
      input: "src",
      output: "_site",
      includes: "_includes",
      layouts: "_layouts"
    },
    templateFormats: ["md", "njk", "html"],
    markdownTemplateEngine: "njk",
    htmlTemplateEngine: "njk"
  };
};`,
    },
    {
      path: "package.json",
      content: JSON.stringify(
        {
          // Fix: was /\\s+/g, which matches a literal backslash + "s" rather
          // than whitespace, producing invalid npm names for projects with
          // spaces in their name.
          name: projectName.toLowerCase().replace(/\s+/g, "-"),
          version: "1.0.0",
          description: projectDescription,
          scripts: {
            build: "eleventy",
            serve: "eleventy --serve",
            debug: "DEBUG=* eleventy",
          },
          devDependencies: {
            "@11ty/eleventy": "^2.0.0",
          },
        },
        null,
        2,
      ),
    },
  ];
}
```