This is page 16 of 20. Use http://codebase.md/tosin2013/documcp?lines=false&page={x} to view the full context.
# Directory Structure
```
├── .dockerignore
├── .eslintignore
├── .eslintrc.json
├── .github
│ ├── agents
│ │ ├── documcp-ast.md
│ │ ├── documcp-deploy.md
│ │ ├── documcp-memory.md
│ │ ├── documcp-test.md
│ │ └── documcp-tool.md
│ ├── copilot-instructions.md
│ ├── dependabot.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── automated-changelog.md
│ │ ├── bug_report.md
│ │ ├── bug_report.yml
│ │ ├── documentation_issue.md
│ │ ├── feature_request.md
│ │ ├── feature_request.yml
│ │ ├── npm-publishing-fix.md
│ │ └── release_improvements.md
│ ├── PULL_REQUEST_TEMPLATE.md
│ ├── release-drafter.yml
│ └── workflows
│ ├── auto-merge.yml
│ ├── ci.yml
│ ├── codeql.yml
│ ├── dependency-review.yml
│ ├── deploy-docs.yml
│ ├── README.md
│ ├── release-drafter.yml
│ └── release.yml
├── .gitignore
├── .husky
│ ├── commit-msg
│ └── pre-commit
├── .linkcheck.config.json
├── .markdown-link-check.json
├── .nvmrc
├── .pre-commit-config.yaml
├── .versionrc.json
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── CONTRIBUTING.md
├── docker-compose.docs.yml
├── Dockerfile.docs
├── docs
│ ├── .docusaurus
│ │ ├── docusaurus-plugin-content-docs
│ │ │ └── default
│ │ │ └── __mdx-loader-dependency.json
│ │ └── docusaurus-plugin-content-pages
│ │ └── default
│ │ └── __plugin.json
│ ├── adrs
│ │ ├── 001-mcp-server-architecture.md
│ │ ├── 002-repository-analysis-engine.md
│ │ ├── 003-static-site-generator-recommendation-engine.md
│ │ ├── 004-diataxis-framework-integration.md
│ │ ├── 005-github-pages-deployment-automation.md
│ │ ├── 006-mcp-tools-api-design.md
│ │ ├── 007-mcp-prompts-and-resources-integration.md
│ │ ├── 008-intelligent-content-population-engine.md
│ │ ├── 009-content-accuracy-validation-framework.md
│ │ ├── 010-mcp-resource-pattern-redesign.md
│ │ └── README.md
│ ├── api
│ │ ├── .nojekyll
│ │ ├── assets
│ │ │ ├── hierarchy.js
│ │ │ ├── highlight.css
│ │ │ ├── icons.js
│ │ │ ├── icons.svg
│ │ │ ├── main.js
│ │ │ ├── navigation.js
│ │ │ ├── search.js
│ │ │ └── style.css
│ │ ├── hierarchy.html
│ │ ├── index.html
│ │ ├── modules.html
│ │ └── variables
│ │ └── TOOLS.html
│ ├── assets
│ │ └── logo.svg
│ ├── development
│ │ └── MCP_INSPECTOR_TESTING.md
│ ├── docusaurus.config.js
│ ├── explanation
│ │ ├── architecture.md
│ │ └── index.md
│ ├── guides
│ │ ├── link-validation.md
│ │ ├── playwright-integration.md
│ │ └── playwright-testing-workflow.md
│ ├── how-to
│ │ ├── analytics-setup.md
│ │ ├── custom-domains.md
│ │ ├── documentation-freshness-tracking.md
│ │ ├── github-pages-deployment.md
│ │ ├── index.md
│ │ ├── local-testing.md
│ │ ├── performance-optimization.md
│ │ ├── prompting-guide.md
│ │ ├── repository-analysis.md
│ │ ├── seo-optimization.md
│ │ ├── site-monitoring.md
│ │ ├── troubleshooting.md
│ │ └── usage-examples.md
│ ├── index.md
│ ├── knowledge-graph.md
│ ├── package-lock.json
│ ├── package.json
│ ├── phase-2-intelligence.md
│ ├── reference
│ │ ├── api-overview.md
│ │ ├── cli.md
│ │ ├── configuration.md
│ │ ├── deploy-pages.md
│ │ ├── index.md
│ │ ├── mcp-tools.md
│ │ └── prompt-templates.md
│ ├── research
│ │ ├── cross-domain-integration
│ │ │ └── README.md
│ │ ├── domain-1-mcp-architecture
│ │ │ ├── index.md
│ │ │ └── mcp-performance-research.md
│ │ ├── domain-2-repository-analysis
│ │ │ └── README.md
│ │ ├── domain-3-ssg-recommendation
│ │ │ ├── index.md
│ │ │ └── ssg-performance-analysis.md
│ │ ├── domain-4-diataxis-integration
│ │ │ └── README.md
│ │ ├── domain-5-github-deployment
│ │ │ ├── github-pages-security-analysis.md
│ │ │ └── index.md
│ │ ├── domain-6-api-design
│ │ │ └── README.md
│ │ ├── README.md
│ │ ├── research-integration-summary-2025-01-14.md
│ │ ├── research-progress-template.md
│ │ └── research-questions-2025-01-14.md
│ ├── robots.txt
│ ├── sidebars.js
│ ├── sitemap.xml
│ ├── src
│ │ └── css
│ │ └── custom.css
│ └── tutorials
│ ├── development-setup.md
│ ├── environment-setup.md
│ ├── first-deployment.md
│ ├── getting-started.md
│ ├── index.md
│ ├── memory-workflows.md
│ └── user-onboarding.md
├── jest.config.js
├── LICENSE
├── Makefile
├── MCP_PHASE2_IMPLEMENTATION.md
├── mcp-config-example.json
├── mcp.json
├── package-lock.json
├── package.json
├── README.md
├── release.sh
├── scripts
│ └── check-package-structure.cjs
├── SECURITY.md
├── setup-precommit.sh
├── src
│ ├── benchmarks
│ │ └── performance.ts
│ ├── index.ts
│ ├── memory
│ │ ├── contextual-retrieval.ts
│ │ ├── deployment-analytics.ts
│ │ ├── enhanced-manager.ts
│ │ ├── export-import.ts
│ │ ├── freshness-kg-integration.ts
│ │ ├── index.ts
│ │ ├── integration.ts
│ │ ├── kg-code-integration.ts
│ │ ├── kg-health.ts
│ │ ├── kg-integration.ts
│ │ ├── kg-link-validator.ts
│ │ ├── kg-storage.ts
│ │ ├── knowledge-graph.ts
│ │ ├── learning.ts
│ │ ├── manager.ts
│ │ ├── multi-agent-sharing.ts
│ │ ├── pruning.ts
│ │ ├── schemas.ts
│ │ ├── storage.ts
│ │ ├── temporal-analysis.ts
│ │ ├── user-preferences.ts
│ │ └── visualization.ts
│ ├── prompts
│ │ └── technical-writer-prompts.ts
│ ├── scripts
│ │ └── benchmark.ts
│ ├── templates
│ │ └── playwright
│ │ ├── accessibility.spec.template.ts
│ │ ├── Dockerfile.template
│ │ ├── docs-e2e.workflow.template.yml
│ │ ├── link-validation.spec.template.ts
│ │ └── playwright.config.template.ts
│ ├── tools
│ │ ├── analyze-deployments.ts
│ │ ├── analyze-readme.ts
│ │ ├── analyze-repository.ts
│ │ ├── check-documentation-links.ts
│ │ ├── deploy-pages.ts
│ │ ├── detect-gaps.ts
│ │ ├── evaluate-readme-health.ts
│ │ ├── generate-config.ts
│ │ ├── generate-contextual-content.ts
│ │ ├── generate-llm-context.ts
│ │ ├── generate-readme-template.ts
│ │ ├── generate-technical-writer-prompts.ts
│ │ ├── kg-health-check.ts
│ │ ├── manage-preferences.ts
│ │ ├── manage-sitemap.ts
│ │ ├── optimize-readme.ts
│ │ ├── populate-content.ts
│ │ ├── readme-best-practices.ts
│ │ ├── recommend-ssg.ts
│ │ ├── setup-playwright-tests.ts
│ │ ├── setup-structure.ts
│ │ ├── sync-code-to-docs.ts
│ │ ├── test-local-deployment.ts
│ │ ├── track-documentation-freshness.ts
│ │ ├── update-existing-documentation.ts
│ │ ├── validate-content.ts
│ │ ├── validate-documentation-freshness.ts
│ │ ├── validate-readme-checklist.ts
│ │ └── verify-deployment.ts
│ ├── types
│ │ └── api.ts
│ ├── utils
│ │ ├── ast-analyzer.ts
│ │ ├── code-scanner.ts
│ │ ├── content-extractor.ts
│ │ ├── drift-detector.ts
│ │ ├── freshness-tracker.ts
│ │ ├── language-parsers-simple.ts
│ │ ├── permission-checker.ts
│ │ └── sitemap-generator.ts
│ └── workflows
│ └── documentation-workflow.ts
├── test-docs-local.sh
├── tests
│ ├── api
│ │ └── mcp-responses.test.ts
│ ├── benchmarks
│ │ └── performance.test.ts
│ ├── edge-cases
│ │ └── error-handling.test.ts
│ ├── functional
│ │ └── tools.test.ts
│ ├── integration
│ │ ├── kg-documentation-workflow.test.ts
│ │ ├── knowledge-graph-workflow.test.ts
│ │ ├── mcp-readme-tools.test.ts
│ │ ├── memory-mcp-tools.test.ts
│ │ ├── readme-technical-writer.test.ts
│ │ └── workflow.test.ts
│ ├── memory
│ │ ├── contextual-retrieval.test.ts
│ │ ├── enhanced-manager.test.ts
│ │ ├── export-import.test.ts
│ │ ├── freshness-kg-integration.test.ts
│ │ ├── kg-code-integration.test.ts
│ │ ├── kg-health.test.ts
│ │ ├── kg-link-validator.test.ts
│ │ ├── kg-storage-validation.test.ts
│ │ ├── kg-storage.test.ts
│ │ ├── knowledge-graph-enhanced.test.ts
│ │ ├── knowledge-graph.test.ts
│ │ ├── learning.test.ts
│ │ ├── manager-advanced.test.ts
│ │ ├── manager.test.ts
│ │ ├── mcp-resource-integration.test.ts
│ │ ├── mcp-tool-persistence.test.ts
│ │ ├── schemas.test.ts
│ │ ├── storage.test.ts
│ │ ├── temporal-analysis.test.ts
│ │ └── user-preferences.test.ts
│ ├── performance
│ │ ├── memory-load-testing.test.ts
│ │ └── memory-stress-testing.test.ts
│ ├── prompts
│ │ ├── guided-workflow-prompts.test.ts
│ │ └── technical-writer-prompts.test.ts
│ ├── server.test.ts
│ ├── setup.ts
│ ├── tools
│ │ ├── all-tools.test.ts
│ │ ├── analyze-coverage.test.ts
│ │ ├── analyze-deployments.test.ts
│ │ ├── analyze-readme.test.ts
│ │ ├── analyze-repository.test.ts
│ │ ├── check-documentation-links.test.ts
│ │ ├── deploy-pages-kg-retrieval.test.ts
│ │ ├── deploy-pages-tracking.test.ts
│ │ ├── deploy-pages.test.ts
│ │ ├── detect-gaps.test.ts
│ │ ├── evaluate-readme-health.test.ts
│ │ ├── generate-contextual-content.test.ts
│ │ ├── generate-llm-context.test.ts
│ │ ├── generate-readme-template.test.ts
│ │ ├── generate-technical-writer-prompts.test.ts
│ │ ├── kg-health-check.test.ts
│ │ ├── manage-sitemap.test.ts
│ │ ├── optimize-readme.test.ts
│ │ ├── readme-best-practices.test.ts
│ │ ├── recommend-ssg-historical.test.ts
│ │ ├── recommend-ssg-preferences.test.ts
│ │ ├── recommend-ssg.test.ts
│ │ ├── simple-coverage.test.ts
│ │ ├── sync-code-to-docs.test.ts
│ │ ├── test-local-deployment.test.ts
│ │ ├── tool-error-handling.test.ts
│ │ ├── track-documentation-freshness.test.ts
│ │ ├── validate-content.test.ts
│ │ ├── validate-documentation-freshness.test.ts
│ │ └── validate-readme-checklist.test.ts
│ ├── types
│ │ └── type-safety.test.ts
│ └── utils
│ ├── ast-analyzer.test.ts
│ ├── content-extractor.test.ts
│ ├── drift-detector.test.ts
│ ├── freshness-tracker.test.ts
│ └── sitemap-generator.test.ts
├── tsconfig.json
└── typedoc.json
```
# Files
--------------------------------------------------------------------------------
/src/tools/update-existing-documentation.ts:
--------------------------------------------------------------------------------
```typescript
import { Tool } from "@modelcontextprotocol/sdk/types.js";
import * as fs from "fs/promises";
import * as path from "path";
import {
handleMemoryRecall,
handleMemoryEnhancedRecommendation,
handleMemoryIntelligentAnalysis,
} from "../memory/index.js";
/**
 * Input options for the update-existing-documentation tool.
 */
interface UpdateOptions {
  /** ID of a previously stored repository analysis (from analyze_repository). */
  analysisId: string;
  /** Root directory of the existing documentation to scan. */
  docsPath: string;
  /** How the code-vs-docs comparison is focused. */
  compareMode: "comprehensive" | "gap-detection" | "accuracy-check";
  /** Conservative strategies only act on high-severity gaps (see detectDocumentationGaps usage). */
  updateStrategy: "conservative" | "moderate" | "aggressive";
  /** Whether suggested updates should keep the current writing style (not consumed in the code visible here). */
  preserveStyle: boolean;
  /** Optional focus areas, forwarded to the memory-enhanced recommendation call. */
  focusAreas?: string[];
}
/**
 * A single discrepancy between the code and the documentation.
 */
interface DocumentationGap {
  /** Kind of discrepancy. */
  type: "missing" | "outdated" | "incorrect" | "incomplete";
  /** Where the gap lives (a doc path, or "source -> documentation" for missing docs). */
  location: string;
  /** Human-readable explanation of the gap. */
  description: string;
  /** How urgent addressing the gap is. */
  severity: "low" | "medium" | "high" | "critical";
  /** Concrete instruction for closing the gap. */
  suggestedUpdate: string;
  /** Similar-project memories supporting this finding, when available. */
  memoryEvidence?: any[];
}
/**
 * Result of diffing the analyzed codebase against the existing docs.
 */
interface CodeDocumentationComparison {
  /** Features found in the code analysis (dependencies, scripts, technologies, ...). */
  codeFeatures: any[];
  /** Features mentioned across the existing documentation. */
  documentedFeatures: any[];
  /** Two-way mismatches between the lists above. */
  gaps: DocumentationGap[];
  /** Sections flagged as stale (e.g. old Node.js or package versions). */
  outdatedSections: any[];
  /** Problems found inside documented code samples. */
  accuracyIssues: any[];
}
/**
 * One actionable documentation change suggested to the user.
 */
interface UpdateRecommendation {
  /** Which doc section the change targets. */
  section: string;
  /** The section's current text. */
  currentContent: string;
  /** Proposed replacement text. */
  suggestedContent: string;
  /** Why the change is recommended. */
  reasoning: string;
  /** Supporting memories from similar projects. */
  memoryEvidence: any[];
  /** Confidence in the recommendation (numeric score). */
  confidence: number;
  /** Rough implementation effort. */
  effort: "low" | "medium" | "high";
}
/**
 * Full result of an update run.
 */
interface UpdateResult {
  /** True when the run completes (failures surface as thrown errors). */
  success: boolean;
  /** The code-vs-docs comparison the recommendations were derived from. */
  analysisPerformed: CodeDocumentationComparison;
  /** Concrete, memory-informed update suggestions. */
  recommendations: UpdateRecommendation[];
  /**
   * Context pulled from the memory system. NOTE: the engine stores extra
   * keys beyond those declared here (e.g. updatePatterns, enhancedAnalysis).
   */
  memoryInsights: {
    similarProjects: any[];
    successfulUpdatePatterns: any[];
    commonGapTypes: Record<string, number>;
  };
  /** Summary numbers for the run. */
  updateMetrics: {
    gapsDetected: number;
    recommendationsGenerated: number;
    confidenceScore: number;
    estimatedEffort: string;
  };
  /** Suggested follow-up actions for the caller. */
  nextSteps: string[];
}
/**
 * Engine behind the update-existing-documentation tool: loads a stored
 * repository analysis, scans the existing docs, diffs the two, and produces
 * memory-informed update recommendations.
 */
class DocumentationUpdateEngine {
  // Memory-derived context (similar projects, gap patterns); null until loadMemoryInsights runs.
  private memoryInsights: any = null;
  // The repository analysis currently being processed.
  private codeAnalysis: any = null;
  // docPath -> { content, analysis, lastModified, path } for each scanned markdown file.
  private existingDocs: Map<string, any> = new Map();
/**
 * Run the full update pipeline for an existing documentation set.
 *
 * Steps: load the stored repository analysis, pull memory insights, scan
 * the existing docs, diff code vs. docs, then derive recommendations and
 * summary metrics.
 *
 * @param options - See {@link UpdateOptions}.
 * @returns Comparison, recommendations, memory insights, and metrics.
 * @throws Error when the referenced analysis cannot be found.
 */
async updateExistingDocumentation(
  options: UpdateOptions,
): Promise<UpdateResult> {
  // 1. Load repository analysis and memory insights
  const analysis = await this.getRepositoryAnalysis(options.analysisId);
  this.codeAnalysis = analysis;
  // 2. Load memory insights for intelligent comparison
  await this.loadMemoryInsights(analysis, options);
  // 3. Analyze existing documentation structure and content
  const existingDocs = await this.analyzeExistingDocumentation(
    options.docsPath,
  );
  this.existingDocs = existingDocs;
  // 4. Perform comprehensive code-documentation comparison
  const comparison = await this.performCodeDocumentationComparison(
    analysis,
    existingDocs,
    options,
  );
  // 5. Generate memory-informed update recommendations
  const recommendations = await this.generateUpdateRecommendations(
    comparison,
    options,
  );
  // 6. Calculate metrics and confidence scores
  const updateMetrics = this.calculateUpdateMetrics(
    comparison,
    recommendations,
  );
  return {
    success: true,
    analysisPerformed: comparison,
    recommendations,
    // NOTE: carries extra keys beyond the declared interface shape; see loadMemoryInsights.
    memoryInsights: this.memoryInsights,
    updateMetrics,
    nextSteps: this.generateMemoryInformedNextSteps(
      comparison,
      recommendations,
    ),
  };
}
/**
 * Resolve a repository analysis by ID.
 *
 * Lookup order:
 *  1. The memory system via `handleMemoryRecall`, unwrapping the stored
 *     MCP-style content envelope (data.content[0].text holds the JSON),
 *     with fallbacks for legacy `content` / raw `data` shapes.
 *  2. The on-disk cache at `.documcp/analyses/<id>.json`.
 *
 * @param analysisId - ID produced by a prior analyze_repository run.
 * @returns The parsed analysis object.
 * @throws Error when the ID is found in neither memory nor the file cache.
 */
private async getRepositoryAnalysis(analysisId: string): Promise<any> {
  // Try to get analysis from memory system first
  try {
    const memoryRecall = await handleMemoryRecall({
      query: analysisId,
      type: "analysis",
      limit: 1,
    });
    // Handle the memory recall result structure
    if (
      memoryRecall &&
      memoryRecall.memories &&
      memoryRecall.memories.length > 0
    ) {
      const memory = memoryRecall.memories[0];
      // Handle wrapped content structure: data.content is an array of MCP
      // content parts; the analysis JSON lives in the first text part.
      if (
        memory.data &&
        memory.data.content &&
        Array.isArray(memory.data.content)
      ) {
        // Extract the JSON from the first text content
        const firstContent = memory.data.content[0];
        if (
          firstContent &&
          firstContent.type === "text" &&
          firstContent.text
        ) {
          try {
            return JSON.parse(firstContent.text);
          } catch (parseError) {
            // Unparseable text part: fall back to the raw data envelope.
            console.warn(
              "Failed to parse analysis content from memory:",
              parseError,
            );
            return memory.data;
          }
        }
      }
      // Try direct content access (legacy format)
      if (memory.content) {
        return memory.content;
      }
      // Try data field
      if (memory.data) {
        return memory.data;
      }
    }
  } catch (error) {
    // Memory system unavailable is not fatal; the file cache below may still hit.
    console.warn("Failed to retrieve from memory system:", error);
  }
  // Fallback to reading from cached analysis file
  const analysisPath = path.join(
    ".documcp",
    "analyses",
    `${analysisId}.json`,
  );
  try {
    const content = await fs.readFile(analysisPath, "utf-8");
    return JSON.parse(content);
  } catch {
    throw new Error(
      `Repository analysis with ID '${analysisId}' not found. Please run analyze_repository first.`,
    );
  }
}
/**
 * Populate `this.memoryInsights` with context from the memory system:
 * similar projects, successful update patterns, and memory-enhanced
 * analysis/recommendations for this specific update task.
 *
 * Never throws: on any failure the insights are reset to an empty
 * structure so the rest of the pipeline can still run.
 *
 * @param analysis - The repository analysis being processed.
 * @param options - The caller's update options (strategy, mode, focus areas).
 */
private async loadMemoryInsights(
  analysis: any,
  options: UpdateOptions,
): Promise<void> {
  try {
    // Get similar projects that had successful documentation updates
    const similarProjectsQuery = `${
      analysis.metadata?.primaryLanguage || ""
    } ${analysis.metadata?.ecosystem || ""} documentation update`;
    const similarProjects = await handleMemoryRecall({
      query: similarProjectsQuery,
      type: "recommendation",
      limit: 10,
    });
    // Get patterns for successful documentation updates
    const updatePatternsQuery =
      "documentation update successful patterns gaps outdated";
    const updatePatterns = await handleMemoryRecall({
      query: updatePatternsQuery,
      type: "configuration",
      limit: 5,
    });
    // Get memory-enhanced analysis for this specific update task
    const enhancedAnalysis = await handleMemoryIntelligentAnalysis({
      projectPath: analysis.projectPath || "",
      baseAnalysis: analysis,
    });
    // Get memory-enhanced recommendations for update strategy
    const enhancedRecommendations = await handleMemoryEnhancedRecommendation({
      projectPath: analysis.projectPath || "",
      baseRecommendation: {
        updateStrategy: options.updateStrategy,
        compareMode: options.compareMode,
        focusAreas: options.focusAreas || [],
      },
      projectFeatures: {
        ecosystem: analysis.metadata?.ecosystem || "unknown",
        primaryLanguage: analysis.metadata?.primaryLanguage || "unknown",
        complexity: analysis.complexity || "medium",
        hasTests: analysis.structure?.hasTests || false,
        hasCI: analysis.structure?.hasCI || false,
        docStructure: "existing", // Indicates we're updating existing docs
      },
    });
    this.memoryInsights = {
      similarProjects: similarProjects.memories || [],
      updatePatterns: updatePatterns.memories || [],
      enhancedAnalysis: enhancedAnalysis,
      enhancedRecommendations: enhancedRecommendations,
      successfulUpdatePatterns: this.extractUpdatePatterns(
        similarProjects.memories || [],
      ),
      commonGapTypes: this.extractCommonGapTypes(
        similarProjects.memories || [],
      ),
    };
  } catch (error) {
    // Degrade gracefully: the comparison can proceed without memory context.
    console.warn("Failed to load memory insights:", error);
    this.memoryInsights = {
      similarProjects: [],
      updatePatterns: [],
      enhancedAnalysis: null,
      enhancedRecommendations: null,
      successfulUpdatePatterns: [],
      commonGapTypes: {},
    };
  }
}
/**
 * Collect update-pattern records from similar projects' memory content.
 * Accepts either `content.updatePatterns` or `content.documentationUpdates`
 * (array or scalar) and returns one flat, truthy-only list.
 */
private extractUpdatePatterns(projects: any[]): any[] {
  const collected: any[] = [];
  for (const project of projects) {
    const patterns =
      project.content?.updatePatterns || project.content?.documentationUpdates;
    if (patterns) {
      // Flatten one level so arrays and single records are treated alike.
      collected.push(...[patterns].flat());
    }
  }
  return collected.filter(Boolean);
}
/**
 * Tally how often each documentation-gap type appears across similar
 * projects; gaps without a type are counted under "unknown".
 */
private extractCommonGapTypes(projects: any[]): Record<string, number> {
  return projects.reduce<Record<string, number>>((tally, project) => {
    for (const gap of project.content?.documentationGaps || []) {
      const key = gap.type || "unknown";
      tally[key] = (tally[key] || 0) + 1;
    }
    return tally;
  }, {});
}
/**
 * Walk the docs tree rooted at `docsPath` and build a map from relative
 * doc path to its content and structural analysis.
 * Best-effort: an unreadable root yields an empty map instead of throwing.
 */
private async analyzeExistingDocumentation(
  docsPath: string,
): Promise<Map<string, any>> {
  const collected = new Map<string, any>();
  try {
    await this.recursivelyAnalyzeDocuments(docsPath, collected);
  } catch (error) {
    console.warn("Failed to analyze existing documentation:", error);
  }
  return collected;
}
/**
 * Depth-first walk of a documentation directory. Markdown files (.md/.mdx)
 * are read and analyzed into `docs` keyed by path relative to the root;
 * everything else is skipped. Per-file and per-directory failures are
 * logged and skipped so one bad entry never aborts the walk.
 */
private async recursivelyAnalyzeDocuments(
  dirPath: string,
  docs: Map<string, any>,
  relativePath: string = "",
): Promise<void> {
  let entries;
  try {
    entries = await fs.readdir(dirPath, { withFileTypes: true });
  } catch (error) {
    console.warn(`Failed to read directory ${dirPath}:`, error);
    return;
  }
  for (const entry of entries) {
    const fullPath = path.join(dirPath, entry.name);
    const docPath = path.join(relativePath, entry.name);
    if (entry.isDirectory()) {
      await this.recursivelyAnalyzeDocuments(fullPath, docs, docPath);
      continue;
    }
    const isMarkdown =
      entry.name.endsWith(".md") || entry.name.endsWith(".mdx");
    if (!isMarkdown) {
      continue;
    }
    try {
      const content = await fs.readFile(fullPath, "utf-8");
      docs.set(docPath, {
        content,
        analysis: this.analyzeDocumentContent(content, docPath),
        lastModified: (await fs.stat(fullPath)).mtime,
        path: fullPath,
      });
    } catch (error) {
      // One unreadable file should not stop the rest of the scan.
      console.warn(`Failed to read document ${fullPath}:`, error);
    }
  }
}
/**
 * Build a structural summary of one markdown document: inferred type,
 * sections, code blocks, links, staleness markers, mentioned dependencies,
 * documented features, word count, and heading outline.
 */
private analyzeDocumentContent(content: string, filePath: string): any {
  const sections = this.extractSections(content);
  const codeBlocks = this.extractCodeBlocks(content);
  const links = this.extractLinks(content);
  const headingStructure = this.extractHeadingStructure(content);
  return {
    type: this.inferDocumentType(filePath, content),
    sections,
    codeBlocks,
    links,
    lastUpdated: this.extractLastUpdated(content),
    version: this.extractVersion(content),
    dependencies: this.extractMentionedDependencies(content),
    features: this.extractDocumentedFeatures(content),
    // Whitespace-token count of the whole document.
    wordCount: content.split(/\s+/).length,
    headingStructure,
  };
}
/**
 * Classify a document by (in priority order) its Diataxis directory,
 * well-known file-name patterns, then heading text in the content.
 * Falls back to "general".
 */
private inferDocumentType(filePath: string, content: string): string {
  const fileName = path.basename(filePath).toLowerCase();
  const pathParts = filePath.toLowerCase().split(path.sep);

  // 1. Diataxis category from the directory the file lives in.
  if (pathParts.includes("tutorials")) return "tutorial";
  if (pathParts.includes("how-to") || pathParts.includes("howto")) {
    return "how-to";
  }
  if (pathParts.includes("reference")) return "reference";
  if (pathParts.includes("explanation")) return "explanation";

  // 2. Well-known file-name substrings, checked in priority order.
  const namePatterns: Array<[string[], string]> = [
    [["readme"], "readme"],
    [["getting-started", "quickstart"], "getting-started"],
    [["api"], "api-reference"],
    [["install", "setup"], "installation"],
    [["deploy"], "deployment"],
    [["config"], "configuration"],
  ];
  for (const [needles, docType] of namePatterns) {
    if (needles.some((needle) => fileName.includes(needle))) {
      return docType;
    }
  }

  // 3. Fall back to scanning for well-known headings in the content.
  const contentPatterns: Array<[string[], string]> = [
    [["# Getting Started", "## Getting Started"], "getting-started"],
    [["# API", "## API"], "api-reference"],
    [["# Installation", "## Installation"], "installation"],
  ];
  for (const [needles, docType] of contentPatterns) {
    if (needles.some((needle) => content.includes(needle))) {
      return docType;
    }
  }
  return "general";
}
/**
 * Split a markdown document into heading-delimited sections.
 * Text before the first heading is not attributed to any section.
 *
 * @param content - Raw markdown text.
 * @returns Array of { level, title, startLine, content, wordCount }.
 */
private extractSections(content: string): any[] {
  const sections: any[] = [];
  const lines = content.split("\n");
  let currentSection: any = null;
  for (let i = 0; i < lines.length; i++) {
    const headingMatch = lines[i].match(/^(#{1,6})\s+(.+)/);
    if (headingMatch) {
      if (currentSection) {
        sections.push(currentSection);
      }
      currentSection = {
        level: headingMatch[1].length,
        title: headingMatch[2],
        startLine: i + 1, // 1-based line number of the heading
        content: [],
      };
    } else if (currentSection) {
      currentSection.content.push(lines[i]);
    }
  }
  if (currentSection) {
    sections.push(currentSection);
  }
  return sections.map((section) => {
    const body = section.content.join("\n");
    // FIX: count only real word tokens. The previous
    // `join(" ").split(/\s+/).length` reported 1 word for an empty section
    // and inflated counts when blank lines produced empty tokens.
    const wordCount = body.split(/\s+/).filter(Boolean).length;
    return { ...section, content: body, wordCount };
  });
}
/**
 * Find fenced code blocks (```lang ... ```), returning the language tag
 * ("text" when absent), the inner code, and character offsets of the fence.
 */
private extractCodeBlocks(content: string): any[] {
  const fencePattern = /```(\w+)?\n([\s\S]*?)```/g;
  const blocks: any[] = [];
  for (const match of content.matchAll(fencePattern)) {
    const start = match.index as number;
    blocks.push({
      language: match[1] || "text",
      code: match[2],
      startIndex: start,
      endIndex: start + match[0].length,
    });
  }
  return blocks;
}
/**
 * Extract markdown links `[text](url)`. A link is flagged internal when
 * its URL does not start with "http".
 */
private extractLinks(content: string): any[] {
  const linkPattern = /\[([^\]]+)\]\(([^)]+)\)/g;
  const found: any[] = [];
  for (const match of content.matchAll(linkPattern)) {
    found.push({
      text: match[1],
      url: match[2],
      isInternal: !match[2].startsWith("http"),
      startIndex: match.index,
    });
  }
  return found;
}
/**
 * Find a "last updated:/updated:/modified:" annotation (case-insensitive)
 * and return the rest of that line, or null when none is present.
 */
private extractLastUpdated(content: string): string | null {
  const match = /(?:last updated|updated|modified):\s*(.+)/i.exec(content);
  return match === null ? null : match[1];
}
/**
 * Find a version annotation such as "Version: 1.2.3" or "v1.2"
 * (case-insensitive) and return the numeric part, or null.
 */
private extractVersion(content: string): string | null {
  const match = /(?:version|v)[\s:]+([\d.]+)/i.exec(content);
  return match?.[1] ?? null;
}
/**
 * Collect package names the document mentions, from `npm install ...`
 * commands (CLI flags skipped) and `import ... from "pkg"` statements
 * (relative paths skipped). De-duplicated, insertion order preserved.
 */
private extractMentionedDependencies(content: string): string[] {
  const found = new Set<string>();

  // `npm install <pkgs...>` commands; stop at a backtick or end of line.
  for (const command of content.match(/npm install\s+([^`\n]+)/g) ?? []) {
    const args = command.replace("npm install", "").trim().split(/\s+/);
    for (const arg of args) {
      if (arg && !arg.startsWith("-")) {
        found.add(arg);
      }
    }
  }

  // `import ... from "pkg"` statements; "./relative" sources are not packages.
  for (const statement of content.match(
    /import.*from\s+['"]([^'"]+)['"]/g,
  ) ?? []) {
    const source = statement.match(/from\s+['"]([^'"]+)['"]/);
    if (source && !source[1].startsWith(".")) {
      found.add(source[1]);
    }
  }

  return [...found];
}
/**
 * Heuristically collect "features" the document talks about: identifiers
 * declared in inline code samples, REST-style endpoints, and headings that
 * mention features/functionality. De-duplicated, insertion order preserved.
 */
private extractDocumentedFeatures(content: string): string[] {
  const features = new Set<string>();

  // Identifiers declared in code samples (function/const/let/var NAME).
  for (const m of content.matchAll(/(?:function|const|let|var)\s+(\w+)/g)) {
    features.add(m[1]);
  }

  // REST endpoints such as "GET /users".
  for (const m of content.matchAll(
    /(?:GET|POST|PUT|DELETE|PATCH)\s+([/\w-]+)/g,
  )) {
    features.add(m[1]);
  }

  // Headings whose text explicitly mentions features/functionality.
  for (const m of content.matchAll(/#{1,6}\s+(.+)/g)) {
    const headingText = m[1].toLowerCase();
    if (
      headingText.includes("feature") ||
      headingText.includes("functionality")
    ) {
      features.add(headingText);
    }
  }

  return [...features];
}
/**
 * Flat outline of the document: one entry per markdown heading with its
 * level (number of #), text, and 1-based line number.
 */
private extractHeadingStructure(content: string): any[] {
  return content.split("\n").flatMap((line, index) => {
    const m = line.match(/^(#{1,6})\s+(.+)/);
    return m ? [{ level: m[1].length, text: m[2], line: index + 1 }] : [];
  });
}
/**
 * Compare what the code analysis says exists against what the docs
 * describe, producing gaps, outdated sections, and accuracy issues.
 *
 * @param analysis - Repository analysis (source of code features).
 * @param existingDocs - Map produced by analyzeExistingDocumentation.
 * @param _options - Update options, forwarded to gap detection.
 */
private async performCodeDocumentationComparison(
  analysis: any,
  existingDocs: Map<string, any>,
  _options: UpdateOptions,
): Promise<CodeDocumentationComparison> {
  const codeFeatures = this.extractCodeFeatures(analysis);
  const documentedFeatures = this.extractAllDocumentedFeatures(existingDocs);
  // The three detectors are independent of one another, so run them
  // concurrently instead of awaiting each in sequence.
  const [gaps, outdatedSections, accuracyIssues] = await Promise.all([
    this.detectDocumentationGaps(codeFeatures, documentedFeatures, _options),
    this.detectOutdatedSections(analysis, existingDocs),
    this.detectAccuracyIssues(analysis, existingDocs),
  ]);
  return {
    codeFeatures,
    documentedFeatures,
    gaps,
    outdatedSections,
    accuracyIssues,
  };
}
/**
 * Flatten the repository analysis into a feature list: declared
 * dependencies, npm scripts, structural signals (tests/CI), and detected
 * technologies. Each entry carries { type, name, source, ... }.
 */
private extractCodeFeatures(analysis: any): any[] {
  const features: any[] = [];

  // Declared package dependencies.
  if (analysis.dependencies?.packages) {
    for (const pkg of analysis.dependencies.packages) {
      features.push({ type: "dependency", name: pkg, source: "package.json" });
    }
  }

  // npm scripts from the captured package.json.
  const packageJson = this.findPackageJsonInAnalysis(analysis);
  if (packageJson?.scripts) {
    for (const [script, command] of Object.entries(packageJson.scripts)) {
      features.push({
        type: "script",
        name: script,
        command,
        source: "package.json",
      });
    }
  }

  // Structural signals.
  if (analysis.structure?.hasTests) {
    features.push({ type: "testing", name: "test suite", source: "structure" });
  }
  if (analysis.structure?.hasCI) {
    features.push({
      type: "ci-cd",
      name: "continuous integration",
      source: "structure",
    });
  }

  // Detected technologies (truthy values only).
  if (analysis.technologies) {
    for (const [key, value] of Object.entries(analysis.technologies)) {
      if (value) {
        features.push({
          type: "technology",
          name: key,
          value: value,
          source: "analysis",
        });
      }
    }
  }

  return features;
}
/**
 * Locate and parse the package.json captured inside a repository analysis.
 * Returns null when the file is absent, has no content, or is malformed.
 */
private findPackageJsonInAnalysis(analysis: any): any {
  const packageFile = (analysis.files || []).find(
    (file: any) => file.name === "package.json",
  );
  if (!packageFile?.content) {
    return null;
  }
  try {
    return JSON.parse(packageFile.content);
  } catch {
    // Malformed JSON in the captured file: treat as absent.
    return null;
  }
}
/**
 * Flatten every document's detected features and mentioned dependencies
 * into one list, tagging each entry with the document it came from.
 */
private extractAllDocumentedFeatures(existingDocs: Map<string, any>): any[] {
  const collected: any[] = [];
  for (const [docPath, doc] of existingDocs) {
    for (const feature of doc.analysis?.features || []) {
      collected.push({
        name: feature,
        source: docPath,
        type: "documented-feature",
      });
    }
    for (const dep of doc.analysis?.dependencies || []) {
      collected.push({
        name: dep,
        source: docPath,
        type: "documented-dependency",
      });
    }
  }
  return collected;
}
private async detectDocumentationGaps(
codeFeatures: any[],
documentedFeatures: any[],
_options: UpdateOptions,
): Promise<DocumentationGap[]> {
const gaps: DocumentationGap[] = [];
const memoryGapPatterns = this.memoryInsights?.commonGapTypes || {};
// Find features in code that aren't documented
codeFeatures.forEach((codeFeature) => {
const isDocumented = documentedFeatures.some((docFeature) =>
this.featuresMatch(codeFeature, docFeature),
);
if (!isDocumented) {
const severity = this.determineGapSeverity(
codeFeature,
memoryGapPatterns,
);
const suggestedUpdate = this.generateGapSuggestion(
codeFeature,
_options,
);
gaps.push({
type: "missing",
location: `${codeFeature.source} -> documentation`,
description: `${codeFeature.type} '${codeFeature.name}' exists in code but is not documented`,
severity,
suggestedUpdate,
memoryEvidence: this.findMemoryEvidenceForGap(codeFeature),
});
}
});
// Find documented features that no longer exist in code
documentedFeatures.forEach((docFeature) => {
const existsInCode = codeFeatures.some((codeFeature) =>
this.featuresMatch(codeFeature, docFeature),
);
if (!existsInCode) {
gaps.push({
type: "outdated",
location: docFeature.source,
description: `Documented feature '${docFeature.name}' no longer exists in code`,
severity: "medium",
suggestedUpdate: `Remove or update documentation for '${docFeature.name}'`,
memoryEvidence: this.findMemoryEvidenceForOutdated(docFeature),
});
}
});
return gaps;
}
/**
 * Decide whether a code feature and a documented feature refer to the
 * same thing. Dependencies must match exactly; everything else also
 * accepts a case-insensitive substring match in either direction.
 */
private featuresMatch(codeFeature: any, docFeature: any): boolean {
  if (codeFeature.name === docFeature.name) {
    return true;
  }
  const isDependencyPair =
    codeFeature.type === "dependency" &&
    docFeature.type === "documented-dependency";
  if (isDependencyPair) {
    // Exact equality already failed above, so dependency pairs never
    // fall through to the fuzzy match.
    return false;
  }
  const codeName = codeFeature.name.toLowerCase();
  const docName = docFeature.name.toLowerCase();
  return codeName.includes(docName) || docName.includes(codeName);
}
/**
 * Rank how serious an undocumented code feature is.
 *
 * Core npm lifecycle scripts and critical dependencies are "high";
 * testing/CI signals are "medium"; otherwise the gap type's frequency in
 * similar projects (from memory) can bump it to "medium".
 *
 * @param codeFeature - The undocumented feature ({ type, name, ... }).
 * @param memoryGapPatterns - Gap-type -> occurrence count from memory.
 */
private determineGapSeverity(
  codeFeature: any,
  memoryGapPatterns: Record<string, number>,
): "low" | "medium" | "high" | "critical" {
  // Core lifecycle scripts are the first thing readers look for.
  if (
    codeFeature.type === "script" &&
    ["start", "dev", "build", "test"].includes(codeFeature.name)
  ) {
    return "high";
  }
  if (
    codeFeature.type === "dependency" &&
    this.isCriticalDependency(codeFeature.name)
  ) {
    return "high";
  }
  if (codeFeature.type === "testing" || codeFeature.type === "ci-cd") {
    return "medium";
  }
  // Memory signal: gap types frequently seen in similar projects get
  // bumped. (The former `> 2` branch returned "low" exactly like the
  // fallthrough, so that dead branch has been removed.)
  const gapFrequency = memoryGapPatterns[codeFeature.type] || 0;
  return gapFrequency > 5 ? "medium" : "low";
}
/**
 * Whether a package is central enough that leaving it undocumented is a
 * high-severity gap.
 *
 * Matches the exact framework name or a hyphenated derivative
 * ("react-dom"); a leading @scope/ is stripped first ("@types/jest").
 * FIX: the previous plain substring test mis-flagged unrelated packages —
 * "preact" contains "react" and "vitest" contains "vite".
 */
private isCriticalDependency(depName: string): boolean {
  const criticalDeps = [
    "react",
    "vue",
    "angular",
    "express",
    "fastify",
    "next",
    "nuxt",
    "gatsby",
    "typescript",
    "jest",
    "mocha",
    "webpack",
    "vite",
    "rollup",
  ];
  // Judge scoped packages by their base name.
  const baseName = depName.startsWith("@")
    ? depName.split("/")[1] ?? depName
    : depName;
  return criticalDeps.some(
    (critical) =>
      baseName === critical || baseName.startsWith(`${critical}-`),
  );
}
/**
 * Produce the human-readable instruction for closing a documentation gap,
 * phrased per feature type with a generic fallback.
 */
private generateGapSuggestion(
  codeFeature: any,
  _options: UpdateOptions,
): string {
  const suggestionBuilders: Record<string, (feature: any) => string> = {
    script: (f) =>
      `Add documentation for the '${f.name}' script: \`npm run ${f.name}\``,
    dependency: (f) => `Document the '${f.name}' dependency and its usage`,
    testing: () =>
      `Add testing documentation explaining how to run and write tests`,
    "ci-cd": () => `Document the CI/CD pipeline and deployment process`,
    technology: (f) => `Add explanation for ${f.name}: ${f.value}`,
  };
  const build = suggestionBuilders[codeFeature.type];
  return build
    ? build(codeFeature)
    : `Document the ${codeFeature.type} '${codeFeature.name}'`;
}
/**
 * Up to three similar projects from memory that recorded a gap of the
 * same type, used as supporting evidence for a "missing" gap.
 *
 * FIX: the optional chain previously stopped at `memoryInsights`, so a
 * populated insights object lacking `similarProjects` would throw on
 * `.filter`; default the array itself instead.
 */
private findMemoryEvidenceForGap(codeFeature: any): any[] {
  const projects: any[] = this.memoryInsights?.similarProjects ?? [];
  return projects
    .filter((p: any) =>
      p.content?.gaps?.some((gap: any) => gap.type === codeFeature.type),
    )
    .slice(0, 3);
}
/**
 * Up to three similar projects from memory whose outdated-section records
 * mention this documented feature, used as evidence for an "outdated" gap.
 *
 * FIX: same latent crash as findMemoryEvidenceForGap — guard the
 * `similarProjects` array itself, not just `memoryInsights`.
 */
private findMemoryEvidenceForOutdated(docFeature: any): any[] {
  const projects: any[] = this.memoryInsights?.similarProjects ?? [];
  return projects
    .filter((p: any) =>
      p.content?.outdatedSections?.some(
        (section: any) => section.feature === docFeature.name,
      ),
    )
    .slice(0, 3);
}
private async detectOutdatedSections(
analysis: any,
existingDocs: Map<string, any>,
): Promise<any[]> {
const outdatedSections: any[] = [];
existingDocs.forEach((doc, docPath) => {
const sections = doc.analysis?.sections || [];
sections.forEach((section: any) => {
const isOutdated = this.checkSectionOutdated(section, analysis);
if (isOutdated) {
outdatedSections.push({
location: docPath,
section: section.title,
reason: isOutdated.reason,
confidence: isOutdated.confidence,
suggestedUpdate: isOutdated.suggestedUpdate,
});
}
});
});
return outdatedSections;
}
/**
 * Heuristic staleness check for a single doc section.
 *
 * Signals: (1) a Node.js version reference more than two majors behind
 * the pinned LTS; (2) a current dependency mentioned without its current
 * version number.
 *
 * @returns { reason, confidence, suggestedUpdate } or null when clean.
 */
private checkSectionOutdated(section: any, analysis: any): any {
  const sectionContent = section.content.toLowerCase();

  // 1. Outdated Node.js version references.
  const nodeVersionMatch = sectionContent.match(/node(?:\.js)?\s+(\d+)/);
  if (nodeVersionMatch) {
    const documentedVersion = parseInt(nodeVersionMatch[1], 10);
    const currentRecommended = 18; // Pinned LTS; bump when the LTS moves.
    if (documentedVersion < currentRecommended - 2) {
      return {
        reason: `Documented Node.js version ${documentedVersion} is outdated`,
        confidence: 0.9,
        suggestedUpdate: `Update to recommend Node.js ${currentRecommended}+`,
      };
    }
  }

  // 2. Dependency version drift: the section mentions a current dependency
  //    but not its current version number.
  const packageJson = this.findPackageJsonInAnalysis(analysis);
  if (packageJson?.dependencies) {
    for (const [dep, rawVersion] of Object.entries(packageJson.dependencies)) {
      // (The old code tested `sectionContent.includes(dep)` twice; once is enough.)
      if (!sectionContent.includes(dep)) {
        continue;
      }
      // FIX: package.json versions usually carry a range prefix ("^1.2.3"),
      // while docs quote the bare number. Comparing the raw range string
      // almost never matched and flagged nearly every mention as outdated;
      // strip the prefix before comparing.
      const version = String(rawVersion).replace(/^[\^~>=<\s]+/, "");
      if (!sectionContent.includes(version)) {
        return {
          reason: `Package version information may be outdated for ${dep}`,
          confidence: 0.7,
          suggestedUpdate: `Update ${dep} version references to ${version}`,
        };
      }
    }
  }

  return null;
}
private async detectAccuracyIssues(
analysis: any,
existingDocs: Map<string, any>,
): Promise<any[]> {
const accuracyIssues: any[] = [];
existingDocs.forEach((doc, docPath) => {
const codeBlocks = doc.analysis?.codeBlocks || [];
codeBlocks.forEach((codeBlock: any, index: number) => {
const issues = this.validateCodeBlock(codeBlock, analysis);
issues.forEach((issue) => {
accuracyIssues.push({
location: `${docPath}:code-block-${index}`,
type: issue.type,
description: issue.description,
severity: issue.severity,
suggestedFix: issue.suggestedFix,
});
});
});
});
return accuracyIssues;
}
/**
 * Validate a single documentation code block against the analyzed repository.
 *
 * Checks performed:
 *  - `npm install` commands referencing packages not in package.json
 *  - CommonJS `require()` usage in projects whose primary language is
 *    TypeScript
 *
 * @returns list of { type, description, severity, suggestedFix } issues
 */
private validateCodeBlock(codeBlock: any, analysis: any): any[] {
  const issues: any[] = [];
  const code = codeBlock.code;

  // Check npm install commands against actual dependencies.
  const npmInstallMatches = code.match(/npm install\s+([^`\n]+)/g);
  if (npmInstallMatches) {
    const packageJson = this.findPackageJsonInAnalysis(analysis);
    const actualDeps = packageJson
      ? Object.keys(packageJson.dependencies || {})
      : [];
    npmInstallMatches.forEach((match: string) => {
      const packages = match.replace("npm install", "").trim().split(/\s+/);
      packages.forEach((pkg: string) => {
        if (!pkg || pkg.startsWith("-")) return; // skip flags like --save-dev
        // Fixed: strip a trailing version specifier ("lodash@^4.0.0") while
        // preserving the scope marker of scoped packages
        // ("@types/node@18" -> "@types/node"); previously versioned
        // installs never matched actualDeps and were falsely flagged.
        const name = pkg.replace(/(?!^)@.*$/, "");
        if (!actualDeps.includes(name)) {
          issues.push({
            type: "incorrect-dependency",
            description: `npm install command includes '${name}' which is not in package.json`,
            severity: "medium",
            suggestedFix: `Remove '${name}' or add it to dependencies`,
          });
        }
      });
    });
  }

  // Check for outdated import syntax.
  if (
    code.includes("require(") &&
    analysis.metadata?.primaryLanguage === "TypeScript"
  ) {
    issues.push({
      type: "outdated-syntax",
      description: "Using require() syntax in TypeScript project",
      severity: "low",
      suggestedFix: "Update to ES6 import syntax",
    });
  }
  return issues;
}
/**
 * Build update recommendations from gaps, outdated sections, and accuracy
 * issues, filtered by the configured update strategy and returned sorted
 * by descending confidence.
 */
private async generateUpdateRecommendations(
  comparison: CodeDocumentationComparison,
  _options: UpdateOptions,
): Promise<UpdateRecommendation[]> {
  const recommendations: UpdateRecommendation[] = [];

  // Gaps: always act on critical/high; medium only outside conservative mode.
  for (const gap of comparison.gaps) {
    const actionable =
      gap.severity === "critical" ||
      gap.severity === "high" ||
      (gap.severity === "medium" &&
        _options.updateStrategy !== "conservative");
    if (actionable) {
      recommendations.push(
        await this.generateGapRecommendation(gap, _options),
      );
    }
  }

  // Outdated sections are always worth recommending.
  for (const outdated of comparison.outdatedSections) {
    recommendations.push(
      await this.generateOutdatedRecommendation(outdated, _options),
    );
  }

  // Accuracy issues: skip low severity unless in aggressive mode.
  for (const issue of comparison.accuracyIssues) {
    if (issue.severity !== "low" || _options.updateStrategy === "aggressive") {
      recommendations.push(
        await this.generateAccuracyRecommendation(issue, _options),
      );
    }
  }

  // Highest-confidence suggestions first.
  return recommendations.sort((a, b) => b.confidence - a.confidence);
}
/** Create a recommendation that fills a detected documentation gap. */
private async generateGapRecommendation(
  gap: DocumentationGap,
  _options: UpdateOptions,
): Promise<UpdateRecommendation> {
  const evidence = gap.memoryEvidence || [];
  const patterns = this.memoryInsights?.successfulUpdatePatterns || [];
  return {
    section: gap.location,
    // A missing item has no existing content to show.
    currentContent: "",
    suggestedContent: this.generateContentForGap(gap, patterns),
    reasoning: `${gap.description}. ${evidence.length} similar projects had similar gaps.`,
    memoryEvidence: evidence,
    confidence: this.calculateGapConfidence(gap, evidence),
    effort: this.estimateGapEffort(gap),
  };
}
/**
 * Produce suggested content for a gap, preferring a learned pattern of the
 * same gap type; falls back to the gap's own suggested update text.
 */
private generateContentForGap(
  gap: DocumentationGap,
  patterns: any[],
): string {
  const candidates = patterns.filter((p) => p.gapType === gap.type);
  return candidates.length > 0
    ? this.adaptPatternToGap(candidates[0], gap)
    : gap.suggestedUpdate;
}
/** Fill a learned content pattern's {feature}/{location} placeholders. */
private adaptPatternToGap(pattern: any, gap: DocumentationGap): string {
  const template = pattern.template || pattern.content || gap.suggestedUpdate;
  return template
    .replace(/\{feature\}/g, gap.description)
    .replace(/\{location\}/g, gap.location);
}
/**
 * Score confidence for a gap recommendation: base 0.5, plus a severity
 * bonus, plus up to 0.3 for supporting memory evidence, capped at 1.0.
 */
private calculateGapConfidence(
  gap: DocumentationGap,
  evidence: any[],
): number {
  const severityBonus: Record<string, number> = {
    critical: 0.4,
    high: 0.3,
    medium: 0.2,
    low: 0.1,
  };
  // Unknown severities contribute no bonus, matching the switch's
  // fall-through behavior.
  const base = 0.5 + (severityBonus[gap.severity] ?? 0);
  const evidenceBonus = Math.min(evidence.length * 0.1, 0.3);
  return Math.min(base + evidenceBonus, 1.0);
}
/** Rough implementation-effort estimate for addressing a gap. */
private estimateGapEffort(gap: DocumentationGap): "low" | "medium" | "high" {
  if (gap.type === "missing") {
    // Writing brand-new critical docs is the most expensive case.
    return gap.severity === "critical" ? "high" : "medium";
  }
  if (gap.type === "outdated" || gap.type === "incomplete") {
    return "low";
  }
  // "incorrect" and any unrecognized type.
  return "medium";
}
/**
 * Convert an outdated-section finding into an UpdateRecommendation.
 * Refreshing an existing stale section is assumed to be low effort.
 */
private async generateOutdatedRecommendation(
  outdated: any,
  _options: UpdateOptions,
): Promise<UpdateRecommendation> {
  return {
    section: outdated.location,
    currentContent: outdated.section,
    suggestedContent: outdated.suggestedUpdate,
    reasoning: outdated.reason,
    memoryEvidence: [],
    confidence: outdated.confidence || 0.8, // default when detector gave none
    effort: "low",
  };
}
/**
 * Convert a code-accuracy issue into an UpdateRecommendation.
 * Confidence and effort scale with the issue's severity.
 */
private async generateAccuracyRecommendation(
  issue: any,
  _options: UpdateOptions,
): Promise<UpdateRecommendation> {
  return {
    section: issue.location,
    currentContent: "Code block with accuracy issues",
    suggestedContent: issue.suggestedFix,
    reasoning: issue.description,
    memoryEvidence: [],
    confidence: issue.severity === "high" ? 0.9 : 0.7,
    effort: issue.severity === "high" ? "medium" : "low",
  };
}
/**
 * Summarize comparison and recommendation results into report metrics.
 *
 * @returns { gapsDetected, recommendationsGenerated, confidenceScore,
 *            estimatedEffort }
 */
private calculateUpdateMetrics(
  comparison: CodeDocumentationComparison,
  recommendations: UpdateRecommendation[],
): any {
  const totalGaps = comparison.gaps.length;
  const totalRecommendations = recommendations.length;
  // `|| 0` converts the NaN from dividing by an empty list's length.
  const avgConfidence =
    recommendations.reduce((sum, r) => sum + r.confidence, 0) /
      recommendations.length || 0;
  const effortCounts = recommendations.reduce(
    (acc, r) => {
      acc[r.effort] = (acc[r.effort] || 0) + 1;
      return acc;
    },
    {} as Record<string, number>,
  );
  // Fixed: missing buckets were previously compared as `undefined`, so e.g.
  // `2 > undefined` was false and an all-medium workload reported "low".
  const high = effortCounts.high || 0;
  const medium = effortCounts.medium || 0;
  const low = effortCounts.low || 0;
  let estimatedEffort = "low";
  if (high > 0) estimatedEffort = "high";
  else if (medium > low) estimatedEffort = "medium";
  return {
    gapsDetected: totalGaps,
    recommendationsGenerated: totalRecommendations,
    confidenceScore: Math.round(avgConfidence * 100) / 100,
    estimatedEffort,
  };
}
/**
 * Build an ordered, human-readable action list from the comparison results,
 * highest-impact items first, plus standing review/test reminders.
 */
private generateMemoryInformedNextSteps(
  comparison: CodeDocumentationComparison,
  recommendations: UpdateRecommendation[],
): string[] {
  const steps: string[] = [];
  const criticalCount = comparison.gaps.filter(
    (g) => g.severity === "critical",
  ).length;
  const highConfidenceCount = recommendations.filter(
    (r) => r.confidence > 0.8,
  ).length;
  if (criticalCount > 0) {
    steps.push(
      `Address ${criticalCount} critical documentation gaps immediately`,
    );
  }
  if (highConfidenceCount > 0) {
    steps.push(
      `Implement ${highConfidenceCount} high-confidence recommendations first`,
    );
  }
  if (comparison.accuracyIssues.length > 0) {
    steps.push(
      `Fix ${comparison.accuracyIssues.length} code accuracy issues in documentation`,
    );
  }
  // Standing advice that applies regardless of findings.
  steps.push(
    "Review and validate all recommended changes before implementation",
  );
  steps.push("Test updated code examples to ensure they work correctly");
  const similarProjectCount =
    this.memoryInsights?.similarProjects?.length || 0;
  if (similarProjectCount > 0) {
    steps.push(
      `Leverage patterns from ${similarProjectCount} similar projects for additional improvements`,
    );
  }
  return steps;
}
}
// Export the MCP tool definition (name, description, JSON Schema contract).
export const updateExistingDocumentation: Tool = {
  name: "update_existing_documentation",
  description:
    "Intelligently analyze and update existing documentation using memory insights and code comparison",
  inputSchema: {
    type: "object",
    properties: {
      analysisId: {
        type: "string",
        description: "Repository analysis ID from analyze_repository tool",
      },
      docsPath: {
        type: "string",
        description: "Path to existing documentation directory",
      },
      compareMode: {
        type: "string",
        enum: ["comprehensive", "gap-detection", "accuracy-check"],
        default: "comprehensive",
        description: "Mode of comparison between code and documentation",
      },
      updateStrategy: {
        type: "string",
        enum: ["conservative", "moderate", "aggressive"],
        default: "moderate",
        description: "How aggressively to suggest updates",
      },
      preserveStyle: {
        type: "boolean",
        default: true,
        description: "Preserve existing documentation style and formatting",
      },
      focusAreas: {
        type: "array",
        items: { type: "string" },
        description:
          'Specific areas to focus updates on (e.g., "dependencies", "scripts", "api")',
      },
    },
    // Only the analysis ID and docs location are mandatory; everything
    // else has a sensible default above.
    required: ["analysisId", "docsPath"],
  },
};
/**
 * MCP handler entry point: runs the full update analysis on a fresh
 * DocumentationUpdateEngine instance.
 */
export async function handleUpdateExistingDocumentation(
  args: any,
): Promise<UpdateResult> {
  const engine = new DocumentationUpdateEngine();
  return engine.updateExistingDocumentation(args);
}
```
--------------------------------------------------------------------------------
/src/memory/visualization.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Memory Visualization Interface for DocuMCP
* Generate visual representations of memory data, patterns, and insights
*/
import { EventEmitter } from "events";
import { MemoryEntry, JSONLStorage } from "./storage.js";
import { MemoryManager } from "./manager.js";
import { IncrementalLearningSystem } from "./learning.js";
import { KnowledgeGraph } from "./knowledge-graph.js";
import { TemporalMemoryAnalysis } from "./temporal-analysis.js";
/** Rendering options shared by all generated charts. */
export interface VisualizationConfig {
  width: number; // pixels
  height: number; // pixels
  theme: "light" | "dark" | "auto";
  colorScheme: string[]; // ordered palette of hex colors
  interactive: boolean;
  exportFormat: "svg" | "png" | "json" | "html";
  responsive: boolean;
}
/** A single renderable chart plus metadata describing how it was built. */
export interface ChartData {
  type:
    | "line"
    | "bar"
    | "scatter"
    | "heatmap"
    | "network"
    | "sankey"
    | "treemap"
    | "timeline";
  title: string;
  description: string;
  // Shape depends on `type`: Chart.js-style dataset/options objects for
  // line/bar, NetworkVisualization for "network", HeatmapVisualization
  // for "heatmap", etc.
  data: any;
  config: Partial<VisualizationConfig>;
  metadata: {
    generated: Date;
    dataPoints: number; // count of underlying entries/items
    timeRange?: { start: Date; end: Date };
    filters?: Record<string, any>;
  };
}
/** Aggregated dashboard payload: a set of charts plus a summary block. */
export interface DashboardData {
  title: string;
  description: string;
  charts: ChartData[];
  summary: {
    totalEntries: number;
    timeRange: { start: Date; end: Date };
    keyInsights: string[]; // short human-readable highlights (top 5)
    healthScore: number; // system health estimate; higher is healthier
  };
  generated: Date;
}
/** Node/edge data for rendering the knowledge graph as a network. */
export interface NetworkVisualization {
  nodes: Array<{
    id: string;
    label: string;
    group: string; // node type, used for visual grouping
    size: number;
    color: string;
    metadata: any;
  }>;
  edges: Array<{
    source: string; // node id
    target: string; // node id
    weight: number;
    type: string;
    color: string;
    metadata: any;
  }>;
  layout: "force" | "circular" | "hierarchical" | "grid";
  clustering: boolean;
}
/** Matrix data for heatmaps; data[y][x] aligns with labels.y / labels.x. */
export interface HeatmapVisualization {
  data: number[][];
  labels: {
    x: string[];
    y: string[];
  };
  colorScale: {
    min: number;
    max: number;
    colors: string[]; // gradient stops from min to max
  };
  title: string;
  description: string;
}
/** Event-stream data for timeline charts. */
export interface TimelineVisualization {
  events: Array<{
    id: string;
    timestamp: Date;
    title: string;
    description: string;
    type: string;
    importance: number;
    color: string;
    metadata: any;
  }>;
  timeRange: { start: Date; end: Date };
  granularity: "hour" | "day" | "week" | "month";
  groupBy?: string; // optional event attribute to group by
}
/**
 * Generates charts, dashboards, and exports from DocuMCP memory data.
 * Emits progress/error events such as "dashboard_generated" and
 * "export_error".
 */
export class MemoryVisualizationSystem extends EventEmitter {
  private storage: JSONLStorage;
  private manager: MemoryManager;
  private learningSystem: IncrementalLearningSystem;
  private knowledgeGraph: KnowledgeGraph;
  private temporalAnalysis: TemporalMemoryAnalysis;
  private defaultConfig: VisualizationConfig; // fallback rendering options
  // Cache of generated charts; not read in this section — confirm usage elsewhere.
  private visualizationCache: Map<string, ChartData>;
/**
 * Wire up the memory subsystems this visualizer reads from and install
 * the default chart configuration (800x600, light theme, SVG export).
 */
constructor(
  storage: JSONLStorage,
  manager: MemoryManager,
  learningSystem: IncrementalLearningSystem,
  knowledgeGraph: KnowledgeGraph,
  temporalAnalysis: TemporalMemoryAnalysis,
) {
  super();
  this.storage = storage;
  this.manager = manager;
  this.learningSystem = learningSystem;
  this.knowledgeGraph = knowledgeGraph;
  this.temporalAnalysis = temporalAnalysis;
  this.visualizationCache = new Map();
  this.defaultConfig = {
    width: 800,
    height: 600,
    theme: "light",
    colorScheme: [
      "#3B82F6", // Blue
      "#10B981", // Green
      "#F59E0B", // Yellow
      "#EF4444", // Red
      "#8B5CF6", // Purple
      "#06B6D4", // Cyan
      "#F97316", // Orange
      "#84CC16", // Lime
    ],
    interactive: true,
    exportFormat: "svg",
    responsive: true,
  };
}
/**
 * Generate comprehensive dashboard
 *
 * Builds every requested chart (or all charts when `includeCharts` is
 * omitted), then attaches summary statistics, key insights, and a health
 * score. Emits "dashboard_generation_started" / "dashboard_generated" on
 * the way, or "dashboard_error" before rethrowing on failure.
 */
async generateDashboard(options?: {
  timeRange?: { start: Date; end: Date };
  includeCharts?: string[];
  config?: Partial<VisualizationConfig>;
}): Promise<DashboardData> {
  const timeRange = options?.timeRange || this.getDefaultTimeRange();
  const config = { ...this.defaultConfig, ...options?.config };
  this.emit("dashboard_generation_started", { timeRange });
  try {
    const charts: ChartData[] = [];
    // Each chart below is included when no includeCharts filter is given,
    // or when its keyword appears in the filter.
    // Activity Timeline
    if (
      !options?.includeCharts ||
      options.includeCharts.includes("activity")
    ) {
      charts.push(await this.generateActivityTimeline(timeRange, config));
    }
    // Memory Type Distribution
    if (
      !options?.includeCharts ||
      options.includeCharts.includes("distribution")
    ) {
      charts.push(
        await this.generateMemoryTypeDistribution(timeRange, config),
      );
    }
    // Success Rate Trends
    if (
      !options?.includeCharts ||
      options.includeCharts.includes("success")
    ) {
      charts.push(await this.generateSuccessRateTrends(timeRange, config));
    }
    // Knowledge Graph Network
    if (
      !options?.includeCharts ||
      options.includeCharts.includes("network")
    ) {
      charts.push(await this.generateKnowledgeGraphVisualization(config));
    }
    // Learning Patterns Heatmap
    if (
      !options?.includeCharts ||
      options.includeCharts.includes("learning")
    ) {
      charts.push(await this.generateLearningPatternsHeatmap(config));
    }
    // Temporal Patterns
    if (
      !options?.includeCharts ||
      options.includeCharts.includes("temporal")
    ) {
      charts.push(
        await this.generateTemporalPatternsChart(timeRange, config),
      );
    }
    // Project Correlation Matrix
    if (
      !options?.includeCharts ||
      options.includeCharts.includes("correlation")
    ) {
      charts.push(
        await this.generateProjectCorrelationMatrix(timeRange, config),
      );
    }
    // Get summary data
    const entries = await this.getEntriesInTimeRange(timeRange);
    const keyInsights = await this.generateKeyInsights(entries, timeRange);
    const healthScore = await this.calculateSystemHealthScore(entries);
    const dashboard: DashboardData = {
      title: "DocuMCP Memory System Dashboard",
      description: `Comprehensive overview of memory system activity from ${timeRange.start.toLocaleDateString()} to ${timeRange.end.toLocaleDateString()}`,
      charts,
      summary: {
        totalEntries: entries.length,
        timeRange,
        keyInsights,
        healthScore,
      },
      generated: new Date(),
    };
    this.emit("dashboard_generated", {
      charts: charts.length,
      entries: entries.length,
      timeRange,
    });
    return dashboard;
  } catch (error) {
    this.emit("dashboard_error", {
      error: error instanceof Error ? error.message : String(error),
    });
    throw error;
  }
}
/**
 * Generate activity timeline chart
 *
 * Builds a daily line chart of total entries vs. successful entries.
 * Data points are sorted chronologically (fixed: points previously
 * followed Map insertion order — i.e. storage order — which is not
 * guaranteed to be chronological; generateSuccessRateTrends already
 * sorted, so this makes the two charts consistent).
 */
async generateActivityTimeline(
  timeRange: { start: Date; end: Date },
  config: Partial<VisualizationConfig>,
): Promise<ChartData> {
  const entries = await this.getEntriesInTimeRange(timeRange);
  // Group entries by day (YYYY-MM-DD keys).
  const dailyData = new Map<string, number>();
  const successData = new Map<string, number>();
  for (const entry of entries) {
    const day = entry.timestamp.slice(0, 10); // YYYY-MM-DD
    dailyData.set(day, (dailyData.get(day) || 0) + 1);
    if (entry.data.outcome === "success" || entry.data.success === true) {
      successData.set(day, (successData.get(day) || 0) + 1);
    }
  }
  // Convert a day->count map into chronologically sorted {x, y} points.
  const toSeries = (counts: Map<string, number>) =>
    Array.from(counts.entries())
      .map(([date, count]) => ({ x: date, y: count }))
      .sort((a, b) => a.x.localeCompare(b.x));
  const datasets = [
    {
      label: "Total Activity",
      data: toSeries(dailyData),
      borderColor: config.colorScheme?.[0] || "#3B82F6",
      backgroundColor: config.colorScheme?.[0] || "#3B82F6",
      fill: false,
    },
    {
      label: "Successful Activities",
      data: toSeries(successData),
      borderColor: config.colorScheme?.[1] || "#10B981",
      backgroundColor: config.colorScheme?.[1] || "#10B981",
      fill: false,
    },
  ];
  return {
    type: "line",
    title: "Memory Activity Timeline",
    description:
      "Daily memory system activity showing total entries and successful outcomes",
    data: {
      datasets,
      options: {
        responsive: config.responsive,
        plugins: {
          title: {
            display: true,
            text: "Memory Activity Over Time",
          },
          legend: {
            display: true,
            position: "top",
          },
        },
        scales: {
          x: {
            type: "time",
            time: {
              unit: "day",
            },
            title: {
              display: true,
              text: "Date",
            },
          },
          y: {
            title: {
              display: true,
              text: "Number of Entries",
            },
          },
        },
      },
    },
    config,
    metadata: {
      generated: new Date(),
      dataPoints: entries.length,
      timeRange,
      filters: { type: "activity_timeline" },
    },
  };
}
/**
 * Generate memory type distribution chart
 *
 * Counts entries per memory type within the time range and renders a bar
 * chart, most common type first.
 */
async generateMemoryTypeDistribution(
  timeRange: { start: Date; end: Date },
  config: Partial<VisualizationConfig>,
): Promise<ChartData> {
  const entries = await this.getEntriesInTimeRange(timeRange);
  // Count entries by type
  const typeCounts = new Map<string, number>();
  for (const entry of entries) {
    typeCounts.set(entry.type, (typeCounts.get(entry.type) || 0) + 1);
  }
  // Sort by count (descending)
  const sortedTypes = Array.from(typeCounts.entries()).sort(
    ([, a], [, b]) => b - a,
  );
  const data = {
    labels: sortedTypes.map(([type]) => type),
    datasets: [
      {
        data: sortedTypes.map(([, count]) => count),
        backgroundColor: config.colorScheme || this.defaultConfig.colorScheme,
        // Borders are a darkened version of each fill color.
        borderColor:
          config.colorScheme?.map((c) => this.darkenColor(c)) ||
          this.defaultConfig.colorScheme.map((c) => this.darkenColor(c)),
        borderWidth: 2,
      },
    ],
  };
  return {
    type: "bar",
    title: "Memory Type Distribution",
    description: "Distribution of memory entries by type",
    data: {
      ...data,
      options: {
        responsive: config.responsive,
        plugins: {
          title: {
            display: true,
            text: "Memory Entry Types",
          },
          legend: {
            display: false,
          },
        },
        scales: {
          y: {
            beginAtZero: true,
            title: {
              display: true,
              text: "Number of Entries",
            },
          },
          x: {
            title: {
              display: true,
              text: "Memory Type",
            },
          },
        },
      },
    },
    config,
    metadata: {
      generated: new Date(),
      dataPoints: entries.length,
      timeRange,
      filters: { type: "type_distribution" },
    },
  };
}
/**
 * Generate success rate trends chart
 *
 * Buckets entries into ISO-style weeks, computes each week's success
 * percentage, and renders a chronologically sorted line chart with
 * total/successful counts shown in tooltips.
 */
async generateSuccessRateTrends(
  timeRange: { start: Date; end: Date },
  config: Partial<VisualizationConfig>,
): Promise<ChartData> {
  const entries = await this.getEntriesInTimeRange(timeRange);
  // Group by week and calculate success rates
  const weeklyData = new Map<string, { total: number; successful: number }>();
  for (const entry of entries) {
    const week = this.getWeekKey(new Date(entry.timestamp));
    const current = weeklyData.get(week) || { total: 0, successful: 0 };
    current.total++;
    // An entry counts as successful under either outcome convention.
    if (entry.data.outcome === "success" || entry.data.success === true) {
      current.successful++;
    }
    weeklyData.set(week, current);
  }
  // Calculate success rates as percentages, sorted by week key.
  const data = Array.from(weeklyData.entries())
    .map(([week, stats]) => ({
      x: week,
      y: stats.total > 0 ? (stats.successful / stats.total) * 100 : 0,
      total: stats.total,
      successful: stats.successful,
    }))
    .sort((a, b) => a.x.localeCompare(b.x));
  return {
    type: "line",
    title: "Success Rate Trends",
    description: "Weekly success rate trends for memory system operations",
    data: {
      datasets: [
        {
          label: "Success Rate (%)",
          data: data,
          borderColor: config.colorScheme?.[1] || "#10B981",
          backgroundColor: config.colorScheme?.[1] || "#10B981",
          fill: false,
          tension: 0.1,
        },
      ],
      options: {
        responsive: config.responsive,
        plugins: {
          title: {
            display: true,
            text: "Success Rate Over Time",
          },
          tooltip: {
            callbacks: {
              // Show the raw counts behind each percentage point.
              afterBody: (context: any) => {
                const point = data[context[0].dataIndex];
                return `Total: ${point.total}, Successful: ${point.successful}`;
              },
            },
          },
        },
        scales: {
          x: {
            title: {
              display: true,
              text: "Week",
            },
          },
          y: {
            beginAtZero: true,
            max: 100,
            title: {
              display: true,
              text: "Success Rate (%)",
            },
          },
        },
      },
    },
    config,
    metadata: {
      generated: new Date(),
      dataPoints: data.length,
      timeRange,
      filters: { type: "success_trends" },
    },
  };
}
/**
 * Generate knowledge graph network visualization
 *
 * Maps every knowledge-graph node and edge to display attributes
 * (size from node weight clamped to 10-30, colors derived from type)
 * using a force-directed layout with clustering enabled.
 */
async generateKnowledgeGraphVisualization(
  config: Partial<VisualizationConfig>,
): Promise<ChartData> {
  const allNodes = await this.knowledgeGraph.getAllNodes();
  const allEdges = await this.knowledgeGraph.getAllEdges();
  // Prepare network data
  const networkData: NetworkVisualization = {
    nodes: allNodes.map((node) => ({
      id: node.id,
      // Fall back to a truncated id when the node has no label.
      label: node.label || node.id.slice(0, 10),
      group: node.type,
      size: Math.max(10, Math.min(30, (node.weight || 1) * 10)),
      color: this.getColorForNodeType(node.type, config.colorScheme),
      metadata: node.properties,
    })),
    edges: allEdges.map((edge) => ({
      source: edge.source,
      target: edge.target,
      weight: edge.weight,
      type: edge.type,
      color: this.getColorForEdgeType(edge.type, config.colorScheme),
      metadata: edge.properties,
    })),
    layout: "force",
    clustering: true,
  };
  return {
    type: "network",
    title: "Knowledge Graph Network",
    description:
      "Interactive network visualization of memory relationships and connections",
    data: networkData,
    config,
    metadata: {
      generated: new Date(),
      dataPoints: allNodes.length + allEdges.length,
      filters: { type: "knowledge_graph" },
    },
  };
}
/**
 * Generate learning patterns heatmap
 *
 * Builds a co-occurrence matrix between framework-like and language-like
 * technologies found in learning patterns. Each cell is the overlap
 * coefficient |both| / min(|lang|, |framework|), in [0, 1].
 *
 * Performance fix: per-technology pattern lists were previously
 * re-filtered inside the languages x frameworks loop; they are now
 * computed once per technology.
 */
async generateLearningPatternsHeatmap(
  config: Partial<VisualizationConfig>,
): Promise<ChartData> {
  const patterns = await this.learningSystem.getPatterns();
  // Heuristic split of technologies into "framework" and "language" axes.
  const frameworks = [
    ...new Set(
      patterns
        .flatMap((p) => p.metadata.technologies || [])
        .filter(
          (t) =>
            t.includes("framework") ||
            t.includes("js") ||
            t.includes("react") ||
            t.includes("vue"),
        ),
    ),
  ];
  const languages = [
    ...new Set(
      patterns
        .flatMap((p) => p.metadata.technologies || [])
        .filter((t) => !t.includes("framework")),
    ),
  ];
  // Memoized lookup: patterns mentioning a given technology.
  const byTech = new Map<string, any[]>();
  const patternsFor = (tech: string): any[] => {
    let list = byTech.get(tech);
    if (!list) {
      list = patterns.filter((p) => p.metadata.technologies?.includes(tech));
      byTech.set(tech, list);
    }
    return list;
  };
  const heatmapData: number[][] = [];
  const labels = { x: frameworks, y: languages };
  for (const language of languages) {
    const langPatterns = patternsFor(language); // invariant per row
    const row: number[] = [];
    for (const framework of frameworks) {
      const frameworkPatterns = patternsFor(framework);
      const bothCount = langPatterns.filter((p) =>
        p.metadata.technologies?.includes(framework),
      ).length;
      const correlation =
        langPatterns.length > 0 && frameworkPatterns.length > 0
          ? bothCount /
            Math.min(langPatterns.length, frameworkPatterns.length)
          : 0;
      row.push(correlation);
    }
    heatmapData.push(row);
  }
  const heatmap: HeatmapVisualization = {
    data: heatmapData,
    labels,
    colorScale: {
      min: 0,
      max: 1,
      colors: ["#F3F4F6", "#93C5FD", "#3B82F6", "#1D4ED8", "#1E3A8A"],
    },
    title: "Language-Framework Learning Patterns",
    description:
      "Correlation matrix showing relationships between programming languages and frameworks in learning patterns",
  };
  return {
    type: "heatmap",
    title: "Learning Patterns Heatmap",
    description:
      "Visualization of learning pattern correlations across languages and frameworks",
    data: heatmap,
    config,
    metadata: {
      generated: new Date(),
      dataPoints: patterns.length,
      filters: { type: "learning_patterns" },
    },
  };
}
/**
 * Generate temporal patterns chart
 *
 * Runs the temporal-analysis engine over the given range (daily counts)
 * and renders a bar chart of each detected pattern's confidence, with
 * the pattern description shown in the tooltip.
 */
async generateTemporalPatternsChart(
  timeRange: { start: Date; end: Date },
  config: Partial<VisualizationConfig>,
): Promise<ChartData> {
  const patterns = await this.temporalAnalysis.analyzeTemporalPatterns({
    granularity: "day",
    aggregation: "count",
    timeRange: {
      start: timeRange.start,
      end: timeRange.end,
      duration: timeRange.end.getTime() - timeRange.start.getTime(),
      label: "Analysis Period",
    },
  });
  // Flatten each pattern to the fields the chart needs.
  const patternData = patterns.map((pattern) => ({
    type: pattern.type,
    confidence: pattern.confidence,
    description: pattern.description,
    dataPoints: pattern.dataPoints?.length || 0,
  }));
  const data = {
    labels: patternData.map((p) => p.type),
    datasets: [
      {
        label: "Pattern Confidence",
        // Confidence is stored as 0-1; displayed as a percentage.
        data: patternData.map((p) => p.confidence * 100),
        backgroundColor: config.colorScheme || this.defaultConfig.colorScheme,
        borderColor:
          config.colorScheme?.map((c) => this.darkenColor(c)) ||
          this.defaultConfig.colorScheme.map((c) => this.darkenColor(c)),
        borderWidth: 2,
      },
    ],
  };
  return {
    type: "bar",
    title: "Temporal Patterns Analysis",
    description:
      "Confidence levels of detected temporal patterns in memory activity",
    data: {
      ...data,
      options: {
        responsive: config.responsive,
        plugins: {
          title: {
            display: true,
            text: "Detected Temporal Patterns",
          },
          tooltip: {
            callbacks: {
              // Show the pattern's description under the tooltip body.
              afterBody: (context: any) => {
                const pattern = patternData[context[0].dataIndex];
                return pattern.description;
              },
            },
          },
        },
        scales: {
          y: {
            beginAtZero: true,
            max: 100,
            title: {
              display: true,
              text: "Confidence (%)",
            },
          },
          x: {
            title: {
              display: true,
              text: "Pattern Type",
            },
          },
        },
      },
    },
    config,
    metadata: {
      generated: new Date(),
      dataPoints: patterns.length,
      timeRange,
      filters: { type: "temporal_patterns" },
    },
  };
}
/**
 * Generate project correlation matrix
 *
 * Builds a symmetric heatmap of pairwise project correlations (shared
 * languages/frameworks) for up to 10 projects seen in the time range.
 * The diagonal is fixed at 1.0 (self-correlation).
 */
async generateProjectCorrelationMatrix(
  timeRange: { start: Date; end: Date },
  config: Partial<VisualizationConfig>,
): Promise<ChartData> {
  const entries = await this.getEntriesInTimeRange(timeRange);
  // Extract unique projects, dropping entries with no project identifier.
  const projects = [
    ...new Set(
      entries
        .map((e) => e.data.projectPath || e.data.projectId || "Unknown")
        .filter((p) => p !== "Unknown"),
    ),
  ].slice(0, 10); // Limit to top 10
  // Calculate correlation matrix
  const correlationMatrix: number[][] = [];
  for (const project1 of projects) {
    const row: number[] = [];
    for (const project2 of projects) {
      if (project1 === project2) {
        row.push(1.0);
      } else {
        const correlation = this.calculateProjectCorrelation(
          entries,
          project1,
          project2,
        );
        row.push(correlation);
      }
    }
    correlationMatrix.push(row);
  }
  const heatmap: HeatmapVisualization = {
    data: correlationMatrix,
    labels: { x: projects, y: projects },
    colorScale: {
      // Scale spans -1..1, though calculateProjectCorrelation only
      // produces values in [0, 1].
      min: -1,
      max: 1,
      colors: ["#EF4444", "#F59E0B", "#F3F4F6", "#10B981", "#059669"],
    },
    title: "Project Correlation Matrix",
    description:
      "Correlation matrix showing relationships between different projects based on memory patterns",
  };
  return {
    type: "heatmap",
    title: "Project Correlations",
    description:
      "Visualization of correlations between different projects in the memory system",
    data: heatmap,
    config,
    metadata: {
      generated: new Date(),
      dataPoints: projects.length * projects.length,
      timeRange,
      filters: { type: "project_correlation" },
    },
  };
}
/**
 * Generate custom visualization
 *
 * Loads entries for the requested (or default) time range, applies any
 * attribute filters, then dispatches to the generator for the requested
 * chart type. Throws for unsupported types.
 */
async generateCustomVisualization(
  type: ChartData["type"],
  query: {
    filters?: Record<string, any>;
    timeRange?: { start: Date; end: Date };
    aggregation?: string;
    groupBy?: string;
  },
  config?: Partial<VisualizationConfig>,
): Promise<ChartData> {
  const activeConfig = { ...this.defaultConfig, ...config };
  const timeRange = query.timeRange || this.getDefaultTimeRange();
  const unfiltered = await this.getEntriesInTimeRange(timeRange);
  const entries = query.filters
    ? this.applyFilters(unfiltered, query.filters)
    : unfiltered;
  if (type === "timeline") {
    return this.generateTimelineVisualization(entries, query, activeConfig);
  }
  if (type === "scatter") {
    return this.generateScatterPlot(entries, query, activeConfig);
  }
  if (type === "treemap") {
    return this.generateTreemapVisualization(entries, query, activeConfig);
  }
  if (type === "sankey") {
    return this.generateSankeyDiagram(entries, query, activeConfig);
  }
  throw new Error(`Unsupported visualization type: ${type}`);
}
/**
 * Export visualization to specified format
 *
 * Serializes a chart as JSON, or delegates to the HTML/SVG renderers.
 * PNG is intentionally unsupported (would need a headless renderer such
 * as Puppeteer). Emits "export_started" / "export_error" events; errors
 * are rethrown after being reported.
 */
async exportVisualization(
  chartData: ChartData,
  format: "svg" | "png" | "json" | "html" = "json",
  options?: {
    filename?: string;
    quality?: number;
    width?: number;
    height?: number;
  },
): Promise<string | Buffer> {
  this.emit("export_started", { type: chartData.type, format });
  try {
    switch (format) {
      case "json":
        return JSON.stringify(chartData, null, 2);
      case "html":
        return this.generateHTMLVisualization(chartData, options);
      case "svg":
        return this.generateSVGVisualization(chartData, options);
      case "png":
        // This would require a rendering library like Puppeteer
        throw new Error(
          "PNG export requires additional rendering capabilities",
        );
      default:
        throw new Error(`Unsupported export format: ${format}`);
    }
  } catch (error) {
    this.emit("export_error", {
      error: error instanceof Error ? error.message : String(error),
    });
    throw error;
  }
}
/**
* Helper methods
*/
/** Load all stored entries whose timestamp lies inside [start, end]. */
private async getEntriesInTimeRange(timeRange: {
  start: Date;
  end: Date;
}): Promise<MemoryEntry[]> {
  const { start, end } = timeRange;
  const allEntries = await this.storage.getAll();
  return allEntries.filter((entry) => {
    const when = new Date(entry.timestamp);
    return when >= start && when <= end;
  });
}
/** Default analysis window: the trailing 30 days ending now. */
private getDefaultTimeRange(): { start: Date; end: Date } {
  const THIRTY_DAYS_MS = 30 * 24 * 60 * 60 * 1000;
  const end = new Date();
  return { start: new Date(end.getTime() - THIRTY_DAYS_MS), end };
}
/**
 * Format a date as a "YYYY-Www" week bucket key.
 * NOTE(review): pairs the calendar year with the ISO week number, which
 * can mismatch in the first/last days of a year — confirm if that matters
 * for bucketing here.
 */
private getWeekKey(date: Date): string {
  const week = String(this.getWeekNumber(date)).padStart(2, "0");
  return `${date.getFullYear()}-W${week}`;
}
/**
 * ISO-8601 week number (1-53) for the given date.
 * Uses the standard trick of shifting to the Thursday of the current week
 * (an ISO week belongs to the year containing its Thursday) and counting
 * whole weeks since Jan 1, all in UTC to avoid timezone/DST artifacts.
 */
private getWeekNumber(date: Date): number {
  const d = new Date(
    Date.UTC(date.getFullYear(), date.getMonth(), date.getDate()),
  );
  const dayNum = d.getUTCDay() || 7; // Sunday (0) becomes 7 per ISO numbering
  d.setUTCDate(d.getUTCDate() + 4 - dayNum); // move to this week's Thursday
  const yearStart = new Date(Date.UTC(d.getUTCFullYear(), 0, 1));
  return Math.ceil(((d.getTime() - yearStart.getTime()) / 86400000 + 1) / 7);
}
/** Deterministically pick a palette color for a node type. */
private getColorForNodeType(type: string, colorScheme?: string[]): string {
  const palette = colorScheme || this.defaultConfig.colorScheme;
  // Hash on the first character so the same type always maps to one color.
  return palette[type.charCodeAt(0) % palette.length];
}
/** Map well-known edge types to fixed palette slots; others get colors[4]. */
private getColorForEdgeType(type: string, colorScheme?: string[]): string {
  const palette = colorScheme || this.defaultConfig.colorScheme;
  switch (type) {
    case "similarity":
      return palette[0];
    case "dependency":
      return palette[1];
    case "temporal":
      return palette[2];
    case "causal":
      return palette[3];
    default:
      return palette[4];
  }
}
/**
 * Darken a hex color by subtracting 40 from each RGB channel (floored
 * at 0). Handles "#RRGGBB" and shorthand "#RGB"; any other format is
 * returned unchanged. In production, use a proper color library.
 */
private darkenColor(color: string): string {
  if (!color.startsWith("#")) {
    return color;
  }
  let hex = color.slice(1);
  // Fixed: 3-digit shorthand ("#abc") was previously parsed as a 6-digit
  // value, producing wrong channels; expand it to "aabbcc" first.
  if (hex.length === 3) {
    hex = hex
      .split("")
      .map((c) => c + c)
      .join("");
  }
  const num = parseInt(hex, 16);
  const r = Math.max(0, (num >> 16) - 40);
  const g = Math.max(0, ((num >> 8) & 0x00ff) - 40);
  const b = Math.max(0, (num & 0x0000ff) - 40);
  return `#${((r << 16) | (g << 8) | b).toString(16).padStart(6, "0")}`;
}
/**
 * Jaccard-style similarity between two projects based on the languages
 * and frameworks seen in their entries: shared features / union of
 * features, yielding a value in [0, 1]. Returns 0 when either project
 * has no entries.
 */
private calculateProjectCorrelation(
  entries: MemoryEntry[],
  project1: string,
  project2: string,
): number {
  const entries1 = entries.filter(
    (e) =>
      e.data.projectPath?.includes(project1) || e.data.projectId === project1,
  );
  const entries2 = entries.filter(
    (e) =>
      e.data.projectPath?.includes(project2) || e.data.projectId === project2,
  );
  if (entries1.length === 0 || entries2.length === 0) return 0;
  // Simple correlation based on shared characteristics
  let sharedFeatures = 0;
  let totalFeatures = 0;
  // Compare languages
  const lang1 = new Set(entries1.map((e) => e.data.language).filter(Boolean));
  const lang2 = new Set(entries2.map((e) => e.data.language).filter(Boolean));
  const sharedLangs = new Set([...lang1].filter((l) => lang2.has(l)));
  sharedFeatures += sharedLangs.size;
  totalFeatures += new Set([...lang1, ...lang2]).size;
  // Compare frameworks
  const fw1 = new Set(entries1.map((e) => e.data.framework).filter(Boolean));
  const fw2 = new Set(entries2.map((e) => e.data.framework).filter(Boolean));
  const sharedFws = new Set([...fw1].filter((f) => fw2.has(f)));
  sharedFeatures += sharedFws.size;
  totalFeatures += new Set([...fw1, ...fw2]).size;
  return totalFeatures > 0 ? sharedFeatures / totalFeatures : 0;
}
/**
 * Keep only entries matching every supplied filter. Recognized keys:
 * type (string or string[]), outcome, language, framework, project
 * (substring of projectPath or exact projectId), and tags (any overlap).
 * Unknown filter keys are ignored.
 */
private applyFilters(
  entries: MemoryEntry[],
  filters: Record<string, any>,
): MemoryEntry[] {
  const matches = (entry: MemoryEntry, key: string, value: any): boolean => {
    if (key === "type") {
      if (Array.isArray(value)) return value.includes(entry.type);
      if (typeof value === "string") return entry.type === value;
      return true; // unrecognized value shape: no filtering
    }
    if (key === "outcome") return entry.data.outcome === value;
    if (key === "language") return entry.data.language === value;
    if (key === "framework") return entry.data.framework === value;
    if (key === "project") {
      return (
        Boolean(entry.data.projectPath?.includes(value)) ||
        entry.data.projectId === value
      );
    }
    if (key === "tags") {
      if (!Array.isArray(value)) return true;
      return value.some((tag) => entry.tags?.includes(tag));
    }
    return true; // unknown keys do not filter
  };
  return entries.filter((entry) =>
    Object.entries(filters).every(([key, value]) =>
      matches(entry, key, value),
    ),
  );
}
private async generateKeyInsights(
entries: MemoryEntry[],
timeRange: { start: Date; end: Date },
): Promise<string[]> {
const insights: string[] = [];
// Activity insight
const dailyAverage =
entries.length /
Math.max(
1,
Math.ceil(
(timeRange.end.getTime() - timeRange.start.getTime()) /
(24 * 60 * 60 * 1000),
),
);
insights.push(`Average ${dailyAverage.toFixed(1)} entries per day`);
// Success rate insight
const successful = entries.filter(
(e) => e.data.outcome === "success" || e.data.success === true,
).length;
const successRate =
entries.length > 0 ? (successful / entries.length) * 100 : 0;
insights.push(`${successRate.toFixed(1)}% success rate`);
// Most common type
const typeCounts = new Map<string, number>();
entries.forEach((e) =>
typeCounts.set(e.type, (typeCounts.get(e.type) || 0) + 1),
);
const mostCommonType = Array.from(typeCounts.entries()).sort(
([, a], [, b]) => b - a,
)[0];
if (mostCommonType) {
insights.push(
`Most common activity: ${mostCommonType[0]} (${mostCommonType[1]} entries)`,
);
}
// Growth trend
const midpoint = new Date(
(timeRange.start.getTime() + timeRange.end.getTime()) / 2,
);
const firstHalf = entries.filter(
(e) => new Date(e.timestamp) < midpoint,
).length;
const secondHalf = entries.filter(
(e) => new Date(e.timestamp) >= midpoint,
).length;
if (firstHalf > 0) {
const growthRate = ((secondHalf - firstHalf) / firstHalf) * 100;
insights.push(
`Activity ${growthRate >= 0 ? "increased" : "decreased"} by ${Math.abs(
growthRate,
).toFixed(1)}%`,
);
}
return insights.slice(0, 5); // Return top 5 insights
}
/**
 * Score overall memory-system health on a 0-100 scale from four
 * equally weighted components: recent activity (last 7 days),
 * success rate, entry-type diversity, and day-to-day consistency
 * (only measured once there are at least 7 entries).
 */
private async calculateSystemHealthScore(
  entries: MemoryEntry[],
): Promise<number> {
  const weekAgo = new Date(Date.now() - 7 * 24 * 60 * 60 * 1000);

  // Activity level (0-25): two points per entry from the last week.
  const recentCount = entries.filter(
    (e) => new Date(e.timestamp) > weekAgo,
  ).length;
  const activityScore = Math.min(25, recentCount * 2);

  // Success rate (0-25): proportional to successful outcomes.
  const successCount = entries.filter(
    (e) => e.data.outcome === "success" || e.data.success === true,
  ).length;
  const successScore =
    (entries.length > 0 ? successCount / entries.length : 0) * 25;

  // Diversity (0-25): three points per distinct entry type.
  const typeCount = new Set(entries.map((e) => e.type)).size;
  const diversityScore = Math.min(25, typeCount * 3);

  // Consistency (0-25): low day-to-day variance relative to the mean.
  let consistencyScore = 0;
  if (entries.length >= 7) {
    const perDay = new Map<string, number>();
    for (const e of entries) {
      const day = e.timestamp.slice(0, 10);
      perDay.set(day, (perDay.get(day) ?? 0) + 1);
    }
    const counts = [...perDay.values()];
    const mean = counts.reduce((sum, c) => sum + c, 0) / counts.length;
    const variance =
      counts.reduce((sum, c) => sum + (c - mean) ** 2, 0) / counts.length;
    const consistency =
      mean > 0 ? Math.max(0, 1 - Math.sqrt(variance) / mean) : 0;
    consistencyScore = consistency * 25;
  }

  return Math.round(
    Math.min(
      100,
      activityScore + successScore + diversityScore + consistencyScore,
    ),
  );
}
/**
 * Build a timeline chart of memory activity: one event per entry,
 * colored by type, with the overall time range spanning the earliest
 * to the latest event.
 *
 * Fix: an empty entry list previously produced Invalid Dates for the
 * time range (Math.min()/Math.max() over no arguments yield
 * +/-Infinity); the range now falls back to the current time.
 */
private generateTimelineVisualization(
  entries: MemoryEntry[],
  query: any,
  config: VisualizationConfig,
): ChartData {
  const events = entries.map((entry) => ({
    id: entry.id,
    timestamp: new Date(entry.timestamp),
    title: entry.type,
    description: entry.data.description || `${entry.type} entry`,
    type: entry.type,
    // Successful entries are rendered as more important.
    importance: entry.data.outcome === "success" ? 1 : 0.5,
    color: this.getColorForNodeType(entry.type, config.colorScheme),
    metadata: entry.data,
  }));

  // Guard against an empty event list: spread of an empty array into
  // Math.min/Math.max yields +/-Infinity and thus Invalid Dates.
  const times = events.map((e) => e.timestamp.getTime());
  const now = Date.now();
  const timelineData: TimelineVisualization = {
    events,
    timeRange: {
      start: new Date(times.length > 0 ? Math.min(...times) : now),
      end: new Date(times.length > 0 ? Math.max(...times) : now),
    },
    granularity: "day",
    groupBy: query.groupBy,
  };

  return {
    type: "timeline",
    title: "Memory Activity Timeline",
    description: "Chronological timeline of memory system activities",
    data: timelineData,
    config,
    metadata: {
      generated: new Date(),
      dataPoints: events.length,
      filters: query.filters,
    },
  };
}
/**
 * Build a scatter plot of entry timestamp (x) against the best
 * available magnitude metric (y: duration, then complexity).
 *
 * Fix: uses nullish coalescing so legitimate 0 values for duration or
 * complexity are plotted as 0 instead of being treated as missing by
 * "||" and replaced with a random placeholder.
 */
private generateScatterPlot(
  entries: MemoryEntry[],
  query: any,
  config: VisualizationConfig,
): ChartData {
  const data = entries.map((entry) => ({
    x: new Date(entry.timestamp).getTime(),
    // Math.random() remains only as a last-resort placeholder when the
    // entry carries neither metric, so the point is still renderable.
    y: entry.data.duration ?? entry.data.complexity ?? Math.random(),
    color: this.getColorForNodeType(entry.type, config.colorScheme),
    metadata: entry,
  }));
  return {
    type: "scatter",
    title: "Memory Activity Scatter Plot",
    description: "Scatter plot visualization of memory activities",
    data: {
      datasets: [
        {
          label: "Activities",
          data: data,
          backgroundColor: data.map((d) => d.color),
        },
      ],
    },
    config,
    metadata: {
      generated: new Date(),
      dataPoints: data.length,
      filters: query.filters,
    },
  };
}
/**
 * Build a two-level treemap: top level is the entry type, second level
 * the project each entry belongs to, both sized by entry count.
 */
private generateTreemapVisualization(
  entries: MemoryEntry[],
  query: any,
  config: VisualizationConfig,
): ChartData {
  // Count entries per (type, project) pair.
  const hierarchy = new Map<string, Map<string, number>>();
  for (const entry of entries) {
    const project =
      entry.data.projectPath || entry.data.projectId || "Unknown";
    let projects = hierarchy.get(entry.type);
    if (!projects) {
      projects = new Map();
      hierarchy.set(entry.type, projects);
    }
    projects.set(project, (projects.get(project) ?? 0) + 1);
  }

  // Flatten into the { name, value, children } treemap shape; the
  // parent value is the sum of its children.
  const treemapData: Array<{
    name: string;
    value: number;
    children: Array<{ name: string; value: number }>;
  }> = [];
  for (const [type, projects] of hierarchy) {
    const children: Array<{ name: string; value: number }> = [];
    let total = 0;
    for (const [project, count] of projects) {
      children.push({ name: project, value: count });
      total += count;
    }
    treemapData.push({ name: type, value: total, children });
  }

  return {
    type: "treemap",
    title: "Memory Type Hierarchy",
    description: "Hierarchical treemap of memory entries by type and project",
    data: treemapData,
    config,
    metadata: {
      generated: new Date(),
      dataPoints: entries.length,
      filters: query.filters,
    },
  };
}
/**
 * Build a Sankey diagram mapping memory entry types (sources) to their
 * outcomes (targets), with link weights equal to entry counts.
 *
 * Improvement: node indices are resolved through a Map instead of
 * repeated Array.indexOf scans, turning link construction from O(n^2)
 * into O(n) while preserving the original node ordering (all sources
 * first, then previously unseen targets).
 */
private generateSankeyDiagram(
  entries: MemoryEntry[],
  query: any,
  config: VisualizationConfig,
): ChartData {
  // Aggregate flow counts from entry type to outcome.
  const flows = new Map<string, Map<string, number>>();
  for (const entry of entries) {
    const source = entry.type;
    const target =
      entry.data.outcome || (entry.data.success ? "success" : "unknown");
    if (!flows.has(source)) {
      flows.set(source, new Map());
    }
    flows.get(source)!.set(target, (flows.get(source)!.get(target) || 0) + 1);
  }

  // Node list plus an O(1) name -> index lookup.
  const nodes: string[] = [];
  const nodeIndex = new Map<string, number>();
  const addNode = (name: string): void => {
    if (!nodeIndex.has(name)) {
      nodeIndex.set(name, nodes.length);
      nodes.push(name);
    }
  };
  for (const source of flows.keys()) addNode(source);
  flows.forEach((targetMap) => {
    targetMap.forEach((_, target) => addNode(target));
  });

  // Create links using the index map.
  const links: Array<{ source: number; target: number; value: number }> = [];
  flows.forEach((targetMap, source) => {
    targetMap.forEach((value, target) => {
      links.push({
        source: nodeIndex.get(source)!,
        target: nodeIndex.get(target)!,
        value,
      });
    });
  });

  return {
    type: "sankey",
    title: "Memory Flow Diagram",
    description: "Sankey diagram showing flow from memory types to outcomes",
    data: { nodes, links },
    config,
    metadata: {
      generated: new Date(),
      dataPoints: links.length,
      filters: query.filters,
    },
  };
}
/**
 * Render a self-contained HTML page that draws the chart with
 * Chart.js loaded from a CDN.
 *
 * Security fix: title and description are HTML-escaped, and "<" in the
 * embedded JSON is escaped to \u003c, so chart text cannot inject
 * markup or break out of the <script> block via "</script>" (XSS
 * hardening for untrusted entry data that flows into descriptions).
 */
private generateHTMLVisualization(
  chartData: ChartData,
  _options?: any,
): string {
  // Minimal HTML entity escaping for text interpolated into markup.
  const escapeHtml = (text: string): string =>
    String(text)
      .replace(/&/g, "&amp;")
      .replace(/</g, "&lt;")
      .replace(/>/g, "&gt;")
      .replace(/"/g, "&quot;");
  const title = escapeHtml(chartData.title);
  const description = escapeHtml(chartData.description);
  // "\u003c" keeps any "</script>" sequence inside the data inert.
  const chartJson = JSON.stringify(chartData.data).replace(/</g, "\\u003c");
  return `
<!DOCTYPE html>
<html>
<head>
  <title>${title}</title>
  <script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
  <style>
    body { font-family: Arial, sans-serif; margin: 20px; }
    .chart-container { width: 100%; height: 400px; }
    .description { margin-bottom: 20px; color: #666; }
  </style>
</head>
<body>
  <h1>${title}</h1>
  <p class="description">${description}</p>
  <div class="chart-container">
    <canvas id="chart"></canvas>
  </div>
  <script>
    const ctx = document.getElementById('chart').getContext('2d');
    new Chart(ctx, ${chartJson});
  </script>
</body>
</html>`;
}
/**
 * Generate a basic SVG placeholder for the chart - in production, use
 * a proper chart library.
 *
 * Security fix: title and description are XML-escaped so text carried
 * in chart data cannot break the SVG markup or inject elements.
 */
private generateSVGVisualization(
  chartData: ChartData,
  options?: any,
): string {
  const width = options?.width || 800;
  const height = options?.height || 600;
  // Escape XML-significant characters in interpolated text.
  const escapeXml = (text: string): string =>
    String(text)
      .replace(/&/g, "&amp;")
      .replace(/</g, "&lt;")
      .replace(/>/g, "&gt;")
      .replace(/"/g, "&quot;");
  return `
<svg width="${width}" height="${height}" xmlns="http://www.w3.org/2000/svg">
  <rect width="100%" height="100%" fill="white"/>
  <text x="50%" y="30" text-anchor="middle" font-size="18" font-weight="bold">
    ${escapeXml(chartData.title)}
  </text>
  <text x="50%" y="50" text-anchor="middle" font-size="14" fill="#666">
    ${escapeXml(chartData.description)}
  </text>
  <!-- Chart data would be rendered here -->
  <text x="50%" y="${
    height / 2
  }" text-anchor="middle" font-size="12" fill="#999">
    Chart visualization (${chartData.metadata.dataPoints} data points)
  </text>
</svg>`;
}
}
```
--------------------------------------------------------------------------------
/src/memory/temporal-analysis.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Temporal Memory Analysis System for DocuMCP
* Time-based analysis of memory patterns, trends, and predictions
*/
import { EventEmitter } from "events";
import { MemoryEntry, JSONLStorage } from "./storage.js";
import { MemoryManager } from "./manager.js";
import { IncrementalLearningSystem } from "./learning.js";
import { KnowledgeGraph } from "./knowledge-graph.js";
/** A labeled time span with a precomputed duration. */
export interface TimeWindow {
  start: Date;
  end: Date;
  duration: number; // in milliseconds
  label: string; // human-readable label for display
}
/** A pattern detected in memory activity over time. */
export interface TemporalPattern {
  type: "periodic" | "trending" | "seasonal" | "burst" | "decay";
  confidence: number; // 0-1 detection confidence
  period?: number; // For periodic patterns (in milliseconds)
  trend?: "increasing" | "decreasing" | "stable"; // for "trending" patterns
  seasonality?: "daily" | "weekly" | "monthly" | "yearly"; // for "seasonal" patterns
  description: string; // human-readable summary of the detection
  dataPoints: Array<{ timestamp: Date; value: number; metadata?: any }>; // supporting series
}
/** Aggregate temporal statistics for a queried time range. */
export interface TemporalMetrics {
  activityLevel: number; // 0-1 scale
  growthRate: number; // percentage change
  peakActivity: { timestamp: Date; count: number };
  averageInterval: number; // average time between entries (milliseconds)
  consistency: number; // 0-1 scale of temporal consistency
  cyclicalStrength: number; // 0-1 scale of cyclical patterns
}
/** Forecast of future memory activity, with trends, anomalies and advice. */
export interface PredictionResult {
  nextActivity: {
    probability: number; // 0-1 likelihood of activity in the window
    timeRange: TimeWindow;
    expectedCount: number; // predicted number of entries
    confidence: number; // 0-1 confidence in the forecast
  };
  trends: {
    shortTerm: TemporalPattern[];
    longTerm: TemporalPattern[];
  };
  anomalies: Array<{
    timestamp: Date;
    type: "spike" | "drought" | "shift";
    severity: number; // 0-1; insights surface anomalies above 0.7
    description: string;
  }>;
  recommendations: string[]; // actionable follow-up suggestions
}
/** Parameters controlling time-series construction and analysis. */
export interface TemporalQuery {
  timeRange?: TimeWindow; // defaults to getDefaultTimeRange() when omitted
  granularity: "hour" | "day" | "week" | "month" | "year";
  aggregation: "count" | "success_rate" | "activity_level" | "diversity";
  filters?: {
    types?: string[];
    projects?: string[]; // matched as projectPath substring or exact projectId
    outcomes?: string[];
    tags?: string[];
  };
  smoothing?: {
    enabled: boolean;
    // NOTE: "savitzky_golay" currently falls through to no smoothing
    // (see applySmoothingToTimeSeries).
    method: "moving_average" | "exponential" | "savitzky_golay";
    window: number; // window size used by moving_average
  };
}
/** A single surfaced finding (pattern, anomaly, trend or prediction). */
export interface TemporalInsight {
  type: "pattern" | "anomaly" | "trend" | "prediction";
  title: string;
  description: string;
  confidence: number; // 0-1
  timeframe: TimeWindow; // span the insight applies to
  actionable: boolean; // actionable insights sort first
  recommendations?: string[];
  visualData?: any; // optional chart-ready payload
}
export class TemporalMemoryAnalysis extends EventEmitter {
// Collaborating subsystems (injected via the constructor).
private storage: JSONLStorage;
private manager: MemoryManager;
private learningSystem: IncrementalLearningSystem;
private knowledgeGraph: KnowledgeGraph;
// Per-query result caches, keyed by generateCacheKey(prefix, query).
private patternCache: Map<string, TemporalPattern[]>;
private metricsCache: Map<string, TemporalMetrics>;
private predictionCache: Map<string, PredictionResult>;
/**
 * Wire up collaborators, initialize empty caches, and start periodic
 * background analysis.
 *
 * @param storage - JSONL-backed store all entries are read from
 * @param manager - memory manager collaborator
 * @param learningSystem - incremental learning collaborator
 * @param knowledgeGraph - knowledge graph collaborator
 */
constructor(
  storage: JSONLStorage,
  manager: MemoryManager,
  learningSystem: IncrementalLearningSystem,
  knowledgeGraph: KnowledgeGraph,
) {
  super();
  this.storage = storage;
  this.manager = manager;
  this.learningSystem = learningSystem;
  this.knowledgeGraph = knowledgeGraph;
  this.patternCache = new Map();
  this.metricsCache = new Map();
  this.predictionCache = new Map();
  // Side effect: schedules recurring analysis at construction time.
  this.setupPeriodicAnalysis();
}
/**
* Analyze temporal patterns in memory data
*/
async analyzeTemporalPatterns(
query?: TemporalQuery,
): Promise<TemporalPattern[]> {
const defaultQuery: TemporalQuery = {
granularity: "day",
aggregation: "count",
timeRange: this.getDefaultTimeRange(),
smoothing: {
enabled: true,
method: "moving_average",
window: 7,
},
};
const activeQuery = { ...defaultQuery, ...query };
const cacheKey = this.generateCacheKey("patterns", activeQuery);
// Check cache first
if (this.patternCache.has(cacheKey)) {
return this.patternCache.get(cacheKey)!;
}
try {
// Get time series data
const timeSeries = await this.buildTimeSeries(activeQuery);
// Detect different types of patterns
const patterns: TemporalPattern[] = [];
// Periodic patterns
patterns.push(
...(await this.detectPeriodicPatterns(timeSeries, activeQuery)),
);
// Trend patterns
patterns.push(
...(await this.detectTrendPatterns(timeSeries, activeQuery)),
);
// Seasonal patterns
patterns.push(
...(await this.detectSeasonalPatterns(timeSeries, activeQuery)),
);
// Burst patterns
patterns.push(
...(await this.detectBurstPatterns(timeSeries, activeQuery)),
);
// Decay patterns
patterns.push(
...(await this.detectDecayPatterns(timeSeries, activeQuery)),
);
// Sort by confidence
patterns.sort((a, b) => b.confidence - a.confidence);
// Cache results
this.patternCache.set(cacheKey, patterns);
this.emit("patterns_analyzed", {
query: activeQuery,
patterns: patterns.length,
highConfidence: patterns.filter((p) => p.confidence > 0.7).length,
});
return patterns;
} catch (error) {
this.emit("analysis_error", {
error: error instanceof Error ? error.message : String(error),
});
throw error;
}
}
/**
 * Get temporal metrics for a time range.
 *
 * Computes activity level (total vs flat-at-peak), growth rate (first
 * half vs second half), peak activity, average interval between
 * samples (ms), consistency (1 - coefficient of variation), and
 * cyclical strength. Results are cached per query.
 *
 * Fix: an empty time series previously threw ("reduce of empty array
 * with no initial value" when locating the peak); it now returns
 * zeroed metrics instead.
 */
async getTemporalMetrics(query?: TemporalQuery): Promise<TemporalMetrics> {
  const defaultQuery: TemporalQuery = {
    granularity: "day",
    aggregation: "count",
    timeRange: this.getDefaultTimeRange(),
  };
  const activeQuery = { ...defaultQuery, ...query };
  const cacheKey = this.generateCacheKey("metrics", activeQuery);
  if (this.metricsCache.has(cacheKey)) {
    return this.metricsCache.get(cacheKey)!;
  }
  const timeSeries = await this.buildTimeSeries(activeQuery);

  // Guard: nothing to measure for an empty series.
  if (timeSeries.length === 0) {
    const emptyMetrics: TemporalMetrics = {
      activityLevel: 0,
      growthRate: 0,
      peakActivity: {
        timestamp: activeQuery.timeRange?.start ?? new Date(),
        count: 0,
      },
      averageInterval: 0,
      consistency: 0,
      cyclicalStrength: 0,
    };
    this.metricsCache.set(cacheKey, emptyMetrics);
    return emptyMetrics;
  }

  // Calculate activity level relative to a flat-at-peak series.
  const totalActivity = timeSeries.reduce(
    (sum, point) => sum + point.value,
    0,
  );
  const maxPossibleActivity =
    timeSeries.length * Math.max(...timeSeries.map((p) => p.value));
  const activityLevel =
    maxPossibleActivity > 0 ? totalActivity / maxPossibleActivity : 0;
  // Calculate growth rate: first half vs second half of the series.
  const firstHalf = timeSeries.slice(0, Math.floor(timeSeries.length / 2));
  const secondHalf = timeSeries.slice(Math.floor(timeSeries.length / 2));
  const firstHalfAvg =
    firstHalf.reduce((sum, p) => sum + p.value, 0) / firstHalf.length;
  const secondHalfAvg =
    secondHalf.reduce((sum, p) => sum + p.value, 0) / secondHalf.length;
  const growthRate =
    firstHalfAvg > 0
      ? ((secondHalfAvg - firstHalfAvg) / firstHalfAvg) * 100
      : 0;
  // Find peak activity (safe: series is non-empty here).
  const peakPoint = timeSeries.reduce((max, point) =>
    point.value > max.value ? point : max,
  );
  const peakActivity = {
    timestamp: peakPoint.timestamp,
    count: peakPoint.value,
  };
  // Average interval between consecutive samples (milliseconds).
  const intervals = [];
  for (let i = 1; i < timeSeries.length; i++) {
    intervals.push(
      timeSeries[i].timestamp.getTime() -
        timeSeries[i - 1].timestamp.getTime(),
    );
  }
  const averageInterval =
    intervals.length > 0
      ? intervals.reduce((sum, interval) => sum + interval, 0) /
        intervals.length
      : 0;
  // Consistency: inverse of the coefficient of variation.
  const values = timeSeries.map((p) => p.value);
  const mean = values.reduce((sum, val) => sum + val, 0) / values.length;
  const variance =
    values.reduce((sum, val) => sum + Math.pow(val - mean, 2), 0) /
    values.length;
  const stdDev = Math.sqrt(variance);
  const consistency = mean > 0 ? Math.max(0, 1 - stdDev / mean) : 0;
  // Cyclical strength via autocorrelation.
  const cyclicalStrength = this.calculateCyclicalStrength(values);
  const metrics: TemporalMetrics = {
    activityLevel,
    growthRate,
    peakActivity,
    averageInterval,
    consistency,
    cyclicalStrength,
  };
  this.metricsCache.set(cacheKey, metrics);
  return metrics;
}
/**
 * Make predictions based on temporal patterns.
 *
 * Combines detected patterns, aggregate metrics and the raw time
 * series into a forecast: next activity window, short/long-term
 * trends, anomalies, and recommendations. Results are cached per
 * query.
 */
async predictFutureActivity(
  query?: TemporalQuery,
): Promise<PredictionResult> {
  const defaultQuery: TemporalQuery = {
    granularity: "day",
    aggregation: "count",
    timeRange: this.getDefaultTimeRange(),
  };
  const activeQuery = { ...defaultQuery, ...query };
  const cacheKey = this.generateCacheKey("predictions", activeQuery);
  if (this.predictionCache.has(cacheKey)) {
    return this.predictionCache.get(cacheKey)!;
  }
  // Get historical patterns and metrics
  const patterns = await this.analyzeTemporalPatterns(activeQuery);
  const metrics = await this.getTemporalMetrics(activeQuery);
  const timeSeries = await this.buildTimeSeries(activeQuery);
  // Predict next activity window
  const nextActivity = await this.predictNextActivity(
    timeSeries,
    patterns,
    metrics,
  );
  // Split patterns into short- and long-term horizons.
  const shortTermPatterns = patterns.filter((p) =>
    this.isShortTerm(p, activeQuery),
  );
  const longTermPatterns = patterns.filter((p) =>
    this.isLongTerm(p, activeQuery),
  );
  // Detect anomalies
  const anomalies = await this.detectAnomalies(timeSeries, patterns);
  // Generate recommendations
  const recommendations = this.generateRecommendations(
    patterns,
    metrics,
    anomalies,
  );
  const result: PredictionResult = {
    nextActivity,
    trends: {
      shortTerm: shortTermPatterns,
      longTerm: longTermPatterns,
    },
    anomalies,
    recommendations,
  };
  this.predictionCache.set(cacheKey, result);
  return result;
}
/**
 * Get temporal insights and actionable recommendations.
 *
 * Aggregates pattern, trend, anomaly and prediction findings into a
 * single list, sorted actionable-first and then by confidence.
 */
async getTemporalInsights(query?: TemporalQuery): Promise<TemporalInsight[]> {
  const patterns = await this.analyzeTemporalPatterns(query);
  const metrics = await this.getTemporalMetrics(query);
  const predictions = await this.predictFutureActivity(query);
  const insights: TemporalInsight[] = [];
  // Pattern-based insights (only reasonably confident patterns, > 0.6).
  for (const pattern of patterns.filter((p) => p.confidence > 0.6)) {
    insights.push({
      type: "pattern",
      title: `${
        pattern.type.charAt(0).toUpperCase() + pattern.type.slice(1)
      } Pattern Detected`,
      description: pattern.description,
      confidence: pattern.confidence,
      timeframe: this.getPatternTimeframe(pattern),
      actionable: this.isActionablePattern(pattern),
      recommendations: this.getPatternRecommendations(pattern),
    });
  }
  // Trend insight: flag strong growth (> 20%) as actionable.
  if (metrics.growthRate > 20) {
    insights.push({
      type: "trend",
      title: "Increasing Activity Trend",
      description: `Memory activity has increased by ${metrics.growthRate.toFixed(
        1,
      )}% over the analysis period`,
      confidence: 0.8,
      timeframe: query?.timeRange || this.getDefaultTimeRange(),
      actionable: true,
      recommendations: [
        "Consider optimizing memory storage for increased load",
        "Monitor system performance as activity grows",
        "Evaluate current pruning policies",
      ],
    });
  }
  // Anomaly insights (only high-severity anomalies, > 0.7).
  for (const anomaly of predictions.anomalies.filter(
    (a) => a.severity > 0.7,
  )) {
    insights.push({
      type: "anomaly",
      title: `${
        anomaly.type.charAt(0).toUpperCase() + anomaly.type.slice(1)
      } Anomaly`,
      description: anomaly.description,
      confidence: anomaly.severity,
      // Anomalies are reported as zero-duration point-in-time windows.
      timeframe: {
        start: anomaly.timestamp,
        end: anomaly.timestamp,
        duration: 0,
        label: "Point Anomaly",
      },
      actionable: true,
      recommendations: this.getAnomalyRecommendations(anomaly),
    });
  }
  // Prediction insight (only when the forecast is strong, > 0.7).
  if (predictions.nextActivity.probability > 0.7) {
    insights.push({
      type: "prediction",
      title: "High Probability Activity Window",
      description: `${(predictions.nextActivity.probability * 100).toFixed(
        1,
      )}% chance of ${predictions.nextActivity.expectedCount} activities`,
      confidence: predictions.nextActivity.confidence,
      timeframe: predictions.nextActivity.timeRange,
      actionable: true,
      recommendations: [
        "Prepare system for predicted activity surge",
        "Consider pre-emptive optimization",
        "Monitor resource utilization during predicted window",
      ],
    });
  }
  // Sort by actionability first, then by confidence.
  insights.sort((a, b) => {
    if (a.actionable !== b.actionable) {
      return a.actionable ? -1 : 1;
    }
    return b.confidence - a.confidence;
  });
  return insights;
}
/**
 * Build time series data from memory entries.
 *
 * Buckets the filtered entries by the query granularity and computes
 * one aggregated value per bucket (count, success rate, activity
 * level, or type diversity), optionally smoothing the result.
 */
private async buildTimeSeries(
  query: TemporalQuery,
): Promise<Array<{ timestamp: Date; value: number; metadata?: any }>> {
  const entries = await this.getFilteredEntries(query);
  const timeRange = query.timeRange || this.getDefaultTimeRange();
  // Create time buckets based on granularity
  const buckets = this.createTimeBuckets(timeRange, query.granularity);
  const timeSeries: Array<{
    timestamp: Date;
    value: number;
    metadata?: any;
  }> = [];
  for (const bucket of buckets) {
    // Entries whose timestamp falls in [bucket.start, bucket.end).
    const bucketEntries = entries.filter((entry) => {
      const entryTime = new Date(entry.timestamp);
      return entryTime >= bucket.start && entryTime < bucket.end;
    });
    let value = 0;
    const metadata: any = {};
    switch (query.aggregation) {
      case "count":
        value = bucketEntries.length;
        break;
      case "success_rate": {
        const successful = bucketEntries.filter(
          (e) => e.data.outcome === "success" || e.data.success === true,
        ).length;
        value =
          bucketEntries.length > 0 ? successful / bucketEntries.length : 0;
        break;
      }
      case "activity_level":
        // Custom metric based on entry types and interactions
        value = this.calculateActivityLevel(bucketEntries);
        break;
      case "diversity": {
        const uniqueTypes = new Set(bucketEntries.map((e) => e.type));
        value = uniqueTypes.size;
        break;
      }
    }
    // Attach per-bucket metadata for downstream consumers.
    metadata.entryCount = bucketEntries.length;
    metadata.types = [...new Set(bucketEntries.map((e) => e.type))];
    timeSeries.push({
      timestamp: bucket.start,
      value,
      metadata,
    });
  }
  // Apply smoothing if requested
  if (query.smoothing?.enabled) {
    return this.applySmoothingToTimeSeries(timeSeries, query.smoothing);
  }
  return timeSeries;
}
/**
* Get filtered entries based on query
*/
private async getFilteredEntries(
query: TemporalQuery,
): Promise<MemoryEntry[]> {
let entries = await this.storage.getAll();
// Apply time range filter
if (query.timeRange) {
entries = entries.filter((entry) => {
const entryTime = new Date(entry.timestamp);
return (
entryTime >= query.timeRange!.start &&
entryTime <= query.timeRange!.end
);
});
}
// Apply filters
if (query.filters) {
if (query.filters.types) {
entries = entries.filter((entry) =>
query.filters!.types!.includes(entry.type),
);
}
if (query.filters.projects) {
entries = entries.filter((entry) =>
query.filters!.projects!.some(
(project) =>
entry.data.projectPath?.includes(project) ||
entry.data.projectId === project,
),
);
}
if (query.filters.outcomes) {
entries = entries.filter(
(entry) =>
query.filters!.outcomes!.includes(entry.data.outcome) ||
(entry.data.success === true &&
query.filters!.outcomes!.includes("success")) ||
(entry.data.success === false &&
query.filters!.outcomes!.includes("failure")),
);
}
if (query.filters.tags) {
entries = entries.filter(
(entry) =>
entry.tags?.some((tag) => query.filters!.tags!.includes(tag)),
);
}
}
return entries;
}
/**
 * Partition a time range into consecutive buckets of the requested
 * granularity. Month and year buckets snap forward to calendar
 * boundaries; unknown granularities fall back to one day; the final
 * bucket is clipped to the range end.
 */
private createTimeBuckets(
  timeRange: TimeWindow,
  granularity: string,
): TimeWindow[] {
  const HOUR = 60 * 60 * 1000;
  const DAY = 24 * HOUR;
  // Compute the natural end of the bucket that starts at `from`.
  const advance = (from: Date): Date => {
    switch (granularity) {
      case "hour":
        return new Date(from.getTime() + HOUR);
      case "week":
        return new Date(from.getTime() + 7 * DAY);
      case "month":
        return new Date(from.getFullYear(), from.getMonth() + 1, 1);
      case "year":
        return new Date(from.getFullYear() + 1, 0, 1);
      case "day":
      default:
        return new Date(from.getTime() + DAY);
    }
  };

  const buckets: TimeWindow[] = [];
  const end = new Date(timeRange.end);
  let cursor = new Date(timeRange.start);
  while (cursor < end) {
    const start = new Date(cursor);
    let next = advance(cursor);
    if (next > end) {
      next = new Date(end);
    }
    buckets.push({
      start,
      end: next,
      duration: next.getTime() - start.getTime(),
      label: this.formatTimeLabel(start, granularity),
    });
    cursor = next;
  }
  return buckets;
}
/**
 * Detect periodic patterns in time series.
 *
 * Tests candidate cycle lengths of 1, 7, 30 and 365 days (rescaled to
 * the query granularity); a lag whose autocorrelation exceeds 0.6 is
 * reported as a periodic pattern with that correlation as confidence.
 */
private async detectPeriodicPatterns(
  timeSeries: Array<{ timestamp: Date; value: number; metadata?: any }>,
  query: TemporalQuery,
): Promise<TemporalPattern[]> {
  const patterns: TemporalPattern[] = [];
  const values = timeSeries.map((p) => p.value);
  // Check for different periods (daily, weekly, monthly cycles)
  const periods = [1, 7, 30, 365]; // days
  for (const period of periods) {
    const adjustedPeriod = this.adjustPeriodForGranularity(
      period,
      query.granularity,
    );
    if (adjustedPeriod >= values.length / 3) continue; // Need at least 3 cycles
    const correlation = this.calculateAutocorrelation(values, adjustedPeriod);
    if (correlation > 0.6) {
      patterns.push({
        type: "periodic",
        confidence: correlation,
        period: period * 24 * 60 * 60 * 1000, // Convert to milliseconds
        description: `${period}-${query.granularity} cycle detected with ${(
          correlation * 100
        ).toFixed(1)}% correlation`,
        dataPoints: timeSeries,
      });
    }
  }
  return patterns;
}
/**
* Detect trend patterns
*/
private async detectTrendPatterns(
timeSeries: Array<{ timestamp: Date; value: number; metadata?: any }>,
_query: TemporalQuery,
): Promise<TemporalPattern[]> {
const patterns: TemporalPattern[] = [];
const values = timeSeries.map((p) => p.value);
if (values.length < 5) return patterns;
// Calculate linear regression
const { slope, rSquared } = this.calculateLinearRegression(values);
if (rSquared > 0.5) {
// Good fit
const trend =
slope > 0.01 ? "increasing" : slope < -0.01 ? "decreasing" : "stable";
if (trend !== "stable") {
patterns.push({
type: "trending",
confidence: rSquared,
trend,
description: `${trend} trend detected with R² = ${rSquared.toFixed(
3,
)}`,
dataPoints: timeSeries,
});
}
}
return patterns;
}
/**
 * Detect seasonal patterns.
 *
 * Looks for daily (hour-of-day) cycles when the series is hourly, and
 * weekly (day-of-week) cycles for hourly or daily series. Scoring is
 * delegated to analyzeHourlyPattern / analyzeWeeklyPattern; only
 * patterns with confidence above 0.6 are reported.
 */
private async detectSeasonalPatterns(
  timeSeries: Array<{ timestamp: Date; value: number; metadata?: any }>,
  query: TemporalQuery,
): Promise<TemporalPattern[]> {
  const patterns: TemporalPattern[] = [];
  // Check for daily patterns (hour of day)
  if (query.granularity === "hour") {
    const hourlyPattern = this.analyzeHourlyPattern(timeSeries);
    if (hourlyPattern.confidence > 0.6) {
      patterns.push({
        type: "seasonal",
        confidence: hourlyPattern.confidence,
        seasonality: "daily",
        description: `Daily pattern: peak activity at ${hourlyPattern.peakHour}:00`,
        dataPoints: timeSeries,
      });
    }
  }
  // Check for weekly patterns (day of week)
  if (["hour", "day"].includes(query.granularity)) {
    const weeklyPattern = this.analyzeWeeklyPattern(timeSeries);
    if (weeklyPattern.confidence > 0.6) {
      patterns.push({
        type: "seasonal",
        confidence: weeklyPattern.confidence,
        seasonality: "weekly",
        description: `Weekly pattern: peak activity on ${weeklyPattern.peakDay}`,
        dataPoints: timeSeries,
      });
    }
  }
  return patterns;
}
/**
* Detect burst patterns (sudden spikes)
*/
private async detectBurstPatterns(
timeSeries: Array<{ timestamp: Date; value: number; metadata?: any }>,
_query: TemporalQuery,
): Promise<TemporalPattern[]> {
const patterns: TemporalPattern[] = [];
const values = timeSeries.map((p) => p.value);
const mean = values.reduce((sum, val) => sum + val, 0) / values.length;
const stdDev = Math.sqrt(
values.reduce((sum, val) => sum + Math.pow(val - mean, 2), 0) /
values.length,
);
const threshold = mean + 2 * stdDev; // 2 standard deviations above mean
const bursts = [];
for (let i = 0; i < values.length; i++) {
if (values[i] > threshold) {
bursts.push(i);
}
}
if (bursts.length > 0 && bursts.length < values.length * 0.1) {
// Bursts are rare
const confidence = Math.min(0.9, bursts.length / (values.length * 0.05));
patterns.push({
type: "burst",
confidence,
description: `${bursts.length} burst events detected (${(
threshold / mean
).toFixed(1)}x normal activity)`,
dataPoints: bursts.map((i) => timeSeries[i]),
});
}
return patterns;
}
/**
* Detect decay patterns (gradual decline)
*/
private async detectDecayPatterns(
timeSeries: Array<{ timestamp: Date; value: number; metadata?: any }>,
_query: TemporalQuery,
): Promise<TemporalPattern[]> {
const patterns: TemporalPattern[] = [];
const values = timeSeries.map((p) => p.value);
if (values.length < 10) return patterns;
// Look for exponential decay pattern
const logValues = values.map((v) => Math.log(Math.max(v, 0.1))); // Avoid log(0)
const { slope, rSquared } = this.calculateLinearRegression(logValues);
if (slope < -0.05 && rSquared > 0.7) {
// Significant decay with good fit
patterns.push({
type: "decay",
confidence: rSquared,
description: `Exponential decay detected (half-life ≈ ${(
-0.693 / slope
).toFixed(1)} periods)`,
dataPoints: timeSeries,
});
}
return patterns;
}
/**
 * Composite 0-1 activity score for a bucket of entries: entry count
 * (saturating at 10), a 0.1 bonus per distinct type, and up to 0.3
 * for a perfect success rate; capped at 1.
 */
private calculateActivityLevel(entries: MemoryEntry[]): number {
  if (entries.length === 0) {
    return 0;
  }

  // Count component: saturates at 10 entries.
  const countScore = Math.min(1, entries.length / 10);

  // Diversity component: 0.1 per distinct entry type.
  const distinctTypes = new Set(entries.map((entry) => entry.type)).size;
  const diversityScore = distinctTypes * 0.1;

  // Success component: up to 0.3 for a perfect success rate.
  const successCount = entries.filter(
    (entry) =>
      entry.data.outcome === "success" || entry.data.success === true,
  ).length;
  const successScore = (successCount / entries.length) * 0.3;

  return Math.min(1, countScore + diversityScore + successScore);
}
/**
 * Return a copy of the series with smoothed values. Supported methods:
 * "moving_average" (window from config) and "exponential" (alpha 0.3);
 * any other method leaves the values untouched.
 */
private applySmoothingToTimeSeries(
  timeSeries: Array<{ timestamp: Date; value: number; metadata?: any }>,
  smoothing: { method: string; window: number },
): Array<{ timestamp: Date; value: number; metadata?: any }> {
  const rawValues = timeSeries.map((point) => point.value);

  let smoothedValues = rawValues;
  if (smoothing.method === "moving_average") {
    smoothedValues = this.applyMovingAverage(rawValues, smoothing.window);
  } else if (smoothing.method === "exponential") {
    smoothedValues = this.applyExponentialSmoothing(rawValues, 0.3);
  }

  return timeSeries.map((point, index) => ({
    ...point,
    value: smoothedValues[index],
  }));
}
/**
 * Centered moving average: each point becomes the mean of a window of
 * neighbors, clipped at the array boundaries (edge windows shrink).
 */
private applyMovingAverage(values: number[], window: number): number[] {
  return values.map((_, index) => {
    const from = Math.max(0, index - Math.floor(window / 2));
    const to = Math.min(values.length, index + Math.ceil(window / 2));
    let total = 0;
    for (let j = from; j < to; j++) {
      total += values[j];
    }
    return total / (to - from);
  });
}
/**
 * Exponential smoothing: s[0] = v[0]; s[i] = alpha*v[i] + (1-alpha)*s[i-1].
 *
 * @param values - series to smooth
 * @param alpha - smoothing factor; higher values track the input faster
 *
 * Fix: an empty input previously produced [undefined] (seeding with
 * values[0] of an empty array); it now returns [].
 */
private applyExponentialSmoothing(values: number[], alpha: number): number[] {
  if (values.length === 0) {
    return [];
  }
  const smoothed: number[] = [values[0]];
  for (let i = 1; i < values.length; i++) {
    smoothed.push(alpha * values[i] + (1 - alpha) * smoothed[i - 1]);
  }
  return smoothed;
}
/**
 * Autocorrelation of the series at the given lag, normalized by the
 * total variance; returns 0 when the lag is out of range or the
 * series has no variance.
 */
private calculateAutocorrelation(values: number[], lag: number): number {
  if (lag >= values.length) {
    return 0;
  }
  const mean = values.reduce((acc, v) => acc + v, 0) / values.length;

  // Covariance of the series with its lag-shifted self.
  let covariance = 0;
  const pairs = values.length - lag;
  for (let i = 0; i < pairs; i++) {
    covariance += (values[i] - mean) * (values[i + lag] - mean);
  }

  const totalVariance = values.reduce(
    (acc, v) => acc + (v - mean) ** 2,
    0,
  );
  return totalVariance > 0 ? covariance / totalVariance : 0;
}
/**
 * Strength of the strongest cyclical component: the maximum absolute
 * autocorrelation over lags 1..min(n/3, 30).
 */
private calculateCyclicalStrength(values: number[]): number {
  const lagLimit = Math.min(values.length / 3, 30);
  let strongest = 0;
  for (let lag = 1; lag < lagLimit; lag++) {
    strongest = Math.max(
      strongest,
      Math.abs(this.calculateAutocorrelation(values, lag)),
    );
  }
  return strongest;
}
/**
 * Ordinary least-squares fit of values against their indices 0..n-1.
 *
 * @returns slope, intercept, and R-squared of the fit
 *
 * Fix: with fewer than two points the denominator n*Σx² - (Σx)² is 0
 * and slope/intercept became NaN; degenerate series now return a flat
 * fit (slope 0) instead.
 */
private calculateLinearRegression(values: number[]): {
  slope: number;
  intercept: number;
  rSquared: number;
} {
  const n = values.length;
  if (n === 0) {
    return { slope: 0, intercept: 0, rSquared: 0 };
  }
  const x = Array.from({ length: n }, (_, i) => i);
  const sumX = x.reduce((sum, val) => sum + val, 0);
  const sumY = values.reduce((sum, val) => sum + val, 0);
  const sumXY = x.reduce((sum, val, i) => sum + val * values[i], 0);
  const sumXX = x.reduce((sum, val) => sum + val * val, 0);

  const denominator = n * sumXX - sumX * sumX;
  // Degenerate (single point): report a flat line through the point.
  if (denominator === 0) {
    return { slope: 0, intercept: sumY / n, rSquared: 0 };
  }

  const slope = (n * sumXY - sumX * sumY) / denominator;
  const intercept = (sumY - slope * sumX) / n;

  // R² = 1 - SS_res / SS_tot (0 when the series has no variance).
  const meanY = sumY / n;
  const ssRes = values.reduce((sum, val, i) => {
    const predicted = slope * i + intercept;
    return sum + Math.pow(val - predicted, 2);
  }, 0);
  const ssTot = values.reduce(
    (sum, val) => sum + Math.pow(val - meanY, 2),
    0,
  );
  const rSquared = ssTot > 0 ? 1 - ssRes / ssTot : 0;
  return { slope, intercept, rSquared };
}
/**
 * Predict next activity window
 *
 * Projects the next 24-hour window after the last observed point,
 * estimating an expected event count and a probability/confidence pair
 * derived from detected trend and periodic patterns.
 *
 * @param timeSeries - Observed activity points (assumed non-empty)
 * @param patterns - Patterns previously detected over the series
 * @param _metrics - Aggregate metrics (currently unused)
 * @returns Prediction for the next activity window
 */
private async predictNextActivity(
  timeSeries: Array<{ timestamp: Date; value: number; metadata?: any }>,
  patterns: TemporalPattern[],
  _metrics: TemporalMetrics,
): Promise<PredictionResult["nextActivity"]> {
  const lastPoint = timeSeries[timeSeries.length - 1];
  const averageValue =
    timeSeries.reduce((sum, p) => sum + p.value, 0) / timeSeries.length;
  // Base prediction on recent average activity
  let expectedCount = averageValue;
  let probability = 0.5;
  // Adjust based on trends
  const trendPattern = patterns.find((p) => p.type === "trending");
  if (trendPattern && trendPattern.trend === "increasing") {
    expectedCount *= 1.2;
    probability += 0.2;
  } else if (trendPattern && trendPattern.trend === "decreasing") {
    expectedCount *= 0.8;
    probability -= 0.1;
  }
  // A strong periodic pattern raises confidence that activity repeats
  const periodicPattern = patterns.find(
    (p) => p.type === "periodic" && p.confidence > 0.7,
  );
  if (periodicPattern) {
    probability += 0.3;
  }
  // Determine time range for next activity (next period based on granularity)
  const nextStart = new Date(
    lastPoint.timestamp.getTime() + 24 * 60 * 60 * 1000,
  ); // Next day
  const nextEnd = new Date(nextStart.getTime() + 24 * 60 * 60 * 1000);
  // Bug fix: with no detected patterns the mean-confidence expression
  // divided by zero and produced NaN; fall back to a neutral 0.5.
  const confidence =
    patterns.length > 0
      ? Math.min(
          0.9,
          patterns.reduce((sum, p) => sum + p.confidence, 0) /
            patterns.length,
        )
      : 0.5;
  return {
    probability: Math.min(0.95, Math.max(0.05, probability)),
    timeRange: {
      start: nextStart,
      end: nextEnd,
      duration: 24 * 60 * 60 * 1000,
      label: "Next 24 hours",
    },
    expectedCount: Math.round(expectedCount),
    confidence,
  };
}
/**
* Detect anomalies in time series
*/
private async detectAnomalies(
timeSeries: Array<{ timestamp: Date; value: number; metadata?: any }>,
_patterns: TemporalPattern[],
): Promise<PredictionResult["anomalies"]> {
const anomalies: PredictionResult["anomalies"] = [];
const values = timeSeries.map((p) => p.value);
const mean = values.reduce((sum, val) => sum + val, 0) / values.length;
const stdDev = Math.sqrt(
values.reduce((sum, val) => sum + Math.pow(val - mean, 2), 0) /
values.length,
);
for (let i = 0; i < timeSeries.length; i++) {
const point = timeSeries[i];
const value = point.value;
// Spike detection
if (value > mean + 3 * stdDev) {
anomalies.push({
timestamp: point.timestamp,
type: "spike",
severity: Math.min(1, (value - mean) / (3 * stdDev)),
description: `Activity spike: ${value} (${(
(value / mean - 1) *
100
).toFixed(0)}% above normal)`,
});
}
// Drought detection
if (value < mean - 2 * stdDev && mean > 1) {
anomalies.push({
timestamp: point.timestamp,
type: "drought",
severity: Math.min(1, (mean - value) / (2 * stdDev)),
description: `Activity drought: ${value} (${(
(1 - value / mean) *
100
).toFixed(0)}% below normal)`,
});
}
}
// Detect regime shifts (significant changes in mean)
const shifts = this.detectRegimeShifts(timeSeries);
anomalies.push(...shifts);
return anomalies.sort((a, b) => b.severity - a.severity);
}
/**
 * Detect regime shifts in time series
 *
 * Slides a split point across the series and compares the window means
 * on either side; a relative change above 50% is reported as a shift.
 */
private detectRegimeShifts(
  timeSeries: Array<{ timestamp: Date; value: number; metadata?: any }>,
): Array<{
  timestamp: Date;
  type: "shift";
  severity: number;
  description: string;
}> {
  const shifts: Array<{
    timestamp: Date;
    type: "shift";
    severity: number;
    description: string;
  }> = [];
  const values = timeSeries.map((p) => p.value);
  // Windowed means are unreliable on very short series
  if (values.length < 20) return shifts;
  const windowSize = Math.floor(values.length / 4);
  const average = (slice: number[]) =>
    slice.reduce((acc, v) => acc + v, 0) / slice.length;
  for (let i = windowSize; i < values.length - windowSize; i++) {
    const meanBefore = average(values.slice(i - windowSize, i));
    const meanAfter = average(values.slice(i, i + windowSize));
    const relativeChange =
      meanBefore > 0 ? Math.abs(meanAfter - meanBefore) / meanBefore : 0;
    // 50% change threshold
    if (relativeChange > 0.5) {
      shifts.push({
        timestamp: timeSeries[i].timestamp,
        type: "shift",
        severity: Math.min(1, relativeChange),
        description: `Regime shift: ${meanBefore.toFixed(
          1,
        )} → ${meanAfter.toFixed(1)} (${(relativeChange * 100).toFixed(
          0,
        )}% change)`,
      });
    }
  }
  return shifts;
}
/**
 * Generate recommendations based on analysis
 *
 * Produces human-readable guidance derived from detected patterns,
 * aggregate metrics, and high-severity anomalies, in that order.
 */
private generateRecommendations(
  patterns: TemporalPattern[],
  metrics: TemporalMetrics,
  anomalies: PredictionResult["anomalies"],
): string[] {
  const recommendations: string[] = [];
  // Pattern-based recommendations
  const hasStrongPeriodic = patterns.some(
    (p) => p.type === "periodic" && p.confidence > 0.7,
  );
  if (hasStrongPeriodic) {
    recommendations.push(
      "Schedule maintenance and optimizations during low-activity periods based on detected cycles",
    );
  }
  const trend = patterns.find((p) => p.type === "trending")?.trend;
  if (trend === "increasing") {
    recommendations.push(
      "Plan for increased storage and processing capacity based on growing activity trend",
    );
  } else if (trend === "decreasing") {
    recommendations.push(
      "Investigate causes of declining activity and consider engagement strategies",
    );
  }
  // Metrics-based recommendations
  if (metrics.consistency < 0.5) {
    recommendations.push(
      "High variability detected - consider implementing activity smoothing mechanisms",
    );
  }
  if (metrics.growthRate > 50) {
    recommendations.push(
      "Rapid growth detected - implement proactive scaling measures",
    );
  }
  // Anomaly-based recommendations
  const hasSevereSpike = anomalies.some(
    (a) => a.type === "spike" && a.severity > 0.7,
  );
  if (hasSevereSpike) {
    recommendations.push(
      "Implement burst handling to manage activity spikes effectively",
    );
  }
  const hasSevereDrought = anomalies.some(
    (a) => a.type === "drought" && a.severity > 0.7,
  );
  if (hasSevereDrought) {
    recommendations.push(
      "Investigate causes of activity droughts and implement retention strategies",
    );
  }
  return recommendations;
}
/**
 * Utility methods
 */
// Default analysis window: the 30 days ending now.
private getDefaultTimeRange(): TimeWindow {
  const THIRTY_DAYS_MS = 30 * 24 * 60 * 60 * 1000;
  const end = new Date();
  const start = new Date(end.getTime() - THIRTY_DAYS_MS);
  return {
    start,
    end,
    duration: end.getTime() - start.getTime(),
    label: "Last 30 days",
  };
}
// Build a deterministic cache key from the analysis type and query.
// NOTE(review): relies on JSON.stringify property ordering; equivalent
// queries with different insertion order produce distinct keys — confirm
// this is acceptable for cache hit rates.
private generateCacheKey(type: string, query: TemporalQuery): string {
  const serializedQuery = JSON.stringify(query);
  return `${type}_${serializedQuery}`;
}
// Convert a period expressed in days into units of the requested
// granularity (hours multiply; coarser units divide and round up).
private adjustPeriodForGranularity(
  period: number,
  granularity: string,
): number {
  if (granularity === "hour") return period * 24;
  if (granularity === "week") return Math.ceil(period / 7);
  if (granularity === "month") return Math.ceil(period / 30);
  if (granularity === "year") return Math.ceil(period / 365);
  // "day" and any unrecognized granularity pass through unchanged
  return period;
}
// Render a human-readable label for a date at the requested granularity.
private formatTimeLabel(date: Date, granularity: string): string {
  const isoDay = date.toISOString().slice(0, 10);
  if (granularity === "hour") {
    return date.toISOString().slice(0, 13) + ":00";
  }
  if (granularity === "week") {
    return `Week of ${isoDay}`;
  }
  if (granularity === "month") {
    const month = (date.getMonth() + 1).toString().padStart(2, "0");
    return `${date.getFullYear()}-${month}`;
  }
  if (granularity === "year") {
    return date.getFullYear().toString();
  }
  // "day" and unknown granularities both fall back to YYYY-MM-DD
  return isoDay;
}
// A pattern is short-term when its period is a week or less; patterns
// without a defined period default to short-term.
private isShortTerm(
  pattern: TemporalPattern,
  _query: TemporalQuery,
): boolean {
  const msPerDay = 24 * 60 * 60 * 1000;
  if (!pattern.period) return true;
  return pattern.period / msPerDay <= 7;
}
// A pattern is long-term when its period exceeds 30 days; patterns
// without a defined period are never long-term.
private isLongTerm(pattern: TemporalPattern, _query: TemporalQuery): boolean {
  const msPerDay = 24 * 60 * 60 * 1000;
  if (!pattern.period) return false;
  return pattern.period / msPerDay > 30;
}
// Derive the time window spanned by a pattern's data points; falls back
// to the default 30-day range when the pattern carries no points.
private getPatternTimeframe(pattern: TemporalPattern): TimeWindow {
  const points = pattern.dataPoints;
  if (points.length === 0) {
    return this.getDefaultTimeRange();
  }
  const start = points[0].timestamp;
  const end = points[points.length - 1].timestamp;
  const startLabel = start.toISOString().slice(0, 10);
  const endLabel = end.toISOString().slice(0, 10);
  return {
    start,
    end,
    duration: end.getTime() - start.getTime(),
    label: `${startLabel} to ${endLabel}`,
  };
}
// Only confident periodic/trending/seasonal patterns warrant action.
private isActionablePattern(pattern: TemporalPattern): boolean {
  if (pattern.confidence <= 0.7) return false;
  const actionableTypes = ["periodic", "trending", "seasonal"];
  return actionableTypes.includes(pattern.type);
}
// Suggested follow-up actions for a detected pattern, keyed on its type
// (and, for trending patterns, on the trend direction).
private getPatternRecommendations(pattern: TemporalPattern): string[] {
  if (pattern.type === "periodic") {
    return [
      "Schedule regular maintenance during low-activity periods",
      "Optimize resource allocation based on predictable cycles",
    ];
  }
  if (pattern.type === "trending") {
    if (pattern.trend === "increasing") {
      return [
        "Plan for capacity expansion",
        "Implement proactive monitoring",
      ];
    }
    if (pattern.trend === "decreasing") {
      return [
        "Investigate root causes of decline",
        "Consider engagement interventions",
      ];
    }
    return [];
  }
  if (pattern.type === "seasonal") {
    return [
      "Adjust system configuration for seasonal patterns",
      "Plan marketing and engagement around peak periods",
    ];
  }
  // Unrecognized pattern types yield no recommendations
  return [];
}
// Suggested follow-up actions for a detected anomaly, keyed by its type.
private getAnomalyRecommendations(anomaly: {
  type: string;
  severity: number;
}): string[] {
  const recommendationsByType: Record<string, string[]> = {
    spike: [
      "Implement burst protection mechanisms",
      "Investigate spike triggers for prevention",
      "Consider auto-scaling capabilities",
    ],
    drought: [
      "Implement activity monitoring alerts",
      "Investigate user engagement issues",
      "Consider proactive outreach strategies",
    ],
    shift: [
      "Investigate underlying system changes",
      "Update baseline metrics and thresholds",
      "Review configuration changes during this period",
    ],
  };
  // Copy so callers may mutate their result without affecting the table.
  return [...(recommendationsByType[anomaly.type] ?? [])];
}
// Bucket activity by hour-of-day and locate the peak hour. Confidence is
// the coefficient of variation (stddev / mean) of the hourly averages,
// capped at 0.9 — flatter profiles score lower.
private analyzeHourlyPattern(
  timeSeries: Array<{ timestamp: Date; value: number; metadata?: any }>,
): { confidence: number; peakHour: number } {
  const totals = new Array(24).fill(0);
  const counts = new Array(24).fill(0);
  for (const { timestamp, value } of timeSeries) {
    const hour = timestamp.getHours();
    totals[hour] += value;
    counts[hour] += 1;
  }
  // Average activity per hour (0 where no samples landed)
  const averages = totals.map((total, hour) =>
    counts[hour] > 0 ? total / counts[hour] : 0,
  );
  // First hour holding the maximum average wins ties
  let peakHour = 0;
  for (let hour = 1; hour < 24; hour++) {
    if (averages[hour] > averages[peakHour]) peakHour = hour;
  }
  const mean = averages.reduce((acc, v) => acc + v, 0) / 24;
  const variance =
    averages.reduce((acc, v) => acc + (v - mean) * (v - mean), 0) / 24;
  const confidence =
    mean > 0 ? Math.min(0.9, Math.sqrt(variance) / mean) : 0;
  return { confidence, peakHour };
}
// Bucket activity by day-of-week and locate the peak day. Confidence
// mirrors the hourly analysis: stddev / mean of the daily averages,
// capped at 0.9.
private analyzeWeeklyPattern(
  timeSeries: Array<{ timestamp: Date; value: number; metadata?: any }>,
): { confidence: number; peakDay: string } {
  const dayNames = [
    "Sunday",
    "Monday",
    "Tuesday",
    "Wednesday",
    "Thursday",
    "Friday",
    "Saturday",
  ];
  const totals = new Array(7).fill(0);
  const counts = new Array(7).fill(0);
  for (const { timestamp, value } of timeSeries) {
    const day = timestamp.getDay();
    totals[day] += value;
    counts[day] += 1;
  }
  // Average activity per weekday (0 where no samples landed)
  const averages = totals.map((total, day) =>
    counts[day] > 0 ? total / counts[day] : 0,
  );
  // First day holding the maximum average wins ties
  let peakIndex = 0;
  for (let day = 1; day < 7; day++) {
    if (averages[day] > averages[peakIndex]) peakIndex = day;
  }
  const mean = averages.reduce((acc, v) => acc + v, 0) / 7;
  const variance =
    averages.reduce((acc, v) => acc + (v - mean) * (v - mean), 0) / 7;
  const confidence =
    mean > 0 ? Math.min(0.9, Math.sqrt(variance) / mean) : 0;
  return { confidence, peakDay: dayNames[peakIndex] };
}
/**
 * Setup periodic analysis
 *
 * Schedules a recurring background pass that recomputes temporal
 * insights every 6 hours and notifies listeners via events: emits
 * "periodic_analysis_completed" with the insight count on success and
 * "periodic_analysis_error" with a message string on failure.
 *
 * NOTE(review): the interval handle is never stored or cleared, so the
 * timer runs for the process lifetime — confirm this is intended and
 * consider clearInterval/unref() if instances can be disposed.
 */
private setupPeriodicAnalysis(): void {
  // Run analysis every 6 hours
  setInterval(
    async () => {
      try {
        const insights = await this.getTemporalInsights();
        this.emit("periodic_analysis_completed", {
          insights: insights.length,
        });
      } catch (error) {
        // Normalize non-Error throwables to a string for the event payload
        this.emit("periodic_analysis_error", {
          error: error instanceof Error ? error.message : String(error),
        });
      }
    },
    6 * 60 * 60 * 1000,
  );
}
}
```
--------------------------------------------------------------------------------
/tests/tools/validate-content.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
import * as fs from "fs/promises";
import * as path from "path";
import {
handleValidateDiataxisContent,
validateGeneralContent,
} from "../../src/tools/validate-content.js";
import { ValidationResult } from "../../src/tools/validate-content.js";
describe("Content Validation Tool", () => {
// Isolated temp workspace under .tmp/; created fresh before each test
// and removed afterwards so tests never share filesystem state.
const testTempDir = path.join(__dirname, "../../.tmp/test-validation");
beforeEach(async () => {
  // Create test directory
  await fs.mkdir(testTempDir, { recursive: true });
});
afterEach(async () => {
  // Clean up test directory
  try {
    await fs.rm(testTempDir, { recursive: true });
  } catch {
    // Ignore cleanup errors
  }
});
describe("Application Code Validation", () => {
  // Validating a source tree (package.json + src/) should flag exported
  // functions without JSDoc and `throw` sites without error docs.
  it("should detect application code path correctly", async () => {
    // Create mock application structure
    const appDir = path.join(testTempDir, "mock-app");
    await fs.mkdir(appDir, { recursive: true });
    await fs.mkdir(path.join(appDir, "src"), { recursive: true });
    await fs.writeFile(
      path.join(appDir, "package.json"),
      '{"name": "test-app"}',
    );
    // Create TypeScript file without documentation
    const tsFile = path.join(appDir, "src", "index.ts");
    await fs.writeFile(
      tsFile,
      `
export function undocumentedFunction(param: string): string {
return param.toUpperCase();
}
export const anotherFunction = (value: number) => {
if (value < 0) {
throw new Error('Invalid value');
}
return value * 2;
};
`.trim(),
    );
    const result = await handleValidateDiataxisContent({
      contentPath: appDir,
      validationType: "compliance",
      includeCodeValidation: true,
    });
    expect(result).toBeDefined();
    expect(result.issues).toBeDefined();
    // Should find issues with undocumented exported functions
    const undocumentedIssues = result.issues.filter((issue) =>
      issue.description.includes("lacks documentation"),
    );
    expect(undocumentedIssues.length).toBeGreaterThan(0);
    // Should find issues with undocumented error throwing
    const errorDocIssues = result.issues.filter((issue) =>
      issue.description.includes(
        "Error throwing code found without error documentation",
      ),
    );
    expect(errorDocIssues.length).toBeGreaterThan(0);
  });
  // A src/ tree missing expected subdirectories should produce
  // "application structure" issues (e.g. a missing tools directory).
  it("should validate application architecture structure", async () => {
    // Create mock application with missing directories
    const appDir = path.join(testTempDir, "incomplete-app");
    await fs.mkdir(appDir, { recursive: true });
    await fs.writeFile(
      path.join(appDir, "package.json"),
      '{"name": "incomplete-app"}',
    );
    // Missing tools and types directories
    await fs.mkdir(path.join(appDir, "src"), { recursive: true });
    await fs.writeFile(
      path.join(appDir, "src", "index.ts"),
      'export const app = "test";',
    );
    const result = await handleValidateDiataxisContent({
      contentPath: appDir,
      validationType: "compliance",
      includeCodeValidation: false,
    });
    const structureIssues = result.issues.filter(
      (issue) => issue.location.file === "application structure",
    );
    expect(structureIssues.length).toBeGreaterThan(0);
    // Should suggest missing tools directory
    const toolsIssue = structureIssues.find((issue) =>
      issue.description.includes("tools directory"),
    );
    expect(toolsIssue).toBeDefined();
  });
  // A README without the expected sections should be reported as lacking
  // essential structure.
  it("should validate README structure", async () => {
    const appDir = path.join(testTempDir, "readme-test");
    await fs.mkdir(appDir, { recursive: true });
    await fs.mkdir(path.join(appDir, "src"), { recursive: true });
    await fs.writeFile(
      path.join(appDir, "package.json"),
      '{"name": "readme-test"}',
    );
    await fs.writeFile(
      path.join(appDir, "src", "index.ts"),
      'export const app = "test";',
    );
    // Create README with missing sections
    await fs.writeFile(
      path.join(appDir, "README.md"),
      `
This is a project without proper structure.
Some description here.
`.trim(),
    );
    const result = await handleValidateDiataxisContent({
      contentPath: appDir,
      validationType: "compliance",
      includeCodeValidation: false,
    });
    // Application validation should find issues
    const readmeIssues = result.issues.filter(
      (issue) => issue.location.file === "README.md",
    );
    expect(readmeIssues.length).toBeGreaterThan(0);
    // Should find issues with README structure
    const structureIssue = readmeIssues.find((issue) =>
      issue.description.includes("lacks essential sections"),
    );
    expect(structureIssue).toBeDefined();
  });
  // Fully JSDoc-documented exports (including @throws) should produce no
  // documentation-related issues — the negative counterpart of the first test.
  it("should detect properly documented functions", async () => {
    const appDir = path.join(testTempDir, "documented-app");
    await fs.mkdir(appDir, { recursive: true });
    await fs.mkdir(path.join(appDir, "src"), { recursive: true });
    await fs.writeFile(
      path.join(appDir, "package.json"),
      '{"name": "documented-app"}',
    );
    // Create well-documented TypeScript file
    const tsFile = path.join(appDir, "src", "documented.ts");
    await fs.writeFile(
      tsFile,
      `
/**
* Converts a string to uppercase
* @param param - The input string
* @returns The uppercase string
*/
export function documentedFunction(param: string): string {
return param.toUpperCase();
}
/**
* Doubles a positive number
* @param value - The input number (must be positive)
* @returns The doubled value
* @throws {Error} When value is negative
*/
export const wellDocumentedFunction = (value: number) => {
if (value < 0) {
throw new Error('Invalid value');
}
return value * 2;
};
`.trim(),
    );
    const result = await handleValidateDiataxisContent({
      contentPath: appDir,
      validationType: "compliance",
      includeCodeValidation: true,
    });
    // Should have no undocumented issues since functions are properly documented
    const undocumentedIssues = result.issues.filter((issue) =>
      issue.description.includes("lacks documentation"),
    );
    expect(undocumentedIssues.length).toBe(0);
    // Should not complain about error documentation
    const errorDocIssues = result.issues.filter((issue) =>
      issue.description.includes(
        "Error throwing code found without error documentation",
      ),
    );
    expect(errorDocIssues.length).toBe(0);
  });
});
describe("Documentation Validation", () => {
  // A docs tree whose tutorial lacks a prerequisites section should
  // yield Diataxis compliance issues.
  it("should detect documentation directory correctly", async () => {
    // Create mock documentation structure
    const docsDir = path.join(testTempDir, "docs");
    await fs.mkdir(docsDir, { recursive: true });
    await fs.mkdir(path.join(docsDir, "tutorials"), { recursive: true });
    await fs.writeFile(
      path.join(docsDir, "tutorials", "tutorial1.md"),
      `
# Tutorial 1
This is a tutorial without prerequisites section.
\`\`\`javascript
console.log("hello")
\`\`\`
`.trim(),
    );
    const result = await handleValidateDiataxisContent({
      contentPath: docsDir,
      validationType: "compliance",
      includeCodeValidation: true,
    });
    expect(result).toBeDefined();
    // Should find Diataxis compliance issues
    const complianceIssues = result.issues.filter(
      (issue) => issue.category === "compliance",
    );
    expect(complianceIssues.length).toBeGreaterThan(0);
    // Should find missing prerequisites in tutorial
    const prereqIssue = complianceIssues.find((issue) =>
      issue.description.includes("prerequisites"),
    );
    expect(prereqIssue).toBeDefined();
  });
  // Accuracy validation should report exactly the one broken relative
  // link while the resolvable link passes.
  it("should validate link integrity", async () => {
    const docsDir = path.join(testTempDir, "docs-links");
    await fs.mkdir(docsDir, { recursive: true });
    // Create file with broken internal link
    await fs.writeFile(
      path.join(docsDir, "index.md"),
      `
# Documentation
[Broken Link](./nonexistent.md)
[Another Link](./other.md)
`.trim(),
    );
    // Create the referenced file
    await fs.writeFile(path.join(docsDir, "other.md"), "# Other Page");
    const result = await handleValidateDiataxisContent({
      contentPath: docsDir,
      validationType: "accuracy",
      includeCodeValidation: false,
    });
    const linkIssues = result.issues.filter((issue) =>
      issue.description.includes("Broken internal link"),
    );
    expect(linkIssues.length).toBe(1);
    const brokenLink = linkIssues[0];
    expect(brokenLink.description).toContain("nonexistent.md");
  });
  // Fenced code blocks are checked: invalid JSON should be reported
  // while valid examples pass.
  it("should validate code blocks in documentation", async () => {
    const docsDir = path.join(testTempDir, "docs-code");
    await fs.mkdir(docsDir, { recursive: true });
    await fs.writeFile(
      path.join(docsDir, "guide.md"),
      `
# Code Examples
\`\`\`javascript
// Missing semicolon
console.log("test")
\`\`\`
\`\`\`json
{ "valid": "json" }
\`\`\`
\`\`\`json
{ "invalid": json }
\`\`\`
`.trim(),
    );
    const result = await handleValidateDiataxisContent({
      contentPath: docsDir,
      validationType: "all",
      includeCodeValidation: true,
    });
    expect(result.codeValidation).toBeDefined();
    expect(result.codeValidation!.exampleResults.length).toBeGreaterThan(0);
    // Should find JSON syntax error
    const jsonErrors = result.codeValidation!.exampleResults.filter((ex) =>
      ex.issues.some((issue) => issue.description.includes("Invalid JSON")),
    );
    expect(jsonErrors.length).toBeGreaterThan(0);
  });
});
describe("General Content Validation", () => {
  // validateGeneralContent should fail overall, list the single broken
  // link, count validated code blocks, and surface code errors plus
  // recommendations.
  it("should validate general content with link checking", async () => {
    const contentDir = path.join(testTempDir, "general-content");
    await fs.mkdir(contentDir, { recursive: true });
    await fs.writeFile(
      path.join(contentDir, "page.md"),
      `
# Test Page
[Good Link](./existing.md)
[Bad Link](./missing.md)
\`\`\`js
console.log("missing semicolon")
\`\`\`
`.trim(),
    );
    await fs.writeFile(
      path.join(contentDir, "existing.md"),
      "# Existing Page",
    );
    const result = await validateGeneralContent({
      contentPath: contentDir,
      validationType: "all",
      includeCodeValidation: true,
    });
    expect(result.success).toBe(false);
    expect(result.brokenLinks.length).toBe(1);
    expect(result.brokenLinks[0]).toContain("missing.md");
    expect(result.codeBlocksValidated).toBeGreaterThan(0);
    expect(result.codeErrors.length).toBeGreaterThan(0);
    expect(result.recommendations.length).toBeGreaterThan(0);
  });
  // Content with only resolvable links and valid code should pass and
  // emit the "no critical issues" recommendation.
  it("should pass validation for clean content", async () => {
    const contentDir = path.join(testTempDir, "clean-content");
    await fs.mkdir(contentDir, { recursive: true });
    await fs.writeFile(
      path.join(contentDir, "clean.md"),
      `
# Clean Page
[Good Link](./other.md)
\`\`\`json
{ "valid": "json" }
\`\`\`
`.trim(),
    );
    await fs.writeFile(path.join(contentDir, "other.md"), "# Other Page");
    const result = await validateGeneralContent({
      contentPath: contentDir,
      validationType: "all",
      includeCodeValidation: true,
    });
    expect(result.success).toBe(true);
    expect(result.brokenLinks.length).toBe(0);
    expect(result.recommendations).toContain(
      "Content validation passed - no critical issues found",
    );
  });
});
describe("Confidence Metrics", () => {
  // Overall confidence should be a percentage in (0, 100] with a full
  // per-dimension breakdown.
  it("should calculate confidence metrics correctly", async () => {
    const appDir = path.join(testTempDir, "confidence-test");
    await fs.mkdir(appDir, { recursive: true });
    await fs.mkdir(path.join(appDir, "src"), { recursive: true });
    await fs.writeFile(
      path.join(appDir, "package.json"),
      '{"name": "confidence-test"}',
    );
    await fs.writeFile(
      path.join(appDir, "src", "index.ts"),
      'export const test = "value";',
    );
    const result = await handleValidateDiataxisContent({
      contentPath: appDir,
      validationType: "all",
      includeCodeValidation: true,
    });
    expect(result.confidence).toBeDefined();
    expect(result.confidence.overall).toBeGreaterThan(0);
    expect(result.confidence.overall).toBeLessThanOrEqual(100);
    expect(result.confidence.breakdown).toBeDefined();
    expect(result.confidence.breakdown.technologyDetection).toBeDefined();
    expect(result.confidence.breakdown.codeExampleRelevance).toBeDefined();
    expect(
      result.confidence.breakdown.architecturalAssumptions,
    ).toBeDefined();
  });
  // Recommendations and next steps are always present; low overall
  // confidence should additionally suggest a comprehensive review.
  it("should provide recommendations based on confidence", async () => {
    const appDir = path.join(testTempDir, "recommendations-test");
    await fs.mkdir(appDir, { recursive: true });
    await fs.writeFile(
      path.join(appDir, "package.json"),
      '{"name": "recommendations-test"}',
    );
    // Create content that will generate issues
    await fs.writeFile(path.join(appDir, "README.md"), "No proper structure");
    const result = await handleValidateDiataxisContent({
      contentPath: appDir,
      validationType: "all",
      includeCodeValidation: false,
    });
    expect(result.recommendations).toBeDefined();
    expect(result.recommendations.length).toBeGreaterThan(0);
    expect(result.nextSteps).toBeDefined();
    expect(result.nextSteps.length).toBeGreaterThan(0);
    if (result.confidence.overall < 70) {
      expect(
        result.recommendations.some((rec) =>
          rec.includes("comprehensive review"),
        ),
      ).toBe(true);
    }
  });
});
describe("Error Handling and Edge Cases", () => {
it("should handle non-existent content path gracefully", async () => {
const nonExistentPath = path.join(testTempDir, "does-not-exist");
const result = await handleValidateDiataxisContent({
contentPath: nonExistentPath,
validationType: "all",
includeCodeValidation: false,
});
expect(result).toBeDefined();
// The function handles non-existent paths gracefully but may still succeed
expect(result.confidence).toBeDefined();
});
it("should handle empty directory", async () => {
const emptyDir = path.join(testTempDir, "empty-dir");
await fs.mkdir(emptyDir, { recursive: true });
const result = await handleValidateDiataxisContent({
contentPath: emptyDir,
validationType: "all",
includeCodeValidation: true,
});
expect(result).toBeDefined();
expect(result.confidence.breakdown.architecturalAssumptions).toBeLessThan(
80,
);
});
it("should handle project context loading with analysis ID", async () => {
const appDir = path.join(testTempDir, "context-test");
await fs.mkdir(appDir, { recursive: true });
await fs.writeFile(
path.join(appDir, "package.json"),
'{"name": "context-test"}',
);
// Create .documcp directory with analysis
const docucmpDir = path.join(appDir, ".documcp", "analyses");
await fs.mkdir(docucmpDir, { recursive: true });
await fs.writeFile(
path.join(docucmpDir, "test-analysis.json"),
JSON.stringify({
metadata: {
projectName: "test-project",
primaryLanguage: "TypeScript",
},
technologies: { framework: "React" },
dependencies: { packages: ["react", "typescript"] },
}),
);
const result = await handleValidateDiataxisContent({
contentPath: appDir,
analysisId: "test-analysis",
validationType: "accuracy",
includeCodeValidation: false,
});
expect(result).toBeDefined();
expect(result.confidence).toBeDefined();
});
it("should handle missing analysis ID gracefully", async () => {
const appDir = path.join(testTempDir, "missing-analysis");
await fs.mkdir(appDir, { recursive: true });
await fs.writeFile(
path.join(appDir, "package.json"),
'{"name": "missing-analysis"}',
);
const result = await handleValidateDiataxisContent({
contentPath: appDir,
analysisId: "non-existent-analysis",
validationType: "accuracy",
includeCodeValidation: false,
});
expect(result).toBeDefined();
expect(result.confidence).toBeDefined();
});
it("should detect documentation directory correctly", async () => {
const docsPath = path.join(testTempDir, "project", "docs");
await fs.mkdir(docsPath, { recursive: true });
await fs.writeFile(path.join(docsPath, "index.md"), "# Documentation");
const result = await handleValidateDiataxisContent({
contentPath: docsPath,
validationType: "compliance",
includeCodeValidation: false,
});
expect(result).toBeDefined();
expect(result.confidence).toBeDefined();
// Documentation directory should be processed
expect(
result.confidence.breakdown.architecturalAssumptions,
).toBeGreaterThan(0);
});
it("should handle different validation types", async () => {
const appDir = path.join(testTempDir, "validation-types");
await fs.mkdir(appDir, { recursive: true });
await fs.writeFile(
path.join(appDir, "test.md"),
"# Test\n[broken link](./missing.md)",
);
// Test accuracy only
const accuracyResult = await handleValidateDiataxisContent({
contentPath: appDir,
validationType: "accuracy",
includeCodeValidation: false,
});
expect(accuracyResult).toBeDefined();
// Test completeness only
const completenessResult = await handleValidateDiataxisContent({
contentPath: appDir,
validationType: "completeness",
includeCodeValidation: false,
});
expect(completenessResult).toBeDefined();
// Test compliance only
const complianceResult = await handleValidateDiataxisContent({
contentPath: appDir,
validationType: "compliance",
includeCodeValidation: false,
});
expect(complianceResult).toBeDefined();
});
it("should handle code validation failure scenarios", async () => {
const appDir = path.join(testTempDir, "code-validation-fail");
await fs.mkdir(appDir, { recursive: true });
// Create markdown with broken code examples
await fs.writeFile(
path.join(appDir, "broken-code.md"),
`
# Broken Code Examples
\`\`\`javascript
// Syntax error
console.log("missing quote);
\`\`\`
\`\`\`json
{ "invalid": json }
\`\`\`
`.trim(),
);
const result = await handleValidateDiataxisContent({
contentPath: appDir,
validationType: "all",
includeCodeValidation: true,
});
expect(result.codeValidation).toBeDefined();
expect(result.codeValidation!.overallSuccess).toBe(false);
expect(
result.recommendations.some((rec) => rec.includes("Fix code examples")),
).toBe(true);
});
it("should generate risk factors for critical issues", async () => {
const appDir = path.join(testTempDir, "risk-factors");
await fs.mkdir(appDir, { recursive: true });
// Create content with multiple critical issues
await fs.writeFile(
path.join(appDir, "critical-issues.md"),
`
# Critical Issues
[Broken Link 1](./missing1.md)
[Broken Link 2](./missing2.md)
[Broken Link 3](./missing3.md)
`.trim(),
);
const result = await handleValidateDiataxisContent({
contentPath: appDir,
validationType: "all",
includeCodeValidation: false,
});
expect(result.confidence.riskFactors).toBeDefined();
expect(result.confidence.riskFactors.length).toBeGreaterThan(0);
const highRiskFactors = result.confidence.riskFactors.filter(
(rf) => rf.type === "high",
);
expect(highRiskFactors.length).toBeGreaterThan(0);
});
it("should handle uncertainty flags and medium risk factors", async () => {
const appDir = path.join(testTempDir, "uncertainty-test");
await fs.mkdir(appDir, { recursive: true });
// Create content that generates uncertainties
await fs.writeFile(
path.join(appDir, "uncertain.md"),
`
# Uncertain Content
This content has many ambiguous references and unclear instructions.
Multiple areas need clarification for proper understanding.
`.trim(),
);
const result = await handleValidateDiataxisContent({
contentPath: appDir,
validationType: "all",
includeCodeValidation: false,
});
// Manually add uncertainties to test the risk factor generation
result.uncertainties = [
{
area: "test1",
severity: "high",
description: "test",
potentialImpact: "test",
clarificationNeeded: "test",
fallbackStrategy: "test",
},
{
area: "test2",
severity: "high",
description: "test",
potentialImpact: "test",
clarificationNeeded: "test",
fallbackStrategy: "test",
},
{
area: "test3",
severity: "high",
description: "test",
potentialImpact: "test",
clarificationNeeded: "test",
fallbackStrategy: "test",
},
{
area: "test4",
severity: "high",
description: "test",
potentialImpact: "test",
clarificationNeeded: "test",
fallbackStrategy: "test",
},
{
area: "test5",
severity: "high",
description: "test",
potentialImpact: "test",
clarificationNeeded: "test",
fallbackStrategy: "test",
},
{
area: "test6",
severity: "high",
description: "test",
potentialImpact: "test",
clarificationNeeded: "test",
fallbackStrategy: "test",
},
];
expect(result.uncertainties.length).toBeGreaterThan(5);
const highUncertainties = result.uncertainties.filter(
(u) => u.severity === "high" || u.severity === "critical",
);
expect(highUncertainties.length).toBeGreaterThan(0);
});
it("should handle Diataxis structure analysis", async () => {
const docsDir = path.join(testTempDir, "diataxis-structure");
await fs.mkdir(docsDir, { recursive: true });
// One directory plus a sample document for each Diataxis category
const sections = [
["tutorials", "tutorial.md", "# Tutorial"],
["how-to", "guide.md", "# How-to Guide"],
["reference", "api.md", "# API Reference"],
["explanation", "concept.md", "# Explanation"],
];
for (const [section, filename, content] of sections) {
await fs.mkdir(path.join(docsDir, section), { recursive: true });
await fs.writeFile(path.join(docsDir, section, filename), content);
}
const result = await handleValidateDiataxisContent({
contentPath: docsDir,
validationType: "compliance",
includeCodeValidation: false,
});
// A complete four-quadrant layout should score well on structure
expect(result).toBeDefined();
expect(
result.confidence.breakdown.architecturalAssumptions,
).toBeGreaterThan(60);
});
it("should handle successful validation with no issues", async () => {
const cleanDir = path.join(testTempDir, "clean-validation");
await fs.mkdir(cleanDir, { recursive: true });
// Create clean content with no issues
await fs.writeFile(
path.join(cleanDir, "clean.md"),
`
# Clean Documentation
This is well-structured documentation with no issues.
\`\`\`json
{ "valid": "json" }
\`\`\`
`.trim(),
);
// Well-formed markdown with a valid JSON fence should validate cleanly
const result = await handleValidateDiataxisContent({
contentPath: cleanDir,
validationType: "all",
includeCodeValidation: true,
});
// Should have minimal issues and good confidence
expect(result.confidence.overall).toBeGreaterThan(0);
expect(result.recommendations).toBeDefined();
expect(result.recommendations.length).toBeGreaterThan(0);
});
it("should handle timeout scenarios", async () => {
// Test timeout handling by creating a scenario that might take time
const largeDir = path.join(testTempDir, "timeout-test");
await fs.mkdir(largeDir, { recursive: true });
// Create multiple markdown files to simulate processing time
for (let i = 0; i < 5; i++) {
await fs.writeFile(
path.join(largeDir, `file${i}.md`),
`
# File ${i}
Content for file ${i} with some text.
\`\`\`javascript
console.log("File ${i}");
\`\`\`
`.trim(),
);
}
// Validation should still complete and return a result within the
// handler's internal timeout even with multiple files to process
const result = await handleValidateDiataxisContent({
contentPath: largeDir,
validationType: "all",
includeCodeValidation: true,
});
expect(result).toBeDefined();
expect(result.confidence).toBeDefined();
});
it("should handle confidence levels and validation modes", async () => {
const testDir = path.join(testTempDir, "confidence-levels");
await fs.mkdir(testDir, { recursive: true });
await fs.writeFile(path.join(testDir, "test.md"), "# Test Content");
// Each supported confidence level must produce a defined result;
// iterate rather than repeating the call three times
for (const confidence of ["strict", "moderate", "permissive"]) {
const result = await handleValidateDiataxisContent({
contentPath: testDir,
validationType: "all",
includeCodeValidation: false,
confidence,
});
expect(result).toBeDefined();
}
});
it("should handle TypeScript files without package.json", async () => {
const tsDir = path.join(testTempDir, "typescript-only");
await fs.mkdir(tsDir, { recursive: true });
await fs.mkdir(path.join(tsDir, "src"), { recursive: true });
// Create TypeScript files without package.json
await fs.writeFile(
path.join(tsDir, "src", "app.ts"),
`
export class TestClass {
public method(): void {
console.log('test');
}
}
`.trim(),
);
// Validator should cope with source-only projects (no manifest)
const result = await handleValidateDiataxisContent({
contentPath: tsDir,
validationType: "compliance",
includeCodeValidation: false,
});
expect(result).toBeDefined();
expect(result.confidence).toBeDefined();
});
it("should handle mixed content scenarios", async () => {
const mixedDir = path.join(testTempDir, "mixed-content");
await fs.mkdir(mixedDir, { recursive: true });
await fs.mkdir(path.join(mixedDir, "src"), { recursive: true });
// Create both application and documentation content
await fs.writeFile(
path.join(mixedDir, "package.json"),
'{"name": "mixed-app"}',
);
await fs.writeFile(
path.join(mixedDir, "src", "index.ts"),
'export const app = "test";',
);
await fs.writeFile(
path.join(mixedDir, "README.md"),
`
# Mixed Content App
## Installation
Run \`npm install\`
## Usage
See the documentation.
`.trim(),
);
const result = await handleValidateDiataxisContent({
contentPath: mixedDir,
validationType: "all",
includeCodeValidation: true,
});
// Mixed code + docs should not degrade architectural confidence below 60
expect(result).toBeDefined();
expect(
result.confidence.breakdown.architecturalAssumptions,
).toBeGreaterThanOrEqual(60);
});
it("should handle business context alignment scoring", async () => {
const businessDir = path.join(testTempDir, "business-context");
await fs.mkdir(businessDir, { recursive: true });
// Create content with business context
await fs.writeFile(
path.join(businessDir, "business.md"),
`
# Business Requirements
This application serves enterprise customers with specific needs.
The solution addresses market requirements and business objectives.
`.trim(),
);
const result = await handleValidateDiataxisContent({
contentPath: businessDir,
validationType: "all",
includeCodeValidation: false,
});
// The alignment score only needs to be present and non-negative here
expect(result).toBeDefined();
expect(
result.confidence.breakdown.businessContextAlignment,
).toBeGreaterThanOrEqual(0);
});
it("should handle deprecated patterns in technical accuracy checks", async () => {
const deprecatedDir = path.join(testTempDir, "deprecated-patterns");
await fs.mkdir(deprecatedDir, { recursive: true });
// Fixture deliberately uses outdated idioms: global npm install,
// `var`, anonymous function declaration, and a plain-http link
await fs.writeFile(
path.join(deprecatedDir, "deprecated.md"),
`
# Deprecated Patterns
\`\`\`bash
npm install -g some-package
\`\`\`
\`\`\`javascript
var oldVariable = "test";
function() {
console.log("old style");
}
\`\`\`
Visit http://example.com for more info.
`.trim(),
);
const result = await handleValidateDiataxisContent({
contentPath: deprecatedDir,
validationType: "accuracy",
includeCodeValidation: false,
});
// Accuracy validation should flag at least one outdated pattern
const deprecatedIssues = result.issues.filter((issue) =>
issue.description.includes("Potentially outdated pattern"),
);
expect(deprecatedIssues.length).toBeGreaterThan(0);
});
it("should handle async code without error handling", async () => {
const asyncDir = path.join(testTempDir, "async-code");
await fs.mkdir(asyncDir, { recursive: true });
// Both fenced examples await without try/catch on purpose
await fs.writeFile(
path.join(asyncDir, "async.md"),
`
# Async Code Examples
\`\`\`javascript
async function fetchData() {
const response = await fetch('/api/data');
return response.json();
}
\`\`\`
\`\`\`typescript
const getData = async (): Promise<any> => {
const result = await someAsyncOperation();
return result;
};
\`\`\`
`.trim(),
);
const result = await handleValidateDiataxisContent({
contentPath: asyncDir,
validationType: "accuracy",
includeCodeValidation: false,
});
// Accuracy validation should flag the missing error handling
const asyncIssues = result.issues.filter((issue) =>
issue.description.includes("Async code without error handling"),
);
expect(asyncIssues.length).toBeGreaterThan(0);
});
it("should handle version compatibility checks with project context", async () => {
const versionDir = path.join(testTempDir, "version-compat");
await fs.mkdir(versionDir, { recursive: true });
// Create .documcp directory with analysis
const docucmpDir = path.join(versionDir, ".documcp", "analyses");
await fs.mkdir(docucmpDir, { recursive: true });
// NOTE(review): "[email protected]" below looks like a redaction artifact
// of "package@version" entries — confirm against the original fixture
await fs.writeFile(
path.join(docucmpDir, "version-analysis.json"),
JSON.stringify({
metadata: {
projectName: "version-test",
primaryLanguage: "TypeScript",
},
technologies: { framework: "React" },
dependencies: { packages: ["[email protected]", "[email protected]"] },
}),
);
// Document mentions explicit versions so the validator can cross-check
// them against the stored analysis referenced by analysisId
await fs.writeFile(
path.join(versionDir, "versions.md"),
`
# Version Information
This project uses React @18.2.0 and TypeScript @4.9.0.
Also compatible with Node.js @16.14.0.
`.trim(),
);
const result = await handleValidateDiataxisContent({
contentPath: versionDir,
analysisId: "version-analysis",
validationType: "accuracy",
includeCodeValidation: false,
});
// Version mentions should surface at least one compatibility uncertainty
const versionUncertainties = result.uncertainties.filter(
(u) => u.area === "version-compatibility",
);
expect(versionUncertainties.length).toBeGreaterThan(0);
});
it("should handle dangerous bash commands", async () => {
const bashDir = path.join(testTempDir, "dangerous-bash");
await fs.mkdir(bashDir, { recursive: true });
// Fixture contains destructive commands (rm -rf /, chmod 777, etc.)
// that the accuracy checker is expected to flag
await fs.writeFile(
path.join(bashDir, "dangerous.md"),
`
# Dangerous Commands
\`\`\`bash
rm -rf /
sudo rm -rf /tmp/important
chmod 777 /etc/passwd
command > /dev/null 2>&1
\`\`\`
`.trim(),
);
const result = await handleValidateDiataxisContent({
contentPath: bashDir,
validationType: "accuracy",
includeCodeValidation: false,
});
const dangerousIssues = result.issues.filter((issue) =>
issue.description.includes("Potentially dangerous command"),
);
expect(dangerousIssues.length).toBeGreaterThan(0);
});
it("should handle mixed path separators in commands", async () => {
const pathDir = path.join(testTempDir, "mixed-paths");
await fs.mkdir(pathDir, { recursive: true });
// Single command mixing unix "/" and windows "\\" separators
await fs.writeFile(
path.join(pathDir, "paths.md"),
`
# Mixed Path Examples
\`\`\`bash
cp /unix/path\\windows\\mixed /destination/path
\`\`\`
`.trim(),
);
const result = await handleValidateDiataxisContent({
contentPath: pathDir,
validationType: "accuracy",
includeCodeValidation: false,
});
const pathIssues = result.issues.filter((issue) =>
issue.description.includes("Mixed path separators"),
);
expect(pathIssues.length).toBeGreaterThan(0);
});
it("should handle external links in accuracy validation", async () => {
const linksDir = path.join(testTempDir, "external-links");
await fs.mkdir(linksDir, { recursive: true });
await fs.writeFile(
path.join(linksDir, "external.md"),
`
# External Links
[GitHub](https://github.com)
[Documentation](https://docs.example.com)
`.trim(),
);
const result = await handleValidateDiataxisContent({
contentPath: linksDir,
validationType: "accuracy",
includeCodeValidation: false,
});
// External links are not fetched; they should surface as uncertainties
const linkUncertainties = result.uncertainties.filter(
(u) => u.area === "external-links",
);
expect(linkUncertainties.length).toBeGreaterThan(0);
});
it("should handle Diataxis compliance rules for different sections", async () => {
const complianceDir = path.join(testTempDir, "diataxis-compliance");
await fs.mkdir(complianceDir, { recursive: true });
// Create directories for each Diataxis section
await fs.mkdir(path.join(complianceDir, "tutorials"), {
recursive: true,
});
await fs.mkdir(path.join(complianceDir, "how-to"), { recursive: true });
await fs.mkdir(path.join(complianceDir, "reference"), {
recursive: true,
});
await fs.mkdir(path.join(complianceDir, "explanation"), {
recursive: true,
});
// Each fixture below deliberately violates its section's rules so the
// compliance check has something to flag in every quadrant.
// Tutorial without prerequisites
await fs.writeFile(
path.join(complianceDir, "tutorials", "bad-tutorial.md"),
`
# Bad Tutorial
This tutorial doesn't have prerequisites or clear steps.
`.trim(),
);
// How-to without task focus
await fs.writeFile(
path.join(complianceDir, "how-to", "bad-howto.md"),
`
# Bad Guide
Short guide.
`.trim(),
);
// Reference without structure
await fs.writeFile(
path.join(complianceDir, "reference", "bad-reference.md"),
`
Bad reference without headings or tables.
`.trim(),
);
// Explanation without "why"
await fs.writeFile(
path.join(complianceDir, "explanation", "bad-explanation.md"),
`
# Bad Explanation
Short explanation.
`.trim(),
);
const result = await handleValidateDiataxisContent({
contentPath: complianceDir,
validationType: "compliance",
includeCodeValidation: false,
});
const complianceIssues = result.issues.filter(
(issue) => issue.category === "compliance",
);
expect(complianceIssues.length).toBeGreaterThan(4); // Should find issues in each section
});
it("should handle TypeScript code validation with compilation errors", async () => {
const tsDir = path.join(testTempDir, "typescript-validation");
await fs.mkdir(tsDir, { recursive: true });
// The fenced TypeScript contains deliberate type errors, so overall
// code validation is expected to fail
await fs.writeFile(
path.join(tsDir, "typescript.md"),
`
# TypeScript Examples
\`\`\`typescript
// This has type errors
let x: string = 123;
function badFunction(param: number): string {
return param; // Type error
}
\`\`\`
`.trim(),
);
const result = await handleValidateDiataxisContent({
contentPath: tsDir,
validationType: "all",
includeCodeValidation: true,
});
expect(result.codeValidation).toBeDefined();
expect(result.codeValidation!.overallSuccess).toBe(false);
});
it("should handle bash code validation with complex chaining", async () => {
const bashComplexDir = path.join(testTempDir, "bash-complex");
await fs.mkdir(bashComplexDir, { recursive: true });
// &&/|| chaining plus rm on an unquoted variable should each raise
// at least one example-level issue
await fs.writeFile(
path.join(bashComplexDir, "complex-bash.md"),
`
# Complex Bash
\`\`\`bash
# Complex command chaining
command1 && command2 || command3
rm $VARIABLE
\`\`\`
`.trim(),
);
const result = await handleValidateDiataxisContent({
contentPath: bashComplexDir,
validationType: "all",
includeCodeValidation: true,
});
expect(result.codeValidation).toBeDefined();
const bashIssues = result.codeValidation!.exampleResults.flatMap(
(ex) => ex.issues,
);
expect(bashIssues.length).toBeGreaterThan(0);
});
it("should handle file limit reached scenario", async () => {
const largeDir = path.join(testTempDir, "large-directory");
await fs.mkdir(largeDir, { recursive: true });
// Populate the directory with ten markdown files to exercise the
// scanner's file-limit handling (written sequentially, as before)
const indices = Array.from({ length: 10 }, (_, idx) => idx);
for (const i of indices) {
await fs.writeFile(
path.join(largeDir, `file${i}.md`),
`# File ${i}\nContent for file ${i}.`,
);
}
const result = await handleValidateDiataxisContent({
contentPath: largeDir,
validationType: "all",
includeCodeValidation: false,
});
expect(result).toBeDefined();
expect(
result.confidence.breakdown.architecturalAssumptions,
).toBeGreaterThan(60);
});
it("should handle symlink detection in file scanning", async () => {
const symlinkDir = path.join(testTempDir, "symlink-test");
const nestedDir = path.join(symlinkDir, "subdir");
// recursive mkdir of the nested path also creates symlinkDir itself
await fs.mkdir(nestedDir, { recursive: true });
// One file at the root, one nested one level down
await fs.writeFile(path.join(symlinkDir, "regular.md"), "# Regular File");
await fs.writeFile(path.join(nestedDir, "nested.md"), "# Nested File");
const result = await handleValidateDiataxisContent({
contentPath: symlinkDir,
validationType: "all",
includeCodeValidation: false,
});
// Scanning should traverse the subdirectory without issue
expect(result).toBeDefined();
expect(
result.confidence.breakdown.architecturalAssumptions,
).toBeGreaterThanOrEqual(60);
});
it("should handle timeout scenario", async () => {
const timeoutDir = path.join(testTempDir, "timeout-scenario");
await fs.mkdir(timeoutDir, { recursive: true });
await fs.writeFile(path.join(timeoutDir, "test.md"), "# Test");
// NOTE(review): no timeout is actually mocked here — the previous
// unused `originalTimeout` constant was dead code and has been
// removed. To genuinely exercise the timeout path, the handler's
// timeout would need to be injectable/configurable.
const result = await handleValidateDiataxisContent({
contentPath: timeoutDir,
validationType: "all",
includeCodeValidation: false,
});
expect(result).toBeDefined();
});
it("should handle general content validation with external links", async () => {
const generalDir = path.join(testTempDir, "general-external");
await fs.mkdir(generalDir, { recursive: true });
// One external link (not followed) and one relative link to a real file
await fs.writeFile(
path.join(generalDir, "external.md"),
`
# External Links Test
[GitHub](https://github.com)
[Local](./local.md)
`.trim(),
);
await fs.writeFile(path.join(generalDir, "local.md"), "# Local File");
const result = await validateGeneralContent({
contentPath: generalDir,
validationType: "all",
includeCodeValidation: true,
followExternalLinks: false,
});
// Links are counted even when external ones are not fetched
expect(result.linksChecked).toBeGreaterThan(0);
expect(result.success).toBe(true);
});
it("should handle general content validation with code validation", async () => {
const codeDir = path.join(testTempDir, "general-code");
await fs.mkdir(codeDir, { recursive: true });
// First fence omits the semicolon on purpose to trigger a code error;
// the second is clean, so both fences should be validated
await fs.writeFile(
path.join(codeDir, "code.md"),
`
# Code Test
\`\`\`javascript
console.log("test")
\`\`\`
\`\`\`js
console.log("another test");
\`\`\`
`.trim(),
);
const result = await validateGeneralContent({
contentPath: codeDir,
validationType: "code",
includeCodeValidation: true,
});
expect(result.codeBlocksValidated).toBeGreaterThan(0);
expect(result.codeErrors.length).toBeGreaterThan(0); // Missing semicolon
});
it("should handle validation with no code blocks", async () => {
const noCodeDir = path.join(testTempDir, "no-code");
await fs.mkdir(noCodeDir, { recursive: true });
await fs.writeFile(
path.join(noCodeDir, "text.md"),
`
# Text Only
This is just text with no code blocks.
`.trim(),
);
const result = await validateGeneralContent({
contentPath: noCodeDir,
validationType: "all",
includeCodeValidation: true,
});
// Prose-only content validates successfully with zero code blocks
expect(result.codeBlocksValidated).toBe(0);
expect(result.success).toBe(true);
});
});
});
```