This is page 12 of 29. Use http://codebase.md/tosin2013/documcp?lines=true&page={x} (replacing {x} with a page number) to view the full context.
# Directory Structure
```
├── .dockerignore
├── .eslintignore
├── .eslintrc.json
├── .github
│ ├── agents
│ │ ├── documcp-ast.md
│ │ ├── documcp-deploy.md
│ │ ├── documcp-memory.md
│ │ ├── documcp-test.md
│ │ └── documcp-tool.md
│ ├── copilot-instructions.md
│ ├── dependabot.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── automated-changelog.md
│ │ ├── bug_report.md
│ │ ├── bug_report.yml
│ │ ├── documentation_issue.md
│ │ ├── feature_request.md
│ │ ├── feature_request.yml
│ │ ├── npm-publishing-fix.md
│ │ └── release_improvements.md
│ ├── PULL_REQUEST_TEMPLATE.md
│ ├── release-drafter.yml
│ └── workflows
│ ├── auto-merge.yml
│ ├── ci.yml
│ ├── codeql.yml
│ ├── dependency-review.yml
│ ├── deploy-docs.yml
│ ├── README.md
│ ├── release-drafter.yml
│ └── release.yml
├── .gitignore
├── .husky
│ ├── commit-msg
│ └── pre-commit
├── .linkcheck.config.json
├── .markdown-link-check.json
├── .nvmrc
├── .pre-commit-config.yaml
├── .versionrc.json
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── CONTRIBUTING.md
├── docker-compose.docs.yml
├── Dockerfile.docs
├── docs
│ ├── .docusaurus
│ │ ├── docusaurus-plugin-content-docs
│ │ │ └── default
│ │ │ └── __mdx-loader-dependency.json
│ │ └── docusaurus-plugin-content-pages
│ │ └── default
│ │ └── __plugin.json
│ ├── adrs
│ │ ├── 001-mcp-server-architecture.md
│ │ ├── 002-repository-analysis-engine.md
│ │ ├── 003-static-site-generator-recommendation-engine.md
│ │ ├── 004-diataxis-framework-integration.md
│ │ ├── 005-github-pages-deployment-automation.md
│ │ ├── 006-mcp-tools-api-design.md
│ │ ├── 007-mcp-prompts-and-resources-integration.md
│ │ ├── 008-intelligent-content-population-engine.md
│ │ ├── 009-content-accuracy-validation-framework.md
│ │ ├── 010-mcp-resource-pattern-redesign.md
│ │ └── README.md
│ ├── api
│ │ ├── .nojekyll
│ │ ├── assets
│ │ │ ├── hierarchy.js
│ │ │ ├── highlight.css
│ │ │ ├── icons.js
│ │ │ ├── icons.svg
│ │ │ ├── main.js
│ │ │ ├── navigation.js
│ │ │ ├── search.js
│ │ │ └── style.css
│ │ ├── hierarchy.html
│ │ ├── index.html
│ │ ├── modules.html
│ │ └── variables
│ │ └── TOOLS.html
│ ├── assets
│ │ └── logo.svg
│ ├── development
│ │ └── MCP_INSPECTOR_TESTING.md
│ ├── docusaurus.config.js
│ ├── explanation
│ │ ├── architecture.md
│ │ └── index.md
│ ├── guides
│ │ ├── link-validation.md
│ │ ├── playwright-integration.md
│ │ └── playwright-testing-workflow.md
│ ├── how-to
│ │ ├── analytics-setup.md
│ │ ├── custom-domains.md
│ │ ├── documentation-freshness-tracking.md
│ │ ├── github-pages-deployment.md
│ │ ├── index.md
│ │ ├── local-testing.md
│ │ ├── performance-optimization.md
│ │ ├── prompting-guide.md
│ │ ├── repository-analysis.md
│ │ ├── seo-optimization.md
│ │ ├── site-monitoring.md
│ │ ├── troubleshooting.md
│ │ └── usage-examples.md
│ ├── index.md
│ ├── knowledge-graph.md
│ ├── package-lock.json
│ ├── package.json
│ ├── phase-2-intelligence.md
│ ├── reference
│ │ ├── api-overview.md
│ │ ├── cli.md
│ │ ├── configuration.md
│ │ ├── deploy-pages.md
│ │ ├── index.md
│ │ ├── mcp-tools.md
│ │ └── prompt-templates.md
│ ├── research
│ │ ├── cross-domain-integration
│ │ │ └── README.md
│ │ ├── domain-1-mcp-architecture
│ │ │ ├── index.md
│ │ │ └── mcp-performance-research.md
│ │ ├── domain-2-repository-analysis
│ │ │ └── README.md
│ │ ├── domain-3-ssg-recommendation
│ │ │ ├── index.md
│ │ │ └── ssg-performance-analysis.md
│ │ ├── domain-4-diataxis-integration
│ │ │ └── README.md
│ │ ├── domain-5-github-deployment
│ │ │ ├── github-pages-security-analysis.md
│ │ │ └── index.md
│ │ ├── domain-6-api-design
│ │ │ └── README.md
│ │ ├── README.md
│ │ ├── research-integration-summary-2025-01-14.md
│ │ ├── research-progress-template.md
│ │ └── research-questions-2025-01-14.md
│ ├── robots.txt
│ ├── sidebars.js
│ ├── sitemap.xml
│ ├── src
│ │ └── css
│ │ └── custom.css
│ └── tutorials
│ ├── development-setup.md
│ ├── environment-setup.md
│ ├── first-deployment.md
│ ├── getting-started.md
│ ├── index.md
│ ├── memory-workflows.md
│ └── user-onboarding.md
├── jest.config.js
├── LICENSE
├── Makefile
├── MCP_PHASE2_IMPLEMENTATION.md
├── mcp-config-example.json
├── mcp.json
├── package-lock.json
├── package.json
├── README.md
├── release.sh
├── scripts
│ └── check-package-structure.cjs
├── SECURITY.md
├── setup-precommit.sh
├── src
│ ├── benchmarks
│ │ └── performance.ts
│ ├── index.ts
│ ├── memory
│ │ ├── contextual-retrieval.ts
│ │ ├── deployment-analytics.ts
│ │ ├── enhanced-manager.ts
│ │ ├── export-import.ts
│ │ ├── freshness-kg-integration.ts
│ │ ├── index.ts
│ │ ├── integration.ts
│ │ ├── kg-code-integration.ts
│ │ ├── kg-health.ts
│ │ ├── kg-integration.ts
│ │ ├── kg-link-validator.ts
│ │ ├── kg-storage.ts
│ │ ├── knowledge-graph.ts
│ │ ├── learning.ts
│ │ ├── manager.ts
│ │ ├── multi-agent-sharing.ts
│ │ ├── pruning.ts
│ │ ├── schemas.ts
│ │ ├── storage.ts
│ │ ├── temporal-analysis.ts
│ │ ├── user-preferences.ts
│ │ └── visualization.ts
│ ├── prompts
│ │ └── technical-writer-prompts.ts
│ ├── scripts
│ │ └── benchmark.ts
│ ├── templates
│ │ └── playwright
│ │ ├── accessibility.spec.template.ts
│ │ ├── Dockerfile.template
│ │ ├── docs-e2e.workflow.template.yml
│ │ ├── link-validation.spec.template.ts
│ │ └── playwright.config.template.ts
│ ├── tools
│ │ ├── analyze-deployments.ts
│ │ ├── analyze-readme.ts
│ │ ├── analyze-repository.ts
│ │ ├── check-documentation-links.ts
│ │ ├── deploy-pages.ts
│ │ ├── detect-gaps.ts
│ │ ├── evaluate-readme-health.ts
│ │ ├── generate-config.ts
│ │ ├── generate-contextual-content.ts
│ │ ├── generate-llm-context.ts
│ │ ├── generate-readme-template.ts
│ │ ├── generate-technical-writer-prompts.ts
│ │ ├── kg-health-check.ts
│ │ ├── manage-preferences.ts
│ │ ├── manage-sitemap.ts
│ │ ├── optimize-readme.ts
│ │ ├── populate-content.ts
│ │ ├── readme-best-practices.ts
│ │ ├── recommend-ssg.ts
│ │ ├── setup-playwright-tests.ts
│ │ ├── setup-structure.ts
│ │ ├── sync-code-to-docs.ts
│ │ ├── test-local-deployment.ts
│ │ ├── track-documentation-freshness.ts
│ │ ├── update-existing-documentation.ts
│ │ ├── validate-content.ts
│ │ ├── validate-documentation-freshness.ts
│ │ ├── validate-readme-checklist.ts
│ │ └── verify-deployment.ts
│ ├── types
│ │ └── api.ts
│ ├── utils
│ │ ├── ast-analyzer.ts
│ │ ├── code-scanner.ts
│ │ ├── content-extractor.ts
│ │ ├── drift-detector.ts
│ │ ├── freshness-tracker.ts
│ │ ├── language-parsers-simple.ts
│ │ ├── permission-checker.ts
│ │ └── sitemap-generator.ts
│ └── workflows
│ └── documentation-workflow.ts
├── test-docs-local.sh
├── tests
│ ├── api
│ │ └── mcp-responses.test.ts
│ ├── benchmarks
│ │ └── performance.test.ts
│ ├── edge-cases
│ │ └── error-handling.test.ts
│ ├── functional
│ │ └── tools.test.ts
│ ├── integration
│ │ ├── kg-documentation-workflow.test.ts
│ │ ├── knowledge-graph-workflow.test.ts
│ │ ├── mcp-readme-tools.test.ts
│ │ ├── memory-mcp-tools.test.ts
│ │ ├── readme-technical-writer.test.ts
│ │ └── workflow.test.ts
│ ├── memory
│ │ ├── contextual-retrieval.test.ts
│ │ ├── enhanced-manager.test.ts
│ │ ├── export-import.test.ts
│ │ ├── freshness-kg-integration.test.ts
│ │ ├── kg-code-integration.test.ts
│ │ ├── kg-health.test.ts
│ │ ├── kg-link-validator.test.ts
│ │ ├── kg-storage-validation.test.ts
│ │ ├── kg-storage.test.ts
│ │ ├── knowledge-graph-enhanced.test.ts
│ │ ├── knowledge-graph.test.ts
│ │ ├── learning.test.ts
│ │ ├── manager-advanced.test.ts
│ │ ├── manager.test.ts
│ │ ├── mcp-resource-integration.test.ts
│ │ ├── mcp-tool-persistence.test.ts
│ │ ├── schemas.test.ts
│ │ ├── storage.test.ts
│ │ ├── temporal-analysis.test.ts
│ │ └── user-preferences.test.ts
│ ├── performance
│ │ ├── memory-load-testing.test.ts
│ │ └── memory-stress-testing.test.ts
│ ├── prompts
│ │ ├── guided-workflow-prompts.test.ts
│ │ └── technical-writer-prompts.test.ts
│ ├── server.test.ts
│ ├── setup.ts
│ ├── tools
│ │ ├── all-tools.test.ts
│ │ ├── analyze-coverage.test.ts
│ │ ├── analyze-deployments.test.ts
│ │ ├── analyze-readme.test.ts
│ │ ├── analyze-repository.test.ts
│ │ ├── check-documentation-links.test.ts
│ │ ├── deploy-pages-kg-retrieval.test.ts
│ │ ├── deploy-pages-tracking.test.ts
│ │ ├── deploy-pages.test.ts
│ │ ├── detect-gaps.test.ts
│ │ ├── evaluate-readme-health.test.ts
│ │ ├── generate-contextual-content.test.ts
│ │ ├── generate-llm-context.test.ts
│ │ ├── generate-readme-template.test.ts
│ │ ├── generate-technical-writer-prompts.test.ts
│ │ ├── kg-health-check.test.ts
│ │ ├── manage-sitemap.test.ts
│ │ ├── optimize-readme.test.ts
│ │ ├── readme-best-practices.test.ts
│ │ ├── recommend-ssg-historical.test.ts
│ │ ├── recommend-ssg-preferences.test.ts
│ │ ├── recommend-ssg.test.ts
│ │ ├── simple-coverage.test.ts
│ │ ├── sync-code-to-docs.test.ts
│ │ ├── test-local-deployment.test.ts
│ │ ├── tool-error-handling.test.ts
│ │ ├── track-documentation-freshness.test.ts
│ │ ├── validate-content.test.ts
│ │ ├── validate-documentation-freshness.test.ts
│ │ └── validate-readme-checklist.test.ts
│ ├── types
│ │ └── type-safety.test.ts
│ └── utils
│ ├── ast-analyzer.test.ts
│ ├── content-extractor.test.ts
│ ├── drift-detector.test.ts
│ ├── freshness-tracker.test.ts
│ └── sitemap-generator.test.ts
├── tsconfig.json
└── typedoc.json
```
# Files
--------------------------------------------------------------------------------
/src/memory/enhanced-manager.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Enhanced Memory Manager with Learning and Knowledge Graph Integration
3 | * Combines Issues #47 and #48 for intelligent memory management
4 | */
5 |
6 | import { MemoryManager } from "./manager.js";
7 | import { MemoryEntry } from "./storage.js";
8 | import IncrementalLearningSystem, {
9 | ProjectFeatures,
10 | LearningInsight,
11 | } from "./learning.js";
12 | import KnowledgeGraph, { RecommendationPath } from "./knowledge-graph.js";
13 |
14 | export interface EnhancedRecommendation {
15 | baseRecommendation: any;
16 | learningEnhanced: any;
17 | graphBased: RecommendationPath[];
18 | insights: LearningInsight[];
19 | confidence: number;
20 | reasoning: string[];
21 | metadata: {
22 | usedLearning: boolean;
23 | usedKnowledgeGraph: boolean;
24 | patternsFound: number;
25 | similarProjects: number;
26 | };
27 | }
28 |
29 | export interface IntelligentAnalysis {
30 | analysis: any;
31 | patterns: string[];
32 | predictions: Array<{
33 | type: "success_rate" | "optimal_ssg" | "potential_issues";
34 | prediction: string;
35 | confidence: number;
36 | }>;
37 | recommendations: string[];
38 | learningData: {
39 | similarProjects: number;
40 | confidenceLevel: number;
41 | dataQuality: "low" | "medium" | "high";
42 | };
43 | }
44 |
45 | export class EnhancedMemoryManager extends MemoryManager {
46 | private learningSystem: IncrementalLearningSystem;
47 | private knowledgeGraph: KnowledgeGraph;
48 | private initialized: boolean = false;
49 |
50 | constructor(storageDir?: string) {
51 | super(storageDir);
52 | this.learningSystem = new IncrementalLearningSystem(this);
53 | this.knowledgeGraph = new KnowledgeGraph(this);
54 | }
55 |
56 | async initialize(): Promise<void> {
57 | await super.initialize();
58 |
59 | if (!this.initialized) {
60 | await this.learningSystem.initialize();
61 | await this.knowledgeGraph.initialize();
62 | this.initialized = true;
63 |
64 | // Set up automatic learning from new memories
65 | this.on("memory-created", this.handleNewMemory.bind(this));
66 | }
67 | }
68 |
69 | /**
70 | * Enhanced recommendation that combines base analysis with learning and graph intelligence
71 | */
72 | async getEnhancedRecommendation(
73 | projectPath: string,
74 | baseRecommendation: any,
75 | projectFeatures: ProjectFeatures,
76 | ): Promise<EnhancedRecommendation> {
77 | await this.initialize();
78 |
79 | // Get learning-enhanced recommendation
80 | const learningResult = await this.learningSystem.getImprovedRecommendation(
81 | projectFeatures,
82 | baseRecommendation,
83 | );
84 |
85 | // Get knowledge graph-based recommendations
86 | const candidateSSGs = this.extractCandidateSSGs(baseRecommendation);
87 | const graphRecommendations =
88 | await this.knowledgeGraph.getGraphBasedRecommendation(
89 | projectFeatures,
90 | candidateSSGs,
91 | );
92 |
93 | // Combine insights and reasoning
94 | const allInsights = [...learningResult.insights];
95 | const reasoning: string[] = [];
96 |
97 | // Add graph-based reasoning
98 | if (graphRecommendations.length > 0) {
99 | const topRecommendation = graphRecommendations[0];
100 | reasoning.push(...topRecommendation.reasoning);
101 |
102 | allInsights.push({
103 | type: "recommendation",
104 | message: `Knowledge graph analysis suggests ${topRecommendation.to.label} based on similar successful projects`,
105 | confidence: topRecommendation.confidence,
106 | actionable: true,
107 | data: { graphPath: topRecommendation.path },
108 | });
109 | }
110 |
111 | // Calculate combined confidence
112 | const combinedConfidence = this.calculateCombinedConfidence(
113 | baseRecommendation.confidence,
114 | learningResult.confidence,
115 | graphRecommendations[0]?.confidence || 0,
116 | );
117 |
118 | // Determine final recommendation
119 | const finalRecommendation = learningResult.recommendation;
120 | if (
121 | graphRecommendations.length > 0 &&
122 | graphRecommendations[0].confidence > 0.8
123 | ) {
124 | const graphChoice = graphRecommendations[0].to.label;
125 | if (graphChoice !== finalRecommendation.recommended) {
126 | finalRecommendation.graphSuggestion = graphChoice;
127 | finalRecommendation.conflictDetected = true;
128 | reasoning.push(
129 | `Knowledge graph suggests ${graphChoice} while learning system suggests ${finalRecommendation.recommended}`,
130 | );
131 | }
132 | }
133 |
134 | return {
135 | baseRecommendation,
136 | learningEnhanced: learningResult.recommendation,
137 | graphBased: graphRecommendations,
138 | insights: allInsights,
139 | confidence: combinedConfidence,
140 | reasoning,
141 | metadata: {
142 | usedLearning: learningResult.insights.length > 0,
143 | usedKnowledgeGraph: graphRecommendations.length > 0,
144 | patternsFound: await this.countRelevantPatterns(projectFeatures),
145 | similarProjects: graphRecommendations.length,
146 | },
147 | };
148 | }
149 |
150 | /**
151 | * Enhanced analysis that provides intelligent insights
152 | */
153 | async getIntelligentAnalysis(
154 | projectPath: string,
155 | baseAnalysis: any,
156 | ): Promise<IntelligentAnalysis> {
157 | await this.initialize();
158 |
159 | const projectFeatures = this.extractProjectFeatures(baseAnalysis);
160 |
161 | // Find patterns from similar projects
162 | const patterns = await this.findProjectPatterns(projectFeatures);
163 |
164 | // Generate predictions
165 | const predictions = await this.generatePredictions(
166 | projectFeatures,
167 | patterns,
168 | );
169 |
170 | // Generate recommendations
171 | const recommendations = await this.generateIntelligentRecommendations(
172 | projectFeatures,
173 | patterns,
174 | predictions,
175 | );
176 |
177 | // Assess learning data quality
178 | const learningData = await this.assessLearningData(projectFeatures);
179 |
180 | return {
181 | analysis: baseAnalysis,
182 | patterns: patterns.map((p) => p.description),
183 | predictions,
184 | recommendations,
185 | learningData,
186 | };
187 | }
188 |
189 | /**
190 | * Learn from new memory entries automatically
191 | */
192 | private async handleNewMemory(memory: MemoryEntry): Promise<void> {
193 | try {
194 | // Determine outcome for learning
195 | const outcome = this.inferOutcome(memory);
196 |
197 | if (outcome) {
198 | await this.learningSystem.learn(memory, outcome);
199 | }
200 |
201 | // Update knowledge graph
202 | await this.knowledgeGraph.buildFromMemories();
203 |
204 | // Periodically save graph to persistent storage
205 | if (Math.random() < 0.1) {
206 | // 10% chance to save
207 | await this.knowledgeGraph.saveToMemory();
208 | }
209 | } catch (error) {
210 | console.error("Error in automatic learning:", error);
211 | }
212 | }
213 |
214 | /**
215 | * Extract project features from analysis data
216 | */
217 | private extractProjectFeatures(analysis: any): ProjectFeatures {
218 | return {
219 | language: analysis.language?.primary || "unknown",
220 | framework: analysis.framework?.name,
221 | size: this.categorizeProjectSize(analysis.stats?.files || 0),
222 | complexity: this.categorizeProjectComplexity(analysis),
223 | hasTests: Boolean(analysis.testing?.hasTests),
224 | hasCI: Boolean(analysis.ci?.hasCI),
225 | hasDocs: Boolean(analysis.documentation?.exists),
226 | teamSize: analysis.team?.size,
227 | isOpenSource: Boolean(analysis.repository?.isPublic),
228 | };
229 | }
230 |
231 | private categorizeProjectSize(
232 | fileCount: number,
233 | ): "small" | "medium" | "large" {
234 | if (fileCount < 50) return "small";
235 | if (fileCount < 200) return "medium";
236 | return "large";
237 | }
238 |
239 | private categorizeProjectComplexity(
240 | analysis: any,
241 | ): "simple" | "moderate" | "complex" {
242 | let complexity = 0;
243 |
244 | if (analysis.dependencies?.count > 20) complexity++;
245 | if (analysis.framework?.name) complexity++;
246 | if (analysis.testing?.frameworks?.length > 1) complexity++;
247 | if (analysis.ci?.workflows?.length > 2) complexity++;
248 | if (analysis.architecture?.patterns?.length > 3) complexity++;
249 |
250 | if (complexity <= 1) return "simple";
251 | if (complexity <= 3) return "moderate";
252 | return "complex";
253 | }
254 |
255 | /**
256 | * Extract candidate SSGs from base recommendation
257 | */
258 | private extractCandidateSSGs(baseRecommendation: any): string[] {
259 | const candidates = [baseRecommendation.recommended];
260 |
261 | if (baseRecommendation.alternatives) {
262 | candidates.push(
263 | ...baseRecommendation.alternatives.map((alt: any) => alt.name || alt),
264 | );
265 | }
266 |
267 | return [...new Set(candidates)].filter(Boolean);
268 | }
269 |
270 | /**
271 | * Calculate combined confidence from multiple sources
272 | */
273 | private calculateCombinedConfidence(
274 | baseConfidence: number,
275 | learningConfidence: number,
276 | graphConfidence: number,
277 | ): number {
278 | // Weighted average with emphasis on learning and graph data when available
279 | const weights = {
280 | base: 0.4,
281 | learning: learningConfidence > 0 ? 0.4 : 0,
282 | graph: graphConfidence > 0 ? 0.2 : 0,
283 | };
284 |
285 | // Redistribute weights if some sources are unavailable
286 | const totalWeight = weights.base + weights.learning + weights.graph;
287 | const normalizedWeights = {
288 | base: weights.base / totalWeight,
289 | learning: weights.learning / totalWeight,
290 | graph: weights.graph / totalWeight,
291 | };
292 |
293 | return (
294 | baseConfidence * normalizedWeights.base +
295 | learningConfidence * normalizedWeights.learning +
296 | graphConfidence * normalizedWeights.graph
297 | );
298 | }
299 |
300 | /**
301 | * Count patterns relevant to project features
302 | */
303 | private async countRelevantPatterns(
304 | features: ProjectFeatures,
305 | ): Promise<number> {
306 | const tags = [features.language, features.framework, features.size].filter(
307 | (tag): tag is string => Boolean(tag),
308 | );
309 | const memories = await this.search({
310 | tags,
311 | });
312 |
313 | return memories.length;
314 | }
315 |
316 | /**
317 | * Find patterns from similar projects
318 | */
319 | private async findProjectPatterns(features: ProjectFeatures): Promise<
320 | Array<{
321 | type: string;
322 | description: string;
323 | confidence: number;
324 | frequency: number;
325 | }>
326 | > {
327 | const patterns: Array<{
328 | type: string;
329 | description: string;
330 | confidence: number;
331 | frequency: number;
332 | }> = [];
333 |
334 | // Find similar projects based on features
335 | const similarMemories = await this.search({
336 | tags: [features.language],
337 | });
338 |
339 | if (similarMemories.length >= 3) {
340 | // Pattern: Most common SSG for this language
341 | const ssgCounts = new Map<string, number>();
342 | for (const memory of similarMemories) {
343 | if (memory.metadata.ssg) {
344 | ssgCounts.set(
345 | memory.metadata.ssg,
346 | (ssgCounts.get(memory.metadata.ssg) || 0) + 1,
347 | );
348 | }
349 | }
350 |
351 | if (ssgCounts.size > 0) {
352 | const topSSG = Array.from(ssgCounts.entries()).sort(
353 | ([, a], [, b]) => b - a,
354 | )[0];
355 |
356 | patterns.push({
357 | type: "ssg_preference",
358 | description: `${topSSG[0]} is commonly used with ${features.language} (${topSSG[1]}/${similarMemories.length} projects)`,
359 | confidence: topSSG[1] / similarMemories.length,
360 | frequency: topSSG[1],
361 | });
362 | }
363 |
364 | // Pattern: Success rate analysis
365 | const deploymentMemories = similarMemories.filter(
366 | (m) => m.type === "deployment",
367 | );
368 | if (deploymentMemories.length >= 2) {
369 | const successRate =
370 | deploymentMemories.filter((m) => m.data.status === "success").length /
371 | deploymentMemories.length;
372 |
373 | patterns.push({
374 | type: "success_rate",
375 | description: `Similar ${features.language} projects have ${(
376 | successRate * 100
377 | ).toFixed(0)}% deployment success rate`,
378 | confidence: Math.min(deploymentMemories.length / 10, 1.0),
379 | frequency: deploymentMemories.length,
380 | });
381 | }
382 | }
383 |
384 | return patterns;
385 | }
386 |
387 | /**
388 | * Generate predictions based on patterns and features
389 | */
390 | private async generatePredictions(
391 | features: ProjectFeatures,
392 | patterns: Array<{ type: string; description: string; confidence: number }>,
393 | ): Promise<
394 | Array<{
395 | type: "success_rate" | "optimal_ssg" | "potential_issues";
396 | prediction: string;
397 | confidence: number;
398 | }>
399 | > {
400 | const predictions: Array<{
401 | type: "success_rate" | "optimal_ssg" | "potential_issues";
402 | prediction: string;
403 | confidence: number;
404 | }> = [];
405 |
406 | // Predict success rate
407 | const successPattern = patterns.find((p) => p.type === "success_rate");
408 | if (successPattern) {
409 | predictions.push({
410 | type: "success_rate",
411 | prediction: `Expected deployment success rate: ${
412 | successPattern.description.match(/(\d+)%/)?.[1] || "unknown"
413 | }%`,
414 | confidence: successPattern.confidence,
415 | });
416 | }
417 |
418 | // Predict optimal SSG
419 | const ssgPattern = patterns.find((p) => p.type === "ssg_preference");
420 | if (ssgPattern) {
421 | const ssg = ssgPattern.description.split(" ")[0];
422 | predictions.push({
423 | type: "optimal_ssg",
424 | prediction: `${ssg} is likely the optimal choice based on similar projects`,
425 | confidence: ssgPattern.confidence,
426 | });
427 | }
428 |
429 | // Predict potential issues
430 | if (!features.hasTests && features.size !== "small") {
431 | predictions.push({
432 | type: "potential_issues",
433 | prediction:
434 | "Deployment issues likely due to lack of tests in medium/large project",
435 | confidence: 0.7,
436 | });
437 | }
438 |
439 | if (!features.hasCI && features.complexity === "complex") {
440 | predictions.push({
441 | type: "potential_issues",
442 | prediction:
443 | "Complex project without CI/CD may face integration challenges",
444 | confidence: 0.6,
445 | });
446 | }
447 |
448 | return predictions;
449 | }
450 |
451 | /**
452 | * Generate intelligent recommendations
453 | */
454 | private async generateIntelligentRecommendations(
455 | features: ProjectFeatures,
456 | patterns: Array<{ type: string; description: string; confidence: number }>,
457 | predictions: Array<{
458 | type: string;
459 | prediction: string;
460 | confidence: number;
461 | }>,
462 | ): Promise<string[]> {
463 | const recommendations: string[] = [];
464 |
465 | // Recommendations based on patterns
466 | const ssgPattern = patterns.find((p) => p.type === "ssg_preference");
467 | if (ssgPattern && ssgPattern.confidence > 0.7) {
468 | const ssg = ssgPattern.description.split(" ")[0];
469 | recommendations.push(
470 | `Consider ${ssg} - it's proven successful for similar ${features.language} projects`,
471 | );
472 | }
473 |
474 | // Recommendations based on predictions
475 | const issuesPrediction = predictions.find(
476 | (p) => p.type === "potential_issues",
477 | );
478 | if (issuesPrediction && issuesPrediction.confidence > 0.6) {
479 | if (issuesPrediction.prediction.includes("tests")) {
480 | recommendations.push(
481 | "Set up automated testing before deployment to improve success rate",
482 | );
483 | }
484 | if (issuesPrediction.prediction.includes("CI/CD")) {
485 | recommendations.push(
486 | "Implement CI/CD pipeline to handle project complexity",
487 | );
488 | }
489 | }
490 |
491 | // Recommendations based on features
492 | if (features.complexity === "complex" && !features.hasDocs) {
493 | recommendations.push(
494 | "Invest in comprehensive documentation for this complex project",
495 | );
496 | }
497 |
498 | if (features.isOpenSource && features.size === "large") {
499 | recommendations.push(
500 | "Consider community-friendly documentation tools for large open-source project",
501 | );
502 | }
503 |
504 | return recommendations;
505 | }
506 |
507 | /**
508 | * Assess quality of learning data
509 | */
510 | private async assessLearningData(features: ProjectFeatures): Promise<{
511 | similarProjects: number;
512 | confidenceLevel: number;
513 | dataQuality: "low" | "medium" | "high";
514 | }> {
515 | const tags = [features.language, features.framework].filter(
516 | (tag): tag is string => Boolean(tag),
517 | );
518 | const similarMemories = await this.search({
519 | tags,
520 | });
521 |
522 | const similarProjects = similarMemories.length;
523 | let confidenceLevel = 0;
524 |
525 | if (similarProjects >= 10) {
526 | confidenceLevel = 0.9;
527 | } else if (similarProjects >= 5) {
528 | confidenceLevel = 0.7;
529 | } else if (similarProjects >= 2) {
530 | confidenceLevel = 0.5;
531 | } else {
532 | confidenceLevel = 0.2;
533 | }
534 |
535 | let dataQuality: "low" | "medium" | "high";
536 | if (similarProjects >= 8 && confidenceLevel >= 0.8) {
537 | dataQuality = "high";
538 | } else if (similarProjects >= 3 && confidenceLevel >= 0.5) {
539 | dataQuality = "medium";
540 | } else {
541 | dataQuality = "low";
542 | }
543 |
544 | return {
545 | similarProjects,
546 | confidenceLevel,
547 | dataQuality,
548 | };
549 | }
550 |
551 | /**
552 | * Infer outcome from memory entry
553 | */
554 | private inferOutcome(
555 | memory: MemoryEntry,
556 | ): "success" | "failure" | "neutral" | null {
557 | if (memory.type === "deployment") {
558 | if (memory.data.status === "success") return "success";
559 | if (memory.data.status === "failed") return "failure";
560 | }
561 |
562 | if (memory.type === "recommendation" && memory.data.feedback) {
563 | const rating = memory.data.feedback.rating || memory.data.feedback.score;
564 | if (rating > 3) return "success";
565 | if (rating < 3) return "failure";
566 | }
567 |
568 | return "neutral";
569 | }
570 |
571 | /**
572 | * Get comprehensive learning statistics
573 | */
574 | async getLearningStatistics(): Promise<{
575 | learning: any;
576 | knowledgeGraph: any;
577 | combined: {
578 | totalMemories: number;
579 | enhancedRecommendations: number;
580 | accuracyImprovement: number;
581 | systemMaturity: "nascent" | "developing" | "mature";
582 | };
583 | }> {
584 | const learningStats = await this.learningSystem.getStatistics();
585 | const graphStats = this.knowledgeGraph.getStatistics();
586 |
587 | const totalMemories = (await this.search("")).length;
588 | const graphStatsResult = await graphStats;
589 | const enhancedRecommendations =
590 | learningStats.totalPatterns + graphStatsResult.nodeCount;
591 |
592 | // Estimate accuracy improvement based on data volume
593 | let accuracyImprovement = 0;
594 | if (totalMemories >= 50) {
595 | accuracyImprovement = Math.min(0.3, totalMemories / 200);
596 | }
597 |
598 | // Determine system maturity
599 | let systemMaturity: "nascent" | "developing" | "mature";
600 | if (totalMemories >= 100 && learningStats.totalPatterns >= 20) {
601 | systemMaturity = "mature";
602 | } else if (totalMemories >= 20 && learningStats.totalPatterns >= 5) {
603 | systemMaturity = "developing";
604 | } else {
605 | systemMaturity = "nascent";
606 | }
607 |
608 | return {
609 | learning: learningStats,
610 | knowledgeGraph: graphStatsResult,
611 | combined: {
612 | totalMemories,
613 | enhancedRecommendations,
614 | accuracyImprovement,
615 | systemMaturity,
616 | },
617 | };
618 | }
619 |
620 | /**
621 | * Close and cleanup
622 | */
623 | async close(): Promise<void> {
624 | await this.knowledgeGraph.saveToMemory();
625 | await super.close();
626 | }
627 | }
628 |
629 | export default EnhancedMemoryManager;
630 |
```
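
`EnhancedMemoryManager` layers three signals behind a single call: the base recommendation, the incremental learning system, and the knowledge graph, blending their confidences with a weighted average (base 0.4, learning 0.4 and graph 0.2 when available, renormalized when a source is missing). The sketch below shows how a caller might drive `getEnhancedRecommendation()`; the import paths, the `ProjectFeatures` literal, and the base-recommendation shape are illustrative assumptions, not canonical output of the recommend-ssg tool.

```typescript
// Illustrative usage sketch (not part of the repository). Import paths and
// data shapes are assumptions based on the file above.
import EnhancedMemoryManager from "./src/memory/enhanced-manager.js";
import { ProjectFeatures } from "./src/memory/learning.js";

async function main() {
  const manager = new EnhancedMemoryManager(); // optionally pass a storage directory
  await manager.initialize();

  const features: ProjectFeatures = {
    language: "typescript",
    framework: "react",
    size: "medium", // <50 files = small, <200 = medium, otherwise large
    complexity: "moderate", // heuristic over deps, framework, tests, CI, architecture
    hasTests: true,
    hasCI: true,
    hasDocs: false,
    teamSize: 4,
    isOpenSource: true,
  };

  // Base recommendation in the shape the class expects (illustrative values).
  const baseRecommendation = {
    recommended: "docusaurus",
    confidence: 0.7,
    alternatives: [{ name: "hugo" }, { name: "mkdocs" }],
  };

  const enhanced = await manager.getEnhancedRecommendation(
    "/path/to/project",
    baseRecommendation,
    features,
  );

  console.log(enhanced.confidence); // weighted blend of base/learning/graph confidence
  console.log(enhanced.reasoning); // merged reasoning, including any conflict notes
  console.log(enhanced.metadata); // usedLearning, usedKnowledgeGraph, patternsFound, similarProjects

  await manager.close(); // persists the knowledge graph before shutdown
}

main().catch(console.error);
```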
--------------------------------------------------------------------------------
/tests/integration/memory-mcp-tools.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Memory MCP Tools Integration Tests
3 | * Tests integration between memory system and MCP tools
4 | * Part of Issue #56 - Memory MCP Tools Integration Tests
5 | */
6 |
7 | import { promises as fs } from "fs";
8 | import path from "path";
9 | import os from "os";
10 | import {
11 | initializeMemory,
12 | rememberAnalysis,
13 | rememberRecommendation,
14 | rememberDeployment,
15 | rememberConfiguration,
16 | recallProjectHistory,
17 | getProjectInsights,
18 | getSimilarProjects,
19 | getMemoryStatistics,
20 | } from "../../src/memory/integration.js";
21 | import { analyzeRepository } from "../../src/tools/analyze-repository.js";
22 | import { recommendSSG } from "../../src/tools/recommend-ssg.js";
23 |
24 | describe("Memory MCP Tools Integration", () => {
25 | let tempDir: string;
26 | let testProjectDir: string;
27 |
28 | beforeEach(async () => {
29 | // Create unique temp directory for each test
30 | tempDir = path.join(
31 | os.tmpdir(),
32 | `memory-mcp-integration-${Date.now()}-${Math.random()
33 | .toString(36)
34 | .substr(2, 9)}`,
35 | );
36 | await fs.mkdir(tempDir, { recursive: true });
37 |
38 | // Create a mock project structure for testing
39 | testProjectDir = path.join(tempDir, "test-project");
40 | await createMockProject(testProjectDir);
41 | });
42 |
43 | afterEach(async () => {
44 | // Cleanup temp directory
45 | try {
46 | await fs.rm(tempDir, { recursive: true, force: true });
47 | } catch (error) {
48 | // Ignore cleanup errors
49 | }
50 | });
51 |
52 | async function createMockProject(projectPath: string) {
53 | await fs.mkdir(projectPath, { recursive: true });
54 |
55 | // Create package.json
56 | await fs.writeFile(
57 | path.join(projectPath, "package.json"),
58 | JSON.stringify(
59 | {
60 | name: "test-project",
61 | version: "1.0.0",
62 | dependencies: {
63 | react: "^18.0.0",
64 | typescript: "^5.0.0",
65 | },
66 | devDependencies: {
67 | jest: "^29.0.0",
68 | },
69 | },
70 | null,
71 | 2,
72 | ),
73 | );
74 |
75 | // Create README.md
76 | await fs.writeFile(
77 | path.join(projectPath, "README.md"),
78 | `# Test Project
79 |
80 | A test project for memory integration testing.
81 |
82 | ## Features
83 | - TypeScript support
84 | - React components
85 | - Jest testing
86 | `,
87 | );
88 |
89 | // Create src directory with TypeScript files
90 | await fs.mkdir(path.join(projectPath, "src"));
91 | await fs.writeFile(
92 | path.join(projectPath, "src/index.ts"),
93 | 'export const hello = "world";',
94 | );
95 | await fs.writeFile(
96 | path.join(projectPath, "src/component.tsx"),
97 | 'import React from "react"; export const Component = () => <div>Hello</div>;',
98 | );
99 |
100 | // Create tests directory
101 | await fs.mkdir(path.join(projectPath, "__tests__"));
102 | await fs.writeFile(
103 | path.join(projectPath, "__tests__/index.test.ts"),
104 | 'test("hello world", () => { expect(true).toBe(true); });',
105 | );
106 |
107 | // Create docs directory
108 | await fs.mkdir(path.join(projectPath, "docs"));
109 | await fs.writeFile(
110 | path.join(projectPath, "docs/setup.md"),
111 | "# Setup Guide\n\nHow to set up the project.",
112 | );
113 | }
114 |
115 | describe("Memory Integration Initialization", () => {
116 | test("should initialize memory system for MCP tools", async () => {
117 | const memoryManager = await initializeMemory();
118 |
119 | expect(memoryManager).toBeDefined();
120 | expect(memoryManager.constructor.name).toBe("MemoryManager");
121 | });
122 |
123 | test("should handle memory system events", async () => {
124 | const memoryManager = await initializeMemory();
125 | let eventsFired = 0;
126 |
127 | memoryManager.on("memory-created", () => {
128 | eventsFired++;
129 | });
130 |
131 | // Create a memory entry
132 | await memoryManager.remember("analysis", { test: true });
133 |
134 | // Give events time to fire
135 | await new Promise((resolve) => setTimeout(resolve, 50));
136 |
137 | expect(eventsFired).toBeGreaterThanOrEqual(1);
138 | });
139 | });
140 |
141 | describe("Repository Analysis Integration", () => {
142 | test("should integrate repository analysis with memory system", async () => {
143 | // Run repository analysis tool
144 | const analysisResult = await analyzeRepository({
145 | path: testProjectDir,
146 | depth: "standard",
147 | });
148 |
149 | expect(analysisResult.isError).toBeFalsy();
150 | expect(analysisResult.content).toBeDefined();
151 |
152 | // Extract analysis data from MCP response
153 | const analysisContent = analysisResult.content.find(
154 | (c) => c.type === "text" && c.text.includes("Analysis Complete"),
155 | );
156 | expect(analysisContent).toBeDefined();
157 |
158 | // Remember analysis in memory system
159 | const analysisData = {
160 | projectId: "test-project",
161 | language: { primary: "typescript" },
162 | framework: { name: "react" },
163 | stats: { files: 5 },
164 | repository: { url: "github.com/test/project" },
165 | };
166 |
167 | const memoryId = await rememberAnalysis(testProjectDir, analysisData);
168 | expect(memoryId).toBeDefined();
169 | expect(typeof memoryId).toBe("string");
170 |
171 | // Verify memory was stored
172 | const memoryManager = await initializeMemory();
173 | const recalled = await memoryManager.recall(memoryId);
174 |
175 | expect(recalled).not.toBeNull();
176 | expect(recalled?.data.language.primary).toBe("typescript");
177 | expect(recalled?.data.framework.name).toBe("react");
178 | });
179 |
180 | test("should store analysis metadata correctly", async () => {
181 | const analysisData = {
182 | projectId: "metadata-test",
183 | language: { primary: "javascript" },
184 | framework: { name: "vue" },
185 | stats: { files: 25 },
186 | repository: { url: "github.com/test/vue-project" },
187 | };
188 |
189 | const memoryId = await rememberAnalysis(
190 | "/test/vue-project",
191 | analysisData,
192 | );
193 |
194 | const memoryManager = await initializeMemory();
195 | const recalled = await memoryManager.recall(memoryId);
196 |
197 | expect(recalled?.metadata.repository).toBe("github.com/test/vue-project");
198 | expect(recalled?.metadata.tags).toContain("analysis");
199 | expect(recalled?.metadata.tags).toContain("javascript");
200 | expect(recalled?.metadata.tags).toContain("vue");
201 | });
202 | });
203 |
204 | describe("SSG Recommendation Integration", () => {
205 | test("should integrate SSG recommendation with memory system", async () => {
206 | // First create an analysis
207 | const analysisData = {
208 | projectId: "ssg-test",
209 | language: { primary: "typescript" },
210 | framework: { name: "react" },
211 | };
212 |
213 | const analysisId = await rememberAnalysis(
214 | "/test/ssg-project",
215 | analysisData,
216 | );
217 |
218 | // Run SSG recommendation tool
219 | const recommendationResult = await recommendSSG({
220 | analysisId,
221 | preferences: {
222 | priority: "features",
223 | ecosystem: "javascript",
224 | },
225 | });
226 |
227 | expect(recommendationResult.content).toBeDefined();
228 |
229 | // Extract recommendation data
230 | const recommendationData = {
231 | recommended: "docusaurus",
232 | confidence: 0.85,
233 | reasoning: ["React-based", "TypeScript support"],
234 | analysisId,
235 | };
236 |
237 | const memoryId = await rememberRecommendation(
238 | analysisId,
239 | recommendationData,
240 | );
241 | expect(memoryId).toBeDefined();
242 |
243 | // Verify memory linking
244 | const memoryManager = await initializeMemory();
245 | const recalled = await memoryManager.recall(memoryId);
246 |
247 | expect(recalled?.data.recommended).toBe("docusaurus");
248 | expect(recalled?.metadata.ssg).toBe("docusaurus");
249 | expect(recalled?.metadata.tags).toContain("recommendation");
250 | expect(recalled?.metadata.tags).toContain("docusaurus");
251 | });
252 |
253 | test("should link recommendations to analysis", async () => {
254 | // Create analysis
255 | const analysisData = {
256 | projectId: "linked-test",
257 | language: { primary: "python" },
258 | };
259 | const analysisId = await rememberAnalysis(
260 | "/test/python-project",
261 | analysisData,
262 | );
263 |
264 | // Create recommendation
265 | const recommendationData = {
266 | recommended: "mkdocs",
267 | confidence: 0.9,
268 | };
269 | const recommendationId = await rememberRecommendation(
270 | analysisId,
271 | recommendationData,
272 | );
273 |
274 | // Verify linking
275 | const memoryManager = await initializeMemory();
276 | const recommendation = await memoryManager.recall(recommendationId);
277 | const analysis = await memoryManager.recall(analysisId);
278 |
279 | expect(recommendation?.metadata.projectId).toBe(
280 | analysis?.metadata.projectId,
281 | );
282 | expect(recommendation?.metadata.projectId).toBe("linked-test");
283 | });
284 | });
285 |
286 | describe("Deployment Memory Integration", () => {
287 | test("should store deployment results in memory", async () => {
288 | const deploymentData = {
289 | ssg: "hugo",
290 | status: "success",
291 | duration: 120,
292 | url: "https://test-project.github.io",
293 | branch: "gh-pages",
294 | };
295 |
296 | const memoryId = await rememberDeployment(
297 | "github.com/test/project",
298 | deploymentData,
299 | );
300 |
301 | const memoryManager = await initializeMemory();
302 | const recalled = await memoryManager.recall(memoryId);
303 |
304 | expect(recalled?.data.ssg).toBe("hugo");
305 | expect(recalled?.data.status).toBe("success");
306 | expect(recalled?.metadata.repository).toBe("github.com/test/project");
307 | expect(recalled?.metadata.tags).toContain("deployment");
308 | expect(recalled?.metadata.tags).toContain("success");
309 | expect(recalled?.metadata.tags).toContain("hugo");
310 | });
311 |
312 | test("should track deployment failures", async () => {
313 | const failedDeployment = {
314 | ssg: "jekyll",
315 | status: "failed",
316 | error: "Build failed: missing dependency",
317 | duration: 45,
318 | };
319 |
320 | const memoryId = await rememberDeployment(
321 | "github.com/test/failed-project",
322 | failedDeployment,
323 | );
324 |
325 | const memoryManager = await initializeMemory();
326 | const recalled = await memoryManager.recall(memoryId);
327 |
328 | expect(recalled?.data.status).toBe("failed");
329 | expect(recalled?.data.error).toContain("Build failed");
330 | expect(recalled?.metadata.tags).toContain("failed");
331 | });
332 | });
333 |
334 | describe("Configuration Memory Integration", () => {
335 | test("should store configuration data in memory", async () => {
336 | const configData = {
337 | title: "Test Documentation",
338 | theme: "material",
339 | plugins: ["search", "navigation"],
340 | build: {
341 | outputDir: "_site",
342 | baseUrl: "/docs/",
343 | },
344 | };
345 |
346 | const memoryId = await rememberConfiguration(
347 | "test-docs",
348 | "mkdocs",
349 | configData,
350 | );
351 |
352 | const memoryManager = await initializeMemory();
353 | const recalled = await memoryManager.recall(memoryId);
354 |
355 | expect(recalled?.data.title).toBe("Test Documentation");
356 | expect(recalled?.data.theme).toBe("material");
357 | expect(recalled?.metadata.ssg).toBe("mkdocs");
358 | expect(recalled?.metadata.tags).toContain("configuration");
359 | expect(recalled?.metadata.tags).toContain("mkdocs");
360 | expect(recalled?.metadata.tags).toContain("test-docs");
361 | });
362 | });
363 |
364 | describe("Project History and Insights", () => {
365 | test("should recall comprehensive project history", async () => {
366 | const projectId = "history-test";
367 |
368 | // Create a complete project workflow in memory
369 | const analysisData = {
370 | projectId,
371 | language: { primary: "typescript" },
372 | framework: { name: "react" },
373 | };
374 | await rememberAnalysis("/test/history-project", analysisData);
375 |
376 | const recommendationData = {
377 | recommended: "docusaurus",
378 | confidence: 0.8,
379 | };
380 | await rememberRecommendation("analysis-id", recommendationData);
381 |
382 | const deploymentData = {
383 | ssg: "docusaurus",
384 | status: "success",
385 | duration: 90,
386 | };
387 | await rememberDeployment(
388 | "github.com/test/history-project",
389 | deploymentData,
390 | );
391 |
392 | // Recall project history
393 | const history = await recallProjectHistory(projectId);
394 |
395 | expect(history.projectId).toBe(projectId);
396 | expect(history.history).toBeDefined();
397 | expect(history.insights).toBeDefined();
398 | expect(Array.isArray(history.insights)).toBe(true);
399 | });
400 |
401 | test("should generate meaningful project insights", async () => {
402 | const projectId = "insights-test";
403 |
404 | // Create deployment history
405 | const successfulDeployment = {
406 | ssg: "hugo",
407 | status: "success",
408 | duration: 60,
409 | };
410 | await rememberDeployment(
411 | "github.com/test/insights-project",
412 | successfulDeployment,
413 | );
414 |
415 | const failedDeployment = {
416 | ssg: "hugo",
417 | status: "failed",
418 | error: "Build timeout",
419 | };
420 | await rememberDeployment(
421 | "github.com/test/insights-project",
422 | failedDeployment,
423 | );
424 |
425 | const insights = await getProjectInsights(projectId);
426 |
427 | expect(Array.isArray(insights)).toBe(true);
428 |
429 | // Insights may be empty if memories don't meet criteria, that's ok
430 | if (insights.length > 0) {
431 | // Should include deployment success rate if deployments exist
432 | const successRateInsight = insights.find((i) =>
433 | i.includes("success rate"),
434 | );
435 | expect(successRateInsight).toBeDefined();
436 | }
437 | });
438 | });
439 |
440 | describe("Similar Projects Discovery", () => {
441 | test("should find similar projects based on characteristics", async () => {
442 | // Create multiple projects with different characteristics
443 | await rememberAnalysis("/project1", {
444 | projectId: "project1",
445 | language: { primary: "typescript" },
446 | framework: { name: "react" },
447 | });
448 |
449 | await rememberAnalysis("/project2", {
450 | projectId: "project2",
451 | language: { primary: "typescript" },
452 | framework: { name: "vue" },
453 | });
454 |
455 | await rememberAnalysis("/project3", {
456 | projectId: "project3",
457 | language: { primary: "python" },
458 | framework: { name: "django" },
459 | });
460 |
461 | // Search for similar projects
462 | const targetProject = {
463 | language: { primary: "typescript" },
464 | framework: { name: "react" },
465 | stats: { files: 100 },
466 | };
467 |
468 | const similarProjects = await getSimilarProjects(targetProject, 3);
469 |
470 | expect(Array.isArray(similarProjects)).toBe(true);
471 |
472 | // Similar projects may be empty if no matches found, that's ok
473 | if (similarProjects.length > 0) {
474 | // Should find TypeScript projects first
475 | const tsProjects = similarProjects.filter((p) => p.similarity > 0);
476 | expect(tsProjects.length).toBeGreaterThan(0);
477 | }
478 | });
479 |
480 | test("should calculate similarity scores correctly", async () => {
481 | // Create projects with known characteristics
482 | await rememberAnalysis("/exact-match", {
483 | projectId: "exact-match",
484 | language: { primary: "javascript" },
485 | framework: { name: "vue" },
486 | stats: { files: 50 },
487 | documentation: { type: "api" },
488 | });
489 |
490 | await rememberAnalysis("/partial-match", {
491 | projectId: "partial-match",
492 | language: { primary: "javascript" },
493 | framework: { name: "react" },
494 | stats: { files: 45 },
495 | });
496 |
497 | const targetProject = {
498 | language: { primary: "javascript" },
499 | framework: { name: "vue" },
500 | stats: { files: 52 },
501 | documentation: { type: "api" },
502 | };
503 |
504 | const similarProjects = await getSimilarProjects(targetProject, 5);
505 |
506 | expect(Array.isArray(similarProjects)).toBe(true);
507 |
508 | // Similar projects may be empty, but if found should have similarity scores
509 | if (similarProjects.length > 0) {
510 | const exactMatch = similarProjects.find(
511 | (p) => p.projectId === "exact-match",
512 | );
513 | const partialMatch = similarProjects.find(
514 | (p) => p.projectId === "partial-match",
515 | );
516 |
517 | if (exactMatch && partialMatch) {
518 | expect(exactMatch.similarity).toBeGreaterThan(
519 | partialMatch.similarity,
520 | );
521 | }
522 | }
523 | });
524 | });
525 |
526 | describe("Memory Statistics and Analytics", () => {
527 | test("should provide memory statistics for tools", async () => {
528 | // Create some test data
529 | await rememberAnalysis("/stats-test", {
530 | projectId: "stats-test",
531 | language: { primary: "go" },
532 | });
533 |
534 | await rememberDeployment("github.com/test/stats", {
535 | ssg: "hugo",
536 | status: "success",
537 | });
538 |
539 | const stats = await getMemoryStatistics();
540 |
541 | expect(stats).toBeDefined();
542 | expect(typeof stats).toBe("object");
543 | });
544 | });
545 |
546 | describe("Error Handling and Edge Cases", () => {
547 | test("should handle malformed analysis data gracefully", async () => {
548 | const malformedData = {
549 | // Missing required fields
550 | language: null,
551 | framework: undefined,
552 | };
553 |
554 | // Should not throw but handle gracefully
555 | const memoryId = await rememberAnalysis("/malformed", malformedData);
556 | expect(memoryId).toBeDefined();
557 |
558 | const memoryManager = await initializeMemory();
559 | const recalled = await memoryManager.recall(memoryId);
560 | expect(recalled).not.toBeNull();
561 | });
562 |
563 | test("should handle missing analysis when creating recommendations", async () => {
564 | const recommendationData = {
565 | recommended: "jekyll",
566 | confidence: 0.7,
567 | };
568 |
569 | // Reference non-existent analysis
570 | const memoryId = await rememberRecommendation(
571 | "non-existent-analysis",
572 | recommendationData,
573 | );
574 | expect(memoryId).toBeDefined();
575 |
576 | const memoryManager = await initializeMemory();
577 | const recalled = await memoryManager.recall(memoryId);
578 | expect(recalled?.data.recommended).toBe("jekyll");
579 | });
580 |
581 | test("should handle empty project history gracefully", async () => {
582 | const history = await recallProjectHistory("non-existent-project");
583 |
584 | expect(history.projectId).toBe("non-existent-project");
585 | expect(history.history).toBeDefined();
586 | expect(Array.isArray(history.insights)).toBe(true);
587 | });
588 |
589 | test("should handle similar projects search with no matches", async () => {
590 | const uniqueProject = {
591 | language: { primary: "rust" },
592 | framework: { name: "actix" },
593 | stats: { files: 500 },
594 | };
595 |
596 | const similarProjects = await getSimilarProjects(uniqueProject, 5);
597 |
598 | expect(Array.isArray(similarProjects)).toBe(true);
599 | // Should return empty array or minimal matches
600 | expect(similarProjects.length).toBeGreaterThanOrEqual(0);
601 | });
602 | });
603 | });
604 |
```
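
The test suite above doubles as a reference for the memory integration helpers. A condensed end-to-end sketch of the same workflow outside Jest follows; the helper names and call shapes come from the imports exercised above, while the import path, project identifiers, and data values are illustrative assumptions.

```typescript
// Condensed workflow sketch based on the helper signatures exercised in the
// tests above. Data shapes are illustrative assumptions, not canonical output.
import {
  initializeMemory,
  rememberAnalysis,
  rememberRecommendation,
  rememberDeployment,
  recallProjectHistory,
  getSimilarProjects,
} from "./src/memory/integration.js";

async function run() {
  await initializeMemory();

  // 1. Persist a repository analysis keyed by project path.
  const analysisId = await rememberAnalysis("/path/to/project", {
    projectId: "my-project",
    language: { primary: "typescript" },
    framework: { name: "react" },
    stats: { files: 120 },
  });

  // 2. Persist an SSG recommendation linked to that analysis.
  await rememberRecommendation(analysisId, {
    recommended: "docusaurus",
    confidence: 0.85,
  });

  // 3. Persist a deployment outcome keyed by repository.
  await rememberDeployment("github.com/me/my-project", {
    ssg: "docusaurus",
    status: "success",
    duration: 90,
  });

  // 4. Query the accumulated history and nearby projects.
  const history = await recallProjectHistory("my-project");
  const similar = await getSimilarProjects(
    { language: { primary: "typescript" }, framework: { name: "react" } },
    5,
  );

  console.log(history.insights, similar.length);
}

run().catch(console.error);
```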
--------------------------------------------------------------------------------
/tests/utils/content-extractor.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { promises as fs } from "fs";
2 | import path from "path";
3 | import os from "os";
4 | import { extractRepositoryContent } from "../../src/utils/content-extractor";
5 |
6 | describe("Content Extractor", () => {
7 | let tempDir: string;
8 |
9 | beforeEach(async () => {
10 | tempDir = path.join(os.tmpdir(), `content-extractor-${Date.now()}`);
11 | await fs.mkdir(tempDir, { recursive: true });
12 | });
13 |
14 | afterEach(async () => {
15 | try {
16 | await fs.rm(tempDir, { recursive: true, force: true });
17 | } catch {
18 | // Ignore cleanup errors
19 | }
20 | });
21 |
22 | describe("extractRepositoryContent", () => {
23 | it("should extract README.md content", async () => {
24 | const readmeContent = `# Test Project\n## Installation\nRun npm install\n## Usage\nUse it like this`;
25 | await fs.writeFile(path.join(tempDir, "README.md"), readmeContent);
26 |
27 | const result = await extractRepositoryContent(tempDir);
28 |
29 | expect(result.readme).toBeDefined();
30 | expect(result.readme?.content).toBe(readmeContent);
31 | expect(result.readme?.sections.length).toBeGreaterThan(0);
32 | expect(result.readme?.sections[0].title).toBe("Test Project");
33 | });
34 |
35 | it("should extract readme.md (lowercase) content", async () => {
36 | const readmeContent = `# Test\nContent`;
37 | await fs.writeFile(path.join(tempDir, "readme.md"), readmeContent);
38 |
39 | const result = await extractRepositoryContent(tempDir);
40 |
41 | expect(result.readme).toBeDefined();
42 | expect(result.readme?.content).toBe(readmeContent);
43 | });
44 |
45 | it("should extract Readme.md (mixed case) content", async () => {
46 | const readmeContent = `# Test\nContent`;
47 | await fs.writeFile(path.join(tempDir, "Readme.md"), readmeContent);
48 |
49 | const result = await extractRepositoryContent(tempDir);
50 |
51 | expect(result.readme).toBeDefined();
52 | expect(result.readme?.content).toBe(readmeContent);
53 | });
54 |
55 | it("should return undefined when no README exists", async () => {
56 | const result = await extractRepositoryContent(tempDir);
57 |
58 | expect(result.readme).toBeUndefined();
59 | });
60 |
61 | it("should extract existing documentation from docs directory", async () => {
62 | const docsDir = path.join(tempDir, "docs");
63 | await fs.mkdir(docsDir, { recursive: true });
64 | await fs.writeFile(
65 | path.join(docsDir, "guide.md"),
66 | "# Guide\nHow to do things",
67 | );
68 |
69 | const result = await extractRepositoryContent(tempDir);
70 |
71 | expect(result.existingDocs.length).toBeGreaterThan(0);
72 | expect(result.existingDocs[0].title).toBe("Guide");
73 | expect(result.existingDocs[0].path).toContain("guide.md");
74 | });
75 |
76 | it("should extract documentation from documentation directory", async () => {
77 | const docsDir = path.join(tempDir, "documentation");
78 | await fs.mkdir(docsDir, { recursive: true });
79 | await fs.writeFile(path.join(docsDir, "test.md"), "# Test Doc\nContent");
80 |
81 | const result = await extractRepositoryContent(tempDir);
82 |
83 | expect(result.existingDocs.length).toBeGreaterThan(0);
84 | });
85 |
86 | it("should extract documentation from doc directory", async () => {
87 | const docsDir = path.join(tempDir, "doc");
88 | await fs.mkdir(docsDir, { recursive: true });
89 | await fs.writeFile(path.join(docsDir, "test.md"), "# Test\nContent");
90 |
91 | const result = await extractRepositoryContent(tempDir);
92 |
93 | expect(result.existingDocs.length).toBeGreaterThan(0);
94 | });
95 |
96 | it("should extract .mdx files", async () => {
97 | const docsDir = path.join(tempDir, "docs");
98 | await fs.mkdir(docsDir, { recursive: true });
99 | await fs.writeFile(
100 | path.join(docsDir, "component.mdx"),
101 | "# Component\nJSX content",
102 | );
103 |
104 | const result = await extractRepositoryContent(tempDir);
105 |
106 | expect(result.existingDocs.length).toBeGreaterThan(0);
107 | expect(result.existingDocs[0].path).toContain("component.mdx");
108 | });
109 |
110 | it("should recursively extract docs from subdirectories", async () => {
111 | const docsDir = path.join(tempDir, "docs");
112 | const subDir = path.join(docsDir, "guides");
113 | await fs.mkdir(subDir, { recursive: true });
114 | await fs.writeFile(
115 | path.join(subDir, "tutorial.md"),
116 | "# Tutorial\nStep by step",
117 | );
118 |
119 | const result = await extractRepositoryContent(tempDir);
120 |
121 | expect(result.existingDocs.length).toBeGreaterThan(0);
122 | expect(result.existingDocs[0].path).toContain("guides");
123 | });
124 |
125 | it("should skip hidden directories", async () => {
126 | const docsDir = path.join(tempDir, "docs");
127 | const hiddenDir = path.join(docsDir, ".hidden");
128 | await fs.mkdir(hiddenDir, { recursive: true });
129 | await fs.writeFile(
130 | path.join(hiddenDir, "secret.md"),
131 | "# Secret\nContent",
132 | );
133 |
134 | const result = await extractRepositoryContent(tempDir);
135 |
136 | expect(
137 | result.existingDocs.find((d) => d.path.includes(".hidden")),
138 | ).toBeUndefined();
139 | });
140 |
141 | it("should categorize documents as tutorial", async () => {
142 | const docsDir = path.join(tempDir, "docs");
143 | await fs.mkdir(docsDir, { recursive: true });
144 | await fs.writeFile(
145 | path.join(docsDir, "getting-started.md"),
146 | "# Getting Started\n## Step 1\nFirst, do this",
147 | );
148 |
149 | const result = await extractRepositoryContent(tempDir);
150 |
151 | expect(result.existingDocs[0].category).toBe("tutorial");
152 | });
153 |
154 | it("should categorize documents as how-to", async () => {
155 | const docsDir = path.join(tempDir, "docs");
156 | await fs.mkdir(docsDir, { recursive: true });
157 | await fs.writeFile(
158 | path.join(docsDir, "how-to-deploy.md"),
159 | "# How to Deploy\nYou can deploy by...",
160 | );
161 |
162 | const result = await extractRepositoryContent(tempDir);
163 |
164 | expect(result.existingDocs[0].category).toBe("how-to");
165 | });
166 |
167 | it("should categorize documents as reference", async () => {
168 | const docsDir = path.join(tempDir, "docs");
169 | await fs.mkdir(docsDir, { recursive: true });
170 | await fs.writeFile(
171 | path.join(docsDir, "api.md"),
172 | "# API Reference\nEndpoints",
173 | );
174 |
175 | const result = await extractRepositoryContent(tempDir);
176 |
177 | expect(result.existingDocs[0].category).toBe("reference");
178 | });
179 |
180 | it("should categorize documents as explanation", async () => {
181 | const docsDir = path.join(tempDir, "docs");
182 | const adrDir = path.join(docsDir, "adrs");
183 | await fs.mkdir(adrDir, { recursive: true });
184 | await fs.writeFile(
185 | path.join(adrDir, "001-decision.md"),
186 | "# Decision\nExplanation",
187 | );
188 |
189 | const result = await extractRepositoryContent(tempDir);
190 |
191 | const adrDocs = result.existingDocs.filter((d) => d.path.includes("adr"));
192 | expect(adrDocs.length).toBeGreaterThan(0);
193 | expect(adrDocs[0].category).toBe("explanation");
194 | });
195 |
196 | it("should extract title from first heading", async () => {
197 | const docsDir = path.join(tempDir, "docs");
198 | await fs.mkdir(docsDir, { recursive: true });
199 | await fs.writeFile(
200 | path.join(docsDir, "doc.md"),
201 | "Some text\n# Main Title\nContent",
202 | );
203 |
204 | const result = await extractRepositoryContent(tempDir);
205 |
206 | expect(result.existingDocs[0].title).toBe("Main Title");
207 | });
208 |
209 | it("should use filename as title when no heading exists", async () => {
210 | const docsDir = path.join(tempDir, "docs");
211 | await fs.mkdir(docsDir, { recursive: true });
212 | await fs.writeFile(path.join(docsDir, "my-doc-file.md"), "No heading");
213 |
214 | const result = await extractRepositoryContent(tempDir);
215 |
216 | expect(result.existingDocs[0].title).toBe("my doc file");
217 | });
218 |
219 | it("should extract ADRs from docs/adrs", async () => {
220 | const adrDir = path.join(tempDir, "docs/adrs");
221 | await fs.mkdir(adrDir, { recursive: true });
222 | await fs.writeFile(
223 | path.join(adrDir, "001-use-typescript.md"),
224 | "# 1. Use TypeScript\n## Status\nAccepted\n## Decision\nWe will use TypeScript\n## Consequences\nBetter type safety",
225 | );
226 |
227 | const result = await extractRepositoryContent(tempDir);
228 |
229 | expect(result.adrs.length).toBeGreaterThan(0);
230 | expect(result.adrs[0].number).toBe("001");
231 | expect(result.adrs[0].title).toBe("Use TypeScript");
232 | expect(result.adrs[0].status).toBe("Accepted");
233 | expect(result.adrs[0].decision).toContain("TypeScript");
234 | expect(result.adrs[0].consequences).toContain("type safety");
235 | });
236 |
237 | it("should extract ADRs from docs/adr", async () => {
238 | const adrDir = path.join(tempDir, "docs/adr");
239 | await fs.mkdir(adrDir, { recursive: true });
240 | await fs.writeFile(
241 | path.join(adrDir, "0001-test.md"),
242 | "# Test\n## Status\nDraft\n## Decision\nTest",
243 | );
244 |
245 | const result = await extractRepositoryContent(tempDir);
246 |
247 | expect(result.adrs.length).toBeGreaterThan(0);
248 | });
249 |
250 | it("should extract ADRs from docs/decisions", async () => {
251 | const adrDir = path.join(tempDir, "docs/decisions");
252 | await fs.mkdir(adrDir, { recursive: true });
253 | await fs.writeFile(
254 | path.join(adrDir, "0001-test.md"),
255 | "# Test\n## Status\nDraft\n## Decision\nTest",
256 | );
257 |
258 | const result = await extractRepositoryContent(tempDir);
259 |
260 | expect(result.adrs.length).toBeGreaterThan(0);
261 | });
262 |
263 | it("should skip ADRs without number in filename", async () => {
264 | const adrDir = path.join(tempDir, "docs/adrs");
265 | await fs.mkdir(adrDir, { recursive: true });
266 | await fs.writeFile(
267 | path.join(adrDir, "template.md"),
268 | "# Template\n## Status\nN/A",
269 | );
270 |
271 | const result = await extractRepositoryContent(tempDir);
272 |
273 | expect(result.adrs.length).toBe(0);
274 | });
275 |
276 | it("should extract code examples from examples directory", async () => {
277 | const examplesDir = path.join(tempDir, "examples");
278 | await fs.mkdir(examplesDir, { recursive: true });
279 | await fs.writeFile(
280 | path.join(examplesDir, "hello.js"),
281 | "// Example: Hello World\nconsole.log('hello');",
282 | );
283 |
284 | const result = await extractRepositoryContent(tempDir);
285 |
286 | expect(result.codeExamples.length).toBeGreaterThan(0);
287 | expect(result.codeExamples[0].language).toBe("javascript");
288 | expect(result.codeExamples[0].file).toBe("hello.js");
289 | });
290 |
291 | it("should extract code examples from samples directory", async () => {
292 | const samplesDir = path.join(tempDir, "samples");
293 | await fs.mkdir(samplesDir, { recursive: true });
294 | await fs.writeFile(
295 | path.join(samplesDir, "test.py"),
296 | "# Demo: Python example\nprint('test')",
297 | );
298 |
299 | const result = await extractRepositoryContent(tempDir);
300 |
301 | expect(result.codeExamples.length).toBeGreaterThan(0);
302 | expect(result.codeExamples[0].language).toBe("python");
303 | });
304 |
305 | it("should extract code examples from demo directory", async () => {
306 | const demoDir = path.join(tempDir, "demo");
307 | await fs.mkdir(demoDir, { recursive: true });
308 | await fs.writeFile(path.join(demoDir, "test.ts"), "const x = 1;");
309 |
310 | const result = await extractRepositoryContent(tempDir);
311 |
312 | expect(result.codeExamples.length).toBeGreaterThan(0);
313 | expect(result.codeExamples[0].language).toBe("typescript");
314 | });
315 |
316 | it("should extract inline examples from @example tags", async () => {
317 | const srcDir = path.join(tempDir, "src");
318 | await fs.mkdir(srcDir, { recursive: true });
319 | await fs.writeFile(
320 | path.join(srcDir, "utils.ts"),
321 | "/**\n * @example\n * const result = add(1, 2);\n */\nfunction add(a, b) { return a + b; }",
322 | );
323 |
324 | const result = await extractRepositoryContent(tempDir);
325 |
326 | expect(result.codeExamples.length).toBeGreaterThan(0);
327 | expect(result.codeExamples[0].code).toContain("add(1, 2)");
328 | });
329 |
330 | it("should support various programming languages", async () => {
331 | const examplesDir = path.join(tempDir, "examples");
332 | await fs.mkdir(examplesDir, { recursive: true });
333 | await fs.writeFile(path.join(examplesDir, "test.rb"), "puts 'hello'");
334 | await fs.writeFile(path.join(examplesDir, "test.go"), "package main");
335 | await fs.writeFile(path.join(examplesDir, "test.java"), "class Test {}");
336 | await fs.writeFile(path.join(examplesDir, "test.rs"), "fn main() {}");
337 |
338 | const result = await extractRepositoryContent(tempDir);
339 |
340 | const languages = result.codeExamples.map((e) => e.language);
341 | expect(languages).toContain("ruby");
342 | expect(languages).toContain("go");
343 | expect(languages).toContain("java");
344 | expect(languages).toContain("rust");
345 | });
346 |
347 | it("should extract API docs from markdown files", async () => {
348 | const apiContent = `## \`getUserById\` function\n\nGet user by ID\n\n### Parameters\n\n- \`id\` (string) - User ID\n\n### Returns\n\nUser object`;
349 | await fs.writeFile(path.join(tempDir, "api.md"), apiContent);
350 |
351 | const result = await extractRepositoryContent(tempDir);
352 |
353 | expect(result.apiDocs.length).toBeGreaterThan(0);
354 | expect(result.apiDocs[0].function).toBe("getUserById");
355 | expect(result.apiDocs[0].parameters.length).toBeGreaterThan(0);
356 | });
357 |
358 | it("should extract API docs from OpenAPI spec", async () => {
359 | const openApiSpec = {
360 | paths: {
361 | "/users": {
362 | get: {
363 | summary: "List users",
364 | parameters: [
365 | {
366 | name: "page",
367 | type: "integer",
368 | description: "Page number",
369 | },
370 | ],
371 | responses: {
372 | "200": {
373 | description: "Success",
374 | },
375 | },
376 | },
377 | },
378 | },
379 | };
380 | await fs.writeFile(
381 | path.join(tempDir, "openapi.json"),
382 | JSON.stringify(openApiSpec),
383 | );
384 |
385 | const result = await extractRepositoryContent(tempDir);
386 |
387 | expect(result.apiDocs.length).toBeGreaterThan(0);
388 | expect(result.apiDocs[0].endpoint).toContain("GET");
389 | });
390 |
391 | it("should extract JSDoc from source files", async () => {
392 | const srcDir = path.join(tempDir, "src");
393 | await fs.mkdir(srcDir, { recursive: true });
394 | await fs.writeFile(
395 | path.join(srcDir, "utils.js"),
396 | "/**\n * Add two numbers\n * @param {number} a - First number\n * @param {number} b - Second number\n * @returns {number} Sum of a and b\n */\nfunction add(a, b) { return a + b; }",
397 | );
398 |
399 | const result = await extractRepositoryContent(tempDir);
400 |
401 | // JSDoc extraction may or may not find the function depending on parsing
402 | // Just ensure it doesn't crash and returns valid structure
403 | expect(result.apiDocs).toBeDefined();
404 | expect(Array.isArray(result.apiDocs)).toBe(true);
405 | });
406 |
407 | it("should handle empty repository gracefully", async () => {
408 | const result = await extractRepositoryContent(tempDir);
409 |
410 | expect(result.readme).toBeUndefined();
411 | expect(result.existingDocs).toEqual([]);
412 | expect(result.adrs).toEqual([]);
413 | expect(result.codeExamples).toEqual([]);
414 | expect(result.apiDocs).toEqual([]);
415 | });
416 |
417 | it("should handle unreadable files gracefully", async () => {
418 | const docsDir = path.join(tempDir, "docs");
419 | await fs.mkdir(docsDir, { recursive: true });
420 |
421 | // Create a regular file; making it truly unreadable is platform-dependent, so this only verifies extraction continues gracefully
422 | const filePath = path.join(docsDir, "test.md");
423 | await fs.writeFile(filePath, "content");
424 |
425 | // The function should handle errors gracefully and continue
426 | const result = await extractRepositoryContent(tempDir);
427 |
428 | expect(result).toBeDefined();
429 | });
430 |
431 | it("should parse markdown sections correctly", async () => {
432 | const readmeContent = `# Main\nIntro\n## Section 1\nContent 1\n### Subsection\nContent 2\n## Section 2\nContent 3`;
433 | await fs.writeFile(path.join(tempDir, "README.md"), readmeContent);
434 |
435 | const result = await extractRepositoryContent(tempDir);
436 |
437 | expect(result.readme?.sections.length).toBeGreaterThan(2);
438 | expect(result.readme?.sections[0].level).toBe(1);
439 | expect(result.readme?.sections[1].level).toBe(2);
440 | });
441 |
442 | it("should handle ADRs with 4-digit numbers", async () => {
443 | const adrDir = path.join(tempDir, "docs/adrs");
444 | await fs.mkdir(adrDir, { recursive: true });
445 | await fs.writeFile(
446 | path.join(adrDir, "1234-long-number.md"),
447 | "# Test\n## Status\nAccepted\n## Decision\nTest",
448 | );
449 |
450 | const result = await extractRepositoryContent(tempDir);
451 |
452 | expect(result.adrs.length).toBeGreaterThan(0);
453 | expect(result.adrs[0].number).toBe("1234");
454 | });
455 |
456 | it("should extract example description from code comments", async () => {
457 | const examplesDir = path.join(tempDir, "examples");
458 | await fs.mkdir(examplesDir, { recursive: true });
459 | await fs.writeFile(
460 | path.join(examplesDir, "test.js"),
461 | "// Example: This is a demo\nconsole.log('test');",
462 | );
463 |
464 | const result = await extractRepositoryContent(tempDir);
465 |
466 | expect(result.codeExamples[0].description).toContain("Example:");
467 | });
468 |
469 | it("should limit code example length", async () => {
470 | const examplesDir = path.join(tempDir, "examples");
471 | await fs.mkdir(examplesDir, { recursive: true });
472 | const longCode = "x".repeat(1000);
473 | await fs.writeFile(path.join(examplesDir, "test.js"), longCode);
474 |
475 | const result = await extractRepositoryContent(tempDir);
476 |
477 | expect(result.codeExamples[0].code.length).toBeLessThanOrEqual(500);
478 | });
479 |
480 | it("should handle invalid OpenAPI spec gracefully", async () => {
481 | await fs.writeFile(path.join(tempDir, "openapi.json"), "invalid json{");
482 |
483 | const result = await extractRepositoryContent(tempDir);
484 |
485 | // Should not crash, just skip the invalid spec
486 | expect(result).toBeDefined();
487 | });
488 |
489 | it("should skip non-markdown files in docs", async () => {
490 | const docsDir = path.join(tempDir, "docs");
491 | await fs.mkdir(docsDir, { recursive: true });
492 | await fs.writeFile(path.join(docsDir, "image.png"), "binary data");
493 | await fs.writeFile(path.join(docsDir, "valid.md"), "# Valid\nContent");
494 |
495 | const result = await extractRepositoryContent(tempDir);
496 |
497 | expect(result.existingDocs.length).toBe(1);
498 | expect(result.existingDocs[0].path).toContain("valid.md");
499 | });
500 |
501 | it("should handle swagger.yaml files", async () => {
502 | await fs.writeFile(path.join(tempDir, "swagger.yaml"), "openapi: 3.0.0");
503 |
504 | const result = await extractRepositoryContent(tempDir);
505 |
506 | // Should attempt to parse it (even if it fails due to YAML parsing)
507 | expect(result).toBeDefined();
508 | });
509 | });
510 | });
511 |
```
--------------------------------------------------------------------------------
/src/memory/learning.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Incremental Learning System for DocuMCP
3 | * Implements Issue #47: Incremental Learning System
4 | *
5 | * Enables continuous improvement of recommendations based on historical patterns,
6 | * success rates, and user feedback to optimize SSG suggestions and documentation strategies.
7 | */
8 |
9 | import { MemoryManager } from "./manager.js";
10 | import { MemoryEntry } from "./storage.js";
11 |
12 | export interface LearningPattern {
13 | id: string;
14 | type:
15 | | "ssg_preference"
16 | | "deployment_success"
17 | | "project_similarity"
18 | | "user_behavior";
19 | pattern: Record<string, any>;
20 | confidence: number;
21 | sampleSize: number;
22 | lastUpdated: string;
23 | metadata: {
24 | projectTypes?: string[];
25 | technologies?: string[];
26 | successRate?: number;
27 | frequency?: number;
28 | };
29 | }
30 |
31 | export interface LearningInsight {
32 | type: "recommendation" | "warning" | "optimization";
33 | message: string;
34 | confidence: number;
35 | actionable: boolean;
36 | data: Record<string, any>;
37 | }
38 |
39 | export interface ProjectFeatures {
40 | language: string;
41 | framework?: string;
42 | size: "small" | "medium" | "large";
43 | complexity: "simple" | "moderate" | "complex";
44 | hasTests: boolean;
45 | hasCI: boolean;
46 | hasDocs: boolean;
47 | teamSize?: number;
48 | isOpenSource: boolean;
49 | }
50 |
51 | export class IncrementalLearningSystem {
52 | private memoryManager: MemoryManager;
53 | private patterns: Map<string, LearningPattern>;
54 | private learningEnabled: boolean = true;
55 | private readonly minSampleSize = 3;
56 | private readonly confidenceThreshold = 0.7;
57 |
58 | constructor(memoryManager: MemoryManager) {
59 | this.memoryManager = memoryManager;
60 | this.patterns = new Map();
61 | }
62 |
63 | async initialize(): Promise<void> {
64 | await this.loadPatterns();
65 | await this.updatePatterns();
66 | }
67 |
68 | /**
69 | * Learn from a new interaction result
70 | */
71 | async learn(
72 | interaction: MemoryEntry,
73 | outcome: "success" | "failure" | "neutral",
74 | feedback?: Record<string, any>,
75 | ): Promise<void> {
76 | if (!this.learningEnabled) return;
77 |
78 | const features = this.extractFeatures(interaction);
79 |
80 | // Update SSG preference patterns
81 | if (interaction.type === "recommendation" && interaction.metadata.ssg) {
82 | await this.updateSSGPattern(features, interaction.metadata.ssg, outcome);
83 | }
84 |
85 | // Update deployment success patterns
86 | if (interaction.type === "deployment") {
87 | await this.updateDeploymentPattern(features, outcome);
88 | }
89 |
90 | // Update project similarity patterns
91 | if (interaction.type === "analysis") {
92 | await this.updateSimilarityPattern(features, interaction);
93 | }
94 |
95 | // Learn from user feedback
96 | if (feedback) {
97 | await this.updateUserBehaviorPattern(features, feedback);
98 | }
99 |
100 | await this.persistPatterns();
101 | }
102 |
103 | /**
104 | * Get improved recommendations based on learned patterns
105 | */
106 | async getImprovedRecommendation(
107 | projectFeatures: ProjectFeatures,
108 | baseRecommendation: any,
109 | ): Promise<{
110 | recommendation: any;
111 | confidence: number;
112 | insights: LearningInsight[];
113 | }> {
114 | const insights: LearningInsight[] = [];
115 | const adjustedRecommendation = { ...baseRecommendation };
116 | let confidenceBoost = 0;
117 |
118 | // Apply SSG preference patterns
119 | const ssgPattern = await this.getSSGPreferencePattern(projectFeatures);
120 | if (ssgPattern && ssgPattern.confidence > this.confidenceThreshold) {
121 | const preferredSSG = ssgPattern.pattern.preferredSSG;
122 | if (preferredSSG !== baseRecommendation.recommended) {
123 | insights.push({
124 | type: "recommendation",
125 | message: `Based on ${
126 | ssgPattern.sampleSize
127 | } similar projects, ${preferredSSG} has a ${(
128 | ssgPattern.pattern.successRate * 100
129 | ).toFixed(0)}% success rate`,
130 | confidence: ssgPattern.confidence,
131 | actionable: true,
132 | data: { suggestedSSG: preferredSSG, pattern: ssgPattern },
133 | });
134 |
135 | adjustedRecommendation.recommended = preferredSSG;
136 | adjustedRecommendation.learningAdjusted = true;
137 | confidenceBoost += 0.2;
138 | }
139 | }
140 |
141 | // Apply deployment success patterns
142 | const deploymentPattern = await this.getDeploymentPattern(projectFeatures);
143 | if (
144 | deploymentPattern &&
145 | deploymentPattern.confidence > this.confidenceThreshold
146 | ) {
147 | const riskFactors = deploymentPattern.pattern.riskFactors || [];
148 | if (riskFactors.length > 0) {
149 | insights.push({
150 | type: "warning",
151 | message: `Projects with similar characteristics have ${riskFactors.length} common deployment issues`,
152 | confidence: deploymentPattern.confidence,
153 | actionable: true,
154 | data: { riskFactors, pattern: deploymentPattern },
155 | });
156 |
157 | adjustedRecommendation.deploymentWarnings = riskFactors;
158 | }
159 | }
160 |
161 | // Apply optimization patterns
162 | const optimizations = await this.getOptimizationInsights(projectFeatures);
163 | insights.push(...optimizations);
164 |
165 | const finalConfidence = Math.min(
166 | baseRecommendation.confidence + confidenceBoost,
167 | 1.0,
168 | );
169 |
170 | return {
171 | recommendation: adjustedRecommendation,
172 | confidence: finalConfidence,
173 | insights,
174 | };
175 | }
176 |
177 | /**
178 | * Extract features from project data for pattern matching
179 | */
180 | private extractFeatures(interaction: MemoryEntry): ProjectFeatures {
181 | const data = interaction.data;
182 |
183 | return {
184 | language: data.language?.primary || "unknown",
185 | framework: data.framework?.name,
186 | size: this.categorizeSize(data.stats?.files || 0),
187 | complexity: this.categorizeComplexity(data),
188 | hasTests: Boolean(data.testing?.hasTests),
189 | hasCI: Boolean(data.ci?.hasCI),
190 | hasDocs: Boolean(data.documentation?.exists),
191 | isOpenSource: Boolean(data.repository?.isPublic),
192 | };
193 | }
194 |
195 | private categorizeSize(fileCount: number): "small" | "medium" | "large" {
196 | if (fileCount < 50) return "small";
197 | if (fileCount < 200) return "medium";
198 | return "large";
199 | }
200 |
201 | private categorizeComplexity(data: any): "simple" | "moderate" | "complex" {
202 | let complexity = 0;
203 |
204 | if (data.dependencies?.count > 20) complexity++;
205 | if (data.framework?.name) complexity++;
206 | if (data.testing?.frameworks?.length > 1) complexity++;
207 | if (data.ci?.workflows?.length > 2) complexity++;
208 | if (data.architecture?.patterns?.length > 3) complexity++;
209 |
210 | if (complexity <= 1) return "simple";
211 | if (complexity <= 3) return "moderate";
212 | return "complex";
213 | }
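  // Illustrative only (not in the repository source): a project with 25 dependencies,
  // a detected framework, and two test frameworks scores 3 points above, which maps
  // to "moderate".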
214 |
215 | /**
216 | * Update SSG preference patterns based on outcomes
217 | */
218 | private async updateSSGPattern(
219 | features: ProjectFeatures,
220 | ssg: string,
221 | outcome: "success" | "failure" | "neutral",
222 | ): Promise<void> {
223 | const patternKey = this.generatePatternKey("ssg_preference", features);
224 | const existing = this.patterns.get(patternKey);
225 |
226 | if (existing) {
227 | // Update existing pattern
228 | const totalCount = existing.sampleSize;
229 | const successCount = existing.pattern.successCount || 0;
230 | const newSuccessCount =
231 | outcome === "success" ? successCount + 1 : successCount;
232 |
233 | existing.pattern.preferredSSG = ssg;
234 | existing.pattern.successCount = newSuccessCount;
235 | existing.pattern.successRate = newSuccessCount / (totalCount + 1);
236 | existing.sampleSize = totalCount + 1;
237 | existing.confidence = Math.min(existing.sampleSize / 10, 1.0);
238 | existing.lastUpdated = new Date().toISOString();
239 | } else {
240 | // Create new pattern
241 | const pattern: LearningPattern = {
242 | id: patternKey,
243 | type: "ssg_preference",
244 | pattern: {
245 | preferredSSG: ssg,
246 | successCount: outcome === "success" ? 1 : 0,
247 | successRate: outcome === "success" ? 1.0 : 0.0,
248 | },
249 | confidence: 0.1,
250 | sampleSize: 1,
251 | lastUpdated: new Date().toISOString(),
252 | metadata: {
253 | projectTypes: [features.language],
254 | technologies: features.framework ? [features.framework] : [],
255 | },
256 | };
257 |
258 | this.patterns.set(patternKey, pattern);
259 | }
260 | }
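  // Illustrative only: pattern confidence is Math.min(sampleSize / 10, 1.0), so a
  // pattern backed by 4 observations has confidence 0.4 and saturates at 10 samples.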
261 |
262 | /**
263 | * Update deployment success patterns
264 | */
265 | private async updateDeploymentPattern(
266 | features: ProjectFeatures,
267 | outcome: "success" | "failure" | "neutral",
268 | ): Promise<void> {
269 | const patternKey = this.generatePatternKey("deployment_success", features);
270 | const existing = this.patterns.get(patternKey);
271 |
272 | const riskFactors: string[] = [];
273 | if (!features.hasTests) riskFactors.push("no_tests");
274 | if (!features.hasCI) riskFactors.push("no_ci");
275 | if (features.complexity === "complex") riskFactors.push("high_complexity");
276 | if (features.size === "large") riskFactors.push("large_codebase");
277 |
278 | if (existing) {
279 | const totalCount = existing.sampleSize;
280 | const successCount = existing.pattern.successCount || 0;
281 | const newSuccessCount =
282 | outcome === "success" ? successCount + 1 : successCount;
283 |
284 | existing.pattern.successCount = newSuccessCount;
285 | existing.pattern.successRate = newSuccessCount / (totalCount + 1);
286 | existing.pattern.riskFactors = riskFactors;
287 | existing.sampleSize = totalCount + 1;
288 | existing.confidence = Math.min(existing.sampleSize / 10, 1.0);
289 | existing.lastUpdated = new Date().toISOString();
290 | } else {
291 | const pattern: LearningPattern = {
292 | id: patternKey,
293 | type: "deployment_success",
294 | pattern: {
295 | successCount: outcome === "success" ? 1 : 0,
296 | successRate: outcome === "success" ? 1.0 : 0.0,
297 | riskFactors,
298 | },
299 | confidence: 0.1,
300 | sampleSize: 1,
301 | lastUpdated: new Date().toISOString(),
302 | metadata: {
303 | projectTypes: [features.language],
304 | successRate: outcome === "success" ? 1.0 : 0.0,
305 | },
306 | };
307 |
308 | this.patterns.set(patternKey, pattern);
309 | }
310 | }
311 |
312 | /**
313 | * Update project similarity patterns for better matching
314 | */
315 | private async updateSimilarityPattern(
316 | features: ProjectFeatures,
317 | _interaction: MemoryEntry,
318 | ): Promise<void> {
319 | const patternKey = this.generatePatternKey("project_similarity", features);
320 | const existing = this.patterns.get(patternKey);
321 |
322 | const characteristics = {
323 | language: features.language,
324 | framework: features.framework,
325 | size: features.size,
326 | complexity: features.complexity,
327 | };
328 |
329 | if (existing) {
330 | existing.pattern.characteristics = characteristics;
331 | existing.sampleSize += 1;
332 | existing.confidence = Math.min(existing.sampleSize / 15, 1.0);
333 | existing.lastUpdated = new Date().toISOString();
334 | } else {
335 | const pattern: LearningPattern = {
336 | id: patternKey,
337 | type: "project_similarity",
338 | pattern: {
339 | characteristics,
340 | commonPatterns: [],
341 | },
342 | confidence: 0.1,
343 | sampleSize: 1,
344 | lastUpdated: new Date().toISOString(),
345 | metadata: {
346 | projectTypes: [features.language],
347 | },
348 | };
349 |
350 | this.patterns.set(patternKey, pattern);
351 | }
352 | }
353 |
354 | /**
355 | * Update user behavior patterns from feedback
356 | */
357 | private async updateUserBehaviorPattern(
358 | features: ProjectFeatures,
359 | feedback: Record<string, any>,
360 | ): Promise<void> {
361 | const patternKey = this.generatePatternKey("user_behavior", features);
362 | const existing = this.patterns.get(patternKey);
363 |
364 | if (existing) {
365 | existing.pattern.feedback = { ...existing.pattern.feedback, ...feedback };
366 | existing.sampleSize += 1;
367 | existing.confidence = Math.min(existing.sampleSize / 5, 1.0);
368 | existing.lastUpdated = new Date().toISOString();
369 | } else {
370 | const pattern: LearningPattern = {
371 | id: patternKey,
372 | type: "user_behavior",
373 | pattern: {
374 | feedback,
375 | preferences: {},
376 | },
377 | confidence: 0.2,
378 | sampleSize: 1,
379 | lastUpdated: new Date().toISOString(),
380 | metadata: {},
381 | };
382 |
383 | this.patterns.set(patternKey, pattern);
384 | }
385 | }
386 |
387 | /**
388 | * Generate a consistent pattern key for grouping similar projects
389 | */
390 | private generatePatternKey(type: string, features: ProjectFeatures): string {
391 | const keyParts = [
392 | type,
393 | features.language,
394 | features.framework || "none",
395 | features.size,
396 | features.complexity,
397 | ];
398 | return keyParts.join("_").toLowerCase();
399 | }
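  // Illustrative only: for a medium-sized, moderately complex TypeScript project
  // using React, this produces "ssg_preference_typescript_react_medium_moderate".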
400 |
401 | /**
402 | * Get SSG preference pattern for similar projects
403 | */
404 | private async getSSGPreferencePattern(
405 | features: ProjectFeatures,
406 | ): Promise<LearningPattern | null> {
407 | const patternKey = this.generatePatternKey("ssg_preference", features);
408 | const pattern = this.patterns.get(patternKey);
409 |
410 | if (pattern && pattern.sampleSize >= this.minSampleSize) {
411 | return pattern;
412 | }
413 |
414 | // Try broader matching if exact match not found
415 | const broaderKey = `ssg_preference_${features.language}_${features.size}`;
416 | return this.patterns.get(broaderKey) || null;
417 | }
418 |
419 | /**
420 | * Get deployment success pattern for risk assessment
421 | */
422 | private async getDeploymentPattern(
423 | features: ProjectFeatures,
424 | ): Promise<LearningPattern | null> {
425 | const patternKey = this.generatePatternKey("deployment_success", features);
426 | const pattern = this.patterns.get(patternKey);
427 |
428 | if (pattern && pattern.sampleSize >= this.minSampleSize) {
429 | return pattern;
430 | }
431 |
432 | return null;
433 | }
434 |
435 | /**
436 | * Generate optimization insights based on learned patterns
437 | */
438 | private async getOptimizationInsights(
439 | features: ProjectFeatures,
440 | ): Promise<LearningInsight[]> {
441 | const insights: LearningInsight[] = [];
442 |
443 | // Check for common optimization opportunities
444 | if (!features.hasTests) {
445 | insights.push({
446 | type: "optimization",
447 | message: "Adding tests could improve deployment success rate by 25%",
448 | confidence: 0.8,
449 | actionable: true,
450 | data: { optimization: "add_tests", impact: "deployment_success" },
451 | });
452 | }
453 |
454 | if (!features.hasCI && features.size !== "small") {
455 | insights.push({
456 | type: "optimization",
457 | message: "CI/CD setup recommended for projects of this size",
458 | confidence: 0.7,
459 | actionable: true,
460 | data: { optimization: "add_ci", impact: "development_velocity" },
461 | });
462 | }
463 |
464 | if (features.complexity === "complex" && !features.hasDocs) {
465 | insights.push({
466 | type: "optimization",
467 | message:
468 | "Complex projects benefit significantly from comprehensive documentation",
469 | confidence: 0.9,
470 | actionable: true,
471 | data: { optimization: "improve_docs", impact: "maintainability" },
472 | });
473 | }
474 |
475 | return insights;
476 | }
477 |
478 | /**
479 | * Load patterns from persistent storage
480 | */
481 | private async loadPatterns(): Promise<void> {
482 | try {
483 | const patternMemories =
484 | await this.memoryManager.search("learning_pattern");
485 |
486 | for (const memory of patternMemories) {
487 | if (memory.data.pattern) {
488 | this.patterns.set(memory.data.pattern.id, memory.data.pattern);
489 | }
490 | }
491 | } catch (error) {
492 | console.error("Failed to load learning patterns:", error);
493 | }
494 | }
495 |
496 | /**
497 | * Update patterns based on recent memory data
498 | */
499 | private async updatePatterns(): Promise<void> {
500 | // Analyze recent memories to update patterns
501 | const recentMemories = await this.memoryManager.search("", {
502 | sortBy: "timestamp",
503 | });
504 |
505 | const cutoffDate = new Date(Date.now() - 7 * 24 * 60 * 60 * 1000); // Last 7 days
506 |
507 | for (const memory of recentMemories) {
508 | if (new Date(memory.timestamp) > cutoffDate) {
509 | // Infer outcome based on memory data
510 | const outcome = this.inferOutcome(memory);
511 | if (outcome) {
512 | await this.learn(memory, outcome);
513 | }
514 | }
515 | }
516 | }
517 |
518 | /**
519 | * Infer outcome from memory entry data
520 | */
521 | private inferOutcome(
522 | memory: MemoryEntry,
523 | ): "success" | "failure" | "neutral" | null {
524 | if (memory.type === "deployment") {
525 | if (memory.data.status === "success") return "success";
526 | if (memory.data.status === "failed") return "failure";
527 | }
528 |
529 | if (memory.type === "recommendation" && memory.data.feedback) {
530 | if (memory.data.feedback.rating > 3) return "success";
531 | if (memory.data.feedback.rating < 3) return "failure";
532 | }
533 |
534 | return "neutral";
535 | }
536 |
537 | /**
538 | * Persist learned patterns to memory
539 | */
540 | private async persistPatterns(): Promise<void> {
541 | for (const [, pattern] of this.patterns) {
542 | if (pattern.sampleSize >= this.minSampleSize) {
543 | await this.memoryManager.remember(
544 | "interaction",
545 | {
546 | pattern,
547 | type: "learning_pattern",
548 | },
549 | {
550 | tags: ["learning", "pattern", pattern.type],
551 | },
552 | );
553 | }
554 | }
555 | }
556 |
557 | /**
558 | * Get all learned patterns
559 | */
560 | async getPatterns(): Promise<LearningPattern[]> {
561 | return Array.from(this.patterns.values());
562 | }
563 |
564 | /**
565 | * Get learning statistics and insights
566 | */
567 | async getStatistics(): Promise<{
568 | totalPatterns: number;
569 | patternsByType: Record<string, number>;
570 | averageConfidence: number;
571 | learningVelocity: number;
572 | insights: string[];
573 | }> {
574 | const stats = {
575 | totalPatterns: this.patterns.size,
576 | patternsByType: {} as Record<string, number>,
577 | averageConfidence: 0,
578 | learningVelocity: 0,
579 | insights: [] as string[],
580 | };
581 |
582 | let totalConfidence = 0;
583 | for (const pattern of this.patterns.values()) {
584 | stats.patternsByType[pattern.type] =
585 | (stats.patternsByType[pattern.type] || 0) + 1;
586 | totalConfidence += pattern.confidence;
587 | }
588 |
589 | stats.averageConfidence =
590 | stats.totalPatterns > 0 ? totalConfidence / stats.totalPatterns : 0;
591 |
592 | // Calculate learning velocity (patterns learned in last week)
593 | const weekAgo = new Date(Date.now() - 7 * 24 * 60 * 60 * 1000);
594 | stats.learningVelocity = Array.from(this.patterns.values()).filter(
595 | (p) => new Date(p.lastUpdated) > weekAgo,
596 | ).length;
597 |
598 | // Generate insights
599 | if (stats.totalPatterns > 10) {
600 | stats.insights.push(
601 | `System has learned ${stats.totalPatterns} patterns with ${(
602 | stats.averageConfidence * 100
603 | ).toFixed(0)}% average confidence`,
604 | );
605 | }
606 |
607 | if (stats.learningVelocity > 0) {
608 | stats.insights.push(
609 | `Learning velocity: ${stats.learningVelocity} new patterns this week`,
610 | );
611 | }
612 |
613 | const topPatternType = Object.entries(stats.patternsByType).sort(
614 | ([, a], [, b]) => b - a,
615 | )[0];
616 |
617 | if (topPatternType) {
618 | stats.insights.push(
619 | `Most common pattern type: ${topPatternType[0]} (${topPatternType[1]} patterns)`,
620 | );
621 | }
622 |
623 | return stats;
624 | }
625 |
626 | /**
627 | * Enable or disable learning
628 | */
629 | setLearningEnabled(enabled: boolean): void {
630 | this.learningEnabled = enabled;
631 | }
632 |
633 | /**
634 | * Clear all learned patterns (useful for testing or reset)
635 | */
636 | async clearPatterns(): Promise<void> {
637 | this.patterns.clear();
638 | }
639 | }
640 |
641 | export default IncrementalLearningSystem;
642 |
```
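A minimal usage sketch for the learning system above. It is not part of the repository source; the `MemoryManager` construction and the shape of the `remember()` metadata mirror how `src/memory/integration.ts` uses them, and the entry payload, outcome, and project features are illustrative assumptions.

```typescript
import { MemoryManager } from "./manager.js";
import { IncrementalLearningSystem, ProjectFeatures } from "./learning.js";

async function sketch() {
  // Storage directory mirrors the default used by the integration layer.
  const manager = new MemoryManager(".documcp/memory");
  await manager.initialize();

  const learner = new IncrementalLearningSystem(manager);
  await learner.initialize();

  // Record the outcome of a prior recommendation; metadata.ssg is what
  // updateSSGPattern() keys on.
  const entry = await manager.remember(
    "recommendation",
    { recommended: "docusaurus", language: { primary: "typescript" } },
    { ssg: "docusaurus", tags: ["recommendation", "docusaurus", "ssg"] },
  );
  await learner.learn(entry, "success", { rating: 5 });

  // Ask for a learning-adjusted recommendation for a similar project.
  const features: ProjectFeatures = {
    language: "typescript",
    framework: "react",
    size: "medium",
    complexity: "moderate",
    hasTests: true,
    hasCI: true,
    hasDocs: false,
    isOpenSource: true,
  };
  const { recommendation, confidence, insights } =
    await learner.getImprovedRecommendation(features, {
      recommended: "docusaurus",
      confidence: 0.7,
    });
  console.log(recommendation.recommended, confidence, insights);
}

sketch().catch(console.error);
```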
--------------------------------------------------------------------------------
/src/memory/integration.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Memory System Integration for DocuMCP
3 | * Connects memory capabilities to MCP tools
4 | */
5 |
6 | import { MemoryManager } from "./manager.js";
7 | import { MemoryEntry } from "./storage.js";
8 |
9 | let memoryManager: MemoryManager | null = null;
10 |
11 | /**
12 | * Initializes the DocuMCP memory system for persistent learning and context.
13 | *
14 | * Sets up the memory manager with optional custom storage directory, configures
15 | * event listeners for debugging in development mode, and ensures the memory
16 | * system is ready for storing and retrieving project analysis data, user
17 | * preferences, and deployment patterns.
18 | *
19 | * @param storageDir - Optional custom directory path for memory storage (defaults to .documcp/memory)
20 | *
21 | * @returns Promise resolving to the initialized MemoryManager instance
22 | *
23 | * @throws {Error} When memory system initialization fails
24 | * @throws {Error} When storage directory cannot be created or accessed
25 | *
26 | * @example
27 | * ```typescript
28 | * // Initialize with default storage
29 | * const memory = await initializeMemory();
30 | *
31 | * // Initialize with custom storage directory
32 | * const customMemory = await initializeMemory("/custom/memory/path");
33 | * ```
34 | *
35 | * @since 1.0.0
36 | * @version 1.2.0 - Added development mode event logging
37 | */
38 | export async function initializeMemory(
39 | storageDir?: string,
40 | ): Promise<MemoryManager> {
41 | if (!memoryManager) {
42 | memoryManager = new MemoryManager(storageDir);
43 | await memoryManager.initialize();
44 |
45 | // Set up event listeners (debug logging disabled in production)
46 | if (process.env.NODE_ENV === "development") {
47 | memoryManager.on("memory-created", (entry: MemoryEntry) => {
48 | // eslint-disable-next-line no-console
49 | console.log(`[Memory] Created: ${entry.id} (${entry.type})`);
50 | });
51 |
52 | memoryManager.on("memory-updated", (entry: MemoryEntry) => {
53 | // eslint-disable-next-line no-console
54 | console.log(`[Memory] Updated: ${entry.id}`);
55 | });
56 |
57 | memoryManager.on("memory-deleted", (id: string) => {
58 | // eslint-disable-next-line no-console
59 | console.log(`[Memory] Deleted: ${id}`);
60 | });
61 | }
62 | }
63 |
64 | return memoryManager;
65 | }
66 |
67 | /**
68 | * Stores repository analysis data in the memory system for future reference.
69 | *
70 | * Persists comprehensive repository analysis results including structure, dependencies,
71 | * documentation status, and recommendations. This data is used for learning patterns,
72 | * improving future recommendations, and providing historical context for similar projects.
73 | *
74 | * @param projectPath - The file system path to the analyzed repository
75 | * @param analysisData - The complete repository analysis results to store
76 | *
77 | * @returns Promise resolving to the unique memory entry ID
78 | *
79 | * @throws {Error} When memory system is not initialized
80 | * @throws {Error} When analysis data cannot be stored
81 | *
82 | * @example
83 | * ```typescript
84 | * const analysisId = await rememberAnalysis("/path/to/project", {
85 | * id: "analysis_123",
86 | * structure: { totalFiles: 150, languages: { ".ts": 100 } },
87 | * dependencies: { ecosystem: "javascript", packages: ["react"] },
88 | * // ... other analysis data
89 | * });
90 | * ```
91 | *
92 | * @since 1.0.0
93 | */
94 | export async function rememberAnalysis(
95 | projectPath: string,
96 | analysisData: any,
97 | ): Promise<string> {
98 | const manager = await initializeMemory();
99 |
100 | manager.setContext({
101 | projectId: analysisData.projectId || projectPath,
102 | repository: analysisData.repository?.url,
103 | });
104 |
105 | const entry = await manager.remember("analysis", analysisData, {
106 | repository: analysisData.repository?.url,
107 | tags: [
108 | "analysis",
109 | analysisData.language?.primary || "unknown",
110 | analysisData.framework?.name || "none",
111 | ],
112 | });
113 |
114 | return entry.id;
115 | }
116 |
117 | /**
118 | * Stores SSG recommendation data in the memory system for learning and pattern recognition.
119 | *
120 | * Persists recommendation results including the chosen SSG, confidence scores, reasoning,
121 | * and alternatives. This data is used to improve future recommendations by learning from
122 | * successful patterns and user choices.
123 | *
124 | * @param analysisId - The unique identifier of the associated repository analysis
125 | * @param recommendation - The complete SSG recommendation results to store
126 | *
127 | * @returns Promise resolving to the unique memory entry ID
128 | *
129 | * @throws {Error} When memory system is not initialized
130 | * @throws {Error} When recommendation data cannot be stored
131 | * @throws {Error} When the associated analysis cannot be found
132 | *
133 | * @example
134 | * ```typescript
135 | * const recommendationId = await rememberRecommendation("analysis_123", {
136 | * recommended: "docusaurus",
137 | * confidence: 0.92,
138 | * reasoning: ["React-based project", "Documentation focus"],
139 | * alternatives: [
140 | * { name: "hugo", score: 0.85, pros: ["Performance"], cons: ["Learning curve"] }
141 | * ]
142 | * });
143 | * ```
144 | *
145 | * @since 1.0.0
146 | */
147 | export async function rememberRecommendation(
148 | analysisId: string,
149 | recommendation: any,
150 | ): Promise<string> {
151 | const manager = await initializeMemory();
152 |
153 | const entry = await manager.remember("recommendation", recommendation, {
154 | ssg: recommendation.recommended,
155 | tags: ["recommendation", recommendation.recommended, "ssg"],
156 | });
157 |
158 | // Link to analysis
159 | const analysis = await manager.recall(analysisId);
160 | if (analysis) {
161 | await manager.update(entry.id, {
162 | metadata: {
163 | ...entry.metadata,
164 | projectId: analysis.metadata.projectId,
165 | },
166 | });
167 | }
168 |
169 | return entry.id;
170 | }
171 |
172 | /**
173 | * Stores deployment data in the memory system for success tracking and analytics.
174 | *
175 | * Persists deployment results including success status, timing, configuration used,
176 | * and any issues encountered. This data enables deployment analytics, success rate
177 | * tracking, and identification of deployment patterns for optimization.
178 | *
179 | * @param repository - The repository URL or identifier for the deployment
180 | * @param deploymentData - The complete deployment results and metadata
181 | *
182 | * @returns Promise resolving to the unique memory entry ID
183 | *
184 | * @throws {Error} When memory system is not initialized
185 | * @throws {Error} When deployment data cannot be stored
186 | *
187 | * @example
188 | * ```typescript
189 | * const deploymentId = await rememberDeployment("https://github.com/user/repo", {
190 | * success: true,
191 | * ssg: "docusaurus",
192 | * deploymentTime: 180000,
193 | * url: "https://user.github.io/repo",
194 | * issues: [],
195 | * configuration: { theme: "classic", plugins: ["search"] }
196 | * });
197 | * ```
198 | *
199 | * @since 1.0.0
200 | */
201 | export async function rememberDeployment(
202 | repository: string,
203 | deploymentData: any,
204 | ): Promise<string> {
205 | const manager = await initializeMemory();
206 |
207 | manager.setContext({
208 | projectId: repository,
209 | repository,
210 | });
211 |
212 | const entry = await manager.remember("deployment", deploymentData, {
213 | repository,
214 | ssg: deploymentData.ssg,
215 | tags: [
216 | "deployment",
217 | deploymentData.status || "unknown",
218 | deploymentData.ssg,
219 | ],
220 | });
221 |
222 | return entry.id;
223 | }
224 |
225 | export async function rememberConfiguration(
226 | projectName: string,
227 | ssg: string,
228 | configData: any,
229 | ): Promise<string> {
230 | const manager = await initializeMemory();
231 |
232 | manager.setContext({
233 | projectId: projectName,
234 | });
235 |
236 | const entry = await manager.remember("configuration", configData, {
237 | ssg,
238 | tags: ["configuration", ssg, projectName],
239 | });
240 |
241 | return entry.id;
242 | }
243 |
244 | export async function recallProjectHistory(projectId: string): Promise<any> {
245 | const manager = await initializeMemory();
246 |
247 | const memories = await manager.search(
248 | { projectId },
249 | { sortBy: "timestamp", groupBy: "type" },
250 | );
251 |
252 | return {
253 | projectId,
254 | history: memories,
255 | insights: await getProjectInsights(projectId),
256 | };
257 | }
258 |
259 | /**
260 | * Retrieves intelligent insights about a project based on historical data and patterns.
261 | *
262 |  * Analyzes stored project data to provide actionable insights, such as previously
263 |  * used SSGs, deployment success rates, and recent activity. Insights are derived
264 |  * by matching patterns across the project's stored memories to provide context
265 |  * for future recommendations.
266 | *
267 | * @param projectId - The unique identifier of the project to analyze
268 | *
269 | * @returns Promise resolving to an array of insight strings
270 | *
271 | * @throws {Error} When memory system is not initialized
272 | * @throws {Error} When project data cannot be retrieved
273 | *
274 | * @example
275 | * ```typescript
276 | * const insights = await getProjectInsights("project_abc123");
277 | * console.log(insights);
278 | * // Output: [
279 | * // "Consider upgrading to Docusaurus v3 for better performance",
280 | * // "Similar projects show 95% success rate with current configuration",
281 | * // "Documentation could benefit from additional API examples"
282 | * // ]
283 | * ```
284 | *
285 | * @since 1.0.0
286 | */
287 | export async function getProjectInsights(projectId: string): Promise<string[]> {
288 | const manager = await initializeMemory();
289 |
290 | const memories = await manager.search({ projectId });
291 | const insights: string[] = [];
292 |
293 | // Find patterns in SSG choices
294 | const ssgMemories = memories.filter((m: any) => m.metadata.ssg);
295 | if (ssgMemories.length > 0) {
296 | const lastSSG = ssgMemories[ssgMemories.length - 1].metadata.ssg;
297 | insights.push(`Previously used ${lastSSG} for this project`);
298 | }
299 |
300 | // Find deployment patterns
301 | const deployments = memories.filter((m: any) => m.type === "deployment");
302 | if (deployments.length > 0) {
303 | const successful = deployments.filter(
304 | (d: any) => d.data.status === "success",
305 | ).length;
306 | const rate = ((successful / deployments.length) * 100).toFixed(0);
307 | insights.push(`Deployment success rate: ${rate}%`);
308 | }
309 |
310 | // Find recent activity
311 | const lastMemory = memories[memories.length - 1];
312 | if (lastMemory) {
313 | const daysAgo = Math.floor(
314 | (Date.now() - new Date(lastMemory.timestamp).getTime()) /
315 | (1000 * 60 * 60 * 24),
316 | );
317 | insights.push(`Last activity: ${daysAgo} days ago`);
318 | }
319 |
320 | return insights;
321 | }
322 |
323 | /**
324 | * Finds similar projects based on analysis data and historical patterns.
325 | *
326 |  * Uses a weighted similarity score to identify projects with comparable characteristics,
327 |  * including primary language, framework, project size, and documentation type.
328 |  * Useful for providing relevant examples and recommendations.
329 | *
330 | * @param analysisData - The analysis data to use for similarity comparison
331 | * @param limit - Maximum number of similar projects to return (default: 5)
332 | *
333 | * @returns Promise resolving to an array of similar project data
334 | *
335 | * @throws {Error} When memory system is not initialized
336 | * @throws {Error} When similarity analysis fails
337 | *
338 | * @example
339 | * ```typescript
340 | * const similar = await getSimilarProjects(analysisData, 3);
341 | * console.log(similar.map(p => p.metadata.projectId));
342 | * // Output: ["project_xyz", "project_abc", "project_def"]
343 | * ```
344 | *
345 | * @since 1.0.0
346 | */
347 | export async function getSimilarProjects(
348 | analysisData: any,
349 | limit: number = 5,
350 | ): Promise<any[]> {
351 | const manager = await initializeMemory();
352 |
353 | // Search for projects with similar characteristics
354 | const similarProjects: any[] = [];
355 |
356 | // Search by language
357 | if (analysisData.language?.primary) {
358 | const languageMatches = await manager.search(
359 | { tags: [analysisData.language.primary] },
360 | { sortBy: "timestamp" },
361 | );
362 | similarProjects.push(...languageMatches);
363 | }
364 |
365 | // Search by framework
366 | if (analysisData.framework?.name) {
367 | const frameworkMatches = await manager.search(
368 | { tags: [analysisData.framework.name] },
369 | { sortBy: "timestamp" },
370 | );
371 | similarProjects.push(...frameworkMatches);
372 | }
373 |
374 | // Deduplicate and return top matches
375 | const unique = Array.from(
376 | new Map(similarProjects.map((p) => [p.metadata.projectId, p])).values(),
377 | );
378 |
379 | return unique.slice(0, limit).map((project) => ({
380 | projectId: project.metadata.projectId,
381 | similarity: calculateSimilarity(analysisData, project.data),
382 | recommendation: project.metadata.ssg,
383 | timestamp: project.timestamp,
384 | }));
385 | }
386 |
387 | function calculateSimilarity(data1: any, data2: any): number {
388 | let score = 0;
389 |
390 | // Language match
391 | if (data1.language?.primary === data2.language?.primary) score += 0.3;
392 |
393 | // Framework match
394 | if (data1.framework?.name === data2.framework?.name) score += 0.3;
395 |
396 | // Size similarity
397 | if (Math.abs((data1.stats?.files || 0) - (data2.stats?.files || 0)) < 100)
398 | score += 0.2;
399 |
400 | // Documentation type match
401 | if (data1.documentation?.type === data2.documentation?.type) score += 0.2;
402 |
403 | return Math.min(score, 1.0);
404 | }
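// Illustrative scoring (not in the repository source): two projects that share a
// primary language and framework, have file counts within 100 of each other, but
// use different documentation types score 0.3 + 0.3 + 0.2 = 0.8.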
405 |
406 | export async function cleanupOldMemories(
407 | daysToKeep: number = 30,
408 | ): Promise<number> {
409 | const manager = await initializeMemory();
410 | const cutoffDate = new Date(Date.now() - daysToKeep * 24 * 60 * 60 * 1000);
411 |
412 | return await manager.cleanup(cutoffDate);
413 | }
414 |
415 | export async function exportMemories(
416 | format: "json" | "csv" = "json",
417 | projectId?: string,
418 | ): Promise<string> {
419 | const manager = await initializeMemory();
420 | return await manager.export(format, projectId);
421 | }
422 |
423 | export async function importMemories(
424 | data: string,
425 | format: "json" | "csv" = "json",
426 | ): Promise<number> {
427 | const manager = await initializeMemory();
428 | return await manager.import(data, format);
429 | }
430 |
431 | export async function getMemoryStatistics(): Promise<any> {
432 | const manager = await initializeMemory();
433 | return await manager.analyze();
434 | }
435 |
436 | export function getMemoryManager(): MemoryManager | null {
437 | return memoryManager;
438 | }
439 |
440 | export async function resetMemoryManager(storageDir?: string): Promise<void> {
441 | if (memoryManager) {
442 | await memoryManager.close();
443 | }
444 | memoryManager = null;
445 | if (storageDir) {
446 | await initializeMemory(storageDir);
447 | }
448 | }
449 |
450 | // Memory handler functions for MCP tools
451 | export async function handleMemoryRecall(args: {
452 | query: string;
453 | type?: string;
454 | limit?: number;
455 | }): Promise<any> {
456 | const manager = await initializeMemory();
457 |
458 | const searchOptions: any = {
459 | sortBy: "timestamp",
460 | limit: args.limit || 10,
461 | };
462 |
463 | if (args.type && args.type !== "all") {
464 | searchOptions.type = args.type;
465 | }
466 |
467 | const memories = await manager.search({}, searchOptions);
468 |
469 | return {
470 | query: args.query,
471 | type: args.type || "all",
472 | count: memories.length,
473 | memories: memories.map((m: any) => ({
474 | id: m.id,
475 | type: m.type,
476 | timestamp: m.timestamp,
477 | data: m.data,
478 | metadata: m.metadata,
479 | })),
480 | };
481 | }
482 |
483 | export async function handleMemoryIntelligentAnalysis(args: {
484 | projectPath: string;
485 | baseAnalysis: any;
486 | }): Promise<any> {
487 | await initializeMemory();
488 |
489 | // Get project history and similar projects for enhanced analysis
490 | const projectId = args.baseAnalysis.projectId || args.projectPath;
491 | const history = await recallProjectHistory(projectId);
492 | const similarProjects = await getSimilarProjects(args.baseAnalysis);
493 |
494 | // Enhance analysis with memory insights
495 | const enhancedAnalysis = {
496 | ...args.baseAnalysis,
497 | memoryInsights: {
498 | projectHistory: history,
499 | similarProjects,
500 | patterns: await extractPatterns(args.baseAnalysis, history.history),
501 | recommendations: await generateRecommendations(
502 | args.baseAnalysis,
503 | similarProjects,
504 | ),
505 | },
506 | };
507 |
508 | // Remember this enhanced analysis
509 | await rememberAnalysis(args.projectPath, enhancedAnalysis);
510 |
511 | return enhancedAnalysis;
512 | }
513 |
514 | export async function handleMemoryEnhancedRecommendation(args: {
515 | projectPath: string;
516 | baseRecommendation: any;
517 | projectFeatures: any;
518 | }): Promise<any> {
519 | await initializeMemory();
520 |
521 | // Get similar projects with same features
522 | const similarProjects = await getSimilarProjects(args.projectFeatures);
523 |
524 | // Analyze success patterns
525 | const successPatterns = await analyzeSuccessPatterns(similarProjects);
526 |
527 | // Enhanced recommendation with memory insights
528 | const enhancedRecommendation = {
529 | ...args.baseRecommendation,
530 | memoryEnhanced: {
531 | similarProjects,
532 | successPatterns,
533 | confidence: calculateConfidence(args.baseRecommendation, successPatterns),
534 | alternativeOptions: await getAlternativeOptions(
535 | args.baseRecommendation,
536 | successPatterns,
537 | ),
538 | },
539 | };
540 |
541 | return enhancedRecommendation;
542 | }
543 |
544 | // Helper functions for memory enhancement
545 | async function extractPatterns(
546 | analysis: any,
547 | history: any[],
548 | ): Promise<string[]> {
549 | const patterns: string[] = [];
550 |
551 | // Analyze deployment patterns
552 | const deployments = history.filter((h: any) => h.type === "deployment");
553 | if (deployments.length > 0) {
554 | const successfulDeployments = deployments.filter(
555 | (d: any) => d.data.status === "success",
556 | );
557 | if (successfulDeployments.length > 0) {
558 | patterns.push("Previous successful deployments found");
559 | }
560 | }
561 |
562 | // Analyze SSG patterns
563 | const recommendations = history.filter(
564 | (h: any) => h.type === "recommendation",
565 | );
566 | if (recommendations.length > 0) {
567 | const lastSSG =
568 | recommendations[recommendations.length - 1].data.recommended;
569 | patterns.push(`Previously recommended SSG: ${lastSSG}`);
570 | }
571 |
572 | return patterns;
573 | }
574 |
575 | async function generateRecommendations(
576 | analysis: any,
577 | similarProjects: any[],
578 | ): Promise<string[]> {
579 | const recommendations: string[] = [];
580 |
581 | if (similarProjects.length > 0) {
582 | const popularSSG = findMostPopularSSG(similarProjects);
583 | if (popularSSG) {
584 | recommendations.push(
585 | `Consider ${popularSSG} based on similar project success`,
586 | );
587 | }
588 | }
589 |
590 | return recommendations;
591 | }
592 |
593 | async function analyzeSuccessPatterns(similarProjects: any[]): Promise<any> {
594 | const patterns = {
595 | ssgSuccess: {} as Record<string, number>,
596 | deploymentPatterns: [] as string[],
597 | commonFeatures: [] as string[],
598 | };
599 |
600 | // Analyze SSG success rates
601 | similarProjects.forEach((project: any) => {
602 | const ssg = project.recommendation;
603 | if (ssg) {
604 | patterns.ssgSuccess[ssg] = (patterns.ssgSuccess[ssg] || 0) + 1;
605 | }
606 | });
607 |
608 | return patterns;
609 | }
610 |
611 | function calculateConfidence(
612 | baseRecommendation: any,
613 | successPatterns: any,
614 | ): number {
615 | const recommended = baseRecommendation.recommended;
616 | const successCount = successPatterns.ssgSuccess[recommended] || 0;
617 | const totalProjects = Object.values(
618 | successPatterns.ssgSuccess as Record<string, number>,
619 | ).reduce((a: number, b: number) => a + b, 0);
620 |
621 | if (totalProjects === 0) return 0.5; // Default confidence
622 |
623 | return Math.min(successCount / totalProjects + 0.3, 1.0);
624 | }
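// Illustrative only: if 3 of 5 similar projects used the recommended SSG,
// confidence = Math.min(3 / 5 + 0.3, 1.0) = 0.9; with no similar projects the
// 0.5 default above is returned.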
625 |
626 | async function getAlternativeOptions(
627 | baseRecommendation: any,
628 | successPatterns: any,
629 | ): Promise<string[]> {
630 | const sorted = Object.entries(successPatterns.ssgSuccess)
631 | .sort(([, a]: [string, any], [, b]: [string, any]) => b - a)
632 | .map(([ssg]) => ssg);
633 |
634 | // Return top 2 alternatives different from the base recommendation
635 | return sorted
636 | .filter((ssg) => ssg !== baseRecommendation.recommended)
637 | .slice(0, 2);
638 | }
639 |
640 | function findMostPopularSSG(projects: any[]): string | null {
641 | const ssgCount: Record<string, number> = {};
642 |
643 | projects.forEach((project) => {
644 | const ssg = project.recommendation;
645 | if (ssg) {
646 | ssgCount[ssg] = (ssgCount[ssg] || 0) + 1;
647 | }
648 | });
649 |
650 | const sorted = Object.entries(ssgCount).sort(([, a], [, b]) => b - a);
651 | return sorted.length > 0 ? sorted[0][0] : null;
652 | }
653 |
```
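A hedged end-to-end sketch of the integration helpers above, showing how an analysis, recommendation, and deployment feed the insight queries. The project path, project ID, repository URL, and payload fields are illustrative, not taken from the repository.

```typescript
import {
  rememberAnalysis,
  rememberRecommendation,
  rememberDeployment,
  getProjectInsights,
  recallProjectHistory,
} from "./integration.js";

async function flow() {
  // Store an analysis, then attach a recommendation and a deployment outcome.
  const analysisId = await rememberAnalysis("/path/to/project", {
    projectId: "project_abc123",
    language: { primary: "typescript" },
    framework: { name: "react" },
    stats: { files: 120 },
  });

  await rememberRecommendation(analysisId, {
    recommended: "docusaurus",
    confidence: 0.9,
  });

  await rememberDeployment("https://github.com/user/repo", {
    ssg: "docusaurus",
    status: "success",
  });

  // Query the accumulated history and derived insights.
  const history = await recallProjectHistory("project_abc123");
  const insights = await getProjectInsights("project_abc123");
  console.log(history.projectId, insights);
}

flow().catch(console.error);
```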
--------------------------------------------------------------------------------
/src/tools/check-documentation-links.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { z } from "zod";
2 | import { readFile, readdir, stat } from "fs/promises";
3 | import { join, extname, resolve, relative, dirname } from "path";
4 | import { MCPToolResponse } from "../types/api.js";
5 |
6 | // Input validation schema
7 | const LinkCheckInputSchema = z.object({
8 | documentation_path: z.string().default("./docs"),
9 | check_external_links: z.boolean().default(true),
10 | check_internal_links: z.boolean().default(true),
11 | check_anchor_links: z.boolean().default(true),
12 | timeout_ms: z.number().min(1000).max(30000).default(5000),
13 | max_concurrent_checks: z.number().min(1).max(20).default(5),
14 | allowed_domains: z.array(z.string()).default([]),
15 | ignore_patterns: z.array(z.string()).default([]),
16 | fail_on_broken_links: z.boolean().default(false),
17 | output_format: z.enum(["summary", "detailed", "json"]).default("detailed"),
18 | });
19 |
20 | type LinkCheckInput = z.infer<typeof LinkCheckInputSchema>;
21 |
22 | interface LinkCheckResult {
23 | url: string;
24 | status: "valid" | "broken" | "warning" | "skipped";
25 | statusCode?: number;
26 | error?: string;
27 | responseTime?: number;
28 | sourceFile: string;
29 | lineNumber?: number;
30 | linkType: "internal" | "external" | "anchor" | "mailto" | "tel";
31 | }
32 |
33 | interface LinkCheckReport {
34 | summary: {
35 | totalLinks: number;
36 | validLinks: number;
37 | brokenLinks: number;
38 | warningLinks: number;
39 | skippedLinks: number;
40 | executionTime: number;
41 | filesScanned: number;
42 | };
43 | results: LinkCheckResult[];
44 | recommendations: string[];
45 | configuration: {
46 | checkExternalLinks: boolean;
47 | checkInternalLinks: boolean;
48 | checkAnchorLinks: boolean;
49 | timeoutMs: number;
50 | maxConcurrentChecks: number;
51 | };
52 | }
53 |
54 | export async function checkDocumentationLinks(
55 | input: Partial<LinkCheckInput>,
56 | ): Promise<MCPToolResponse<LinkCheckReport>> {
57 | const startTime = Date.now();
58 |
59 | try {
60 | // Validate input with defaults
61 | const validatedInput = LinkCheckInputSchema.parse(input);
62 | const {
63 | documentation_path,
64 | check_external_links,
65 | check_internal_links,
66 | check_anchor_links,
67 | timeout_ms,
68 | max_concurrent_checks,
69 | allowed_domains,
70 | ignore_patterns,
71 | fail_on_broken_links,
72 | } = validatedInput;
73 |
74 | // Scan documentation files
75 | const documentationFiles = await scanDocumentationFiles(documentation_path);
76 |
77 | if (documentationFiles.length === 0) {
78 | return {
79 | success: false,
80 | error: {
81 | code: "NO_DOCUMENTATION_FILES",
82 | message: "No documentation files found in the specified path",
83 | details: `Searched in: ${documentation_path}`,
84 | resolution:
85 | "Verify the documentation_path parameter points to a directory containing markdown files",
86 | },
87 | metadata: {
88 | toolVersion: "1.0.0",
89 | executionTime: Date.now() - startTime,
90 | timestamp: new Date().toISOString(),
91 | },
92 | };
93 | }
94 |
95 | // Extract all links from documentation files
96 | const allLinks = await extractLinksFromFiles(
97 | documentationFiles,
98 | documentation_path,
99 | );
100 |
101 | // Filter links based on configuration
102 | const filteredLinks = filterLinks(allLinks, {
103 | checkExternalLinks: check_external_links,
104 | checkInternalLinks: check_internal_links,
105 | checkAnchorLinks: check_anchor_links,
106 | ignorePatterns: ignore_patterns,
107 | });
108 |
109 | // Check links with concurrency control
110 | const linkResults = await checkLinksWithConcurrency(filteredLinks, {
111 | timeoutMs: timeout_ms,
112 | maxConcurrent: max_concurrent_checks,
113 | allowedDomains: allowed_domains,
114 | documentationPath: documentation_path,
115 | });
116 |
117 | // Generate report
118 | const report = generateLinkCheckReport(linkResults, {
119 | checkExternalLinks: check_external_links,
120 | checkInternalLinks: check_internal_links,
121 | checkAnchorLinks: check_anchor_links,
122 | timeoutMs: timeout_ms,
123 | maxConcurrentChecks: max_concurrent_checks,
124 | filesScanned: documentationFiles.length,
125 | executionTime: Date.now() - startTime,
126 | });
127 |
128 | // Check if we should fail on broken links
129 | if (fail_on_broken_links && report.summary.brokenLinks > 0) {
130 | return {
131 | success: false,
132 | error: {
133 | code: "BROKEN_LINKS_FOUND",
134 | message: `Found ${report.summary.brokenLinks} broken links`,
135 | details: `${report.summary.brokenLinks} out of ${report.summary.totalLinks} links are broken`,
136 | resolution:
137 | "Fix the broken links or set fail_on_broken_links to false",
138 | },
139 | data: report,
140 | metadata: {
141 | toolVersion: "1.0.0",
142 | executionTime: Date.now() - startTime,
143 | timestamp: new Date().toISOString(),
144 | },
145 | };
146 | }
147 |
148 | return {
149 | success: true,
150 | data: report,
151 | metadata: {
152 | toolVersion: "1.0.0",
153 | executionTime: Date.now() - startTime,
154 | timestamp: new Date().toISOString(),
155 | },
156 | };
157 | } catch (error) {
158 | return {
159 | success: false,
160 | error: {
161 | code: "LINK_CHECK_ERROR",
162 | message: "Failed to check documentation links",
163 | details:
164 | error instanceof Error ? error.message : "Unknown error occurred",
165 | resolution:
166 | "Check the documentation path and ensure files are accessible",
167 | },
168 | metadata: {
169 | toolVersion: "1.0.0",
170 | executionTime: Date.now() - startTime,
171 | timestamp: new Date().toISOString(),
172 | },
173 | };
174 | }
175 | }
176 |
177 | async function scanDocumentationFiles(basePath: string): Promise<string[]> {
178 | const files: string[] = [];
179 |
180 | async function scanDirectory(dirPath: string): Promise<void> {
181 | try {
182 | const entries = await readdir(dirPath);
183 |
184 | for (const entry of entries) {
185 | const fullPath = join(dirPath, entry);
186 | const stats = await stat(fullPath);
187 |
188 | if (stats.isDirectory()) {
189 | // Skip node_modules and hidden directories
190 | if (!entry.startsWith(".") && entry !== "node_modules") {
191 | await scanDirectory(fullPath);
192 | }
193 | } else if (stats.isFile()) {
194 | const ext = extname(entry).toLowerCase();
195 | if ([".md", ".mdx", ".markdown"].includes(ext)) {
196 | files.push(fullPath);
197 | }
198 | }
199 | }
200 | } catch (error) {
201 | // Skip directories we can't read
202 | }
203 | }
204 |
205 | await scanDirectory(basePath);
206 | return files;
207 | }
208 |
209 | async function extractLinksFromFiles(
210 | files: string[],
211 | basePath: string,
212 | ): Promise<
213 | Array<{
214 | url: string;
215 | sourceFile: string;
216 | lineNumber: number;
217 | linkType: "internal" | "external" | "anchor" | "mailto" | "tel";
218 | }>
219 | > {
220 | const allLinks: Array<{
221 | url: string;
222 | sourceFile: string;
223 | lineNumber: number;
224 | linkType: "internal" | "external" | "anchor" | "mailto" | "tel";
225 | }> = [];
226 |
227 | // Regex patterns for different link types
228 | const markdownLinkRegex = /\[([^\]]*)\]\(([^)]+)\)/g;
229 | const htmlLinkRegex = /<a[^>]+href=["']([^"']+)["'][^>]*>/gi;
230 | const refLinkRegex = /\[([^\]]+)\]:\s*(.+)/g;
231 |
232 | for (const file of files) {
233 | try {
234 | const content = await readFile(file, "utf-8");
235 | const lines = content.split("\n");
236 | // Create proper relative file path
237 | const absoluteBasePath = resolve(basePath);
238 | const absoluteFilePath = resolve(file);
239 | const relativeFile = relative(absoluteBasePath, absoluteFilePath).replace(
240 | /\\/g,
241 | "/",
242 | );
243 |
244 | // Extract markdown links
245 | lines.forEach((line, index) => {
246 | let match;
247 |
248 | // Markdown links [text](url)
249 | while ((match = markdownLinkRegex.exec(line)) !== null) {
250 | const url = match[2].trim();
251 | if (url && !url.startsWith("#")) {
252 | // Skip empty and anchor-only links
253 | allLinks.push({
254 | url,
255 | sourceFile: relativeFile,
256 | lineNumber: index + 1,
257 | linkType: determineLinkType(url),
258 | });
259 | }
260 | }
261 |
262 | // HTML links
263 | while ((match = htmlLinkRegex.exec(line)) !== null) {
264 | const url = match[1].trim();
265 | if (url && !url.startsWith("#")) {
266 | allLinks.push({
267 | url,
268 | sourceFile: relativeFile,
269 | lineNumber: index + 1,
270 | linkType: determineLinkType(url),
271 | });
272 | }
273 | }
274 |
275 | // Reference links
276 | while ((match = refLinkRegex.exec(line)) !== null) {
277 | const url = match[2].trim();
278 | if (url && !url.startsWith("#")) {
279 | allLinks.push({
280 | url,
281 | sourceFile: relativeFile,
282 | lineNumber: index + 1,
283 | linkType: determineLinkType(url),
284 | });
285 | }
286 | }
287 | });
288 | } catch (error) {
289 | // Skip files we can't read
290 | }
291 | }
292 |
293 | return allLinks;
294 | }
295 |
296 | function determineLinkType(
297 | url: string,
298 | ): "internal" | "external" | "anchor" | "mailto" | "tel" {
299 | if (url.startsWith("mailto:")) return "mailto";
300 | if (url.startsWith("tel:")) return "tel";
301 | if (url.startsWith("#")) return "anchor";
302 | if (url.startsWith("http://") || url.startsWith("https://"))
303 | return "external";
304 | return "internal";
305 | }
306 |
307 | function filterLinks(
308 | links: Array<{
309 | url: string;
310 | sourceFile: string;
311 | lineNumber: number;
312 | linkType: "internal" | "external" | "anchor" | "mailto" | "tel";
313 | }>,
314 | options: {
315 | checkExternalLinks: boolean;
316 | checkInternalLinks: boolean;
317 | checkAnchorLinks: boolean;
318 | ignorePatterns: string[];
319 | },
320 | ) {
321 | return links.filter((link) => {
322 | // Check if link should be ignored based on patterns
323 | if (options.ignorePatterns.some((pattern) => link.url.includes(pattern))) {
324 | return false;
325 | }
326 |
327 | // Filter by link type
328 | switch (link.linkType) {
329 | case "external":
330 | return options.checkExternalLinks;
331 | case "internal":
332 | return options.checkInternalLinks;
333 | case "anchor":
334 | return options.checkAnchorLinks;
335 | case "mailto":
336 | case "tel":
337 | return false; // Skip these for now
338 | default:
339 | return true;
340 | }
341 | });
342 | }
343 |
344 | async function checkLinksWithConcurrency(
345 | links: Array<{
346 | url: string;
347 | sourceFile: string;
348 | lineNumber: number;
349 | linkType: "internal" | "external" | "anchor" | "mailto" | "tel";
350 | }>,
351 | options: {
352 | timeoutMs: number;
353 | maxConcurrent: number;
354 | allowedDomains: string[];
355 | documentationPath: string;
356 | },
357 | ): Promise<LinkCheckResult[]> {
358 | const results: LinkCheckResult[] = [];
359 |
360 | async function checkSingleLink(link: {
361 | url: string;
362 | sourceFile: string;
363 | lineNumber: number;
364 | linkType: "internal" | "external" | "anchor" | "mailto" | "tel";
365 | }): Promise<LinkCheckResult> {
366 | const startTime = Date.now();
367 |
368 | try {
369 | if (link.linkType === "internal") {
370 | return await checkInternalLink(link, options.documentationPath);
371 | } else if (link.linkType === "external") {
372 | return await checkExternalLink(
373 | link,
374 | options.timeoutMs,
375 | options.allowedDomains,
376 | );
377 | } else if (link.linkType === "anchor") {
378 | return await checkAnchorLink(link, options.documentationPath);
379 | }
380 |
381 | return {
382 | url: link.url,
383 | status: "skipped",
384 | sourceFile: link.sourceFile,
385 | lineNumber: link.lineNumber,
386 | linkType: link.linkType,
387 | responseTime: Date.now() - startTime,
388 | };
389 | } catch (error) {
390 | return {
391 | url: link.url,
392 | status: "broken",
393 | error: error instanceof Error ? error.message : "Unknown error",
394 | sourceFile: link.sourceFile,
395 | lineNumber: link.lineNumber,
396 | linkType: link.linkType,
397 | responseTime: Date.now() - startTime,
398 | };
399 | }
400 | }
401 |
402 | // Process links with concurrency control
403 | const chunks = [];
404 | for (let i = 0; i < links.length; i += options.maxConcurrent) {
405 | chunks.push(links.slice(i, i + options.maxConcurrent));
406 | }
407 |
408 | for (const chunk of chunks) {
409 | const chunkResults = await Promise.all(chunk.map(checkSingleLink));
410 | results.push(...chunkResults);
411 | }
412 |
413 | return results;
414 | }
415 |
416 | async function checkInternalLink(
417 | link: {
418 | url: string;
419 | sourceFile: string;
420 | lineNumber: number;
421 | linkType: "internal" | "external" | "anchor" | "mailto" | "tel";
422 | },
423 | documentationPath: string,
424 | ): Promise<LinkCheckResult> {
425 | const startTime = Date.now();
426 |
427 | try {
428 | let targetPath = link.url;
429 |
430 | // Remove anchor if present
431 | const [filePath] = targetPath.split("#");
432 |
433 | // Handle relative paths properly using Node.js path resolution
434 | const absoluteDocPath = resolve(documentationPath);
435 | const sourceFileAbsolutePath = resolve(absoluteDocPath, link.sourceFile);
436 | const sourceDir = dirname(sourceFileAbsolutePath);
437 |
438 | if (filePath.startsWith("./")) {
439 | // Current directory reference - resolve relative to source file directory
440 | targetPath = resolve(sourceDir, filePath.substring(2));
441 | } else if (filePath.startsWith("../")) {
442 | // Parent directory reference - resolve relative to source file directory
443 | targetPath = resolve(sourceDir, filePath);
444 | } else if (filePath.startsWith("/")) {
445 | // Absolute path from documentation root
446 | targetPath = resolve(absoluteDocPath, filePath.substring(1));
447 | } else {
448 | // Relative path - resolve relative to source file directory
449 | targetPath = resolve(sourceDir, filePath);
450 | }
451 |
452 | try {
453 | await stat(targetPath);
454 | return {
455 | url: link.url,
456 | status: "valid",
457 | sourceFile: link.sourceFile,
458 | lineNumber: link.lineNumber,
459 | linkType: link.linkType,
460 | responseTime: Date.now() - startTime,
461 | };
462 | } catch {
463 | return {
464 | url: link.url,
465 | status: "broken",
466 | error: "File not found",
467 | sourceFile: link.sourceFile,
468 | lineNumber: link.lineNumber,
469 | linkType: link.linkType,
470 | responseTime: Date.now() - startTime,
471 | };
472 | }
473 | } catch (error) {
474 | return {
475 | url: link.url,
476 | status: "broken",
477 | error: error instanceof Error ? error.message : "Unknown error",
478 | sourceFile: link.sourceFile,
479 | lineNumber: link.lineNumber,
480 | linkType: link.linkType,
481 | responseTime: Date.now() - startTime,
482 | };
483 | }
484 | }
485 |
486 | async function checkExternalLink(
487 | link: {
488 | url: string;
489 | sourceFile: string;
490 | lineNumber: number;
491 | linkType: "internal" | "external" | "anchor" | "mailto" | "tel";
492 | },
493 | timeoutMs: number,
494 | allowedDomains: string[],
495 | ): Promise<LinkCheckResult> {
496 | const startTime = Date.now();
497 |
498 | try {
499 | // Check if domain is in allowed list (if specified)
500 | if (allowedDomains.length > 0) {
501 | const url = new URL(link.url);
502 | const isAllowed = allowedDomains.some(
503 | (domain) =>
504 | url.hostname === domain || url.hostname.endsWith("." + domain),
505 | );
506 |
507 | if (!isAllowed) {
508 | return {
509 | url: link.url,
510 | status: "skipped",
511 | error: "Domain not in allowed list",
512 | sourceFile: link.sourceFile,
513 | lineNumber: link.lineNumber,
514 | linkType: link.linkType,
515 | responseTime: Date.now() - startTime,
516 | };
517 | }
518 | }
519 |
520 | // Simple HEAD request to check if URL is accessible
521 | const controller = new AbortController();
522 | const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
523 |
524 | try {
525 | const response = await fetch(link.url, {
526 | method: "HEAD",
527 | signal: controller.signal,
528 | headers: {
529 | "User-Agent": "DocuMCP Link Checker 1.0",
530 | },
531 | });
532 |
533 | clearTimeout(timeoutId);
534 |
535 | if (response.ok) {
536 | return {
537 | url: link.url,
538 | status: "valid",
539 | statusCode: response.status,
540 | sourceFile: link.sourceFile,
541 | lineNumber: link.lineNumber,
542 | linkType: link.linkType,
543 | responseTime: Date.now() - startTime,
544 | };
545 | } else {
546 | return {
547 | url: link.url,
548 | status: "broken",
549 | statusCode: response.status,
550 | error: `HTTP ${response.status}: ${response.statusText}`,
551 | sourceFile: link.sourceFile,
552 | lineNumber: link.lineNumber,
553 | linkType: link.linkType,
554 | responseTime: Date.now() - startTime,
555 | };
556 | }
557 | } catch (fetchError) {
558 | clearTimeout(timeoutId);
559 |
560 | if (fetchError instanceof Error && fetchError.name === "AbortError") {
561 | return {
562 | url: link.url,
563 | status: "warning",
564 | error: "Request timeout",
565 | sourceFile: link.sourceFile,
566 | lineNumber: link.lineNumber,
567 | linkType: link.linkType,
568 | responseTime: Date.now() - startTime,
569 | };
570 | }
571 |
572 | throw fetchError;
573 | }
574 | } catch (error) {
575 | return {
576 | url: link.url,
577 | status: "broken",
578 | error: error instanceof Error ? error.message : "Unknown error",
579 | sourceFile: link.sourceFile,
580 | lineNumber: link.lineNumber,
581 | linkType: link.linkType,
582 | responseTime: Date.now() - startTime,
583 | };
584 | }
585 | }
586 |
587 | async function checkAnchorLink(
588 | link: {
589 | url: string;
590 | sourceFile: string;
591 | lineNumber: number;
592 | linkType: "internal" | "external" | "anchor" | "mailto" | "tel";
593 | },
594 | _documentationPath: string,
595 | ): Promise<LinkCheckResult> {
596 | const startTime = Date.now();
597 |
598 | // For now, just mark anchor links as valid
599 | // In a more sophisticated implementation, we would parse the target file
600 | // and check if the anchor exists
601 | return {
602 | url: link.url,
603 | status: "valid",
604 | sourceFile: link.sourceFile,
605 | lineNumber: link.lineNumber,
606 | linkType: link.linkType,
607 | responseTime: Date.now() - startTime,
608 | };
609 | }
610 |
611 | function generateLinkCheckReport(
612 | results: LinkCheckResult[],
613 | config: {
614 | checkExternalLinks: boolean;
615 | checkInternalLinks: boolean;
616 | checkAnchorLinks: boolean;
617 | timeoutMs: number;
618 | maxConcurrentChecks: number;
619 | filesScanned: number;
620 | executionTime: number;
621 | },
622 | ): LinkCheckReport {
623 | const summary = {
624 | totalLinks: results.length,
625 | validLinks: results.filter((r) => r.status === "valid").length,
626 | brokenLinks: results.filter((r) => r.status === "broken").length,
627 | warningLinks: results.filter((r) => r.status === "warning").length,
628 | skippedLinks: results.filter((r) => r.status === "skipped").length,
629 | executionTime: config.executionTime,
630 | filesScanned: config.filesScanned,
631 | };
632 |
633 | const recommendations: string[] = [];
634 |
635 | if (summary.brokenLinks > 0) {
636 | recommendations.push(
637 | `🔴 Fix ${summary.brokenLinks} broken links to improve documentation quality`,
638 | );
639 | }
640 |
641 | if (summary.warningLinks > 0) {
642 | recommendations.push(
643 | `🟡 Review ${summary.warningLinks} warning links that may need attention`,
644 | );
645 | }
646 |
647 | if (summary.validLinks === summary.totalLinks) {
648 | recommendations.push(
649 | "✅ All links are valid - excellent documentation quality!",
650 | );
651 | }
652 |
653 | if (summary.totalLinks > 100) {
654 | recommendations.push(
655 | "📊 Consider implementing automated link checking in CI/CD pipeline",
656 | );
657 | }
658 |
659 | return {
660 | summary,
661 | results,
662 | recommendations,
663 | configuration: {
664 | checkExternalLinks: config.checkExternalLinks,
665 | checkInternalLinks: config.checkInternalLinks,
666 | checkAnchorLinks: config.checkAnchorLinks,
667 | timeoutMs: config.timeoutMs,
668 | maxConcurrentChecks: config.maxConcurrentChecks,
669 | },
670 | };
671 | }
672 |
```
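For orientation, here is a minimal caller sketch for the link-check tool above. It is not repository code: the import path is assumed, and the handling relies only on the `MCPToolResponse` fields (`success`, `data`, `error`, `metadata`) and `LinkCheckReport` fields that appear in the return sites of `checkDocumentationLinks`. All input keys are optional because the input is `Partial<LinkCheckInput>` with schema defaults.

```typescript
// Hypothetical import path; adjust to wherever checkDocumentationLinks is exported.
import { checkDocumentationLinks } from "./check-documentation-links.js";

async function runLinkCheck(): Promise<void> {
  const response = await checkDocumentationLinks({
    documentation_path: "./docs", // example value
    check_external_links: true,
    check_internal_links: true,
    check_anchor_links: true,
    fail_on_broken_links: false,
  });

  // A report may be attached even when success is false (BROKEN_LINKS_FOUND),
  // so read data before checking the success flag.
  if (response.data) {
    const { summary, recommendations } = response.data;
    console.log(
      `Checked ${summary.totalLinks} links across ${summary.filesScanned} files: ` +
        `${summary.brokenLinks} broken, ${summary.warningLinks} warnings, ` +
        `${summary.skippedLinks} skipped in ${summary.executionTime}ms`,
    );
    recommendations.forEach((r) => console.log(r));
  }

  if (!response.success) {
    console.error(`${response.error?.code}: ${response.error?.message}`);
  }
}

runLinkCheck().catch(console.error);
```

Reading `data` first mirrors the tool's behavior of returning the full report alongside the `BROKEN_LINKS_FOUND` error when `fail_on_broken_links` is set.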
--------------------------------------------------------------------------------
/tests/tools/test-local-deployment.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { testLocalDeployment } from "../../src/tools/test-local-deployment.js";
2 | import * as childProcess from "child_process";
3 | import * as fs from "fs";
4 |
5 | // Create simpler mocking approach
6 |
7 | describe("testLocalDeployment", () => {
8 | const testRepoPath = process.cwd();
9 |
10 | afterEach(() => {
11 | jest.restoreAllMocks();
12 | });
13 |
14 | describe("Input validation", () => {
15 | it("should handle invalid SSG parameter", async () => {
16 | await expect(
17 | testLocalDeployment({
18 | repositoryPath: "/test/path",
19 | ssg: "invalid" as any,
20 | }),
21 | ).rejects.toThrow();
22 | });
23 |
24 | it("should handle missing required parameters", async () => {
25 | await expect(
26 | testLocalDeployment({
27 | ssg: "docusaurus",
28 | } as any),
29 | ).rejects.toThrow();
30 | });
31 |
32 | it("should handle unsupported SSG gracefully", async () => {
33 | // This should throw a ZodError due to input validation
34 | await expect(
35 | testLocalDeployment({
36 | repositoryPath: testRepoPath,
37 | ssg: "gatsby" as any,
38 | }),
39 | ).rejects.toThrow("Invalid enum value");
40 | });
41 | });
42 |
43 | describe("Basic functionality", () => {
44 | it("should return proper response structure", async () => {
45 | const result = await testLocalDeployment({
46 | repositoryPath: testRepoPath,
47 | ssg: "hugo",
48 | skipBuild: true,
49 | });
50 |
51 | expect(result.content).toBeDefined();
52 | expect(Array.isArray(result.content)).toBe(true);
53 | expect(result.content.length).toBeGreaterThan(0);
54 | expect(() => JSON.parse(result.content[0].text)).not.toThrow();
55 | });
56 |
57 | it("should use default port when not specified", async () => {
58 | const result = await testLocalDeployment({
59 | repositoryPath: testRepoPath,
60 | ssg: "hugo",
61 | skipBuild: true,
62 | });
63 |
64 | const parsedResult = JSON.parse(result.content[0].text);
65 | expect(parsedResult.port).toBe(3000);
66 | });
67 |
68 | it("should use custom port when specified", async () => {
69 | const result = await testLocalDeployment({
70 | repositoryPath: testRepoPath,
71 | ssg: "hugo",
72 | port: 4000,
73 | skipBuild: true,
74 | });
75 |
76 | const parsedResult = JSON.parse(result.content[0].text);
77 | expect(parsedResult.port).toBe(4000);
78 | });
79 |
80 | it("should use custom timeout when specified", async () => {
81 | const result = await testLocalDeployment({
82 | repositoryPath: testRepoPath,
83 | ssg: "hugo",
84 | timeout: 120,
85 | skipBuild: true,
86 | });
87 |
88 | const parsedResult = JSON.parse(result.content[0].text);
89 | expect(parsedResult.buildSuccess).toBeDefined();
90 | });
91 | });
92 |
93 | describe("SSG support", () => {
94 | it("should handle all supported SSG types", async () => {
95 | const ssgs = ["jekyll", "hugo", "docusaurus", "mkdocs", "eleventy"];
96 |
97 | for (const ssg of ssgs) {
98 | const result = await testLocalDeployment({
99 | repositoryPath: testRepoPath,
100 | ssg: ssg as any,
101 | skipBuild: true,
102 | });
103 |
104 | const parsedResult = JSON.parse(result.content[0].text);
105 | expect(parsedResult.ssg).toBe(ssg);
106 | expect(parsedResult.buildSuccess).toBeDefined();
107 | }
108 | });
109 |
110 | it("should generate test script for all SSG types", async () => {
111 | const ssgs = ["jekyll", "hugo", "docusaurus", "mkdocs", "eleventy"];
112 |
113 | for (const ssg of ssgs) {
114 | const result = await testLocalDeployment({
115 | repositoryPath: testRepoPath,
116 | ssg: ssg as any,
117 | skipBuild: true,
118 | port: 4000,
119 | });
120 |
121 | const parsedResult = JSON.parse(result.content[0].text);
122 | expect(parsedResult.testScript).toContain(
123 | `# Local Deployment Test Script for ${ssg}`,
124 | );
125 | expect(parsedResult.testScript).toContain("http://localhost:4000");
126 | }
127 | });
128 |
129 | it("should include install commands for Node.js-based SSGs", async () => {
130 | const result = await testLocalDeployment({
131 | repositoryPath: testRepoPath,
132 | ssg: "docusaurus",
133 | skipBuild: true,
134 | });
135 |
136 | const parsedResult = JSON.parse(result.content[0].text);
137 | expect(parsedResult.testScript).toContain("npm install");
138 | });
139 |
140 | it("should not include install commands for non-Node.js SSGs", async () => {
141 | const result = await testLocalDeployment({
142 | repositoryPath: testRepoPath,
143 | ssg: "hugo",
144 | skipBuild: true,
145 | });
146 |
147 | const parsedResult = JSON.parse(result.content[0].text);
148 | expect(parsedResult.testScript).not.toContain("npm install");
149 | });
150 | });
151 |
152 | describe("Configuration handling", () => {
153 | it("should provide recommendations when configuration is missing", async () => {
154 | const result = await testLocalDeployment({
155 | repositoryPath: testRepoPath,
156 | ssg: "jekyll", // Jekyll config unlikely to exist in this repo
157 | skipBuild: true,
158 | });
159 |
160 | const parsedResult = JSON.parse(result.content[0].text);
161 | expect(parsedResult.recommendations).toEqual(
162 | expect.arrayContaining([
163 | expect.stringContaining("Missing configuration file"),
164 | ]),
165 | );
166 | });
167 |
168 | it("should provide next steps for missing configuration", async () => {
169 | const result = await testLocalDeployment({
170 | repositoryPath: testRepoPath,
171 | ssg: "jekyll",
172 | skipBuild: true,
173 | });
174 |
175 | const parsedResult = JSON.parse(result.content[0].text);
176 | expect(parsedResult.nextSteps).toEqual(
177 | expect.arrayContaining([expect.stringContaining("generate_config")]),
178 | );
179 | });
180 | });
181 |
182 | describe("Error handling", () => {
183 | it("should handle general errors gracefully", async () => {
184 | jest.spyOn(process, "chdir").mockImplementation(() => {
185 | throw new Error("Permission denied");
186 | });
187 |
188 | const result = await testLocalDeployment({
189 | repositoryPath: testRepoPath,
190 | ssg: "hugo",
191 | });
192 |
193 | // The tool returns an error response structure instead of throwing
194 | const parsedResult = JSON.parse(result.content[0].text);
195 | expect(parsedResult.success).toBe(false);
196 | expect(parsedResult.error.code).toBe("LOCAL_TEST_FAILED");
197 | expect(parsedResult.error.message).toContain("Permission denied");
198 | });
199 |
200 | it("should handle non-existent repository path", async () => {
201 | const result = await testLocalDeployment({
202 | repositoryPath: "/non/existent/path",
203 | ssg: "hugo",
204 | skipBuild: true,
205 | });
206 |
207 | const parsedResult = JSON.parse(result.content[0].text);
208 | // Should still work with skipBuild, but may have warnings
209 | expect(parsedResult).toBeDefined();
210 | expect(result.content).toBeDefined();
211 | });
212 | });
213 |
214 | describe("Response structure validation", () => {
215 | it("should include all required response fields", async () => {
216 | const result = await testLocalDeployment({
217 | repositoryPath: testRepoPath,
218 | ssg: "hugo",
219 | skipBuild: true,
220 | });
221 |
222 | const parsedResult = JSON.parse(result.content[0].text);
223 | expect(parsedResult).toHaveProperty("buildSuccess");
224 | expect(parsedResult).toHaveProperty("ssg");
225 | expect(parsedResult).toHaveProperty("port");
226 | expect(parsedResult).toHaveProperty("testScript");
227 | expect(parsedResult).toHaveProperty("recommendations");
228 | expect(parsedResult).toHaveProperty("nextSteps");
229 | });
230 |
231 | it("should include tool recommendations in next steps", async () => {
232 | const result = await testLocalDeployment({
233 | repositoryPath: testRepoPath,
234 | ssg: "hugo",
235 | skipBuild: true,
236 | });
237 |
238 | const parsedResult = JSON.parse(result.content[0].text);
239 | expect(Array.isArray(parsedResult.nextSteps)).toBe(true);
240 | expect(parsedResult.nextSteps.length).toBeGreaterThan(0);
241 | });
242 |
243 | it("should validate test script content structure", async () => {
244 | const result = await testLocalDeployment({
245 | repositoryPath: testRepoPath,
246 | ssg: "hugo",
247 | port: 8080,
248 | skipBuild: true,
249 | });
250 |
251 | const parsedResult = JSON.parse(result.content[0].text);
252 | const testScript = parsedResult.testScript;
253 |
254 | expect(testScript).toContain("# Local Deployment Test Script for hugo");
255 | expect(testScript).toContain("http://localhost:8080");
256 | expect(testScript).toContain("hugo server");
257 | expect(testScript).toContain("--port 8080");
258 | });
259 |
260 | it("should handle different timeout values", async () => {
261 | const timeouts = [30, 60, 120, 300];
262 |
263 | for (const timeout of timeouts) {
264 | const result = await testLocalDeployment({
265 | repositoryPath: testRepoPath,
266 | ssg: "hugo",
267 | timeout,
268 | skipBuild: true,
269 | });
270 |
271 | const parsedResult = JSON.parse(result.content[0].text);
272 | expect(parsedResult.buildSuccess).toBeDefined();
273 | // Timeout is not directly returned in response, but test should pass
274 | }
275 | });
276 |
277 | it("should provide appropriate recommendations for each SSG type", async () => {
278 | const ssgConfigs = {
279 | jekyll: "_config.yml",
280 | hugo: "config.toml",
281 | docusaurus: "docusaurus.config.js",
282 | mkdocs: "mkdocs.yml",
283 | eleventy: ".eleventy.js",
284 | };
285 |
286 | for (const [ssg, configFile] of Object.entries(ssgConfigs)) {
287 | const result = await testLocalDeployment({
288 | repositoryPath: testRepoPath,
289 | ssg: ssg as any,
290 | skipBuild: true,
291 | });
292 |
293 | const parsedResult = JSON.parse(result.content[0].text);
294 | expect(parsedResult.recommendations).toEqual(
295 | expect.arrayContaining([expect.stringContaining(configFile)]),
296 | );
297 | }
298 | });
299 |
300 | it("should include comprehensive next steps", async () => {
301 | const result = await testLocalDeployment({
302 | repositoryPath: testRepoPath,
303 | ssg: "jekyll", // Missing config will trigger recommendations
304 | skipBuild: true,
305 | });
306 |
307 | const parsedResult = JSON.parse(result.content[0].text);
308 | const nextSteps = parsedResult.nextSteps;
309 |
310 | expect(Array.isArray(nextSteps)).toBe(true);
311 | expect(nextSteps.length).toBeGreaterThan(0);
312 |
313 | // Should include generate_config step for missing config
314 | expect(nextSteps).toEqual(
315 | expect.arrayContaining([expect.stringContaining("generate_config")]),
316 | );
317 | });
318 |
319 | it("should handle edge case with empty repository path", async () => {
320 | const result = await testLocalDeployment({
321 | repositoryPath: "",
322 | ssg: "hugo",
323 | skipBuild: true,
324 | });
325 |
326 | const parsedResult = JSON.parse(result.content[0].text);
327 | // Should handle gracefully and provide recommendations
328 | expect(parsedResult).toBeDefined();
329 | expect(result.content).toBeDefined();
330 | });
331 |
332 | it("should validate port range handling", async () => {
333 | const ports = [1000, 3000, 8080, 9000, 65535];
334 |
335 | for (const port of ports) {
336 | const result = await testLocalDeployment({
337 | repositoryPath: testRepoPath,
338 | ssg: "hugo",
339 | port,
340 | skipBuild: true,
341 | });
342 |
343 | const parsedResult = JSON.parse(result.content[0].text);
344 | expect(parsedResult.port).toBe(port);
345 | expect(parsedResult.testScript).toContain(`http://localhost:${port}`);
346 | }
347 | });
348 | });
349 |
350 | describe("Advanced coverage scenarios", () => {
351 | beforeEach(() => {
352 | jest.spyOn(process, "chdir").mockImplementation(() => {});
353 | });
354 |
355 | afterEach(() => {
356 | jest.restoreAllMocks();
357 | });
358 |
359 | describe("Configuration file scenarios", () => {
360 | it("should detect existing configuration file for hugo", async () => {
361 | // Mock fs.access to succeed for hugo config file
362 | const mockFsAccess = jest
363 | .spyOn(fs.promises, "access")
364 | .mockResolvedValueOnce(undefined);
365 |
366 | const result = await testLocalDeployment({
367 | repositoryPath: testRepoPath,
368 | ssg: "hugo",
369 | skipBuild: true,
370 | });
371 |
372 | const parsedResult = JSON.parse(result.content[0].text);
373 | // Should not recommend missing config since file exists
374 | expect(parsedResult.recommendations).not.toEqual(
375 | expect.arrayContaining([
376 | expect.stringContaining("Missing configuration file"),
377 | ]),
378 | );
379 |
380 | mockFsAccess.mockRestore();
381 | });
382 |
383 | it("should detect existing configuration file for jekyll", async () => {
384 | // Mock fs.access to succeed for jekyll config file
385 | const mockFsAccess = jest
386 | .spyOn(fs.promises, "access")
387 | .mockResolvedValueOnce(undefined);
388 |
389 | const result = await testLocalDeployment({
390 | repositoryPath: testRepoPath,
391 | ssg: "jekyll",
392 | skipBuild: true,
393 | });
394 |
395 | const parsedResult = JSON.parse(result.content[0].text);
396 | // Should not recommend missing config since file exists
397 | expect(parsedResult.recommendations).not.toEqual(
398 | expect.arrayContaining([
399 | expect.stringContaining("Missing configuration file"),
400 | ]),
401 | );
402 |
403 | mockFsAccess.mockRestore();
404 | });
405 |
406 | it("should detect existing configuration file for docusaurus", async () => {
407 | // Mock fs.access to succeed for docusaurus config file
408 | const mockFsAccess = jest
409 | .spyOn(fs.promises, "access")
410 | .mockResolvedValueOnce(undefined);
411 |
412 | const result = await testLocalDeployment({
413 | repositoryPath: testRepoPath,
414 | ssg: "docusaurus",
415 | skipBuild: true,
416 | });
417 |
418 | const parsedResult = JSON.parse(result.content[0].text);
419 | // Should not recommend missing config since file exists
420 | expect(parsedResult.recommendations).not.toEqual(
421 | expect.arrayContaining([
422 | expect.stringContaining("Missing configuration file"),
423 | ]),
424 | );
425 |
426 | mockFsAccess.mockRestore();
427 | });
428 |
429 | it("should detect existing configuration file for mkdocs", async () => {
430 | // Mock fs.access to succeed for mkdocs config file
431 | const mockFsAccess = jest
432 | .spyOn(fs.promises, "access")
433 | .mockResolvedValueOnce(undefined);
434 |
435 | const result = await testLocalDeployment({
436 | repositoryPath: testRepoPath,
437 | ssg: "mkdocs",
438 | skipBuild: true,
439 | });
440 |
441 | const parsedResult = JSON.parse(result.content[0].text);
442 | // Should not recommend missing config since file exists
443 | expect(parsedResult.recommendations).not.toEqual(
444 | expect.arrayContaining([
445 | expect.stringContaining("Missing configuration file"),
446 | ]),
447 | );
448 |
449 | mockFsAccess.mockRestore();
450 | });
451 |
452 | it("should detect existing configuration file for eleventy", async () => {
453 | // Mock fs.access to succeed for eleventy config file
454 | const mockFsAccess = jest
455 | .spyOn(fs.promises, "access")
456 | .mockResolvedValueOnce(undefined);
457 |
458 | const result = await testLocalDeployment({
459 | repositoryPath: testRepoPath,
460 | ssg: "eleventy",
461 | skipBuild: true,
462 | });
463 |
464 | const parsedResult = JSON.parse(result.content[0].text);
465 | // Should not recommend missing config since file exists
466 | expect(parsedResult.recommendations).not.toEqual(
467 | expect.arrayContaining([
468 | expect.stringContaining("Missing configuration file"),
469 | ]),
470 | );
471 |
472 | mockFsAccess.mockRestore();
473 | });
474 | });
475 |
476 | describe("Build scenarios with actual executions", () => {
477 | it("should handle successful build for eleventy without skipBuild", async () => {
478 | const result = await testLocalDeployment({
479 | repositoryPath: testRepoPath,
480 | ssg: "eleventy",
481 | skipBuild: false,
482 | timeout: 10, // Short timeout to avoid long waits
483 | });
484 |
485 | const parsedResult = JSON.parse(result.content[0].text);
486 | expect(parsedResult.ssg).toBe("eleventy");
487 | expect(parsedResult.buildSuccess).toBeDefined();
488 | expect(parsedResult.testScript).toContain("npx @11ty/eleventy");
489 | });
490 |
491 | it("should handle successful build for mkdocs without skipBuild", async () => {
492 | const result = await testLocalDeployment({
493 | repositoryPath: testRepoPath,
494 | ssg: "mkdocs",
495 | skipBuild: false,
496 | timeout: 10, // Short timeout to avoid long waits
497 | });
498 |
499 | const parsedResult = JSON.parse(result.content[0].text);
500 | expect(parsedResult.ssg).toBe("mkdocs");
501 | expect(parsedResult.buildSuccess).toBeDefined();
502 | expect(parsedResult.testScript).toContain("mkdocs build");
503 | });
504 |
505 | it("should exercise server start paths with short timeout", async () => {
506 | const result = await testLocalDeployment({
507 | repositoryPath: testRepoPath,
508 | ssg: "hugo",
509 | skipBuild: true,
510 | timeout: 5, // Very short timeout to trigger timeout path
511 | });
512 |
513 | const parsedResult = JSON.parse(result.content[0].text);
514 | expect(parsedResult.ssg).toBe("hugo");
515 | expect(parsedResult.serverStarted).toBeDefined();
516 | // localUrl may be undefined if server doesn't start quickly enough
517 | expect(
518 | typeof parsedResult.localUrl === "string" ||
519 | parsedResult.localUrl === undefined,
520 | ).toBe(true);
521 | });
522 |
523 | it("should test port customization in serve commands", async () => {
524 | const result = await testLocalDeployment({
525 | repositoryPath: testRepoPath,
526 | ssg: "jekyll",
527 | port: 4000,
528 | skipBuild: true,
529 | });
530 |
531 | const parsedResult = JSON.parse(result.content[0].text);
532 | expect(parsedResult.testScript).toContain("--port 4000");
533 | expect(parsedResult.testScript).toContain("http://localhost:4000");
534 | });
535 |
536 | it("should test mkdocs serve command with custom port", async () => {
537 | const result = await testLocalDeployment({
538 | repositoryPath: testRepoPath,
539 | ssg: "mkdocs",
540 | port: 8000,
541 | skipBuild: true,
542 | });
543 |
544 | const parsedResult = JSON.parse(result.content[0].text);
545 | expect(parsedResult.testScript).toContain("--dev-addr localhost:8000");
546 | expect(parsedResult.testScript).toContain("http://localhost:8000");
547 | });
548 |
549 | it("should test eleventy serve command with custom port", async () => {
550 | const result = await testLocalDeployment({
551 | repositoryPath: testRepoPath,
552 | ssg: "eleventy",
553 | port: 3001,
554 | skipBuild: true,
555 | });
556 |
557 | const parsedResult = JSON.parse(result.content[0].text);
558 | expect(parsedResult.testScript).toContain("--port 3001");
559 | expect(parsedResult.testScript).toContain("http://localhost:3001");
560 | });
561 |
562 | it("should provide correct next steps recommendations", async () => {
563 | const result = await testLocalDeployment({
564 | repositoryPath: testRepoPath,
565 | ssg: "docusaurus",
566 | skipBuild: true,
567 | });
568 |
569 | const parsedResult = JSON.parse(result.content[0].text);
570 | expect(parsedResult.nextSteps).toBeDefined();
571 | expect(Array.isArray(parsedResult.nextSteps)).toBe(true);
572 | expect(parsedResult.nextSteps.length).toBeGreaterThan(0);
573 | });
574 | });
575 | });
576 | });
577 |
```
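The tests above parse `result.content[0].text` as JSON and assert fields such as `ssg`, `port`, `buildSuccess`, `testScript`, `recommendations`, and `nextSteps`. Below is a hedged sketch of a standalone driver that consumes the same payload outside Jest; the import path and the output filename are assumptions, not repository code.

```typescript
// Hypothetical driver; the import path mirrors the one used in the test file.
import { writeFile } from "fs/promises";
import { testLocalDeployment } from "../../src/tools/test-local-deployment.js";

async function main(): Promise<void> {
  const result = await testLocalDeployment({
    repositoryPath: process.cwd(),
    ssg: "hugo",
    port: 8080,
    skipBuild: true,
  });

  // The first content item is a JSON payload (see the response-structure tests above).
  const payload = JSON.parse(result.content[0].text);
  console.log(`SSG: ${payload.ssg}, build success: ${payload.buildSuccess}`);
  payload.recommendations.forEach((r: string) => console.log(`- ${r}`));

  // Persist the generated shell script so it can be run manually.
  await writeFile("test-local-deployment.sh", payload.testScript, "utf-8");
  console.log("Wrote test-local-deployment.sh; serves at http://localhost:8080");
}

main().catch(console.error);
```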
--------------------------------------------------------------------------------
/tests/tools/recommend-ssg.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Test suite for SSG Recommendation Tool
3 | */
4 |
5 | import { jest } from "@jest/globals";
6 | import { recommendSSG } from "../../src/tools/recommend-ssg.js";
7 |
8 | // Mock the memory and KG integration
9 | jest.mock("../../src/memory/kg-integration.js", () => ({
10 | getMemoryManager: jest.fn(),
11 | getKnowledgeGraph: jest.fn(),
12 | getUserPreferenceManager: jest.fn(),
13 | getProjectContext: jest.fn(),
14 | saveKnowledgeGraph: (jest.fn() as any).mockResolvedValue(undefined),
15 | }));
16 |
17 | describe("recommendSSG", () => {
18 | let mockManager: any;
19 | let mockKG: any;
20 | let mockPreferenceManager: any;
21 |
22 | beforeEach(() => {
23 | mockManager = {
24 | recall: jest.fn() as any,
25 | } as any;
26 |
27 | mockKG = {
28 | findNode: (jest.fn() as any).mockResolvedValue(null),
29 | findNodes: (jest.fn() as any).mockResolvedValue([]),
30 | findEdges: (jest.fn() as any).mockResolvedValue([]),
31 | getAllNodes: (jest.fn() as any).mockResolvedValue([]),
32 | addNode: (jest.fn() as any).mockImplementation((node: any) => node),
33 | addEdge: (jest.fn() as any).mockReturnValue(undefined),
34 | } as any;
35 |
36 | mockPreferenceManager = {
37 | getPreference: (jest.fn() as any).mockResolvedValue(null),
38 | } as any;
39 |
40 | const {
41 | getMemoryManager,
42 | getKnowledgeGraph,
43 | getUserPreferenceManager,
44 | getProjectContext,
45 | } = require("../../src/memory/kg-integration.js");
46 |
47 | getMemoryManager.mockResolvedValue(mockManager);
48 | getKnowledgeGraph.mockResolvedValue(mockKG);
49 | getUserPreferenceManager.mockResolvedValue(mockPreferenceManager);
50 | getProjectContext.mockResolvedValue({
51 | previousAnalyses: 0,
52 | lastAnalyzed: null,
53 | knownTechnologies: [],
54 | similarProjects: [],
55 | });
56 | });
57 |
58 | afterEach(() => {
59 | jest.clearAllMocks();
60 | });
61 |
62 | describe("Input Validation", () => {
63 | it("should validate required analysisId parameter", async () => {
64 | await expect(recommendSSG({})).rejects.toThrow();
65 | });
66 |
67 | it("should validate analysisId as string", async () => {
68 | await expect(recommendSSG({ analysisId: 123 })).rejects.toThrow();
69 | });
70 |
71 | it("should accept valid preferences", async () => {
72 | mockManager.recall.mockResolvedValue(null);
73 |
74 | const result = await recommendSSG({
75 | analysisId: "test-id",
76 | preferences: {
77 | priority: "simplicity",
78 | ecosystem: "javascript",
79 | },
80 | });
81 |
82 | expect(result.content).toBeDefined();
83 | });
84 |
85 | it("should reject invalid priority preference", async () => {
86 | await expect(
87 | recommendSSG({
88 | analysisId: "test-id",
89 | preferences: { priority: "invalid" },
90 | }),
91 | ).rejects.toThrow();
92 | });
93 |
94 | it("should reject invalid ecosystem preference", async () => {
95 | await expect(
96 | recommendSSG({
97 | analysisId: "test-id",
98 | preferences: { ecosystem: "invalid" },
99 | }),
100 | ).rejects.toThrow();
101 | });
102 | });
103 |
104 | describe("Memory Integration", () => {
105 | it("should retrieve analysis from memory when available", async () => {
106 | const mockAnalysis = {
107 | data: {
108 | content: [
109 | {
110 | type: "text",
111 | text: JSON.stringify({
112 | repository: { language: "JavaScript" },
113 | complexity: "low",
114 | size: "small",
115 | }),
116 | },
117 | ],
118 | },
119 | };
120 |
121 | mockManager.recall.mockResolvedValue(mockAnalysis);
122 |
123 | const result = await recommendSSG({
124 | analysisId: "test-id",
125 | });
126 |
127 | expect(mockManager.recall).toHaveBeenCalledWith("test-id");
128 | expect(result.content).toBeDefined();
129 | });
130 |
131 | it("should handle missing analysis gracefully", async () => {
132 | mockManager.recall.mockResolvedValue(null);
133 |
134 | const result = await recommendSSG({
135 | analysisId: "non-existent-id",
136 | });
137 |
138 | expect(result.content).toBeDefined();
139 | expect(result.content[0].type).toBe("text");
140 | });
141 |
142 | it("should handle analysis with direct data structure", async () => {
143 | const mockAnalysis = {
144 | data: {
145 | repository: { language: "Python" },
146 | complexity: "medium",
147 | },
148 | };
149 |
150 | mockManager.recall.mockResolvedValue(mockAnalysis);
151 |
152 | const result = await recommendSSG({
153 | analysisId: "test-id",
154 | });
155 |
156 | expect(result.content).toBeDefined();
157 | });
158 |
159 | it("should handle corrupted analysis data", async () => {
160 | const mockAnalysis = {
161 | data: {
162 | content: [
163 | {
164 | type: "text",
165 | text: "invalid json",
166 | },
167 | ],
168 | },
169 | };
170 |
171 | mockManager.recall.mockResolvedValue(mockAnalysis);
172 |
173 | const result = await recommendSSG({
174 | analysisId: "test-id",
175 | });
176 |
177 | expect(result.content).toBeDefined();
178 | });
179 | });
180 |
181 | describe("SSG Recommendations", () => {
182 | it("should recommend Jekyll for Ruby projects", async () => {
183 | const mockAnalysis = {
184 | data: {
185 | dependencies: {
186 | ecosystem: "ruby",
187 | },
188 | complexity: "low",
189 | },
190 | };
191 |
192 | mockManager.recall.mockResolvedValue(mockAnalysis);
193 |
194 | const result = await recommendSSG({
195 | analysisId: "test-id",
196 | });
197 |
198 | const recommendation = JSON.parse(result.content[0].text);
199 | expect(recommendation.recommended).toBe("jekyll");
200 | });
201 |
202 | it("should recommend Hugo for Go projects", async () => {
203 | const mockAnalysis = {
204 | data: {
205 | dependencies: {
206 | ecosystem: "go",
207 | },
208 | complexity: "medium",
209 | },
210 | };
211 |
212 | mockManager.recall.mockResolvedValue(mockAnalysis);
213 |
214 | const result = await recommendSSG({
215 | analysisId: "test-id",
216 | });
217 |
218 | const recommendation = JSON.parse(result.content[0].text);
219 | expect(recommendation.recommended).toBe("hugo");
220 | });
221 |
222 | it("should recommend Docusaurus for JavaScript projects", async () => {
223 | const mockAnalysis = {
224 | data: {
225 | dependencies: {
226 | ecosystem: "javascript",
227 | },
228 | documentation: {
229 | estimatedComplexity: "complex",
230 | },
231 | recommendations: {
232 | teamSize: "large",
233 | },
234 | },
235 | };
236 |
237 | mockManager.recall.mockResolvedValue(mockAnalysis);
238 |
239 | const result = await recommendSSG({
240 | analysisId: "test-id",
241 | });
242 |
243 | const recommendation = JSON.parse(result.content[0].text);
244 | expect(recommendation.recommended).toBe("docusaurus");
245 | });
246 |
247 | it("should recommend MkDocs for Python projects", async () => {
248 | const mockAnalysis = {
249 | data: {
250 | dependencies: {
251 | ecosystem: "python",
252 | },
253 | complexity: "medium",
254 | },
255 | };
256 |
257 | mockManager.recall.mockResolvedValue(mockAnalysis);
258 |
259 | const result = await recommendSSG({
260 | analysisId: "test-id",
261 | });
262 |
263 | const recommendation = JSON.parse(result.content[0].text);
264 | expect(recommendation.recommended).toBe("mkdocs");
265 | });
266 |
267 | it("should recommend Eleventy for simple JavaScript projects with simplicity priority", async () => {
268 | const mockAnalysis = {
269 | data: {
270 | dependencies: {
271 | ecosystem: "javascript",
272 | },
273 | documentation: {
274 | estimatedComplexity: "simple",
275 | },
276 | recommendations: {
277 | teamSize: "small",
278 | },
279 | },
280 | };
281 |
282 | mockManager.recall.mockResolvedValue(mockAnalysis);
283 |
284 | const result = await recommendSSG({
285 | analysisId: "test-id",
286 | preferences: { priority: "simplicity" },
287 | });
288 |
289 | const recommendation = JSON.parse(result.content[0].text);
290 | expect(recommendation.recommended).toBe("eleventy");
291 | });
292 | });
293 |
294 | describe("Preference-based Recommendations", () => {
295 | it("should prioritize simplicity when requested", async () => {
296 | mockManager.recall.mockResolvedValue(null);
297 |
298 | const result = await recommendSSG({
299 | analysisId: "test-id",
300 | preferences: { priority: "simplicity" },
301 | });
302 |
303 | const recommendation = JSON.parse(result.content[0].text);
304 | expect(["jekyll", "eleventy"]).toContain(recommendation.recommended);
305 | });
306 |
307 | it("should consider ecosystem preferences", async () => {
308 | mockManager.recall.mockResolvedValue(null);
309 |
310 | const result = await recommendSSG({
311 | analysisId: "test-id",
312 | preferences: { ecosystem: "javascript" },
313 | });
314 |
315 | const recommendation = JSON.parse(result.content[0].text);
316 | expect(["docusaurus", "eleventy"]).toContain(recommendation.recommended);
317 | });
318 |
319 | it("should handle performance preference with fallback", async () => {
320 | mockManager.recall.mockResolvedValue(null);
321 |
322 | const result = await recommendSSG({
323 | analysisId: "test-id",
324 | preferences: { priority: "performance" },
325 | });
326 |
327 | const recommendation = JSON.parse(result.content[0].text);
328 | expect(recommendation.recommended).toBe("docusaurus");
329 | });
330 |
331 | it("should handle features preference with fallback", async () => {
332 | mockManager.recall.mockResolvedValue(null);
333 |
334 | const result = await recommendSSG({
335 | analysisId: "test-id",
336 | preferences: { priority: "features" },
337 | });
338 |
339 | const recommendation = JSON.parse(result.content[0].text);
340 | expect(recommendation.recommended).toBe("docusaurus");
341 | });
342 | });
343 |
344 | describe("Scoring and Alternatives", () => {
345 | it("should provide confidence scores", async () => {
346 | mockManager.recall.mockResolvedValue({
347 | data: {
348 | dependencies: {
349 | ecosystem: "javascript",
350 | },
351 | complexity: "medium",
352 | },
353 | });
354 |
355 | const result = await recommendSSG({
356 | analysisId: "test-id",
357 | });
358 |
359 | const recommendation = JSON.parse(result.content[0].text);
360 | expect(recommendation.confidence).toBeGreaterThan(0);
361 | expect(recommendation.confidence).toBeLessThanOrEqual(1);
362 | });
363 |
364 | it("should provide alternative recommendations", async () => {
365 | mockManager.recall.mockResolvedValue({
366 | data: {
367 | dependencies: {
368 | ecosystem: "javascript",
369 | },
370 | complexity: "medium",
371 | },
372 | });
373 |
374 | const result = await recommendSSG({
375 | analysisId: "test-id",
376 | });
377 |
378 | const recommendation = JSON.parse(result.content[0].text);
379 | expect(recommendation.alternatives).toBeDefined();
380 | expect(Array.isArray(recommendation.alternatives)).toBe(true);
381 | expect(recommendation.alternatives.length).toBeGreaterThan(0);
382 | });
383 |
384 | it("should include pros and cons for alternatives", async () => {
385 | mockManager.recall.mockResolvedValue({
386 | data: {
387 | dependencies: {
388 | ecosystem: "python",
389 | },
390 | complexity: "medium",
391 | },
392 | });
393 |
394 | const result = await recommendSSG({
395 | analysisId: "test-id",
396 | });
397 |
398 | const recommendation = JSON.parse(result.content[0].text);
399 | const alternative = recommendation.alternatives[0];
400 |
401 | expect(alternative.name).toBeDefined();
402 | expect(alternative.score).toBeDefined();
403 | expect(Array.isArray(alternative.pros)).toBe(true);
404 | expect(Array.isArray(alternative.cons)).toBe(true);
405 | });
406 |
407 | it("should sort alternatives by score", async () => {
408 | mockManager.recall.mockResolvedValue({
409 | data: {
410 | dependencies: {
411 | ecosystem: "javascript",
412 | },
413 | documentation: {
414 | estimatedComplexity: "complex",
415 | },
416 | },
417 | });
418 |
419 | const result = await recommendSSG({
420 | analysisId: "test-id",
421 | });
422 |
423 | const recommendation = JSON.parse(result.content[0].text);
424 | const scores = recommendation.alternatives.map((alt: any) => alt.score);
425 |
426 | for (let i = 1; i < scores.length; i++) {
427 | expect(scores[i]).toBeLessThanOrEqual(scores[i - 1]);
428 | }
429 | });
430 | });
431 |
432 | describe("Complex Project Analysis", () => {
433 | it("should handle projects with React packages", async () => {
434 | mockManager.recall.mockResolvedValue({
435 | data: {
436 | dependencies: {
437 | ecosystem: "javascript",
438 | packages: ["react", "next"],
439 | },
440 | },
441 | });
442 |
443 | const result = await recommendSSG({
444 | analysisId: "test-id",
445 | });
446 |
447 | const recommendation = JSON.parse(result.content[0].text);
448 | expect(recommendation.recommended).toBe("docusaurus");
449 | expect(recommendation.reasoning.length).toBeGreaterThan(0);
450 | });
451 |
452 | it("should consider project size in recommendations", async () => {
453 | mockManager.recall.mockResolvedValue({
454 | data: {
455 | dependencies: {
456 | ecosystem: "javascript",
457 | },
458 | structure: {
459 | totalFiles: 150, // Large project
460 | },
461 | documentation: {
462 | estimatedComplexity: "complex",
463 | hasReadme: true,
464 | hasDocs: true,
465 | },
466 | },
467 | });
468 |
469 | const result = await recommendSSG({
470 | analysisId: "test-id",
471 | });
472 |
473 | const recommendation = JSON.parse(result.content[0].text);
474 | expect(recommendation.confidence).toBeGreaterThan(0.85); // Should have higher confidence with more data
475 | });
476 |
477 | it("should handle missing ecosystem information", async () => {
478 | mockManager.recall.mockResolvedValue({
479 | data: {
480 | dependencies: {
481 | ecosystem: "unknown",
482 | },
483 | documentation: {
484 | estimatedComplexity: "moderate",
485 | },
486 | },
487 | });
488 |
489 | const result = await recommendSSG({
490 | analysisId: "test-id",
491 | });
492 |
493 | const recommendation = JSON.parse(result.content[0].text);
494 | expect(recommendation.recommended).toBeDefined();
495 | });
496 |
497 | it("should consider existing documentation structure", async () => {
498 | mockManager.recall.mockResolvedValue({
499 | data: {
500 | dependencies: {
501 | ecosystem: "javascript",
502 | },
503 | documentation: {
504 | hasReadme: true,
505 | hasDocs: true,
506 | estimatedComplexity: "moderate",
507 | },
508 | },
509 | });
510 |
511 | const result = await recommendSSG({
512 | analysisId: "test-id",
513 | });
514 |
515 | const recommendation = JSON.parse(result.content[0].text);
516 | expect(recommendation.confidence).toBeGreaterThan(0.85); // Higher confidence with documentation
517 | });
518 | });
519 |
520 | describe("Memory Error Handling", () => {
521 | it("should handle memory initialization failure", async () => {
522 | const {
523 | getMemoryManager,
524 | } = require("../../src/memory/kg-integration.js");
525 | getMemoryManager.mockRejectedValue(new Error("Memory failed"));
526 |
527 | const result = await recommendSSG({
528 | analysisId: "test-id",
529 | });
530 |
531 | expect(result.content).toBeDefined();
532 | expect(result.content[0].type).toBe("text");
533 |
534 | // Reset the mock
535 | getMemoryManager.mockResolvedValue(mockManager);
536 | });
537 |
538 | it("should handle memory recall failure", async () => {
539 | mockManager.recall.mockRejectedValue(new Error("Recall failed"));
540 |
541 | const result = await recommendSSG({
542 | analysisId: "test-id",
543 | });
544 |
545 | expect(result.content).toBeDefined();
546 | });
547 |
548 | it("should handle corrupted memory data", async () => {
549 | mockManager.recall.mockResolvedValue({
550 | data: {
551 | content: [
552 | {
553 | type: "text",
554 | text: '{"invalid": json}',
555 | },
556 | ],
557 | },
558 | });
559 |
560 | const result = await recommendSSG({
561 | analysisId: "test-id",
562 | });
563 |
564 | expect(result.content).toBeDefined();
565 | });
566 | });
567 |
568 | describe("Performance and Timing", () => {
569 | it("should complete recommendation in reasonable time", async () => {
570 | mockManager.recall.mockResolvedValue({
571 | data: {
572 | repository: { language: "JavaScript" },
573 | complexity: "medium",
574 | },
575 | });
576 |
577 | const start = Date.now();
578 | await recommendSSG({
579 | analysisId: "test-id",
580 | });
581 | const duration = Date.now() - start;
582 |
583 | expect(duration).toBeLessThan(5000); // Should complete within 5 seconds
584 | });
585 |
586 | it("should include execution time in response", async () => {
587 | mockManager.recall.mockResolvedValue({
588 | data: {
589 | repository: { language: "JavaScript" },
590 | },
591 | });
592 |
593 | const result = await recommendSSG({
594 | analysisId: "test-id",
595 | });
596 |
597 | expect(result.content[1].text).toContain("Execution completed in");
598 | });
599 | });
600 |
601 | describe("Edge Cases", () => {
602 | it("should handle null analysis data", async () => {
603 | mockManager.recall.mockResolvedValue({
604 | data: null,
605 | });
606 |
607 | const result = await recommendSSG({
608 | analysisId: "test-id",
609 | });
610 |
611 | expect(result.content).toBeDefined();
612 | });
613 |
614 | it("should handle empty analysis data", async () => {
615 | mockManager.recall.mockResolvedValue({
616 | data: {},
617 | });
618 |
619 | const result = await recommendSSG({
620 | analysisId: "test-id",
621 | });
622 |
623 | expect(result.content).toBeDefined();
624 | });
625 |
626 | it("should handle analysis without dependency data", async () => {
627 | mockManager.recall.mockResolvedValue({
628 | data: {
629 | documentation: {
630 | estimatedComplexity: "moderate",
631 | },
632 | },
633 | });
634 |
635 | const result = await recommendSSG({
636 | analysisId: "test-id",
637 | });
638 |
639 | const recommendation = JSON.parse(result.content[0].text);
640 | expect(recommendation.recommended).toBeDefined();
641 | });
642 |
643 | it("should handle unknown programming languages", async () => {
644 | mockManager.recall.mockResolvedValue({
645 | data: {
646 | dependencies: {
647 | ecosystem: "unknown",
648 | },
649 | documentation: {
650 | estimatedComplexity: "moderate",
651 | },
652 | },
653 | });
654 |
655 | const result = await recommendSSG({
656 | analysisId: "test-id",
657 | });
658 |
659 | const recommendation = JSON.parse(result.content[0].text);
660 | expect(recommendation.recommended).toBeDefined();
661 | });
662 | });
663 |
664 | describe("Response Format", () => {
665 | it("should return properly formatted MCP response", async () => {
666 | mockManager.recall.mockResolvedValue({
667 | data: {
668 | repository: { language: "JavaScript" },
669 | },
670 | });
671 |
672 | const result = await recommendSSG({
673 | analysisId: "test-id",
674 | });
675 |
676 | expect(result).toHaveProperty("content");
677 | expect(Array.isArray(result.content)).toBe(true);
678 | expect(result.content[0]).toHaveProperty("type");
679 | expect(result.content[0]).toHaveProperty("text");
680 | });
681 |
682 | it("should include all required recommendation fields", async () => {
683 | mockManager.recall.mockResolvedValue({
684 | data: {
685 | repository: { language: "Python" },
686 | },
687 | });
688 |
689 | const result = await recommendSSG({
690 | analysisId: "test-id",
691 | });
692 |
693 | const recommendation = JSON.parse(result.content[0].text);
694 |
695 | expect(recommendation).toHaveProperty("recommended");
696 | expect(recommendation).toHaveProperty("confidence");
697 | expect(recommendation).toHaveProperty("reasoning");
698 | expect(recommendation).toHaveProperty("alternatives");
699 | expect(result.content[1].text).toContain("Execution completed in");
700 | });
701 | });
702 | });
703 |
```
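As a rough illustration of calling `recommendSSG` outside the Jest harness, the sketch below uses only the fields asserted in these tests (`recommended`, `confidence`, `reasoning`, and `alternatives` entries with `name`, `score`, `pros`, `cons`). The import path and the analysis ID are placeholders; in practice the ID would come from a prior repository analysis.

```typescript
// Hypothetical usage; the import path mirrors the one used in the test file.
import { recommendSSG } from "../../src/tools/recommend-ssg.js";

async function pickSSG(analysisId: string): Promise<void> {
  const result = await recommendSSG({
    analysisId,
    preferences: { priority: "simplicity", ecosystem: "javascript" },
  });

  // content[0] carries the recommendation JSON; content[1] carries timing text
  // ("Execution completed in ..."), as asserted in the tests above.
  const rec = JSON.parse(result.content[0].text);
  console.log(`Recommended: ${rec.recommended} (confidence ${rec.confidence})`);
  console.log("Reasoning:", rec.reasoning);
  for (const alt of rec.alternatives) {
    console.log(`Alternative: ${alt.name} (score ${alt.score})`);
  }
}

pickSSG("analysis_example_123").catch(console.error); // placeholder analysis ID
```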