#
tokens: 48471/50000 7/274 files (page 15/29)
lines: on (toggle) GitHub
raw markdown copy reset
This is page 15 of 29. Use http://codebase.md/tosin2013/documcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .dockerignore
├── .eslintignore
├── .eslintrc.json
├── .github
│   ├── agents
│   │   ├── documcp-ast.md
│   │   ├── documcp-deploy.md
│   │   ├── documcp-memory.md
│   │   ├── documcp-test.md
│   │   └── documcp-tool.md
│   ├── copilot-instructions.md
│   ├── dependabot.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── automated-changelog.md
│   │   ├── bug_report.md
│   │   ├── bug_report.yml
│   │   ├── documentation_issue.md
│   │   ├── feature_request.md
│   │   ├── feature_request.yml
│   │   ├── npm-publishing-fix.md
│   │   └── release_improvements.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── release-drafter.yml
│   └── workflows
│       ├── auto-merge.yml
│       ├── ci.yml
│       ├── codeql.yml
│       ├── dependency-review.yml
│       ├── deploy-docs.yml
│       ├── README.md
│       ├── release-drafter.yml
│       └── release.yml
├── .gitignore
├── .husky
│   ├── commit-msg
│   └── pre-commit
├── .linkcheck.config.json
├── .markdown-link-check.json
├── .nvmrc
├── .pre-commit-config.yaml
├── .versionrc.json
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── CONTRIBUTING.md
├── docker-compose.docs.yml
├── Dockerfile.docs
├── docs
│   ├── .docusaurus
│   │   ├── docusaurus-plugin-content-docs
│   │   │   └── default
│   │   │       └── __mdx-loader-dependency.json
│   │   └── docusaurus-plugin-content-pages
│   │       └── default
│   │           └── __plugin.json
│   ├── adrs
│   │   ├── 001-mcp-server-architecture.md
│   │   ├── 002-repository-analysis-engine.md
│   │   ├── 003-static-site-generator-recommendation-engine.md
│   │   ├── 004-diataxis-framework-integration.md
│   │   ├── 005-github-pages-deployment-automation.md
│   │   ├── 006-mcp-tools-api-design.md
│   │   ├── 007-mcp-prompts-and-resources-integration.md
│   │   ├── 008-intelligent-content-population-engine.md
│   │   ├── 009-content-accuracy-validation-framework.md
│   │   ├── 010-mcp-resource-pattern-redesign.md
│   │   └── README.md
│   ├── api
│   │   ├── .nojekyll
│   │   ├── assets
│   │   │   ├── hierarchy.js
│   │   │   ├── highlight.css
│   │   │   ├── icons.js
│   │   │   ├── icons.svg
│   │   │   ├── main.js
│   │   │   ├── navigation.js
│   │   │   ├── search.js
│   │   │   └── style.css
│   │   ├── hierarchy.html
│   │   ├── index.html
│   │   ├── modules.html
│   │   └── variables
│   │       └── TOOLS.html
│   ├── assets
│   │   └── logo.svg
│   ├── development
│   │   └── MCP_INSPECTOR_TESTING.md
│   ├── docusaurus.config.js
│   ├── explanation
│   │   ├── architecture.md
│   │   └── index.md
│   ├── guides
│   │   ├── link-validation.md
│   │   ├── playwright-integration.md
│   │   └── playwright-testing-workflow.md
│   ├── how-to
│   │   ├── analytics-setup.md
│   │   ├── custom-domains.md
│   │   ├── documentation-freshness-tracking.md
│   │   ├── github-pages-deployment.md
│   │   ├── index.md
│   │   ├── local-testing.md
│   │   ├── performance-optimization.md
│   │   ├── prompting-guide.md
│   │   ├── repository-analysis.md
│   │   ├── seo-optimization.md
│   │   ├── site-monitoring.md
│   │   ├── troubleshooting.md
│   │   └── usage-examples.md
│   ├── index.md
│   ├── knowledge-graph.md
│   ├── package-lock.json
│   ├── package.json
│   ├── phase-2-intelligence.md
│   ├── reference
│   │   ├── api-overview.md
│   │   ├── cli.md
│   │   ├── configuration.md
│   │   ├── deploy-pages.md
│   │   ├── index.md
│   │   ├── mcp-tools.md
│   │   └── prompt-templates.md
│   ├── research
│   │   ├── cross-domain-integration
│   │   │   └── README.md
│   │   ├── domain-1-mcp-architecture
│   │   │   ├── index.md
│   │   │   └── mcp-performance-research.md
│   │   ├── domain-2-repository-analysis
│   │   │   └── README.md
│   │   ├── domain-3-ssg-recommendation
│   │   │   ├── index.md
│   │   │   └── ssg-performance-analysis.md
│   │   ├── domain-4-diataxis-integration
│   │   │   └── README.md
│   │   ├── domain-5-github-deployment
│   │   │   ├── github-pages-security-analysis.md
│   │   │   └── index.md
│   │   ├── domain-6-api-design
│   │   │   └── README.md
│   │   ├── README.md
│   │   ├── research-integration-summary-2025-01-14.md
│   │   ├── research-progress-template.md
│   │   └── research-questions-2025-01-14.md
│   ├── robots.txt
│   ├── sidebars.js
│   ├── sitemap.xml
│   ├── src
│   │   └── css
│   │       └── custom.css
│   └── tutorials
│       ├── development-setup.md
│       ├── environment-setup.md
│       ├── first-deployment.md
│       ├── getting-started.md
│       ├── index.md
│       ├── memory-workflows.md
│       └── user-onboarding.md
├── jest.config.js
├── LICENSE
├── Makefile
├── MCP_PHASE2_IMPLEMENTATION.md
├── mcp-config-example.json
├── mcp.json
├── package-lock.json
├── package.json
├── README.md
├── release.sh
├── scripts
│   └── check-package-structure.cjs
├── SECURITY.md
├── setup-precommit.sh
├── src
│   ├── benchmarks
│   │   └── performance.ts
│   ├── index.ts
│   ├── memory
│   │   ├── contextual-retrieval.ts
│   │   ├── deployment-analytics.ts
│   │   ├── enhanced-manager.ts
│   │   ├── export-import.ts
│   │   ├── freshness-kg-integration.ts
│   │   ├── index.ts
│   │   ├── integration.ts
│   │   ├── kg-code-integration.ts
│   │   ├── kg-health.ts
│   │   ├── kg-integration.ts
│   │   ├── kg-link-validator.ts
│   │   ├── kg-storage.ts
│   │   ├── knowledge-graph.ts
│   │   ├── learning.ts
│   │   ├── manager.ts
│   │   ├── multi-agent-sharing.ts
│   │   ├── pruning.ts
│   │   ├── schemas.ts
│   │   ├── storage.ts
│   │   ├── temporal-analysis.ts
│   │   ├── user-preferences.ts
│   │   └── visualization.ts
│   ├── prompts
│   │   └── technical-writer-prompts.ts
│   ├── scripts
│   │   └── benchmark.ts
│   ├── templates
│   │   └── playwright
│   │       ├── accessibility.spec.template.ts
│   │       ├── Dockerfile.template
│   │       ├── docs-e2e.workflow.template.yml
│   │       ├── link-validation.spec.template.ts
│   │       └── playwright.config.template.ts
│   ├── tools
│   │   ├── analyze-deployments.ts
│   │   ├── analyze-readme.ts
│   │   ├── analyze-repository.ts
│   │   ├── check-documentation-links.ts
│   │   ├── deploy-pages.ts
│   │   ├── detect-gaps.ts
│   │   ├── evaluate-readme-health.ts
│   │   ├── generate-config.ts
│   │   ├── generate-contextual-content.ts
│   │   ├── generate-llm-context.ts
│   │   ├── generate-readme-template.ts
│   │   ├── generate-technical-writer-prompts.ts
│   │   ├── kg-health-check.ts
│   │   ├── manage-preferences.ts
│   │   ├── manage-sitemap.ts
│   │   ├── optimize-readme.ts
│   │   ├── populate-content.ts
│   │   ├── readme-best-practices.ts
│   │   ├── recommend-ssg.ts
│   │   ├── setup-playwright-tests.ts
│   │   ├── setup-structure.ts
│   │   ├── sync-code-to-docs.ts
│   │   ├── test-local-deployment.ts
│   │   ├── track-documentation-freshness.ts
│   │   ├── update-existing-documentation.ts
│   │   ├── validate-content.ts
│   │   ├── validate-documentation-freshness.ts
│   │   ├── validate-readme-checklist.ts
│   │   └── verify-deployment.ts
│   ├── types
│   │   └── api.ts
│   ├── utils
│   │   ├── ast-analyzer.ts
│   │   ├── code-scanner.ts
│   │   ├── content-extractor.ts
│   │   ├── drift-detector.ts
│   │   ├── freshness-tracker.ts
│   │   ├── language-parsers-simple.ts
│   │   ├── permission-checker.ts
│   │   └── sitemap-generator.ts
│   └── workflows
│       └── documentation-workflow.ts
├── test-docs-local.sh
├── tests
│   ├── api
│   │   └── mcp-responses.test.ts
│   ├── benchmarks
│   │   └── performance.test.ts
│   ├── edge-cases
│   │   └── error-handling.test.ts
│   ├── functional
│   │   └── tools.test.ts
│   ├── integration
│   │   ├── kg-documentation-workflow.test.ts
│   │   ├── knowledge-graph-workflow.test.ts
│   │   ├── mcp-readme-tools.test.ts
│   │   ├── memory-mcp-tools.test.ts
│   │   ├── readme-technical-writer.test.ts
│   │   └── workflow.test.ts
│   ├── memory
│   │   ├── contextual-retrieval.test.ts
│   │   ├── enhanced-manager.test.ts
│   │   ├── export-import.test.ts
│   │   ├── freshness-kg-integration.test.ts
│   │   ├── kg-code-integration.test.ts
│   │   ├── kg-health.test.ts
│   │   ├── kg-link-validator.test.ts
│   │   ├── kg-storage-validation.test.ts
│   │   ├── kg-storage.test.ts
│   │   ├── knowledge-graph-enhanced.test.ts
│   │   ├── knowledge-graph.test.ts
│   │   ├── learning.test.ts
│   │   ├── manager-advanced.test.ts
│   │   ├── manager.test.ts
│   │   ├── mcp-resource-integration.test.ts
│   │   ├── mcp-tool-persistence.test.ts
│   │   ├── schemas.test.ts
│   │   ├── storage.test.ts
│   │   ├── temporal-analysis.test.ts
│   │   └── user-preferences.test.ts
│   ├── performance
│   │   ├── memory-load-testing.test.ts
│   │   └── memory-stress-testing.test.ts
│   ├── prompts
│   │   ├── guided-workflow-prompts.test.ts
│   │   └── technical-writer-prompts.test.ts
│   ├── server.test.ts
│   ├── setup.ts
│   ├── tools
│   │   ├── all-tools.test.ts
│   │   ├── analyze-coverage.test.ts
│   │   ├── analyze-deployments.test.ts
│   │   ├── analyze-readme.test.ts
│   │   ├── analyze-repository.test.ts
│   │   ├── check-documentation-links.test.ts
│   │   ├── deploy-pages-kg-retrieval.test.ts
│   │   ├── deploy-pages-tracking.test.ts
│   │   ├── deploy-pages.test.ts
│   │   ├── detect-gaps.test.ts
│   │   ├── evaluate-readme-health.test.ts
│   │   ├── generate-contextual-content.test.ts
│   │   ├── generate-llm-context.test.ts
│   │   ├── generate-readme-template.test.ts
│   │   ├── generate-technical-writer-prompts.test.ts
│   │   ├── kg-health-check.test.ts
│   │   ├── manage-sitemap.test.ts
│   │   ├── optimize-readme.test.ts
│   │   ├── readme-best-practices.test.ts
│   │   ├── recommend-ssg-historical.test.ts
│   │   ├── recommend-ssg-preferences.test.ts
│   │   ├── recommend-ssg.test.ts
│   │   ├── simple-coverage.test.ts
│   │   ├── sync-code-to-docs.test.ts
│   │   ├── test-local-deployment.test.ts
│   │   ├── tool-error-handling.test.ts
│   │   ├── track-documentation-freshness.test.ts
│   │   ├── validate-content.test.ts
│   │   ├── validate-documentation-freshness.test.ts
│   │   └── validate-readme-checklist.test.ts
│   ├── types
│   │   └── type-safety.test.ts
│   └── utils
│       ├── ast-analyzer.test.ts
│       ├── content-extractor.test.ts
│       ├── drift-detector.test.ts
│       ├── freshness-tracker.test.ts
│       └── sitemap-generator.test.ts
├── tsconfig.json
└── typedoc.json
```

# Files

--------------------------------------------------------------------------------
/tests/memory/mcp-resource-integration.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Memory MCP Resource Integration Tests
  3 |  * Tests memory system integration with MCP resources
  4 |  * Part of Issue #56 - Memory MCP Tools Integration Tests
  5 |  */
  6 | 
  7 | import { promises as fs } from "fs";
  8 | import path from "path";
  9 | import os from "os";
 10 | import { MemoryManager } from "../../src/memory/manager.js";
 11 | import {
 12 |   getMemoryManager,
 13 |   initializeMemory,
 14 | } from "../../src/memory/integration.js";
 15 | 
 16 | describe("Memory MCP Resource Integration", () => {
 17 |   let tempDir: string;
 18 |   let memoryManager: MemoryManager;
 19 | 
 20 |   beforeEach(async () => {
 21 |     tempDir = path.join(
 22 |       os.tmpdir(),
 23 |       `memory-resource-test-${Date.now()}-${Math.random()
 24 |         .toString(36)
 25 |         .substr(2, 9)}`,
 26 |     );
 27 |     await fs.mkdir(tempDir, { recursive: true });
 28 | 
 29 |     memoryManager = new MemoryManager(tempDir);
 30 |     await memoryManager.initialize();
 31 |   });
 32 | 
 33 |   afterEach(async () => {
 34 |     try {
 35 |       await fs.rm(tempDir, { recursive: true, force: true });
 36 |     } catch (error) {
 37 |       // Ignore cleanup errors
 38 |     }
 39 |   });
 40 | 
 41 |   describe("Resource URI Schema", () => {
 42 |     test("should support documcp:// URI schema for memory resources", async () => {
 43 |       // Create memory entries that could be exposed as resources
 44 |       memoryManager.setContext({ projectId: "resource-test" });
 45 | 
 46 |       const analysisEntry = await memoryManager.remember("analysis", {
 47 |         language: { primary: "typescript" },
 48 |         framework: { name: "react" },
 49 |         stats: { files: 100 },
 50 |       });
 51 | 
 52 |       const recommendationEntry = await memoryManager.remember(
 53 |         "recommendation",
 54 |         {
 55 |           recommended: "docusaurus",
 56 |           confidence: 0.9,
 57 |           reasoning: ["React compatibility", "TypeScript support"],
 58 |         },
 59 |       );
 60 | 
 61 |       // Test resource URI generation
 62 |       const analysisUri = `documcp://analysis/${analysisEntry.id}`;
 63 |       const recommendationUri = `documcp://recommendation/${recommendationEntry.id}`;
 64 | 
 65 |       expect(analysisUri).toMatch(/^documcp:\/\/analysis\/[a-f0-9-]+$/);
 66 |       expect(recommendationUri).toMatch(
 67 |         /^documcp:\/\/recommendation\/[a-f0-9-]+$/,
 68 |       );
 69 | 
 70 |       // Verify we can retrieve the data that would be exposed
 71 |       const retrievedAnalysis = await memoryManager.recall(analysisEntry.id);
 72 |       const retrievedRecommendation = await memoryManager.recall(
 73 |         recommendationEntry.id,
 74 |       );
 75 | 
 76 |       expect(retrievedAnalysis?.data.language.primary).toBe("typescript");
 77 |       expect(retrievedRecommendation?.data.recommended).toBe("docusaurus");
 78 |     });
 79 | 
 80 |     test("should support project-scoped resource URIs", async () => {
 81 |       memoryManager.setContext({ projectId: "project-scope-test" });
 82 | 
 83 |       await memoryManager.remember("analysis", {
 84 |         projectScope: true,
 85 |         data: "project-specific",
 86 |       });
 87 | 
 88 |       await memoryManager.remember("configuration", {
 89 |         ssg: "hugo",
 90 |         theme: "academic",
 91 |       });
 92 | 
 93 |       // Project-scoped URI pattern
 94 |       const projectUri = "documcp://project/project-scope-test";
 95 |       const configUri = "documcp://config/hugo/project-scope-test";
 96 | 
 97 |       expect(projectUri).toMatch(/^documcp:\/\/project\/[\w-]+$/);
 98 |       expect(configUri).toMatch(/^documcp:\/\/config\/[\w-]+\/[\w-]+$/);
 99 | 
100 |       // Verify project memories can be retrieved by project scope
101 |       const projectMemories = await memoryManager.search({
102 |         projectId: "project-scope-test",
103 |       });
104 |       expect(projectMemories.length).toBeGreaterThan(0);
105 |     });
106 | 
107 |     test("should support template resource URIs", async () => {
108 |       memoryManager.setContext({ projectId: "template-test" });
109 | 
110 |       // Store template-like configurations
111 |       const docusaurusTemplate = await memoryManager.remember(
112 |         "configuration",
113 |         {
114 |           ssg: "docusaurus",
115 |           template: true,
116 |           config: {
117 |             title: "Project Documentation",
118 |             url: "https://project.github.io",
119 |             baseUrl: "/",
120 |             themeConfig: {
121 |               navbar: { title: "Docs" },
122 |             },
123 |           },
124 |         },
125 |         { tags: ["template", "docusaurus"] },
126 |       );
127 | 
128 |       const mkdocsTemplate = await memoryManager.remember(
129 |         "configuration",
130 |         {
131 |           ssg: "mkdocs",
132 |           template: true,
133 |           config: {
134 |             site_name: "Project Documentation",
135 |             theme: { name: "material" },
136 |           },
137 |         },
138 |         { tags: ["template", "mkdocs"] },
139 |       );
140 | 
141 |       // Template resource URIs
142 |       const docusaurusTemplateUri = `documcp://templates/docusaurus/${docusaurusTemplate.id}`;
143 |       const mkdocsTemplateUri = `documcp://templates/mkdocs/${mkdocsTemplate.id}`;
144 | 
145 |       expect(docusaurusTemplateUri).toMatch(
146 |         /^documcp:\/\/templates\/docusaurus\/[a-f0-9-]+$/,
147 |       );
148 |       expect(mkdocsTemplateUri).toMatch(
149 |         /^documcp:\/\/templates\/mkdocs\/[a-f0-9-]+$/,
150 |       );
151 | 
152 |       // Verify template data
153 |       const docusaurusData = await memoryManager.recall(docusaurusTemplate.id);
154 |       const mkdocsData = await memoryManager.recall(mkdocsTemplate.id);
155 | 
156 |       expect(docusaurusData?.data.config.title).toBe("Project Documentation");
157 |       expect(mkdocsData?.data.config.site_name).toBe("Project Documentation");
158 |     });
159 |   });
160 | 
161 |   describe("Resource Content Serialization", () => {
162 |     test("should serialize memory data for resource consumption", async () => {
163 |       memoryManager.setContext({ projectId: "serialization-test" });
164 | 
165 |       const complexData = {
166 |         analysis: {
167 |           language: { primary: "python", secondary: ["javascript"] },
168 |           framework: { name: "django", version: "4.2" },
169 |           dependencies: ["requests", "pandas", "numpy"],
170 |           structure: {
171 |             files: 150,
172 |             directories: 12,
173 |             testCoverage: 85,
174 |           },
175 |         },
176 |         metadata: {
177 |           timestamp: new Date().toISOString(),
178 |           analyst: "memory-system",
179 |           confidence: 0.95,
180 |         },
181 |       };
182 | 
183 |       const entry = await memoryManager.remember("analysis", complexData);
184 | 
185 |       // Simulate resource serialization
186 |       const resourceContent = JSON.stringify(
187 |         {
188 |           uri: `documcp://analysis/${entry.id}`,
189 |           mimeType: "application/json",
190 |           content: entry.data,
191 |           metadata: {
192 |             id: entry.id,
193 |             type: entry.type,
194 |             timestamp: entry.timestamp,
195 |             projectId: entry.metadata.projectId,
196 |           },
197 |         },
198 |         null,
199 |         2,
200 |       );
201 | 
202 |       expect(resourceContent).toContain("documcp://analysis/");
203 |       expect(resourceContent).toContain("application/json");
204 |       expect(resourceContent).toContain("python");
205 |       expect(resourceContent).toContain("django");
206 | 
207 |       // Verify deserialization
208 |       const parsed = JSON.parse(resourceContent);
209 |       expect(parsed.content.analysis.language.primary).toBe("python");
210 |       expect(parsed.content.analysis.framework.name).toBe("django");
211 |       expect(parsed.metadata.type).toBe("analysis");
212 |     });
213 | 
214 |     test("should handle different MIME types for resources", async () => {
215 |       memoryManager.setContext({ projectId: "mime-test" });
216 | 
217 |       // Markdown content
218 |       const markdownContent = `# Project Analysis
219 | 
220 | ## Summary
221 | TypeScript React application with comprehensive testing.
222 | 
223 | ## Recommendations
224 | - Use Docusaurus for documentation
225 | - Enable i18n support
226 | - Configure automated deployment
227 | `;
228 | 
229 |       const markdownEntry = await memoryManager.remember("analysis", {
230 |         content: markdownContent,
231 |         format: "markdown",
232 |         type: "analysis-report",
233 |       });
234 | 
235 |       // YAML configuration
236 |       const yamlContent = `site_name: Project Documentation
237 | site_url: https://project.github.io
238 | repo_url: https://github.com/user/project
239 | theme:
240 |   name: material
241 |   palette:
242 |     primary: blue
243 | nav:
244 |   - Home: index.md
245 |   - API: api.md
246 | `;
247 | 
248 |       const yamlEntry = await memoryManager.remember("configuration", {
249 |         content: yamlContent,
250 |         format: "yaml",
251 |         ssg: "mkdocs",
252 |       });
253 | 
254 |       // Resource representations with different MIME types
255 |       const markdownResource = {
256 |         uri: `documcp://documentation/${markdownEntry.id}`,
257 |         mimeType: "text/markdown",
258 |         content: markdownContent,
259 |       };
260 | 
261 |       const yamlResource = {
262 |         uri: `documcp://config/mkdocs/${yamlEntry.id}`,
263 |         mimeType: "application/x-yaml",
264 |         content: yamlContent,
265 |       };
266 | 
267 |       expect(markdownResource.mimeType).toBe("text/markdown");
268 |       expect(yamlResource.mimeType).toBe("application/x-yaml");
269 |       expect(markdownResource.content).toContain("# Project Analysis");
270 |       expect(yamlResource.content).toContain(
271 |         "site_name: Project Documentation",
272 |       );
273 |     });
274 |   });
275 | 
276 |   describe("Resource Discovery and Listing", () => {
277 |     test("should support resource discovery by category", async () => {
278 |       memoryManager.setContext({ projectId: "discovery-test" });
279 | 
280 |       // Create various types of memories
281 |       await memoryManager.remember(
282 |         "analysis",
283 |         { type: "code-analysis" },
284 |         { tags: ["analysis"] },
285 |       );
286 |       await memoryManager.remember(
287 |         "analysis",
288 |         { type: "dependency-analysis" },
289 |         { tags: ["analysis"] },
290 |       );
291 |       await memoryManager.remember(
292 |         "recommendation",
293 |         { ssg: "docusaurus" },
294 |         { tags: ["recommendation"] },
295 |       );
296 |       await memoryManager.remember(
297 |         "configuration",
298 |         { ssg: "hugo" },
299 |         { tags: ["configuration"] },
300 |       );
301 |       await memoryManager.remember(
302 |         "deployment",
303 |         { status: "success" },
304 |         { tags: ["deployment"] },
305 |       );
306 | 
307 |       // Simulate resource discovery by type (using search without filters)
308 |       const allMemories = await memoryManager.search("");
309 |       const analysisMemories = allMemories.filter((m) => m.type === "analysis");
310 |       const recommendationMemories = allMemories.filter(
311 |         (m) => m.type === "recommendation",
312 |       );
313 | 
314 |       expect(analysisMemories.length).toBeGreaterThanOrEqual(1);
315 |       expect(recommendationMemories.length).toBeGreaterThanOrEqual(1);
316 | 
317 |       // Generate resource URIs for discovery
318 |       const analysisResources = analysisMemories.map((m) => ({
319 |         uri: `documcp://analysis/${m.id}`,
320 |         name: `Analysis ${m.id.slice(-8)}`,
321 |         description: `Repository analysis for ${m.metadata.projectId}`,
322 |         mimeType: "application/json",
323 |       }));
324 | 
325 |       expect(analysisResources.length).toBeGreaterThanOrEqual(1);
326 |       if (analysisResources.length > 0) {
327 |         expect(analysisResources[0].uri).toMatch(
328 |           /^documcp:\/\/analysis\/[a-f0-9-]+$/,
329 |         );
330 |       }
331 |     });
332 | 
333 |     test("should support resource filtering and pagination", async () => {
334 |       memoryManager.setContext({ projectId: "filtering-test" });
335 | 
336 |       // Create many memories for testing pagination
337 |       const memories = [];
338 |       for (let i = 0; i < 15; i++) {
339 |         const entry = await memoryManager.remember(
340 |           "analysis",
341 |           {
342 |             index: i,
343 |             category:
344 |               i % 3 === 0 ? "frontend" : i % 3 === 1 ? "backend" : "fullstack",
345 |           },
346 |           {
347 |             tags: [
348 |               i % 3 === 0 ? "frontend" : i % 3 === 1 ? "backend" : "fullstack",
349 |             ],
350 |           },
351 |         );
352 |         memories.push(entry);
353 |       }
354 | 
355 |       // Simulate resource listing with tag filtering
356 |       const allMemories = await memoryManager.search("");
357 |       const frontendMemories = allMemories.filter(
358 |         (m) => m.metadata.tags && m.metadata.tags.includes("frontend"),
359 |       );
360 | 
361 |       expect(allMemories.length).toBeGreaterThanOrEqual(5);
362 |       if (frontendMemories.length === 0) {
363 |         // If no frontend memories found, that's okay for this test
364 |         expect(frontendMemories.length).toBeGreaterThanOrEqual(0);
365 |       } else {
366 |         expect(frontendMemories.length).toBeGreaterThan(0);
367 |       }
368 | 
369 |       // Simulate pagination
370 |       const pageSize = 5;
371 |       const page1Resources = allMemories.slice(0, pageSize).map((m) => ({
372 |         uri: `documcp://analysis/${m.id}`,
373 |         lastModified: m.timestamp,
374 |       }));
375 |       const page2Resources = allMemories
376 |         .slice(pageSize, pageSize * 2)
377 |         .map((m) => ({
378 |           uri: `documcp://analysis/${m.id}`,
379 |           lastModified: m.timestamp,
380 |         }));
381 | 
382 |       expect(page1Resources.length).toBe(pageSize);
383 |       expect(page2Resources.length).toBe(pageSize);
384 |     });
385 |   });
386 | 
387 |   describe("Resource Caching and Invalidation", () => {
388 |     test("should support resource caching mechanisms", async () => {
389 |       memoryManager.setContext({ projectId: "caching-test" });
390 | 
391 |       const entry = await memoryManager.remember("analysis", {
392 |         cached: true,
393 |         computationTime: 150,
394 |         data: "expensive-computation-result",
395 |       });
396 | 
397 |       // Simulate resource caching metadata
398 |       const resourceWithCache = {
399 |         uri: `documcp://analysis/${entry.id}`,
400 |         content: entry.data,
401 |         caching: {
402 |           etag: `"${entry.id}-${entry.timestamp}"`,
403 |           lastModified: entry.timestamp,
404 |           maxAge: 3600, // 1 hour
405 |           public: true,
406 |         },
407 |       };
408 | 
409 |       expect(resourceWithCache.caching.etag).toContain(entry.id);
410 |       expect(resourceWithCache.caching.lastModified).toBe(entry.timestamp);
411 |       expect(resourceWithCache.caching.maxAge).toBe(3600);
412 | 
413 |       // Test cache invalidation on memory update
414 |       const originalTimestamp = entry.timestamp;
415 | 
416 |       // Simulate memory update (would trigger cache invalidation)
417 |       const updatedData = { ...entry.data, updated: true };
418 |       // Note: MemoryManager.update() method not implemented in current version
419 |       // This test validates the caching concept structure
420 | 
421 |       expect(originalTimestamp).toBeDefined();
422 |       expect(updatedData.updated).toBe(true);
423 |     });
424 | 
425 |     test("should handle conditional resource requests", async () => {
426 |       memoryManager.setContext({ projectId: "conditional-test" });
427 | 
428 |       const entry = await memoryManager.remember("recommendation", {
429 |         recommended: "gatsby",
430 |         confidence: 0.8,
431 |       });
432 | 
433 |       // Simulate conditional request headers
434 |       const etag = `"${entry.id}-${entry.timestamp}"`;
435 |       const lastModified = entry.timestamp;
436 | 
437 |       // Mock conditional request scenarios
438 |       const conditionalRequests = [
439 |         {
440 |           headers: { "if-none-match": etag },
441 |           expectedStatus: 304, // Not Modified
442 |           description: "ETag match should return 304",
443 |         },
444 |         {
445 |           headers: { "if-modified-since": lastModified },
446 |           expectedStatus: 304, // Not Modified
447 |           description: "Not modified since timestamp",
448 |         },
449 |         {
450 |           headers: { "if-none-match": '"different-etag"' },
451 |           expectedStatus: 200, // OK
452 |           description: "Different ETag should return content",
453 |         },
454 |       ];
455 | 
456 |       conditionalRequests.forEach((request) => {
457 |         expect(request.expectedStatus).toBeGreaterThan(0);
458 |         expect(request.description).toBeDefined();
459 |       });
460 | 
461 |       // Verify the actual memory data is available
462 |       const recalled = await memoryManager.recall(entry.id);
463 |       expect(recalled?.data.recommended).toBe("gatsby");
464 |     });
465 |   });
466 | 
467 |   describe("Cross-Resource Relationships", () => {
468 |     test("should expose relationships between memory resources", async () => {
469 |       memoryManager.setContext({ projectId: "relationships-test" });
470 | 
471 |       // Create related memories
472 |       const analysisEntry = await memoryManager.remember("analysis", {
473 |         language: { primary: "typescript" },
474 |         framework: { name: "next" },
475 |       });
476 | 
477 |       const recommendationEntry = await memoryManager.remember(
478 |         "recommendation",
479 |         {
480 |           recommended: "docusaurus",
481 |           confidence: 0.9,
482 |           basedOn: analysisEntry.id,
483 |         },
484 |       );
485 | 
486 |       const configEntry = await memoryManager.remember("configuration", {
487 |         ssg: "docusaurus",
488 |         title: "Next.js Project Docs",
489 |         recommendationId: recommendationEntry.id,
490 |       });
491 | 
492 |       // Create resource relationship graph
493 |       const resourceGraph = {
494 |         analysis: {
495 |           uri: `documcp://analysis/${analysisEntry.id}`,
496 |           relationships: {
497 |             generates: [`documcp://recommendation/${recommendationEntry.id}`],
498 |           },
499 |         },
500 |         recommendation: {
501 |           uri: `documcp://recommendation/${recommendationEntry.id}`,
502 |           relationships: {
503 |             basedOn: [`documcp://analysis/${analysisEntry.id}`],
504 |             generates: [`documcp://config/docusaurus/${configEntry.id}`],
505 |           },
506 |         },
507 |         configuration: {
508 |           uri: `documcp://config/docusaurus/${configEntry.id}`,
509 |           relationships: {
510 |             basedOn: [`documcp://recommendation/${recommendationEntry.id}`],
511 |           },
512 |         },
513 |       };
514 | 
515 |       expect(resourceGraph.analysis.relationships.generates).toContain(
516 |         `documcp://recommendation/${recommendationEntry.id}`,
517 |       );
518 |       expect(resourceGraph.recommendation.relationships.basedOn).toContain(
519 |         `documcp://analysis/${analysisEntry.id}`,
520 |       );
521 |       expect(resourceGraph.configuration.relationships.basedOn).toContain(
522 |         `documcp://recommendation/${recommendationEntry.id}`,
523 |       );
524 |     });
525 | 
526 |     test("should support resource collections and aggregations", async () => {
527 |       memoryManager.setContext({ projectId: "collections-test" });
528 | 
529 |       // Create a collection of related memories
530 |       const projectAnalyses = [];
531 |       for (let i = 0; i < 3; i++) {
532 |         const entry = await memoryManager.remember(
533 |           "analysis",
534 |           {
535 |             version: i + 1,
536 |             language: "javascript",
537 |             timestamp: new Date(Date.now() + i * 1000).toISOString(),
538 |           },
539 |           { tags: ["version-history"] },
540 |         );
541 |         projectAnalyses.push(entry);
542 |       }
543 | 
544 |       // Create collection resource
545 |       const collectionResource = {
546 |         uri: "documcp://collections/project-analysis-history/collections-test",
547 |         mimeType: "application/json",
548 |         content: {
549 |           collection: "project-analysis-history",
550 |           projectId: "collections-test",
551 |           items: projectAnalyses.map((entry) => ({
552 |             uri: `documcp://analysis/${entry.id}`,
553 |             version: entry.data.version,
554 |             timestamp: entry.data.timestamp,
555 |           })),
556 |           metadata: {
557 |             totalItems: projectAnalyses.length,
558 |             lastUpdated: new Date().toISOString(),
559 |             type: "analysis-timeline",
560 |           },
561 |         },
562 |       };
563 | 
564 |       expect(collectionResource.content.items.length).toBe(3);
565 |       expect(collectionResource.content.items[0].version).toBe(1);
566 |       expect(collectionResource.content.items[2].version).toBe(3);
567 |       expect(collectionResource.content.metadata.totalItems).toBe(3);
568 |     });
569 |   });
570 | 
571 |   describe("Integration with Global Memory Manager", () => {
572 |     test("should integrate with global memory manager instance", async () => {
573 |       // Initialize global memory manager
574 |       const globalManager = await initializeMemory();
575 | 
576 |       globalManager.setContext({ projectId: "global-integration-test" });
577 | 
578 |       // Create memory through global manager
579 |       const entry = await globalManager.remember("analysis", {
580 |         global: true,
581 |         integrationTest: true,
582 |       });
583 | 
584 |       // Verify global manager accessibility
585 |       const retrievedManager = getMemoryManager();
586 |       expect(retrievedManager).toBe(globalManager);
587 | 
588 |       // Verify memory is accessible
589 |       const recalled = await retrievedManager?.recall(entry.id);
590 |       expect(recalled?.data.global).toBe(true);
591 |       expect(recalled?.data.integrationTest).toBe(true);
592 | 
593 |       // Generate resource URI using global instance
594 |       const resourceUri = `documcp://analysis/${entry.id}`;
595 |       expect(resourceUri).toMatch(/^documcp:\/\/analysis\/[a-f0-9-]+$/);
596 |     });
597 | 
598 |     test("should maintain consistency across multiple resource requests", async () => {
599 |       const globalManager = await initializeMemory();
600 |       globalManager.setContext({ projectId: "consistency-test" });
601 | 
602 |       // Create initial memory
603 |       const entry = await globalManager.remember("recommendation", {
604 |         recommended: "eleventy",
605 |         confidence: 0.7,
606 |         version: 1,
607 |       });
608 | 
609 |       // First resource request
610 |       const resource1 = {
611 |         uri: `documcp://recommendation/${entry.id}`,
612 |         timestamp: Date.now(),
613 |         etag: `"${entry.id}-${entry.timestamp}"`,
614 |       };
615 | 
616 |       // Second resource request (should be consistent)
617 |       const recalled = await globalManager.recall(entry.id);
618 |       const resource2 = {
619 |         uri: `documcp://recommendation/${entry.id}`,
620 |         timestamp: Date.now(),
621 |         etag: `"${recalled?.id}-${recalled?.timestamp}"`,
622 |       };
623 | 
624 |       expect(resource1.uri).toBe(resource2.uri);
625 |       expect(resource1.etag).toBe(resource2.etag);
626 |       expect(recalled?.data.recommended).toBe("eleventy");
627 |       expect(recalled?.data.version).toBe(1);
628 |     });
629 |   });
630 | });
631 | 
```

--------------------------------------------------------------------------------
/tests/memory/export-import.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Advanced unit tests for Memory Export/Import System
  3 |  * Tests data portability, backup, and migration capabilities
  4 |  * Part of Issue #55 - Advanced Memory Components Unit Tests
  5 |  */
  6 | 
  7 | import { promises as fs } from "fs";
  8 | import path from "path";
  9 | import os from "os";
 10 | import { MemoryManager } from "../../src/memory/manager.js";
 11 | import { JSONLStorage } from "../../src/memory/storage.js";
 12 | import { IncrementalLearningSystem } from "../../src/memory/learning.js";
 13 | import { KnowledgeGraph } from "../../src/memory/knowledge-graph.js";
 14 | import {
 15 |   MemoryExportImportSystem,
 16 |   ExportOptions,
 17 |   ImportOptions,
 18 |   ExportResult,
 19 |   ImportResult,
 20 | } from "../../src/memory/export-import.js";
 21 | 
 22 | describe("MemoryExportImportSystem", () => {
 23 |   let tempDir: string;
 24 |   let exportDir: string;
 25 |   let memoryManager: MemoryManager;
 26 |   let storage: JSONLStorage;
 27 |   let learningSystem: IncrementalLearningSystem;
 28 |   let knowledgeGraph: KnowledgeGraph;
 29 |   let exportImportSystem: MemoryExportImportSystem;
 30 | 
 31 |   beforeEach(async () => {
 32 |     // Create unique temp directories for each test
 33 |     tempDir = path.join(
 34 |       os.tmpdir(),
 35 |       `export-import-test-${Date.now()}-${Math.random()
 36 |         .toString(36)
 37 |         .substr(2, 9)}`,
 38 |     );
 39 |     exportDir = path.join(tempDir, "exports");
 40 |     await fs.mkdir(tempDir, { recursive: true });
 41 |     await fs.mkdir(exportDir, { recursive: true });
 42 | 
 43 |     memoryManager = new MemoryManager(tempDir);
 44 |     await memoryManager.initialize();
 45 | 
 46 |     // Use the memory manager's storage for consistency
 47 |     storage = memoryManager.getStorage();
 48 | 
 49 |     learningSystem = new IncrementalLearningSystem(memoryManager);
 50 |     await learningSystem.initialize();
 51 | 
 52 |     knowledgeGraph = new KnowledgeGraph(memoryManager);
 53 |     await knowledgeGraph.initialize();
 54 | 
 55 |     exportImportSystem = new MemoryExportImportSystem(
 56 |       storage,
 57 |       memoryManager,
 58 |       learningSystem,
 59 |       knowledgeGraph,
 60 |     );
 61 |   });
 62 | 
 63 |   afterEach(async () => {
 64 |     // Cleanup temp directories
 65 |     try {
 66 |       await fs.rm(tempDir, { recursive: true, force: true });
 67 |     } catch (error) {
 68 |       // Ignore cleanup errors
 69 |     }
 70 |   });
 71 | 
 72 |   describe("Export System", () => {
 73 |     beforeEach(async () => {
 74 |       // Set up test data for export tests
 75 |       memoryManager.setContext({ projectId: "export-test-project" });
 76 | 
 77 |       await memoryManager.remember(
 78 |         "analysis",
 79 |         {
 80 |           language: { primary: "typescript" },
 81 |           framework: { name: "react" },
 82 |           metrics: { complexity: "medium", performance: "good" },
 83 |         },
 84 |         {
 85 |           tags: ["frontend", "typescript"],
 86 |           repository: "github.com/test/repo",
 87 |         },
 88 |       );
 89 | 
 90 |       await memoryManager.remember(
 91 |         "recommendation",
 92 |         {
 93 |           recommended: "docusaurus",
 94 |           confidence: 0.9,
 95 |           reasoning: ["typescript support", "react compatibility"],
 96 |         },
 97 |         {
 98 |           tags: ["documentation", "ssg"],
 99 |         },
100 |       );
101 | 
102 |       await memoryManager.remember(
103 |         "deployment",
104 |         {
105 |           status: "success",
106 |           platform: "github-pages",
107 |           duration: 120,
108 |           url: "https://test.github.io",
109 |         },
110 |         {
111 |           tags: ["deployment", "success"],
112 |         },
113 |       );
114 |     });
115 | 
116 |     test("should export memories in JSON format", async () => {
117 |       const exportOptions: ExportOptions = {
118 |         format: "json",
119 |         includeMetadata: true,
120 |         includeLearning: false,
121 |         includeKnowledgeGraph: false,
122 |         compression: "none",
123 |       };
124 | 
125 |       const exportPath = path.join(exportDir, "test-export.json");
126 |       const result = await exportImportSystem.exportMemories(
127 |         exportPath,
128 |         exportOptions,
129 |       );
130 | 
131 |       expect(result).toBeDefined();
132 |       expect(result.success).toBe(true);
133 |       expect(result.entries).toBeGreaterThan(0);
134 |       expect(result.filePath).toBe(exportPath);
135 | 
136 |       // Verify file was created
137 |       const fileExists = await fs
138 |         .access(exportPath)
139 |         .then(() => true)
140 |         .catch(() => false);
141 |       expect(fileExists).toBe(true);
142 | 
143 |       // Verify file content
144 |       const content = await fs.readFile(exportPath, "utf-8");
145 |       const exported = JSON.parse(content);
146 | 
147 |       expect(exported).toHaveProperty("metadata");
148 |       expect(exported).toHaveProperty("memories");
149 |       expect(Array.isArray(exported.memories)).toBe(true);
150 |       expect(exported.memories.length).toBe(3);
151 |     });
152 | 
153 |     test("should export memories in JSONL format", async () => {
154 |       const exportOptions: ExportOptions = {
155 |         format: "jsonl",
156 |         includeMetadata: true,
157 |         includeLearning: false,
158 |         includeKnowledgeGraph: false,
159 |       };
160 | 
161 |       const exportPath = path.join(exportDir, "test-export.jsonl");
162 |       const result = await exportImportSystem.exportMemories(
163 |         exportPath,
164 |         exportOptions,
165 |       );
166 | 
167 |       expect(result.success).toBe(true);
168 |       expect(result.entries).toBe(3);
169 | 
170 |       // Verify JSONL format
171 |       const content = await fs.readFile(exportPath, "utf-8");
172 |       const lines = content.trim().split("\n");
173 | 
174 |       expect(lines.length).toBe(4); // 1 metadata + 3 memory entries
175 |       lines.forEach((line) => {
176 |         expect(() => JSON.parse(line)).not.toThrow();
177 |       });
178 | 
179 |       // First line should be metadata
180 |       const firstLine = JSON.parse(lines[0]);
181 |       expect(firstLine).toHaveProperty("version");
182 |       expect(firstLine).toHaveProperty("exportedAt");
183 |     });
184 | 
185 |     test("should export with filtering options", async () => {
186 |       const exportOptions: ExportOptions = {
187 |         format: "json",
188 |         includeMetadata: true,
189 |         includeLearning: false,
190 |         includeKnowledgeGraph: false,
191 |         filters: {
192 |           types: ["analysis", "recommendation"],
193 |           tags: ["frontend"],
194 |         },
195 |       };
196 | 
197 |       const exportPath = path.join(exportDir, "filtered-export.json");
198 |       const result = await exportImportSystem.exportMemories(
199 |         exportPath,
200 |         exportOptions,
201 |       );
202 | 
203 |       expect(result.success).toBe(true);
204 | 
205 |       const content = await fs.readFile(exportPath, "utf-8");
206 |       const exported = JSON.parse(content);
207 | 
208 |       // Should only include filtered types
209 |       exported.memories.forEach((memory: any) => {
210 |         expect(["analysis", "recommendation"]).toContain(memory.type);
211 |       });
212 |     });
213 | 
214 |     test("should handle compression options", async () => {
215 |       const exportOptions: ExportOptions = {
216 |         format: "json",
217 |         includeMetadata: true,
218 |         includeLearning: false,
219 |         includeKnowledgeGraph: false,
220 |         compression: "gzip",
221 |       };
222 | 
223 |       const exportPath = path.join(exportDir, "compressed-export.json.gz");
224 |       const result = await exportImportSystem.exportMemories(
225 |         exportPath,
226 |         exportOptions,
227 |       );
228 | 
229 |       expect(result.success).toBe(true);
230 |       expect(result.metadata.compression).toBe("gzip");
231 | 
232 |       // Verify compressed file exists
233 |       const fileExists = await fs
234 |         .access(exportPath)
235 |         .then(() => true)
236 |         .catch(() => false);
237 |       expect(fileExists).toBe(true);
238 |     });
239 | 
240 |     test("should export with anonymization", async () => {
241 |       const exportOptions: ExportOptions = {
242 |         format: "json",
243 |         includeMetadata: true,
244 |         includeLearning: false,
245 |         includeKnowledgeGraph: false,
246 |         anonymize: {
247 |           enabled: true,
248 |           fields: ["repository", "url"],
249 |           method: "hash",
250 |         },
251 |       };
252 | 
253 |       const exportPath = path.join(exportDir, "anonymized-export.json");
254 |       const result = await exportImportSystem.exportMemories(
255 |         exportPath,
256 |         exportOptions,
257 |       );
258 | 
259 |       expect(result.success).toBe(true);
260 | 
261 |       const content = await fs.readFile(exportPath, "utf-8");
262 |       const exported = JSON.parse(content);
263 | 
264 |       // Check that specified fields are anonymized
265 |       exported.memories.forEach((memory: any) => {
266 |         if (memory.metadata.repository) {
267 |           // Should be hashed, not original value
268 |           expect(memory.metadata.repository).not.toBe("github.com/test/repo");
269 |         }
270 |         if (memory.data.url) {
271 |           expect(memory.data.url).not.toBe("https://test.github.io");
272 |         }
273 |       });
274 |     });
275 |   });
276 | 
277 |   describe("Import System", () => {
278 |     let testExportPath: string;
279 | 
280 |     beforeEach(async () => {
281 |       // Create test export file for import tests
282 |       testExportPath = path.join(exportDir, "test-import.json");
283 |       const testData = {
284 |         metadata: {
285 |           exportedAt: new Date().toISOString(),
286 |           version: "1.0.0",
287 |           source: "test",
288 |         },
289 |         memories: [
290 |           {
291 |             id: "test-import-1",
292 |             type: "analysis",
293 |             timestamp: new Date().toISOString(),
294 |             data: {
295 |               language: { primary: "python" },
296 |               framework: { name: "django" },
297 |             },
298 |             metadata: {
299 |               projectId: "import-test-project",
300 |               tags: ["backend", "python"],
301 |             },
302 |           },
303 |           {
304 |             id: "test-import-2",
305 |             type: "recommendation",
306 |             timestamp: new Date().toISOString(),
307 |             data: {
308 |               recommended: "mkdocs",
309 |               confidence: 0.8,
310 |             },
311 |             metadata: {
312 |               projectId: "import-test-project",
313 |               tags: ["documentation"],
314 |             },
315 |           },
316 |         ],
317 |       };
318 | 
319 |       await fs.writeFile(testExportPath, JSON.stringify(testData, null, 2));
320 |     });
321 | 
322 |     test("should import memories from JSON file", async () => {
323 |       const importOptions: ImportOptions = {
324 |         format: "json",
325 |         mode: "append",
326 |         validation: "strict",
327 |         conflictResolution: "skip",
328 |         backup: false,
329 |         dryRun: false,
330 |       };
331 | 
332 |       const result = await exportImportSystem.importMemories(
333 |         testExportPath,
334 |         importOptions,
335 |       );
336 | 
337 |       expect(result).toBeDefined();
338 |       expect(result.success).toBe(true);
339 |       expect(result.imported).toBe(2);
340 |       expect(result.skipped).toBe(0);
341 |       expect(result.errors).toBe(0);
342 | 
343 |       // Verify memories were imported
344 |       const searchResults = await memoryManager.search("import-test-project");
345 |       expect(searchResults.length).toBeGreaterThanOrEqual(2);
346 |     });
347 | 
348 |     test("should handle import conflicts", async () => {
349 |       // First import
350 |       const importOptions: ImportOptions = {
351 |         format: "json",
352 |         mode: "append",
353 |         validation: "loose",
354 |         conflictResolution: "skip",
355 |         backup: false,
356 |         dryRun: false,
357 |       };
358 | 
359 |       await exportImportSystem.importMemories(testExportPath, importOptions);
360 | 
361 |       // Second import with same data (should skip duplicates)
362 |       const result2 = await exportImportSystem.importMemories(
363 |         testExportPath,
364 |         importOptions,
365 |       );
366 | 
367 |       expect(result2.success).toBe(true);
368 |       expect(result2.skipped).toBeGreaterThan(0);
369 |     });
370 | 
371 |     test("should validate imported data", async () => {
372 |       // Create invalid test data
373 |       const invalidDataPath = path.join(exportDir, "invalid-import.json");
374 |       const invalidData = {
375 |         memories: [
376 |           {
377 |             // Missing required fields
378 |             type: "invalid",
379 |             data: null,
380 |           },
381 |         ],
382 |       };
383 | 
384 |       await fs.writeFile(invalidDataPath, JSON.stringify(invalidData));
385 | 
386 |       const importOptions: ImportOptions = {
387 |         format: "json",
388 |         mode: "append",
389 |         validation: "strict",
390 |         conflictResolution: "skip",
391 |         backup: false,
392 |         dryRun: false,
393 |       };
394 | 
395 |       const result = await exportImportSystem.importMemories(
396 |         invalidDataPath,
397 |         importOptions,
398 |       );
399 | 
400 |       expect(result.success).toBe(false);
401 |       expect(result.errors).toBeGreaterThan(0);
402 |       expect(Array.isArray(result.errorDetails)).toBe(true);
403 |       expect(result.errorDetails.length).toBeGreaterThan(0);
404 |     });
405 | 
406 |     test("should perform dry run import", async () => {
407 |       const importOptions: ImportOptions = {
408 |         format: "json",
409 |         mode: "append",
410 |         validation: "strict",
411 |         conflictResolution: "skip",
412 |         backup: false,
413 |         dryRun: true,
414 |       };
415 | 
416 |       const result = await exportImportSystem.importMemories(
417 |         testExportPath,
418 |         importOptions,
419 |       );
420 | 
421 |       expect(result.success).toBe(true);
422 |       // In dry run mode, nothing should be actually imported
423 |       expect(result.imported).toBe(0); // Nothing actually imported in dry run
424 | 
425 |       // Verify no memories were actually imported
426 |       const searchResults = await memoryManager.search("import-test-project");
427 |       expect(searchResults.length).toBe(0);
428 |     });
429 | 
430 |     test("should create backup before import", async () => {
431 |       // Add some existing data
432 |       memoryManager.setContext({ projectId: "existing-data" });
433 |       await memoryManager.remember("analysis", { existing: true });
434 | 
435 |       const importOptions: ImportOptions = {
436 |         format: "json",
437 |         mode: "replace",
438 |         validation: "loose",
439 |         conflictResolution: "overwrite",
440 |         backup: true,
441 |         dryRun: false,
442 |       };
443 | 
444 |       const result = await exportImportSystem.importMemories(
445 |         testExportPath,
446 |         importOptions,
447 |       );
448 | 
449 |       expect(result.success).toBe(true);
450 |       // Backup creation is handled internally during import process
451 |       // Verify that the import was successful
452 |       expect(result.success).toBe(true);
453 |     });
454 |   });
455 | 
456 |   describe("Data Migration and Transformation", () => {
457 |     test("should transform data during import", async () => {
458 |       const sourceDataPath = path.join(exportDir, "source-data.json");
459 |       const sourceData = {
460 |         memories: [
461 |           {
462 |             id: "transform-test-1",
463 |             type: "analysis",
464 |             timestamp: new Date().toISOString(),
465 |             data: {
466 |               // Old format
467 |               lang: "typescript",
468 |               fw: "react",
469 |             },
470 |             metadata: {
471 |               project: "transform-test",
472 |             },
473 |           },
474 |         ],
475 |       };
476 | 
477 |       await fs.writeFile(sourceDataPath, JSON.stringify(sourceData));
478 | 
479 |       const importOptions: ImportOptions = {
480 |         format: "json",
481 |         mode: "append",
482 |         validation: "loose",
483 |         conflictResolution: "skip",
484 |         backup: false,
485 |         dryRun: false,
486 |         mapping: {
487 |           "data.lang": "data.language.primary",
488 |           "data.fw": "data.framework.name",
489 |           "metadata.project": "metadata.projectId",
490 |         },
491 |         transformation: {
492 |           enabled: true,
493 |           rules: [
494 |             {
495 |               field: "data.language.primary",
496 |               operation: "transform",
497 |               params: { value: "typescript" },
498 |             },
499 |           ],
500 |         },
501 |       };
502 | 
503 |       const result = await exportImportSystem.importMemories(
504 |         sourceDataPath,
505 |         importOptions,
506 |       );
507 | 
508 |       expect(result.success).toBe(true);
509 |       // Transformation should result in successful import
510 |       expect(result.imported).toBeGreaterThan(0);
511 | 
512 |       // Verify transformation worked
513 |       const imported = await memoryManager.search("transform-test");
514 |       expect(imported.length).toBe(1);
515 |       expect(imported[0].data.language?.primary).toBe("typescript");
516 |       expect(imported[0].data.framework?.name).toBe("react");
517 |       expect(imported[0].metadata.projectId).toBe("transform-test");
518 |     });
519 | 
520 |     test("should migrate between different versions", async () => {
521 |       const oldVersionData = {
522 |         version: "0.1.0",
523 |         memories: [
524 |           {
525 |             id: "migration-test-1",
526 |             type: "analysis",
527 |             timestamp: new Date().toISOString(),
528 |             // Old schema
529 |             project: "migration-test",
530 |             language: "python",
531 |             recommendation: "mkdocs",
532 |           },
533 |         ],
534 |       };
535 | 
536 |       const migrationPath = path.join(exportDir, "migration-data.json");
537 |       await fs.writeFile(migrationPath, JSON.stringify(oldVersionData));
538 | 
539 |       // Create a simple migration plan for testing
540 |       const migrationPlan = await exportImportSystem.createMigrationPlan(
541 |         { system: "OldVersion", fields: {} },
542 |         { system: "DocuMCP", fields: {} },
543 |       );
544 | 
545 |       const result = await exportImportSystem.executeMigration(
546 |         migrationPath,
547 |         migrationPlan,
548 |       );
549 | 
550 |       expect(result.success).toBe(true);
551 |       expect(result.imported).toBeGreaterThan(0);
552 | 
553 |       // Verify migration created proper structure
554 |       const migrated = await memoryManager.search("migration-test");
555 |       expect(migrated.length).toBe(1);
556 |       expect(migrated[0]).toHaveProperty("data");
557 |       expect(migrated[0]).toHaveProperty("metadata");
558 |     });
559 |   });
560 | 
561 |   describe("Bulk Operations and Performance", () => {
562 |     test("should handle large-scale export efficiently", async () => {
563 |       memoryManager.setContext({ projectId: "bulk-export-test" });
564 | 
565 |       // Create many memories
566 |       const promises = Array.from({ length: 100 }, (_, i) =>
567 |         memoryManager.remember("analysis", {
568 |           index: i,
569 |           content: `bulk test content ${i}`,
570 |         }),
571 |       );
572 | 
573 |       await Promise.all(promises);
574 | 
575 |       const exportOptions: ExportOptions = {
576 |         format: "jsonl",
577 |         includeMetadata: true,
578 |         includeLearning: false,
579 |         includeKnowledgeGraph: false,
580 |       };
581 | 
582 |       const startTime = Date.now();
583 |       const exportPath = path.join(exportDir, "bulk-export.jsonl");
584 |       const result = await exportImportSystem.exportMemories(
585 |         exportPath,
586 |         exportOptions,
587 |       );
588 |       const exportTime = Date.now() - startTime;
589 | 
590 |       expect(result.success).toBe(true);
591 |       expect(result.entries).toBe(100);
592 |       expect(exportTime).toBeLessThan(10000); // Should complete within 10 seconds
593 |     });
594 | 
595 |     test("should provide progress updates for long operations", async () => {
596 |       memoryManager.setContext({ projectId: "progress-test" });
597 | 
598 |       // Add test data
599 |       await memoryManager.remember("analysis", { progressTest: true });
600 | 
601 |       const progressUpdates: number[] = [];
602 | 
603 |       exportImportSystem.on("export-progress", (progress: number) => {
604 |         progressUpdates.push(progress);
605 |       });
606 | 
607 |       const exportOptions: ExportOptions = {
608 |         format: "json",
609 |         includeMetadata: true,
610 |         includeLearning: false,
611 |         includeKnowledgeGraph: false,
612 |       };
613 | 
614 |       const exportPath = path.join(exportDir, "progress-export.json");
615 |       await exportImportSystem.exportMemories(exportPath, exportOptions);
616 | 
617 |       // Progress updates might not be generated for small datasets
618 |       expect(Array.isArray(progressUpdates)).toBe(true);
619 |     });
620 |   });
621 | 
622 |   describe("Error Handling and Recovery", () => {
623 |     test("should handle file system errors gracefully", async () => {
624 |       const invalidPath = "/invalid/path/that/does/not/exist/export.json";
625 | 
626 |       const exportOptions: ExportOptions = {
627 |         format: "json",
628 |         includeMetadata: true,
629 |         includeLearning: false,
630 |         includeKnowledgeGraph: false,
631 |       };
632 | 
633 |       const result = await exportImportSystem.exportMemories(
634 |         invalidPath,
635 |         exportOptions,
636 |       );
637 | 
638 |       expect(result.success).toBe(false);
639 |       expect(result.errors.length).toBeGreaterThan(0);
640 |     });
641 | 
642 |     test("should recover from partial import failures", async () => {
643 |       const partialDataPath = path.join(exportDir, "partial-data.json");
644 |       const partialData = {
645 |         memories: [
646 |           {
647 |             id: "valid-memory",
648 |             type: "analysis",
649 |             timestamp: new Date().toISOString(),
650 |             data: { valid: true },
651 |             metadata: { projectId: "partial-test" },
652 |           },
653 |           {
654 |             // Invalid memory
655 |             id: "invalid-memory",
656 |             type: null,
657 |             data: null,
658 |           },
659 |         ],
660 |       };
661 | 
662 |       await fs.writeFile(partialDataPath, JSON.stringify(partialData));
663 | 
664 |       const importOptions: ImportOptions = {
665 |         format: "json",
666 |         mode: "append",
667 |         validation: "loose",
668 |         conflictResolution: "skip",
669 |         backup: false,
670 |         dryRun: false,
671 |       };
672 | 
673 |       const result = await exportImportSystem.importMemories(
674 |         partialDataPath,
675 |         importOptions,
676 |       );
677 | 
678 |       expect(result.imported).toBe(1); // Only valid memory imported
679 |       expect(result.errors).toBe(1); // One error for invalid memory
680 |       expect(Array.isArray(result.errorDetails)).toBe(true);
681 |       expect(result.errorDetails.length).toBe(1);
682 |     });
683 | 
684 |     test("should validate data integrity", async () => {
685 |       const corruptDataPath = path.join(exportDir, "corrupt-data.json");
686 |       await fs.writeFile(corruptDataPath, "{ invalid json");
687 | 
688 |       const importOptions: ImportOptions = {
689 |         format: "json",
690 |         mode: "append",
691 |         validation: "strict",
692 |         conflictResolution: "skip",
693 |         backup: false,
694 |         dryRun: false,
695 |       };
696 | 
697 |       const result = await exportImportSystem.importMemories(
698 |         corruptDataPath,
699 |         importOptions,
700 |       );
701 | 
702 |       expect(result.success).toBe(false);
703 |       expect(result.errors).toBeGreaterThan(0);
704 |     });
705 |   });
706 | });
707 | 
```

--------------------------------------------------------------------------------
/tests/memory/mcp-tool-persistence.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Memory MCP Tool Persistence Tests
  3 |  * Tests persistence and state management for MCP tool memory integration
  4 |  * Part of Issue #56 - Memory MCP Tools Integration Tests
  5 |  */
  6 | 
  7 | import { promises as fs } from "fs";
  8 | import path from "path";
  9 | import os from "os";
 10 | import { MemoryManager } from "../../src/memory/manager.js";
 11 | import {
 12 |   initializeMemory,
 13 |   rememberAnalysis,
 14 |   rememberRecommendation,
 15 |   rememberDeployment,
 16 |   exportMemories,
 17 |   importMemories,
 18 |   cleanupOldMemories,
 19 |   resetMemoryManager,
 20 | } from "../../src/memory/integration.js";
 21 | 
 22 | describe("Memory MCP Tool Persistence", () => {
 23 |   let tempDir: string;
 24 |   let memoryManager: MemoryManager;
 25 | 
 26 |   beforeEach(async () => {
 27 |     tempDir = path.join(
 28 |       os.tmpdir(),
 29 |       `memory-persistence-test-${Date.now()}-${Math.random()
 30 |         .toString(36)
 31 |         .substr(2, 9)}`,
 32 |     );
 33 |     await fs.mkdir(tempDir, { recursive: true });
 34 | 
 35 |     // Reset the global memory manager to use the test directory
 36 |     await resetMemoryManager(tempDir);
 37 |     memoryManager = (await initializeMemory())!;
 38 |   });
 39 | 
 40 |   afterEach(async () => {
 41 |     try {
 42 |       await resetMemoryManager(); // Reset to default
 43 |       await fs.rm(tempDir, { recursive: true, force: true });
 44 |     } catch (error) {
 45 |       // Ignore cleanup errors
 46 |     }
 47 |   });
 48 | 
 49 |   describe("Tool State Persistence", () => {
 50 |     test("should persist tool analysis results across sessions", async () => {
 51 |       memoryManager.setContext({ projectId: "persistence-test" });
 52 | 
 53 |       // Create analysis data from tool
 54 |       const analysisData = {
 55 |         projectId: "persistence-test",
 56 |         toolVersion: "1.0.0",
 57 |         language: { primary: "rust", secondary: ["javascript"] },
 58 |         framework: { name: "actix-web", version: "4.0" },
 59 |         stats: {
 60 |           files: 75,
 61 |           directories: 12,
 62 |           linesOfCode: 8500,
 63 |           testCoverage: 90,
 64 |         },
 65 |         dependencies: {
 66 |           ecosystem: "rust",
 67 |           packages: ["serde", "tokio", "actix-web"],
 68 |           devPackages: ["criterion", "proptest"],
 69 |         },
 70 |         documentation: {
 71 |           hasReadme: true,
 72 |           hasContributing: true,
 73 |           hasLicense: true,
 74 |           estimatedComplexity: "moderate",
 75 |         },
 76 |         timestamp: new Date().toISOString(),
 77 |       };
 78 | 
 79 |       const memoryId = await rememberAnalysis(
 80 |         "/test/rust-project",
 81 |         analysisData,
 82 |       );
 83 | 
 84 |       // Simulate session restart by creating new manager with same directory
 85 |       const newManager = new MemoryManager(tempDir);
 86 |       await newManager.initialize();
 87 | 
 88 |       // Verify persistence
 89 |       const recalled = await newManager.recall(memoryId);
 90 | 
 91 |       expect(recalled).not.toBeNull();
 92 |       expect(recalled?.data.language.primary).toBe("rust");
 93 |       expect(recalled?.data.framework.name).toBe("actix-web");
 94 |       expect(recalled?.data.stats.files).toBe(75);
 95 |       expect(recalled?.data.dependencies.packages).toContain("actix-web");
 96 |       expect(recalled?.metadata.projectId).toBe("persistence-test");
 97 |     });
 98 | 
 99 |     test("should persist recommendation chains across tool invocations", async () => {
100 |       memoryManager.setContext({ projectId: "chain-test" });
101 | 
102 |       // Create analysis
103 |       const analysisData = {
104 |         projectId: "chain-test",
105 |         language: { primary: "python" },
106 |         framework: { name: "fastapi" },
107 |         documentation: { type: "api" },
108 |       };
109 |       const analysisId = await rememberAnalysis(
110 |         "/test/api-project",
111 |         analysisData,
112 |       );
113 | 
114 |       // Create first recommendation
115 |       const recommendation1 = {
116 |         recommended: "mkdocs",
117 |         confidence: 0.8,
118 |         reasoning: ["Python ecosystem", "API documentation"],
119 |         toolVersion: "1.0.0",
120 |         analysisId,
121 |       };
122 |       const rec1Id = await rememberRecommendation(analysisId, recommendation1);
123 | 
124 |       // Create updated recommendation after user feedback
125 |       const recommendation2 = {
126 |         recommended: "sphinx",
127 |         confidence: 0.9,
128 |         reasoning: ["Better API doc generation", "Python native"],
129 |         toolVersion: "1.1.0",
130 |         analysisId,
131 |         previousRecommendation: rec1Id,
132 |       };
133 |       const rec2Id = await rememberRecommendation(analysisId, recommendation2);
134 | 
135 |       // Verify chain persistence
136 |       const analysis = await memoryManager.recall(analysisId);
137 |       const rec1 = await memoryManager.recall(rec1Id);
138 |       const rec2 = await memoryManager.recall(rec2Id);
139 | 
140 |       expect(analysis?.data.language.primary).toBe("python");
141 |       expect(rec1?.data.recommended).toBe("mkdocs");
142 |       expect(rec2?.data.recommended).toBe("sphinx");
143 |       expect(rec2?.data.previousRecommendation).toBe(rec1Id);
144 | 
145 |       // Verify all have same project context
146 |       expect(analysis?.metadata.projectId).toBe("chain-test");
147 |       expect(rec1?.metadata.projectId).toBe("chain-test");
148 |       expect(rec2?.metadata.projectId).toBe("chain-test");
149 |     });
150 | 
151 |     test("should maintain deployment history with status tracking", async () => {
152 |       memoryManager.setContext({ projectId: "deployment-history" });
153 | 
154 |       const deployments = [
155 |         {
156 |           ssg: "hugo",
157 |           status: "failed",
158 |           error: "Build timeout",
159 |           duration: 300,
160 |           attempt: 1,
161 |           timestamp: new Date(Date.now() - 3600000).toISOString(), // 1 hour ago
162 |         },
163 |         {
164 |           ssg: "hugo",
165 |           status: "failed",
166 |           error: "Missing dependency",
167 |           duration: 120,
168 |           attempt: 2,
169 |           timestamp: new Date(Date.now() - 1800000).toISOString(), // 30 minutes ago
170 |         },
171 |         {
172 |           ssg: "hugo",
173 |           status: "success",
174 |           url: "https://project.github.io",
175 |           duration: 180,
176 |           attempt: 3,
177 |           timestamp: new Date().toISOString(),
178 |         },
179 |       ];
180 | 
181 |       const deploymentIds = [];
182 |       for (const deployment of deployments) {
183 |         const id = await rememberDeployment(
184 |           "github.com/test/deployment-project",
185 |           deployment,
186 |         );
187 |         deploymentIds.push(id);
188 |       }
189 | 
190 |       // Verify deployment history is preserved
191 |       const allDeployments = await Promise.all(
192 |         deploymentIds.map((id) => memoryManager.recall(id)),
193 |       );
194 | 
195 |       expect(allDeployments.length).toBe(3);
196 |       expect(allDeployments[0]?.data.status).toBe("failed");
197 |       expect(allDeployments[0]?.data.attempt).toBe(1);
198 |       expect(allDeployments[1]?.data.status).toBe("failed");
199 |       expect(allDeployments[1]?.data.attempt).toBe(2);
200 |       expect(allDeployments[2]?.data.status).toBe("success");
201 |       expect(allDeployments[2]?.data.attempt).toBe(3);
202 | 
203 |       // Verify chronological ordering can be reconstructed
204 |       const sortedByTimestamp = allDeployments.sort(
205 |         (a, b) =>
206 |           new Date(a!.data.timestamp).getTime() -
207 |           new Date(b!.data.timestamp).getTime(),
208 |       );
209 | 
210 |       expect(sortedByTimestamp[0]?.data.attempt).toBe(1);
211 |       expect(sortedByTimestamp[2]?.data.attempt).toBe(3);
212 |     });
213 |   });
214 | 
215 |   describe("Cross-Session State Recovery", () => {
216 |     test("should recover tool context after process restart", async () => {
217 |       memoryManager.setContext({
218 |         projectId: "context-recovery",
219 |         repository: "github.com/test/context-project",
220 |         branch: "main",
221 |         user: "test-user",
222 |         session: "session-1",
223 |       });
224 | 
225 |       // Create memories with rich context
226 |       await memoryManager.remember("analysis", {
227 |         sessionActive: true,
228 |         toolState: "initialized",
229 |         contextData: "session-specific",
230 |       });
231 | 
232 |       await memoryManager.remember("configuration", {
233 |         ssg: "docusaurus",
234 |         userPreferences: {
235 |           theme: "dark",
236 |           language: "en",
237 |           features: ["search", "versions"],
238 |         },
239 |       });
240 | 
241 |       // Simulate process restart
242 |       const newManager = new MemoryManager(tempDir);
243 |       await newManager.initialize();
244 | 
245 |       // Recover project memories
246 |       const projectMemories = await newManager.search({
247 |         projectId: "context-recovery",
248 |       });
249 | 
250 |       expect(projectMemories.length).toBe(2);
251 | 
252 |       const analysisMemory = projectMemories.find((m) => m.type === "analysis");
253 |       const configMemory = projectMemories.find(
254 |         (m) => m.type === "configuration",
255 |       );
256 | 
257 |       expect(analysisMemory?.data.sessionActive).toBe(true);
258 |       expect(configMemory?.data.ssg).toBe("docusaurus");
259 |       expect(configMemory?.data.userPreferences.theme).toBe("dark");
260 | 
261 |       // Verify context metadata is preserved
262 |       expect(analysisMemory?.metadata.repository).toBe(
263 |         "github.com/test/context-project",
264 |       );
265 |       expect(configMemory?.metadata.projectId).toBe("context-recovery");
266 |     });
267 | 
268 |     test("should handle concurrent tool operations persistence", async () => {
269 |       memoryManager.setContext({ projectId: "concurrent-ops" });
270 | 
271 |       // Simulate concurrent tool operations
272 |       const operations = Array.from({ length: 10 }, (_, i) => ({
273 |         type:
274 |           i % 3 === 0
275 |             ? "analysis"
276 |             : i % 3 === 1
277 |               ? "recommendation"
278 |               : "deployment",
279 |         data: {
280 |           operationId: i,
281 |           timestamp: new Date(Date.now() + i * 1000).toISOString(),
282 |           concurrentTest: true,
283 |         },
284 |       }));
285 | 
286 |       // Execute operations concurrently
287 |       const promises = operations.map(async (op, index) => {
288 |         if (op.type === "analysis") {
289 |           return rememberAnalysis(`/test/concurrent-${index}`, op.data);
290 |         } else if (op.type === "recommendation") {
291 |           return rememberRecommendation(`analysis-${index}`, {
292 |             ...op.data,
293 |             recommended: "jekyll",
294 |           });
295 |         } else {
296 |           return rememberDeployment(`github.com/test/concurrent-${index}`, {
297 |             ...op.data,
298 |             status: "success",
299 |           });
300 |         }
301 |       });
302 | 
303 |       const memoryIds = await Promise.all(promises);
304 | 
305 |       // Verify all operations were persisted
306 |       expect(memoryIds.length).toBe(10);
307 |       expect(memoryIds.every((id) => typeof id === "string")).toBe(true);
308 | 
309 |       // Verify no data corruption occurred
310 |       const recalledMemories = await Promise.all(
311 |         memoryIds.map((id) => memoryManager.recall(id)),
312 |       );
313 | 
314 |       expect(recalledMemories.every((m) => m !== null)).toBe(true);
315 |       expect(
316 |         recalledMemories.every((m) => m?.data.concurrentTest === true),
317 |       ).toBe(true);
318 | 
319 |       // Verify operation IDs are preserved and unique
320 |       const operationIds = recalledMemories.map((m) => m?.data.operationId);
321 |       const uniqueIds = new Set(operationIds);
322 |       expect(uniqueIds.size).toBe(10);
323 |     });
324 |   });
325 | 
326 |   describe("Data Export and Import for Tools", () => {
327 |     test("should export tool memories for backup and migration", async () => {
328 |       memoryManager.setContext({ projectId: "export-test" });
329 | 
330 |       // Create comprehensive tool data
331 |       const analysisId = await rememberAnalysis("/test/export-project", {
332 |         projectId: "export-test",
333 |         language: { primary: "go" },
334 |         framework: { name: "gin" },
335 |         exportTest: true,
336 |       });
337 | 
338 |       await memoryManager.remember(
339 |         "recommendation",
340 |         {
341 |           recommended: "hugo",
342 |           confidence: 0.95,
343 |           exportTest: true,
344 |         },
345 |         {
346 |           ssg: "hugo",
347 |           tags: ["recommendation", "hugo"],
348 |         },
349 |       );
350 | 
351 |       // Temporarily store deployment with correct project context
352 |       const deploymentData = {
353 |         ssg: "hugo",
354 |         status: "success",
355 |         exportTest: true,
356 |       };
357 | 
358 |       await memoryManager.remember("deployment", deploymentData, {
359 |         repository: "github.com/test/export-project",
360 |         ssg: deploymentData.ssg,
361 |         tags: ["deployment", deploymentData.status, deploymentData.ssg],
362 |       });
363 | 
364 |       // Export memories for this project only
365 |       const exportedData = await exportMemories("json", "export-test");
366 | 
367 |       expect(exportedData).toBeDefined();
368 |       expect(typeof exportedData).toBe("string");
369 | 
370 |       // Verify export contains our test data
371 |       const parsed = JSON.parse(exportedData);
372 |       expect(Array.isArray(parsed)).toBe(true);
373 | 
374 |       const exportTestMemories = parsed.filter(
375 |         (m: any) => m.data.exportTest === true,
376 |       );
377 | 
378 |       expect(exportTestMemories.length).toBe(3);
379 | 
380 |       // Verify different memory types are present
381 |       const types = new Set(exportTestMemories.map((m: any) => m.type));
382 |       expect(types.has("analysis")).toBe(true);
383 |       expect(types.has("recommendation")).toBe(true);
384 |       expect(types.has("deployment")).toBe(true);
385 |     });
386 | 
387 |     test("should import tool memories with data validation", async () => {
388 |       // Create export data
389 |       const exportData = JSON.stringify([
390 |         {
391 |           id: "import-analysis-1",
392 |           type: "analysis",
393 |           data: {
394 |             projectId: "import-test",
395 |             language: { primary: "javascript" },
396 |             framework: { name: "svelte" },
397 |             importTest: true,
398 |           },
399 |           metadata: {
400 |             projectId: "import-test",
401 |             tags: ["analysis", "javascript", "svelte"],
402 |           },
403 |           timestamp: new Date().toISOString(),
404 |         },
405 |         {
406 |           id: "import-recommendation-1",
407 |           type: "recommendation",
408 |           data: {
409 |             recommended: "sveltekit",
410 |             confidence: 0.88,
411 |             importTest: true,
412 |           },
413 |           metadata: {
414 |             projectId: "import-test",
415 |             ssg: "sveltekit",
416 |             tags: ["recommendation", "sveltekit"],
417 |           },
418 |           timestamp: new Date().toISOString(),
419 |         },
420 |       ]);
421 | 
422 |       // Import the data
423 |       const importedCount = await importMemories(exportData, "json");
424 | 
425 |       expect(importedCount).toBe(2);
426 | 
427 |       // Verify imported data is accessible
428 |       const importedMemories = await memoryManager.search({
429 |         projectId: "import-test",
430 |       });
431 | 
432 |       expect(importedMemories.length).toBe(2);
433 | 
434 |       const analysis = importedMemories.find((m) => m.type === "analysis");
435 |       const recommendation = importedMemories.find(
436 |         (m) => m.type === "recommendation",
437 |       );
438 | 
439 |       expect(analysis?.data.language.primary).toBe("javascript");
440 |       expect(analysis?.data.framework.name).toBe("svelte");
441 |       expect(recommendation?.data.recommended).toBe("sveltekit");
442 |       expect(recommendation?.data.confidence).toBe(0.88);
443 |     });
444 | 
445 |     test("should handle tool memory migration between environments", async () => {
446 |       memoryManager.setContext({ projectId: "migration-test" });
447 | 
448 |       // Create source environment data
449 |       const sourceData = [
450 |         {
451 |           projectId: "migration-project",
452 |           language: { primary: "python" },
453 |           framework: { name: "flask" },
454 |           environment: "development",
455 |         },
456 |         {
457 |           projectId: "migration-project",
458 |           language: { primary: "python" },
459 |           framework: { name: "flask" },
460 |           environment: "staging",
461 |         },
462 |         {
463 |           projectId: "migration-project",
464 |           language: { primary: "python" },
465 |           framework: { name: "flask" },
466 |           environment: "production",
467 |         },
468 |       ];
469 | 
470 |       // Store memories in source environment
471 |       const sourceIds = await Promise.all(
472 |         sourceData.map((data) =>
473 |           rememberAnalysis("/test/migration-project", data),
474 |         ),
475 |       );
476 | 
477 |       expect(sourceIds.length).toBe(3);
478 | 
479 |       // Export from source (migration project only)
480 |       const exportedData = await exportMemories("json", "migration-project");
481 | 
482 |       // Create target environment (new directory)
483 |       const targetDir = path.join(tempDir, "target-environment");
484 |       await fs.mkdir(targetDir, { recursive: true });
485 | 
486 |       const targetManager = new MemoryManager(targetDir);
487 |       await targetManager.initialize();
488 | 
489 |       // Import to target environment
490 |       const importedCount = await targetManager.import(exportedData, "json");
491 | 
492 |       expect(importedCount).toBe(3);
493 | 
494 |       // Verify migration integrity
495 |       const migratedMemories = await targetManager.search({
496 |         projectId: "migration-project",
497 |       });
498 | 
499 |       expect(migratedMemories.length).toBe(3);
500 | 
501 |       const environments = migratedMemories.map((m) => m.data.environment);
502 |       expect(environments).toContain("development");
503 |       expect(environments).toContain("staging");
504 |       expect(environments).toContain("production");
505 |     });
506 |   });
507 | 
508 |   describe("Memory Cleanup and Maintenance", () => {
509 |     test("should cleanup old tool memories automatically", async () => {
510 |       memoryManager.setContext({ projectId: "cleanup-test" });
511 | 
512 |       // Create old memories (simulate by manually setting timestamps)
513 |       const oldTimestamp = new Date(
514 |         Date.now() - 45 * 24 * 60 * 60 * 1000,
515 |       ).toISOString(); // 45 days ago
516 |       const recentTimestamp = new Date(
517 |         Date.now() - 5 * 24 * 60 * 60 * 1000,
518 |       ).toISOString(); // 5 days ago
519 | 
520 |       // Create entries directly via storage to control timestamps
521 |       await memoryManager.getStorage().append({
522 |         type: "analysis",
523 |         timestamp: oldTimestamp,
524 |         data: {
525 |           projectId: "cleanup-test",
526 |           age: "old",
527 |         },
528 |         metadata: {
529 |           projectId: "cleanup-test",
530 |         },
531 |       });
532 | 
533 |       await memoryManager.getStorage().append({
534 |         type: "analysis",
535 |         timestamp: recentTimestamp,
536 |         data: {
537 |           projectId: "cleanup-test",
538 |           age: "recent",
539 |         },
540 |         metadata: {
541 |           projectId: "cleanup-test",
542 |         },
543 |       });
544 | 
545 |       await memoryManager.getStorage().append({
546 |         type: "recommendation",
547 |         timestamp: oldTimestamp,
548 |         data: {
549 |           recommended: "hugo",
550 |           age: "old",
551 |         },
552 |         metadata: {
553 |           projectId: "cleanup-test",
554 |         },
555 |       });
556 | 
557 |       // Verify all memories exist before cleanup
558 |       const beforeCleanup = await memoryManager.search({
559 |         projectId: "cleanup-test",
560 |       });
561 |       expect(beforeCleanup.length).toBe(3);
562 | 
563 |       // Cleanup memories older than 30 days
564 |       const cleanedCount = await cleanupOldMemories(30);
565 | 
566 |       expect(cleanedCount).toBeGreaterThanOrEqual(2); // Should cleanup the 2 old memories
567 | 
568 |       // Verify recent memories are preserved
569 |       const afterCleanup = await memoryManager.search({
570 |         projectId: "cleanup-test",
571 |       });
572 |       const recentMemories = afterCleanup.filter(
573 |         (m) => m.data.age === "recent",
574 |       );
575 | 
576 |       expect(recentMemories.length).toBe(1);
577 |       expect(recentMemories[0].data.age).toBe("recent");
578 |     });
579 | 
580 |     test("should optimize memory storage for tool performance", async () => {
581 |       memoryManager.setContext({ projectId: "optimization-test" });
582 | 
583 |       // Create many memories to test optimization
584 |       const memoryPromises = Array.from({ length: 100 }, (_, i) =>
585 |         memoryManager.remember("analysis", {
586 |           index: i,
587 |           data: `optimization-test-${i}`,
588 |           category: i % 10 === 0 ? "heavy" : "light",
589 |         }),
590 |       );
591 | 
592 |       await Promise.all(memoryPromises);
593 | 
594 |       // Measure search performance
595 |       const startTime = Date.now();
596 |       const searchResults = await memoryManager.search({
597 |         projectId: "optimization-test",
598 |       });
599 |       const searchTime = Date.now() - startTime;
600 | 
601 |       expect(searchResults.length).toBe(100);
602 |       expect(searchTime).toBeLessThan(1000); // Should complete within 1 second
603 | 
604 |       // Test category-based filtering performance
605 |       const categoryStartTime = Date.now();
606 |       const allMemories = await memoryManager.search("");
607 |       const heavyMemories = allMemories.filter(
608 |         (m) => m.data.category === "heavy",
609 |       );
610 |       const categorySearchTime = Date.now() - categoryStartTime;
611 | 
612 |       expect(heavyMemories.length).toBe(10); // 10% of memories marked as 'heavy'
613 |       expect(categorySearchTime).toBeLessThan(500); // Category search should be fast
614 |     });
615 | 
616 |     test("should handle memory corruption recovery", async () => {
617 |       memoryManager.setContext({ projectId: "corruption-test" });
618 | 
619 |       // Create valid memories
620 |       const valid1Entry = await memoryManager.remember("analysis", {
621 |         valid: true,
622 |         data: "good-data",
623 |       });
624 | 
625 |       const valid2Entry = await memoryManager.remember("recommendation", {
626 |         recommended: "docusaurus",
627 |         valid: true,
628 |       });
629 | 
630 |       // Verify memories are accessible
631 |       const valid1 = await memoryManager.recall(valid1Entry.id);
632 |       const valid2 = await memoryManager.recall(valid2Entry.id);
633 | 
634 |       expect(valid1?.data.valid).toBe(true);
635 |       expect(valid2?.data.valid).toBe(true);
636 | 
637 |       // Simulate recovery after corruption by creating new manager
638 |       const recoveryManager = new MemoryManager(tempDir);
639 |       await recoveryManager.initialize();
640 | 
641 |       // Verify data recovery
642 |       const recovered1 = await recoveryManager.recall(valid1Entry.id);
643 |       const recovered2 = await recoveryManager.recall(valid2Entry.id);
644 | 
645 |       expect(recovered1?.data.valid).toBe(true);
646 |       expect(recovered2?.data.recommended).toBe("docusaurus");
647 | 
648 |       // Verify search functionality after recovery
649 |       const allRecovered = await recoveryManager.search({
650 |         projectId: "corruption-test",
651 |       });
652 |       expect(allRecovered.length).toBe(2);
653 |     });
654 |   });
655 | });
656 | 
```

--------------------------------------------------------------------------------
/tests/memory/kg-health.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Tests for Knowledge Graph Health Monitoring
  3 |  */
  4 | 
  5 | import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
  6 | import { promises as fs } from "fs";
  7 | import path from "path";
  8 | import { tmpdir } from "os";
  9 | import { KGHealthMonitor } from "../../src/memory/kg-health.js";
 10 | import {
 11 |   initializeKnowledgeGraph,
 12 |   getKnowledgeGraph,
 13 |   getKGStorage,
 14 | } from "../../src/memory/kg-integration.js";
 15 | 
 16 | describe("KG Health Monitoring", () => {
 17 |   let testDir: string;
 18 |   let monitor: KGHealthMonitor;
 19 | 
 20 |   beforeEach(async () => {
 21 |     testDir = path.join(tmpdir(), `documcp-health-test-${Date.now()}`);
 22 |     await fs.mkdir(testDir, { recursive: true });
 23 | 
 24 |     const storageDir = path.join(testDir, ".documcp/memory");
 25 |     await initializeKnowledgeGraph(storageDir);
 26 |     monitor = new KGHealthMonitor(storageDir);
 27 |   });
 28 | 
 29 |   afterEach(async () => {
 30 |     try {
 31 |       await fs.rm(testDir, { recursive: true, force: true });
 32 |     } catch {
 33 |       // Ignore cleanup errors
 34 |     }
 35 |   });
 36 | 
 37 |   describe("calculateHealth", () => {
 38 |     it("should calculate overall health score", async () => {
 39 |       const kg = await getKnowledgeGraph();
 40 |       const storage = await getKGStorage();
 41 | 
 42 |       // Add some nodes
 43 |       kg.addNode({
 44 |         id: "project:test",
 45 |         type: "project",
 46 |         label: "Test Project",
 47 |         properties: {},
 48 |         weight: 1.0,
 49 |       });
 50 | 
 51 |       kg.addNode({
 52 |         id: "tech:typescript",
 53 |         type: "technology",
 54 |         label: "TypeScript",
 55 |         properties: {},
 56 |         weight: 1.0,
 57 |       });
 58 | 
 59 |       kg.addEdge({
 60 |         source: "project:test",
 61 |         target: "tech:typescript",
 62 |         type: "project_uses_technology",
 63 |         weight: 1.0,
 64 |         confidence: 1.0,
 65 |         properties: {},
 66 |       });
 67 | 
 68 |       const health = await monitor.calculateHealth(kg, storage);
 69 | 
 70 |       expect(health.overallHealth).toBeGreaterThanOrEqual(0);
 71 |       expect(health.overallHealth).toBeLessThanOrEqual(100);
 72 |       expect(health.timestamp).toBeDefined();
 73 |       expect(health.dataQuality).toBeDefined();
 74 |       expect(health.structureHealth).toBeDefined();
 75 |       expect(health.performance).toBeDefined();
 76 |       expect(health.trends).toBeDefined();
 77 |       expect(health.issues).toBeDefined();
 78 |       expect(health.recommendations).toBeDefined();
 79 |     });
 80 | 
 81 |     it("should have high health score for clean graph", async () => {
 82 |       const kg = await getKnowledgeGraph();
 83 |       const storage = await getKGStorage();
 84 | 
 85 |       // Add well-connected nodes
 86 |       for (let i = 0; i < 5; i++) {
 87 |         kg.addNode({
 88 |           id: `node:${i}`,
 89 |           type: "project",
 90 |           label: `Node ${i}`,
 91 |           properties: {},
 92 |           weight: 1.0,
 93 |         });
 94 |       }
 95 | 
 96 |       // Connect them
 97 |       for (let i = 0; i < 4; i++) {
 98 |         kg.addEdge({
 99 |           source: `node:${i}`,
100 |           target: `node:${i + 1}`,
101 |           type: "similar_to",
102 |           weight: 1.0,
103 |           confidence: 1.0,
104 |           properties: {},
105 |         });
106 |       }
107 | 
108 |       const health = await monitor.calculateHealth(kg, storage);
109 | 
110 |       expect(health.overallHealth).toBeGreaterThan(70);
111 |       expect(health.dataQuality.score).toBeGreaterThan(70);
112 |       expect(health.structureHealth.score).toBeGreaterThan(0);
113 |     });
114 |   });
115 | 
116 |   describe("Data Quality Metrics", () => {
117 |     it("should detect stale nodes", async () => {
118 |       const kg = await getKnowledgeGraph();
119 |       const storage = await getKGStorage();
120 | 
121 |       // Add a stale node (31 days old)
122 |       const staleDate = new Date();
123 |       staleDate.setDate(staleDate.getDate() - 31);
124 | 
125 |       const staleNode = kg.addNode({
126 |         id: "project:stale",
127 |         type: "project",
128 |         label: "Stale Project",
129 |         properties: {},
130 |         weight: 1.0,
131 |       });
132 |       // Manually set stale timestamp
133 |       staleNode.lastUpdated = staleDate.toISOString();
134 | 
135 |       // Add a fresh node
136 |       kg.addNode({
137 |         id: "project:fresh",
138 |         type: "project",
139 |         label: "Fresh Project",
140 |         properties: {},
141 |         weight: 1.0,
142 |       });
143 | 
144 |       const health = await monitor.calculateHealth(kg, storage);
145 | 
146 |       expect(health.dataQuality.staleNodeCount).toBeGreaterThan(0);
147 |       expect(health.dataQuality.totalNodes).toBe(2);
148 |     });
149 | 
150 |     it("should detect orphaned edges", async () => {
151 |       const kg = await getKnowledgeGraph();
152 |       const storage = await getKGStorage();
153 | 
154 |       // Add nodes and edges
155 |       kg.addNode({
156 |         id: "node:1",
157 |         type: "project",
158 |         label: "Node 1",
159 |         properties: {},
160 |         weight: 1.0,
161 |       });
162 | 
163 |       kg.addEdge({
164 |         source: "node:1",
165 |         target: "node:nonexistent",
166 |         type: "depends_on",
167 |         weight: 1.0,
168 |         confidence: 1.0,
169 |         properties: {},
170 |       });
171 | 
172 |       // Save to storage so verifyIntegrity can read it
173 |       const { saveKnowledgeGraph } = await import(
174 |         "../../src/memory/kg-integration.js"
175 |       );
176 |       await saveKnowledgeGraph();
177 | 
178 |       const health = await monitor.calculateHealth(kg, storage);
179 | 
180 |       expect(health.dataQuality.orphanedEdgeCount).toBeGreaterThan(0);
181 |     });
182 | 
183 |     it("should calculate confidence average", async () => {
184 |       const kg = await getKnowledgeGraph();
185 |       const storage = await getKGStorage();
186 | 
187 |       kg.addNode({
188 |         id: "n1",
189 |         type: "project",
190 |         label: "N1",
191 |         properties: {},
192 |         weight: 1,
193 |       });
194 |       kg.addNode({
195 |         id: "n2",
196 |         type: "project",
197 |         label: "N2",
198 |         properties: {},
199 |         weight: 1,
200 |       });
201 | 
202 |       kg.addEdge({
203 |         source: "n1",
204 |         target: "n2",
205 |         type: "similar_to",
206 |         weight: 1.0,
207 |         confidence: 0.8,
208 |         properties: {},
209 |       });
210 | 
211 |       kg.addEdge({
212 |         source: "n2",
213 |         target: "n1",
214 |         type: "similar_to",
215 |         weight: 1.0,
216 |         confidence: 0.6,
217 |         properties: {},
218 |       });
219 | 
220 |       const health = await monitor.calculateHealth(kg, storage);
221 | 
222 |       expect(health.dataQuality.confidenceAverage).toBeCloseTo(0.7, 1);
223 |     });
224 | 
225 |     it("should calculate completeness score", async () => {
226 |       const kg = await getKnowledgeGraph();
227 |       const storage = await getKGStorage();
228 | 
229 |       // Project with technology (complete)
230 |       kg.addNode({
231 |         id: "project:1",
232 |         type: "project",
233 |         label: "Complete Project",
234 |         properties: { hasDocs: false },
235 |         weight: 1,
236 |       });
237 |       kg.addNode({
238 |         id: "tech:ts",
239 |         type: "technology",
240 |         label: "TypeScript",
241 |         properties: {},
242 |         weight: 1,
243 |       });
244 |       kg.addEdge({
245 |         source: "project:1",
246 |         target: "tech:ts",
247 |         type: "project_uses_technology",
248 |         weight: 1,
249 |         confidence: 1,
250 |         properties: {},
251 |       });
252 | 
253 |       const health = await monitor.calculateHealth(kg, storage);
254 | 
255 |       expect(health.dataQuality.completenessScore).toBeGreaterThan(0);
256 |       expect(health.dataQuality.completenessScore).toBeLessThanOrEqual(1);
257 |     });
258 |   });
259 | 
260 |   describe("Structure Health Metrics", () => {
261 |     it("should detect isolated nodes", async () => {
262 |       const kg = await getKnowledgeGraph();
263 |       const storage = await getKGStorage();
264 | 
265 |       // Add isolated node (no edges)
266 |       kg.addNode({
267 |         id: "isolated:1",
268 |         type: "project",
269 |         label: "Isolated",
270 |         properties: {},
271 |         weight: 1,
272 |       });
273 | 
274 |       // Add connected nodes
275 |       kg.addNode({
276 |         id: "connected:1",
277 |         type: "project",
278 |         label: "C1",
279 |         properties: {},
280 |         weight: 1,
281 |       });
282 |       kg.addNode({
283 |         id: "connected:2",
284 |         type: "project",
285 |         label: "C2",
286 |         properties: {},
287 |         weight: 1,
288 |       });
289 |       kg.addEdge({
290 |         source: "connected:1",
291 |         target: "connected:2",
292 |         type: "similar_to",
293 |         weight: 1,
294 |         confidence: 1,
295 |         properties: {},
296 |       });
297 | 
298 |       const health = await monitor.calculateHealth(kg, storage);
299 | 
300 |       expect(health.structureHealth.isolatedNodeCount).toBe(1);
301 |     });
302 | 
303 |     it("should calculate density score", async () => {
304 |       const kg = await getKnowledgeGraph();
305 |       const storage = await getKGStorage();
306 | 
307 |       // Create 4 nodes
308 |       for (let i = 0; i < 4; i++) {
309 |         kg.addNode({
310 |           id: `node:${i}`,
311 |           type: "project",
312 |           label: `N${i}`,
313 |           properties: {},
314 |           weight: 1,
315 |         });
316 |       }
317 | 
318 |       // Create 2 edges (low density)
319 |       kg.addEdge({
320 |         source: "node:0",
321 |         target: "node:1",
322 |         type: "similar_to",
323 |         weight: 1,
324 |         confidence: 1,
325 |         properties: {},
326 |       });
327 |       kg.addEdge({
328 |         source: "node:2",
329 |         target: "node:3",
330 |         type: "similar_to",
331 |         weight: 1,
332 |         confidence: 1,
333 |         properties: {},
334 |       });
335 | 
336 |       const health = await monitor.calculateHealth(kg, storage);
337 | 
338 |       // Max possible edges for 4 nodes: (4*3)/2 = 6
339 |       // Actual edges: 2
340 |       // Density: 2/6 = 0.333
341 |       expect(health.structureHealth.densityScore).toBeCloseTo(0.333, 1);
342 |     });
343 | 
344 |     it("should count connected components", async () => {
345 |       const kg = await getKnowledgeGraph();
346 |       const storage = await getKGStorage();
347 | 
348 |       // Component 1
349 |       kg.addNode({
350 |         id: "c1:n1",
351 |         type: "project",
352 |         label: "C1N1",
353 |         properties: {},
354 |         weight: 1,
355 |       });
356 |       kg.addNode({
357 |         id: "c1:n2",
358 |         type: "project",
359 |         label: "C1N2",
360 |         properties: {},
361 |         weight: 1,
362 |       });
363 |       kg.addEdge({
364 |         source: "c1:n1",
365 |         target: "c1:n2",
366 |         type: "similar_to",
367 |         weight: 1,
368 |         confidence: 1,
369 |         properties: {},
370 |       });
371 | 
372 |       // Component 2 (separate)
373 |       kg.addNode({
374 |         id: "c2:n1",
375 |         type: "project",
376 |         label: "C2N1",
377 |         properties: {},
378 |         weight: 1,
379 |       });
380 |       kg.addNode({
381 |         id: "c2:n2",
382 |         type: "project",
383 |         label: "C2N2",
384 |         properties: {},
385 |         weight: 1,
386 |       });
387 |       kg.addEdge({
388 |         source: "c2:n1",
389 |         target: "c2:n2",
390 |         type: "similar_to",
391 |         weight: 1,
392 |         confidence: 1,
393 |         properties: {},
394 |       });
395 | 
396 |       const health = await monitor.calculateHealth(kg, storage);
397 | 
398 |       expect(health.structureHealth.connectedComponents).toBe(2);
399 |     });
400 |   });
401 | 
402 |   describe("Issue Detection", () => {
403 |     it("should detect orphaned edges issue", async () => {
404 |       const kg = await getKnowledgeGraph();
405 |       const storage = await getKGStorage();
406 | 
407 |       kg.addNode({
408 |         id: "n1",
409 |         type: "project",
410 |         label: "N1",
411 |         properties: {},
412 |         weight: 1,
413 |       });
414 | 
415 |       // Create many orphaned edges
416 |       for (let i = 0; i < 15; i++) {
417 |         kg.addEdge({
418 |           source: "n1",
419 |           target: `nonexistent:${i}`,
420 |           type: "depends_on",
421 |           weight: 1,
422 |           confidence: 1,
423 |           properties: {},
424 |         });
425 |       }
426 | 
427 |       // Save to storage
428 |       const { saveKnowledgeGraph } = await import(
429 |         "../../src/memory/kg-integration.js"
430 |       );
431 |       await saveKnowledgeGraph();
432 | 
433 |       const health = await monitor.calculateHealth(kg, storage);
434 | 
435 |       // Should detect orphaned edges in data quality metrics
436 |       expect(health.dataQuality.orphanedEdgeCount).toBeGreaterThan(0);
437 |     });
438 | 
439 |     it("should detect stale data issue", async () => {
440 |       const kg = await getKnowledgeGraph();
441 |       const storage = await getKGStorage();
442 | 
443 |       const staleDate = new Date();
444 |       staleDate.setDate(staleDate.getDate() - 31);
445 | 
446 |       // Create many stale nodes
447 |       for (let i = 0; i < 25; i++) {
448 |         const node = kg.addNode({
449 |           id: `stale:${i}`,
450 |           type: "project",
451 |           label: `Stale ${i}`,
452 |           properties: {},
453 |           weight: 1,
454 |         });
455 |         node.lastUpdated = staleDate.toISOString();
456 |       }
457 | 
458 |       const health = await monitor.calculateHealth(kg, storage);
459 | 
460 |       const staleIssue = health.issues.find(
461 |         (issue) => issue.category === "quality",
462 |       );
463 |       expect(staleIssue).toBeDefined();
464 |       expect(["medium", "high"]).toContain(staleIssue?.severity);
465 |     });
466 | 
467 |     it("should detect low completeness issue", async () => {
468 |       const kg = await getKnowledgeGraph();
469 |       const storage = await getKGStorage();
470 | 
471 |       // Projects without required relationships
472 |       for (let i = 0; i < 10; i++) {
473 |         kg.addNode({
474 |           id: `project:${i}`,
475 |           type: "project",
476 |           label: `Project ${i}`,
477 |           properties: { hasDocs: true }, // Expects docs but has none
478 |           weight: 1,
479 |         });
480 |       }
481 | 
482 |       const health = await monitor.calculateHealth(kg, storage);
483 | 
484 |       const completenessIssue = health.issues.find(
485 |         (issue) => issue.id === "low_completeness",
486 |       );
487 |       expect(completenessIssue).toBeDefined();
488 |       expect(completenessIssue?.severity).toBe("high");
489 |     });
490 | 
491 |     it("should mark auto-fixable issues", async () => {
492 |       const kg = await getKnowledgeGraph();
493 |       const storage = await getKGStorage();
494 | 
495 |       kg.addNode({
496 |         id: "n1",
497 |         type: "project",
498 |         label: "N1",
499 |         properties: {},
500 |         weight: 1,
501 |       });
502 | 
503 |       for (let i = 0; i < 15; i++) {
504 |         kg.addEdge({
505 |           source: "n1",
506 |           target: `nonexistent:${i}`,
507 |           type: "depends_on",
508 |           weight: 1,
509 |           confidence: 1,
510 |           properties: {},
511 |         });
512 |       }
513 | 
514 |       // Save to storage
515 |       const { saveKnowledgeGraph } = await import(
516 |         "../../src/memory/kg-integration.js"
517 |       );
518 |       await saveKnowledgeGraph();
519 | 
520 |       const health = await monitor.calculateHealth(kg, storage);
521 | 
522 |       // Check basic health metrics were calculated
523 |       expect(health.overallHealth).toBeGreaterThanOrEqual(0);
524 |       expect(health.dataQuality.orphanedEdgeCount).toBeGreaterThan(0);
525 |     });
526 |   });
527 | 
528 |   describe("Recommendations", () => {
529 |     it("should generate recommendations for critical issues", async () => {
530 |       const kg = await getKnowledgeGraph();
531 |       const storage = await getKGStorage();
532 | 
533 |       kg.addNode({
534 |         id: "n1",
535 |         type: "project",
536 |         label: "N1",
537 |         properties: {},
538 |         weight: 1,
539 |       });
540 | 
541 |       // Create orphaned edges (triggers high severity issue)
542 |       for (let i = 0; i < 15; i++) {
543 |         kg.addEdge({
544 |           source: "n1",
545 |           target: `nonexistent:${i}`,
546 |           type: "depends_on",
547 |           weight: 1,
548 |           confidence: 1,
549 |           properties: {},
550 |         });
551 |       }
552 | 
553 |       const health = await monitor.calculateHealth(kg, storage);
554 | 
555 |       // There should be issues detected
556 |       expect(health.issues.length).toBeGreaterThan(0);
557 | 
558 |       // Recommendations may or may not be generated depending on issue severity and auto-fixability
559 |       // Just verify the structure if recommendations exist
560 |       if (health.recommendations.length > 0) {
561 |         expect(health.recommendations[0].expectedImpact).toBeGreaterThanOrEqual(
562 |           0,
563 |         );
564 |       }
565 |     });
566 | 
567 |     it("should prioritize recommendations by impact", async () => {
568 |       const kg = await getKnowledgeGraph();
569 |       const storage = await getKGStorage();
570 | 
571 |       // Create multiple issues
572 |       kg.addNode({
573 |         id: "n1",
574 |         type: "project",
575 |         label: "N1",
576 |         properties: {},
577 |         weight: 1,
578 |       });
579 | 
580 |       for (let i = 0; i < 15; i++) {
581 |         kg.addEdge({
582 |           source: "n1",
583 |           target: `nonexistent:${i}`,
584 |           type: "depends_on",
585 |           weight: 1,
586 |           confidence: 1,
587 |           properties: {},
588 |         });
589 |       }
590 | 
591 |       const staleDate = new Date();
592 |       staleDate.setDate(staleDate.getDate() - 31);
593 |       for (let i = 0; i < 25; i++) {
594 |         const node = kg.addNode({
595 |           id: `stale:${i}`,
596 |           type: "project",
597 |           label: `Stale ${i}`,
598 |           properties: {},
599 |           weight: 1,
600 |         });
601 |         node.lastUpdated = staleDate.toISOString();
602 |       }
603 | 
604 |       const health = await monitor.calculateHealth(kg, storage);
605 | 
606 |       // Recommendations should be sorted by priority then impact
607 |       if (health.recommendations.length > 1) {
608 |         const priorityOrder = { high: 0, medium: 1, low: 2 };
609 |         for (let i = 0; i < health.recommendations.length - 1; i++) {
610 |           const current = health.recommendations[i];
611 |           const next = health.recommendations[i + 1];
612 | 
613 |           if (current.priority === next.priority) {
614 |             expect(current.expectedImpact).toBeGreaterThanOrEqual(
615 |               next.expectedImpact,
616 |             );
617 |           } else {
618 |             expect(priorityOrder[current.priority]).toBeLessThanOrEqual(
619 |               priorityOrder[next.priority],
620 |             );
621 |           }
622 |         }
623 |       }
624 |     });
625 | 
626 |     it("should limit recommendations to top 5", async () => {
627 |       const kg = await getKnowledgeGraph();
628 |       const storage = await getKGStorage();
629 | 
630 |       // Create many issues
631 |       kg.addNode({
632 |         id: "n1",
633 |         type: "project",
634 |         label: "N1",
635 |         properties: {},
636 |         weight: 1,
637 |       });
638 | 
639 |       for (let i = 0; i < 50; i++) {
640 |         kg.addEdge({
641 |           source: "n1",
642 |           target: `nonexistent:${i}`,
643 |           type: "depends_on",
644 |           weight: 1,
645 |           confidence: 1,
646 |           properties: {},
647 |         });
648 |       }
649 | 
650 |       const staleDate = new Date();
651 |       staleDate.setDate(staleDate.getDate() - 31);
652 |       for (let i = 0; i < 50; i++) {
653 |         const node = kg.addNode({
654 |           id: `stale:${i}`,
655 |           type: "project",
656 |           label: `Stale ${i}`,
657 |           properties: {},
658 |           weight: 1,
659 |         });
660 |         node.lastUpdated = staleDate.toISOString();
661 |       }
662 | 
663 |       const health = await monitor.calculateHealth(kg, storage);
664 | 
665 |       expect(health.recommendations.length).toBeLessThanOrEqual(5);
666 |     });
667 |   });
668 | 
669 |   describe("Trend Analysis", () => {
670 |     it("should return stable trend with no history", async () => {
671 |       const kg = await getKnowledgeGraph();
672 |       const storage = await getKGStorage();
673 | 
674 |       kg.addNode({
675 |         id: "n1",
676 |         type: "project",
677 |         label: "N1",
678 |         properties: {},
679 |         weight: 1,
680 |       });
681 | 
682 |       const health = await monitor.calculateHealth(kg, storage);
683 | 
684 |       expect(health.trends.healthTrend).toBe("stable");
685 |       expect(health.trends.nodeGrowthRate).toBe(0);
686 |       expect(health.trends.edgeGrowthRate).toBe(0);
687 |     });
688 | 
689 |     it("should track health history", async () => {
690 |       const kg = await getKnowledgeGraph();
691 |       const storage = await getKGStorage();
692 | 
693 |       kg.addNode({
694 |         id: "n1",
695 |         type: "project",
696 |         label: "N1",
697 |         properties: {},
698 |         weight: 1,
699 |       });
700 | 
701 |       // First health check
702 |       await monitor.calculateHealth(kg, storage);
703 | 
704 |       // Verify history file was created
705 |       const historyPath = path.join(
706 |         testDir,
707 |         ".documcp/memory/health-history.jsonl",
708 |       );
709 |       const historyExists = await fs
710 |         .access(historyPath)
711 |         .then(() => true)
712 |         .catch(() => false);
713 | 
714 |       expect(historyExists).toBe(true);
715 | 
716 |       const content = await fs.readFile(historyPath, "utf-8");
717 |       expect(content).toContain("overallHealth");
718 |       expect(content).toContain("dataQuality");
719 |     });
720 | 
721 |     it("should detect improving trend", async () => {
722 |       const kg = await getKnowledgeGraph();
723 |       const storage = await getKGStorage();
724 | 
725 |       // Create poor initial state
726 |       kg.addNode({
727 |         id: "n1",
728 |         type: "project",
729 |         label: "N1",
730 |         properties: {},
731 |         weight: 1,
732 |       });
733 |       for (let i = 0; i < 20; i++) {
734 |         kg.addEdge({
735 |           source: "n1",
736 |           target: `nonexistent:${i}`,
737 |           type: "depends_on",
738 |           weight: 1,
739 |           confidence: 1,
740 |           properties: {},
741 |         });
742 |       }
743 | 
744 |       await monitor.calculateHealth(kg, storage);
745 | 
746 |       // Simulate time passing and improvement
747 |       await new Promise((resolve) => setTimeout(resolve, 100));
748 | 
749 |       // Remove orphaned edges (improvement)
750 |       const allEdges = await kg.getAllEdges();
751 |       for (const edge of allEdges) {
752 |         // In a real scenario, we'd have a method to remove edges
753 |         // For testing, we'll add good nodes instead
754 |       }
755 | 
756 |       // Add well-connected nodes
757 |       for (let i = 0; i < 5; i++) {
758 |         kg.addNode({
759 |           id: `good:${i}`,
760 |           type: "project",
761 |           label: `Good ${i}`,
762 |           properties: {},
763 |           weight: 1,
764 |         });
765 |       }
766 | 
767 |       const health2 = await monitor.calculateHealth(kg, storage);
768 | 
769 |       // Trend analysis needs multiple data points over time
770 |       // With only 2 checks very close together, it might still be stable
771 |       expect(["improving", "stable", "degrading"]).toContain(
772 |         health2.trends.healthTrend,
773 |       );
774 |     });
775 |   });
776 | 
777 |   describe("Performance Metrics", () => {
778 |     it("should track storage size", async () => {
779 |       const kg = await getKnowledgeGraph();
780 |       const storage = await getKGStorage();
781 | 
782 |       kg.addNode({
783 |         id: "n1",
784 |         type: "project",
785 |         label: "N1",
786 |         properties: {},
787 |         weight: 1,
788 |       });
789 | 
790 |       const health = await monitor.calculateHealth(kg, storage);
791 | 
792 |       expect(health.performance.storageSize).toBeGreaterThanOrEqual(0);
793 |     });
794 | 
795 |     it("should have high performance score for small graphs", async () => {
796 |       const kg = await getKnowledgeGraph();
797 |       const storage = await getKGStorage();
798 | 
799 |       // Small graph (fast)
800 |       for (let i = 0; i < 5; i++) {
801 |         kg.addNode({
802 |           id: `n${i}`,
803 |           type: "project",
804 |           label: `N${i}`,
805 |           properties: {},
806 |           weight: 1,
807 |         });
808 |       }
809 | 
810 |       const health = await monitor.calculateHealth(kg, storage);
811 | 
812 |       expect(health.performance.score).toBeGreaterThan(50);
813 |     });
814 |   });
815 | });
816 | 
```

--------------------------------------------------------------------------------
/src/tools/generate-contextual-content.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Context-Aware Content Generator (Phase 3)
  3 |  *
  4 |  * Generates documentation content based on actual code structure
  5 |  * Uses AST analysis and knowledge graph for accurate, contextual documentation
  6 |  */
  7 | 
  8 | import { Tool } from "@modelcontextprotocol/sdk/types.js";
  9 | import { z } from "zod";
 10 | import path from "path";
 11 | import {
 12 |   ASTAnalyzer,
 13 |   FunctionSignature,
 14 |   ClassInfo,
 15 |   InterfaceInfo,
 16 | } from "../utils/ast-analyzer.js";
 17 | import { formatMCPResponse, MCPToolResponse } from "../types/api.js";
 18 | import { handleMemoryRecall } from "../memory/index.js";
 19 | 
 20 | const inputSchema = z.object({
 21 |   filePath: z.string().describe("Path to the source code file"),
 22 |   documentationType: z
 23 |     .enum(["tutorial", "how-to", "reference", "explanation", "all"])
 24 |     .default("reference")
 25 |     .describe("Type of documentation to generate"),
 26 |   includeExamples: z
 27 |     .boolean()
 28 |     .default(true)
 29 |     .describe("Include code examples in generated documentation"),
 30 |   style: z
 31 |     .enum(["concise", "detailed", "verbose"])
 32 |     .default("detailed")
 33 |     .describe("Documentation style"),
 34 |   outputFormat: z
 35 |     .enum(["markdown", "mdx", "html"])
 36 |     .default("markdown")
 37 |     .describe("Output format for generated documentation"),
 38 | });
 39 | 
 40 | export interface GeneratedContent {
 41 |   filePath: string;
 42 |   documentationType: string;
 43 |   sections: GeneratedSection[];
 44 |   metadata: ContentMetadata;
 45 | }
 46 | 
 47 | export interface GeneratedSection {
 48 |   title: string;
 49 |   content: string;
 50 |   category: "tutorial" | "how-to" | "reference" | "explanation";
 51 |   codeReferences: string[];
 52 |   confidence: number;
 53 | }
 54 | 
 55 | export interface ContentMetadata {
 56 |   generatedAt: string;
 57 |   codeAnalysis: {
 58 |     functions: number;
 59 |     classes: number;
 60 |     interfaces: number;
 61 |     complexity: number;
 62 |   };
 63 |   similarExamples: number;
 64 |   confidence: number;
 65 | }
 66 | 
 67 | /**
 68 |  * Main content generation handler
 69 |  */
 70 | export async function handleGenerateContextualContent(
 71 |   args: unknown,
 72 |   context?: any,
 73 | ): Promise<{ content: any[] }> {
 74 |   const startTime = Date.now();
 75 | 
 76 |   try {
 77 |     const { filePath, documentationType, includeExamples, style } =
 78 |       inputSchema.parse(args);
 79 | 
 80 |     await context?.info?.(
 81 |       `📝 Generating ${documentationType} documentation for ${path.basename(
 82 |         filePath,
 83 |       )}...`,
 84 |     );
 85 | 
 86 |     // Initialize AST analyzer
 87 |     const analyzer = new ASTAnalyzer();
 88 |     await analyzer.initialize();
 89 | 
 90 |     // Analyze the file
 91 |     await context?.info?.("🔍 Analyzing code structure...");
 92 |     const analysis = await analyzer.analyzeFile(filePath);
 93 | 
 94 |     if (!analysis) {
 95 |       throw new Error(`Failed to analyze file: ${filePath}`);
 96 |     }
 97 | 
 98 |     // Query knowledge graph for similar projects
 99 |     await context?.info?.("🧠 Retrieving contextual information...");
100 |     const similarProjects = await findSimilarProjects(analysis, context);
101 | 
102 |     // Generate documentation sections
103 |     const sections: GeneratedSection[] = [];
104 | 
105 |     if (documentationType === "reference" || documentationType === "all") {
106 |       sections.push(
107 |         ...generateReferenceDocumentation(analysis, similarProjects, style),
108 |       );
109 |     }
110 | 
111 |     if (documentationType === "tutorial" || documentationType === "all") {
112 |       sections.push(
113 |         ...generateTutorialDocumentation(
114 |           analysis,
115 |           similarProjects,
116 |           includeExamples,
117 |           style,
118 |         ),
119 |       );
120 |     }
121 | 
122 |     if (documentationType === "how-to" || documentationType === "all") {
123 |       sections.push(
124 |         ...generateHowToDocumentation(
125 |           analysis,
126 |           similarProjects,
127 |           includeExamples,
128 |           style,
129 |         ),
130 |       );
131 |     }
132 | 
133 |     if (documentationType === "explanation" || documentationType === "all") {
134 |       sections.push(
135 |         ...generateExplanationDocumentation(analysis, similarProjects, style),
136 |       );
137 |     }
138 | 
139 |     const metadata: ContentMetadata = {
140 |       generatedAt: new Date().toISOString(),
141 |       codeAnalysis: {
142 |         functions: analysis.functions.length,
143 |         classes: analysis.classes.length,
144 |         interfaces: analysis.interfaces.length,
145 |         complexity: analysis.complexity,
146 |       },
147 |       similarExamples: similarProjects.length,
148 |       confidence: calculateOverallConfidence(sections),
149 |     };
150 | 
151 |     const result: GeneratedContent = {
152 |       filePath,
153 |       documentationType,
154 |       sections,
155 |       metadata,
156 |     };
157 | 
158 |     const response: MCPToolResponse<typeof result> = {
159 |       success: true,
160 |       data: result,
161 |       metadata: {
162 |         toolVersion: "3.0.0",
163 |         executionTime: Date.now() - startTime,
164 |         timestamp: new Date().toISOString(),
165 |       },
166 |       recommendations: [
167 |         {
168 |           type: "info",
169 |           title: "Documentation Generated",
170 |           description: `Generated ${sections.length} documentation section(s) with ${metadata.confidence}% confidence`,
171 |         },
172 |       ],
173 |       nextSteps: [
174 |         {
175 |           action: "Review generated content",
176 |           description: "Review and refine generated documentation for accuracy",
177 |           priority: "high",
178 |         },
179 |         {
180 |           action: "Add to documentation site",
181 |           description:
182 |             "Integrate generated content into your documentation structure",
183 |           priority: "medium",
184 |         },
185 |         {
186 |           action: "Validate content",
187 |           toolRequired: "validate_diataxis_content",
188 |           description:
189 |             "Run validation to ensure generated content meets quality standards",
190 |           priority: "medium",
191 |         },
192 |       ],
193 |     };
194 | 
195 |     await context?.info?.(
196 |       `✅ Generated ${sections.length} documentation section(s)`,
197 |     );
198 | 
199 |     return formatMCPResponse(response, { fullResponse: true });
200 |   } catch (error: any) {
201 |     const errorResponse: MCPToolResponse = {
202 |       success: false,
203 |       error: {
204 |         code: "GENERATION_FAILED",
205 |         message: `Content generation failed: ${error.message}`,
206 |         resolution: "Ensure the file path is valid and the file can be parsed",
207 |       },
208 |       metadata: {
209 |         toolVersion: "3.0.0",
210 |         executionTime: Date.now() - startTime,
211 |         timestamp: new Date().toISOString(),
212 |       },
213 |     };
214 | 
215 |     return formatMCPResponse(errorResponse, { fullResponse: true });
216 |   }
217 | }
218 | 
219 | /**
220 |  * Generate reference documentation
221 |  */
222 | function generateReferenceDocumentation(
223 |   analysis: any,
224 |   _similarProjects: any[],
225 |   _style: string,
226 | ): GeneratedSection[] {
227 |   const sections: GeneratedSection[] = [];
228 | 
229 |   // Generate function reference
230 |   if (analysis.functions.length > 0) {
231 |     sections.push(generateFunctionReference(analysis.functions, _style));
232 |   }
233 | 
234 |   // Generate class reference
235 |   if (analysis.classes.length > 0) {
236 |     sections.push(generateClassReference(analysis.classes, _style));
237 |   }
238 | 
239 |   // Generate interface reference
240 |   if (analysis.interfaces.length > 0) {
241 |     sections.push(generateInterfaceReference(analysis.interfaces, _style));
242 |   }
243 | 
244 |   // Generate type reference
245 |   if (analysis.types.length > 0) {
246 |     sections.push(generateTypeReference(analysis.types, _style));
247 |   }
248 | 
249 |   return sections;
250 | }
251 | 
252 | /**
253 |  * Generate function reference documentation
254 |  */
255 | function generateFunctionReference(
256 |   functions: FunctionSignature[],
257 |   _style: string,
258 | ): GeneratedSection {
259 |   let content = "# Function Reference\n\n";
260 | 
261 |   for (const func of functions.filter((f) => f.isExported)) {
262 |     content += `## \`${func.name}\`\n\n`;
263 | 
264 |     if (func.docComment) {
265 |       content += `${cleanDocComment(func.docComment)}\n\n`;
266 |     }
267 | 
268 |     // Signature
269 |     const params = func.parameters
270 |       .map((p) => `${p.name}: ${p.type || "any"}`)
271 |       .join(", ");
272 |     const returnType = func.returnType || "void";
273 |     const asyncPrefix = func.isAsync ? "async " : "";
274 | 
275 |     content += "**Signature:**\n\n";
276 |     content += "```typescript\n";
277 |     content += `${asyncPrefix}function ${func.name}(${params}): ${returnType}\n`;
278 |     content += "```\n\n";
279 | 
280 |     // Parameters
281 |     if (func.parameters.length > 0) {
282 |       content += "**Parameters:**\n\n";
283 |       for (const param of func.parameters) {
284 |         const optionalMarker = param.optional ? " (optional)" : "";
285 |         const defaultValue = param.defaultValue
286 |           ? ` = ${param.defaultValue}`
287 |           : "";
288 |         content += `- \`${param.name}\`${optionalMarker}: \`${
289 |           param.type || "any"
290 |         }\`${defaultValue}\n`;
291 |       }
292 |       content += "\n";
293 |     }
294 | 
295 |     // Return value
296 |     if (func.returnType && func.returnType !== "void") {
297 |       content += "**Returns:**\n\n";
298 |       content += `- \`${func.returnType}\`\n\n`;
299 |     }
300 | 
301 |     if (_style === "detailed" || _style === "verbose") {
302 |       content += `**Complexity:** ${func.complexity}\n\n`;
303 |     }
304 | 
305 |     content += "---\n\n";
306 |   }
307 | 
308 |   return {
309 |     title: "Function Reference",
310 |     content,
311 |     category: "reference",
312 |     codeReferences: functions.map((f) => f.name),
313 |     confidence: 0.9,
314 |   };
315 | }
316 | 
317 | /**
318 |  * Generate class reference documentation
319 |  */
320 | function generateClassReference(
321 |   classes: ClassInfo[],
322 |   _style: string,
323 | ): GeneratedSection {
324 |   let content = "# Class Reference\n\n";
325 | 
326 |   for (const cls of classes.filter((c) => c.isExported)) {
327 |     content += `## \`${cls.name}\`\n\n`;
328 | 
329 |     if (cls.docComment) {
330 |       content += `${cleanDocComment(cls.docComment)}\n\n`;
331 |     }
332 | 
333 |     // Inheritance
334 |     if (cls.extends) {
335 |       content += `**Extends:** \`${cls.extends}\`\n\n`;
336 |     }
337 | 
338 |     if (cls.implements.length > 0) {
339 |       content += `**Implements:** ${cls.implements
340 |         .map((i) => `\`${i}\``)
341 |         .join(", ")}\n\n`;
342 |     }
343 | 
344 |     // Properties
345 |     if (cls.properties.length > 0) {
346 |       content += "### Properties\n\n";
347 |       for (const prop of cls.properties) {
348 |         const visibility =
349 |           prop.visibility !== "public" ? `${prop.visibility} ` : "";
350 |         const readonly = prop.isReadonly ? "readonly " : "";
351 |         const static_ = prop.isStatic ? "static " : "";
352 |         content += `- ${visibility}${static_}${readonly}\`${prop.name}\`: \`${
353 |           prop.type || "any"
354 |         }\`\n`;
355 |       }
356 |       content += "\n";
357 |     }
358 | 
359 |     // Methods
360 |     if (cls.methods.length > 0) {
361 |       content += "### Methods\n\n";
362 |       for (const method of cls.methods.filter((m) => m.isPublic)) {
363 |         const params = method.parameters
364 |           .map((p) => `${p.name}: ${p.type || "any"}`)
365 |           .join(", ");
366 |         const returnType = method.returnType || "void";
367 |         const asyncPrefix = method.isAsync ? "async " : "";
368 | 
369 |         content += `#### \`${method.name}\`\n\n`;
370 | 
371 |         if (method.docComment) {
372 |           content += `${cleanDocComment(method.docComment)}\n\n`;
373 |         }
374 | 
375 |         content += "```typescript\n";
376 |         content += `${asyncPrefix}${method.name}(${params}): ${returnType}\n`;
377 |         content += "```\n\n";
378 |       }
379 |     }
380 | 
381 |     content += "---\n\n";
382 |   }
383 | 
384 |   return {
385 |     title: "Class Reference",
386 |     content,
387 |     category: "reference",
388 |     codeReferences: classes.map((c) => c.name),
389 |     confidence: 0.9,
390 |   };
391 | }
392 | 
393 | /**
394 |  * Generate interface reference documentation
395 |  */
396 | function generateInterfaceReference(
397 |   interfaces: InterfaceInfo[],
398 |   _style: string,
399 | ): GeneratedSection {
400 |   let content = "# Interface Reference\n\n";
401 | 
402 |   for (const iface of interfaces.filter((i) => i.isExported)) {
403 |     content += `## \`${iface.name}\`\n\n`;
404 | 
405 |     if (iface.docComment) {
406 |       content += `${cleanDocComment(iface.docComment)}\n\n`;
407 |     }
408 | 
409 |     if (iface.extends.length > 0) {
410 |       content += `**Extends:** ${iface.extends
411 |         .map((e) => `\`${e}\``)
412 |         .join(", ")}\n\n`;
413 |     }
414 | 
415 |     // Properties
416 |     if (iface.properties.length > 0) {
417 |       content += "### Properties\n\n";
418 |       content += "```typescript\n";
419 |       content += `interface ${iface.name} {\n`;
420 |       for (const prop of iface.properties) {
421 |         const readonly = prop.isReadonly ? "readonly " : "";
422 |         content += `  ${readonly}${prop.name}: ${prop.type || "any"};\n`;
423 |       }
424 |       content += "}\n";
425 |       content += "```\n\n";
426 |     }
427 | 
428 |     // Methods
429 |     if (iface.methods.length > 0) {
430 |       content += "### Methods\n\n";
431 |       for (const method of iface.methods) {
432 |         const params = method.parameters
433 |           .map((p) => `${p.name}: ${p.type || "any"}`)
434 |           .join(", ");
435 |         const returnType = method.returnType || "void";
436 |         content += `- \`${method.name}(${params}): ${returnType}\`\n`;
437 |       }
438 |       content += "\n";
439 |     }
440 | 
441 |     content += "---\n\n";
442 |   }
443 | 
444 |   return {
445 |     title: "Interface Reference",
446 |     content,
447 |     category: "reference",
448 |     codeReferences: interfaces.map((i) => i.name),
449 |     confidence: 0.9,
450 |   };
451 | }
452 | 
453 | /**
454 |  * Generate type reference documentation
455 |  */
456 | function generateTypeReference(types: any[], _style: string): GeneratedSection {
457 |   let content = "# Type Reference\n\n";
458 | 
459 |   for (const type of types.filter((t: any) => t.isExported)) {
460 |     content += `## \`${type.name}\`\n\n`;
461 | 
462 |     if (type.docComment) {
463 |       content += `${cleanDocComment(type.docComment)}\n\n`;
464 |     }
465 | 
466 |     content += "```typescript\n";
467 |     content += `type ${type.name} = ${type.definition};\n`;
468 |     content += "```\n\n";
469 | 
470 |     content += "---\n\n";
471 |   }
472 | 
473 |   return {
474 |     title: "Type Reference",
475 |     content,
476 |     category: "reference",
477 |     codeReferences: types.map((t: any) => t.name),
478 |     confidence: 0.85,
479 |   };
480 | }
481 | 
482 | /**
483 |  * Generate tutorial documentation
484 |  */
485 | function generateTutorialDocumentation(
486 |   analysis: any,
487 |   _similarProjects: any[],
488 |   includeExamples: boolean,
489 |   _style: string,
490 | ): GeneratedSection[] {
491 |   const sections: GeneratedSection[] = [];
492 | 
493 |   // Generate getting started tutorial
494 |   const tutorialContent = generateGettingStartedTutorial(
495 |     analysis,
496 |     includeExamples,
497 |   );
498 |   sections.push(tutorialContent);
499 | 
500 |   return sections;
501 | }
502 | 
503 | /**
504 |  * Generate getting started tutorial
505 |  */
506 | function generateGettingStartedTutorial(
507 |   analysis: any,
508 |   includeExamples: boolean,
509 | ): GeneratedSection {
510 |   let content = "# Getting Started\n\n";
511 | 
512 |   content += "This tutorial will guide you through using this module.\n\n";
513 | 
514 |   content += "## Installation\n\n";
515 |   content += "```bash\n";
516 |   content += "npm install your-package\n";
517 |   content += "```\n\n";
518 | 
519 |   content += "## Basic Usage\n\n";
520 | 
521 |   if (includeExamples && analysis.functions.length > 0) {
522 |     const mainFunction =
523 |       analysis.functions.find((f: any) => f.name === "main") ||
524 |       analysis.functions[0];
525 | 
526 |     content += `Import and use the main functions:\n\n`;
527 |     content += "```typescript\n";
528 |     content += `import { ${mainFunction.name} } from 'your-package';\n\n`;
529 | 
530 |     const exampleParams = mainFunction.parameters
531 |       .map((p: any) => {
532 |         if (p.type === "string") return `"example"`;
533 |         if (p.type === "number") return "42";
534 |         if (p.type === "boolean") return "true";
535 |         return "{}";
536 |       })
537 |       .join(", ");
538 | 
539 |     content += `// Example usage\n`;
540 |     content += `const result = ${mainFunction.isAsync ? "await " : ""}${
541 |       mainFunction.name
542 |     }(${exampleParams});\n`;
543 |     content += "console.log(result);\n";
544 |     content += "```\n\n";
545 |   }
546 | 
547 |   content += "## Next Steps\n\n";
548 |   content +=
549 |     "- Explore the [API Reference](#reference) for detailed documentation\n";
550 |   content += "- Check out [How-To Guides](#how-to) for specific use cases\n";
551 |   content +=
552 |     "- Read the [Explanation](#explanation) for deeper understanding\n\n";
553 | 
554 |   return {
555 |     title: "Getting Started Tutorial",
556 |     content,
557 |     category: "tutorial",
558 |     codeReferences: analysis.functions.map((f: any) => f.name),
559 |     confidence: 0.75,
560 |   };
561 | }
562 | 
563 | /**
564 |  * Generate how-to documentation
565 |  */
566 | function generateHowToDocumentation(
567 |   analysis: any,
568 |   _similarProjects: any[],
569 |   includeExamples: boolean,
570 |   _style: string,
571 | ): GeneratedSection[] {
572 |   const sections: GeneratedSection[] = [];
573 | 
574 |   // Generate how-to guides based on common patterns
575 |   if (analysis.functions.some((f: any) => f.isAsync)) {
576 |     sections.push(generateAsyncHowTo(analysis, includeExamples));
577 |   }
578 | 
579 |   if (analysis.classes.length > 0) {
580 |     sections.push(generateClassUsageHowTo(analysis, includeExamples));
581 |   }
582 | 
583 |   return sections;
584 | }
585 | 
586 | /**
587 |  * Generate async usage how-to
588 |  */
589 | function generateAsyncHowTo(
590 |   analysis: any,
591 |   includeExamples: boolean,
592 | ): GeneratedSection {
593 |   let content = "# How to Handle Async Operations\n\n";
594 | 
595 |   content += "This module uses async/await for asynchronous operations.\n\n";
596 | 
597 |   if (includeExamples) {
598 |     const asyncFunc = analysis.functions.find((f: any) => f.isAsync);
599 |     if (asyncFunc) {
600 |       content += "## Example\n\n";
601 |       content += "```typescript\n";
602 |       content += `try {\n`;
603 |       content += `  const result = await ${asyncFunc.name}();\n`;
604 |       content += `  console.log('Success:', result);\n`;
605 |       content += `} catch (error) {\n`;
606 |       content += `  console.error('Error:', error);\n`;
607 |       content += `}\n`;
608 |       content += "```\n\n";
609 |     }
610 |   }
611 | 
612 |   return {
613 |     title: "Async Operations Guide",
614 |     content,
615 |     category: "how-to",
616 |     codeReferences: analysis.functions
617 |       .filter((f: any) => f.isAsync)
618 |       .map((f: any) => f.name),
619 |     confidence: 0.8,
620 |   };
621 | }
622 | 
623 | /**
624 |  * Generate class usage how-to
625 |  */
626 | function generateClassUsageHowTo(
627 |   analysis: any,
628 |   includeExamples: boolean,
629 | ): GeneratedSection {
630 |   let content = "# How to Use Classes\n\n";
631 | 
632 |   const firstClass = analysis.classes[0];
633 |   if (firstClass && includeExamples) {
634 |     content += `## Creating an Instance\n\n`;
635 |     content += "```typescript\n";
636 |     content += `const instance = new ${firstClass.name}();\n`;
637 |     content += "```\n\n";
638 | 
639 |     if (firstClass.methods.length > 0) {
640 |       content += `## Using Methods\n\n`;
641 |       content += "```typescript\n";
642 |       const publicMethod = firstClass.methods.find((m: any) => m.isPublic);
643 |       if (publicMethod) {
644 |         content += `const result = ${
645 |           publicMethod.isAsync ? "await " : ""
646 |         }instance.${publicMethod.name}();\n`;
647 |       }
648 |       content += "```\n\n";
649 |     }
650 |   }
651 | 
652 |   return {
653 |     title: "Class Usage Guide",
654 |     content,
655 |     category: "how-to",
656 |     codeReferences: analysis.classes.map((c: any) => c.name),
657 |     confidence: 0.8,
658 |   };
659 | }
660 | 
661 | /**
662 |  * Generate explanation documentation
663 |  */
664 | function generateExplanationDocumentation(
665 |   analysis: any,
666 |   _similarProjects: any[],
667 |   _style: string,
668 | ): GeneratedSection[] {
669 |   const sections: GeneratedSection[] = [];
670 | 
671 |   // Generate architecture explanation
672 |   sections.push(generateArchitectureExplanation(analysis));
673 | 
674 |   return sections;
675 | }
676 | 
677 | /**
678 |  * Generate architecture explanation
679 |  */
680 | function generateArchitectureExplanation(analysis: any): GeneratedSection {
681 |   let content = "# Architecture\n\n";
682 | 
683 |   content += "## Overview\n\n";
684 |   content += `This module consists of ${analysis.functions.length} function(s), ${analysis.classes.length} class(es), and ${analysis.interfaces.length} interface(s).\n\n`;
685 | 
686 |   if (analysis.classes.length > 0) {
687 |     content += "## Class Structure\n\n";
688 |     content +=
689 |       "The module uses object-oriented patterns with the following classes:\n\n";
690 |     for (const cls of analysis.classes.filter((c: any) => c.isExported)) {
691 |       content += `- **${cls.name}**: ${cls.methods.length} method(s), ${cls.properties.length} property(ies)\n`;
692 |     }
693 |     content += "\n";
694 |   }
695 | 
696 |   if (analysis.complexity > 20) {
697 |     content += "## Complexity\n\n";
698 |     content += `This module has a moderate to high complexity score (${analysis.complexity}), indicating sophisticated logic and multiple control flow paths.\n\n`;
699 |   }
700 | 
701 |   return {
702 |     title: "Architecture Explanation",
703 |     content,
704 |     category: "explanation",
705 |     codeReferences: [
706 |       ...analysis.functions.map((f: any) => f.name),
707 |       ...analysis.classes.map((c: any) => c.name),
708 |     ],
709 |     confidence: 0.7,
710 |   };
711 | }
712 | 
713 | /**
714 |  * Find similar projects in knowledge graph
715 |  */
716 | async function findSimilarProjects(
717 |   analysis: any,
718 |   context?: any,
719 | ): Promise<any[]> {
720 |   try {
721 |     const query = `${analysis.language} ${analysis.functions.length} functions ${analysis.classes.length} classes`;
722 |     const results = await handleMemoryRecall({
723 |       query,
724 |       type: "analysis",
725 |       limit: 5,
726 |     });
727 | 
728 |     return results.memories || [];
729 |   } catch (error) {
730 |     await context?.warn?.(`Failed to retrieve similar projects: ${error}`);
731 |     return [];
732 |   }
733 | }
734 | 
735 | /**
736 |  * Calculate overall confidence
737 |  */
738 | function calculateOverallConfidence(sections: GeneratedSection[]): number {
739 |   if (sections.length === 0) return 0;
740 |   const avgConfidence =
741 |     sections.reduce((sum, s) => sum + s.confidence, 0) / sections.length;
742 |   return Math.round(avgConfidence * 100);
743 | }
744 | 
745 | /**
746 |  * Clean JSDoc comment
747 |  */
748 | function cleanDocComment(comment: string): string {
749 |   return comment
750 |     .replace(/\/\*\*|\*\//g, "")
751 |     .replace(/^\s*\* ?/gm, "")
752 |     .trim();
753 | }
754 | 
755 | /**
756 |  * Tool definition
757 |  */
758 | export const generateContextualContent: Tool = {
759 |   name: "generate_contextual_content",
760 |   description:
761 |     "Generate context-aware documentation using AST analysis and knowledge graph insights (Phase 3)",
762 |   inputSchema: {
763 |     type: "object",
764 |     properties: {
765 |       filePath: {
766 |         type: "string",
767 |         description: "Path to the source code file to document",
768 |       },
769 |       documentationType: {
770 |         type: "string",
771 |         enum: ["tutorial", "how-to", "reference", "explanation", "all"],
772 |         default: "reference",
773 |         description: "Type of Diataxis documentation to generate",
774 |       },
775 |       includeExamples: {
776 |         type: "boolean",
777 |         default: true,
778 |         description: "Include code examples in generated documentation",
779 |       },
780 |       style: {
781 |         type: "string",
782 |         enum: ["concise", "detailed", "verbose"],
783 |         default: "detailed",
784 |         description: "Documentation detail level",
785 |       },
786 |       outputFormat: {
787 |         type: "string",
788 |         enum: ["markdown", "mdx", "html"],
789 |         default: "markdown",
790 |         description: "Output format for generated content",
791 |       },
792 |     },
793 |     required: ["filePath"],
794 |   },
795 | };
796 | 
```

--------------------------------------------------------------------------------
/tests/tools/detect-gaps.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { promises as fs } from "fs";
  2 | import path from "path";
  3 | import { tmpdir } from "os";
  4 | 
  5 | // Mock dependencies that don't involve filesystem
  6 | const mockAnalyzeRepository = jest.fn();
  7 | const mockValidateContent = jest.fn();
  8 | 
  9 | jest.mock("../../src/tools/analyze-repository.js", () => ({
 10 |   analyzeRepository: mockAnalyzeRepository,
 11 | }));
 12 | 
 13 | jest.mock("../../src/tools/validate-content.js", () => ({
 14 |   handleValidateDiataxisContent: mockValidateContent,
 15 | }));
 16 | 
 17 | jest.mock("../../src/utils/code-scanner.js", () => ({
 18 |   CodeScanner: jest.fn().mockImplementation(() => ({
 19 |     analyzeRepository: jest.fn().mockResolvedValue({
 20 |       summary: {
 21 |         totalFiles: 5,
 22 |         parsedFiles: 3,
 23 |         functions: 10,
 24 |         classes: 2,
 25 |         interfaces: 3,
 26 |         types: 1,
 27 |         constants: 2,
 28 |         apiEndpoints: 1,
 29 |       },
 30 |       files: ["src/test.ts"],
 31 |       functions: [
 32 |         {
 33 |           name: "testFunction",
 34 |           filePath: "src/test.ts",
 35 |           line: 1,
 36 |           exported: true,
 37 |           hasJSDoc: false,
 38 |         },
 39 |       ],
 40 |       classes: [
 41 |         {
 42 |           name: "TestClass",
 43 |           filePath: "src/test.ts",
 44 |           line: 5,
 45 |           exported: true,
 46 |           hasJSDoc: false,
 47 |         },
 48 |       ],
 49 |       interfaces: [
 50 |         {
 51 |           name: "TestInterface",
 52 |           filePath: "src/test.ts",
 53 |           line: 10,
 54 |           exported: true,
 55 |           hasJSDoc: false,
 56 |         },
 57 |       ],
 58 |       types: [],
 59 |       constants: [],
 60 |       apiEndpoints: [],
 61 |       imports: [],
 62 |       exports: [],
 63 |       frameworks: [],
 64 |     }),
 65 |   })),
 66 | }));
 67 | 
 68 | // Helper functions for creating test directories and files
 69 | async function createTestDirectory(name: string): Promise<string> {
 70 |   const testDir = path.join(
 71 |     tmpdir(),
 72 |     "documcp-test-" +
 73 |       Date.now() +
 74 |       "-" +
 75 |       Math.random().toString(36).substring(7),
 76 |   );
 77 |   await fs.mkdir(testDir, { recursive: true });
 78 |   return testDir;
 79 | }
 80 | 
 81 | async function createTestFile(
 82 |   filePath: string,
 83 |   content: string,
 84 | ): Promise<void> {
 85 |   await fs.mkdir(path.dirname(filePath), { recursive: true });
 86 |   await fs.writeFile(filePath, content);
 87 | }
 88 | 
 89 | async function cleanupTestDirectory(dirPath: string): Promise<void> {
 90 |   try {
 91 |     await fs.rm(dirPath, { recursive: true, force: true });
 92 |   } catch (error) {
 93 |     // Ignore cleanup errors
 94 |   }
 95 | }
 96 | 
 97 | // Now import the module under test
 98 | import { detectDocumentationGaps } from "../../src/tools/detect-gaps.js";
 99 | 
100 | describe("detectDocumentationGaps (Real Filesystem)", () => {
101 |   const mockRepositoryAnalysis = {
102 |     id: "analysis_123",
103 |     structure: {
104 |       hasTests: true,
105 |       hasCI: true,
106 |       hasDocs: true,
107 |     },
108 |     dependencies: {
109 |       ecosystem: "javascript",
110 |       packages: ["react", "express"],
111 |     },
112 |     hasApiEndpoints: true,
113 |     packageManager: "npm",
114 |     hasDocker: true,
115 |     hasCICD: true,
116 |   };
117 | 
118 |   const mockValidationResult = {
119 |     success: true,
120 |     confidence: { overall: 85 },
121 |     issues: [{ type: "warning", description: "Missing API examples" }],
122 |     validationResults: [
123 |       { status: "pass", message: "Good structure" },
124 |       {
125 |         status: "fail",
126 |         message: "Missing references",
127 |         recommendation: "Add API docs",
128 |       },
129 |     ],
130 |   };
131 | 
132 |   let testRepoDir: string;
133 |   const createdDirs: string[] = [];
134 | 
135 |   beforeEach(async () => {
136 |     jest.clearAllMocks();
137 | 
138 |     // Create a fresh test directory for each test
139 |     testRepoDir = await createTestDirectory("test-repo");
140 |     createdDirs.push(testRepoDir);
141 | 
142 |     // Default successful repository analysis
143 |     mockAnalyzeRepository.mockResolvedValue({
144 |       content: [
145 |         {
146 |           type: "text",
147 |           text: JSON.stringify(mockRepositoryAnalysis),
148 |         },
149 |       ],
150 |     });
151 | 
152 |     // Default validation result
153 |     mockValidateContent.mockResolvedValue({
154 |       content: [
155 |         {
156 |           type: "text",
157 |           text: JSON.stringify({ success: true, data: mockValidationResult }),
158 |         },
159 |       ],
160 |     } as any);
161 |   });
162 | 
163 |   afterEach(async () => {
164 |     // Cleanup all created directories
165 |     await Promise.all(createdDirs.map((dir) => cleanupTestDirectory(dir)));
166 |     createdDirs.length = 0;
167 |   });
168 | 
169 |   describe("basic functionality", () => {
170 |     it("should detect gaps in repository without documentation", async () => {
171 |       // No docs directory created - test repo is empty
172 | 
173 |       const result = await detectDocumentationGaps({
174 |         repositoryPath: testRepoDir,
175 |         depth: "quick",
176 |       });
177 | 
178 |       expect(result.content).toBeDefined();
179 |       expect(result.content[0]).toBeDefined();
180 |       const data = JSON.parse(result.content[0].text);
181 | 
182 |       expect(data.repositoryPath).toBe(testRepoDir);
183 |       expect(data.analysisId).toBe("analysis_123");
184 |       expect(data.overallScore).toBe(0);
185 |       expect(data.gaps).toContainEqual(
186 |         expect.objectContaining({
187 |           category: "general",
188 |           gapType: "missing_section",
189 |           description: "No documentation directory found",
190 |           priority: "critical",
191 |         }),
192 |       );
193 |     });
194 | 
195 |     it("should detect missing Diataxis sections", async () => {
196 |       // Create docs directory with some sections but missing tutorials and how-to
197 |       const docsDir = path.join(testRepoDir, "docs");
198 |       await fs.mkdir(docsDir);
199 |       await createTestFile(
200 |         path.join(docsDir, "index.md"),
201 |         "# Main Documentation",
202 |       );
203 | 
204 |       // Create reference and explanation sections
205 |       await fs.mkdir(path.join(docsDir, "reference"));
206 |       await createTestFile(
207 |         path.join(docsDir, "reference", "api.md"),
208 |         "# API Reference",
209 |       );
210 |       await fs.mkdir(path.join(docsDir, "explanation"));
211 |       await createTestFile(
212 |         path.join(docsDir, "explanation", "concepts.md"),
213 |         "# Concepts",
214 |       );
215 | 
216 |       // tutorials and how-to are missing
217 | 
218 |       const result = await detectDocumentationGaps({
219 |         repositoryPath: testRepoDir,
220 |         documentationPath: docsDir,
221 |         depth: "standard",
222 |       });
223 | 
224 |       const data = JSON.parse(result.content[0].text);
225 | 
226 |       expect(data.gaps).toContainEqual(
227 |         expect.objectContaining({
228 |           category: "tutorials",
229 |           gapType: "missing_section",
230 |           priority: "high",
231 |         }),
232 |       );
233 |       expect(data.gaps).toContainEqual(
234 |         expect.objectContaining({
235 |           category: "how-to",
236 |           gapType: "missing_section",
237 |           priority: "medium",
238 |         }),
239 |       );
240 |     });
241 | 
242 |     it("should identify existing documentation strengths", async () => {
243 |       // Create comprehensive docs structure
244 |       const docsDir = path.join(testRepoDir, "docs");
245 |       await fs.mkdir(docsDir);
246 |       await createTestFile(
247 |         path.join(docsDir, "README.md"),
248 |         "# Project Documentation",
249 |       );
250 | 
251 |       // Create all Diataxis sections
252 |       await fs.mkdir(path.join(docsDir, "tutorials"));
253 |       await createTestFile(
254 |         path.join(docsDir, "tutorials", "getting-started.md"),
255 |         "# Getting Started",
256 |       );
257 |       await fs.mkdir(path.join(docsDir, "how-to"));
258 |       await createTestFile(
259 |         path.join(docsDir, "how-to", "deployment.md"),
260 |         "# How to Deploy",
261 |       );
262 |       await fs.mkdir(path.join(docsDir, "reference"));
263 |       await createTestFile(
264 |         path.join(docsDir, "reference", "api.md"),
265 |         "# API Reference",
266 |       );
267 |       await fs.mkdir(path.join(docsDir, "explanation"));
268 |       await createTestFile(
269 |         path.join(docsDir, "explanation", "architecture.md"),
270 |         "# Architecture",
271 |       );
272 | 
273 |       const result = await detectDocumentationGaps({
274 |         repositoryPath: testRepoDir,
275 |         documentationPath: docsDir,
276 |         depth: "comprehensive",
277 |       });
278 | 
279 |       const data = JSON.parse(result.content[0].text);
280 | 
281 |       expect(data.strengths).toContain("Has main documentation index file");
282 |       expect(data.strengths).toContain(
283 |         "Well-organized sections: tutorials, how-to, reference, explanation",
284 |       );
285 |       expect(data.overallScore).toBeGreaterThan(50); // Adjust expectation to match actual scoring
286 |     });
287 |   });
288 | 
289 |   describe("error handling", () => {
290 |     it("should handle repository analysis failure", async () => {
291 |       mockAnalyzeRepository.mockResolvedValue({
292 |         content: [
293 |           {
294 |             type: "text",
295 |             text: JSON.stringify({ success: false, error: "Analysis failed" }),
296 |           },
297 |         ],
298 |       });
299 | 
300 |       const result = await detectDocumentationGaps({
301 |         repositoryPath: testRepoDir,
302 |       });
303 | 
304 |       expect(result.content[0].text).toContain("GAP_DETECTION_FAILED");
305 |       expect(result).toHaveProperty("isError", true);
306 |     });
307 | 
308 |     it("should handle file system errors gracefully", async () => {
309 |       // Create a docs directory but then make it inaccessible
310 |       const docsDir = path.join(testRepoDir, "docs");
311 |       await fs.mkdir(docsDir);
312 | 
313 |       const result = await detectDocumentationGaps({
314 |         repositoryPath: testRepoDir,
315 |         documentationPath: docsDir,
316 |       });
317 | 
318 |       const data = JSON.parse(result.content[0].text);
319 |       expect(data.analysisId).toBe("analysis_123");
320 |       expect(data.gaps).toBeInstanceOf(Array);
321 |     });
322 |   });
323 | 
324 |   describe("code-based gap detection", () => {
325 |     it("should detect missing API documentation when endpoints exist", async () => {
326 |       // Create docs directory without API documentation
327 |       const docsDir = path.join(testRepoDir, "docs");
328 |       await fs.mkdir(docsDir);
329 |       await createTestFile(path.join(docsDir, "index.md"), "# Documentation");
330 | 
331 |       // Mock CodeScanner to return API endpoints
332 |       const { CodeScanner } = require("../../src/utils/code-scanner.js");
333 |       CodeScanner.mockImplementationOnce(() => ({
334 |         analyzeRepository: jest.fn().mockResolvedValue({
335 |           summary: {
336 |             totalFiles: 5,
337 |             parsedFiles: 3,
338 |             functions: 10,
339 |             classes: 2,
340 |             interfaces: 3,
341 |             types: 1,
342 |             constants: 2,
343 |             apiEndpoints: 3,
344 |           },
345 |           files: ["src/api.ts", "src/routes.ts"],
346 |           functions: [],
347 |           classes: [],
348 |           interfaces: [],
349 |           types: [],
350 |           constants: [],
351 |           apiEndpoints: [
352 |             {
353 |               method: "GET",
354 |               path: "/api/users",
355 |               filePath: "src/api.ts",
356 |               line: 10,
357 |               hasDocumentation: true,
358 |             },
359 |             {
360 |               method: "POST",
361 |               path: "/api/users",
362 |               filePath: "src/api.ts",
363 |               line: 20,
364 |               hasDocumentation: true,
365 |             },
366 |             {
367 |               method: "DELETE",
368 |               path: "/api/users/:id",
369 |               filePath: "src/routes.ts",
370 |               line: 5,
371 |               hasDocumentation: true,
372 |             },
373 |           ],
374 |           imports: [],
375 |           exports: [],
376 |           frameworks: [],
377 |         }),
378 |       }));
379 | 
380 |       const result = await detectDocumentationGaps({
381 |         repositoryPath: testRepoDir,
382 |         documentationPath: docsDir,
383 |         depth: "comprehensive",
384 |       });
385 | 
386 |       const data = JSON.parse(result.content[0].text);
387 | 
388 |       // Should detect missing API documentation section
389 |       expect(data.gaps).toContainEqual(
390 |         expect.objectContaining({
391 |           category: "reference",
392 |           gapType: "missing_section",
393 |           description: expect.stringContaining("API endpoints"),
394 |           priority: "critical",
395 |         }),
396 |       );
397 |     });
398 | 
399 |     it("should detect undocumented API endpoints", async () => {
400 |       // Create docs directory with API section
401 |       const docsDir = path.join(testRepoDir, "docs");
402 |       await fs.mkdir(docsDir);
403 |       await fs.mkdir(path.join(docsDir, "reference"));
404 |       await createTestFile(
405 |         path.join(docsDir, "reference", "api.md"),
406 |         "# API Reference",
407 |       );
408 | 
409 |       // Mock CodeScanner to return endpoints without documentation
410 |       const { CodeScanner } = require("../../src/utils/code-scanner.js");
411 |       CodeScanner.mockImplementationOnce(() => ({
412 |         analyzeRepository: jest.fn().mockResolvedValue({
413 |           summary: {
414 |             totalFiles: 5,
415 |             parsedFiles: 3,
416 |             functions: 10,
417 |             classes: 2,
418 |             interfaces: 3,
419 |             types: 1,
420 |             constants: 2,
421 |             apiEndpoints: 2,
422 |           },
423 |           files: ["src/api.ts"],
424 |           functions: [],
425 |           classes: [],
426 |           interfaces: [],
427 |           types: [],
428 |           constants: [],
429 |           apiEndpoints: [
430 |             {
431 |               method: "GET",
432 |               path: "/api/data",
433 |               filePath: "src/api.ts",
434 |               line: 15,
435 |               hasDocumentation: false, // No JSDoc
436 |             },
437 |             {
438 |               method: "POST",
439 |               path: "/api/data",
440 |               filePath: "src/api.ts",
441 |               line: 25,
442 |               hasDocumentation: false, // No JSDoc
443 |             },
444 |           ],
445 |           imports: [],
446 |           exports: [],
447 |           frameworks: [],
448 |         }),
449 |       }));
450 | 
451 |       const result = await detectDocumentationGaps({
452 |         repositoryPath: testRepoDir,
453 |         documentationPath: docsDir,
454 |         depth: "comprehensive",
455 |       });
456 | 
457 |       const data = JSON.parse(result.content[0].text);
458 | 
459 |       // Should detect undocumented endpoints
460 |       expect(data.gaps).toContainEqual(
461 |         expect.objectContaining({
462 |           category: "reference",
463 |           gapType: "missing_examples",
464 |           description: expect.stringContaining("2 API endpoints lack"),
465 |           priority: "high",
466 |         }),
467 |       );
468 |     });
469 | 
470 |     it("should detect undocumented exported classes", async () => {
471 |       const docsDir = path.join(testRepoDir, "docs");
472 |       await fs.mkdir(docsDir);
473 |       await createTestFile(path.join(docsDir, "index.md"), "# Documentation");
474 | 
475 |       // Mock CodeScanner to return undocumented classes
476 |       const { CodeScanner } = require("../../src/utils/code-scanner.js");
477 |       CodeScanner.mockImplementationOnce(() => ({
478 |         analyzeRepository: jest.fn().mockResolvedValue({
479 |           summary: {
480 |             totalFiles: 5,
481 |             parsedFiles: 3,
482 |             functions: 10,
483 |             classes: 3,
484 |             interfaces: 2,
485 |             types: 1,
486 |             constants: 2,
487 |             apiEndpoints: 0,
488 |           },
489 |           files: ["src/models.ts"],
490 |           functions: [],
491 |           classes: [
492 |             {
493 |               name: "UserModel",
494 |               filePath: "src/models.ts",
495 |               line: 10,
496 |               exported: true,
497 |               hasJSDoc: false,
498 |             },
499 |             {
500 |               name: "PostModel",
501 |               filePath: "src/models.ts",
502 |               line: 30,
503 |               exported: true,
504 |               hasJSDoc: false,
505 |             },
506 |             {
507 |               name: "InternalHelper",
508 |               filePath: "src/models.ts",
509 |               line: 50,
510 |               exported: false, // Not exported, should be ignored
511 |               hasJSDoc: false,
512 |             },
513 |           ],
514 |           interfaces: [],
515 |           types: [],
516 |           constants: [],
517 |           apiEndpoints: [],
518 |           imports: [],
519 |           exports: [],
520 |           frameworks: [],
521 |         }),
522 |       }));
523 | 
524 |       const result = await detectDocumentationGaps({
525 |         repositoryPath: testRepoDir,
526 |         documentationPath: docsDir,
527 |         depth: "comprehensive",
528 |       });
529 | 
530 |       const data = JSON.parse(result.content[0].text);
531 | 
532 |       // Should detect undocumented exported classes (only 2, not the non-exported one)
533 |       expect(data.gaps).toContainEqual(
534 |         expect.objectContaining({
535 |           category: "reference",
536 |           gapType: "incomplete_content",
537 |           description: expect.stringContaining("2 exported classes lack"),
538 |           priority: "medium",
539 |         }),
540 |       );
541 |     });
542 | 
543 |     it("should detect undocumented exported interfaces", async () => {
544 |       const docsDir = path.join(testRepoDir, "docs");
545 |       await fs.mkdir(docsDir);
546 |       await createTestFile(path.join(docsDir, "index.md"), "# Documentation");
547 | 
548 |       // Mock CodeScanner to return undocumented interfaces
549 |       const { CodeScanner } = require("../../src/utils/code-scanner.js");
550 |       CodeScanner.mockImplementationOnce(() => ({
551 |         analyzeRepository: jest.fn().mockResolvedValue({
552 |           summary: {
553 |             totalFiles: 5,
554 |             parsedFiles: 3,
555 |             functions: 10,
556 |             classes: 2,
557 |             interfaces: 3,
558 |             types: 1,
559 |             constants: 2,
560 |             apiEndpoints: 0,
561 |           },
562 |           files: ["src/types.ts"],
563 |           functions: [],
564 |           classes: [],
565 |           interfaces: [
566 |             {
567 |               name: "IUser",
568 |               filePath: "src/types.ts",
569 |               line: 5,
570 |               exported: true,
571 |               hasJSDoc: false,
572 |             },
573 |             {
574 |               name: "IConfig",
575 |               filePath: "src/types.ts",
576 |               line: 15,
577 |               exported: true,
578 |               hasJSDoc: false,
579 |             },
580 |             {
581 |               name: "IInternalState",
582 |               filePath: "src/types.ts",
583 |               line: 25,
584 |               exported: false, // Not exported
585 |               hasJSDoc: false,
586 |             },
587 |           ],
588 |           types: [],
589 |           constants: [],
590 |           apiEndpoints: [],
591 |           imports: [],
592 |           exports: [],
593 |           frameworks: [],
594 |         }),
595 |       }));
596 | 
597 |       const result = await detectDocumentationGaps({
598 |         repositoryPath: testRepoDir,
599 |         documentationPath: docsDir,
600 |         depth: "comprehensive",
601 |       });
602 | 
603 |       const data = JSON.parse(result.content[0].text);
604 | 
605 |       // Should detect undocumented exported interfaces
606 |       expect(data.gaps).toContainEqual(
607 |         expect.objectContaining({
608 |           category: "reference",
609 |           gapType: "incomplete_content",
610 |           description: expect.stringContaining("2 exported interfaces lack"),
611 |           priority: "medium",
612 |         }),
613 |       );
614 |     });
615 | 
616 |     it("should handle validation errors gracefully", async () => {
617 |       const docsDir = path.join(testRepoDir, "docs");
618 |       await fs.mkdir(docsDir);
619 |       await createTestFile(path.join(docsDir, "index.md"), "# Documentation");
620 | 
621 |       // Mock validation to throw an error
622 |       mockValidateContent.mockRejectedValueOnce(
623 |         new Error("Validation service unavailable"),
624 |       );
625 | 
626 |       const result = await detectDocumentationGaps({
627 |         repositoryPath: testRepoDir,
628 |         documentationPath: docsDir,
629 |         depth: "comprehensive",
630 |       });
631 | 
632 |       const data = JSON.parse(result.content[0].text);
633 | 
634 |       // Should still succeed without validation data
635 |       expect(data.analysisId).toBe("analysis_123");
636 |       expect(data.gaps).toBeInstanceOf(Array);
637 |       expect(data.repositoryPath).toBe(testRepoDir);
638 |     });
639 | 
640 |     it("should handle empty repository analysis result", async () => {
641 |       // Mock analyze_repository to return empty/no content
642 |       mockAnalyzeRepository.mockResolvedValueOnce({
643 |         content: [], // Empty content array
644 |       });
645 | 
646 |       const result = await detectDocumentationGaps({
647 |         repositoryPath: testRepoDir,
648 |         depth: "quick",
649 |       });
650 | 
651 |       // Should return error about failed analysis
652 |       expect(result.content[0].text).toContain("GAP_DETECTION_FAILED");
653 |       expect(result.content[0].text).toContain("Repository analysis failed");
654 |     });
655 | 
656 |     it("should detect missing React framework documentation", async () => {
657 |       const docsDir = path.join(testRepoDir, "docs");
658 |       await fs.mkdir(docsDir);
659 |       await createTestFile(path.join(docsDir, "index.md"), "# Documentation");
660 | 
661 |       // Mock CodeScanner to return React framework
662 |       const { CodeScanner } = require("../../src/utils/code-scanner.js");
663 |       CodeScanner.mockImplementationOnce(() => ({
664 |         analyzeRepository: jest.fn().mockResolvedValue({
665 |           summary: {
666 |             totalFiles: 5,
667 |             parsedFiles: 3,
668 |             functions: 10,
669 |             classes: 2,
670 |             interfaces: 3,
671 |             types: 1,
672 |             constants: 2,
673 |             apiEndpoints: 0,
674 |           },
675 |           files: ["src/App.tsx"],
676 |           functions: [],
677 |           classes: [],
678 |           interfaces: [],
679 |           types: [],
680 |           constants: [],
681 |           apiEndpoints: [],
682 |           imports: [],
683 |           exports: [],
684 |           frameworks: ["React"], // Indicate React is used
685 |         }),
686 |       }));
687 | 
688 |       const result = await detectDocumentationGaps({
689 |         repositoryPath: testRepoDir,
690 |         documentationPath: docsDir,
691 |         depth: "comprehensive",
692 |       });
693 | 
694 |       const data = JSON.parse(result.content[0].text);
695 | 
696 |       // Should detect missing React documentation
697 |       expect(data.gaps).toContainEqual(
698 |         expect.objectContaining({
699 |           category: "how-to",
700 |           gapType: "missing_section",
701 |           description: expect.stringContaining("React framework detected"),
702 |           priority: "medium",
703 |         }),
704 |       );
705 |     });
706 | 
707 |     it("should detect missing Express framework documentation", async () => {
708 |       const docsDir = path.join(testRepoDir, "docs");
709 |       await fs.mkdir(docsDir);
710 |       await createTestFile(path.join(docsDir, "index.md"), "# Documentation");
711 | 
712 |       // Mock CodeScanner to return Express framework
713 |       const { CodeScanner } = require("../../src/utils/code-scanner.js");
714 |       CodeScanner.mockImplementationOnce(() => ({
715 |         analyzeRepository: jest.fn().mockResolvedValue({
716 |           summary: {
717 |             totalFiles: 5,
718 |             parsedFiles: 3,
719 |             functions: 10,
720 |             classes: 2,
721 |             interfaces: 3,
722 |             types: 1,
723 |             constants: 2,
724 |             apiEndpoints: 0,
725 |           },
726 |           files: ["src/server.ts"],
727 |           functions: [],
728 |           classes: [],
729 |           interfaces: [],
730 |           types: [],
731 |           constants: [],
732 |           apiEndpoints: [],
733 |           imports: [],
734 |           exports: [],
735 |           frameworks: ["Express"], // Indicate Express is used
736 |         }),
737 |       }));
738 | 
739 |       const result = await detectDocumentationGaps({
740 |         repositoryPath: testRepoDir,
741 |         documentationPath: docsDir,
742 |         depth: "comprehensive",
743 |       });
744 | 
745 |       const data = JSON.parse(result.content[0].text);
746 | 
747 |       // Should detect missing Express documentation
748 |       expect(data.gaps).toContainEqual(
749 |         expect.objectContaining({
750 |           category: "how-to",
751 |           gapType: "missing_section",
752 |           description: expect.stringContaining("Express framework detected"),
753 |           priority: "medium",
754 |         }),
755 |       );
756 |     });
757 |   });
758 | 
759 |   describe("input validation", () => {
760 |     it("should require repositoryPath", async () => {
761 |       await expect(detectDocumentationGaps({} as any)).rejects.toThrow();
762 |     });
763 | 
764 |     it("should handle invalid depth parameter", async () => {
765 |       await expect(
766 |         detectDocumentationGaps({
767 |           repositoryPath: testRepoDir,
768 |           depth: "invalid" as any,
769 |         }),
770 |       ).rejects.toThrow();
771 |     });
772 |   });
773 | });
774 | 
```

--------------------------------------------------------------------------------
/src/memory/index.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Memory System for DocuMCP
  3 |  * Provides persistent memory and learning capabilities
  4 |  */
  5 | 
  6 | export { JSONLStorage, type MemoryEntry } from "./storage.js";
  7 | export {
  8 |   MemoryManager,
  9 |   type MemoryContext,
 10 |   type MemorySearchOptions,
 11 | } from "./manager.js";
 12 | export {
 13 |   EnhancedMemoryManager,
 14 |   type EnhancedRecommendation,
 15 |   type IntelligentAnalysis,
 16 | } from "./enhanced-manager.js";
 17 | export {
 18 |   IncrementalLearningSystem,
 19 |   type ProjectFeatures,
 20 |   type LearningPattern,
 21 |   type LearningInsight,
 22 | } from "./learning.js";
 23 | export {
 24 |   KnowledgeGraph,
 25 |   type GraphNode,
 26 |   type GraphEdge,
 27 |   type GraphPath,
 28 |   type RecommendationPath,
 29 | } from "./knowledge-graph.js";
 30 | export {
 31 |   ContextualMemoryRetrieval,
 32 |   type RetrievalContext,
 33 |   type ContextualMatch,
 34 |   type RetrievalResult,
 35 | } from "./contextual-retrieval.js";
 36 | export {
 37 |   MultiAgentMemorySharing,
 38 |   type AgentIdentity,
 39 |   type SharedMemory,
 40 |   type CollaborativeInsight,
 41 | } from "./multi-agent-sharing.js";
 42 | export {
 43 |   MemoryPruningSystem,
 44 |   type PruningPolicy,
 45 |   type OptimizationMetrics,
 46 |   type PruningResult,
 47 | } from "./pruning.js";
 48 | export {
 49 |   TemporalMemoryAnalysis,
 50 |   type TemporalPattern,
 51 |   type TemporalMetrics,
 52 |   type PredictionResult,
 53 |   type TemporalInsight,
 54 | } from "./temporal-analysis.js";
 55 | export {
 56 |   MemoryVisualizationSystem,
 57 |   type VisualizationConfig,
 58 |   type ChartData,
 59 |   type DashboardData,
 60 |   type NetworkVisualization,
 61 | } from "./visualization.js";
 62 | export {
 63 |   MemoryExportImportSystem,
 64 |   type ExportOptions,
 65 |   type ImportOptions,
 66 |   type ExportResult,
 67 |   type ImportResult,
 68 |   type MigrationPlan,
 69 | } from "./export-import.js";
 70 | export {
 71 |   initializeMemory,
 72 |   rememberAnalysis,
 73 |   rememberRecommendation,
 74 |   rememberDeployment,
 75 |   rememberConfiguration,
 76 |   recallProjectHistory,
 77 |   getProjectInsights,
 78 |   getSimilarProjects,
 79 |   cleanupOldMemories,
 80 |   exportMemories,
 81 |   importMemories,
 82 |   getMemoryStatistics,
 83 |   getMemoryManager,
 84 |   handleMemoryRecall,
 85 |   handleMemoryIntelligentAnalysis,
 86 |   handleMemoryEnhancedRecommendation,
 87 | } from "./integration.js";
 88 | 
 89 | // Memory Tools for MCP
 90 | export const memoryTools = [
 91 |   {
 92 |     name: "memory_recall",
 93 |     description: "Recall memories about a project or topic",
 94 |     inputSchema: {
 95 |       type: "object",
 96 |       properties: {
 97 |         query: {
 98 |           type: "string",
 99 |           description: "Search query or project ID",
100 |         },
101 |         type: {
102 |           type: "string",
103 |           enum: [
104 |             "analysis",
105 |             "recommendation",
106 |             "deployment",
107 |             "configuration",
108 |             "interaction",
109 |             "all",
110 |           ],
111 |           description: "Type of memory to recall",
112 |         },
113 |         limit: {
114 |           type: "number",
115 |           description: "Maximum number of memories to return",
116 |           default: 10,
117 |         },
118 |       },
119 |       required: ["query"],
120 |     },
121 |   },
122 |   {
123 |     name: "memory_intelligent_analysis",
124 |     description:
125 |       "Get intelligent analysis with patterns, predictions, and recommendations",
126 |     inputSchema: {
127 |       type: "object",
128 |       properties: {
129 |         projectPath: {
130 |           type: "string",
131 |           description: "Path to the project for analysis",
132 |         },
133 |         baseAnalysis: {
134 |           type: "object",
135 |           description: "Base analysis data to enhance",
136 |         },
137 |       },
138 |       required: ["projectPath", "baseAnalysis"],
139 |     },
140 |   },
141 |   {
142 |     name: "memory_enhanced_recommendation",
143 |     description:
144 |       "Get enhanced recommendations using learning and knowledge graph",
145 |     inputSchema: {
146 |       type: "object",
147 |       properties: {
148 |         projectPath: {
149 |           type: "string",
150 |           description: "Path to the project",
151 |         },
152 |         baseRecommendation: {
153 |           type: "object",
154 |           description: "Base recommendation to enhance",
155 |         },
156 |         projectFeatures: {
157 |           type: "object",
158 |           properties: {
159 |             language: { type: "string" },
160 |             framework: { type: "string" },
161 |             size: { type: "string", enum: ["small", "medium", "large"] },
162 |             complexity: {
163 |               type: "string",
164 |               enum: ["simple", "moderate", "complex"],
165 |             },
166 |             hasTests: { type: "boolean" },
167 |             hasCI: { type: "boolean" },
168 |             hasDocs: { type: "boolean" },
169 |             isOpenSource: { type: "boolean" },
170 |           },
171 |           required: ["language"],
172 |         },
173 |       },
174 |       required: ["projectPath", "baseRecommendation", "projectFeatures"],
175 |     },
176 |   },
177 |   {
178 |     name: "memory_learning_stats",
179 |     description: "Get comprehensive learning and knowledge graph statistics",
180 |     inputSchema: {
181 |       type: "object",
182 |       properties: {
183 |         includeDetails: {
184 |           type: "boolean",
185 |           description: "Include detailed statistics",
186 |           default: true,
187 |         },
188 |       },
189 |     },
190 |   },
191 |   {
192 |     name: "memory_knowledge_graph",
193 |     description: "Query the knowledge graph for relationships and paths",
194 |     inputSchema: {
195 |       type: "object",
196 |       properties: {
197 |         query: {
198 |           type: "object",
199 |           properties: {
200 |             nodeTypes: {
201 |               type: "array",
202 |               items: { type: "string" },
203 |               description: "Filter by node types",
204 |             },
205 |             edgeTypes: {
206 |               type: "array",
207 |               items: { type: "string" },
208 |               description: "Filter by edge types",
209 |             },
210 |             startNode: {
211 |               type: "string",
212 |               description: "Starting node for path queries",
213 |             },
214 |             maxDepth: {
215 |               type: "number",
216 |               description: "Maximum path depth",
217 |               default: 3,
218 |             },
219 |           },
220 |         },
221 |       },
222 |       required: ["query"],
223 |     },
224 |   },
225 |   {
226 |     name: "memory_contextual_search",
227 |     description: "Perform contextual memory retrieval with intelligent ranking",
228 |     inputSchema: {
229 |       type: "object",
230 |       properties: {
231 |         query: {
232 |           type: "string",
233 |           description: "Search query",
234 |         },
235 |         context: {
236 |           type: "object",
237 |           properties: {
238 |             currentProject: {
239 |               type: "object",
240 |               properties: {
241 |                 path: { type: "string" },
242 |                 language: { type: "string" },
243 |                 framework: { type: "string" },
244 |                 size: { type: "string", enum: ["small", "medium", "large"] },
245 |               },
246 |             },
247 |             userIntent: {
248 |               type: "object",
249 |               properties: {
250 |                 action: {
251 |                   type: "string",
252 |                   enum: [
253 |                     "analyze",
254 |                     "recommend",
255 |                     "deploy",
256 |                     "troubleshoot",
257 |                     "learn",
258 |                   ],
259 |                 },
260 |                 urgency: { type: "string", enum: ["low", "medium", "high"] },
261 |                 experience: {
262 |                   type: "string",
263 |                   enum: ["novice", "intermediate", "expert"],
264 |                 },
265 |               },
266 |             },
267 |             temporalContext: {
268 |               type: "object",
269 |               properties: {
270 |                 recency: {
271 |                   type: "string",
272 |                   enum: ["recent", "all", "historical"],
273 |                 },
274 |                 timeRange: {
275 |                   type: "object",
276 |                   properties: {
277 |                     start: { type: "string" },
278 |                     end: { type: "string" },
279 |                   },
280 |                 },
281 |               },
282 |             },
283 |           },
284 |         },
285 |         options: {
286 |           type: "object",
287 |           properties: {
288 |             maxResults: { type: "number", default: 10 },
289 |             minRelevance: { type: "number", default: 0.3 },
290 |             includeReasoning: { type: "boolean", default: true },
291 |           },
292 |         },
293 |       },
294 |       required: ["query", "context"],
295 |     },
296 |   },
297 |   {
298 |     name: "memory_agent_network",
299 |     description: "Manage multi-agent memory sharing and collaboration",
300 |     inputSchema: {
301 |       type: "object",
302 |       properties: {
303 |         action: {
304 |           type: "string",
305 |           enum: [
306 |             "register_agent",
307 |             "share_memory",
308 |             "sync_request",
309 |             "get_insights",
310 |             "network_status",
311 |           ],
312 |           description: "Action to perform",
313 |         },
314 |         agentInfo: {
315 |           type: "object",
316 |           properties: {
317 |             name: { type: "string" },
318 |             capabilities: { type: "array", items: { type: "string" } },
319 |             specializations: { type: "array", items: { type: "string" } },
320 |             trustLevel: {
321 |               type: "string",
322 |               enum: ["low", "medium", "high", "trusted"],
323 |             },
324 |           },
325 |         },
326 |         memoryId: {
327 |           type: "string",
328 |           description: "Memory ID for sharing operations",
329 |         },
330 |         targetAgent: {
331 |           type: "string",
332 |           description: "Target agent for sync operations",
333 |         },
334 |         options: {
335 |           type: "object",
336 |           properties: {
337 |             anonymize: { type: "boolean", default: false },
338 |             requireValidation: { type: "boolean", default: false },
339 |           },
340 |         },
341 |       },
342 |       required: ["action"],
343 |     },
344 |   },
345 |   {
346 |     name: "memory_insights",
347 |     description: "Get insights and patterns from memory",
348 |     inputSchema: {
349 |       type: "object",
350 |       properties: {
351 |         projectId: {
352 |           type: "string",
353 |           description: "Project ID to analyze",
354 |         },
355 |         timeRange: {
356 |           type: "object",
357 |           properties: {
358 |             start: { type: "string", format: "date-time" },
359 |             end: { type: "string", format: "date-time" },
360 |           },
361 |           description: "Time range for analysis",
362 |         },
363 |       },
364 |     },
365 |   },
366 |   {
367 |     name: "memory_similar",
368 |     description: "Find similar projects from memory",
369 |     inputSchema: {
370 |       type: "object",
371 |       properties: {
372 |         analysisId: {
373 |           type: "string",
374 |           description: "Analysis ID to find similar projects for",
375 |         },
376 |         limit: {
377 |           type: "number",
378 |           description: "Maximum number of similar projects",
379 |           default: 5,
380 |         },
381 |       },
382 |       required: ["analysisId"],
383 |     },
384 |   },
385 |   {
386 |     name: "memory_export",
387 |     description: "Export memories to JSON or CSV",
388 |     inputSchema: {
389 |       type: "object",
390 |       properties: {
391 |         format: {
392 |           type: "string",
393 |           enum: ["json", "csv"],
394 |           description: "Export format",
395 |           default: "json",
396 |         },
397 |         filter: {
398 |           type: "object",
399 |           properties: {
400 |             type: { type: "string" },
401 |             projectId: { type: "string" },
402 |             startDate: { type: "string", format: "date-time" },
403 |             endDate: { type: "string", format: "date-time" },
404 |           },
405 |           description: "Filter memories to export",
406 |         },
407 |       },
408 |     },
409 |   },
410 |   {
411 |     name: "memory_cleanup",
412 |     description: "Clean up old memories",
413 |     inputSchema: {
414 |       type: "object",
415 |       properties: {
416 |         daysToKeep: {
417 |           type: "number",
418 |           description: "Number of days of memories to keep",
419 |           default: 30,
420 |         },
421 |         dryRun: {
422 |           type: "boolean",
423 |           description:
424 |             "Preview what would be deleted without actually deleting",
425 |           default: false,
426 |         },
427 |       },
428 |     },
429 |   },
430 |   {
431 |     name: "memory_pruning",
432 |     description: "Intelligent memory pruning and optimization",
433 |     inputSchema: {
434 |       type: "object",
435 |       properties: {
436 |         policy: {
437 |           type: "object",
438 |           properties: {
439 |             maxAge: {
440 |               type: "number",
441 |               description: "Maximum age in days",
442 |               default: 180,
443 |             },
444 |             maxSize: {
445 |               type: "number",
446 |               description: "Maximum storage size in MB",
447 |               default: 500,
448 |             },
449 |             maxEntries: {
450 |               type: "number",
451 |               description: "Maximum number of entries",
452 |               default: 50000,
453 |             },
454 |             preservePatterns: {
455 |               type: "array",
456 |               items: { type: "string" },
457 |               description: "Pattern types to preserve",
458 |             },
459 |             compressionThreshold: {
460 |               type: "number",
461 |               description: "Compress entries older than X days",
462 |               default: 30,
463 |             },
464 |             redundancyThreshold: {
465 |               type: "number",
466 |               description: "Remove similar entries with similarity > X",
467 |               default: 0.85,
468 |             },
469 |           },
470 |         },
471 |         dryRun: {
472 |           type: "boolean",
473 |           description: "Preview pruning without executing",
474 |           default: false,
475 |         },
476 |       },
477 |     },
478 |   },
479 |   {
480 |     name: "memory_temporal_analysis",
481 |     description: "Analyze temporal patterns and trends in memory data",
482 |     inputSchema: {
483 |       type: "object",
484 |       properties: {
485 |         query: {
486 |           type: "object",
487 |           properties: {
488 |             timeRange: {
489 |               type: "object",
490 |               properties: {
491 |                 start: { type: "string", format: "date-time" },
492 |                 end: { type: "string", format: "date-time" },
493 |               },
494 |             },
495 |             granularity: {
496 |               type: "string",
497 |               enum: ["hour", "day", "week", "month", "year"],
498 |               default: "day",
499 |             },
500 |             aggregation: {
501 |               type: "string",
502 |               enum: ["count", "success_rate", "activity_level", "diversity"],
503 |               default: "count",
504 |             },
505 |             filters: {
506 |               type: "object",
507 |               properties: {
508 |                 types: { type: "array", items: { type: "string" } },
509 |                 projects: { type: "array", items: { type: "string" } },
510 |                 outcomes: { type: "array", items: { type: "string" } },
511 |                 tags: { type: "array", items: { type: "string" } },
512 |               },
513 |             },
514 |           },
515 |         },
516 |         analysisType: {
517 |           type: "string",
518 |           enum: ["patterns", "metrics", "predictions", "insights"],
519 |           default: "patterns",
520 |         },
521 |       },
522 |     },
523 |   },
524 |   {
525 |     name: "memory_visualization",
526 |     description: "Generate visual representations of memory data",
527 |     inputSchema: {
528 |       type: "object",
529 |       properties: {
530 |         visualizationType: {
531 |           type: "string",
532 |           enum: [
533 |             "dashboard",
534 |             "timeline",
535 |             "network",
536 |             "heatmap",
537 |             "distribution",
538 |             "trends",
539 |             "custom",
540 |           ],
541 |           default: "dashboard",
542 |         },
543 |         options: {
544 |           type: "object",
545 |           properties: {
546 |             timeRange: {
547 |               type: "object",
548 |               properties: {
549 |                 start: { type: "string", format: "date-time" },
550 |                 end: { type: "string", format: "date-time" },
551 |               },
552 |             },
553 |             includeCharts: { type: "array", items: { type: "string" } },
554 |             config: {
555 |               type: "object",
556 |               properties: {
557 |                 width: { type: "number", default: 800 },
558 |                 height: { type: "number", default: 600 },
559 |                 theme: {
560 |                   type: "string",
561 |                   enum: ["light", "dark", "auto"],
562 |                   default: "light",
563 |                 },
564 |                 exportFormat: {
565 |                   type: "string",
566 |                   enum: ["svg", "png", "json", "html"],
567 |                   default: "svg",
568 |                 },
569 |                 interactive: { type: "boolean", default: true },
570 |               },
571 |             },
572 |           },
573 |         },
574 |         customVisualization: {
575 |           type: "object",
576 |           properties: {
577 |             type: {
578 |               type: "string",
579 |               enum: [
580 |                 "line",
581 |                 "bar",
582 |                 "scatter",
583 |                 "heatmap",
584 |                 "network",
585 |                 "sankey",
586 |                 "treemap",
587 |                 "timeline",
588 |               ],
589 |             },
590 |             query: {
591 |               type: "object",
592 |               properties: {
593 |                 filters: { type: "object" },
594 |                 groupBy: { type: "string" },
595 |                 aggregation: { type: "string" },
596 |               },
597 |             },
598 |           },
599 |         },
600 |       },
601 |     },
602 |   },
603 |   {
604 |     name: "memory_export_advanced",
605 |     description: "Advanced memory export with multiple formats and options",
606 |     inputSchema: {
607 |       type: "object",
608 |       properties: {
609 |         outputPath: { type: "string", description: "Output file path" },
610 |         options: {
611 |           type: "object",
612 |           properties: {
613 |             format: {
614 |               type: "string",
615 |               enum: [
616 |                 "json",
617 |                 "jsonl",
618 |                 "csv",
619 |                 "xml",
620 |                 "yaml",
621 |                 "sqlite",
622 |                 "archive",
623 |               ],
624 |               default: "json",
625 |             },
626 |             compression: {
627 |               type: "string",
628 |               enum: ["gzip", "zip", "none"],
629 |               default: "none",
630 |             },
631 |             includeMetadata: { type: "boolean", default: true },
632 |             includeLearning: { type: "boolean", default: true },
633 |             includeKnowledgeGraph: { type: "boolean", default: true },
634 |             filters: {
635 |               type: "object",
636 |               properties: {
637 |                 types: { type: "array", items: { type: "string" } },
638 |                 dateRange: {
639 |                   type: "object",
640 |                   properties: {
641 |                     start: { type: "string", format: "date-time" },
642 |                     end: { type: "string", format: "date-time" },
643 |                   },
644 |                 },
645 |                 projects: { type: "array", items: { type: "string" } },
646 |                 tags: { type: "array", items: { type: "string" } },
647 |                 outcomes: { type: "array", items: { type: "string" } },
648 |               },
649 |             },
650 |             anonymize: {
651 |               type: "object",
652 |               properties: {
653 |                 enabled: { type: "boolean", default: false },
654 |                 fields: { type: "array", items: { type: "string" } },
655 |                 method: {
656 |                   type: "string",
657 |                   enum: ["hash", "remove", "pseudonymize"],
658 |                   default: "hash",
659 |                 },
660 |               },
661 |             },
662 |             encryption: {
663 |               type: "object",
664 |               properties: {
665 |                 enabled: { type: "boolean", default: false },
666 |                 algorithm: {
667 |                   type: "string",
668 |                   enum: ["aes-256-gcm", "aes-192-gcm", "aes-128-gcm"],
669 |                   default: "aes-256-gcm",
670 |                 },
671 |                 password: { type: "string" },
672 |               },
673 |             },
674 |           },
675 |         },
676 |       },
677 |       required: ["outputPath"],
678 |     },
679 |   },
680 |   {
681 |     name: "memory_import_advanced",
682 |     description:
683 |       "Advanced memory import with validation and conflict resolution",
684 |     inputSchema: {
685 |       type: "object",
686 |       properties: {
687 |         inputPath: { type: "string", description: "Input file path" },
688 |         options: {
689 |           type: "object",
690 |           properties: {
691 |             format: {
692 |               type: "string",
693 |               enum: [
694 |                 "json",
695 |                 "jsonl",
696 |                 "csv",
697 |                 "xml",
698 |                 "yaml",
699 |                 "sqlite",
700 |                 "archive",
701 |               ],
702 |               default: "json",
703 |             },
704 |             mode: {
705 |               type: "string",
706 |               enum: ["merge", "replace", "append", "update"],
707 |               default: "merge",
708 |             },
709 |             validation: {
710 |               type: "string",
711 |               enum: ["strict", "loose", "none"],
712 |               default: "strict",
713 |             },
714 |             conflictResolution: {
715 |               type: "string",
716 |               enum: ["skip", "overwrite", "merge", "rename"],
717 |               default: "skip",
718 |             },
719 |             backup: { type: "boolean", default: true },
720 |             dryRun: { type: "boolean", default: false },
721 |             mapping: {
722 |               type: "object",
723 |               description: "Field mapping for different schemas",
724 |             },
725 |             transformation: {
726 |               type: "object",
727 |               properties: {
728 |                 enabled: { type: "boolean", default: false },
729 |                 rules: {
730 |                   type: "array",
731 |                   items: {
732 |                     type: "object",
733 |                     properties: {
734 |                       field: { type: "string" },
735 |                       operation: {
736 |                         type: "string",
737 |                         enum: ["convert", "transform", "validate"],
738 |                       },
739 |                       params: { type: "object" },
740 |                     },
741 |                   },
742 |                 },
743 |               },
744 |             },
745 |           },
746 |         },
747 |       },
748 |       required: ["inputPath"],
749 |     },
750 |   },
751 |   {
752 |     name: "memory_migration",
753 |     description:
754 |       "Create and execute migration plans between different memory systems",
755 |     inputSchema: {
756 |       type: "object",
757 |       properties: {
758 |         action: {
759 |           type: "string",
760 |           enum: ["create_plan", "execute_migration", "validate_compatibility"],
761 |           default: "create_plan",
762 |         },
763 |         sourcePath: { type: "string", description: "Source data path" },
764 |         migrationPlan: {
765 |           type: "object",
766 |           properties: {
767 |             sourceSystem: { type: "string" },
768 |             targetSystem: { type: "string", default: "DocuMCP" },
769 |             mapping: { type: "object" },
770 |             transformations: { type: "array" },
771 |             validation: { type: "array" },
772 |             postProcessing: { type: "array", items: { type: "string" } },
773 |           },
774 |         },
775 |         sourceSchema: { type: "object", description: "Source system schema" },
776 |         targetSchema: { type: "object", description: "Target system schema" },
777 |         options: {
778 |           type: "object",
779 |           properties: {
780 |             autoMap: { type: "boolean", default: true },
781 |             preserveStructure: { type: "boolean", default: true },
782 |             customMappings: { type: "object" },
783 |           },
784 |         },
785 |       },
786 |     },
787 |   },
788 |   {
789 |     name: "memory_optimization_metrics",
790 |     description: "Get comprehensive optimization metrics and recommendations",
791 |     inputSchema: {
792 |       type: "object",
793 |       properties: {
794 |         includeRecommendations: { type: "boolean", default: true },
795 |         timeRange: {
796 |           type: "object",
797 |           properties: {
798 |             start: { type: "string", format: "date-time" },
799 |             end: { type: "string", format: "date-time" },
800 |           },
801 |         },
802 |       },
803 |     },
804 |   },
805 | ];
806 | 
```
Page 15/29FirstPrevNextLast