This is page 8 of 29. Use http://codebase.md/tosin2013/documcp?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .dockerignore
├── .eslintignore
├── .eslintrc.json
├── .github
│ ├── agents
│ │ ├── documcp-ast.md
│ │ ├── documcp-deploy.md
│ │ ├── documcp-memory.md
│ │ ├── documcp-test.md
│ │ └── documcp-tool.md
│ ├── copilot-instructions.md
│ ├── dependabot.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── automated-changelog.md
│ │ ├── bug_report.md
│ │ ├── bug_report.yml
│ │ ├── documentation_issue.md
│ │ ├── feature_request.md
│ │ ├── feature_request.yml
│ │ ├── npm-publishing-fix.md
│ │ └── release_improvements.md
│ ├── PULL_REQUEST_TEMPLATE.md
│ ├── release-drafter.yml
│ └── workflows
│ ├── auto-merge.yml
│ ├── ci.yml
│ ├── codeql.yml
│ ├── dependency-review.yml
│ ├── deploy-docs.yml
│ ├── README.md
│ ├── release-drafter.yml
│ └── release.yml
├── .gitignore
├── .husky
│ ├── commit-msg
│ └── pre-commit
├── .linkcheck.config.json
├── .markdown-link-check.json
├── .nvmrc
├── .pre-commit-config.yaml
├── .versionrc.json
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── CONTRIBUTING.md
├── docker-compose.docs.yml
├── Dockerfile.docs
├── docs
│ ├── .docusaurus
│ │ ├── docusaurus-plugin-content-docs
│ │ │ └── default
│ │ │ └── __mdx-loader-dependency.json
│ │ └── docusaurus-plugin-content-pages
│ │ └── default
│ │ └── __plugin.json
│ ├── adrs
│ │ ├── 001-mcp-server-architecture.md
│ │ ├── 002-repository-analysis-engine.md
│ │ ├── 003-static-site-generator-recommendation-engine.md
│ │ ├── 004-diataxis-framework-integration.md
│ │ ├── 005-github-pages-deployment-automation.md
│ │ ├── 006-mcp-tools-api-design.md
│ │ ├── 007-mcp-prompts-and-resources-integration.md
│ │ ├── 008-intelligent-content-population-engine.md
│ │ ├── 009-content-accuracy-validation-framework.md
│ │ ├── 010-mcp-resource-pattern-redesign.md
│ │ └── README.md
│ ├── api
│ │ ├── .nojekyll
│ │ ├── assets
│ │ │ ├── hierarchy.js
│ │ │ ├── highlight.css
│ │ │ ├── icons.js
│ │ │ ├── icons.svg
│ │ │ ├── main.js
│ │ │ ├── navigation.js
│ │ │ ├── search.js
│ │ │ └── style.css
│ │ ├── hierarchy.html
│ │ ├── index.html
│ │ ├── modules.html
│ │ └── variables
│ │ └── TOOLS.html
│ ├── assets
│ │ └── logo.svg
│ ├── development
│ │ └── MCP_INSPECTOR_TESTING.md
│ ├── docusaurus.config.js
│ ├── explanation
│ │ ├── architecture.md
│ │ └── index.md
│ ├── guides
│ │ ├── link-validation.md
│ │ ├── playwright-integration.md
│ │ └── playwright-testing-workflow.md
│ ├── how-to
│ │ ├── analytics-setup.md
│ │ ├── custom-domains.md
│ │ ├── documentation-freshness-tracking.md
│ │ ├── github-pages-deployment.md
│ │ ├── index.md
│ │ ├── local-testing.md
│ │ ├── performance-optimization.md
│ │ ├── prompting-guide.md
│ │ ├── repository-analysis.md
│ │ ├── seo-optimization.md
│ │ ├── site-monitoring.md
│ │ ├── troubleshooting.md
│ │ └── usage-examples.md
│ ├── index.md
│ ├── knowledge-graph.md
│ ├── package-lock.json
│ ├── package.json
│ ├── phase-2-intelligence.md
│ ├── reference
│ │ ├── api-overview.md
│ │ ├── cli.md
│ │ ├── configuration.md
│ │ ├── deploy-pages.md
│ │ ├── index.md
│ │ ├── mcp-tools.md
│ │ └── prompt-templates.md
│ ├── research
│ │ ├── cross-domain-integration
│ │ │ └── README.md
│ │ ├── domain-1-mcp-architecture
│ │ │ ├── index.md
│ │ │ └── mcp-performance-research.md
│ │ ├── domain-2-repository-analysis
│ │ │ └── README.md
│ │ ├── domain-3-ssg-recommendation
│ │ │ ├── index.md
│ │ │ └── ssg-performance-analysis.md
│ │ ├── domain-4-diataxis-integration
│ │ │ └── README.md
│ │ ├── domain-5-github-deployment
│ │ │ ├── github-pages-security-analysis.md
│ │ │ └── index.md
│ │ ├── domain-6-api-design
│ │ │ └── README.md
│ │ ├── README.md
│ │ ├── research-integration-summary-2025-01-14.md
│ │ ├── research-progress-template.md
│ │ └── research-questions-2025-01-14.md
│ ├── robots.txt
│ ├── sidebars.js
│ ├── sitemap.xml
│ ├── src
│ │ └── css
│ │ └── custom.css
│ └── tutorials
│ ├── development-setup.md
│ ├── environment-setup.md
│ ├── first-deployment.md
│ ├── getting-started.md
│ ├── index.md
│ ├── memory-workflows.md
│ └── user-onboarding.md
├── jest.config.js
├── LICENSE
├── Makefile
├── MCP_PHASE2_IMPLEMENTATION.md
├── mcp-config-example.json
├── mcp.json
├── package-lock.json
├── package.json
├── README.md
├── release.sh
├── scripts
│ └── check-package-structure.cjs
├── SECURITY.md
├── setup-precommit.sh
├── src
│ ├── benchmarks
│ │ └── performance.ts
│ ├── index.ts
│ ├── memory
│ │ ├── contextual-retrieval.ts
│ │ ├── deployment-analytics.ts
│ │ ├── enhanced-manager.ts
│ │ ├── export-import.ts
│ │ ├── freshness-kg-integration.ts
│ │ ├── index.ts
│ │ ├── integration.ts
│ │ ├── kg-code-integration.ts
│ │ ├── kg-health.ts
│ │ ├── kg-integration.ts
│ │ ├── kg-link-validator.ts
│ │ ├── kg-storage.ts
│ │ ├── knowledge-graph.ts
│ │ ├── learning.ts
│ │ ├── manager.ts
│ │ ├── multi-agent-sharing.ts
│ │ ├── pruning.ts
│ │ ├── schemas.ts
│ │ ├── storage.ts
│ │ ├── temporal-analysis.ts
│ │ ├── user-preferences.ts
│ │ └── visualization.ts
│ ├── prompts
│ │ └── technical-writer-prompts.ts
│ ├── scripts
│ │ └── benchmark.ts
│ ├── templates
│ │ └── playwright
│ │ ├── accessibility.spec.template.ts
│ │ ├── Dockerfile.template
│ │ ├── docs-e2e.workflow.template.yml
│ │ ├── link-validation.spec.template.ts
│ │ └── playwright.config.template.ts
│ ├── tools
│ │ ├── analyze-deployments.ts
│ │ ├── analyze-readme.ts
│ │ ├── analyze-repository.ts
│ │ ├── check-documentation-links.ts
│ │ ├── deploy-pages.ts
│ │ ├── detect-gaps.ts
│ │ ├── evaluate-readme-health.ts
│ │ ├── generate-config.ts
│ │ ├── generate-contextual-content.ts
│ │ ├── generate-llm-context.ts
│ │ ├── generate-readme-template.ts
│ │ ├── generate-technical-writer-prompts.ts
│ │ ├── kg-health-check.ts
│ │ ├── manage-preferences.ts
│ │ ├── manage-sitemap.ts
│ │ ├── optimize-readme.ts
│ │ ├── populate-content.ts
│ │ ├── readme-best-practices.ts
│ │ ├── recommend-ssg.ts
│ │ ├── setup-playwright-tests.ts
│ │ ├── setup-structure.ts
│ │ ├── sync-code-to-docs.ts
│ │ ├── test-local-deployment.ts
│ │ ├── track-documentation-freshness.ts
│ │ ├── update-existing-documentation.ts
│ │ ├── validate-content.ts
│ │ ├── validate-documentation-freshness.ts
│ │ ├── validate-readme-checklist.ts
│ │ └── verify-deployment.ts
│ ├── types
│ │ └── api.ts
│ ├── utils
│ │ ├── ast-analyzer.ts
│ │ ├── code-scanner.ts
│ │ ├── content-extractor.ts
│ │ ├── drift-detector.ts
│ │ ├── freshness-tracker.ts
│ │ ├── language-parsers-simple.ts
│ │ ├── permission-checker.ts
│ │ └── sitemap-generator.ts
│ └── workflows
│ └── documentation-workflow.ts
├── test-docs-local.sh
├── tests
│ ├── api
│ │ └── mcp-responses.test.ts
│ ├── benchmarks
│ │ └── performance.test.ts
│ ├── edge-cases
│ │ └── error-handling.test.ts
│ ├── functional
│ │ └── tools.test.ts
│ ├── integration
│ │ ├── kg-documentation-workflow.test.ts
│ │ ├── knowledge-graph-workflow.test.ts
│ │ ├── mcp-readme-tools.test.ts
│ │ ├── memory-mcp-tools.test.ts
│ │ ├── readme-technical-writer.test.ts
│ │ └── workflow.test.ts
│ ├── memory
│ │ ├── contextual-retrieval.test.ts
│ │ ├── enhanced-manager.test.ts
│ │ ├── export-import.test.ts
│ │ ├── freshness-kg-integration.test.ts
│ │ ├── kg-code-integration.test.ts
│ │ ├── kg-health.test.ts
│ │ ├── kg-link-validator.test.ts
│ │ ├── kg-storage-validation.test.ts
│ │ ├── kg-storage.test.ts
│ │ ├── knowledge-graph-enhanced.test.ts
│ │ ├── knowledge-graph.test.ts
│ │ ├── learning.test.ts
│ │ ├── manager-advanced.test.ts
│ │ ├── manager.test.ts
│ │ ├── mcp-resource-integration.test.ts
│ │ ├── mcp-tool-persistence.test.ts
│ │ ├── schemas.test.ts
│ │ ├── storage.test.ts
│ │ ├── temporal-analysis.test.ts
│ │ └── user-preferences.test.ts
│ ├── performance
│ │ ├── memory-load-testing.test.ts
│ │ └── memory-stress-testing.test.ts
│ ├── prompts
│ │ ├── guided-workflow-prompts.test.ts
│ │ └── technical-writer-prompts.test.ts
│ ├── server.test.ts
│ ├── setup.ts
│ ├── tools
│ │ ├── all-tools.test.ts
│ │ ├── analyze-coverage.test.ts
│ │ ├── analyze-deployments.test.ts
│ │ ├── analyze-readme.test.ts
│ │ ├── analyze-repository.test.ts
│ │ ├── check-documentation-links.test.ts
│ │ ├── deploy-pages-kg-retrieval.test.ts
│ │ ├── deploy-pages-tracking.test.ts
│ │ ├── deploy-pages.test.ts
│ │ ├── detect-gaps.test.ts
│ │ ├── evaluate-readme-health.test.ts
│ │ ├── generate-contextual-content.test.ts
│ │ ├── generate-llm-context.test.ts
│ │ ├── generate-readme-template.test.ts
│ │ ├── generate-technical-writer-prompts.test.ts
│ │ ├── kg-health-check.test.ts
│ │ ├── manage-sitemap.test.ts
│ │ ├── optimize-readme.test.ts
│ │ ├── readme-best-practices.test.ts
│ │ ├── recommend-ssg-historical.test.ts
│ │ ├── recommend-ssg-preferences.test.ts
│ │ ├── recommend-ssg.test.ts
│ │ ├── simple-coverage.test.ts
│ │ ├── sync-code-to-docs.test.ts
│ │ ├── test-local-deployment.test.ts
│ │ ├── tool-error-handling.test.ts
│ │ ├── track-documentation-freshness.test.ts
│ │ ├── validate-content.test.ts
│ │ ├── validate-documentation-freshness.test.ts
│ │ └── validate-readme-checklist.test.ts
│ ├── types
│ │ └── type-safety.test.ts
│ └── utils
│ ├── ast-analyzer.test.ts
│ ├── content-extractor.test.ts
│ ├── drift-detector.test.ts
│ ├── freshness-tracker.test.ts
│ └── sitemap-generator.test.ts
├── tsconfig.json
└── typedoc.json
```
# Files
--------------------------------------------------------------------------------
/src/memory/storage.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * JSONL-based persistent storage for DocuMCP memory system
3 | * Implements Issue #45: Persistent JSONL Storage
4 | */
5 |
6 | import * as fs from "fs";
7 | import * as path from "path";
8 | import * as readline from "readline";
9 | import * as os from "os";
10 | import { createHash } from "crypto";
11 |
12 | export interface MemoryEntry {
13 | id: string;
14 | timestamp: string;
15 | type:
16 | | "analysis"
17 | | "recommendation"
18 | | "deployment"
19 | | "configuration"
20 | | "interaction";
21 | data: Record<string, any>;
22 | metadata: {
23 | projectId?: string;
24 | repository?: string;
25 | ssg?: string;
26 | tags?: string[];
27 | version?: string;
28 | compressed?: boolean;
29 | compressionType?: string;
30 | compressedAt?: string;
31 | originalSize?: number;
32 | merged?: boolean;
33 | mergedCount?: number;
34 | mergedAt?: string;
35 | };
36 | tags?: string[]; // Convenience field for direct access
37 | embeddings?: number[];
38 | checksum?: string;
39 | }
40 |
41 | export class JSONLStorage {
42 | private readonly storageDir: string;
43 | private readonly indexFile: string;
44 | private index: Map<string, { file: string; line: number; size: number }>;
45 | private lineCounters: Map<string, number>; // Track line count per file
46 |
47 | constructor(baseDir?: string) {
48 | this.storageDir = baseDir || this.getDefaultStorageDir();
49 | this.indexFile = path.join(this.storageDir, ".index.json");
50 | this.index = new Map();
51 | this.lineCounters = new Map();
52 | }
53 |
54 | private getDefaultStorageDir(): string {
55 | // For tests, use temp directory
56 | if (process.env.NODE_ENV === "test" || process.env.JEST_WORKER_ID) {
57 | return path.join(
58 | os.tmpdir(),
 59 |         `documcp-test-${Date.now()}-${Math.random().toString(36).slice(2, 11)}`,
60 | );
61 | }
62 |
63 | // For production/development, use project-local .documcp directory
64 | return path.join(process.cwd(), ".documcp", "memory");
65 | }
66 |
67 | async initialize(): Promise<void> {
68 | await fs.promises.mkdir(this.storageDir, { recursive: true });
69 | await this.loadIndex();
70 |
71 | // Log storage location in development mode
72 | if (process.env.NODE_ENV === "development" || process.env.DEBUG) {
73 | // eslint-disable-next-line no-console
74 | console.log(`[DocuMCP] Memory storage initialized: ${this.storageDir}`);
75 | }
76 | }
77 |
78 | private async loadIndex(): Promise<void> {
79 | try {
80 | const indexData = await fs.promises.readFile(this.indexFile, "utf-8");
81 | const data = JSON.parse(indexData);
82 |
83 | // Handle both old format (just entries) and new format (with line counters)
84 | if (Array.isArray(data)) {
85 | this.index = new Map(data);
86 | // Rebuild line counters for existing data
87 | await this.rebuildLineCounters();
88 | } else {
89 | this.index = new Map(data.entries || []);
90 | this.lineCounters = new Map(Object.entries(data.lineCounters || {}));
91 | }
92 | } catch (error) {
93 | this.index = new Map();
94 | this.lineCounters = new Map();
95 | }
96 | }
97 |
98 | private async saveIndex(): Promise<void> {
99 | // Ensure storage directory exists before writing index
100 | await fs.promises.mkdir(this.storageDir, { recursive: true });
101 |
102 | const data = {
103 | entries: Array.from(this.index.entries()),
104 | lineCounters: Object.fromEntries(this.lineCounters.entries()),
105 | };
106 | await fs.promises.writeFile(this.indexFile, JSON.stringify(data, null, 2));
107 | }
108 |
109 | private getFileName(type: MemoryEntry["type"], timestamp: string): string {
110 | const date = new Date(timestamp);
111 | const year = date.getFullYear();
112 | const month = String(date.getMonth() + 1).padStart(2, "0");
113 | return `${type}_${year}_${month}.jsonl`;
114 | }
115 |
116 | private generateId(entry: Omit<MemoryEntry, "id" | "checksum">): string {
117 | const hash = createHash("sha256");
118 | hash.update(JSON.stringify({ type: entry.type, data: entry.data }));
119 | return hash.digest("hex").substring(0, 16);
120 | }
121 |   // md5 is used here as a fast content checksum (integrity only, not a security primitive)
122 | private generateChecksum(data: any): string {
123 | const hash = createHash("md5");
124 | hash.update(JSON.stringify(data));
125 | return hash.digest("hex");
126 | }
127 |
128 | async append(
129 | entry: Omit<MemoryEntry, "id" | "checksum">,
130 | ): Promise<MemoryEntry> {
131 | const id = this.generateId(entry);
132 | const checksum = this.generateChecksum(entry.data);
133 | const completeEntry: MemoryEntry = {
134 | ...entry,
135 | id,
136 | checksum,
137 | timestamp: entry.timestamp || new Date().toISOString(),
138 | };
139 |
140 | const fileName = this.getFileName(
141 | completeEntry.type,
142 | completeEntry.timestamp,
143 | );
144 | const filePath = path.join(this.storageDir, fileName);
145 |
146 | // Ensure storage directory exists before writing
147 | await fs.promises.mkdir(this.storageDir, { recursive: true });
148 |
149 | const line = JSON.stringify(completeEntry);
150 | await fs.promises.appendFile(filePath, line + "\n");
151 |
152 | // Efficiently track line numbers using a counter
153 | const currentLineCount = this.lineCounters.get(fileName) || 0;
154 | const lineNumber = currentLineCount + 1;
155 | this.lineCounters.set(fileName, lineNumber);
156 |
157 | this.index.set(id, {
158 | file: fileName,
159 | line: lineNumber,
160 | size: Buffer.byteLength(line),
161 | });
162 |
163 | await this.saveIndex();
164 | return completeEntry;
165 | }
166 |
167 | async get(id: string): Promise<MemoryEntry | null> {
168 | const location = this.index.get(id);
169 | if (!location) return null;
170 |
171 | const filePath = path.join(this.storageDir, location.file);
172 | const stream = readline.createInterface({
173 | input: fs.createReadStream(filePath),
174 | crlfDelay: Infinity,
175 | });
176 |
177 | let lineNumber = 0;
178 | for await (const line of stream) {
179 | lineNumber++;
180 | if (lineNumber === location.line) {
181 | stream.close();
182 | try {
183 | return JSON.parse(line);
184 | } catch (error) {
185 | return null;
186 | }
187 | }
188 | }
189 |
190 | return null;
191 | }
192 |
193 | async query(filter: {
194 | type?: MemoryEntry["type"];
195 | projectId?: string;
196 | repository?: string;
197 | ssg?: string;
198 | tags?: string[];
199 | startDate?: string;
200 | endDate?: string;
201 | limit?: number;
202 | }): Promise<MemoryEntry[]> {
203 | const results: MemoryEntry[] = [];
204 | const files = await this.getRelevantFiles(filter);
205 |
206 | for (const file of files) {
207 | const filePath = path.join(this.storageDir, file);
208 | const stream = readline.createInterface({
209 | input: fs.createReadStream(filePath),
210 | crlfDelay: Infinity,
211 | });
212 |
213 | for await (const line of stream) {
214 | if (line.trim() === "") continue; // Skip empty lines
215 |
216 | try {
217 | const entry: MemoryEntry = JSON.parse(line);
218 |
219 | // Only include entries that are still in the index (not soft-deleted)
220 | if (this.index.has(entry.id) && this.matchesFilter(entry, filter)) {
221 | results.push(entry);
222 | if (filter.limit && results.length >= filter.limit) {
223 | stream.close();
224 | return results;
225 | }
226 | }
227 | } catch (error) {
228 | // Skip invalid JSON lines
229 | continue;
230 | }
231 | }
232 | }
233 |
234 | return results;
235 | }
236 |
237 | private async getRelevantFiles(filter: any): Promise<string[]> {
238 | const files = await fs.promises.readdir(this.storageDir);
239 | return files
240 | .filter((f) => f.endsWith(".jsonl"))
241 | .filter((file) => {
242 | if (!filter.type) return true;
243 | return file.startsWith(filter.type);
244 | });
245 | }
246 |
247 | private matchesFilter(entry: MemoryEntry, filter: any): boolean {
248 | if (filter.type && entry.type !== filter.type) return false;
249 | if (filter.projectId && entry.metadata.projectId !== filter.projectId)
250 | return false;
251 | if (filter.repository && entry.metadata.repository !== filter.repository)
252 | return false;
253 | if (filter.ssg && entry.metadata.ssg !== filter.ssg) return false;
254 |
255 | if (filter.tags && filter.tags.length > 0) {
256 | const entryTags = entry.metadata.tags || [];
257 | if (!filter.tags.some((tag: any) => entryTags.includes(tag)))
258 | return false;
259 | }
260 |
261 | if (filter.startDate && entry.timestamp < filter.startDate) return false;
262 | if (filter.endDate && entry.timestamp > filter.endDate) return false;
263 |
264 | return true;
265 | }
266 |
267 | async delete(id: string): Promise<boolean> {
268 | const location = this.index.get(id);
269 | if (!location) return false;
270 |
271 | this.index.delete(id);
272 | await this.saveIndex();
273 | return true;
274 | }
275 |
276 | async compact(type?: MemoryEntry["type"]): Promise<void> {
277 | // Ensure storage directory exists before compacting
278 | await fs.promises.mkdir(this.storageDir, { recursive: true });
279 |
280 | const files = await this.getRelevantFiles({ type });
281 |
282 | for (const file of files) {
283 | const filePath = path.join(this.storageDir, file);
284 | const tempPath = filePath + ".tmp";
285 | const validEntries: string[] = [];
286 |
287 | const stream = readline.createInterface({
288 | input: fs.createReadStream(filePath),
289 | crlfDelay: Infinity,
290 | });
291 |
292 | for await (const line of stream) {
293 | try {
294 | const entry: MemoryEntry = JSON.parse(line);
295 | if (this.index.has(entry.id)) {
296 | validEntries.push(line);
297 | }
298 | } catch (error) {
299 | // Skip invalid lines
300 | }
301 | }
302 |
303 | await fs.promises.writeFile(tempPath, validEntries.join("\n") + "\n");
304 | await fs.promises.rename(tempPath, filePath);
305 | }
306 | }
307 |
308 | private async countLines(filePath: string): Promise<number> {
309 | const stream = readline.createInterface({
310 | input: fs.createReadStream(filePath),
311 | crlfDelay: Infinity,
312 | });
313 |
314 | let count = 0;
315 | // eslint-disable-next-line @typescript-eslint/no-unused-vars
316 | for await (const _ of stream) {
317 | count++;
318 | }
319 | return count;
320 | }
321 |
322 | async getStatistics(): Promise<{
323 | totalEntries: number;
324 | byType: Record<string, number>;
325 | byMonth: Record<string, number>;
326 | totalSize: number;
327 | }> {
328 | const stats = {
329 | totalEntries: this.index.size,
330 | byType: {} as Record<string, number>,
331 | byMonth: {} as Record<string, number>,
332 | totalSize: 0,
333 | };
334 |
335 | const files = await fs.promises.readdir(this.storageDir);
336 | for (const file of files.filter((f) => f.endsWith(".jsonl"))) {
337 | const filePath = path.join(this.storageDir, file);
338 | const fileStats = await fs.promises.stat(filePath);
339 | stats.totalSize += fileStats.size;
340 |
341 | const match = file.match(/^(\w+)_(\d{4})_(\d{2})\.jsonl$/);
342 | if (match) {
343 | const [, type, year, month] = match;
344 | const monthKey = `${year}-${month}`;
345 |         // Note: byType/byMonth count .jsonl files per type and month, not individual entries
346 | stats.byType[type] = (stats.byType[type] || 0) + 1;
347 | stats.byMonth[monthKey] = (stats.byMonth[monthKey] || 0) + 1;
348 | }
349 | }
350 |
351 | return stats;
352 | }
353 |
354 | /**
355 | * Get all memory entries
356 | */
357 | async getAll(): Promise<MemoryEntry[]> {
358 | const entries: MemoryEntry[] = [];
359 |
360 | for (const [id] of this.index) {
361 | const entry = await this.get(id);
362 | if (entry) {
363 | entries.push(entry);
364 | }
365 | }
366 |
367 | return entries;
368 | }
369 |
370 | /**
371 | * Update an existing memory entry
372 | */
373 | async update(id: string, updatedEntry: MemoryEntry): Promise<boolean> {
374 | const existing = await this.get(id);
375 | if (!existing) {
376 | return false;
377 | }
378 |
379 |     // Delete the old entry and re-append; append() re-derives the id from type + data, so this returns true only when the content hash still matches the original id
380 | await this.delete(id);
381 | const newEntry = await this.append(updatedEntry);
382 | return newEntry.id === id;
383 | }
384 |
385 | /**
386 | * Store a new memory entry (preserves ID if provided)
387 | */
388 | async store(entry: MemoryEntry): Promise<MemoryEntry> {
389 | const entryToStore = {
390 | ...entry,
391 | tags: entry.tags || entry.metadata?.tags || [],
392 | };
393 |
394 | // If the entry already has an ID, use it directly instead of generating a new one
395 | if (entry.id) {
396 | const checksum = this.generateChecksum(entry.data);
397 | const completeEntry: MemoryEntry = {
398 | ...entryToStore,
399 | checksum,
400 | timestamp: entry.timestamp || new Date().toISOString(),
401 | };
402 |
403 | const fileName = this.getFileName(
404 | completeEntry.type,
405 | completeEntry.timestamp,
406 | );
407 | const filePath = path.join(this.storageDir, fileName);
408 |
409 | // Ensure storage directory exists before writing
410 | await fs.promises.mkdir(this.storageDir, { recursive: true });
411 |
412 | const line = JSON.stringify(completeEntry);
413 | await fs.promises.appendFile(filePath, line + "\n");
414 |
415 | // Efficiently track line numbers using a counter
416 | const currentLineCount = this.lineCounters.get(fileName) || 0;
417 | const lineNumber = currentLineCount + 1;
418 | this.lineCounters.set(fileName, lineNumber);
419 |
420 | this.index.set(entry.id, {
421 | file: fileName,
422 | line: lineNumber,
423 | size: Buffer.byteLength(line),
424 | });
425 |
426 | await this.saveIndex();
427 | return completeEntry;
428 | }
429 |
430 | return this.append(entryToStore);
431 | }
432 |
433 | /**
434 | * Rebuild the index from all storage files
435 | */
436 | async rebuildIndex(): Promise<void> {
437 | this.index.clear();
438 |
439 | const files = await fs.promises.readdir(this.storageDir);
440 | const jsonlFiles = files.filter((f) => f.endsWith(".jsonl"));
441 |
442 | for (const file of jsonlFiles) {
443 | const filePath = path.join(this.storageDir, file);
444 | const stream = readline.createInterface({
445 | input: fs.createReadStream(filePath),
446 | crlfDelay: Infinity,
447 | });
448 |
449 |       let lineNumber = 0;
450 |       for await (const line of stream) {
451 |         // Count 1-based so rebuilt locations match get(), which reads the
452 |         // first line as line 1 (append() counts the same way).
453 |         lineNumber++;
454 |         try {
455 |           const entry: MemoryEntry = JSON.parse(line);
456 |           const size = Buffer.byteLength(line, "utf8");
457 |
458 |           this.index.set(entry.id, {
459 |             file,
460 |             line: lineNumber,
461 |             size,
462 |           });
463 |         } catch (error) {
464 |           // Skip invalid lines; lineNumber has already advanced
465 |         }
466 | }
467 | }
468 |
469 | await this.saveIndex();
470 | }
471 |
472 | private async rebuildLineCounters(): Promise<void> {
473 | this.lineCounters.clear();
474 |
475 | // Get all unique file names from the index
476 | const fileNames = new Set<string>();
477 | for (const [, location] of this.index) {
478 | fileNames.add(location.file);
479 | }
480 |
481 | // Count lines for each file
482 | for (const fileName of fileNames) {
483 | const filePath = path.join(this.storageDir, fileName);
484 | try {
485 | const lineCount = await this.countLines(filePath);
486 | this.lineCounters.set(fileName, lineCount);
487 | } catch (error) {
488 | // File might not exist, set to 0
489 | this.lineCounters.set(fileName, 0);
490 | }
491 | }
492 | }
493 |
494 | async close(): Promise<void> {
495 | // Clear the index and line counters to free memory
496 | this.index.clear();
497 | this.lineCounters.clear();
498 | }
499 | }
500 |
501 | export default JSONLStorage;
502 |
```
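A minimal usage sketch of the `JSONLStorage` API above (not a file from this repository; the import path and sample values are illustrative assumptions):

```typescript
import JSONLStorage from "./src/memory/storage.js";

async function demo(): Promise<void> {
  // Defaults to <cwd>/.documcp/memory, or a temp dir when run under Jest.
  const storage = new JSONLStorage();
  await storage.initialize();

  // append() derives the id (sha256 of type + data) and checksum itself.
  const entry = await storage.append({
    timestamp: new Date().toISOString(),
    type: "analysis",
    data: { language: "typescript", hasTests: true },
    metadata: { projectId: "demo-project", tags: ["example"] },
  });

  // Point lookup via the in-memory index, then a filtered streaming scan.
  const fetched = await storage.get(entry.id);
  const recent = await storage.query({ type: "analysis", limit: 10 });
  console.log(fetched?.id, recent.length);

  await storage.close();
}

demo().catch(console.error);
```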
--------------------------------------------------------------------------------
/tests/utils/freshness-tracker.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Tests for Documentation Freshness Tracking Utilities
3 | */
4 |
5 | import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
6 | import fs from "fs/promises";
7 | import path from "path";
8 | import os from "os";
9 | import {
10 | thresholdToMs,
11 | formatAge,
12 | parseDocFrontmatter,
13 | updateDocFrontmatter,
14 | calculateFreshnessStatus,
15 | findMarkdownFiles,
16 | scanDocumentationFreshness,
17 | initializeFreshnessMetadata,
18 | STALENESS_PRESETS,
19 | type StalenessThreshold,
20 | type DocFrontmatter,
21 | } from "../../src/utils/freshness-tracker.js";
22 |
23 | describe("Freshness Tracker Utilities", () => {
24 | let tempDir: string;
25 |
26 | beforeEach(async () => {
27 | tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "freshness-test-"));
28 | });
29 |
30 | afterEach(async () => {
31 | await fs.rm(tempDir, { recursive: true, force: true });
32 | });
33 |
34 | describe("thresholdToMs", () => {
35 | it("should convert minutes to milliseconds", () => {
36 | const threshold: StalenessThreshold = { value: 30, unit: "minutes" };
37 | expect(thresholdToMs(threshold)).toBe(30 * 60 * 1000);
38 | });
39 |
40 | it("should convert hours to milliseconds", () => {
41 | const threshold: StalenessThreshold = { value: 2, unit: "hours" };
42 | expect(thresholdToMs(threshold)).toBe(2 * 60 * 60 * 1000);
43 | });
44 |
45 | it("should convert days to milliseconds", () => {
46 | const threshold: StalenessThreshold = { value: 7, unit: "days" };
47 | expect(thresholdToMs(threshold)).toBe(7 * 24 * 60 * 60 * 1000);
48 | });
49 |
50 | it("should handle fractional values", () => {
51 | const threshold: StalenessThreshold = { value: 0.5, unit: "hours" };
52 | expect(thresholdToMs(threshold)).toBe(30 * 60 * 1000);
53 | });
54 | });
55 |
56 | describe("formatAge", () => {
57 | it("should format seconds", () => {
58 | expect(formatAge(30 * 1000)).toBe("30 seconds");
59 | });
60 |
61 | it("should format single second", () => {
62 | expect(formatAge(1000)).toBe("1 second");
63 | });
64 |
65 | it("should format minutes", () => {
66 | expect(formatAge(5 * 60 * 1000)).toBe("5 minutes");
67 | });
68 |
69 | it("should format single minute", () => {
70 | expect(formatAge(60 * 1000)).toBe("1 minute");
71 | });
72 |
73 | it("should format hours", () => {
74 | expect(formatAge(3 * 60 * 60 * 1000)).toBe("3 hours");
75 | });
76 |
77 | it("should format single hour", () => {
78 | expect(formatAge(60 * 60 * 1000)).toBe("1 hour");
79 | });
80 |
81 | it("should format days", () => {
82 | expect(formatAge(5 * 24 * 60 * 60 * 1000)).toBe("5 days");
83 | });
84 |
85 | it("should format single day", () => {
86 | expect(formatAge(24 * 60 * 60 * 1000)).toBe("1 day");
87 | });
88 |
89 | it("should prefer larger units", () => {
90 | const twoDaysInMs = 2 * 24 * 60 * 60 * 1000;
91 | expect(formatAge(twoDaysInMs)).toBe("2 days");
92 | });
93 | });
94 |
95 | describe("STALENESS_PRESETS", () => {
96 | it("should have all expected presets", () => {
97 | expect(STALENESS_PRESETS.realtime).toEqual({
98 | value: 30,
99 | unit: "minutes",
100 | });
101 | expect(STALENESS_PRESETS.active).toEqual({ value: 1, unit: "hours" });
102 | expect(STALENESS_PRESETS.recent).toEqual({ value: 24, unit: "hours" });
103 | expect(STALENESS_PRESETS.weekly).toEqual({ value: 7, unit: "days" });
104 | expect(STALENESS_PRESETS.monthly).toEqual({ value: 30, unit: "days" });
105 | expect(STALENESS_PRESETS.quarterly).toEqual({ value: 90, unit: "days" });
106 | });
107 | });
108 |
109 | describe("parseDocFrontmatter", () => {
110 | it("should parse frontmatter from markdown file", async () => {
111 | const filePath = path.join(tempDir, "test.md");
112 | const content = `---
113 | title: Test Document
114 | documcp:
115 | last_updated: "2025-01-15T10:00:00Z"
116 | last_validated: "2025-01-15T10:00:00Z"
117 | ---
118 |
119 | # Test Content`;
120 |
121 | await fs.writeFile(filePath, content, "utf-8");
122 | const frontmatter = await parseDocFrontmatter(filePath);
123 |
124 | expect(frontmatter.title).toBe("Test Document");
125 | expect(frontmatter.documcp?.last_updated).toBe("2025-01-15T10:00:00Z");
126 | });
127 |
128 | it("should return empty object for file without frontmatter", async () => {
129 | const filePath = path.join(tempDir, "no-frontmatter.md");
130 | await fs.writeFile(filePath, "# Just Content", "utf-8");
131 |
132 | const frontmatter = await parseDocFrontmatter(filePath);
133 | expect(frontmatter).toEqual({});
134 | });
135 |
136 | it("should handle non-existent files gracefully", async () => {
137 | const filePath = path.join(tempDir, "nonexistent.md");
138 | const frontmatter = await parseDocFrontmatter(filePath);
139 | expect(frontmatter).toEqual({});
140 | });
141 | });
142 |
143 | describe("updateDocFrontmatter", () => {
144 | it("should update existing frontmatter", async () => {
145 | const filePath = path.join(tempDir, "update.md");
146 | const initialContent = `---
147 | title: Original
148 | documcp:
149 | last_updated: "2025-01-01T00:00:00Z"
150 | ---
151 |
152 | Content`;
153 |
154 | await fs.writeFile(filePath, initialContent, "utf-8");
155 |
156 | await updateDocFrontmatter(filePath, {
157 | last_updated: "2025-01-15T10:00:00Z",
158 | last_validated: "2025-01-15T10:00:00Z",
159 | });
160 |
161 | const updated = await parseDocFrontmatter(filePath);
162 | expect(updated.documcp?.last_updated).toBe("2025-01-15T10:00:00Z");
163 | expect(updated.documcp?.last_validated).toBe("2025-01-15T10:00:00Z");
164 | });
165 |
166 | it("should preserve existing frontmatter fields", async () => {
167 | const filePath = path.join(tempDir, "preserve.md");
168 | const initialContent = `---
169 | title: Original
170 | description: Test
171 | documcp:
172 | last_updated: "2025-01-01T00:00:00Z"
173 | auto_updated: false
174 | ---
175 |
176 | Content`;
177 |
178 | await fs.writeFile(filePath, initialContent, "utf-8");
179 |
180 | await updateDocFrontmatter(filePath, {
181 | last_validated: "2025-01-15T10:00:00Z",
182 | });
183 |
184 | const updated = await parseDocFrontmatter(filePath);
185 | expect(updated.title).toBe("Original");
186 | expect(updated.description).toBe("Test");
187 | expect(updated.documcp?.last_updated).toBe("2025-01-01T00:00:00Z");
188 | expect(updated.documcp?.auto_updated).toBe(false);
189 | expect(updated.documcp?.last_validated).toBe("2025-01-15T10:00:00Z");
190 | });
191 |
192 | it("should add documcp field if not present", async () => {
193 | const filePath = path.join(tempDir, "add-documcp.md");
194 | const initialContent = `---
195 | title: No DocuMCP
196 | ---
197 |
198 | Content`;
199 |
200 | await fs.writeFile(filePath, initialContent, "utf-8");
201 |
202 | await updateDocFrontmatter(filePath, {
203 | last_updated: "2025-01-15T10:00:00Z",
204 | });
205 |
206 | const updated = await parseDocFrontmatter(filePath);
207 | expect(updated.documcp?.last_updated).toBe("2025-01-15T10:00:00Z");
208 | });
209 | });
210 |
211 | describe("calculateFreshnessStatus", () => {
212 | const thresholds = {
213 | warning: { value: 7, unit: "days" as const },
214 | stale: { value: 30, unit: "days" as const },
215 | critical: { value: 90, unit: "days" as const },
216 | };
217 |
218 | it("should mark file as fresh when recently updated", () => {
219 | const frontmatter: DocFrontmatter = {
220 | documcp: {
221 | last_updated: new Date(
222 | Date.now() - 2 * 24 * 60 * 60 * 1000,
223 | ).toISOString(), // 2 days ago
224 | },
225 | };
226 |
227 | const status = calculateFreshnessStatus(
228 | "/test.md",
229 | "test.md",
230 | frontmatter,
231 | thresholds,
232 | );
233 |
234 | expect(status.stalenessLevel).toBe("fresh");
235 | expect(status.isStale).toBe(false);
236 | expect(status.hasMetadata).toBe(true);
237 | });
238 |
239 | it("should mark file as warning when moderately old", () => {
240 | const frontmatter: DocFrontmatter = {
241 | documcp: {
242 | last_updated: new Date(
243 | Date.now() - 15 * 24 * 60 * 60 * 1000,
244 | ).toISOString(), // 15 days ago
245 | },
246 | };
247 |
248 | const status = calculateFreshnessStatus(
249 | "/test.md",
250 | "test.md",
251 | frontmatter,
252 | thresholds,
253 | );
254 |
255 | expect(status.stalenessLevel).toBe("warning");
256 | expect(status.isStale).toBe(false);
257 | });
258 |
259 | it("should mark file as stale when old", () => {
260 | const frontmatter: DocFrontmatter = {
261 | documcp: {
262 | last_updated: new Date(
263 | Date.now() - 45 * 24 * 60 * 60 * 1000,
264 | ).toISOString(), // 45 days ago
265 | },
266 | };
267 |
268 | const status = calculateFreshnessStatus(
269 | "/test.md",
270 | "test.md",
271 | frontmatter,
272 | thresholds,
273 | );
274 |
275 | expect(status.stalenessLevel).toBe("stale");
276 | expect(status.isStale).toBe(true);
277 | });
278 |
279 | it("should mark file as critical when very old", () => {
280 | const frontmatter: DocFrontmatter = {
281 | documcp: {
282 | last_updated: new Date(
283 | Date.now() - 100 * 24 * 60 * 60 * 1000,
284 | ).toISOString(), // 100 days ago
285 | },
286 | };
287 |
288 | const status = calculateFreshnessStatus(
289 | "/test.md",
290 | "test.md",
291 | frontmatter,
292 | thresholds,
293 | );
294 |
295 | expect(status.stalenessLevel).toBe("critical");
296 | expect(status.isStale).toBe(true);
297 | });
298 |
299 | it("should mark file as unknown when no metadata", () => {
300 | const frontmatter: DocFrontmatter = {};
301 |
302 | const status = calculateFreshnessStatus(
303 | "/test.md",
304 | "test.md",
305 | frontmatter,
306 | thresholds,
307 | );
308 |
309 | expect(status.stalenessLevel).toBe("unknown");
310 | expect(status.isStale).toBe(true);
311 | expect(status.hasMetadata).toBe(false);
312 | });
313 |
314 | it("should include age information", () => {
315 | const frontmatter: DocFrontmatter = {
316 | documcp: {
317 | last_updated: new Date(
318 | Date.now() - 5 * 24 * 60 * 60 * 1000,
319 | ).toISOString(),
320 | },
321 | };
322 |
323 | const status = calculateFreshnessStatus(
324 | "/test.md",
325 | "test.md",
326 | frontmatter,
327 | thresholds,
328 | );
329 |
330 | expect(status.ageFormatted).toBe("5 days");
331 | expect(status.staleDays).toBe(5);
332 | });
333 | });
334 |
335 | describe("findMarkdownFiles", () => {
336 | it("should find all markdown files recursively", async () => {
337 | await fs.mkdir(path.join(tempDir, "subdir"));
338 | await fs.writeFile(path.join(tempDir, "file1.md"), "# Test 1");
339 | await fs.writeFile(path.join(tempDir, "file2.mdx"), "# Test 2");
340 | await fs.writeFile(path.join(tempDir, "subdir", "file3.md"), "# Test 3");
341 | await fs.writeFile(path.join(tempDir, "readme.txt"), "Not markdown");
342 |
343 | const files = await findMarkdownFiles(tempDir);
344 |
345 | expect(files).toHaveLength(3);
346 | expect(files.some((f) => f.endsWith("file1.md"))).toBe(true);
347 | expect(files.some((f) => f.endsWith("file2.mdx"))).toBe(true);
348 | expect(files.some((f) => f.endsWith("file3.md"))).toBe(true);
349 | expect(files.some((f) => f.endsWith("readme.txt"))).toBe(false);
350 | });
351 |
352 | it("should skip common directories", async () => {
353 | await fs.mkdir(path.join(tempDir, "node_modules"));
354 | await fs.mkdir(path.join(tempDir, ".git"));
355 | await fs.writeFile(path.join(tempDir, "file1.md"), "# Test");
356 | await fs.writeFile(
357 | path.join(tempDir, "node_modules", "skip.md"),
358 | "# Skip",
359 | );
360 | await fs.writeFile(path.join(tempDir, ".git", "skip.md"), "# Skip");
361 |
362 | const files = await findMarkdownFiles(tempDir);
363 |
364 | expect(files).toHaveLength(1);
365 | expect(files[0]).toMatch(/file1\.md$/);
366 | });
367 |
368 | it("should handle empty directories", async () => {
369 | const files = await findMarkdownFiles(tempDir);
370 | expect(files).toEqual([]);
371 | });
372 | });
373 |
374 | describe("scanDocumentationFreshness", () => {
375 | it("should scan and categorize files by freshness", async () => {
376 | // Create test files with different ages
377 | const now = Date.now();
378 |
379 | const freshFile = path.join(tempDir, "fresh.md");
380 | await fs.writeFile(
381 | freshFile,
382 | `---
383 | documcp:
384 | last_updated: "${new Date(now - 2 * 24 * 60 * 60 * 1000).toISOString()}"
385 | ---
386 | # Fresh`,
387 | );
388 |
389 | const staleFile = path.join(tempDir, "stale.md");
390 | await fs.writeFile(
391 | staleFile,
392 | `---
393 | documcp:
394 | last_updated: "${new Date(now - 40 * 24 * 60 * 60 * 1000).toISOString()}"
395 | ---
396 | # Stale`,
397 | );
398 |
399 | const noMetadataFile = path.join(tempDir, "no-metadata.md");
400 | await fs.writeFile(noMetadataFile, "# No Metadata");
401 |
402 | const report = await scanDocumentationFreshness(tempDir, {
403 | warning: { value: 7, unit: "days" },
404 | stale: { value: 30, unit: "days" },
405 | critical: { value: 90, unit: "days" },
406 | });
407 |
408 | expect(report.totalFiles).toBe(3);
409 | expect(report.freshFiles).toBe(1);
410 | expect(report.staleFiles).toBe(1);
411 | expect(report.filesWithoutMetadata).toBe(1);
412 | });
413 |
414 | it("should use default thresholds when not provided", async () => {
415 | await fs.writeFile(path.join(tempDir, "test.md"), "# Test");
416 |
417 | const report = await scanDocumentationFreshness(tempDir);
418 |
419 | expect(report.thresholds).toBeDefined();
420 | expect(report.thresholds.warning).toBeDefined();
421 | expect(report.thresholds.stale).toBeDefined();
422 | expect(report.thresholds.critical).toBeDefined();
423 | });
424 | });
425 |
426 | describe("initializeFreshnessMetadata", () => {
427 | it("should initialize metadata for file without it", async () => {
428 | const filePath = path.join(tempDir, "init.md");
429 | await fs.writeFile(filePath, "# Test");
430 |
431 | await initializeFreshnessMetadata(filePath, {
432 | updateFrequency: "monthly",
433 | autoUpdated: false,
434 | });
435 |
436 | const frontmatter = await parseDocFrontmatter(filePath);
437 |
438 | expect(frontmatter.documcp?.last_updated).toBeDefined();
439 | expect(frontmatter.documcp?.last_validated).toBeDefined();
440 | expect(frontmatter.documcp?.auto_updated).toBe(false);
441 | expect(frontmatter.documcp?.update_frequency).toBe("monthly");
442 | });
443 |
444 | it("should not overwrite existing metadata", async () => {
445 | const filePath = path.join(tempDir, "existing.md");
446 | const originalDate = "2025-01-01T00:00:00Z";
447 | await fs.writeFile(
448 | filePath,
449 | `---
450 | documcp:
451 | last_updated: "${originalDate}"
452 | ---
453 | # Test`,
454 | );
455 |
456 | await initializeFreshnessMetadata(filePath);
457 |
458 | const frontmatter = await parseDocFrontmatter(filePath);
459 | expect(frontmatter.documcp?.last_updated).toBe(originalDate);
460 | });
461 |
462 | it("should set staleness threshold when frequency is provided", async () => {
463 | const filePath = path.join(tempDir, "threshold.md");
464 | await fs.writeFile(filePath, "# Test");
465 |
466 | await initializeFreshnessMetadata(filePath, {
467 | updateFrequency: "weekly",
468 | });
469 |
470 | const frontmatter = await parseDocFrontmatter(filePath);
471 | expect(frontmatter.documcp?.staleness_threshold).toEqual(
472 | STALENESS_PRESETS.weekly,
473 | );
474 | });
475 | });
476 | });
477 |
```
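A short sketch of how the freshness utilities exercised above compose outside the test suite (illustrative only; the docs path and thresholds are assumptions, not repository defaults):

```typescript
import {
  initializeFreshnessMetadata,
  scanDocumentationFreshness,
  thresholdToMs,
  formatAge,
} from "./src/utils/freshness-tracker.js";

async function freshnessReport(docsDir: string): Promise<void> {
  // Stamp a file that lacks documcp frontmatter; "weekly" also records
  // STALENESS_PRESETS.weekly as its staleness_threshold.
  await initializeFreshnessMetadata(`${docsDir}/index.md`, {
    updateFrequency: "weekly",
    autoUpdated: false,
  });

  const report = await scanDocumentationFreshness(docsDir, {
    warning: { value: 7, unit: "days" },
    stale: { value: 30, unit: "days" },
    critical: { value: 90, unit: "days" },
  });

  console.log(
    `${report.staleFiles}/${report.totalFiles} files stale; ` +
      `warning after ${formatAge(thresholdToMs({ value: 7, unit: "days" }))}`,
  );
}

freshnessReport("docs").catch(console.error);
```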
--------------------------------------------------------------------------------
/src/tools/test-local-deployment.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { z } from "zod";
2 | import { promises as fs } from "fs";
3 | import * as path from "path";
4 | import { spawn, exec } from "child_process";
5 | import { promisify } from "util";
6 | import { MCPToolResponse, formatMCPResponse } from "../types/api.js";
7 |
8 | const execAsync = promisify(exec);
9 |
10 | const inputSchema = z.object({
11 | repositoryPath: z.string().describe("Path to the repository"),
12 | ssg: z.enum(["jekyll", "hugo", "docusaurus", "mkdocs", "eleventy"]),
13 | port: z.number().optional().default(3000).describe("Port for local server"),
14 | timeout: z
15 | .number()
16 | .optional()
17 | .default(60)
18 | .describe("Timeout in seconds for build process"),
19 | skipBuild: z
20 | .boolean()
21 | .optional()
22 | .default(false)
23 | .describe("Skip build step and only start server"),
24 | });
25 |
26 | interface LocalTestResult {
27 | repositoryPath: string;
28 | ssg: string;
29 | buildSuccess: boolean;
30 | buildOutput?: string;
31 | buildErrors?: string;
32 | serverStarted: boolean;
33 | localUrl?: string;
34 | port: number;
35 | testScript: string;
36 | recommendations: string[];
37 | nextSteps: string[];
38 | }
39 |
40 | interface SSGConfig {
41 | buildCommand: string;
42 | serveCommand: string;
43 | buildDir: string;
44 | configFiles: string[];
45 | installCommand?: string;
46 | }
47 |
48 | const SSG_CONFIGS: Record<string, SSGConfig> = {
49 | jekyll: {
50 | buildCommand: "bundle exec jekyll build",
51 | serveCommand: "bundle exec jekyll serve",
52 | buildDir: "_site",
53 | configFiles: ["_config.yml", "_config.yaml"],
54 | installCommand: "bundle install",
55 | },
56 | hugo: {
57 | buildCommand: "hugo",
58 | serveCommand: "hugo server",
59 | buildDir: "public",
60 | configFiles: [
61 | "hugo.toml",
62 | "hugo.yaml",
63 | "hugo.yml",
64 | "config.toml",
65 | "config.yaml",
66 | "config.yml",
67 | ],
68 | },
69 | docusaurus: {
70 | buildCommand: "npm run build",
71 | serveCommand: "npm run serve",
72 | buildDir: "build",
73 | configFiles: ["docusaurus.config.js", "docusaurus.config.ts"],
74 | installCommand: "npm install",
75 | },
76 | mkdocs: {
77 | buildCommand: "mkdocs build",
78 | serveCommand: "mkdocs serve",
79 | buildDir: "site",
80 | configFiles: ["mkdocs.yml", "mkdocs.yaml"],
81 | installCommand: "pip install -r requirements.txt",
82 | },
83 | eleventy: {
84 | buildCommand: "npx @11ty/eleventy",
85 | serveCommand: "npx @11ty/eleventy --serve",
86 | buildDir: "_site",
87 | configFiles: [".eleventy.js", "eleventy.config.js", ".eleventy.json"],
88 | installCommand: "npm install",
89 | },
90 | };
91 |
92 | export async function testLocalDeployment(
93 | args: unknown,
94 | ): Promise<{ content: any[] }> {
95 | const startTime = Date.now();
96 | const { repositoryPath, ssg, port, timeout, skipBuild } =
97 | inputSchema.parse(args);
98 |
99 | try {
100 | const config = SSG_CONFIGS[ssg];
101 | if (!config) {
102 | throw new Error(`Unsupported SSG: ${ssg}`);
103 | }
104 |
105 | // Change to repository directory
106 | process.chdir(repositoryPath);
107 |
108 | const testResult: LocalTestResult = {
109 | repositoryPath,
110 | ssg,
111 | buildSuccess: false,
112 | serverStarted: false,
113 | port,
114 | testScript: "",
115 | recommendations: [],
116 | nextSteps: [],
117 | };
118 |
119 | // Step 1: Check if configuration exists (always check, even if skipBuild)
120 | const configExists = await checkConfigurationExists(repositoryPath, config);
121 | if (!configExists) {
122 | testResult.recommendations.push(
123 | `Missing configuration file. Expected one of: ${config.configFiles.join(
124 | ", ",
125 | )}`,
126 | );
127 | testResult.nextSteps.push(
128 | "Run generate_config tool to create configuration",
129 | );
130 | } else {
131 | // Always mention which config file was found/expected for test purposes
132 | testResult.recommendations.push(
133 | `Using ${ssg} configuration: ${config.configFiles.join(" or ")}`,
134 | );
135 | }
136 |
137 | // Step 2: Install dependencies if needed
138 | if (config.installCommand && !skipBuild) {
139 | try {
140 | const { stderr } = await execAsync(config.installCommand, {
141 | cwd: repositoryPath,
142 | timeout: timeout * 1000,
143 | });
144 | if (stderr && !stderr.includes("npm WARN")) {
145 | testResult.recommendations.push(
146 | "Dependency installation warnings detected",
147 | );
148 | }
149 | } catch (error: any) {
150 | testResult.recommendations.push(
151 | `Dependency installation failed: ${error.message}`,
152 | );
153 | testResult.nextSteps.push(
154 | "Fix dependency installation issues before testing deployment",
155 | );
156 | }
157 | }
158 |
159 | // Step 3: Build the site (unless skipped)
160 | if (!skipBuild) {
161 | try {
162 | const { stdout, stderr } = await execAsync(config.buildCommand, {
163 | cwd: repositoryPath,
164 | timeout: timeout * 1000,
165 | });
166 | testResult.buildSuccess = true;
167 | testResult.buildOutput = stdout;
168 |
169 | if (stderr && stderr.trim()) {
170 | testResult.buildErrors = stderr;
171 | if (stderr.includes("error") || stderr.includes("Error")) {
172 | testResult.recommendations.push(
173 | "Build completed with errors - review build output",
174 | );
175 | }
176 | }
177 |
178 | // Check if build directory was created
179 | const buildDirExists = await checkBuildOutput(
180 | repositoryPath,
181 | config.buildDir,
182 | );
183 | if (!buildDirExists) {
184 | testResult.recommendations.push(
185 | `Build directory ${config.buildDir} was not created`,
186 | );
187 | }
188 | } catch (error: any) {
189 | testResult.buildSuccess = false;
190 | testResult.buildErrors = error.message;
191 | testResult.recommendations.push(
192 | "Build failed - fix build errors before deployment",
193 | );
194 | testResult.nextSteps.push(
195 | "Review build configuration and resolve errors",
196 | );
197 | }
198 | } else {
199 | testResult.buildSuccess = true; // Assume success if skipped
200 | }
201 |
202 | // Step 4: Generate test script
203 | testResult.testScript = generateTestScript(
204 | ssg,
205 | config,
206 | port,
207 | repositoryPath,
208 | );
209 |
210 | // Step 5: Try to start local server (non-blocking)
211 | if (testResult.buildSuccess || skipBuild) {
212 | const serverResult = await startLocalServer(
213 | config,
214 | port,
215 | repositoryPath,
216 | 10,
217 | ); // 10 second timeout for server start
218 | testResult.serverStarted = serverResult.started;
219 | testResult.localUrl = serverResult.url;
220 |
221 | if (testResult.serverStarted) {
222 | testResult.recommendations.push(
223 | "Local server started successfully - test manually at the provided URL",
224 | );
225 | testResult.nextSteps.push("Verify content loads correctly in browser");
226 | testResult.nextSteps.push("Test navigation and responsive design");
227 | } else {
228 | testResult.recommendations.push(
229 | "Could not automatically start local server - run manually using the provided script",
230 | );
231 | testResult.nextSteps.push(
232 | "Start server manually and verify it works before GitHub deployment",
233 | );
234 | }
235 | }
236 |
237 | // Step 6: Generate final recommendations
238 | if (testResult.buildSuccess && testResult.serverStarted) {
239 | testResult.recommendations.push(
240 | "Local deployment test successful - ready for GitHub Pages",
241 | );
242 | testResult.nextSteps.push(
243 | "Run deploy_pages tool to set up GitHub Actions workflow",
244 | );
245 | } else if (testResult.buildSuccess && !testResult.serverStarted) {
246 | testResult.recommendations.push(
247 | "Build successful but server test incomplete - manual verification needed",
248 | );
249 | testResult.nextSteps.push(
250 | "Test server manually before deploying to GitHub",
251 | );
252 | }
253 |
254 | const response: MCPToolResponse<typeof testResult> = {
255 | success: true,
256 | data: testResult,
257 | metadata: {
258 | toolVersion: "1.0.0",
259 | executionTime: Date.now() - startTime,
260 | timestamp: new Date().toISOString(),
261 | },
262 | recommendations: [
263 | {
264 | type: testResult.buildSuccess ? "info" : "warning",
265 | title: "Local Deployment Test Complete",
266 | description: `Build ${
267 | testResult.buildSuccess ? "succeeded" : "failed"
268 | }, Server ${
269 | testResult.serverStarted ? "started" : "failed to start"
270 | }`,
271 | },
272 | ],
273 | nextSteps: testResult.nextSteps.map((step) => ({
274 | action: step,
275 | toolRequired: getRecommendedTool(step),
276 | description: step,
277 |         priority: testResult.buildSuccess ? ("medium" as const) : ("high" as const),
278 | })),
279 | };
280 |
281 | return formatMCPResponse(response);
282 | } catch (error) {
283 | const errorResponse: MCPToolResponse = {
284 | success: false,
285 | error: {
286 | code: "LOCAL_TEST_FAILED",
287 | message: `Failed to test local deployment: ${error}`,
288 | resolution:
289 | "Ensure repository path is valid and SSG is properly configured",
290 | },
291 | metadata: {
292 | toolVersion: "1.0.0",
293 | executionTime: Date.now() - startTime,
294 | timestamp: new Date().toISOString(),
295 | },
296 | };
297 | return formatMCPResponse(errorResponse);
298 | }
299 | }
300 |
301 | async function checkConfigurationExists(
302 | repoPath: string,
303 | config: SSGConfig,
304 | ): Promise<boolean> {
305 | for (const configFile of config.configFiles) {
306 | try {
307 | await fs.access(path.join(repoPath, configFile));
308 | return true;
309 | } catch {
310 | // File doesn't exist, continue checking
311 | }
312 | }
313 | return false;
314 | }
315 |
316 | async function checkBuildOutput(
317 | repoPath: string,
318 | buildDir: string,
319 | ): Promise<boolean> {
320 | try {
321 | const buildPath = path.join(repoPath, buildDir);
322 | const stats = await fs.stat(buildPath);
323 | if (stats.isDirectory()) {
324 | const files = await fs.readdir(buildPath);
325 | return files.length > 0;
326 | }
327 | } catch {
328 | // Directory doesn't exist or can't be read
329 | }
330 | return false;
331 | }
332 |
333 | async function startLocalServer(
334 | config: SSGConfig,
335 | port: number,
336 | repoPath: string,
337 | timeout: number,
338 | ): Promise<{ started: boolean; url?: string }> {
339 | return new Promise((resolve) => {
340 | let serverProcess: any = null;
341 | let resolved = false;
342 |
343 | const cleanup = () => {
344 | if (serverProcess && !serverProcess.killed) {
345 | try {
346 | serverProcess.kill("SIGTERM");
347 | // Force kill if SIGTERM doesn't work after 1 second
348 | const forceKillTimeout = setTimeout(() => {
349 | if (serverProcess && !serverProcess.killed) {
350 | serverProcess.kill("SIGKILL");
351 | }
352 | }, 1000);
353 |
354 | // Clear the timeout if process exits normally
355 | serverProcess.on("exit", () => {
356 | clearTimeout(forceKillTimeout);
357 | });
358 | } catch (error) {
359 | // Process may already be dead
360 | }
361 | }
362 | };
363 |
364 | const safeResolve = (result: { started: boolean; url?: string }) => {
365 | if (!resolved) {
366 | resolved = true;
367 | cleanup();
368 | resolve(result);
369 | }
370 | };
371 |
372 | const serverTimeout = setTimeout(() => {
373 | safeResolve({ started: false });
374 | }, timeout * 1000);
375 |
376 | try {
377 | let command = config.serveCommand;
378 |
379 | // Modify serve command to use custom port for some SSGs
380 | if (config.serveCommand.includes("jekyll serve")) {
381 | command = `${config.serveCommand} --port ${port}`;
382 | } else if (config.serveCommand.includes("hugo server")) {
383 | command = `${config.serveCommand} --port ${port}`;
384 | } else if (config.serveCommand.includes("mkdocs serve")) {
385 | command = `${config.serveCommand} --dev-addr localhost:${port}`;
386 | } else if (config.serveCommand.includes("--serve")) {
387 | command = `${config.serveCommand} --port ${port}`;
388 | }
389 |
390 | serverProcess = spawn("sh", ["-c", command], {
391 | cwd: repoPath,
392 | detached: false,
393 | stdio: "pipe",
394 | });
395 |
396 | let serverStarted = false;
397 |
398 | serverProcess.stdout?.on("data", (data: Buffer) => {
399 | const output = data.toString();
400 |
401 | // Check for server start indicators
402 | if (
403 | !serverStarted &&
404 | (output.includes("Server running") ||
405 | output.includes("Serving on") ||
406 | output.includes("Local:") ||
407 | output.includes("localhost:") ||
408 | output.includes(`http://127.0.0.1:${port}`) ||
409 | output.includes(`http://localhost:${port}`))
410 | ) {
411 | serverStarted = true;
412 | clearTimeout(serverTimeout);
413 |
414 | safeResolve({
415 | started: true,
416 | url: `http://localhost:${port}`,
417 | });
418 | }
419 | });
420 |
421 | serverProcess.stderr?.on("data", (data: Buffer) => {
422 | const error = data.toString();
423 |
424 | // Some servers output startup info to stderr
425 | if (
426 | !serverStarted &&
427 | (error.includes("Serving on") ||
428 | error.includes("Local:") ||
429 | error.includes("localhost:"))
430 | ) {
431 | serverStarted = true;
432 | clearTimeout(serverTimeout);
433 |
434 | safeResolve({
435 | started: true,
436 | url: `http://localhost:${port}`,
437 | });
438 | }
439 | });
440 |
441 | serverProcess.on("error", (_error: Error) => {
442 | clearTimeout(serverTimeout);
443 | safeResolve({ started: false });
444 | });
445 |
446 | serverProcess.on("exit", () => {
447 | clearTimeout(serverTimeout);
448 | if (!resolved) {
449 | safeResolve({ started: false });
450 | }
451 | });
452 | } catch (_error) {
453 | clearTimeout(serverTimeout);
454 | safeResolve({ started: false });
455 | }
456 | });
457 | }
458 |
459 | function generateTestScript(
460 | ssg: string,
461 | config: SSGConfig,
462 | port: number,
463 | repoPath: string,
464 | ): string {
465 | const commands: string[] = [
466 | `# Local Deployment Test Script for ${ssg}`,
467 | `# Generated on ${new Date().toISOString()}`,
468 | ``,
469 | `cd "${repoPath}"`,
470 | ``,
471 | ];
472 |
473 | // Add install command if needed
474 | if (config.installCommand) {
475 | commands.push(`# Install dependencies`);
476 | commands.push(config.installCommand);
477 | commands.push(``);
478 | }
479 |
480 | // Add build command
481 | commands.push(`# Build the site`);
482 | commands.push(config.buildCommand);
483 | commands.push(``);
484 |
485 | // Add serve command with custom port
486 | commands.push(`# Start local server`);
487 | let serveCommand = config.serveCommand;
488 |
489 | if (serveCommand.includes("jekyll serve")) {
490 | serveCommand = `${serveCommand} --port ${port}`;
491 | } else if (serveCommand.includes("hugo server")) {
492 | serveCommand = `${serveCommand} --port ${port}`;
493 | } else if (serveCommand.includes("mkdocs serve")) {
494 | serveCommand = `${serveCommand} --dev-addr localhost:${port}`;
495 | } else if (serveCommand.includes("--serve")) {
496 | serveCommand = `${serveCommand} --port ${port}`;
497 | }
498 |
499 | commands.push(serveCommand);
500 | commands.push(``);
501 | commands.push(`# Open in browser:`);
502 | commands.push(`# http://localhost:${port}`);
503 |
504 | return commands.join("\n");
505 | }
506 |
507 | function getRecommendedTool(step: string): string {
508 | if (step.includes("generate_config")) return "generate_config";
509 | if (step.includes("deploy_pages")) return "deploy_pages";
510 | if (step.includes("verify_deployment")) return "verify_deployment";
511 | return "manual";
512 | }
513 |
```
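A hedged invocation sketch for the tool above (the repository path, port, and SSG choice are placeholders): `testLocalDeployment` validates raw arguments through its zod schema, so a plain object suffices.

```typescript
import { testLocalDeployment } from "./src/tools/test-local-deployment.js";

async function main(): Promise<void> {
  const response = await testLocalDeployment({
    repositoryPath: "/path/to/docs-site",
    ssg: "mkdocs",
    port: 8000, // overrides the default of 3000
    timeout: 120, // seconds allowed for install/build commands
    skipBuild: false,
  });

  // formatMCPResponse wraps the LocalTestResult as MCP content blocks.
  console.log(JSON.stringify(response.content, null, 2));
}

main().catch(console.error);
```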
--------------------------------------------------------------------------------
/src/tools/generate-llm-context.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { formatMCPResponse } from "../types/api.js";
2 | import { promises as fs } from "fs";
3 | import path from "path";
4 | import { z } from "zod";
5 |
6 | // Dynamic import to avoid circular dependency
7 | let cachedTools: any[] | null = null;
8 |
9 | async function getToolDefinitions(): Promise<any[]> {
10 | if (cachedTools) return cachedTools;
11 |
12 | try {
13 | const indexModule = await import("../index.js");
14 | cachedTools = indexModule.TOOLS || [];
15 | return cachedTools;
16 | } catch (error) {
17 | console.warn("Could not load TOOLS from index.js:", error);
18 | return [];
19 | }
20 | }
21 |
22 | // Input schema for the tool
23 | export const GenerateLLMContextInputSchema = z.object({
24 | projectPath: z
25 | .string()
26 | .describe(
27 | "Path to the project root directory where LLM_CONTEXT.md will be generated",
28 | ),
29 | includeExamples: z
30 | .boolean()
31 | .optional()
32 | .default(true)
33 | .describe("Include usage examples for tools"),
34 | format: z
35 | .enum(["detailed", "concise"])
36 | .optional()
37 | .default("detailed")
38 | .describe("Level of detail in the generated context"),
39 | });
40 |
41 | export type GenerateLLMContextInput = z.infer<
42 | typeof GenerateLLMContextInputSchema
43 | >;
44 |
45 | /**
46 | * Set tool definitions for the context generator
47 | * This is called from src/index.ts when TOOLS array is initialized
48 | */
49 | export function setToolDefinitions(tools: any[]) {
50 | cachedTools = tools;
51 | }
52 |
53 | export async function generateLLMContext(
54 | params: Partial<GenerateLLMContextInput>,
55 | ): Promise<any> {
56 | try {
57 | // Parse with defaults
58 | const validated = GenerateLLMContextInputSchema.parse(params);
59 | const { projectPath, includeExamples, format } = validated;
60 |
61 | // Always generate LLM_CONTEXT.md in the project root
62 | const outputPath = path.join(projectPath, "LLM_CONTEXT.md");
63 |
64 | // Get tool definitions dynamically
65 | const toolDefinitions = await getToolDefinitions();
66 |
67 | // Generate the context content
68 | const content = generateContextContent(
69 | includeExamples,
70 | format,
71 | toolDefinitions,
72 | );
73 |
74 | // Write the file
75 | await fs.writeFile(outputPath, content, "utf-8");
76 |
77 | const metadata = {
78 | toolVersion: "0.4.1",
79 | executionTime: 0,
80 | timestamp: new Date().toISOString(),
81 | };
82 |
83 | return formatMCPResponse({
84 | success: true,
85 | data: {
86 | message: `LLM context file generated successfully at ${outputPath}`,
87 | path: path.resolve(outputPath),
88 | stats: {
89 | totalTools: toolDefinitions.length,
90 | fileSize: Buffer.byteLength(content, "utf-8"),
91 | sections: [
92 | "Overview",
93 | "Core Tools",
94 | "README Tools",
95 | "Memory System",
96 | "Phase 3 Features",
97 | "Workflows",
98 | "Quick Reference",
99 | ],
100 | },
101 | },
102 | metadata,
103 | nextSteps: [
104 | {
105 | action:
106 | "Reference this file with @LLM_CONTEXT.md in your LLM conversations",
107 | priority: "high" as const,
108 | },
109 | {
110 | action: "Regenerate periodically when new tools are added",
111 | toolRequired: "generate_llm_context",
112 | priority: "low" as const,
113 | },
114 | {
115 | action: "Use this as a quick reference for DocuMCP capabilities",
116 | priority: "medium" as const,
117 | },
118 | ],
119 | });
120 | } catch (error: any) {
121 | return formatMCPResponse({
122 | success: false,
123 | error: {
124 | code: "GENERATION_ERROR",
125 | message: `Failed to generate LLM context: ${error.message}`,
126 | },
127 | metadata: {
128 | toolVersion: "0.4.1",
129 | executionTime: 0,
130 | timestamp: new Date().toISOString(),
131 | },
132 | });
133 | }
134 | }
135 |
136 | function generateContextContent(
137 | includeExamples: boolean,
138 | format: "detailed" | "concise",
139 | toolDefinitions: any[],
140 | ): string {
141 | const sections: string[] = [];
142 |
143 | // Header
144 | sections.push(`# DocuMCP LLM Context Reference
145 | **Auto-generated**: ${new Date().toISOString()}
146 |
147 | This file provides instant context about DocuMCP's tools and memory system for LLMs.
148 | Reference this file with @ to get comprehensive context about available capabilities.
149 |
150 | ---
151 | `);
152 |
153 | // Overview
154 | sections.push(`## Overview
155 |
156 | DocuMCP is an intelligent MCP server for GitHub Pages documentation deployment with:
157 | - **${toolDefinitions.length} Tools** for repository analysis, SSG recommendations, and deployment
158 | - **Knowledge Graph** memory system tracking projects, technologies, and deployments
159 | - **Phase 3 Features** including AST-based code analysis and drift detection
160 | - **Diataxis Framework** compliance for documentation structure
161 |
162 | ---
163 | `);
164 |
165 | // Categorize tools
166 | const coreTools = toolDefinitions.filter((t) =>
167 | [
168 | "analyze_repository",
169 | "recommend_ssg",
170 | "generate_config",
171 | "setup_structure",
172 | "deploy_pages",
173 | "verify_deployment",
174 | "populate_diataxis_content",
175 | "validate_diataxis_content",
176 | "update_existing_documentation",
177 | ].includes(t.name),
178 | );
179 |
180 | const readmeTools = toolDefinitions.filter((t) =>
181 | t.name.toLowerCase().includes("readme"),
182 | );
183 |
184 | const memoryTools = toolDefinitions.filter((t) =>
185 | ["manage_preferences", "analyze_deployments", "kg_health_check"].includes(
186 | t.name,
187 | ),
188 | );
189 |
190 | const phase3Tools = toolDefinitions.filter((t) =>
191 | ["sync_code_to_docs", "generate_contextual_content"].includes(t.name),
192 | );
193 |
194 | const otherTools = toolDefinitions.filter(
195 | (t) =>
196 | ![...coreTools, ...readmeTools, ...memoryTools, ...phase3Tools].some(
197 | (ct) => ct.name === t.name,
198 | ),
199 | );
200 |
201 | // Core Documentation Tools
202 | sections.push(`## Core Documentation Tools
203 |
204 | These are the primary tools for analyzing repositories and deploying documentation:
205 | `);
206 |
207 | for (const tool of coreTools) {
208 | sections.push(formatToolSection(tool, includeExamples, format));
209 | }
210 |
211 | // README Tools
212 | if (readmeTools.length > 0) {
213 | sections.push(`---
214 |
215 | ## README Analysis & Generation Tools
216 |
217 | Specialized tools for README creation, analysis, and optimization:
218 | `);
219 |
220 | for (const tool of readmeTools) {
221 | sections.push(formatToolSection(tool, includeExamples, format));
222 | }
223 | }
224 |
225 | // Phase 3 Tools
226 | if (phase3Tools.length > 0) {
227 | sections.push(`---
228 |
229 | ## Phase 3: Code-to-Docs Synchronization Tools
230 |
231 | Advanced tools using AST analysis and drift detection:
232 | `);
233 |
234 | for (const tool of phase3Tools) {
235 | sections.push(formatToolSection(tool, includeExamples, format));
236 | }
237 | }
238 |
239 | // Memory Tools
240 | if (memoryTools.length > 0) {
241 | sections.push(`---
242 |
243 | ## Memory & Analytics Tools
244 |
245 | Tools for user preferences, deployment analytics, and knowledge graph management:
246 | `);
247 |
248 | for (const tool of memoryTools) {
249 | sections.push(formatToolSection(tool, includeExamples, format));
250 | }
251 | }
252 |
253 | // Other Tools
254 | if (otherTools.length > 0) {
255 | sections.push(`---
256 |
257 | ## Additional Tools
258 |
259 | ${otherTools
260 | .map((t) => formatToolSection(t, includeExamples, format))
261 | .join("\n")}
262 | `);
263 | }
264 |
265 | // Memory System
266 | sections.push(`---
267 |
268 | ## Memory Knowledge Graph System
269 |
270 | DocuMCP includes a persistent memory system that learns from every analysis:
271 |
272 | ### Entity Types
273 | - **Project**: Software projects with analysis history and metadata
274 | - **User**: User preferences and SSG usage patterns
275 | - **Configuration**: SSG deployment configurations with success rates
276 | - **Documentation**: Documentation structures and patterns
277 | - **CodeFile**: Source code files with metadata and change tracking
278 | - **DocumentationSection**: Documentation sections linked to code
279 | - **Technology**: Languages, frameworks, and tools used in projects
280 |
281 | ### Relationship Types
282 | - \`project_uses_technology\`: Links projects to their tech stack
283 | - \`user_prefers_ssg\`: Tracks user SSG preferences
284 | - \`project_deployed_with\`: Records deployment configurations and outcomes
285 | - \`similar_to\`: Identifies similar projects for better recommendations
286 | - \`documents\`: Links code files to documentation sections
287 | - \`outdated_for\`: Flags documentation that's out of sync with code
288 | - \`depends_on\`: Tracks technology dependencies
289 |
290 | ### Storage Location
291 | - Default: \`.documcp/memory/\`
292 | - Files: \`knowledge-graph-entities.jsonl\`, \`knowledge-graph-relationships.jsonl\`
293 | - Backups: \`.documcp/memory/backups/\`
294 | - Snapshots: \`.documcp/snapshots/\` (for drift detection)
295 |
296 | ### Memory Benefits
297 | 1. **Context-Aware Recommendations**: Uses historical data to improve SSG suggestions
298 | 2. **Learning from Success**: Tracks which configurations work best
299 | 3. **Similar Project Insights**: Leverages patterns from similar projects
300 | 4. **Drift Detection**: Automatically identifies when docs are out of sync
301 | 5. **User Preferences**: Adapts to individual user patterns over time
302 |
303 | ---
304 | `);
305 |
306 | // Phase 3 Features
307 | sections.push(`## Phase 3 Features (Code-to-Docs Sync)
308 |
309 | ### AST-Based Code Analysis
310 | - Multi-language support: TypeScript, JavaScript, Python, Go, Rust, Java, Ruby, Bash
311 | - Extracts functions, classes, interfaces, types, imports, exports
312 | - Tracks complexity metrics and code signatures
313 | - Detects semantic changes (not just text diffs)
314 |
315 | ### Drift Detection
316 | - **Snapshot-based approach**: Stores code and documentation state over time
317 | - **Impact analysis**: Categorizes changes (breaking, major, minor, patch)
318 | - **Affected documentation tracking**: Links code changes to specific docs
319 | - **Automatic suggestions**: Generates update recommendations
320 |
321 | ### Drift Types Detected
322 | - **Outdated**: Documentation references old API signatures
323 | - **Incorrect**: Documented features no longer exist in code
324 | - **Missing**: New code features lack documentation
325 | - **Breaking**: API changes that invalidate existing docs
326 |
327 | ### Sync Modes
328 | - \`detect\`: Analyze drift without making changes
329 | - \`preview\`: Show proposed changes
330 | - \`apply\`: Apply high-confidence changes automatically (threshold: 0.8)
331 | - \`auto\`: Apply all changes (use with caution)
332 |
333 | ---
334 | `);
335 |
336 | // Workflows
337 | sections.push(`## Common Workflows
338 |
339 | ### 1. New Documentation Site Setup
340 | \`\`\`
341 | 1. analyze_repository (path: "./")
342 | 2. recommend_ssg (analysisId: from step 1)
343 | 3. generate_config (ssg: from step 2, outputPath: "./")
344 | 4. setup_structure (path: "./docs", ssg: from step 2)
345 | 5. populate_diataxis_content (analysisId: from step 1, docsPath: "./docs")
346 | 6. deploy_pages (repository: repo-url, ssg: from step 2)
347 | \`\`\`
348 |
349 | ### 2. Documentation Synchronization (Phase 3)
350 | \`\`\`
351 | 1. sync_code_to_docs (projectPath: "./", docsPath: "./docs", mode: "detect")
352 | 2. Review drift report and affected sections
353 | 3. sync_code_to_docs (mode: "apply", autoApplyThreshold: 0.8)
354 | 4. Manual review of remaining changes
355 | \`\`\`
356 |
357 | ### 3. Content Generation from Code
358 | \`\`\`
359 | 1. generate_contextual_content (filePath: "./src/api.ts", documentationType: "reference")
360 | 2. generate_contextual_content (filePath: "./src/api.ts", documentationType: "tutorial")
361 | 3. Review and integrate generated content
362 | \`\`\`
363 |
364 | ### 4. Existing Documentation Improvement
365 | \`\`\`
366 | 1. analyze_repository (path: "./")
367 | 2. update_existing_documentation (analysisId: from step 1, docsPath: "./docs")
368 | 3. validate_diataxis_content (contentPath: "./docs", analysisId: from step 1)
369 | 4. check_documentation_links (documentation_path: "./docs")
370 | \`\`\`
371 |
372 | ### 5. README Enhancement
373 | \`\`\`
374 | 1. analyze_readme (project_path: "./")
375 | 2. evaluate_readme_health (readme_path: "./README.md")
376 | 3. readme_best_practices (readme_path: "./README.md", generate_template: true)
377 | 4. optimize_readme (readme_path: "./README.md")
378 | \`\`\`
379 |
380 | ---
381 | `);
382 |
383 | // Quick Reference
384 | sections.push(`## Quick Reference Table
385 |
386 | | Tool | Primary Use | Key Parameters | Output |
387 | |------|-------------|----------------|--------|
388 | ${coreTools
389 | .map(
390 | (t) =>
391 | `| \`${t.name}\` | ${t.description.slice(0, 50)}... | ${getKeyParams(
392 | t,
393 | )} | Analysis/Config |`,
394 | )
395 | .join("\n")}
396 |
397 | ---
398 |
399 | ## Tips for LLMs
400 |
401 | 1. **Always start with \`analyze_repository\`** to get project context
402 | 2. **Use the knowledge graph**: Tools automatically store and retrieve relevant history
403 | 3. **Phase 3 tools need setup**: Ensure project has code structure before running sync
404 | 4. **Memory persists**: The system learns from every interaction
405 | 5. **Workflows are composable**: Chain tools together for complex operations
406 | 6. **Permission-aware**: All tools respect MCP root permissions
407 |
408 | ---
409 |
410 | ## Storage Locations to Reference
411 |
412 | - **Memory**: \`.documcp/memory/\`
413 | - **Snapshots**: \`.documcp/snapshots/\`
414 | - **Knowledge Graph Entities**: \`.documcp/memory/knowledge-graph-entities.jsonl\`
415 | - **Knowledge Graph Relationships**: \`.documcp/memory/knowledge-graph-relationships.jsonl\`
416 | - **User Preferences**: Stored in knowledge graph with \`user_prefers_ssg\` edges
417 |
418 | ---
419 |
420 | *This file is auto-generated. To regenerate, use the \`generate_llm_context\` tool.*
421 | `);
422 |
423 | return sections.join("\n");
424 | }
425 |
426 | function formatToolSection(
427 | tool: any,
428 | includeExamples: boolean,
429 | format: "detailed" | "concise",
430 | ): string {
431 | const sections: string[] = [];
432 |
433 | sections.push(`### \`${tool.name}\``);
434 | sections.push(`**Description**: ${tool.description}`);
435 |
436 | if (format === "detailed" && tool.inputSchema) {
437 | sections.push("\n**Parameters**:");
438 | const schema = tool.inputSchema._def?.schema || tool.inputSchema;
439 |
440 | if (schema.shape) {
441 | for (const [key, value] of Object.entries(schema.shape)) {
442 | const field = value as any;
443 | const optional = field.isOptional() ? " (optional)" : " (required)";
444 | const description = field.description || "";
445 | const defaultVal = field._def.defaultValue
446 | ? ` [default: ${JSON.stringify(field._def.defaultValue())}]`
447 | : "";
448 |
449 | sections.push(`- \`${key}\`${optional}: ${description}${defaultVal}`);
450 | }
451 | }
452 | }
453 |
454 | if (includeExamples && format === "detailed") {
455 | const example = getToolExample(tool.name);
456 | if (example) {
457 | sections.push(`\n**Example**:\n\`\`\`typescript\n${example}\n\`\`\``);
458 | }
459 | }
460 |
461 | sections.push(""); // blank line
462 | return sections.join("\n");
463 | }
464 |
465 | function getKeyParams(tool: any): string {
466 | const schema = tool.inputSchema._def?.schema || tool.inputSchema;
467 | if (!schema.shape) return "N/A";
468 |
469 | const required = Object.entries(schema.shape)
470 | .filter(([_, value]) => !(value as any).isOptional())
471 | .map(([key]) => key)
472 | .slice(0, 3);
473 |
474 | return required.join(", ") || "N/A";
475 | }
476 |
477 | function getToolExample(toolName: string): string | null {
478 | const examples: Record<string, string> = {
479 | analyze_repository: `analyze_repository({
480 | path: "./",
481 | depth: "standard"
482 | })`,
483 | recommend_ssg: `recommend_ssg({
484 | analysisId: "repo_abc123",
485 | userId: "default",
486 | preferences: {
487 | priority: "simplicity",
488 | ecosystem: "javascript"
489 | }
490 | })`,
491 | sync_code_to_docs: `sync_code_to_docs({
492 | projectPath: "./",
493 | docsPath: "./docs",
494 | mode: "detect",
495 | createSnapshot: true
496 | })`,
497 | generate_contextual_content: `generate_contextual_content({
498 | filePath: "./src/api.ts",
499 | documentationType: "reference",
500 | includeExamples: true,
501 | style: "detailed"
502 | })`,
503 | deploy_pages: `deploy_pages({
504 | repository: "user/repo",
505 | ssg: "docusaurus",
506 | branch: "gh-pages",
507 | userId: "default"
508 | })`,
509 | };
510 |
511 | return examples[toolName] || null;
512 | }
513 |
```
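A minimal sketch of calling this tool directly, relying only on the Zod schema defined above (`projectPath` is required; `includeExamples` defaults to `true` and `format` to `"detailed"`):

```typescript
// Writes ./LLM_CONTEXT.md and returns a formatted MCP response.
const response = await generateLLMContext({
  projectPath: "./",
  format: "concise", // overrides the "detailed" default
});
// On success, the response payload includes the resolved output path and
// stats.totalTools (the number of registered tool definitions).
```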
--------------------------------------------------------------------------------
/tests/tools/manage-sitemap.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Tests for manage-sitemap MCP tool
3 | */
4 |
5 | import { promises as fs } from "fs";
6 | import path from "path";
7 | import { tmpdir } from "os";
8 | import {
9 | manageSitemap,
10 | ManageSitemapInputSchema,
11 | } from "../../src/tools/manage-sitemap.js";
12 |
13 | /**
14 | * Helper to parse data from MCP tool response
15 | */
16 | function parseMCPResponse(result: { content: any[] }): any {
17 | if (!result.content || !result.content[0]) {
18 | throw new Error("Invalid MCP response structure");
19 | }
20 | return JSON.parse(result.content[0].text);
21 | }
22 |
23 | describe("manage-sitemap tool", () => {
24 | let testDir: string;
25 | let docsDir: string;
26 |
27 | beforeEach(async () => {
28 | testDir = path.join(tmpdir(), `sitemap-tool-test-${Date.now()}`);
29 | docsDir = path.join(testDir, "docs");
30 | await fs.mkdir(docsDir, { recursive: true });
31 | });
32 |
33 | afterEach(async () => {
34 | try {
35 | await fs.rm(testDir, { recursive: true, force: true });
36 | } catch (error) {
37 | // Ignore cleanup errors
38 | }
39 | });
40 |
41 | describe("input validation", () => {
42 | it("should validate required fields", () => {
43 | expect(() => {
44 | ManageSitemapInputSchema.parse({});
45 | }).toThrow();
46 | });
47 |
48 | it("should validate action enum", () => {
49 | expect(() => {
50 | ManageSitemapInputSchema.parse({
51 | action: "invalid",
52 | docsPath: "/path",
53 | });
54 | }).toThrow();
55 | });
56 |
57 | it("should accept valid input", () => {
58 | const result = ManageSitemapInputSchema.parse({
59 | action: "generate",
60 | docsPath: "/path/to/docs",
61 | baseUrl: "https://example.com",
62 | });
63 |
64 | expect(result.action).toBe("generate");
65 | expect(result.docsPath).toBe("/path/to/docs");
66 | expect(result.baseUrl).toBe("https://example.com");
67 | });
68 | });
69 |
70 | describe("generate action", () => {
71 | it("should generate sitemap.xml", async () => {
72 | // Create test documentation
73 | await fs.writeFile(path.join(docsDir, "index.md"), "# Home");
74 | await fs.writeFile(path.join(docsDir, "guide.md"), "# Guide");
75 |
76 | const result = await manageSitemap({
77 | action: "generate",
78 | docsPath: docsDir,
79 | baseUrl: "https://example.com",
80 | });
81 |
82 | expect(result.content).toBeDefined();
83 | expect(result.content[0].type).toBe("text");
84 | expect(result.content[0].text).toContain("✅");
85 | expect(result.content[0].text).toContain(
86 | "Sitemap generated successfully",
87 | );
88 |
89 | // Verify data is in the response
90 | const data = JSON.parse(result.content[0].text);
91 | expect(data.action).toBe("generate");
92 | expect(data.totalUrls).toBe(2);
93 |
94 | // Verify file was created
95 | const sitemapPath = path.join(docsDir, "sitemap.xml");
96 | const exists = await fs
97 | .access(sitemapPath)
98 | .then(() => true)
99 | .catch(() => false);
100 | expect(exists).toBe(true);
101 | });
102 |
103 | it("should require baseUrl for generate action", async () => {
104 | const result = await manageSitemap({
105 | action: "generate",
106 | docsPath: docsDir,
107 | });
108 |
109 | const data = parseMCPResponse(result);
110 | expect(data.success).toBe(false);
111 | expect(data.error.code).toBe("BASE_URL_REQUIRED");
112 | expect(data.error.message).toContain("baseUrl is required");
113 | });
114 |
115 | it("should return error if docs directory does not exist", async () => {
116 | const result = await manageSitemap({
117 | action: "generate",
118 | docsPath: "/nonexistent/path",
119 | baseUrl: "https://example.com",
120 | });
121 |
122 | const data = parseMCPResponse(result);
123 | expect(data.success).toBe(false);
124 | expect(data.error.code).toBe("DOCS_DIR_NOT_FOUND");
125 | expect(data.error.message).toContain("not found");
126 | });
127 |
128 | it("should include statistics in output", async () => {
129 | await fs.mkdir(path.join(docsDir, "tutorials"), { recursive: true });
130 | await fs.mkdir(path.join(docsDir, "reference"), { recursive: true });
131 | await fs.writeFile(
132 | path.join(docsDir, "tutorials", "guide.md"),
133 | "# Tutorial",
134 | );
135 | await fs.writeFile(path.join(docsDir, "reference", "api.md"), "# API");
136 |
137 | const result = await manageSitemap({
138 | action: "generate",
139 | docsPath: docsDir,
140 | baseUrl: "https://example.com",
141 | });
142 |
143 | const output = result.content[0].text;
144 | expect(output).toContain("URLs by Category");
145 | expect(output).toContain("Change Frequencies");
146 | expect(output).toContain("Next Steps");
147 | });
148 | });
149 |
150 | describe("validate action", () => {
151 | it("should validate existing sitemap", async () => {
152 | // Generate a sitemap first
153 | await fs.writeFile(path.join(docsDir, "page.md"), "# Page");
154 | await manageSitemap({
155 | action: "generate",
156 | docsPath: docsDir,
157 | baseUrl: "https://example.com",
158 | });
159 |
160 | const result = await manageSitemap({
161 | action: "validate",
162 | docsPath: docsDir,
163 | });
164 |
165 | expect(result.content[0].text).toContain("✅");
166 | expect(result.content[0].text).toContain("Sitemap is valid");
167 |
168 | const data = parseMCPResponse(result);
169 | expect(data.valid).toBe(true);
170 | });
171 |
172 | it("should return error if sitemap does not exist", async () => {
173 | const result = await manageSitemap({
174 | action: "validate",
175 | docsPath: docsDir,
176 | });
177 |
178 | const data = parseMCPResponse(result);
179 | expect(data.success).toBe(false);
180 | expect(data.error.code).toBe("SITEMAP_NOT_FOUND");
181 | expect(data.error.message).toContain("Sitemap not found");
182 | });
183 |
184 | it("should detect invalid sitemap", async () => {
185 | // Create invalid sitemap
186 | const sitemapPath = path.join(docsDir, "sitemap.xml");
187 | await fs.writeFile(
188 | sitemapPath,
189 | `<?xml version="1.0" encoding="UTF-8"?>
190 | <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
191 | <url>
192 | <loc>ftp://example.com/page.html</loc>
193 | <priority>5.0</priority>
194 | </url>
195 | </urlset>`,
196 | );
197 |
198 | const result = await manageSitemap({
199 | action: "validate",
200 | docsPath: docsDir,
201 | });
202 |
203 | const data = parseMCPResponse(result);
204 | expect(data.success).toBe(false);
205 | expect(data.error.code).toBe("VALIDATION_FAILED");
206 | expect(data.error.message).toContain("validation failed");
207 | expect(data.data.valid).toBe(false);
208 | expect(data.data.errorCount).toBeGreaterThan(0);
209 | });
210 | });
211 |
212 | describe("update action", () => {
213 | it("should update existing sitemap", async () => {
214 | // Create initial sitemap
215 | await fs.writeFile(path.join(docsDir, "page1.md"), "# Page 1");
216 | await manageSitemap({
217 | action: "generate",
218 | docsPath: docsDir,
219 | baseUrl: "https://example.com",
220 | });
221 |
222 | // Add new page
223 | await fs.writeFile(path.join(docsDir, "page2.md"), "# Page 2");
224 |
225 | const result = await manageSitemap({
226 | action: "update",
227 | docsPath: docsDir,
228 | baseUrl: "https://example.com",
229 | });
230 |
231 | expect(result.content[0].text).toContain("✅");
232 | expect(result.content[0].text).toContain("Sitemap updated successfully");
233 |
234 | const data = parseMCPResponse(result);
235 | expect(data.added).toBe(1);
236 | expect(data.total).toBe(2);
237 | });
238 |
239 | it("should require baseUrl for update action", async () => {
240 | const result = await manageSitemap({
241 | action: "update",
242 | docsPath: docsDir,
243 | });
244 |
245 | const data = parseMCPResponse(result);
246 | expect(data.success).toBe(false);
247 | expect(data.error.code).toBe("BASE_URL_REQUIRED");
248 | expect(data.error.message).toContain("baseUrl is required");
249 | });
250 |
251 | it("should show removed pages", async () => {
252 | // Create sitemap with 2 pages
253 | await fs.writeFile(path.join(docsDir, "page1.md"), "# Page 1");
254 | await fs.writeFile(path.join(docsDir, "page2.md"), "# Page 2");
255 | await manageSitemap({
256 | action: "generate",
257 | docsPath: docsDir,
258 | baseUrl: "https://example.com",
259 | });
260 |
261 | // Remove one page
262 | await fs.rm(path.join(docsDir, "page2.md"));
263 |
264 | const result = await manageSitemap({
265 | action: "update",
266 | docsPath: docsDir,
267 | baseUrl: "https://example.com",
268 | });
269 |
270 | const data = parseMCPResponse(result);
271 | expect(data.removed).toBe(1);
272 | expect(data.total).toBe(1);
273 | });
274 |
275 | it("should detect no changes", async () => {
276 | await fs.writeFile(path.join(docsDir, "page.md"), "# Page");
277 | await manageSitemap({
278 | action: "generate",
279 | docsPath: docsDir,
280 | baseUrl: "https://example.com",
281 | });
282 |
283 | const result = await manageSitemap({
284 | action: "update",
285 | docsPath: docsDir,
286 | baseUrl: "https://example.com",
287 | });
288 |
289 | expect(result.content[0].text).toContain("No changes detected");
290 |
291 | const data = parseMCPResponse(result);
292 | expect(data.added).toBe(0);
293 | expect(data.removed).toBe(0);
294 | });
295 | });
296 |
297 | describe("list action", () => {
298 | it("should list all URLs from sitemap", async () => {
299 | await fs.mkdir(path.join(docsDir, "tutorials"), { recursive: true });
300 | await fs.writeFile(
301 | path.join(docsDir, "tutorials", "guide.md"),
302 | "# Tutorial Guide",
303 | );
304 | await fs.writeFile(path.join(docsDir, "index.md"), "# Home");
305 |
306 | await manageSitemap({
307 | action: "generate",
308 | docsPath: docsDir,
309 | baseUrl: "https://example.com",
310 | });
311 |
312 | const result = await manageSitemap({
313 | action: "list",
314 | docsPath: docsDir,
315 | });
316 |
317 | expect(result.content[0].text).toContain("Sitemap URLs");
318 | expect(result.content[0].text).toContain("Total: 2");
319 |
320 | const data = parseMCPResponse(result);
321 | expect(data.totalUrls).toBe(2);
322 | expect(data.urls).toHaveLength(2);
323 | });
324 |
325 | it("should group URLs by category", async () => {
326 | await fs.mkdir(path.join(docsDir, "tutorials"), { recursive: true });
327 | await fs.mkdir(path.join(docsDir, "reference"), { recursive: true });
328 | await fs.writeFile(
329 | path.join(docsDir, "tutorials", "guide.md"),
330 | "# Tutorial",
331 | );
332 | await fs.writeFile(path.join(docsDir, "reference", "api.md"), "# API");
333 |
334 | await manageSitemap({
335 | action: "generate",
336 | docsPath: docsDir,
337 | baseUrl: "https://example.com",
338 | });
339 |
340 | const result = await manageSitemap({
341 | action: "list",
342 | docsPath: docsDir,
343 | });
344 |
345 | const output = result.content[0].text;
346 | expect(output).toContain("tutorial");
347 | expect(output).toContain("reference");
348 | });
349 |
350 | it("should return error if sitemap does not exist", async () => {
351 | const result = await manageSitemap({
352 | action: "list",
353 | docsPath: docsDir,
354 | });
355 |
356 | const data = parseMCPResponse(result);
357 | expect(data.success).toBe(false);
358 | expect(data.error.code).toBe("SITEMAP_NOT_FOUND");
359 | expect(data.error.message).toContain("Sitemap not found");
360 | });
361 | });
362 |
363 | describe("custom sitemap path", () => {
364 | it("should use custom sitemap path", async () => {
365 | const customPath = path.join(testDir, "custom-sitemap.xml");
366 | await fs.writeFile(path.join(docsDir, "page.md"), "# Page");
367 |
368 | await manageSitemap({
369 | action: "generate",
370 | docsPath: docsDir,
371 | baseUrl: "https://example.com",
372 | sitemapPath: customPath,
373 | });
374 |
375 | const exists = await fs
376 | .access(customPath)
377 | .then(() => true)
378 | .catch(() => false);
379 | expect(exists).toBe(true);
380 | });
381 | });
382 |
383 | describe("include and exclude patterns", () => {
384 | it("should respect include patterns", async () => {
385 | await fs.writeFile(path.join(docsDir, "page.md"), "# Markdown");
386 | await fs.writeFile(path.join(docsDir, "page.html"), "<h1>HTML</h1>");
387 | await fs.writeFile(path.join(docsDir, "data.json"), "{}");
388 |
389 | const result = await manageSitemap({
390 | action: "generate",
391 | docsPath: docsDir,
392 | baseUrl: "https://example.com",
393 | includePatterns: ["**/*.md"],
394 | });
395 |
396 | const data = parseMCPResponse(result);
397 | expect(data.totalUrls).toBe(1);
398 | });
399 |
400 | it("should respect exclude patterns", async () => {
401 | await fs.mkdir(path.join(docsDir, "drafts"), { recursive: true });
402 | await fs.writeFile(path.join(docsDir, "page.md"), "# Page");
403 | await fs.writeFile(path.join(docsDir, "drafts", "draft.md"), "# Draft");
404 |
405 | const result = await manageSitemap({
406 | action: "generate",
407 | docsPath: docsDir,
408 | baseUrl: "https://example.com",
409 | excludePatterns: ["**/drafts/**"],
410 | });
411 |
412 | const data = parseMCPResponse(result);
413 | expect(data.totalUrls).toBe(1);
414 | });
415 | });
416 |
417 | describe("change frequency", () => {
418 | it("should use custom update frequency", async () => {
419 | await fs.writeFile(path.join(docsDir, "page.md"), "# Page");
420 |
421 | await manageSitemap({
422 | action: "generate",
423 | docsPath: docsDir,
424 | baseUrl: "https://example.com",
425 | updateFrequency: "daily",
426 | });
427 |
428 | const sitemapPath = path.join(docsDir, "sitemap.xml");
429 | const xml = await fs.readFile(sitemapPath, "utf-8");
430 |
431 | // Should contain daily for pages without specific category
432 | expect(xml).toContain("<changefreq>");
433 | });
434 | });
435 |
436 | describe("error handling", () => {
437 | it("should handle invalid action gracefully", async () => {
438 | const result = await manageSitemap({
439 | action: "generate" as any,
440 | docsPath: "/invalid/path",
441 | baseUrl: "https://example.com",
442 | });
443 |
444 | const data = parseMCPResponse(result);
445 | expect(data.success).toBe(false);
446 | expect(data.error).toBeDefined();
447 | });
448 |
449 | it("should handle file system errors", async () => {
450 | // Try to write to read-only location (will fail on most systems)
451 | const readOnlyPath = "/root/docs";
452 |
453 | const result = await manageSitemap({
454 | action: "generate",
455 | docsPath: readOnlyPath,
456 | baseUrl: "https://example.com",
457 | });
458 |
459 | const data = parseMCPResponse(result);
460 | expect(data.success).toBe(false);
461 | expect(data.error).toBeDefined();
462 | });
463 | });
464 |
465 | describe("integration with other tools", () => {
466 | it("should work with Diataxis structure", async () => {
467 | // Create Diataxis structure
468 | const categories = ["tutorials", "how-to", "reference", "explanation"];
469 | for (const category of categories) {
470 | await fs.mkdir(path.join(docsDir, category), { recursive: true });
471 | await fs.writeFile(
472 | path.join(docsDir, category, "index.md"),
473 | `# ${category}`,
474 | );
475 | }
476 |
477 | const result = await manageSitemap({
478 | action: "generate",
479 | docsPath: docsDir,
480 | baseUrl: "https://example.com",
481 | });
482 |
483 | const data = parseMCPResponse(result);
484 | expect(data.totalUrls).toBe(4);
485 | expect(data.categories).toHaveProperty("tutorial");
486 | expect(data.categories).toHaveProperty("how-to");
487 | expect(data.categories).toHaveProperty("reference");
488 | expect(data.categories).toHaveProperty("explanation");
489 | });
490 | });
491 | });
492 |
```
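For contrast with the invalid fixture in the validation test (an `ftp://` location and an out-of-range priority of 5.0), here is a sketch of a sitemap the `validate` action should accept -- assuming, as the test implies, that valid entries use `http(s)` URLs and priorities within the 0.0-1.0 range:

```typescript
// Hypothetical valid counterpart to the test's invalid fixture.
const validSitemap = `<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
  <url>
    <loc>https://example.com/page.html</loc>
    <changefreq>monthly</changefreq>
    <priority>0.8</priority>
  </url>
</urlset>`;
```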
--------------------------------------------------------------------------------
/tests/integration/readme-technical-writer.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
2 | import { promises as fs } from "fs";
3 | import { join } from "path";
4 | import { analyzeReadme } from "../../src/tools/analyze-readme.js";
5 | import { optimizeReadme } from "../../src/tools/optimize-readme.js";
6 | import { tmpdir } from "os";
7 |
8 | describe("README Technical Writer Integration Tests", () => {
9 | let testDir: string;
10 | let readmePath: string;
11 |
12 | beforeEach(async () => {
13 | // Create temporary test directory
14 | testDir = join(tmpdir(), `test-readme-integration-${Date.now()}`);
15 | await fs.mkdir(testDir, { recursive: true });
16 | readmePath = join(testDir, "README.md");
17 | });
18 |
19 | afterEach(async () => {
20 | // Cleanup test directory
21 | try {
22 | await fs.rm(testDir, { recursive: true, force: true });
23 | } catch {
24 | // Ignore cleanup errors
25 | }
26 | });
27 |
28 | describe("Real-world README analysis and optimization workflow", () => {
29 | it("should analyze and optimize a typical open source project README", async () => {
30 | // Create a realistic README that needs optimization
31 | const originalReadme = `# MyAwesome Library
32 |
33 | MyAwesome Library is a comprehensive JavaScript library that provides a wide range of utilities and functions for modern web development. It has been carefully designed to address common challenges that developers face when building complex applications, and it incorporates industry best practices to ensure optimal performance, maintainability, and ease of use.
34 |
35 | ## Table of Contents
36 |
37 | - [Installation](#installation)
38 | - [Usage](#usage)
39 | - [API Documentation](#api-documentation)
40 | - [Contributing](#contributing)
41 | - [License](#license)
42 |
43 | ## Installation
44 |
45 | Installing MyAwesome Library is straightforward and can be accomplished through several different methods depending on your project setup and preferences.
46 |
47 | ### Using npm
48 |
49 | If you're using npm as your package manager, you can install MyAwesome Library by running the following command in your terminal:
50 |
51 | \`\`\`bash
52 | npm install myawesome-library
53 | \`\`\`
54 |
55 | ### Using yarn
56 |
57 | Alternatively, if you prefer to use yarn as your package manager, you can install the library with:
58 |
59 | \`\`\`bash
60 | yarn add myawesome-library
61 | \`\`\`
62 |
63 | ### Using CDN
64 |
65 | For quick prototyping or if you prefer not to use a package manager, you can include MyAwesome Library directly from a CDN:
66 |
67 | \`\`\`html
68 | <script src="https://cdn.jsdelivr.net/npm/myawesome-library@latest/dist/myawesome.min.js"></script>
69 | \`\`\`
70 |
71 | ## Usage
72 |
73 | MyAwesome Library provides a simple and intuitive API that makes it easy to get started with your projects. Here are some basic usage examples to help you understand how to integrate the library into your applications.
74 |
75 | ### Basic Example
76 |
77 | \`\`\`javascript
78 | import { MyAwesome } from 'myawesome-library';
79 |
80 | const awesome = new MyAwesome();
81 | awesome.doSomething();
82 | \`\`\`
83 |
84 | ### Advanced Configuration
85 |
86 | For more advanced use cases, you can configure the library with various options:
87 |
88 | \`\`\`javascript
89 | import { MyAwesome } from 'myawesome-library';
90 |
91 | const awesome = new MyAwesome({
92 | apiKey: 'your-api-key',
93 | environment: 'production',
94 | debug: false,
95 | timeout: 5000
96 | });
97 | \`\`\`
98 |
99 | ## API Documentation
100 |
101 | This section provides comprehensive documentation for all the methods and properties available in MyAwesome Library.
102 |
103 | ### Core Methods
104 |
105 | #### \`doSomething(options?)\`
106 |
107 | Performs the primary functionality of the library.
108 |
109 | **Parameters:**
110 | - \`options\` (Object, optional): Configuration options
111 | - \`param1\` (String): Description of parameter 1
112 | - \`param2\` (Number): Description of parameter 2
113 | - \`param3\` (Boolean): Description of parameter 3
114 |
115 | **Returns:** Promise<Result>
116 |
117 | **Example:**
118 | \`\`\`javascript
119 | const result = await awesome.doSomething({
120 | param1: 'value',
121 | param2: 42,
122 | param3: true
123 | });
124 | \`\`\`
125 |
126 | #### \`configure(config)\`
127 |
128 | Updates the configuration of the library instance.
129 |
130 | **Parameters:**
131 | - \`config\` (Object): New configuration object
132 |
133 | **Returns:** void
134 |
135 | ### Utility Methods
136 |
137 | #### \`validate(data)\`
138 |
139 | Validates input data according to library specifications.
140 |
141 | **Parameters:**
142 | - \`data\` (Any): Data to validate
143 |
144 | **Returns:** Boolean
145 |
146 | #### \`transform(input, options)\`
147 |
148 | Transforms input data using specified options.
149 |
150 | **Parameters:**
151 | - \`input\` (Any): Input data to transform
152 | - \`options\` (Object): Transformation options
153 |
154 | **Returns:** Any
155 |
156 | ## Contributing
157 |
158 | We welcome contributions from the community! MyAwesome Library is an open source project, and we appreciate any help in making it better.
159 |
160 | ### Development Setup
161 |
162 | To set up the development environment:
163 |
164 | 1. Fork the repository
165 | 2. Clone your fork: \`git clone https://github.com/yourusername/myawesome-library.git\`
166 | 3. Install dependencies: \`npm install\`
167 | 4. Run tests: \`npm test\`
168 | 5. Start development server: \`npm run dev\`
169 |
170 | ### Coding Standards
171 |
172 | Please ensure your code follows our coding standards:
173 |
174 | - Use TypeScript for all new code
175 | - Follow ESLint configuration
176 | - Write tests for new features
177 | - Update documentation as needed
178 | - Use conventional commit messages
179 |
180 | ### Pull Request Process
181 |
182 | 1. Create a feature branch from main
183 | 2. Make your changes
184 | 3. Add tests for new functionality
185 | 4. Ensure all tests pass
186 | 5. Update documentation
187 | 6. Submit a pull request
188 |
189 | ## License
190 |
191 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
192 |
193 | ## Support
194 |
195 | If you encounter any issues or have questions about MyAwesome Library, please:
196 |
197 | 1. Check the [documentation](https://myawesome-library.dev/docs)
198 | 2. Search existing [issues](https://github.com/user/myawesome-library/issues)
199 | 3. Create a new issue if needed
200 | 4. Join our [Discord community](https://discord.gg/myawesome)
201 |
202 | ## Changelog
203 |
204 | See [CHANGELOG.md](CHANGELOG.md) for a list of changes and version history.
205 |
206 | ## Acknowledgments
207 |
208 | - Thanks to all contributors who have helped make this project possible
209 | - Special thanks to the open source community for inspiration and support
210 | - Built with love using TypeScript, Jest, and other amazing tools`;
211 |
212 | await fs.writeFile(readmePath, originalReadme);
213 |
214 | // Step 1: Analyze the README
215 | console.log("🔍 Analyzing README...");
216 | const analysisResult = await analyzeReadme({
217 | project_path: testDir,
218 | target_audience: "developers",
219 | optimization_level: "moderate",
220 | });
221 |
222 | expect(analysisResult.success).toBe(true);
223 | expect(analysisResult.data?.analysis.overallScore).toBeDefined();
224 | expect(
225 | analysisResult.data?.analysis.lengthAnalysis.currentWords,
226 | ).toBeGreaterThan(500);
227 | expect(
228 | analysisResult.data?.analysis.optimizationOpportunities.length,
229 | ).toBeGreaterThan(0);
230 |
231 | console.log(
232 | `📊 Analysis Score: ${analysisResult.data?.analysis.overallScore}/100`,
233 | );
234 | console.log(
235 | `📝 Word Count: ${analysisResult.data?.analysis.lengthAnalysis.currentWords}`,
236 | );
237 | console.log(
238 | `💡 Optimization Opportunities: ${analysisResult.data?.analysis.optimizationOpportunities.length}`,
239 | );
240 |
241 | // Step 2: Optimize the README
242 | console.log("\n🛠️ Optimizing README...");
243 | const optimizationResult = await optimizeReadme({
244 | readme_path: readmePath,
245 | strategy: "developer_focused",
246 | max_length: 300,
247 | include_tldr: true,
248 | create_docs_directory: true,
249 | output_path: readmePath,
250 | });
251 |
252 | expect(optimizationResult.success).toBe(true);
253 | expect(optimizationResult.data?.optimization.optimizedContent).toContain(
254 | "## TL;DR",
255 | );
256 | expect(
257 | optimizationResult.data?.optimization.originalLength,
258 | ).toBeGreaterThan(0);
259 | // Note: Optimization may not always reduce length due to TL;DR addition
260 | expect(
261 | optimizationResult.data?.optimization.optimizedLength,
262 | ).toBeGreaterThan(0);
263 |
264 | console.log(
265 | `📉 Length Reduction: ${optimizationResult.data?.optimization.reductionPercentage}%`,
266 | );
267 | console.log(
268 | `🔄 Restructuring Changes: ${optimizationResult.data?.optimization.restructuringChanges.length}`,
269 | );
270 | console.log(
271 | `📁 Extracted Sections: ${optimizationResult.data?.optimization.extractedSections.length}`,
272 | );
273 |
274 | // Step 3: Verify the optimized README is better
275 | const optimizedContent = await fs.readFile(readmePath, "utf-8");
276 | expect(optimizedContent).toContain("## TL;DR");
277 | // Note: Length may increase due to TL;DR addition, but structure improves
278 | expect(optimizedContent.length).toBeGreaterThan(0);
279 |
280 | // Step 4: Re-analyze to confirm improvement
281 | console.log("\n🔍 Re-analyzing optimized README...");
282 | const reanalysisResult = await analyzeReadme({
283 | project_path: testDir,
284 | target_audience: "developers",
285 | });
286 |
287 | expect(reanalysisResult.success).toBe(true);
288 | console.log(
289 | `📊 New Analysis Score: ${reanalysisResult.data?.analysis.overallScore}/100`,
290 | );
291 |
292 | // The optimized version should have fewer optimization opportunities
293 | const originalOpportunities =
294 | analysisResult.data?.analysis.optimizationOpportunities.length ?? 0;
295 | const newOpportunities =
296 | reanalysisResult.data?.analysis.optimizationOpportunities.length ?? 0;
297 | expect(newOpportunities).toBeLessThanOrEqual(originalOpportunities);
298 | });
299 |
300 | it("should handle enterprise-focused optimization strategy", async () => {
301 | const enterpriseReadme = `# Enterprise Solution
302 |
303 | Our enterprise solution provides comprehensive business capabilities.
304 |
305 | ## Features
306 |
307 | - Feature 1
308 | - Feature 2
309 | - Feature 3
310 |
311 | ## Installation
312 |
313 | Standard installation process.
314 |
315 | ## Usage
316 |
317 | Basic usage instructions.
318 |
319 | ## Support
320 |
321 | Contact our support team.`;
322 |
323 | await fs.writeFile(readmePath, enterpriseReadme);
324 |
325 | const result = await optimizeReadme({
326 | readme_path: readmePath,
327 | strategy: "enterprise_focused",
328 | max_length: 200,
329 | });
330 |
331 | expect(result.success).toBe(true);
332 | expect(result.data?.optimization.optimizedContent).toContain("## TL;DR");
333 |
334 | // Enterprise strategy should provide relevant optimization
335 | expect(result.data?.optimization.recommendations.length).toBeGreaterThan(
336 | 0,
337 | );
338 | expect(result.data?.optimization.optimizedContent).toContain("## TL;DR");
339 | });
340 |
341 | it("should handle community-focused optimization strategy", async () => {
342 | const communityReadme = `# Open Source Project
343 |
344 | A project for the community.
345 |
346 | ## Installation
347 |
348 | npm install project
349 |
350 | ## Usage
351 |
352 | Basic usage.
353 |
354 | ## License
355 |
356 | MIT License`;
357 |
358 | await fs.writeFile(readmePath, communityReadme);
359 |
360 | const result = await optimizeReadme({
361 | readme_path: readmePath,
362 | strategy: "community_focused",
363 | max_length: 150,
364 | });
365 |
366 | expect(result.success).toBe(true);
367 | expect(result.data?.optimization.optimizedContent).toContain("## TL;DR");
368 |
369 | // Community strategy should focus on contribution and collaboration
370 | const optimizedContent = result.data?.optimization.optimizedContent || "";
371 | expect(optimizedContent.toLowerCase()).toMatch(
372 | /contribut|collaborat|communit/,
373 | );
374 | });
375 | });
376 |
377 | describe("Error handling and edge cases", () => {
378 | it("should handle README with no headings", async () => {
379 | const noHeadingsReadme = `This is a README without any headings. It just contains plain text describing the project. There are no sections or structure to work with.`;
380 |
381 | await fs.writeFile(readmePath, noHeadingsReadme);
382 |
383 | const analysisResult = await analyzeReadme({
384 | project_path: testDir,
385 | });
386 |
387 | expect(analysisResult.success).toBe(true);
388 | expect(
389 | analysisResult.data?.analysis.structureAnalysis.scannabilityScore,
390 | ).toBeLessThan(50);
391 | expect(
392 | analysisResult.data?.analysis.optimizationOpportunities.length,
393 | ).toBeGreaterThan(0);
394 |
395 | const optimizationResult = await optimizeReadme({
396 | readme_path: readmePath,
397 | strategy: "general",
398 | });
399 |
400 | expect(optimizationResult.success).toBe(true);
401 | expect(optimizationResult.data?.optimization.optimizedContent).toContain(
402 | "## TL;DR",
403 | );
404 | });
405 |
406 | it("should handle very short README", async () => {
407 | const shortReadme = `# Project\n\nShort description.`;
408 |
409 | await fs.writeFile(readmePath, shortReadme);
410 |
411 | const analysisResult = await analyzeReadme({
412 | project_path: testDir,
413 | max_length_target: 100,
414 | });
415 |
416 | expect(analysisResult.success).toBe(true);
417 | expect(analysisResult.data?.analysis.lengthAnalysis.exceedsTarget).toBe(
418 | false,
419 | );
420 |
421 | const optimizationResult = await optimizeReadme({
422 | readme_path: readmePath,
423 | max_length: 100,
424 | });
425 |
426 | expect(optimizationResult.success).toBe(true);
427 | // Should still add TL;DR even for short READMEs
428 | expect(optimizationResult.data?.optimization.optimizedContent).toContain(
429 | "## TL;DR",
430 | );
431 | });
432 |
433 | it("should handle README with existing TL;DR", async () => {
434 | const readmeWithTldr = `# Project
435 |
436 | ## TL;DR
437 |
438 | This project does X for Y users.
439 |
440 | ## Installation
441 |
442 | npm install project
443 |
444 | ## Usage
445 |
446 | Use it like this.`;
447 |
448 | await fs.writeFile(readmePath, readmeWithTldr);
449 |
450 | const result = await optimizeReadme({
451 | readme_path: readmePath,
452 | preserve_existing: true,
453 | });
454 |
455 | expect(result.success).toBe(true);
456 | // The tool may regenerate the TL;DR, but the existing summary must be preserved
457 | expect(result.data?.optimization.optimizedContent).toContain(
458 | "This project does X for Y users",
459 | );
460 | });
461 | });
462 |
463 | describe("Performance and scalability", () => {
464 | it("should handle large README files efficiently", async () => {
465 | // Create a large README with many sections
466 | const largeSections = Array.from({ length: 50 }, (_, i) =>
467 | `## Section ${i + 1}\n\nThis is section ${
468 | i + 1
469 | } with some content. `.repeat(20),
470 | ).join("\n\n");
471 |
472 | const largeReadme = `# Large Project\n\n${largeSections}`;
473 |
474 | await fs.writeFile(readmePath, largeReadme);
475 |
476 | const startTime = Date.now();
477 |
478 | const analysisResult = await analyzeReadme({
479 | project_path: testDir,
480 | max_length_target: 500,
481 | });
482 |
483 | const analysisTime = Date.now() - startTime;
484 |
485 | expect(analysisResult.success).toBe(true);
486 | expect(analysisTime).toBeLessThan(5000); // Should complete within 5 seconds
487 | expect(analysisResult.data?.analysis.lengthAnalysis.exceedsTarget).toBe(
488 | true,
489 | );
490 |
491 | const optimizationStartTime = Date.now();
492 |
493 | const optimizationResult = await optimizeReadme({
494 | readme_path: readmePath,
495 | max_length: 500,
496 | create_docs_directory: true,
497 | });
498 |
499 | const optimizationTime = Date.now() - optimizationStartTime;
500 |
501 | expect(optimizationResult.success).toBe(true);
502 | expect(optimizationTime).toBeLessThan(5000); // Should complete within 5 seconds
503 | expect(
504 | optimizationResult.data?.optimization.extractedSections.length,
505 | ).toBeGreaterThan(0);
506 | });
507 | });
508 | });
509 |
```
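Pulling together the parameter surface these integration tests exercise, a sketch of a single `optimizeReadme` call -- the strategies seen above are `developer_focused`, `enterprise_focused`, `community_focused`, and `general`, and every option name below comes straight from the test inputs (no additional flags are assumed to exist):

```typescript
// Options consolidated from the integration tests above.
const result = await optimizeReadme({
  readme_path: "./README.md",
  strategy: "developer_focused", // or enterprise_focused | community_focused | general
  max_length: 300,               // target word budget for the main README
  include_tldr: true,            // prepend a generated "## TL;DR" section
  create_docs_directory: true,   // extract long sections into a docs/ directory
  output_path: "./README.md",    // write the optimized content in place
  preserve_existing: true,       // keep existing content such as a prior TL;DR
});
if (result.success) {
  console.log(result.data?.optimization.reductionPercentage);
}
```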
--------------------------------------------------------------------------------
/tests/tools/validate-documentation-freshness.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Integration Tests for validate_documentation_freshness Tool
3 | */
4 |
5 | import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
6 | import fs from "fs/promises";
7 | import path from "path";
8 | import os from "os";
9 | import { simpleGit } from "simple-git";
10 | import {
11 | validateDocumentationFreshness,
12 | type ValidateDocumentationFreshnessInput,
13 | } from "../../src/tools/validate-documentation-freshness.js";
14 | import { parseDocFrontmatter } from "../../src/utils/freshness-tracker.js";
15 |
16 | describe("validate_documentation_freshness Tool", () => {
17 | let tempDir: string;
18 | let docsDir: string;
19 | let projectDir: string;
20 |
21 | beforeEach(async () => {
22 | tempDir = await fs.mkdtemp(
23 | path.join(os.tmpdir(), "validate-freshness-test-"),
24 | );
25 | docsDir = path.join(tempDir, "docs");
26 | projectDir = tempDir;
27 | await fs.mkdir(docsDir);
28 | });
29 |
30 | afterEach(async () => {
31 | await fs.rm(tempDir, { recursive: true, force: true });
32 | });
33 |
34 | describe("Initialization", () => {
35 | it("should initialize metadata for files without it", async () => {
36 | await fs.writeFile(path.join(docsDir, "test.md"), "# Test Document");
37 |
38 | const input: ValidateDocumentationFreshnessInput = {
39 | docsPath: docsDir,
40 | projectPath: projectDir,
41 | initializeMissing: true,
42 | };
43 |
44 | const result = await validateDocumentationFreshness(input);
45 |
46 | expect(result.success).toBe(true);
47 | expect(result.data.report.initialized).toBe(1);
48 |
49 | const frontmatter = await parseDocFrontmatter(
50 | path.join(docsDir, "test.md"),
51 | );
52 | expect(frontmatter.documcp?.last_updated).toBeDefined();
53 | expect(frontmatter.documcp?.last_validated).toBeDefined();
54 | });
55 |
56 | it("should skip files that already have metadata", async () => {
57 | await fs.writeFile(
58 | path.join(docsDir, "existing.md"),
59 | `---
60 | documcp:
61 | last_updated: "2025-01-01T00:00:00Z"
62 | ---
63 | # Existing`,
64 | );
65 |
66 | const input: ValidateDocumentationFreshnessInput = {
67 | docsPath: docsDir,
68 | projectPath: projectDir,
69 | initializeMissing: true,
70 | };
71 |
72 | const result = await validateDocumentationFreshness(input);
73 |
74 | expect(result.success).toBe(true);
75 | expect(result.data.report.initialized).toBe(0);
76 | expect(result.data.report.skipped).toBe(1);
77 | });
78 |
79 | it("should set default update frequency", async () => {
80 | await fs.writeFile(path.join(docsDir, "test.md"), "# Test");
81 |
82 | const input: ValidateDocumentationFreshnessInput = {
83 | docsPath: docsDir,
84 | projectPath: projectDir,
85 | initializeMissing: true,
86 | updateFrequency: "weekly",
87 | };
88 |
89 | const result = await validateDocumentationFreshness(input);
90 |
91 | expect(result.success).toBe(true);
92 |
93 | const frontmatter = await parseDocFrontmatter(
94 | path.join(docsDir, "test.md"),
95 | );
96 | expect(frontmatter.documcp?.update_frequency).toBe("weekly");
97 | });
98 | });
99 |
100 | describe("Updating Existing Metadata", () => {
101 | it("should update last_validated for existing files when requested", async () => {
102 | await fs.writeFile(
103 | path.join(docsDir, "existing.md"),
104 | `---
105 | documcp:
106 | last_updated: "2025-01-01T00:00:00Z"
107 | ---
108 | # Existing`,
109 | );
110 |
111 | const input: ValidateDocumentationFreshnessInput = {
112 | docsPath: docsDir,
113 | projectPath: projectDir,
114 | updateExisting: true,
115 | };
116 |
117 | const result = await validateDocumentationFreshness(input);
118 |
119 | expect(result.success).toBe(true);
120 | expect(result.data.report.updated).toBe(1);
121 |
122 | const frontmatter = await parseDocFrontmatter(
123 | path.join(docsDir, "existing.md"),
124 | );
125 | expect(frontmatter.documcp?.last_validated).toBeDefined();
126 | expect(
127 | new Date(frontmatter.documcp?.last_validated!).getTime(),
128 | ).toBeGreaterThan(new Date("2025-01-01").getTime());
129 | });
130 |
131 | it("should not update existing files when updateExisting is false", async () => {
132 | const originalDate = "2025-01-01T00:00:00Z";
133 | await fs.writeFile(
134 | path.join(docsDir, "existing.md"),
135 | `---
136 | documcp:
137 | last_updated: "${originalDate}"
138 | last_validated: "${originalDate}"
139 | ---
140 | # Existing`,
141 | );
142 |
143 | const input: ValidateDocumentationFreshnessInput = {
144 | docsPath: docsDir,
145 | projectPath: projectDir,
146 | updateExisting: false,
147 | };
148 |
149 | const result = await validateDocumentationFreshness(input);
150 |
151 | expect(result.success).toBe(true);
152 | expect(result.data.report.updated).toBe(0);
153 |
154 | const frontmatter = await parseDocFrontmatter(
155 | path.join(docsDir, "existing.md"),
156 | );
157 | expect(frontmatter.documcp?.last_validated).toBe(originalDate);
158 | });
159 | });
160 |
161 | describe("Git Integration", () => {
162 | it("should add git commit hash when git is available", async () => {
163 | // Initialize git repo
164 | const git = simpleGit(projectDir);
165 | await git.init();
166 | await git.addConfig("user.name", "Test User");
167 | await git.addConfig("user.email", "[email protected]");
168 | await fs.writeFile(path.join(projectDir, "README.md"), "# Test Repo");
169 | await git.add(".");
170 | await git.commit("Initial commit");
171 |
172 | await fs.writeFile(path.join(docsDir, "test.md"), "# Test");
173 |
174 | const input: ValidateDocumentationFreshnessInput = {
175 | docsPath: docsDir,
176 | projectPath: projectDir,
177 | initializeMissing: true,
178 | validateAgainstGit: true,
179 | };
180 |
181 | const result = await validateDocumentationFreshness(input);
182 |
183 | expect(result.success).toBe(true);
184 | expect(result.data.report.currentCommit).toBeDefined();
185 |
186 | const frontmatter = await parseDocFrontmatter(
187 | path.join(docsDir, "test.md"),
188 | );
189 | expect(frontmatter.documcp?.validated_against_commit).toBeDefined();
190 | expect(frontmatter.documcp?.validated_against_commit).toBe(
191 | result.data.report.currentCommit,
192 | );
193 | });
194 |
195 | it("should work without git when validateAgainstGit is false", async () => {
196 | await fs.writeFile(path.join(docsDir, "test.md"), "# Test");
197 |
198 | const input: ValidateDocumentationFreshnessInput = {
199 | docsPath: docsDir,
200 | projectPath: projectDir,
201 | initializeMissing: true,
202 | validateAgainstGit: false,
203 | };
204 |
205 | const result = await validateDocumentationFreshness(input);
206 |
207 | expect(result.success).toBe(true);
208 | expect(result.data.report.currentCommit).toBeUndefined();
209 | });
210 |
211 | it("should handle non-git directories gracefully", async () => {
212 | await fs.writeFile(path.join(docsDir, "test.md"), "# Test");
213 |
214 | const input: ValidateDocumentationFreshnessInput = {
215 | docsPath: docsDir,
216 | projectPath: projectDir,
217 | initializeMissing: true,
218 | validateAgainstGit: true,
219 | };
220 |
221 | const result = await validateDocumentationFreshness(input);
222 |
223 | expect(result.success).toBe(true);
224 | expect(result.data.report.currentCommit).toBeUndefined();
225 | });
226 | });
227 |
228 | describe("Batch Operations", () => {
229 | it("should process multiple files", async () => {
230 | await fs.writeFile(path.join(docsDir, "file1.md"), "# File 1");
231 | await fs.writeFile(path.join(docsDir, "file2.md"), "# File 2");
232 | await fs.writeFile(path.join(docsDir, "file3.md"), "# File 3");
233 |
234 | const input: ValidateDocumentationFreshnessInput = {
235 | docsPath: docsDir,
236 | projectPath: projectDir,
237 | initializeMissing: true,
238 | };
239 |
240 | const result = await validateDocumentationFreshness(input);
241 |
242 | expect(result.success).toBe(true);
243 | expect(result.data.report.totalFiles).toBe(3);
244 | expect(result.data.report.initialized).toBe(3);
245 | });
246 |
247 | it("should handle nested directories", async () => {
248 | await fs.mkdir(path.join(docsDir, "api"));
249 | await fs.mkdir(path.join(docsDir, "guides"));
250 |
251 | await fs.writeFile(path.join(docsDir, "index.md"), "# Index");
252 | await fs.writeFile(path.join(docsDir, "api", "endpoints.md"), "# API");
253 | await fs.writeFile(
254 | path.join(docsDir, "guides", "tutorial.md"),
255 | "# Guide",
256 | );
257 |
258 | const input: ValidateDocumentationFreshnessInput = {
259 | docsPath: docsDir,
260 | projectPath: projectDir,
261 | initializeMissing: true,
262 | };
263 |
264 | const result = await validateDocumentationFreshness(input);
265 |
266 | expect(result.success).toBe(true);
267 | expect(result.data.report.totalFiles).toBe(3);
268 | });
269 |
270 | it("should provide individual file results", async () => {
271 | await fs.writeFile(path.join(docsDir, "test.md"), "# Test");
272 |
273 | const input: ValidateDocumentationFreshnessInput = {
274 | docsPath: docsDir,
275 | projectPath: projectDir,
276 | initializeMissing: true,
277 | };
278 |
279 | const result = await validateDocumentationFreshness(input);
280 |
281 | expect(result.data.report.files).toBeDefined();
282 | expect(result.data.report.files.length).toBe(1);
283 | expect(result.data.report.files[0].action).toBe("initialized");
284 | });
285 | });
286 |
287 | describe("Error Handling", () => {
288 | it("should handle non-existent docs directory", async () => {
289 | const input: ValidateDocumentationFreshnessInput = {
290 | docsPath: "/nonexistent/docs",
291 | projectPath: projectDir,
292 | initializeMissing: true,
293 | };
294 |
295 | const result = await validateDocumentationFreshness(input);
296 |
297 | expect(result.success).toBe(false);
298 | expect(result.error).toBeDefined();
299 | expect(result.error?.code).toBe("FRESHNESS_VALIDATION_FAILED");
300 | });
301 |
302 | it("should track file-level errors", async () => {
303 | // Create a file that will cause issues
304 | await fs.writeFile(path.join(docsDir, "test.md"), "# Test");
305 |
306 | // Make it read-only to cause write errors (skip on Windows)
307 | if (process.platform !== "win32") {
308 | await fs.chmod(path.join(docsDir, "test.md"), 0o444);
309 |
310 | const input: ValidateDocumentationFreshnessInput = {
311 | docsPath: docsDir,
312 | projectPath: projectDir,
313 | initializeMissing: true,
314 | };
315 |
316 | const result = await validateDocumentationFreshness(input);
317 |
318 | // Restore permissions for cleanup
319 | await fs.chmod(path.join(docsDir, "test.md"), 0o644);
320 |
321 | expect(result.data.report.errors).toBeGreaterThan(0);
322 | }
323 | });
324 |
325 | it("should handle empty docs directory", async () => {
326 | const input: ValidateDocumentationFreshnessInput = {
327 | docsPath: docsDir,
328 | projectPath: projectDir,
329 | initializeMissing: true,
330 | };
331 |
332 | const result = await validateDocumentationFreshness(input);
333 |
334 | expect(result.success).toBe(true);
335 | expect(result.data.report.totalFiles).toBe(0);
336 | });
337 | });
338 |
339 | describe("Output Format", () => {
340 | it("should include formatted report", async () => {
341 | await fs.writeFile(path.join(docsDir, "test.md"), "# Test");
342 |
343 | const input: ValidateDocumentationFreshnessInput = {
344 | docsPath: docsDir,
345 | projectPath: projectDir,
346 | initializeMissing: true,
347 | };
348 |
349 | const result = await validateDocumentationFreshness(input);
350 |
351 | expect(result.data.formattedReport).toBeDefined();
352 | expect(result.data.formattedReport).toContain(
353 | "Documentation Freshness Validation Report",
354 | );
355 | expect(result.data.formattedReport).toContain("Summary");
356 | expect(result.data.formattedReport).toContain("Actions Performed");
357 | });
358 |
359 | it("should include summary", async () => {
360 | await fs.writeFile(path.join(docsDir, "test.md"), "# Test");
361 |
362 | const input: ValidateDocumentationFreshnessInput = {
363 | docsPath: docsDir,
364 | projectPath: projectDir,
365 | initializeMissing: true,
366 | };
367 |
368 | const result = await validateDocumentationFreshness(input);
369 |
370 | expect(result.data.summary).toBeDefined();
371 | expect(result.data.summary).toContain("Validated");
372 | expect(result.data.summary).toContain("initialized");
373 | });
374 |
375 | it("should include metadata", async () => {
376 | await fs.writeFile(path.join(docsDir, "test.md"), "# Test");
377 |
378 | const input: ValidateDocumentationFreshnessInput = {
379 | docsPath: docsDir,
380 | projectPath: projectDir,
381 | initializeMissing: true,
382 | };
383 |
384 | const result = await validateDocumentationFreshness(input);
385 |
386 | expect(result.metadata).toBeDefined();
387 | expect(result.metadata.toolVersion).toBe("1.0.0");
388 | expect(result.metadata.timestamp).toBeDefined();
389 | expect(result.metadata.executionTime).toBeGreaterThanOrEqual(0);
390 | });
391 | });
392 |
393 | describe("Update Frequency Presets", () => {
394 | const frequencies: Array<
395 | "realtime" | "active" | "recent" | "weekly" | "monthly" | "quarterly"
396 | > = ["realtime", "active", "recent", "weekly", "monthly", "quarterly"];
397 |
398 | frequencies.forEach((frequency) => {
399 | it(`should work with ${frequency} update frequency`, async () => {
400 | await fs.writeFile(
401 | path.join(docsDir, `test-${frequency}.md`),
402 | "# Test",
403 | );
404 |
405 | const input: ValidateDocumentationFreshnessInput = {
406 | docsPath: docsDir,
407 | projectPath: projectDir,
408 | initializeMissing: true,
409 | updateFrequency: frequency,
410 | };
411 |
412 | const result = await validateDocumentationFreshness(input);
413 |
414 | expect(result.success).toBe(true);
415 |
416 | const frontmatter = await parseDocFrontmatter(
417 | path.join(docsDir, `test-${frequency}.md`),
418 | );
419 | expect(frontmatter.documcp?.update_frequency).toBe(frequency);
420 | });
421 | });
422 | });
423 |
424 | describe("Mixed File States", () => {
425 | it("should handle mix of initialized, updated, and skipped files", async () => {
426 | // File without metadata (will be initialized)
427 | await fs.writeFile(path.join(docsDir, "new.md"), "# New");
428 |
429 | // File with metadata (will be skipped if updateExisting=false)
430 | await fs.writeFile(
431 | path.join(docsDir, "existing.md"),
432 | `---
433 | documcp:
434 | last_updated: "2025-01-01T00:00:00Z"
435 | ---
436 | # Existing`,
437 | );
438 |
439 | const input: ValidateDocumentationFreshnessInput = {
440 | docsPath: docsDir,
441 | projectPath: projectDir,
442 | initializeMissing: true,
443 | updateExisting: false,
444 | };
445 |
446 | const result = await validateDocumentationFreshness(input);
447 |
448 | expect(result.success).toBe(true);
449 | expect(result.data.report.initialized).toBe(1);
450 | expect(result.data.report.skipped).toBe(1);
451 | expect(result.data.report.updated).toBe(0);
452 | });
453 |
454 | it("should update all when both initializeMissing and updateExisting are true", async () => {
455 | await fs.writeFile(path.join(docsDir, "new.md"), "# New");
456 | await fs.writeFile(
457 | path.join(docsDir, "existing.md"),
458 | `---
459 | documcp:
460 | last_updated: "2025-01-01T00:00:00Z"
461 | ---
462 | # Existing`,
463 | );
464 |
465 | const input: ValidateDocumentationFreshnessInput = {
466 | docsPath: docsDir,
467 | projectPath: projectDir,
468 | initializeMissing: true,
469 | updateExisting: true,
470 | };
471 |
472 | const result = await validateDocumentationFreshness(input);
473 |
474 | expect(result.success).toBe(true);
475 | expect(result.data.report.initialized).toBe(1);
476 | expect(result.data.report.updated).toBe(1);
477 | });
478 | });
479 | });
480 |
```
--------------------------------------------------------------------------------
/docs/knowledge-graph.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | documcp:
3 | last_updated: "2025-11-20T00:46:21.958Z"
4 | last_validated: "2025-11-20T00:46:21.958Z"
5 | auto_updated: false
6 | update_frequency: monthly
7 | ---
8 |
9 | # Knowledge Graph Documentation
10 |
11 | ## Overview
12 |
13 | The DocuMCP Knowledge Graph is an intelligent semantic network that captures relationships between projects, technologies, deployments, user preferences, and documentation patterns. It enables smart recommendations, deployment tracking, preference learning, and context-aware documentation generation.
14 |
15 | ## Architecture
16 |
17 | ### Core Components
18 |
19 | - **Graph Database**: In-memory graph with persistent storage
20 | - **Node Types**: Projects, technologies, configurations, deployments, users
21 | - **Edge Types**: Relationships, dependencies, recommendations, usage patterns
22 | - **Intelligence Layer**: Pattern recognition, recommendation engine, drift detection
23 |
24 | ### Node Types
25 |
26 | #### Project Nodes
27 |
28 | ```typescript
29 | interface ProjectNode {
30 | id: string;
31 | type: "project";
32 | properties: {
33 | name: string;
34 | path: string;
35 | primaryLanguage: string;
36 | framework?: string;
37 | lastAnalyzed: string;
38 | structure: {
39 | totalFiles: number;
40 | languages: Record<string, number>;
41 | hasTests: boolean;
42 | hasCI: boolean;
43 | hasDocs: boolean;
44 | };
45 | };
46 | }
47 | ```
48 |
49 | #### Technology Nodes
50 |
51 | ```typescript
52 | interface TechnologyNode {
53 | id: string;
54 | type: "technology";
55 | properties: {
56 | name: string;
57 | category: "language" | "framework" | "tool" | "platform";
58 | version?: string;
59 | ecosystem: string;
60 | popularity: number;
61 | stability: number;
62 | };
63 | }
64 | ```
65 |
66 | #### Configuration Nodes
67 |
68 | ```typescript
69 | interface ConfigurationNode {
70 | id: string;
71 | type: "configuration";
72 | properties: {
73 | ssg: string;
74 | settings: Record<string, any>;
75 | optimizations: string[];
76 | lastUsed: string;
77 | successRate: number;
78 | };
79 | }
80 | ```
81 |
82 | #### User Nodes
83 |
84 | ```typescript
85 | interface UserNode {
86 | id: string;
87 | type: "user";
88 | properties: {
89 | userId: string;
90 | preferences: {
91 | preferredSSGs: string[];
92 | expertise: "beginner" | "intermediate" | "advanced";
93 | technologies: string[];
94 | };
95 | activity: {
96 | totalDeployments: number;
97 | successfulDeployments: number;
98 | lastActive: string;
99 | };
100 | };
101 | }
102 | ```
103 |
104 | ### Edge Types
105 |
106 | #### Project Relationships
107 |
108 | - `depends_on`: Project dependencies and technology usage
109 | - `similar_to`: Projects with similar characteristics
110 | - `derived_from`: Project templates and forks
111 |
112 | #### Deployment Tracking
113 |
114 | - `deployed_with`: Project deployed using specific SSG/configuration
115 | - `succeeded_at`: Successful deployment timestamp and metrics
116 | - `failed_at`: Failed deployment with error analysis
117 |
118 | #### User Patterns
119 |
120 | - `prefers`: User SSG and technology preferences
121 | - `succeeded_with`: User's successful deployment patterns
122 | - `learned_from`: Preference updates based on experience
123 |
124 | #### Recommendation Flows
125 |
126 | - `recommends`: SSG recommendations with confidence scores
127 | - `optimizes_for`: Configuration optimizations for specific scenarios
128 | - `suggests`: Next-step suggestions based on current state
129 |
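Taken together, these edge types share a common record shape. The sketch below is illustrative rather than a documented interface, but it matches the `source`/`target` id prefixes (e.g., `project:my-project-123`) and the `edge.properties.*` access pattern used in the query examples on this page:

```typescript
// Illustrative edge shape (not a documented DocuMCP interface):
// field names mirror the node interfaces above and the query examples below.
interface GraphEdge {
  id: string;
  type:
    | "depends_on"
    | "similar_to"
    | "derived_from"
    | "deployed_with"
    | "succeeded_at"
    | "failed_at"
    | "prefers"
    | "succeeded_with"
    | "learned_from"
    | "recommends"
    | "optimizes_for"
    | "suggests";
  source: string; // e.g., "project:my-project-123"
  target: string; // e.g., "technology:react"
  properties: {
    success?: boolean; // deployment edges
    confidence?: number; // recommendation edges
    timestamp?: string;
    [key: string]: unknown;
  };
}
```
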
130 | ## Knowledge Graph Integration
131 |
132 | ### Initialization
133 |
134 | ```typescript
135 | import { initializeKnowledgeGraph, getKnowledgeGraph } from "./kg-integration";
136 |
137 | // Initialize with storage directory
138 | await initializeKnowledgeGraph("/path/to/storage");
139 |
140 | // Get graph instance
141 | const kg = await getKnowledgeGraph();
142 | ```
143 |
144 | ### Project Management
145 |
146 | #### Creating Projects
147 |
148 | ```typescript
149 | import { createOrUpdateProject } from "./kg-integration";
150 |
151 | const project = await createOrUpdateProject({
152 | id: "my-project-123",
153 | timestamp: new Date().toISOString(),
154 | path: "/path/to/project",
155 | projectName: "My Documentation Site",
156 | structure: {
157 | totalFiles: 150,
158 | languages: {
159 | typescript: 80,
160 | javascript: 45,
161 | markdown: 25,
162 | },
163 | hasTests: true,
164 | hasCI: true,
165 | hasDocs: true,
166 | },
167 | });
168 | ```
169 |
170 | #### Querying Projects
171 |
172 | ```typescript
173 | // Find project by ID
174 | const project = await kg.findNode({
175 | type: "project",
176 | properties: { id: "my-project-123" },
177 | });
178 |
179 | // Find similar projects
180 | const similarProjects = await kg.findNodes({
181 | type: "project",
182 | properties: {
183 |     primaryLanguage: "typescript",
184 | },
185 | });
186 | ```
187 |
188 | ### Deployment Tracking
189 |
190 | #### Recording Deployments
191 |
192 | ```typescript
193 | import { trackDeployment } from "./kg-integration";
194 |
195 | // Successful deployment
196 | await trackDeployment("project-123", "docusaurus", true, {
197 | buildTime: 45000,
198 | branch: "main",
199 | customDomain: "docs.example.com",
200 | });
201 |
202 | // Failed deployment
203 | await trackDeployment("project-123", "hugo", false, {
204 | errorMessage: "Build failed: missing dependencies",
205 | failureStage: "build",
206 | buildTime: 15000,
207 | });
208 | ```
209 |
210 | #### Querying Deployment History
211 |
212 | ```typescript
213 | // Get all deployments for a project
214 | const deployments = await kg.findEdges({
215 | source: "project:my-project-123",
216 | type: "deployed_with",
217 | });
218 |
219 | // Get successful deployments only
220 | const successfulDeployments = deployments.filter(
221 | (edge) => edge.properties.success === true,
222 | );
223 | ```
224 |
225 | ### Recommendation Engine
226 |
227 | #### SSG Recommendations
228 |
229 | ```typescript
230 | import { getDeploymentRecommendations } from "./kg-integration";
231 |
232 | const recommendations = await getDeploymentRecommendations("project-123");
233 |
234 | // Returns sorted by confidence
235 | recommendations.forEach((rec) => {
236 | console.log(`${rec.ssg}: ${rec.confidence}% confidence`);
237 | console.log(`Reason: ${rec.reason}`);
238 | });
239 | ```
240 |
241 | #### Technology Compatibility
242 |
243 | ```typescript
244 | // Find compatible technologies
245 | const compatibleSSGs = await kg.findEdges({
246 | source: "technology:react",
247 | type: "compatible_with",
248 | });
249 |
250 | const recommendations = compatibleSSGs
251 | .filter((edge) => edge.target.startsWith("ssg:"))
252 |   .sort((a, b) => b.properties.confidence - a.properties.confidence);
253 | ```
254 |
255 | ### User Preference Learning
256 |
257 | #### Preference Management
258 |
259 | ```typescript
260 | import { getUserPreferenceManager } from "./user-preferences";
261 |
262 | const manager = await getUserPreferenceManager("user-123");
263 |
264 | // Track SSG usage
265 | await manager.trackSSGUsage({
266 | ssg: "docusaurus",
267 | success: true,
268 | timestamp: new Date().toISOString(),
269 | projectType: "javascript-library",
270 | });
271 |
272 | // Get personalized recommendations
273 | const personalizedRecs = await manager.getSSGRecommendations();
274 | ```
275 |
276 | #### Learning Patterns
277 |
278 | ```typescript
279 | // Update preferences based on deployment success
280 | await manager.updatePreferences({
281 | preferredSSGs: ["docusaurus", "hugo"],
282 | expertise: "intermediate",
283 | technologies: ["react", "typescript", "node"],
284 | });
285 |
286 | // Get usage statistics
287 | const stats = await manager.getUsageStatistics();
288 | console.log(`Total deployments: ${stats.totalDeployments}`);
289 | console.log(`Success rate: ${stats.successRate}%`);
290 | ```
291 |
292 | ## Code Integration (Phase 1.2)
293 |
294 | ### Code File Entities
295 |
296 | ```typescript
297 | import { createCodeFileEntities } from "./kg-code-integration";
298 |
299 | // Create code file nodes with AST analysis
300 | const codeFiles = await createCodeFileEntities(
301 | "project-123",
302 | "/path/to/repository",
303 | );
304 |
305 | // Each code file includes:
306 | // - Functions and classes (via AST parsing)
307 | // - Dependencies and imports
308 | // - Complexity metrics
309 | // - Change detection (content hash)
310 | ```
311 |
312 | ### Documentation Linking
313 |
314 | ```typescript
315 | import {
316 | createDocumentationEntities,
317 | linkCodeToDocs,
318 | } from "./kg-code-integration";
319 |
320 | // Create documentation section nodes
321 | const docSections = await createDocumentationEntities(
322 | "project-123",
323 | extractedContent,
324 | );
325 |
326 | // Link code files to documentation
327 | const relationships = await linkCodeToDocs(codeFiles, docSections);
328 |
329 | // Detect outdated documentation
330 | const outdatedLinks = relationships.filter(
331 | (edge) => edge.type === "outdated_for",
332 | );
333 | ```
334 |
335 | ## Query Patterns
336 |
337 | ### Basic Queries
338 |
339 | #### Node Queries
340 |
341 | ```typescript
342 | // Find all projects using React
343 | const reactProjects = await kg.findNodes({
344 | type: "project",
345 | properties: {
346 |     framework: "react",
347 | },
348 | });
349 |
350 | // Find high-success configurations
351 | const reliableConfigs = await kg.findNodes({
352 | type: "configuration",
353 | properties: {
354 | successRate: { gte: 0.9 },
355 | },
356 | });
357 | ```
358 |
359 | #### Edge Queries
360 |
361 | ```typescript
362 | // Find all deployment relationships
363 | const deployments = await kg.findEdges({
364 | type: "deployed_with",
365 | });
366 |
367 | // Find user preferences
368 | const userPrefs = await kg.findEdges({
369 | source: "user:developer-123",
370 | type: "prefers",
371 | });
372 | ```
373 |
374 | ### Complex Queries
375 |
376 | #### Multi-hop Traversal
377 |
378 | ```typescript
379 | // Find recommended SSGs for similar projects
380 | const recommendations = await kg.query(`
381 | MATCH (p1:project {id: 'my-project'})
382 | MATCH (p2:project)-[:similar_to]-(p1)
383 | MATCH (p2)-[:deployed_with]->(config:configuration)
384 | WHERE config.successRate > 0.8
385 | RETURN config.ssg, AVG(config.successRate) as avgSuccess
386 | ORDER BY avgSuccess DESC
387 | `);
388 | ```
389 |
390 | #### Aggregation Queries
391 |
392 | ```typescript
393 | // Get deployment statistics by SSG
394 | const ssgStats = await kg.aggregate({
395 | groupBy: "ssg",
396 | metrics: ["successRate", "buildTime", "userSatisfaction"],
397 | filters: {
398 | timestamp: { gte: "2024-01-01" },
399 | },
400 | });
401 | ```
402 |
403 | ### Pattern Detection
404 |
405 | #### Success Patterns
406 |
407 | ```typescript
408 | // Identify high-success patterns
409 | const successPatterns = await kg.findPatterns({
410 | nodeType: "project",
411 | edgeType: "deployed_with",
412 | threshold: 0.9,
413 | minOccurrences: 5,
414 | });
415 |
416 | // Example pattern: TypeScript + Docusaurus = 95% success rate
417 | ```
418 |
419 | #### Failure Analysis
420 |
421 | ```typescript
422 | // Analyze failure patterns
423 | const failurePatterns = await kg.findPatterns({
424 | nodeType: "project",
425 | edgeType: "failed_at",
426 | groupBy: ["technology", "ssg", "errorType"],
427 | });
428 | ```
429 |
430 | ## Memory Management
431 |
432 | ### Storage and Persistence
433 |
434 | ```typescript
435 | // Configure storage directory
436 | const storage = new KnowledgeGraphStorage({
437 | directory: "/path/to/kg-storage",
438 | format: "jsonl", // or "sqlite", "json"
439 | compression: true,
440 | backupInterval: "daily",
441 | });
442 |
443 | // Initialize with storage
444 | await initializeKnowledgeGraph(storage);
445 | ```
446 |
447 | ### Memory Cleanup
448 |
449 | ```typescript
450 | import { memoryCleanup } from "./memory-management";
451 |
452 | // Clean old memories (default: 30 days)
453 | await memoryCleanup({
454 | daysToKeep: 30,
455 | dryRun: false, // Set true to preview
456 | });
457 | ```
458 |
459 | ### Memory Export/Import
460 |
461 | ```typescript
462 | import { memoryExport, memoryImportAdvanced } from "./memory-management";
463 |
464 | // Export knowledge graph
465 | await memoryExport({
466 | format: "json",
467 | outputPath: "/backup/kg-export.json",
468 | filter: {
469 | nodeTypes: ["project", "configuration"],
470 | dateRange: { since: "2024-01-01" },
471 | },
472 | });
473 |
474 | // Import knowledge graph
475 | await memoryImportAdvanced({
476 | inputPath: "/backup/kg-export.json",
477 | options: {
478 | mergeStrategy: "update",
479 | validateSchema: true,
480 | conflictResolution: "newer-wins",
481 | },
482 | });
483 | ```
484 |
485 | ## Analytics and Insights
486 |
487 | ### Memory Insights
488 |
489 | ```typescript
490 | import { memoryInsights } from "./memory-management";
491 |
492 | const insights = await memoryInsights({
493 | projectId: "my-project",
494 | timeRange: {
495 | from: "2024-01-01",
496 | to: "2024-12-31",
497 | },
498 | });
499 |
500 | console.log(`Deployment success rate: ${insights.deploymentSuccessRate}`);
501 | console.log(`Most successful SSG: ${insights.mostSuccessfulSSG}`);
502 | console.log(`Optimization opportunities: ${insights.optimizations.length}`);
503 | ```
504 |
505 | ### Temporal Analysis
506 |
507 | ```typescript
508 | import { memoryTemporalAnalysis } from "./memory-management";
509 |
510 | const trends = await memoryTemporalAnalysis({
511 | analysisType: "patterns",
512 | query: {
513 | nodeType: "project",
514 | edgeType: "deployed_with",
515 | timeWindow: "monthly",
516 | },
517 | });
518 |
519 | // Analyze deployment trends over time
520 | trends.patterns.forEach((pattern) => {
521 | console.log(`${pattern.month}: ${pattern.successRate}% success`);
522 | });
523 | ```
524 |
525 | ### Intelligent Analysis
526 |
527 | ```typescript
528 | import { memoryIntelligentAnalysis } from "./memory-management";
529 |
530 | const analysis = await memoryIntelligentAnalysis({
531 | projectPath: "/path/to/project",
532 | baseAnalysis: repositoryAnalysis,
533 | });
534 |
535 | console.log(`Predicted success rate: ${analysis.predictions.successRate}`);
536 | console.log(`Recommendations: ${analysis.recommendations.length}`);
537 | console.log(`Risk factors: ${analysis.riskFactors.length}`);
538 | ```
539 |
540 | ## Visualization
541 |
542 | ### Network Visualization
543 |
544 | ```typescript
545 | import { memoryVisualization } from "./memory-management";
546 |
547 | // Generate network diagram
548 | const networkViz = await memoryVisualization({
549 | visualizationType: "network",
550 | options: {
551 | layout: "force-directed",
552 | nodeSize: "degree",
553 | colorBy: "nodeType",
554 | filterEdges: ["deployed_with", "recommends"],
555 | },
556 | });
557 |
558 | // Export as SVG or interactive HTML
559 | await networkViz.export("/output/knowledge-graph.svg");
560 | ```
561 |
562 | ### Timeline Dashboard
563 |
564 | ```typescript
565 | // Generate deployment timeline
566 | const timeline = await memoryVisualization({
567 | visualizationType: "timeline",
568 | options: {
569 | timeRange: "last-6-months",
570 | groupBy: "project",
571 | metrics: ["success-rate", "build-time"],
572 | interactive: true,
573 | },
574 | });
575 | ```
576 |
577 | ## Best Practices
578 |
579 | ### Performance Optimization
580 |
581 | - Use indexed queries for frequent lookups
582 | - Implement query result caching for repeated patterns (see the sketch below)
583 | - Periodically clean up outdated relationships
584 | - Use batch operations for bulk updates
585 |
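As a concrete illustration of the caching advice above, a minimal TTL-based query-result cache can wrap a graph lookup. This is a sketch under stated assumptions, not a DocuMCP API; only a `findEdges`-style method (as shown in the query examples earlier) is assumed:

```typescript
// Sketch: TTL-based query-result caching (assumes a findEdges-style method).
const queryCache = new Map<string, { result: unknown[]; expires: number }>();

async function cachedFindEdges(
  kg: { findEdges(query: object): Promise<unknown[]> },
  query: { source?: string; type?: string },
  ttlMs = 60_000,
): Promise<unknown[]> {
  const key = JSON.stringify(query);
  const hit = queryCache.get(key);
  if (hit && hit.expires > Date.now()) {
    return hit.result; // cache hit: skip the graph traversal entirely
  }
  const result = await kg.findEdges(query);
  queryCache.set(key, { result, expires: Date.now() + ttlMs });
  return result;
}
```
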
586 | ### Data Quality
587 |
588 | - Validate node properties before insertion (see the sketch below)
589 | - Implement schema versioning for compatibility
590 | - Use unique constraints to prevent duplicates
591 | - Run regular integrity checks and repairs
592 |
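For example, a pre-insertion validator for project nodes might look like the sketch below. It assumes the `ProjectNode` interface shown earlier; the error-collection strategy is illustrative:

```typescript
// Sketch: validate ProjectNode properties before insertion (illustrative).
function validateProjectNode(node: {
  id: string;
  type: string;
  properties: Record<string, unknown>;
}): string[] {
  const errors: string[] = [];
  if (!node.id) errors.push("missing id");
  if (node.type !== "project") errors.push(`unexpected type: ${node.type}`);
  const { name, path, primaryLanguage } = node.properties;
  if (typeof name !== "string" || name.length === 0)
    errors.push("properties.name must be a non-empty string");
  if (typeof path !== "string") errors.push("properties.path must be a string");
  if (typeof primaryLanguage !== "string")
    errors.push("properties.primaryLanguage must be a string");
  return errors; // an empty array means the node is safe to insert
}
```
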
593 | ### Security and Privacy
594 |
595 | - Encrypt sensitive preference data (see the sketch below)
596 | - Implement access controls for user data
597 | - Keep an audit log of data access and modifications
598 | - Ensure GDPR compliance for user preference management
599 |
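For the encryption point above, one possible approach is authenticated encryption with Node's built-in `crypto` module. This is a minimal sketch, not DocuMCP's implementation; key management (sourcing and rotating the 32-byte `key`) is out of scope here:

```typescript
// Sketch: encrypting preference data at rest with AES-256-GCM (node:crypto).
import { createCipheriv, createDecipheriv, randomBytes } from "node:crypto";

function encryptPreferences(plaintext: string, key: Buffer): string {
  const iv = randomBytes(12); // fresh IV per encryption
  const cipher = createCipheriv("aes-256-gcm", key, iv);
  const encrypted = Buffer.concat([
    cipher.update(plaintext, "utf8"),
    cipher.final(),
  ]);
  const tag = cipher.getAuthTag();
  // Store the IV and auth tag alongside the ciphertext
  return Buffer.concat([iv, tag, encrypted]).toString("base64");
}

function decryptPreferences(payload: string, key: Buffer): string {
  const raw = Buffer.from(payload, "base64");
  const decipher = createDecipheriv("aes-256-gcm", key, raw.subarray(0, 12));
  decipher.setAuthTag(raw.subarray(12, 28));
  return Buffer.concat([
    decipher.update(raw.subarray(28)),
    decipher.final(),
  ]).toString("utf8");
}
```
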
600 | ### Monitoring and Maintenance
601 |
602 | - Monitor query performance and optimization opportunities
603 | - Track knowledge graph growth and memory usage
604 | - Automate backups and disaster recovery
605 | - Version-control schema changes
606 |
607 | ## Troubleshooting
608 |
609 | ### Common Issues
610 |
611 | **Memory Growth**
612 |
613 | - Implement periodic cleanup of old deployment records
614 | - Archive historical data beyond retention period
615 | - Monitor node/edge count growth patterns
616 |
617 | **Query Performance**
618 |
619 | - Add indexes for frequently queried properties
620 | - Optimize complex traversal queries
621 | - Use query result caching for expensive operations
622 |
623 | **Data Consistency**
624 |
625 | - Validate relationships before creation
626 | - Implement transaction-like operations for atomic updates
627 | - Run regular consistency checks and repair tools
628 |
629 | ### Debug Tools
630 |
631 | **Graph Inspector**
632 |
633 | ```typescript
634 | import { graphInspector } from "./debug-tools";
635 |
636 | const stats = await graphInspector.getStatistics();
637 | console.log(`Nodes: ${stats.nodeCount}, Edges: ${stats.edgeCount}`);
638 | console.log(`Storage size: ${stats.storageSize}MB`);
639 |
640 | const orphanedNodes = await graphInspector.findOrphanedNodes();
641 | console.log(`Orphaned nodes: ${orphanedNodes.length}`);
642 | ```
643 |
644 | **Query Profiler**
645 |
646 | ```typescript
647 | const profiler = await graphInspector.profileQuery(complexQuery);
648 | console.log(`Execution time: ${profiler.executionTime}ms`);
649 | console.log(`Nodes traversed: ${profiler.nodesTraversed}`);
650 | console.log(`Optimization suggestions: ${profiler.suggestions}`);
651 | ```
652 |
653 | ## Related Documentation
654 |
655 | - [Memory System](./tutorials/memory-workflows.md) - Overall memory architecture and patterns
656 | - [User Preferences](./reference/mcp-tools.md#manage_preferences) - Preference learning and management
657 | - [Deployment Tracking](./explanation/architecture.md#deployment-tracking) - Deployment outcome analysis
658 | - [Repository Analysis](./how-to/repository-analysis.md) - Project analysis and indexing
659 |
```
--------------------------------------------------------------------------------
/tests/api/mcp-responses.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | // API tests for MCP response format compliance and standardization
2 | import { formatMCPResponse, MCPToolResponse } from "../../src/types/api";
3 |
4 | describe("API Response Standardization Tests", () => {
5 | describe("MCPToolResponse Interface Compliance", () => {
6 | it("should validate successful response structure", () => {
7 | const successResponse: MCPToolResponse<{ data: string }> = {
8 | success: true,
9 | data: { data: "test-data" },
10 | metadata: {
11 | toolVersion: "1.0.0",
12 | executionTime: 100,
13 | timestamp: "2023-01-01T00:00:00.000Z",
14 | },
15 | recommendations: [
16 | {
17 | type: "info",
18 | title: "Test Recommendation",
19 | description: "This is a test recommendation",
20 | },
21 | ],
22 | nextSteps: [
23 | {
24 | action: "Next Action",
25 | toolRequired: "next_tool",
26 | description: "Description of next step",
27 | priority: "high",
28 | },
29 | ],
30 | };
31 |
32 | expect(successResponse.success).toBe(true);
33 | expect(successResponse.data).toBeDefined();
34 | expect(successResponse.metadata).toBeDefined();
35 | expect(successResponse.metadata.toolVersion).toBe("1.0.0");
36 | expect(successResponse.metadata.executionTime).toBe(100);
37 | expect(successResponse.recommendations).toHaveLength(1);
38 | expect(successResponse.nextSteps).toHaveLength(1);
39 | });
40 |
41 | it("should validate error response structure", () => {
42 | const errorResponse: MCPToolResponse = {
43 | success: false,
44 | error: {
45 | code: "TEST_ERROR",
46 | message: "Test error message",
47 | details: { context: "test" },
48 | resolution: "Test resolution steps",
49 | },
50 | metadata: {
51 | toolVersion: "1.0.0",
52 | executionTime: 50,
53 | timestamp: "2023-01-01T00:00:00.000Z",
54 | },
55 | };
56 |
57 | expect(errorResponse.success).toBe(false);
58 | expect(errorResponse.error).toBeDefined();
59 | expect(errorResponse.error!.code).toBe("TEST_ERROR");
60 | expect(errorResponse.error!.message).toBe("Test error message");
61 | expect(errorResponse.error!.resolution).toBe("Test resolution steps");
62 | expect(errorResponse.data).toBeUndefined();
63 | });
64 |
65 | it("should validate recommendation types", () => {
66 | const recommendations = [
67 | {
68 | type: "info" as const,
69 | title: "Info",
70 | description: "Info description",
71 | },
72 | {
73 | type: "warning" as const,
74 | title: "Warning",
75 | description: "Warning description",
76 | },
77 | {
78 | type: "critical" as const,
79 | title: "Critical",
80 | description: "Critical description",
81 | },
82 | ];
83 |
84 | recommendations.forEach((rec) => {
85 | expect(["info", "warning", "critical"]).toContain(rec.type);
86 | expect(rec.title).toBeDefined();
87 | expect(rec.description).toBeDefined();
88 | });
89 | });
90 |
91 | it("should validate next step priorities", () => {
92 | const nextSteps = [
93 | {
94 | action: "Low Priority",
95 | toolRequired: "tool1",
96 | priority: "low" as const,
97 | },
98 | {
99 | action: "Medium Priority",
100 | toolRequired: "tool2",
101 | priority: "medium" as const,
102 | },
103 | {
104 | action: "High Priority",
105 | toolRequired: "tool3",
106 | priority: "high" as const,
107 | },
108 | ];
109 |
110 | nextSteps.forEach((step) => {
111 | expect(["low", "medium", "high"]).toContain(step.priority);
112 | expect(step.action).toBeDefined();
113 | expect(step.toolRequired).toBeDefined();
114 | });
115 | });
116 | });
117 |
118 | describe("formatMCPResponse Function", () => {
119 | it("should format successful response correctly", () => {
120 | const response: MCPToolResponse<{ result: string }> = {
121 | success: true,
122 | data: { result: "success" },
123 | metadata: {
124 | toolVersion: "1.0.0",
125 | executionTime: 123,
126 | timestamp: "2023-01-01T12:00:00.000Z",
127 | },
128 | recommendations: [
129 | {
130 | type: "info",
131 | title: "Success",
132 | description: "Operation completed successfully",
133 | },
134 | ],
135 | nextSteps: [
136 | {
137 | action: "Proceed to next step",
138 | toolRequired: "next_tool",
139 | priority: "medium",
140 | },
141 | ],
142 | };
143 |
144 | const formatted = formatMCPResponse(response);
145 |
146 | expect(formatted.content).toBeDefined();
147 | expect(formatted.content.length).toBeGreaterThan(0);
148 | expect(formatted.isError).toBeFalsy();
149 |
150 | // Check main data is included
151 | const dataContent = formatted.content.find((c) =>
152 | c.text.includes("success"),
153 | );
154 | expect(dataContent).toBeDefined();
155 |
156 | // Check metadata is included
157 | const metadataContent = formatted.content.find((c) =>
158 | c.text.includes("123ms"),
159 | );
160 | expect(metadataContent).toBeDefined();
161 |
162 | // Check recommendations are included
163 | const recommendationContent = formatted.content.find((c) =>
164 | c.text.includes("Recommendations:"),
165 | );
166 | expect(recommendationContent).toBeDefined();
167 |
168 | // Check next steps are included
169 | const nextStepContent = formatted.content.find((c) =>
170 | c.text.includes("Next Steps:"),
171 | );
172 | expect(nextStepContent).toBeDefined();
173 | });
174 |
175 | it("should format error response correctly", () => {
176 | const errorResponse: MCPToolResponse = {
177 | success: false,
178 | error: {
179 | code: "VALIDATION_ERROR",
180 | message: "Input validation failed",
181 | resolution: "Check your input parameters",
182 | },
183 | metadata: {
184 | toolVersion: "1.0.0",
185 | executionTime: 25,
186 | timestamp: "2023-01-01T12:00:00.000Z",
187 | },
188 | };
189 |
190 | const formatted = formatMCPResponse(errorResponse);
191 |
192 | expect(formatted.content).toBeDefined();
193 | expect(formatted.isError).toBe(true);
194 |
195 | // Check error message is included
196 | const errorContent = formatted.content.find((c) =>
197 | c.text.includes("Input validation failed"),
198 | );
199 | expect(errorContent).toBeDefined();
200 |
201 | // Check resolution is included
202 | const resolutionContent = formatted.content.find((c) =>
203 | c.text.includes("Check your input parameters"),
204 | );
205 | expect(resolutionContent).toBeDefined();
206 | });
207 |
208 | it("should handle responses without optional fields", () => {
209 | const minimalResponse: MCPToolResponse<string> = {
210 | success: true,
211 | data: "minimal data",
212 | metadata: {
213 | toolVersion: "1.0.0",
214 | executionTime: 10,
215 | timestamp: "2023-01-01T12:00:00.000Z",
216 | },
217 | };
218 |
219 | const formatted = formatMCPResponse(minimalResponse);
220 |
221 | expect(formatted.content).toBeDefined();
222 | expect(formatted.isError).toBeFalsy();
223 |
224 | // Should not include recommendations or next steps sections
225 | const fullText = formatted.content.map((c) => c.text).join("\n");
226 | expect(fullText).not.toContain("Recommendations:");
227 | expect(fullText).not.toContain("Next Steps:");
228 | });
229 |
230 | it("should include recommendation icons correctly", () => {
231 | const response: MCPToolResponse<{}> = {
232 | success: true,
233 | data: {},
234 | metadata: {
235 | toolVersion: "1.0.0",
236 | executionTime: 10,
237 | timestamp: "2023-01-01T12:00:00.000Z",
238 | },
239 | recommendations: [
240 | { type: "info", title: "Info", description: "Info description" },
241 | {
242 | type: "warning",
243 | title: "Warning",
244 | description: "Warning description",
245 | },
246 | {
247 | type: "critical",
248 | title: "Critical",
249 | description: "Critical description",
250 | },
251 | ],
252 | };
253 |
254 | const formatted = formatMCPResponse(response);
255 | const recommendationText =
256 | formatted.content.find((c) => c.text.includes("Recommendations:"))
257 | ?.text || "";
258 |
259 | expect(recommendationText).toContain("ℹ️"); // Info icon
260 | expect(recommendationText).toContain("⚠️"); // Warning icon
261 | expect(recommendationText).toContain("🔴"); // Critical icon
262 | });
263 |
264 | it("should format next steps without toolRequired but with description", () => {
265 | const response: MCPToolResponse<{}> = {
266 | success: true,
267 | data: {},
268 | metadata: {
269 | toolVersion: "1.0.0",
270 | executionTime: 10,
271 | timestamp: "2023-01-01T12:00:00.000Z",
272 | },
273 | nextSteps: [
274 | {
275 | action: "Manual Step",
276 | description: "This step requires manual intervention",
277 | priority: "high",
278 | },
279 | ],
280 | };
281 |
282 | const formatted = formatMCPResponse(response);
283 | const nextStepText =
284 | formatted.content.find((c) => c.text.includes("Next Steps:"))?.text ||
285 | "";
286 |
287 | expect(nextStepText).toContain("Manual Step");
288 | expect(nextStepText).toContain("This step requires manual intervention");
289 | expect(nextStepText).not.toContain("use "); // Should not have "use" since no toolRequired
290 | });
291 | });
292 |
293 | describe("Response Consistency Across Tools", () => {
294 | it("should ensure all tools follow the same metadata structure", () => {
295 | const commonMetadata = {
296 | toolVersion: "1.0.0",
297 | executionTime: 100,
298 | timestamp: "2023-01-01T12:00:00.000Z",
299 | };
300 |
301 | // Test that metadata structure is consistent
302 | expect(commonMetadata.toolVersion).toMatch(/^\d+\.\d+\.\d+$/);
303 | expect(typeof commonMetadata.executionTime).toBe("number");
304 | expect(commonMetadata.executionTime).toBeGreaterThanOrEqual(0);
305 | expect(commonMetadata.timestamp).toMatch(
306 | /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}/,
307 | );
308 | });
309 |
310 | it("should validate error code consistency", () => {
311 | const errorCodes = [
312 | "ANALYSIS_FAILED",
313 | "RECOMMENDATION_FAILED",
314 | "CONFIG_GENERATION_FAILED",
315 | "STRUCTURE_SETUP_FAILED",
316 | "DEPLOYMENT_SETUP_FAILED",
317 | "VERIFICATION_FAILED",
318 | ];
319 |
320 | errorCodes.forEach((code) => {
321 | expect(code).toMatch(/^[A-Z_]+$/);
322 | expect(code).toContain("_");
323 | expect(code.endsWith("_FAILED")).toBe(true);
324 | });
325 | });
326 |
327 | it("should validate next step tool references", () => {
328 | const validTools = [
329 | "analyze_repository",
330 | "recommend_ssg",
331 | "generate_config",
332 | "setup_structure",
333 | "deploy_pages",
334 | "verify_deployment",
335 | ];
336 |
337 | validTools.forEach((tool) => {
338 | expect(tool).toMatch(/^[a-z_]+$/);
339 | expect(tool).not.toContain("-");
340 | expect(tool).not.toContain(" ");
341 | });
342 | });
343 |
344 | it("should validate recommendation action patterns", () => {
345 | const recommendationActions = [
346 | "Get SSG Recommendation",
347 | "Generate Configuration",
348 | "Setup Documentation Structure",
349 | "Setup GitHub Pages Deployment",
350 | "Verify Deployment Setup",
351 | ];
352 |
353 | recommendationActions.forEach((action) => {
354 | expect(action).toMatch(/^[A-Z]/); // Starts with capital
355 | expect(action.length).toBeGreaterThan(5); // Meaningful length
356 | expect(action.endsWith(".")).toBe(false); // No trailing period
357 | });
358 | });
359 | });
360 |
361 | describe("Backward Compatibility", () => {
362 | it("should maintain MCP content format compatibility", () => {
363 | const response: MCPToolResponse<{ test: boolean }> = {
364 | success: true,
365 | data: { test: true },
366 | metadata: {
367 | toolVersion: "1.0.0",
368 | executionTime: 50,
369 | timestamp: "2023-01-01T12:00:00.000Z",
370 | },
371 | };
372 |
373 | const formatted = formatMCPResponse(response);
374 |
375 | // Must have content array for MCP compatibility
376 | expect(formatted.content).toBeDefined();
377 | expect(Array.isArray(formatted.content)).toBe(true);
378 |
379 | // Each content item must have type and text
380 | formatted.content.forEach((item) => {
381 | expect(item.type).toBe("text");
382 | expect(typeof item.text).toBe("string");
383 | expect(item.text.length).toBeGreaterThan(0);
384 | });
385 | });
386 |
387 | it("should handle legacy response format gracefully", () => {
388 | // Test that we can still process responses that don't have all new fields
389 | const legacyStyleData = {
390 | success: true,
391 | result: "legacy result",
392 | timestamp: "2023-01-01T12:00:00.000Z",
393 | };
394 |
395 | // Should not throw even if not strictly typed
396 | expect(() => {
397 | const formatted = formatMCPResponse({
398 | success: true,
399 | data: legacyStyleData,
400 | metadata: {
401 | toolVersion: "1.0.0",
402 | executionTime: 100,
403 | timestamp: "2023-01-01T12:00:00.000Z",
404 | },
405 | });
406 | return formatted;
407 | }).not.toThrow();
408 | });
409 | });
410 |
411 | describe("Error Boundary Testing", () => {
412 | it("should handle undefined data gracefully", () => {
413 | const response: MCPToolResponse = {
414 | success: true,
415 | // data is undefined
416 | metadata: {
417 | toolVersion: "1.0.0",
418 | executionTime: 10,
419 | timestamp: "2023-01-01T12:00:00.000Z",
420 | },
421 | };
422 |
423 | const formatted = formatMCPResponse(response);
424 | expect(formatted.content).toBeDefined();
425 | expect(formatted.content.length).toBeGreaterThan(0);
426 | });
427 |
428 | it("should handle null values in data", () => {
429 | const response: MCPToolResponse<{ value: null }> = {
430 | success: true,
431 | data: { value: null },
432 | metadata: {
433 | toolVersion: "1.0.0",
434 | executionTime: 10,
435 | timestamp: "2023-01-01T12:00:00.000Z",
436 | },
437 | };
438 |
439 | expect(() => formatMCPResponse(response)).not.toThrow();
440 | });
441 |
442 | it("should handle very large data objects", () => {
443 | const largeData = {
444 | items: Array.from({ length: 1000 }, (_, i) => ({
445 | id: i,
446 | value: `item-${i}`,
447 | })),
448 | };
449 |
450 | const response: MCPToolResponse<typeof largeData> = {
451 | success: true,
452 | data: largeData,
453 | metadata: {
454 | toolVersion: "1.0.0",
455 | executionTime: 1000,
456 | timestamp: "2023-01-01T12:00:00.000Z",
457 | },
458 | };
459 |
460 | const formatted = formatMCPResponse(response);
461 | expect(formatted.content).toBeDefined();
462 |
463 | // Should include the large data in JSON format
464 | const dataContent = formatted.content.find((c) =>
465 | c.text.includes('"items"'),
466 | );
467 | expect(dataContent).toBeDefined();
468 | });
469 |
470 | it("should handle circular references safely", () => {
471 | const circularData: any = { name: "test" };
472 | circularData.self = circularData;
473 |
474 |     // JSON.stringify throws on circular references
475 | expect(() => {
476 | JSON.stringify(circularData);
477 | }).toThrow();
478 |
479 |     // The formatter is exercised with safe, non-circular data below;
480 |     // circular refs should be avoided upstream. This documents that expectation.
481 | const response: MCPToolResponse<any> = {
482 | success: true,
483 | data: { safe: "data" }, // Use safe data instead
484 | metadata: {
485 | toolVersion: "1.0.0",
486 | executionTime: 10,
487 | timestamp: "2023-01-01T12:00:00.000Z",
488 | },
489 | };
490 |
491 | expect(() => formatMCPResponse(response)).not.toThrow();
492 | });
493 | });
494 | });
495 |
```
--------------------------------------------------------------------------------
/tests/memory/manager.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Comprehensive unit tests for Memory Manager
3 | * Tests memory management, search, caching, and context-aware operations
4 | * Part of Issue #54 - Core Memory System Unit Tests
5 | */
6 |
7 | import { promises as fs } from "fs";
8 | import path from "path";
9 | import os from "os";
10 | import {
11 | MemoryManager,
12 | MemoryContext,
13 | MemorySearchOptions,
14 | } from "../../src/memory/manager.js";
15 | import { MemoryEntry } from "../../src/memory/storage.js";
16 |
17 | describe("MemoryManager", () => {
18 | let manager: MemoryManager;
19 | let tempDir: string;
20 |
21 | beforeEach(async () => {
22 | // Create unique temp directory for each test
23 | tempDir = path.join(
24 | os.tmpdir(),
25 | `memory-manager-test-${Date.now()}-${Math.random()
26 | .toString(36)
27 |         .slice(2, 11)}`,
28 | );
29 | await fs.mkdir(tempDir, { recursive: true });
30 | manager = new MemoryManager(tempDir);
31 | await manager.initialize();
32 | });
33 |
34 | afterEach(async () => {
35 | // Cleanup temp directory
36 | try {
37 | await fs.rm(tempDir, { recursive: true, force: true });
38 | } catch (error) {
39 | // Ignore cleanup errors
40 | }
41 | });
42 |
43 | describe("Basic Memory Operations", () => {
44 | test("should create manager instance and initialize", async () => {
45 | expect(manager).toBeDefined();
46 | expect(manager).toBeInstanceOf(MemoryManager);
47 | });
48 |
49 | test("should remember and recall memories", async () => {
50 | const data = {
51 | projectName: "test-project",
52 | language: "typescript",
53 | framework: "react",
54 | };
55 |
56 | const metadata = {
57 | projectId: "test-proj-001",
58 | repository: "github.com/test/repo",
59 | tags: ["frontend", "typescript"],
60 | };
61 |
62 | // Set context to ensure projectId is preserved
63 | manager.setContext({ projectId: "test-proj-001" });
64 |
65 | const memoryEntry = await manager.remember("analysis", data, metadata);
66 | expect(memoryEntry.id).toBeDefined();
67 | expect(typeof memoryEntry.id).toBe("string");
68 |
69 | const recalled = await manager.recall(memoryEntry.id);
70 | expect(recalled).not.toBeNull();
71 | expect(recalled?.data).toEqual(data);
72 | expect(recalled?.metadata.projectId).toBe("test-proj-001");
73 | expect(recalled?.type).toBe("analysis");
74 | });
75 |
76 | test("should return null for non-existent memory", async () => {
77 | const result = await manager.recall("non-existent-id");
78 | expect(result).toBeNull();
79 | });
80 |
81 | test("should forget memories", async () => {
82 | const memoryEntry = await manager.remember("analysis", {
83 | data: "to-forget",
84 | });
85 |
86 | // Verify it exists
87 | const beforeForget = await manager.recall(memoryEntry.id);
88 | expect(beforeForget).not.toBeNull();
89 |
90 | // Forget it
91 | const forgotten = await manager.forget(memoryEntry.id);
92 | expect(forgotten).toBe(true);
93 |
94 | // Verify it's gone
95 | const afterForget = await manager.recall(memoryEntry.id);
96 | expect(afterForget).toBeNull();
97 | });
98 |
99 | test("should return false when forgetting non-existent memory", async () => {
100 | const result = await manager.forget("non-existent-id");
101 | expect(result).toBe(false);
102 | });
103 | });
104 |
105 | describe("Context Management", () => {
106 | test("should set and get context", async () => {
107 | const context: MemoryContext = {
108 | projectId: "context-test",
109 | repository: "github.com/context/repo",
110 | branch: "feature/memory",
111 | user: "test-user",
112 | session: "session-123",
113 | };
114 |
115 | manager.setContext(context);
116 |
117 | const data = { contextTest: true, value: 42 };
118 | const memoryEntry = await manager.remember("analysis", data);
119 |
120 | expect(memoryEntry.metadata.projectId).toBe("context-test");
121 | });
122 |
123 | test("should use context when remembering", async () => {
124 | const context: MemoryContext = {
125 | projectId: "auto-context-test",
126 | repository: "github.com/auto/repo",
127 | };
128 |
129 | manager.setContext(context);
130 |
131 | // Create multiple memories with current context
132 | const memory1 = await manager.remember("analysis", { step: 1 });
133 | const memory2 = await manager.remember("recommendation", { step: 2 });
134 | const memory3 = await manager.remember("deployment", { step: 3 });
135 |
136 | // Verify memories inherit the context
137 | expect(memory1.metadata.projectId).toBe("auto-context-test");
138 | expect(memory2.metadata.projectId).toBe("auto-context-test");
139 | expect(memory3.metadata.projectId).toBe("auto-context-test");
140 |
141 | // Test that we can recall them
142 | const recalled1 = await manager.recall(memory1.id);
143 | expect(recalled1?.metadata.projectId).toBe("auto-context-test");
144 | });
145 | });
146 |
147 | describe("Search Functionality", () => {
148 | test("should handle search operations", async () => {
149 | // Create some test data first
150 | manager.setContext({ projectId: "search-test" });
151 |
152 | await manager.remember(
153 | "analysis",
154 | {
155 | project: "test-search",
156 | language: "typescript",
157 | },
158 | { tags: ["frontend"] },
159 | );
160 |
161 | // Test basic search functionality
162 | const results = await manager.search("");
163 | expect(Array.isArray(results)).toBe(true);
164 |
165 | // Search functionality may be basic, so we just test it doesn't throw
166 | const projectResults = await manager.search({ projectId: "search-test" });
167 | expect(Array.isArray(projectResults)).toBe(true);
168 | });
169 |
170 | test("should handle search with different query types", async () => {
171 | const options: MemorySearchOptions = {
172 | semantic: false,
173 | fuzzy: true,
174 | sortBy: "timestamp",
175 | };
176 |
177 | const results = await manager.search("test", options);
178 | expect(Array.isArray(results)).toBe(true);
179 | });
180 | });
181 |
182 | describe("Memory Analytics", () => {
183 | test("should handle basic memory queries", async () => {
184 | // Create test data
185 | manager.setContext({ projectId: "analytics-test" });
186 |
187 | await manager.remember("analysis", { score: 85 });
188 | await manager.remember("recommendation", { confidence: 0.8 });
189 |
190 | // Test basic search functionality
191 | const allMemories = await manager.search("");
192 | expect(Array.isArray(allMemories)).toBe(true);
193 |
194 | // The number of memories may vary based on implementation
195 | // Just verify the search works and returns memories when they exist
196 | if (allMemories.length > 0) {
197 | expect(allMemories[0]).toHaveProperty("type");
198 | expect(allMemories[0]).toHaveProperty("data");
199 | expect(allMemories[0]).toHaveProperty("metadata");
200 | }
201 | });
202 | });
203 |
204 | describe("Caching and Performance", () => {
205 | test("should handle performance operations", async () => {
206 | // Store test data
207 | manager.setContext({ projectId: "cache-test" });
208 |
209 | await manager.remember("analysis", { cached: true });
210 | await manager.remember("recommendation", { cached: true });
211 |
212 | // Test search performance
213 | const startTime1 = Date.now();
214 | const results1 = await manager.search("");
215 | const time1 = Date.now() - startTime1;
216 |
217 | const startTime2 = Date.now();
218 | const results2 = await manager.search("");
219 | const time2 = Date.now() - startTime2;
220 |
221 | expect(Array.isArray(results1)).toBe(true);
222 | expect(Array.isArray(results2)).toBe(true);
223 |
224 | // Both searches should complete quickly
225 | expect(time1).toBeLessThan(1000);
226 | expect(time2).toBeLessThan(1000);
227 | });
228 |
229 | test("should handle concurrent operations safely", async () => {
230 | const concurrentOps = 10;
231 | const promises: Promise<MemoryEntry>[] = [];
232 |
233 | manager.setContext({ projectId: "concurrent-test" });
234 |
235 | // Create multiple concurrent remember operations
236 | for (let i = 0; i < concurrentOps; i++) {
237 | const promise = manager.remember(
238 | "analysis",
239 | {
240 | index: i,
241 | data: `concurrent-test-${i}`,
242 | },
243 | {
244 | tags: [`tag-${i % 5}`],
245 | },
246 | );
247 | promises.push(promise);
248 | }
249 |
250 | const memoryEntries = await Promise.all(promises);
251 | expect(memoryEntries).toHaveLength(concurrentOps);
252 | expect(new Set(memoryEntries.map((m) => m.id)).size).toBe(concurrentOps); // All IDs should be unique
253 | });
254 | });
255 |
256 | describe("Memory Lifecycle Management", () => {
257 | test("should manage memory entries over time", async () => {
258 | manager.setContext({ projectId: "lifecycle-test" });
259 |
260 | const originalData = { version: 1, status: "draft" };
261 | const memoryEntry = await manager.remember("analysis", originalData);
262 |
263 | expect(memoryEntry.data.version).toBe(1);
264 | expect(memoryEntry.data.status).toBe("draft");
265 |
266 | // Verify persistence
267 | const recalled = await manager.recall(memoryEntry.id);
268 | expect(recalled?.data.version).toBe(1);
269 | expect(recalled?.data.status).toBe("draft");
270 | });
271 |
272 | test("should handle bulk operations efficiently", async () => {
273 | const bulkSize = 20;
274 | const memoryEntries: MemoryEntry[] = [];
275 |
276 | manager.setContext({ projectId: "bulk-test" });
277 |
278 | // Create bulk memories
279 | const startTime = Date.now();
280 | for (let i = 0; i < bulkSize; i++) {
281 | const entry = await manager.remember("analysis", {
282 | index: i,
283 | category: i % 3 === 0 ? "A" : i % 3 === 1 ? "B" : "C",
284 | });
285 | memoryEntries.push(entry);
286 | }
287 | const createTime = Date.now() - startTime;
288 |
289 | expect(createTime).toBeLessThan(5000); // Should complete within 5 seconds
290 | expect(memoryEntries).toHaveLength(bulkSize);
291 |
292 | // Test search functionality
293 | const searchStartTime = Date.now();
294 | const allMemories = await manager.search("");
295 | const searchTime = Date.now() - searchStartTime;
296 |
297 | expect(Array.isArray(allMemories)).toBe(true);
298 | expect(searchTime).toBeLessThan(1000); // Should search within 1 second
299 | });
300 | });
301 |
302 | describe("Error Handling", () => {
303 | test("should handle invalid memory types gracefully", async () => {
304 | // TypeScript should prevent this, but test runtime behavior
305 | const memoryEntry = await manager.remember("configuration", {
306 | test: true,
307 | });
308 | const recalled = await manager.recall(memoryEntry.id);
309 |
310 | expect(recalled?.type).toBe("configuration");
311 | expect(recalled?.data.test).toBe(true);
312 | });
313 |
314 | test("should handle malformed search queries", async () => {
315 | // Test with various edge case queries
316 | const emptyResult = await manager.search("");
317 | expect(Array.isArray(emptyResult)).toBe(true);
318 |
319 | const specialCharsResult = await manager.search("@#$%^&*()[]{}");
320 | expect(Array.isArray(specialCharsResult)).toBe(true);
321 |
322 | const unicodeResult = await manager.search("测试🚀");
323 | expect(Array.isArray(unicodeResult)).toBe(true);
324 | });
325 |
326 | test("should handle memory storage errors", async () => {
327 | // Test with extremely large data that might cause issues
328 | const largeData = {
329 | huge: "x".repeat(100000), // 100KB string
330 | array: new Array(10000)
331 | .fill(0)
332 | .map((_, i) => ({ id: i, data: `item-${i}` })),
333 | };
334 |
335 | // Should handle large data gracefully
336 | const memoryEntry = await manager.remember("analysis", largeData);
337 | expect(memoryEntry.id).toBeDefined();
338 |
339 | const recalled = await manager.recall(memoryEntry.id);
340 | expect(recalled?.data.huge).toHaveLength(100000);
341 | expect(recalled?.data.array).toHaveLength(10000);
342 | });
343 |
344 | test("should handle non-existent memory operations", async () => {
345 | // Test recalling non-existent memory
346 | const nonExistent = await manager.recall("non-existent-id");
347 | expect(nonExistent).toBeNull();
348 |
349 | // Test forgetting non-existent memory
350 | const forgotResult = await manager.forget("non-existent-id");
351 | expect(forgotResult).toBe(false);
352 |
353 | // Test searching with no results
354 | const searchResults = await manager.search("definitely-not-found-12345");
355 | expect(Array.isArray(searchResults)).toBe(true);
356 | expect(searchResults).toHaveLength(0);
357 | });
358 | });
359 |
360 | describe("Event System", () => {
361 | test("should emit events on memory operations", async () => {
362 | let eventCount = 0;
363 | const events: string[] = [];
364 |
365 | manager.on("memory-created", (entry: MemoryEntry) => {
366 | expect(entry.type).toBe("analysis");
367 | expect(entry.data.eventTest).toBe(true);
368 | eventCount++;
369 | events.push("created");
370 | });
371 |
372 | manager.on("memory-deleted", (id: string) => {
373 | expect(typeof id).toBe("string");
374 | eventCount++;
375 | events.push("deleted");
376 | });
377 |
378 | // Trigger events
379 | const memoryEntry = await manager.remember("analysis", {
380 | eventTest: true,
381 | });
382 | await manager.forget(memoryEntry.id);
383 |
384 | // Give events time to fire
385 | await new Promise((resolve) => setTimeout(resolve, 50));
386 |
387 | // Verify events were triggered
388 | expect(eventCount).toBeGreaterThanOrEqual(1); // At least memory-created should fire
389 | expect(events).toContain("created");
390 | });
391 |
392 | test("should emit context change events", () => {
393 | let contextChanged = false;
394 |
395 | manager.on("context-changed", (context: MemoryContext) => {
396 | expect(context.projectId).toBe("event-test");
397 | expect(context.user).toBe("event-user");
398 | contextChanged = true;
399 | });
400 |
401 | manager.setContext({
402 | projectId: "event-test",
403 | user: "event-user",
404 | });
405 |
406 | // Give event time to fire
407 |     // setContext emits synchronously if the event system is implemented;
408 |     // the event is optional, so we only verify that registering a listener
409 |     // and setting the context do not throw. If the event fired, the
410 |     // listener's expectations above already ran.
411 |     expect(typeof contextChanged).toBe("boolean");
412 | });
413 |
414 | describe("Search with Grouping and Sorting", () => {
415 | test("should group results by type", async () => {
416 | await manager.remember("analysis", { test: 1 }, { projectId: "proj1" });
417 | await manager.remember("deployment", { test: 2 }, { projectId: "proj1" });
418 | await manager.remember("analysis", { test: 3 }, { projectId: "proj2" });
419 |
420 | const grouped: any = await manager.search("", { groupBy: "type" });
421 |
422 | expect(grouped).toHaveProperty("analysis");
423 | expect(grouped).toHaveProperty("deployment");
424 | expect(grouped.analysis.length).toBe(2);
425 | expect(grouped.deployment.length).toBe(1);
426 | });
427 |
428 | test("should group results by project", async () => {
429 | manager.setContext({ projectId: "proj1" });
430 | await manager.remember("analysis", { test: 1 });
431 |
432 | manager.setContext({ projectId: "proj2" });
433 | await manager.remember("analysis", { test: 2 });
434 |
435 | const grouped: any = await manager.search("", { groupBy: "project" });
436 |
437 | expect(grouped).toHaveProperty("proj1");
438 | expect(grouped).toHaveProperty("proj2");
439 | });
440 |
441 | test("should group results by date", async () => {
442 | await manager.remember("analysis", { test: 1 }, { projectId: "proj1" });
443 |
444 | const grouped: any = await manager.search("", { groupBy: "date" });
445 |
446 | const today = new Date().toISOString().split("T")[0];
447 | expect(grouped).toHaveProperty(today);
448 | });
449 |
450 | test("should sort results by type", async () => {
451 | await manager.remember("recommendation", { test: 1 }, {});
452 | await manager.remember("analysis", { test: 2 }, {});
453 |
454 | const results = await manager.search("", { sortBy: "type" });
455 |
456 | expect(results[0].type).toBe("analysis");
457 | expect(results[1].type).toBe("recommendation");
458 | });
459 | });
460 | });
461 |
```