# cursor-chat-history-mcp — codebase export (page 2 of 3)

Generated snapshot: 49013/50000 tokens, 12 of 44 files on this page.
This is page 2 of 3. Use http://codebase.md/vltansky/cursor-chat-history-mcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .cursor
│   ├── mcp.json
│   └── rules
│       ├── cursor_rules.mdc
│       ├── dev_workflow.mdc
│       ├── general.mdc
│       ├── mcp.mdc
│       ├── project-overview.mdc
│       ├── self_improve.mdc
│       ├── taskmaster.mdc
│       ├── tests.mdc
│       └── typescript-patterns.mdc
├── .github
│   ├── dependabot.yml
│   └── workflows
│       └── ci.yml
├── .gitignore
├── .roo
│   ├── rules
│   │   ├── dev_workflow.md
│   │   ├── roo_rules.md
│   │   ├── self_improve.md
│   │   └── taskmaster.md
│   ├── rules-architect
│   │   └── architect-rules
│   ├── rules-ask
│   │   └── ask-rules
│   ├── rules-boomerang
│   │   └── boomerang-rules
│   ├── rules-code
│   │   └── code-rules
│   ├── rules-debug
│   │   └── debug-rules
│   └── rules-test
│       └── test-rules
├── .roomodes
├── .taskmaster
│   ├── .taskmaster
│   │   └── config.json
│   ├── config.json
│   └── reports
│       └── task-complexity-report.json
├── .taskmasterconfig
├── .windsurfrules
├── docs
│   ├── research.md
│   └── use-cases.md
├── LICENSE
├── package.json
├── README.md
├── scripts
│   └── example_prd.txt
├── src
│   ├── database
│   │   ├── parser.test.ts
│   │   ├── parser.ts
│   │   ├── reader.test.ts
│   │   ├── reader.ts
│   │   └── types.ts
│   ├── server.test.ts
│   ├── server.ts
│   ├── tools
│   │   ├── analytics-tools.ts
│   │   ├── conversation-tools.test.ts
│   │   ├── conversation-tools.ts
│   │   └── extraction-tools.ts
│   └── utils
│       ├── analytics.ts
│       ├── cache.test.ts
│       ├── cache.ts
│       ├── database-utils.test.ts
│       ├── database-utils.ts
│       ├── errors.test.ts
│       ├── errors.ts
│       ├── exporters.ts
│       ├── formatter.ts
│       ├── relationships.ts
│       ├── validation.test.ts
│       └── validation.ts
├── tsconfig.json
├── vitest.config.ts
└── yarn.lock
```

# Files

--------------------------------------------------------------------------------
/src/utils/database-utils.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { homedir, platform } from 'os';
  2 | import { join, resolve } from 'path';
  3 | import { existsSync } from 'fs';
  4 | import type { CursorDatabasePaths, DatabaseConfig } from '../database/types.js';
  5 | 
  6 | // Platform-specific database paths (lazy-loaded to support testing)
  7 | export function getCursorDatabasePaths(): CursorDatabasePaths {
  8 |   return {
  9 |     macOS: join(homedir(), 'Library/Application Support/Cursor/User/globalStorage/state.vscdb'),
 10 |     windows: join(homedir(), 'AppData/Roaming/Cursor/User/globalStorage/state.vscdb'),
 11 |     linux: join(homedir(), '.config/Cursor/User/globalStorage/state.vscdb')
 12 |   };
 13 | }
 14 | 
 15 | /**
 16 |  * Detect the current operating system
 17 |  * @returns The detected operating system as a string
 18 |  */
 19 | export function detectOperatingSystem(): 'macOS' | 'windows' | 'linux' | 'unknown' {
 20 |   const currentPlatform = platform();
 21 | 
 22 |   switch (currentPlatform) {
 23 |     case 'darwin':
 24 |       return 'macOS';
 25 |     case 'win32':
 26 |       return 'windows';
 27 |     case 'linux':
 28 |       return 'linux';
 29 |     default:
 30 |       return 'unknown';
 31 |   }
 32 | }
 33 | 
 34 | /**
 35 |  * Get the default database path for a specific operating system
 36 |  * @param os The operating system identifier
 37 |  * @returns The default database path for the OS
 38 |  */
 39 | export function getDefaultDatabasePath(os: string): string {
 40 |   const paths = getCursorDatabasePaths();
 41 |   switch (os) {
 42 |     case 'macOS':
 43 |     case 'darwin':
 44 |       return paths.macOS;
 45 |     case 'windows':
 46 |     case 'win32':
 47 |       return paths.windows;
 48 |     case 'linux':
 49 |       return paths.linux;
 50 |     default:
 51 |       // Fallback to Linux path for unknown operating systems
 52 |       return paths.linux;
 53 |   }
 54 | }
 55 | 
 56 | /**
 57 |  * Check if the database file exists at the specified path
 58 |  * @param path The path to verify
 59 |  * @returns Object with verification result and optional error message
 60 |  */
 61 | export function verifyDatabasePath(path: string): { exists: boolean; error?: string } {
 62 |   try {
 63 |     if (!path) {
 64 |       return { exists: false, error: 'Database path is empty' };
 65 |     }
 66 | 
 67 |     const resolvedPath = resolve(path);
 68 |     const exists = existsSync(resolvedPath);
 69 | 
 70 |     if (!exists) {
 71 |       console.warn(`Database file not found at: ${resolvedPath}`);
 72 |       return {
 73 |         exists: false,
 74 |         error: `Database file not found at: ${resolvedPath}. Make sure Cursor is installed and has been used to create conversations.`
 75 |       };
 76 |     }
 77 | 
 78 |     return { exists: true };
 79 |   } catch (error) {
 80 |     const errorMessage = error instanceof Error ? error.message : 'Unknown error occurred';
 81 |     return {
 82 |       exists: false,
 83 |       error: `Error verifying database path: ${errorMessage}`
 84 |     };
 85 |   }
 86 | }
 87 | 
 88 | /**
 89 |  * Get user-configured database path from environment variables or configuration
 90 |  * @returns The user-configured path if found, null otherwise
 91 |  */
 92 | export function getUserConfiguredDatabasePath(): string | null {
 93 |   // Check environment variable first
 94 |   const envPath = process.env.CURSOR_DB_PATH;
 95 |   if (envPath) {
 96 |     const resolvedPath = resolve(envPath.replace(/^~/, homedir()));
 97 |     const verification = verifyDatabasePath(resolvedPath);
 98 |     if (verification.exists) {
 99 |       return resolvedPath;
100 |     } else {
101 |       console.warn(`User-configured database path is invalid: ${verification.error}`);
102 |     }
103 |   }
104 | 
105 |   return null;
106 | }
107 | 
/**
 * Main function to detect the appropriate Cursor database path.
 * Resolution order:
 *   1. CURSOR_DB_PATH environment variable (only if it points at an existing file)
 *   2. The OS-specific default location
 *   3. A short list of historical/alternative fallback locations
 * Every candidate is verified on disk before being returned.
 * @returns The resolved database path
 * @throws Error listing every candidate tried when none of them exist
 */
export function detectDatabasePath(): string {
  // 1. Check for user-configured path first
  const userConfiguredPath = getUserConfiguredDatabasePath();
  if (userConfiguredPath) {
    return userConfiguredPath;
  }

  // 2. Detect OS and use default path
  const os = detectOperatingSystem();
  const defaultPath = getDefaultDatabasePath(os);
  const resolvedPath = resolve(defaultPath);

  // 3. Verify the default path exists
  const verification = verifyDatabasePath(resolvedPath);
  if (verification.exists) {
    return resolvedPath;
  }

  // 4. Implement fallback mechanisms
  console.warn(`Default database path verification failed: ${verification.error}`);

  // Try alternative common locations as fallbacks
  const fallbackPaths = getFallbackDatabasePaths(os);
  for (const fallbackPath of fallbackPaths) {
    const resolvedFallback = resolve(fallbackPath);
    const fallbackVerification = verifyDatabasePath(resolvedFallback);
    if (fallbackVerification.exists) {
      console.log(`Using fallback database path: ${resolvedFallback}`);
      return resolvedFallback;
    }
  }

  // If no valid path found, throw descriptive error listing everything tried
  throw new Error(
    `Unable to locate Cursor database file. Tried:\n` +
    `- User configured: ${process.env.CURSOR_DB_PATH || 'Not set'}\n` +
    `- Default (${os}): ${resolvedPath}\n` +
    `- Fallback paths: ${fallbackPaths.join(', ')}\n\n` +
    `Please ensure Cursor is installed and has been used to create conversations, ` +
    `or set the CURSOR_DB_PATH environment variable to the correct database location.`
  );
}
156 | 
157 | /**
158 |  * Get fallback database paths for the given operating system
159 |  * @param os The operating system identifier
160 |  * @returns Array of fallback paths to try
161 |  */
162 | function getFallbackDatabasePaths(os: string): string[] {
163 |   const fallbacks: string[] = [];
164 | 
165 |   switch (os) {
166 |     case 'macOS':
167 |     case 'darwin':
168 |       fallbacks.push(
169 |         join(homedir(), 'Library/Application Support/Cursor/cursor.db'),
170 |         join(homedir(), 'Library/Application Support/Cursor/User/cursor.db'),
171 |         join(homedir(), 'Library/Application Support/Cursor/state.vscdb')
172 |       );
173 |       break;
174 |     case 'windows':
175 |     case 'win32':
176 |       fallbacks.push(
177 |         join(homedir(), 'AppData/Roaming/Cursor/cursor.db'),
178 |         join(homedir(), 'AppData/Roaming/Cursor/User/cursor.db'),
179 |         join(homedir(), 'AppData/Roaming/Cursor/state.vscdb')
180 |       );
181 |       break;
182 |     case 'linux':
183 |       fallbacks.push(
184 |         join(homedir(), '.config/Cursor/cursor.db'),
185 |         join(homedir(), '.config/Cursor/User/cursor.db'),
186 |         join(homedir(), '.config/Cursor/state.vscdb')
187 |       );
188 |       break;
189 |     default:
190 |       // For unknown OS, try Linux-style paths
191 |       fallbacks.push(
192 |         join(homedir(), '.config/Cursor/cursor.db'),
193 |         join(homedir(), '.config/Cursor/User/cursor.db'),
194 |         join(homedir(), '.config/Cursor/state.vscdb')
195 |       );
196 |   }
197 | 
198 |   return fallbacks;
199 | }
200 | 
/**
 * Automatically detect the Cursor database path for the current platform.
 * Thin alias kept for backward compatibility with older callers.
 * @deprecated Use detectDatabasePath() instead for more robust detection
 */
export function detectCursorDatabasePath(): string {
  return detectDatabasePath();
}
208 | 
209 | /**
210 |  * Validate that the database path exists and is accessible
211 |  * @deprecated Use verifyDatabasePath() instead for consistent error handling
212 |  */
213 | export function validateDatabasePath(dbPath: string): { valid: boolean; error?: string } {
214 |   const verification = verifyDatabasePath(dbPath);
215 |   return {
216 |     valid: verification.exists,
217 |     error: verification.error
218 |   };
219 | }
220 | 
221 | /**
222 |  * Create default database configuration
223 |  */
224 | export function createDefaultDatabaseConfig(customDbPath?: string): DatabaseConfig {
225 |   const dbPath = customDbPath || detectDatabasePath();
226 | 
227 |   return {
228 |     dbPath,
229 |     maxConversations: 1000,
230 |     cacheEnabled: true,
231 |     minConversationSize: 100, // Reduced from 5000 to capture more conversations
232 |     resolveBubblesAutomatically: true
233 |   };
234 | }
235 | 
236 | /**
237 |  * Extract composer ID from a composerData key
238 |  */
239 | export function extractComposerIdFromKey(key: string): string | null {
240 |   const match = key.match(/^composerData:(.+)$/);
241 |   return match ? match[1] : null;
242 | }
243 | 
244 | /**
245 |  * Extract bubble ID components from a bubbleId key
246 |  */
247 | export function extractBubbleIdComponents(key: string): { composerId: string; bubbleId: string } | null {
248 |   const match = key.match(/^bubbleId:([^:]+):(.+)$/);
249 |   return match ? { composerId: match[1], bubbleId: match[2] } : null;
250 | }
251 | 
252 | /**
253 |  * Generate a bubbleId key for modern format message lookup
254 |  */
255 | export function generateBubbleIdKey(composerId: string, bubbleId: string): string {
256 |   return `bubbleId:${composerId}:${bubbleId}`;
257 | }
258 | 
259 | /**
260 |  * Check if a key is a composerData key
261 |  */
262 | export function isComposerDataKey(key: string): boolean {
263 |   return key.startsWith('composerData:');
264 | }
265 | 
266 | /**
267 |  * Check if a key is a bubbleId key
268 |  */
269 | export function isBubbleIdKey(key: string): boolean {
270 |   return key.startsWith('bubbleId:');
271 | }
272 | 
273 | /**
274 |  * Sanitize and validate conversation size filter
275 |  */
276 | export function sanitizeMinConversationSize(size?: number): number {
277 |   if (typeof size !== 'number' || size < 0) {
278 |     return 100; // Default minimum size (reduced from 5000)
279 |   }
280 |   return Math.floor(size);
281 | }
282 | 
283 | /**
284 |  * Sanitize and validate limit parameter
285 |  */
286 | export function sanitizeLimit(limit?: number, maxLimit: number = 1000): number {
287 |   if (typeof limit !== 'number' || limit <= 0) {
288 |     return maxLimit; // Default to max limit instead of 10
289 |   }
290 |   return Math.min(Math.floor(limit), maxLimit);
291 | }
292 | 
/**
 * Create SQL LIKE pattern for file pattern matching.
 * Escapes literal '%' and '_' with a backslash, then maps glob '*' -> '%'
 * and '?' -> '_'.
 * NOTE(review): backslash-escaping only takes effect if the consuming query
 * appends an ESCAPE '\' clause — confirm against the SQL built by callers.
 * NOTE(review): a literal backslash in the input is not itself escaped and
 * could combine with a following %/_ — verify whether inputs may contain '\'.
 */
export function createFilePatternLike(pattern: string): string {
  // Escape SQL special characters and convert glob patterns
  return pattern
    .replace(/[%_]/g, '\\$&')  // Escape SQL wildcards
    .replace(/\*/g, '%')       // Convert * to SQL %
    .replace(/\?/g, '_');      // Convert ? to SQL _
}
303 | 
304 | /**
305 |  * Validate and sanitize search query
306 |  */
307 | export function sanitizeSearchQuery(query: string): string {
308 |   if (typeof query !== 'string') {
309 |     throw new Error('Search query must be a string');
310 |   }
311 | 
312 |   const trimmed = query.trim();
313 |   if (trimmed.length === 0) {
314 |     throw new Error('Search query cannot be empty');
315 |   }
316 | 
317 |   if (trimmed.length > 1000) {
318 |     throw new Error('Search query is too long (max 1000 characters)');
319 |   }
320 | 
321 |   return trimmed;
322 | }
```

--------------------------------------------------------------------------------
/src/utils/cache.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
  2 | import { Cache, createCache, CachePresets } from './cache.js';
  3 | 
// Unit tests for the Cache class: basic CRUD, TTL expiry (fake timers),
// size-limit eviction (LRU/FIFO), hit/miss statistics, automatic cleanup,
// and generic typing.
// NOTE(review): several tests reassign `cache` to a new instance after
// beforeEach already created one; afterEach then destroys only the
// replacement, so the beforeEach instance is never destroy()ed.
describe('Cache', () => {
  let cache: Cache;

  beforeEach(() => {
    cache = new Cache();
  });

  afterEach(() => {
    cache.destroy();
  });

  describe('Basic Operations', () => {
    it('should store and retrieve values', () => {
      cache.set('key1', 'value1');
      expect(cache.get('key1')).toBe('value1');
    });

    it('should return undefined for non-existent keys', () => {
      expect(cache.get('nonexistent')).toBeUndefined();
    });

    it('should check if key exists', () => {
      cache.set('key1', 'value1');
      expect(cache.has('key1')).toBe(true);
      expect(cache.has('nonexistent')).toBe(false);
    });

    it('should delete entries', () => {
      cache.set('key1', 'value1');
      expect(cache.has('key1')).toBe(true);

      const deleted = cache.delete('key1');
      expect(deleted).toBe(true);
      expect(cache.has('key1')).toBe(false);
      expect(cache.get('key1')).toBeUndefined();
    });

    it('should return false when deleting non-existent key', () => {
      const deleted = cache.delete('nonexistent');
      expect(deleted).toBe(false);
    });

    it('should clear all entries', () => {
      cache.set('key1', 'value1');
      cache.set('key2', 'value2');
      expect(cache.size()).toBe(2);

      cache.clear();
      expect(cache.size()).toBe(0);
      expect(cache.has('key1')).toBe(false);
      expect(cache.has('key2')).toBe(false);
    });

    it('should track cache size', () => {
      expect(cache.size()).toBe(0);

      cache.set('key1', 'value1');
      expect(cache.size()).toBe(1);

      cache.set('key2', 'value2');
      expect(cache.size()).toBe(2);

      cache.delete('key1');
      expect(cache.size()).toBe(1);
    });

    it('should get all keys', () => {
      cache.set('key1', 'value1');
      cache.set('key2', 'value2');

      const keys = cache.keys();
      expect(keys).toContain('key1');
      expect(keys).toContain('key2');
      expect(keys).toHaveLength(2);
    });

    it('should get all values', () => {
      cache.set('key1', 'value1');
      cache.set('key2', 'value2');

      const values = cache.values();
      expect(values).toContain('value1');
      expect(values).toContain('value2');
      expect(values).toHaveLength(2);
    });
  });

  // All TTL tests run under vitest fake timers so expiry is deterministic.
  describe('TTL (Time-To-Live)', () => {
    beforeEach(() => {
      vi.useFakeTimers();
    });

    afterEach(() => {
      vi.useRealTimers();
    });

    it('should expire entries after TTL', () => {
      cache = new Cache({ defaultTTL: 1000, enableCleanup: false });

      cache.set('key1', 'value1');
      expect(cache.get('key1')).toBe('value1');

      // Advance time by 1001ms (past TTL)
      vi.advanceTimersByTime(1001);

      expect(cache.get('key1')).toBeUndefined();
      expect(cache.has('key1')).toBe(false);
    });

    it('should use custom TTL for individual entries', () => {
      cache = new Cache({ defaultTTL: 5000, enableCleanup: false });

      cache.set('key1', 'value1', 1000); // Custom TTL of 1 second
      cache.set('key2', 'value2'); // Uses default TTL of 5 seconds

      // Advance time by 1001ms
      vi.advanceTimersByTime(1001);

      expect(cache.get('key1')).toBeUndefined(); // Should be expired
      expect(cache.get('key2')).toBe('value2'); // Should still exist
    });

    it('should handle entries with no TTL (never expire)', () => {
      cache = new Cache({ defaultTTL: 0, enableCleanup: false });

      cache.set('key1', 'value1');

      // Advance time significantly
      vi.advanceTimersByTime(10000);

      expect(cache.get('key1')).toBe('value1');
    });

    it('should manually cleanup expired entries', () => {
      cache = new Cache({ defaultTTL: 1000, enableCleanup: false });

      cache.set('key1', 'value1');
      cache.set('key2', 'value2');
      expect(cache.size()).toBe(2);

      // Advance time past TTL
      vi.advanceTimersByTime(1001);

      const cleanedCount = cache.cleanup();
      expect(cleanedCount).toBe(2);
      expect(cache.size()).toBe(0);
    });
  });

  describe('Size Limits and Eviction', () => {
    it('should evict entries when max size is reached (LRU)', () => {
      cache = new Cache({ maxSize: 2, evictionPolicy: 'lru', enableCleanup: false });

      cache.set('key1', 'value1');
      cache.set('key2', 'value2');
      expect(cache.size()).toBe(2);

      // Access key1 to make it more recently used
      cache.get('key1');

      // Add third entry, should evict key2 (least recently used)
      cache.set('key3', 'value3');
      expect(cache.size()).toBe(2);
      expect(cache.has('key1')).toBe(true);
      expect(cache.has('key2')).toBe(false);
      expect(cache.has('key3')).toBe(true);
    });

    it('should evict entries when max size is reached (FIFO)', () => {
      cache = new Cache({ maxSize: 2, evictionPolicy: 'fifo', enableCleanup: false });

      cache.set('key1', 'value1');
      cache.set('key2', 'value2');
      expect(cache.size()).toBe(2);

      // Access key1 (shouldn't matter for FIFO)
      cache.get('key1');

      // Add third entry, should evict key1 (first in)
      cache.set('key3', 'value3');
      expect(cache.size()).toBe(2);
      expect(cache.has('key1')).toBe(false);
      expect(cache.has('key2')).toBe(true);
      expect(cache.has('key3')).toBe(true);
    });

    it('should handle updating existing keys without eviction', () => {
      cache = new Cache({ maxSize: 2, enableCleanup: false });

      cache.set('key1', 'value1');
      cache.set('key2', 'value2');
      expect(cache.size()).toBe(2);

      // Update existing key
      cache.set('key1', 'updated_value1');
      expect(cache.size()).toBe(2);
      expect(cache.get('key1')).toBe('updated_value1');
      expect(cache.has('key2')).toBe(true);
    });
  });

  describe('Statistics', () => {
    it('should track hits and misses', () => {
      cache.set('key1', 'value1');

      // Hit
      cache.get('key1');

      // Miss
      cache.get('nonexistent');

      const stats = cache.getStats();
      expect(stats.hits).toBe(1);
      expect(stats.misses).toBe(1);
      expect(stats.hitRate).toBe(50);
    });

    it('should track evictions', () => {
      cache = new Cache({ maxSize: 1, enableCleanup: false });

      cache.set('key1', 'value1');
      cache.set('key2', 'value2'); // Should evict key1

      const stats = cache.getStats();
      expect(stats.evictions).toBe(1);
    });

    it('should track expirations', () => {
      vi.useFakeTimers();
      cache = new Cache({ defaultTTL: 1000, enableCleanup: false });

      cache.set('key1', 'value1');

      // Advance time past TTL
      vi.advanceTimersByTime(1001);

      // Try to access expired entry
      cache.get('key1');

      const stats = cache.getStats();
      expect(stats.expirations).toBe(1);

      vi.useRealTimers();
    });

    it('should reset statistics', () => {
      cache.set('key1', 'value1');
      cache.get('key1'); // Hit
      cache.get('nonexistent'); // Miss

      let stats = cache.getStats();
      expect(stats.hits).toBe(1);
      expect(stats.misses).toBe(1);

      cache.resetStats();

      stats = cache.getStats();
      expect(stats.hits).toBe(0);
      expect(stats.misses).toBe(0);
      expect(stats.hitRate).toBe(0);
    });
  });

  describe('Automatic Cleanup', () => {
    beforeEach(() => {
      vi.useFakeTimers();
    });

    afterEach(() => {
      vi.useRealTimers();
    });

    it('should automatically cleanup expired entries', () => {
      cache = new Cache({
        defaultTTL: 1000,
        enableCleanup: true,
        cleanupInterval: 500
      });

      cache.set('key1', 'value1');
      expect(cache.size()).toBe(1);

      // Advance time past TTL but before cleanup interval
      vi.advanceTimersByTime(1001);
      expect(cache.size()).toBe(1); // Still there, cleanup hasn't run

      // Advance time to trigger cleanup
      vi.advanceTimersByTime(500);
      expect(cache.size()).toBe(0); // Should be cleaned up
    });

    it('should stop cleanup timer when destroyed', () => {
      cache = new Cache({ enableCleanup: true, cleanupInterval: 100 });

      const clearIntervalSpy = vi.spyOn(global, 'clearInterval');

      cache.destroy();

      expect(clearIntervalSpy).toHaveBeenCalled();
    });
  });

  describe('Type Safety', () => {
    it('should work with typed values', () => {
      interface User {
        id: number;
        name: string;
      }

      const userCache = new Cache<User>();
      const user: User = { id: 1, name: 'John' };

      userCache.set('user1', user);
      const retrieved = userCache.get('user1');

      expect(retrieved).toEqual(user);
      expect(retrieved?.id).toBe(1);
      expect(retrieved?.name).toBe('John');
    });
  });
});
325 | 
// Factory helper: createCache should yield a fully functional Cache instance.
describe('createCache', () => {
  it('should create a cache instance', () => {
    const cache = createCache({ maxSize: 100 });
    expect(cache).toBeInstanceOf(Cache);

    cache.set('test', 'value');
    expect(cache.get('test')).toBe('value');

    cache.destroy();
  });
});
337 | 
// Preset configurations: pin the expected sizes and eviction policies so
// accidental changes to CachePresets are caught by the suite.
describe('CachePresets', () => {
  it('should provide predefined configurations', () => {
    expect(CachePresets.small.maxSize).toBe(100);
    expect(CachePresets.medium.maxSize).toBe(500);
    expect(CachePresets.large.maxSize).toBe(2000);
    expect(CachePresets.persistent.maxSize).toBe(1000);

    expect(CachePresets.small.evictionPolicy).toBe('lru');
    expect(CachePresets.persistent.evictionPolicy).toBe('fifo');
  });

  it('should work with preset configurations', () => {
    const cache = new Cache(CachePresets.small);

    cache.set('test', 'value');
    expect(cache.get('test')).toBe('value');

    const stats = cache.getStats();
    expect(stats.maxSize).toBe(100);

    cache.destroy();
  });
});
```

--------------------------------------------------------------------------------
/src/database/parser.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import type {
  2 |   CursorConversation,
  3 |   LegacyCursorConversation,
  4 |   ModernCursorConversation,
  5 |   ConversationMessage,
  6 |   BubbleMessage,
  7 |   CodeBlock
  8 | } from './types.js';
  9 | import {
 10 |   isLegacyConversation,
 11 |   isModernConversation
 12 | } from './types.js';
 13 | 
 14 | export class ConversationParser {
 15 |   /**
 16 |    * Parse conversation JSON data
 17 |    */
 18 |   parseConversationJSON(rawData: string): CursorConversation {
 19 |     try {
 20 |       const parsed = JSON.parse(rawData);
 21 | 
 22 |       if (!this.isValidConversation(parsed)) {
 23 |         throw new Error('Invalid conversation format');
 24 |       }
 25 | 
 26 |       return parsed as CursorConversation;
 27 |     } catch (error) {
 28 |       throw new Error(`Failed to parse conversation JSON: ${error instanceof Error ? error.message : 'Unknown error'}`);
 29 |     }
 30 |   }
 31 | 
 32 |   /**
 33 |    * Validate conversation structure
 34 |    */
 35 |   private isValidConversation(data: any): boolean {
 36 |     if (!data || typeof data !== 'object') {
 37 |       return false;
 38 |     }
 39 | 
 40 |     if (typeof data.composerId !== 'string') {
 41 |       return false;
 42 |     }
 43 | 
 44 |     if (Array.isArray(data.conversation)) {
 45 |       return this.isValidLegacyConversation(data);
 46 |     }
 47 | 
 48 |     if (typeof data._v === 'number' && Array.isArray(data.fullConversationHeadersOnly)) {
 49 |       return this.isValidModernConversation(data);
 50 |     }
 51 | 
 52 |     return false;
 53 |   }
 54 | 
 55 |   /**
 56 |    * Validate legacy conversation format
 57 |    */
 58 |   private isValidLegacyConversation(data: any): boolean {
 59 |     if (!Array.isArray(data.conversation)) {
 60 |       return false;
 61 |     }
 62 | 
 63 |     for (const message of data.conversation) {
 64 |       if (!this.isValidMessage(message)) {
 65 |         return false;
 66 |       }
 67 |     }
 68 | 
 69 |     return true;
 70 |   }
 71 | 
 72 |   /**
 73 |    * Validate modern conversation format
 74 |    */
 75 |   private isValidModernConversation(data: any): boolean {
 76 |     if (!Array.isArray(data.fullConversationHeadersOnly)) {
 77 |       return false;
 78 |     }
 79 | 
 80 |     for (const header of data.fullConversationHeadersOnly) {
 81 |       if (!this.isValidConversationHeader(header)) {
 82 |         return false;
 83 |       }
 84 |     }
 85 | 
 86 |     return true;
 87 |   }
 88 | 
 89 |   /**
 90 |    * Validate message structure
 91 |    */
 92 |   private isValidMessage(message: any): boolean {
 93 |     return (
 94 |       message &&
 95 |       typeof message === 'object' &&
 96 |       typeof message.type === 'number' &&
 97 |       typeof message.bubbleId === 'string' &&
 98 |       typeof message.text === 'string'
 99 |     );
100 |   }
101 | 
102 |   /**
103 |    * Validate conversation header structure
104 |    */
105 |   private isValidConversationHeader(header: any): boolean {
106 |     return (
107 |       header &&
108 |       typeof header === 'object' &&
109 |       typeof header.type === 'number' &&
110 |       typeof header.bubbleId === 'string'
111 |     );
112 |   }
113 | 
114 |   /**
115 |    * Extract messages from conversation (legacy format only)
116 |    */
117 |   extractMessages(conversation: CursorConversation): ConversationMessage[] {
118 |     if (isLegacyConversation(conversation)) {
119 |       return conversation.conversation;
120 |     }
121 | 
122 |     // For modern format, messages need to be resolved separately
123 |     return [];
124 |   }
125 | 
126 |   /**
127 |    * Extract code blocks from conversation
128 |    */
129 |   extractCodeBlocks(conversation: CursorConversation): CodeBlock[] {
130 |     const codeBlocks: CodeBlock[] = [];
131 | 
132 |     if (isLegacyConversation(conversation)) {
133 |       for (const message of conversation.conversation) {
134 |         if (message.suggestedCodeBlocks) {
135 |           codeBlocks.push(...message.suggestedCodeBlocks);
136 |         }
137 |       }
138 |     }
139 | 
140 |     return codeBlocks;
141 |   }
142 | 
143 |   /**
144 |    * Extract file references from conversation
145 |    */
146 |   extractFileReferences(conversation: CursorConversation): string[] {
147 |     const files: string[] = [];
148 | 
149 |     if (isLegacyConversation(conversation)) {
150 |       for (const message of conversation.conversation) {
151 |         if (message.relevantFiles) {
152 |           files.push(...message.relevantFiles);
153 |         }
154 |       }
155 |     }
156 | 
157 |     return Array.from(new Set(files));
158 |   }
159 | 
160 |   /**
161 |    * Extract attached folder references from conversation
162 |    */
163 |   extractAttachedFolders(conversation: CursorConversation): string[] {
164 |     const folders: string[] = [];
165 | 
166 |     if (isLegacyConversation(conversation)) {
167 |       for (const message of conversation.conversation) {
168 |         if (message.attachedFoldersNew) {
169 |           folders.push(...message.attachedFoldersNew);
170 |         }
171 |       }
172 |     }
173 | 
174 |     return Array.from(new Set(folders));
175 |   }
176 | 
177 |   /**
178 |    * Extract timestamps from conversation (limited availability)
179 |    */
180 |   extractTimestamps(conversation: CursorConversation): Date[] {
181 |     const timestamps: Date[] = [];
182 | 
183 |     if (isLegacyConversation(conversation)) {
184 |       for (const message of conversation.conversation) {
185 |         if (message.timestamp) {
186 |           try {
187 |             const date = new Date(message.timestamp);
188 |             if (!isNaN(date.getTime())) {
189 |               timestamps.push(date);
190 |             }
191 |           } catch (error) {
192 |             // Skip invalid timestamps
193 |           }
194 |         }
195 |       }
196 |     }
197 | 
198 |     return timestamps;
199 |   }
200 | 
201 |   /**
202 |    * Get conversation metadata
203 |    */
204 |   getConversationMetadata(conversation: CursorConversation): {
205 |     format: 'legacy' | 'modern';
206 |     messageCount: number;
207 |     hasCodeBlocks: boolean;
208 |     codeBlockCount: number;
209 |     fileCount: number;
210 |     folderCount: number;
211 |     hasStoredSummary: boolean;
212 |     size: number;
213 |   } {
214 |     const format = isLegacyConversation(conversation) ? 'legacy' : 'modern';
215 |     const size = JSON.stringify(conversation).length;
216 | 
217 |     let messageCount = 0;
218 |     let codeBlockCount = 0;
219 |     let fileCount = 0;
220 |     let folderCount = 0;
221 | 
222 |     if (isLegacyConversation(conversation)) {
223 |       messageCount = conversation.conversation.length;
224 | 
225 |       for (const message of conversation.conversation) {
226 |         if (message.suggestedCodeBlocks) {
227 |           codeBlockCount += message.suggestedCodeBlocks.length;
228 |         }
229 |         if (message.relevantFiles) {
230 |           fileCount += message.relevantFiles.length;
231 |         }
232 |         if (message.attachedFoldersNew) {
233 |           folderCount += message.attachedFoldersNew.length;
234 |         }
235 |       }
236 |     } else if (isModernConversation(conversation)) {
237 |       messageCount = conversation.fullConversationHeadersOnly.length;
238 |       // Note: For modern format, accurate counts would require resolving bubble messages
239 |     }
240 | 
241 |     const hasCodeBlocks = codeBlockCount > 0;
242 |     const hasStoredSummary = !!(conversation.text || conversation.richText || (conversation as any).storedSummary);
243 | 
244 |     return {
245 |       format,
246 |       messageCount,
247 |       hasCodeBlocks,
248 |       codeBlockCount,
249 |       fileCount,
250 |       folderCount,
251 |       hasStoredSummary,
252 |       size
253 |     };
254 |   }
255 | 
256 |   /**
257 |    * Extract user messages only
258 |    */
259 |   extractUserMessages(conversation: CursorConversation): ConversationMessage[] {
260 |     if (isLegacyConversation(conversation)) {
261 |       return conversation.conversation.filter(message => message.type === 1);
262 |     }
263 | 
264 |     return [];
265 |   }
266 | 
267 |   /**
268 |    * Extract AI messages only
269 |    */
270 |   extractAIMessages(conversation: CursorConversation): ConversationMessage[] {
271 |     if (isLegacyConversation(conversation)) {
272 |       return conversation.conversation.filter(message => message.type === 2);
273 |     }
274 | 
275 |     return [];
276 |   }
277 | 
278 |   /**
279 |    * Get first user message
280 |    */
281 |   getFirstUserMessage(conversation: CursorConversation): ConversationMessage | null {
282 |     if (isLegacyConversation(conversation)) {
283 |       const userMessages = conversation.conversation.filter(message => message.type === 1);
284 |       return userMessages.length > 0 ? userMessages[0] : null;
285 |     }
286 | 
287 |     return null;
288 |   }
289 | 
290 |   /**
291 |    * Get last message
292 |    */
293 |   getLastMessage(conversation: CursorConversation): ConversationMessage | null {
294 |     if (isLegacyConversation(conversation)) {
295 |       const messages = conversation.conversation;
296 |       return messages.length > 0 ? messages[messages.length - 1] : null;
297 |     }
298 | 
299 |     return null;
300 |   }
301 | 
302 |   /**
303 |    * Search for text within conversation messages
304 |    */
305 |   searchInConversation(conversation: CursorConversation, query: string, caseSensitive: boolean = false): {
306 |     messageIndex: number;
307 |     message: ConversationMessage;
308 |     matchPositions: number[];
309 |   }[] {
310 |     const results: {
311 |       messageIndex: number;
312 |       message: ConversationMessage;
313 |       matchPositions: number[];
314 |     }[] = [];
315 | 
316 |     if (isLegacyConversation(conversation)) {
317 |       const searchQuery = caseSensitive ? query : query.toLowerCase();
318 | 
319 |       conversation.conversation.forEach((message, index) => {
320 |         const text = caseSensitive ? message.text : message.text.toLowerCase();
321 |         const matchPositions: number[] = [];
322 | 
323 |         let position = 0;
324 |         while (position < text.length) {
325 |           const found = text.indexOf(searchQuery, position);
326 |           if (found === -1) break;
327 | 
328 |           matchPositions.push(found);
329 |           position = found + 1;
330 |         }
331 | 
332 |         if (matchPositions.length > 0) {
333 |           results.push({
334 |             messageIndex: index,
335 |             message,
336 |             matchPositions
337 |           });
338 |         }
339 |       });
340 |     }
341 | 
342 |     return results;
343 |   }
344 | 
345 |   /**
346 |    * Check if conversation contains summarization content
347 |    */
348 |   containsSummarization(conversation: CursorConversation): boolean {
349 |     const summarizationKeywords = ['summarization', 'summarize', 'summary'];
350 | 
351 |     if (isLegacyConversation(conversation)) {
352 |       for (const message of conversation.conversation) {
353 |         const text = message.text.toLowerCase();
354 |         if (summarizationKeywords.some(keyword => text.includes(keyword))) {
355 |           return true;
356 |         }
357 |       }
358 |     }
359 | 
360 |     // Also check stored summary fields
361 |     const text = conversation.text?.toLowerCase() || '';
362 |     const richText = conversation.richText?.toLowerCase() || '';
363 | 
364 |     return summarizationKeywords.some(keyword =>
365 |       text.includes(keyword) || richText.includes(keyword)
366 |     );
367 |   }
368 | 
369 |   /**
370 |    * Parse bubble message JSON
371 |    */
372 |   parseBubbleMessage(rawData: string): BubbleMessage {
373 |     try {
374 |       const parsed = JSON.parse(rawData);
375 | 
376 |       if (!this.isValidBubbleMessage(parsed)) {
377 |         throw new Error('Invalid bubble message format');
378 |       }
379 | 
380 |       return parsed as BubbleMessage;
381 |     } catch (error) {
382 |       throw new Error(`Failed to parse bubble message JSON: ${error instanceof Error ? error.message : 'Unknown error'}`);
383 |     }
384 |   }
385 | 
386 |   /**
387 |    * Validate bubble message structure
388 |    */
389 |   private isValidBubbleMessage(data: any): boolean {
390 |     return (
391 |       data &&
392 |       typeof data === 'object' &&
393 |       typeof data.type === 'number' &&
394 |       typeof data.text === 'string'
395 |     );
396 |     }
397 | }
```

--------------------------------------------------------------------------------
/src/utils/database-utils.test.ts:
--------------------------------------------------------------------------------

```typescript
// Unit tests for database-utils: OS detection, default/fallback database path
// resolution, path verification, and config creation. The `os` and `fs`
// modules are mocked so every test controls platform, home directory, and
// file existence; the CURSOR_DB_PATH env var is cleared before each test.
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { homedir, platform } from 'os';
import { existsSync } from 'fs';
import { join, resolve } from 'path';
import {
  detectOperatingSystem,
  getDefaultDatabasePath,
  verifyDatabasePath,
  getUserConfiguredDatabasePath,
  detectDatabasePath,
  detectCursorDatabasePath,
  validateDatabasePath,
  createDefaultDatabaseConfig,
  getCursorDatabasePaths
} from './database-utils.js';

// Mock the os module
vi.mock('os', () => ({
  platform: vi.fn(),
  homedir: vi.fn()
}));

// Mock the fs module
vi.mock('fs', () => ({
  existsSync: vi.fn()
}));

// Mock console methods to avoid noise in tests
// NOTE(review): vi.mock('console', ...) mocks a *module* named 'console',
// not the global console object — presumably intended to silence output;
// verify this has any effect (vi.spyOn(console, 'warn') may be needed).
vi.mock('console', () => ({
  warn: vi.fn(),
  log: vi.fn()
}));

const mockPlatform = vi.mocked(platform);
const mockHomedir = vi.mocked(homedir);
const mockExistsSync = vi.mocked(existsSync);

describe('Database Utils', () => {
  // Reset all mocks and make sure CURSOR_DB_PATH never leaks between tests.
  beforeEach(() => {
    vi.clearAllMocks();
    mockHomedir.mockReturnValue('/home/testuser');
    delete process.env.CURSOR_DB_PATH;
  });

  afterEach(() => {
    vi.restoreAllMocks();
  });

  describe('detectOperatingSystem', () => {
    it('should detect macOS correctly', () => {
      mockPlatform.mockReturnValue('darwin');
      expect(detectOperatingSystem()).toBe('macOS');
    });

    it('should detect Windows correctly', () => {
      mockPlatform.mockReturnValue('win32');
      expect(detectOperatingSystem()).toBe('windows');
    });

    it('should detect Linux correctly', () => {
      mockPlatform.mockReturnValue('linux');
      expect(detectOperatingSystem()).toBe('linux');
    });

    it('should return unknown for unrecognized platforms', () => {
      mockPlatform.mockReturnValue('freebsd');
      expect(detectOperatingSystem()).toBe('unknown');
    });
  });

  describe('getDefaultDatabasePath', () => {
    beforeEach(() => {
      mockHomedir.mockReturnValue('/home/testuser');
    });

    // Both the friendly name ('macOS') and the raw platform id ('darwin')
    // are accepted by getDefaultDatabasePath.
    it('should return macOS path for macOS', () => {
      const expected = join('/home/testuser', 'Library/Application Support/Cursor/User/globalStorage/state.vscdb');
      expect(getDefaultDatabasePath('macOS')).toBe(expected);
      expect(getDefaultDatabasePath('darwin')).toBe(expected);
    });

    it('should return Windows path for Windows', () => {
      const expected = join('/home/testuser', 'AppData/Roaming/Cursor/User/globalStorage/state.vscdb');
      expect(getDefaultDatabasePath('windows')).toBe(expected);
      expect(getDefaultDatabasePath('win32')).toBe(expected);
    });

    it('should return Linux path for Linux', () => {
      const expected = join('/home/testuser', '.config/Cursor/User/globalStorage/state.vscdb');
      expect(getDefaultDatabasePath('linux')).toBe(expected);
    });

    it('should fallback to Linux path for unknown OS', () => {
      const expected = join('/home/testuser', '.config/Cursor/User/globalStorage/state.vscdb');
      expect(getDefaultDatabasePath('unknown')).toBe(expected);
    });
  });

  describe('verifyDatabasePath', () => {
    it('should return exists: false for empty path', () => {
      const result = verifyDatabasePath('');
      expect(result.exists).toBe(false);
      expect(result.error).toBe('Database path is empty');
    });

    it('should return exists: true when file exists', () => {
      mockExistsSync.mockReturnValue(true);
      const result = verifyDatabasePath('/path/to/db.vscdb');
      expect(result.exists).toBe(true);
      expect(result.error).toBeUndefined();
    });

    it('should return exists: false when file does not exist', () => {
      mockExistsSync.mockReturnValue(false);
      const result = verifyDatabasePath('/path/to/nonexistent.vscdb');
      expect(result.exists).toBe(false);
      expect(result.error).toContain('Database file not found');
    });

    it('should handle file system errors gracefully', () => {
      mockExistsSync.mockImplementation(() => {
        throw new Error('Permission denied');
      });
      const result = verifyDatabasePath('/path/to/db.vscdb');
      expect(result.exists).toBe(false);
      expect(result.error).toContain('Error verifying database path: Permission denied');
    });
  });

  describe('getUserConfiguredDatabasePath', () => {
    it('should return null when no environment variable is set', () => {
      expect(getUserConfiguredDatabasePath()).toBeNull();
    });

    it('should return resolved path when environment variable is set and file exists', () => {
      process.env.CURSOR_DB_PATH = '~/custom/path/db.vscdb';
      mockHomedir.mockReturnValue('/home/testuser');
      mockExistsSync.mockReturnValue(true);

      const result = getUserConfiguredDatabasePath();
      expect(result).toBe(resolve('/home/testuser/custom/path/db.vscdb'));
    });

    it('should return null when environment variable is set but file does not exist', () => {
      process.env.CURSOR_DB_PATH = '~/custom/path/nonexistent.vscdb';
      mockHomedir.mockReturnValue('/home/testuser');
      mockExistsSync.mockReturnValue(false);

      const result = getUserConfiguredDatabasePath();
      expect(result).toBeNull();
    });

    it('should handle absolute paths correctly', () => {
      process.env.CURSOR_DB_PATH = '/absolute/path/db.vscdb';
      mockExistsSync.mockReturnValue(true);

      const result = getUserConfiguredDatabasePath();
      expect(result).toBe(resolve('/absolute/path/db.vscdb'));
    });
  });

  describe('detectDatabasePath', () => {
    beforeEach(() => {
      mockHomedir.mockReturnValue('/home/testuser');
      mockPlatform.mockReturnValue('linux');
    });

    // Precedence: env-configured path wins over the platform default.
    it('should return user-configured path when available', () => {
      process.env.CURSOR_DB_PATH = '/custom/path/db.vscdb';
      mockExistsSync.mockReturnValue(true);

      const result = detectDatabasePath();
      expect(result).toBe(resolve('/custom/path/db.vscdb'));
    });

    it('should return default path when user config is not available but default exists', () => {
      mockExistsSync.mockReturnValue(true);

      const result = detectDatabasePath();
      const expectedPath = resolve(join('/home/testuser', '.config/Cursor/User/globalStorage/state.vscdb'));
      expect(result).toBe(expectedPath);
    });

    it('should try fallback paths when default does not exist', () => {
      // First call (default path) returns false, second call (fallback) returns true
      mockExistsSync
        .mockReturnValueOnce(false) // Default path doesn't exist
        .mockReturnValueOnce(true); // First fallback exists

      const result = detectDatabasePath();
      // Should find the first fallback path
      expect(result).toBeDefined();
      expect(mockExistsSync).toHaveBeenCalledTimes(2);
    });

    it('should throw error when no valid path is found', () => {
      mockExistsSync.mockReturnValue(false); // All paths fail

      expect(() => detectDatabasePath()).toThrow('Unable to locate Cursor database file');
    });

    it('should work correctly for macOS', () => {
      mockPlatform.mockReturnValue('darwin');
      mockExistsSync.mockReturnValue(true);

      const result = detectDatabasePath();
      const expectedPath = resolve(join('/home/testuser', 'Library/Application Support/Cursor/User/globalStorage/state.vscdb'));
      expect(result).toBe(expectedPath);
    });

    it('should work correctly for Windows', () => {
      mockPlatform.mockReturnValue('win32');
      mockExistsSync.mockReturnValue(true);

      const result = detectDatabasePath();
      const expectedPath = resolve(join('/home/testuser', 'AppData/Roaming/Cursor/User/globalStorage/state.vscdb'));
      expect(result).toBe(expectedPath);
    });
  });

  describe('validateDatabasePath (deprecated)', () => {
    it('should work as a wrapper around verifyDatabasePath', () => {
      mockExistsSync.mockReturnValue(true);
      const result = validateDatabasePath('/path/to/db.vscdb');
      expect(result.valid).toBe(true);
      expect(result.error).toBeUndefined();
    });

    it('should return invalid for non-existent files', () => {
      mockExistsSync.mockReturnValue(false);
      const result = validateDatabasePath('/path/to/nonexistent.vscdb');
      expect(result.valid).toBe(false);
      expect(result.error).toContain('Database file not found');
    });
  });

  describe('detectCursorDatabasePath (deprecated)', () => {
    it('should work as a wrapper around detectDatabasePath', () => {
      mockPlatform.mockReturnValue('linux');
      mockHomedir.mockReturnValue('/home/testuser');
      mockExistsSync.mockReturnValue(true);

      const result = detectCursorDatabasePath();
      const expectedPath = resolve(join('/home/testuser', '.config/Cursor/User/globalStorage/state.vscdb'));
      expect(result).toBe(expectedPath);
    });
  });

  describe('createDefaultDatabaseConfig', () => {
    beforeEach(() => {
      mockPlatform.mockReturnValue('linux');
      mockHomedir.mockReturnValue('/home/testuser');
      mockExistsSync.mockReturnValue(true);
    });

    it('should use custom path when provided', () => {
      const customPath = '/custom/path/db.vscdb';
      const config = createDefaultDatabaseConfig(customPath);

      expect(config.dbPath).toBe(customPath);
      expect(config.maxConversations).toBe(1000);
      expect(config.cacheEnabled).toBe(true);
      expect(config.minConversationSize).toBe(5000);
      expect(config.resolveBubblesAutomatically).toBe(true);
    });

    it('should detect path automatically when no custom path provided', () => {
      const config = createDefaultDatabaseConfig();

      const expectedPath = resolve(join('/home/testuser', '.config/Cursor/User/globalStorage/state.vscdb'));
      expect(config.dbPath).toBe(expectedPath);
    });
  });

    describe('getCursorDatabasePaths function', () => {
    it('should return correct paths for all platforms', () => {
      // Mock homedir for consistent testing
      mockHomedir.mockReturnValue('/home/testuser');

      const paths = getCursorDatabasePaths();
      expect(paths.macOS).toBe(
        join('/home/testuser', 'Library/Application Support/Cursor/User/globalStorage/state.vscdb')
      );
      expect(paths.windows).toBe(
        join('/home/testuser', 'AppData/Roaming/Cursor/User/globalStorage/state.vscdb')
      );
      expect(paths.linux).toBe(
        join('/home/testuser', '.config/Cursor/User/globalStorage/state.vscdb')
      );
    });
  });

  describe('Edge cases and error handling', () => {
    it('should handle null/undefined paths gracefully', () => {
      const result = verifyDatabasePath(null as any);
      expect(result.exists).toBe(false);
      expect(result.error).toContain('Database path is empty');
    });

    it('should handle environment variable with tilde expansion', () => {
      process.env.CURSOR_DB_PATH = '~/Documents/cursor.db';
      mockHomedir.mockReturnValue('/Users/testuser');
      mockExistsSync.mockReturnValue(true);

      const result = getUserConfiguredDatabasePath();
      expect(result).toBe(resolve('/Users/testuser/Documents/cursor.db'));
    });

    it('should handle unknown operating systems with fallback', () => {
      mockPlatform.mockReturnValue('aix');
      mockHomedir.mockReturnValue('/home/testuser');
      mockExistsSync.mockReturnValue(true);

      const result = detectDatabasePath();
      // Should fallback to Linux-style path
      const expectedPath = resolve(join('/home/testuser', '.config/Cursor/User/globalStorage/state.vscdb'));
      expect(result).toBe(expectedPath);
    });
  });
});
```

--------------------------------------------------------------------------------
/src/database/types.ts:
--------------------------------------------------------------------------------

```typescript
// Type definitions for Cursor chat data
// Supports both legacy and modern conversation formats

// Raw key/value record shape as read from the database.
export interface CursorDiskKV {
  key: string;
  value: string;
}

// Key patterns in the Cursor database
export type CursorKeyPatterns = {
  composerData: `composerData:${string}`;
  bubbleId: `bubbleId:${string}:${string}`;
  messageRequestContext: `messageRequestContext:${string}:${string}`;
  checkpointId: `checkpointId:${string}`;
  codeBlockDiff: `codeBlockDiff:${string}`;
};

// Legacy format conversation structure
export interface LegacyCursorConversation {
  composerId: string;
  conversation: ConversationMessage[];
  hasLoaded: boolean;
  text: string;                        // May contain conversation summary (often empty)
  richText: string;                    // May contain formatted summary (often empty)
}

// Modern format conversation structure
// Unlike the legacy format, messages are stored out-of-line as bubbles and
// only their headers appear inline (fullConversationHeadersOnly).
export interface ModernCursorConversation {
  _v: number;                          // Version field (e.g., 3)
  composerId: string;
  richText: string;                    // May contain formatted summary (often empty)
  hasLoaded: boolean;
  text: string;                        // May contain conversation summary (often empty)
  fullConversationHeadersOnly: ConversationHeader[];
  name?: string;                       // Conversation title (Modern format only)
  latestConversationSummary?: {        // AI-generated summary structure
    summary: {
      summary: string;                 // The actual AI-generated summary text
    };
  };
  context?: {                          // Context information including file selections
    fileSelections?: Array<{
      uri: {
        fsPath: string;                // Full file system path
        path: string;                  // Path (usually same as fsPath)
      };
    }>;
    // ... other context fields may exist
  };
}

// Union type for both conversation formats
export type CursorConversation = LegacyCursorConversation | ModernCursorConversation;

// Message structure for legacy format
export interface ConversationMessage {
  type: number;                        // 1 = user, 2 = AI
  bubbleId: string;
  attachedFoldersNew: string[];
  suggestedCodeBlocks: CodeBlock[];
  relevantFiles: string[];
  text: string;                        // Message content
  timestamp?: string;
  context?: {                          // Context information including file selections
    fileSelections?: Array<{
      uri: {
        fsPath: string;                // Full file system path
        path: string;                  // Path (usually same as fsPath)
      };
    }>;
    // ... other context fields may exist
  };
}

// Header structure for modern format
export interface ConversationHeader {
  bubbleId: string;
  type: number;                        // 1 = user, 2 = AI
  serverBubbleId?: string;             // For AI responses
}

// Individual message for modern format (stored separately)
export interface BubbleMessage {
  text: string;                        // Message content
  type: number;
  attachedFoldersNew?: string[];
  suggestedCodeBlocks?: CodeBlock[];
  relevantFiles?: string[];
  timestamp?: string;
  context?: {                          // Context information including file selections
    fileSelections?: Array<{
      uri: {
        fsPath: string;                // Full file system path
        path: string;                  // Path (usually same as fsPath)
      };
    }>;
    // ... other context fields may exist
  };
}

// Code block structure
export interface CodeBlock {
  language: string;
  code: string;
  filename?: string;
}

// Conversation summary data
export interface ConversationSummary {
  composerId: string;
  format: 'legacy' | 'modern';
  messageCount: number;
  hasCodeBlocks: boolean;
  codeBlockCount: number;
  relevantFiles: string[];
  attachedFolders: string[];
  firstMessage?: string;               // Truncated first user message
  lastMessage?: string;                // Last message in conversation
  storedSummary?: string;              // From text field if available
  storedRichText?: string;             // From richText field if available
  title?: string;                      // From 'name' field (Modern format only)
  aiGeneratedSummary?: string;         // From 'latestConversationSummary.summary.summary'
  conversationSize: number;            // Size in bytes
}

// Search result structure
// NOTE(review): the include*/max* option fields below look copy-pasted from
// SummaryOptions into a *result* type — verify whether they belong here or
// whether this interface should only carry match data.
export interface ConversationSearchResult {
  composerId: string;
  format: 'legacy' | 'modern';
  matches: SearchMatch[];
  relevantFiles: string[];
  attachedFolders: string[];
  maxLastMessageLength?: number;             // Max length for last message
  includeStoredSummary?: boolean;            // Include text/richText fields
  includeFileList?: boolean;                 // Include relevant files
  includeCodeBlockCount?: boolean;           // Count code blocks
  includeAttachedFolders?: boolean;          // Include attached folders
  includeMetadata?: boolean;                 // Include metadata information
  includeTitle?: boolean;                    // Include conversation title (Modern format)
  includeAIGeneratedSummary?: boolean;       // Include AI-generated summary (Modern format)
}

export interface SearchMatch {
  messageIndex?: number;               // For legacy format
  bubbleId?: string;                   // For modern format
  text: string;
  context: string;                     // Surrounding text
  type: number;                        // 1 = user, 2 = AI
}

// Statistics structure
export interface ConversationStats {
  totalConversations: number;
  legacyFormatCount: number;
  modernFormatCount: number;
  averageConversationSize: number;
  totalConversationsWithCode: number;
  mostCommonFiles: Array<{ file: string; count: number }>;
  mostCommonFolders: Array<{ folder: string; count: number }>;
}

// Filter options for conversation queries
export interface ConversationFilters {
  dateRange?: { start: Date; end: Date };    // ⚠️ Limited - no reliable timestamps
  minLength?: number;                        // Filter by conversation size
  keywords?: string[];                       // Search in conversation content
  projectPath?: string;                      // Filter by attached folders
  relevantFiles?: string[];                  // Filter by specific files mentioned
  filePattern?: string;                      // Filter by file pattern (e.g., "*.tsx")
  hasCodeBlocks?: boolean;                   // Filter conversations with code
  format?: 'legacy' | 'modern' | 'both';    // Filter by conversation format
}

// Summary options
export interface SummaryOptions {
  includeFirstMessage?: boolean;             // Include truncated first message
  includeLastMessage?: boolean;              // Include last message
  maxFirstMessageLength?: number;            // Max length for first message
  maxLastMessageLength?: number;             // Max length for last message
  includeStoredSummary?: boolean;            // Include text/richText fields
  includeFileList?: boolean;                 // Include relevant files
  includeCodeBlockCount?: boolean;           // Count code blocks
  includeAttachedFolders?: boolean;          // Include attached folders
  includeMetadata?: boolean;                 // Include metadata information
  includeTitle?: boolean;                    // Include conversation title (Modern format)
  includeAIGeneratedSummary?: boolean;       // Include AI-generated summary (Modern format)
}

// Database configuration
export interface DatabaseConfig {
  dbPath: string;
  maxConversations?: number;                 // Limit for performance
  cacheEnabled?: boolean;                    // Cache frequently accessed data
  minConversationSize?: number;              // Minimum size to consider valid
  resolveBubblesAutomatically?: boolean;     // Auto-resolve bubble messages
}

// Platform-specific database paths
export interface CursorDatabasePaths {
  macOS: string;
  windows: string;
  linux: string;
}
204 | 
205 | // Type guards for format detection
206 | export function isLegacyConversation(conversation: any): conversation is LegacyCursorConversation {
207 |   return conversation &&
208 |          typeof conversation.composerId === 'string' &&
209 |          Array.isArray(conversation.conversation) &&
210 |          !conversation._v;
211 | }
212 | 
213 | export function isModernConversation(conversation: any): conversation is ModernCursorConversation {
214 |   return conversation &&
215 |          typeof conversation.composerId === 'string' &&
216 |          typeof conversation._v === 'number' &&
217 |          Array.isArray(conversation.fullConversationHeadersOnly);
218 | }
219 | 
220 | // New types for analytics tools
221 | 
/**
 * Aggregated statistics over a set of conversations, produced by the
 * analytics tools.
 */
export interface ConversationAnalytics {
  /** Totals and averages across every conversation in scope. */
  overview: {
    totalConversations: number;
    totalMessages: number;
    totalCodeBlocks: number;
    averageConversationSize: number;
    averageMessagesPerConversation: number;
    totalFiles: number;
    totalFolders: number;
  };
  /** Per-dimension breakdowns; each group is optional. */
  breakdowns: {
    /** File-mention counts plus the conversations each file appears in. */
    files?: Array<{
      file: string;
      mentions: number;
      conversations: string[];
      extension: string;
      projectPath?: string;
    }>;
    /** Code-block statistics per programming language. */
    languages?: Array<{
      language: string;
      codeBlocks: number;
      conversations: string[];
      averageCodeLength: number;
    }>;
    /** Activity bucketed by time period (period string format set elsewhere). */
    temporal?: Array<{
      period: string;
      conversationCount: number;
      messageCount: number;
      averageSize: number;
      conversationIds: string[];
    }>;
    /** Conversation-size distribution: raw values, percentiles, histogram bins. */
    size?: {
      distribution: number[];
      percentiles: Record<string, number>;
      bins: Array<{ range: string; count: number }>;
    };
  };
  /** Describes which subset of the database was analyzed. */
  scope: {
    type: string;
    projectPath?: string;
    recentDays?: number;
    totalScanned: number;
  };
  // Include conversation IDs for follow-up analysis
  conversationIds: string[];
  // Include basic conversation info for immediate access
  conversations: Array<{
    composerId: string;
    messageCount: number;
    size: number;
    files: string[];
    hasCodeBlocks: boolean;
  }>;
}
276 | 
/**
 * Result of a related-conversations query: the reference conversation's
 * key attributes plus scored candidate conversations.
 */
export interface RelatedConversationsResult {
  /** The conversation the relationships were computed against. */
  reference: {
    composerId: string;
    files: string[];
    folders: string[];
    languages: string[];
    messageCount: number;
    size: number;
  };
  /** Candidate conversations with a composite score and its components. */
  related: Array<{
    composerId: string;
    relationshipScore: number;
    /** Relationship signals to the reference; each component is optional. */
    relationships: {
      sharedFiles?: string[];
      sharedFolders?: string[];
      sharedLanguages?: string[];
      sizeSimilarity?: number;
      temporalProximity?: number;
    };
    /** Short textual description of the match. */
    summary: string;
    /** Per-signal score contributions — presumably sums to relationshipScore; confirm in scorer. */
    scoreBreakdown?: Record<string, number>;
  }>;
}
300 | 
/**
 * Structured elements extracted from one or more conversations:
 * file/folder references, language stats, code blocks, metadata and
 * message-flow structure.
 */
export interface ExtractedElements {
  conversations: Array<{
    composerId: string;
    /** Storage format the conversation was read from. */
    format: 'legacy' | 'modern';
    /** Extracted element groups; each is optional. */
    elements: {
      /** Referenced file paths with optional surrounding context. */
      files?: Array<{
        path: string;
        extension: string;
        context?: string;
        messageType: 'user' | 'assistant';
      }>;
      /** Referenced folder paths. */
      folders?: Array<{
        path: string;
        context?: string;
      }>;
      /** Per-language code-block statistics. */
      languages?: Array<{
        language: string;
        codeBlocks: number;
        totalLines: number;
        averageLength: number;
      }>;
      /** Individual code blocks with their source text. */
      codeblocks?: Array<{
        language: string;
        code: string;
        filename?: string;
        lineCount: number;
        messageType: 'user' | 'assistant';
        context?: string;
      }>;
      /** Counts and flags summarizing the whole conversation. */
      metadata?: {
        messageCount: number;
        size: number;
        format: 'legacy' | 'modern';
        userMessages: number;
        assistantMessages: number;
        hasCodeBlocks: boolean;
        hasFileReferences: boolean;
      };
      /** Shape of the dialogue: per-message flow plus aggregate lengths. */
      structure?: {
        messageFlow: Array<{ type: 'user' | 'assistant'; length: number; hasCode: boolean }>;
        conversationPattern: string;
        averageMessageLength: number;
        longestMessage: number;
      };
    };
  }>;
}
348 | 
/**
 * Envelope for exported conversation data in a caller-chosen format.
 */
export interface ExportedData {
  /** Export format identifier string. */
  format: string;
  /** Format-dependent payload; typed `any` because its shape varies by format. */
  data: any;
  metadata: {
    /** Number of conversations included in this export. */
    exportedCount: number;
    /** Number of conversations that matched before any export limit. */
    totalAvailable: number;
    /** Timestamp of the export — presumably ISO-8601; confirm in the exporter. */
    exportTimestamp: string;
    /** The filter arguments that produced this export. */
    filters: Record<string, any>;
  };
}
```

--------------------------------------------------------------------------------
/src/utils/validation.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { describe, it, expect } from 'vitest';
  2 | import {
  3 |   validateRequired,
  4 |   validateStringLength,
  5 |   validateNumberRange,
  6 |   validateArrayLength,
  7 |   validateEnum,
  8 |   validateConversationId,
  9 |   validateBubbleId,
 10 |   validateSearchQuery,
 11 |   validateFilePath,
 12 |   validateProjectPath,
 13 |   validateWithSchema,
 14 |   validateBoolean,
 15 |   validateLimit,
 16 |   validateOffset,
 17 |   validateContextLines
 18 | } from './validation.js';
 19 | import {
 20 |   MissingParameterError,
 21 |   InvalidParameterError,
 22 |   ValidationError
 23 | } from './errors.js';
 24 | import { z } from 'zod';
 25 | 
/**
 * Unit tests for the validation helpers in ./validation.js.
 *
 * Pattern shared by most suites below:
 *  - valid input is returned unchanged;
 *  - `undefined` is either passed through (optional params) or replaced
 *    with a documented default;
 *  - invalid input throws MissingParameterError, InvalidParameterError,
 *    or ValidationError as appropriate.
 */
describe('Validation Utils', () => {
  describe('validateRequired', () => {
    it('should return value when present', () => {
      expect(validateRequired('test', 'param')).toBe('test');
      expect(validateRequired(123, 'param')).toBe(123);
      // Falsy-but-present values still count as present.
      expect(validateRequired(false, 'param')).toBe(false);
      expect(validateRequired([], 'param')).toEqual([]);
    });

    it('should throw MissingParameterError for undefined', () => {
      expect(() => validateRequired(undefined, 'param')).toThrow(MissingParameterError);
      expect(() => validateRequired(undefined, 'param')).toThrow('Missing required parameter: param');
    });

    it('should throw MissingParameterError for null', () => {
      expect(() => validateRequired(null, 'param')).toThrow(MissingParameterError);
      expect(() => validateRequired(null, 'param')).toThrow('Missing required parameter: param');
    });

    it('should throw InvalidParameterError for empty string', () => {
      // Whitespace-only strings are treated as empty.
      expect(() => validateRequired('', 'param')).toThrow(InvalidParameterError);
      expect(() => validateRequired('   ', 'param')).toThrow(InvalidParameterError);
    });
  });

  describe('validateStringLength', () => {
    it('should return undefined for undefined input', () => {
      expect(validateStringLength(undefined, 'param')).toBeUndefined();
    });

    it('should validate string length correctly', () => {
      // Bounds are inclusive at both ends.
      expect(validateStringLength('test', 'param', 1, 10)).toBe('test');
      expect(validateStringLength('a', 'param', 1, 10)).toBe('a');
      expect(validateStringLength('1234567890', 'param', 1, 10)).toBe('1234567890');
    });

    it('should throw for non-string input', () => {
      expect(() => validateStringLength(123 as any, 'param')).toThrow(InvalidParameterError);
      expect(() => validateStringLength([] as any, 'param')).toThrow(InvalidParameterError);
    });

    it('should throw for string too short', () => {
      expect(() => validateStringLength('', 'param', 1)).toThrow(InvalidParameterError);
      expect(() => validateStringLength('ab', 'param', 3)).toThrow(InvalidParameterError);
    });

    it('should throw for string too long', () => {
      expect(() => validateStringLength('toolong', 'param', 1, 5)).toThrow(InvalidParameterError);
    });

    it('should use default minLength of 1', () => {
      expect(() => validateStringLength('', 'param')).toThrow(InvalidParameterError);
      expect(validateStringLength('a', 'param')).toBe('a');
    });
  });

  describe('validateNumberRange', () => {
    it('should return undefined for undefined input', () => {
      expect(validateNumberRange(undefined, 'param')).toBeUndefined();
    });

    it('should validate number range correctly', () => {
      // Bounds are inclusive at both ends.
      expect(validateNumberRange(5, 'param', 1, 10)).toBe(5);
      expect(validateNumberRange(1, 'param', 1, 10)).toBe(1);
      expect(validateNumberRange(10, 'param', 1, 10)).toBe(10);
    });

    it('should throw for non-number input', () => {
      // NaN is a number typeof-wise but must still be rejected.
      expect(() => validateNumberRange('5' as any, 'param')).toThrow(InvalidParameterError);
      expect(() => validateNumberRange(NaN, 'param')).toThrow(InvalidParameterError);
    });

    it('should throw for number too small', () => {
      expect(() => validateNumberRange(0, 'param', 1)).toThrow(InvalidParameterError);
      expect(() => validateNumberRange(-5, 'param', 0, 10)).toThrow(InvalidParameterError);
    });

    it('should throw for number too large', () => {
      expect(() => validateNumberRange(11, 'param', 1, 10)).toThrow(InvalidParameterError);
    });

    it('should work with only min or max specified', () => {
      expect(validateNumberRange(5, 'param', 1)).toBe(5);
      expect(validateNumberRange(5, 'param', undefined, 10)).toBe(5);
    });
  });

  describe('validateArrayLength', () => {
    it('should return undefined for undefined input', () => {
      expect(validateArrayLength(undefined, 'param')).toBeUndefined();
    });

    it('should validate array length correctly', () => {
      expect(validateArrayLength([1, 2, 3], 'param', 1, 5)).toEqual([1, 2, 3]);
      expect(validateArrayLength([], 'param', 0, 5)).toEqual([]);
      expect(validateArrayLength([1], 'param', 1, 1)).toEqual([1]);
    });

    it('should throw for non-array input', () => {
      expect(() => validateArrayLength('not array' as any, 'param')).toThrow(InvalidParameterError);
      expect(() => validateArrayLength(123 as any, 'param')).toThrow(InvalidParameterError);
    });

    it('should throw for array too short', () => {
      expect(() => validateArrayLength([], 'param', 1)).toThrow(InvalidParameterError);
      expect(() => validateArrayLength([1], 'param', 2)).toThrow(InvalidParameterError);
    });

    it('should throw for array too long', () => {
      expect(() => validateArrayLength([1, 2, 3], 'param', 0, 2)).toThrow(InvalidParameterError);
    });

    it('should use default minLength of 0', () => {
      expect(validateArrayLength([], 'param')).toEqual([]);
    });
  });

  describe('validateEnum', () => {
    const allowedValues = ['option1', 'option2', 'option3'] as const;

    it('should return undefined for undefined input', () => {
      expect(validateEnum(undefined, 'param', allowedValues)).toBeUndefined();
    });

    it('should validate enum values correctly', () => {
      expect(validateEnum('option1', 'param', allowedValues)).toBe('option1');
      expect(validateEnum('option2', 'param', allowedValues)).toBe('option2');
      expect(validateEnum('option3', 'param', allowedValues)).toBe('option3');
    });

    it('should throw for invalid enum value', () => {
      // Error message must list the allowed values.
      expect(() => validateEnum('invalid' as any, 'param', allowedValues)).toThrow(InvalidParameterError);
      expect(() => validateEnum('invalid' as any, 'param', allowedValues))
        .toThrow('one of: option1, option2, option3');
    });
  });

  describe('validateConversationId', () => {
    it('should validate correct conversation IDs', () => {
      // Allowed alphabet: letters, digits, hyphen, underscore.
      expect(validateConversationId('abc123')).toBe('abc123');
      expect(validateConversationId('conversation-id')).toBe('conversation-id');
      expect(validateConversationId('conv_123')).toBe('conv_123');
      expect(validateConversationId('ABC123')).toBe('ABC123');
    });

    it('should throw for empty or missing conversation ID', () => {
      expect(() => validateConversationId('')).toThrow(MissingParameterError);
    });

    it('should throw for invalid characters', () => {
      expect(() => validateConversationId('conv@123')).toThrow(InvalidParameterError);
      expect(() => validateConversationId('conv 123')).toThrow(InvalidParameterError);
      expect(() => validateConversationId('conv.123')).toThrow(InvalidParameterError);
    });

    it('should throw for too long conversation ID', () => {
      // 100 characters is the maximum accepted length.
      const longId = 'a'.repeat(101);
      expect(() => validateConversationId(longId)).toThrow(InvalidParameterError);
    });
  });

  describe('validateBubbleId', () => {
    it('should validate correct bubble IDs', () => {
      expect(validateBubbleId('bubble123')).toBe('bubble123');
      expect(validateBubbleId('bubble-id')).toBe('bubble-id');
      expect(validateBubbleId('bubble_123')).toBe('bubble_123');
    });

    it('should throw for empty or missing bubble ID', () => {
      expect(() => validateBubbleId('')).toThrow(MissingParameterError);
    });

    it('should throw for invalid characters', () => {
      expect(() => validateBubbleId('bubble@123')).toThrow(InvalidParameterError);
      expect(() => validateBubbleId('bubble 123')).toThrow(InvalidParameterError);
    });
  });

  describe('validateSearchQuery', () => {
    it('should validate correct search queries', () => {
      expect(validateSearchQuery('test query')).toBe('test query');
      expect(validateSearchQuery('a')).toBe('a');
    });

    it('should throw for empty query', () => {
      expect(() => validateSearchQuery('')).toThrow(MissingParameterError);
    });

    it('should throw for too long query', () => {
      // 1000 characters is the maximum accepted length.
      const longQuery = 'a'.repeat(1001);
      expect(() => validateSearchQuery(longQuery)).toThrow(InvalidParameterError);
    });
  });

  describe('validateFilePath', () => {
    it('should return undefined for undefined input', () => {
      expect(validateFilePath(undefined, 'param')).toBeUndefined();
    });

    it('should validate correct file paths', () => {
      // Both absolute and relative paths are accepted.
      expect(validateFilePath('/path/to/file.txt', 'param')).toBe('/path/to/file.txt');
      expect(validateFilePath('relative/path.js', 'param')).toBe('relative/path.js');
      expect(validateFilePath('file.ts', 'param')).toBe('file.ts');
    });

    it('should throw for empty path', () => {
      expect(() => validateFilePath('', 'param')).toThrow(InvalidParameterError);
    });

    it('should throw for too long path', () => {
      const longPath = 'a'.repeat(1001);
      expect(() => validateFilePath(longPath, 'param')).toThrow(InvalidParameterError);
    });
  });

  describe('validateProjectPath', () => {
    it('should validate correct project paths', () => {
      expect(validateProjectPath('/project/path')).toBe('/project/path');
      expect(validateProjectPath('relative/project')).toBe('relative/project');
    });

    it('should throw for empty path', () => {
      expect(() => validateProjectPath('')).toThrow(MissingParameterError);
    });

    it('should throw for too long path', () => {
      const longPath = 'a'.repeat(1001);
      expect(() => validateProjectPath(longPath)).toThrow(InvalidParameterError);
    });
  });

  describe('validateWithSchema', () => {
    const testSchema = z.object({
      name: z.string(),
      age: z.number().min(0)
    });

    it('should validate correct input', () => {
      const input = { name: 'John', age: 30 };
      expect(validateWithSchema(input, testSchema)).toEqual(input);
    });

    it('should throw ValidationError for invalid input', () => {
      const input = { name: 'John', age: -5 };
      expect(() => validateWithSchema(input, testSchema)).toThrow(ValidationError);
    });

    it('should throw ValidationError for missing fields', () => {
      const input = { name: 'John' };
      expect(() => validateWithSchema(input, testSchema)).toThrow(ValidationError);
    });

    it('should include context in error message', () => {
      const input = { name: 'John', age: -5 };
      expect(() => validateWithSchema(input, testSchema, 'user data'))
        .toThrow('Validation error in user data');
    });
  });

  describe('validateBoolean', () => {
    it('should return undefined for undefined input', () => {
      expect(validateBoolean(undefined, 'param')).toBeUndefined();
    });

    it('should validate boolean values', () => {
      expect(validateBoolean(true, 'param')).toBe(true);
      expect(validateBoolean(false, 'param')).toBe(false);
    });

    it('should throw for non-boolean input', () => {
      // No truthy/falsy coercion: 'true' and 1 are rejected.
      expect(() => validateBoolean('true' as any, 'param')).toThrow(InvalidParameterError);
      expect(() => validateBoolean(1 as any, 'param')).toThrow(InvalidParameterError);
    });
  });

  describe('validateLimit', () => {
    it('should return default limit for undefined input', () => {
      expect(validateLimit(undefined)).toBe(10);
      expect(validateLimit(undefined, 20)).toBe(20);
    });

    it('should validate correct limits', () => {
      expect(validateLimit(5)).toBe(5);
      expect(validateLimit(100)).toBe(100);
    });

    it('should throw for invalid limits', () => {
      // Accepted range is 1..1000.
      expect(() => validateLimit(0)).toThrow(InvalidParameterError);
      expect(() => validateLimit(-5)).toThrow(InvalidParameterError);
      expect(() => validateLimit(1001)).toThrow(InvalidParameterError);
    });
  });

  describe('validateOffset', () => {
    it('should return 0 for undefined input', () => {
      expect(validateOffset(undefined)).toBe(0);
    });

    it('should validate correct offsets', () => {
      expect(validateOffset(0)).toBe(0);
      expect(validateOffset(50)).toBe(50);
    });

    it('should throw for negative offset', () => {
      expect(() => validateOffset(-1)).toThrow(InvalidParameterError);
    });
  });

  describe('validateContextLines', () => {
    it('should return 3 for undefined input', () => {
      expect(validateContextLines(undefined)).toBe(3);
    });

    it('should validate correct context lines', () => {
      expect(validateContextLines(0)).toBe(0);
      expect(validateContextLines(5)).toBe(5);
      expect(validateContextLines(10)).toBe(10);
    });

    it('should throw for invalid context lines', () => {
      // Accepted range is 0..10.
      expect(() => validateContextLines(-1)).toThrow(InvalidParameterError);
      expect(() => validateContextLines(11)).toThrow(InvalidParameterError);
    });
  });
});
```

--------------------------------------------------------------------------------
/src/database/reader.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
  2 | import { CursorDatabaseReader } from './reader.js';
  3 | import Database from 'better-sqlite3';
  4 | 
  5 | // Mock better-sqlite3
  6 | vi.mock('better-sqlite3');
  7 | 
  8 | const mockDatabase = vi.mocked(Database);
  9 | 
 10 | describe('CursorDatabaseReader', () => {
 11 |   let mockDb: any;
 12 |   let reader: CursorDatabaseReader;
 13 | 
 14 |   beforeEach(() => {
 15 |     mockDb = {
 16 |       prepare: vi.fn(),
 17 |       close: vi.fn(),
 18 |       exec: vi.fn()
 19 |     };
 20 | 
 21 |     mockDatabase.mockReturnValue(mockDb);
 22 | 
 23 |     reader = new CursorDatabaseReader({
 24 |       dbPath: '/test/path/cursor.db',
 25 |       minConversationSize: 1000
 26 |     });
 27 |   });
 28 | 
 29 |   afterEach(() => {
 30 |     vi.clearAllMocks();
 31 |   });
 32 | 
 33 |   describe('constructor', () => {
 34 |     it('should create reader with default options', () => {
 35 |       const defaultReader = new CursorDatabaseReader({ dbPath: '/test/cursor.db' });
 36 |       expect(defaultReader).toBeDefined();
 37 |     });
 38 | 
 39 |     it('should create reader with custom options', () => {
 40 |       const customReader = new CursorDatabaseReader({
 41 |         dbPath: '/custom/path.db',
 42 |         minConversationSize: 5000,
 43 |         cacheEnabled: false
 44 |       });
 45 |       expect(customReader).toBeDefined();
 46 |     });
 47 |   });
 48 | 
 49 |   describe('connect', () => {
 50 |     it('should connect to database successfully', async () => {
 51 |       const mockPrepare = vi.fn().mockReturnValue({
 52 |         get: vi.fn().mockReturnValue({ count: 10 })
 53 |       });
 54 |       mockDb.prepare.mockReturnValue(mockPrepare);
 55 | 
 56 |       await reader.connect();
 57 | 
 58 |       expect(mockDatabase).toHaveBeenCalledWith('/test/path/cursor.db', { readonly: true });
 59 |       expect(mockDb.exec).toHaveBeenCalledWith('PRAGMA journal_mode = WAL;');
 60 |     });
 61 | 
 62 |     it('should handle connection errors', async () => {
 63 |       mockDatabase.mockImplementation(() => {
 64 |         throw new Error('Database connection failed');
 65 |       });
 66 | 
 67 |       await expect(reader.connect()).rejects.toThrow('Database connection failed');
 68 |     });
 69 | 
 70 |     it('should handle connection with cache disabled', async () => {
 71 |       const noCacheReader = new CursorDatabaseReader({
 72 |         dbPath: '/test/cursor.db',
 73 |         cacheEnabled: false
 74 |       });
 75 | 
 76 |       const mockPrepare = vi.fn().mockReturnValue({
 77 |         get: vi.fn().mockReturnValue({ count: 5 })
 78 |       });
 79 |       mockDb.prepare.mockReturnValue(mockPrepare);
 80 | 
 81 |       await noCacheReader.connect();
 82 | 
 83 |       expect(mockDatabase).toHaveBeenCalledWith('/test/cursor.db', { readonly: true });
 84 |     });
 85 |   });
 86 | 
 87 |   describe('close', () => {
 88 |     it('should close database connection', () => {
 89 |       reader['db'] = mockDb;
 90 |       reader.close();
 91 | 
 92 |       expect(mockDb.close).toHaveBeenCalled();
 93 |     });
 94 | 
 95 |     it('should handle close when not connected', () => {
 96 |       expect(() => reader.close()).not.toThrow();
 97 |     });
 98 |   });
 99 | 
100 |   describe('getConversationIds', () => {
101 |     beforeEach(async () => {
102 |       const mockPrepare = vi.fn().mockReturnValue({
103 |         get: vi.fn().mockReturnValue({ count: 10 }),
104 |         all: vi.fn().mockReturnValue([
105 |           { composerId: 'conv1' },
106 |           { composerId: 'conv2' }
107 |         ])
108 |       });
109 |       mockDb.prepare.mockReturnValue(mockPrepare);
110 |       await reader.connect();
111 |     });
112 | 
113 |     it('should get conversation IDs with default filters', async () => {
114 |       const result = await reader.getConversationIds({});
115 | 
116 |       expect(result).toEqual(['conv1', 'conv2']);
117 |     });
118 | 
119 |     it('should apply minLength filter', async () => {
120 |       const mockStmt = {
121 |         all: vi.fn().mockReturnValue([{ composerId: 'conv1' }])
122 |       };
123 |       mockDb.prepare.mockReturnValue(mockStmt);
124 | 
125 |       const result = await reader.getConversationIds({ minLength: 2000 });
126 | 
127 |       expect(result).toEqual(['conv1']);
128 |       expect(mockDb.prepare).toHaveBeenCalledWith(
129 |         expect.stringContaining('LENGTH(text) >= ?')
130 |       );
131 |     });
132 | 
133 |     it('should apply keywords filter', async () => {
134 |       const mockStmt = {
135 |         all: vi.fn().mockReturnValue([{ composerId: 'conv1' }])
136 |       };
137 |       mockDb.prepare.mockReturnValue(mockStmt);
138 | 
139 |       const result = await reader.getConversationIds({ keywords: ['test', 'query'] });
140 | 
141 |       expect(result).toEqual(['conv1']);
142 |       expect(mockDb.prepare).toHaveBeenCalledWith(
143 |         expect.stringContaining('text LIKE ?')
144 |       );
145 |     });
146 | 
147 |     it('should apply format filter', async () => {
148 |       const mockStmt = {
149 |         all: vi.fn().mockReturnValue([{ composerId: 'conv1' }])
150 |       };
151 |       mockDb.prepare.mockReturnValue(mockStmt);
152 | 
153 |       const result = await reader.getConversationIds({ format: 'modern' });
154 | 
155 |       expect(result).toEqual(['conv1']);
156 |       expect(mockDb.prepare).toHaveBeenCalledWith(
157 |         expect.stringContaining('_v IS NOT NULL')
158 |       );
159 |     });
160 |   });
161 | 
162 |   describe('getConversationById', () => {
163 |     beforeEach(async () => {
164 |       const mockPrepare = vi.fn().mockReturnValue({
165 |         get: vi.fn().mockReturnValue({ count: 10 })
166 |       });
167 |       mockDb.prepare.mockReturnValue(mockPrepare);
168 |       await reader.connect();
169 |     });
170 | 
171 |     it('should get conversation by ID', async () => {
172 |       const mockConversation = {
173 |         composerId: 'conv1',
174 |         text: 'conversation text',
175 |         conversation: JSON.stringify([{ type: 1, text: 'hello' }])
176 |       };
177 | 
178 |       const mockStmt = {
179 |         get: vi.fn().mockReturnValue(mockConversation)
180 |       };
181 |       mockDb.prepare.mockReturnValue(mockStmt);
182 | 
183 |       const result = await reader.getConversationById('conv1');
184 | 
185 |       expect(result).toEqual({
186 |         composerId: 'conv1',
187 |         text: 'conversation text',
188 |         conversation: [{ type: 1, text: 'hello' }]
189 |       });
190 |       expect(mockStmt.get).toHaveBeenCalledWith('conv1');
191 |     });
192 | 
193 |     it('should return null for non-existent conversation', async () => {
194 |       const mockStmt = {
195 |         get: vi.fn().mockReturnValue(undefined)
196 |       };
197 |       mockDb.prepare.mockReturnValue(mockStmt);
198 | 
199 |       const result = await reader.getConversationById('nonexistent');
200 | 
201 |       expect(result).toBeNull();
202 |     });
203 | 
204 |     it('should handle JSON parsing errors gracefully', async () => {
205 |       const mockConversation = {
206 |         composerId: 'conv1',
207 |         text: 'conversation text',
208 |         conversation: 'invalid json'
209 |       };
210 | 
211 |       const mockStmt = {
212 |         get: vi.fn().mockReturnValue(mockConversation)
213 |       };
214 |       mockDb.prepare.mockReturnValue(mockStmt);
215 | 
216 |       const result = await reader.getConversationById('conv1');
217 | 
218 |       expect(result).toEqual({
219 |         composerId: 'conv1',
220 |         text: 'conversation text',
221 |         conversation: []
222 |       });
223 |     });
224 |   });
225 | 
226 |   describe('getConversationSummary', () => {
227 |     beforeEach(async () => {
228 |       const mockPrepare = vi.fn().mockReturnValue({
229 |         get: vi.fn().mockReturnValue({ count: 10 })
230 |       });
231 |       mockDb.prepare.mockReturnValue(mockPrepare);
232 |       await reader.connect();
233 |     });
234 | 
235 |     it('should get conversation summary with default options', async () => {
236 |       const mockConversation = {
237 |         composerId: 'conv1',
238 |         text: 'stored summary',
239 |         richText: 'rich text',
240 |         conversation: JSON.stringify([
241 |           { type: 1, text: 'first message' },
242 |           { type: 2, text: 'second message' }
243 |         ])
244 |       };
245 | 
246 |       const mockStmt = {
247 |         get: vi.fn().mockReturnValue(mockConversation)
248 |       };
249 |       mockDb.prepare.mockReturnValue(mockStmt);
250 | 
251 |       const result = await reader.getConversationSummary('conv1');
252 | 
253 |       expect(result).toEqual({
254 |         composerId: 'conv1',
255 |         format: 'legacy',
256 |         messageCount: 2,
257 |         hasCodeBlocks: false,
258 |         conversationSize: expect.any(Number),
259 |         relevantFiles: [],
260 |         attachedFolders: []
261 |       });
262 |     });
263 | 
264 |     it('should include first message when requested', async () => {
265 |       const mockConversation = {
266 |         composerId: 'conv1',
267 |         conversation: JSON.stringify([
268 |           { type: 1, text: 'This is the first message' }
269 |         ])
270 |       };
271 | 
272 |       const mockStmt = {
273 |         get: vi.fn().mockReturnValue(mockConversation)
274 |       };
275 |       mockDb.prepare.mockReturnValue(mockStmt);
276 | 
277 |       const result = await reader.getConversationSummary('conv1', {
278 |         includeFirstMessage: true,
279 |         maxFirstMessageLength: 50
280 |       });
281 | 
282 |       expect(result?.firstMessage).toBe('This is the first message');
283 |     });
284 | 
285 |     it('should detect code blocks', async () => {
286 |       const mockConversation = {
287 |         composerId: 'conv1',
288 |         conversation: JSON.stringify([
289 |           {
290 |             type: 1,
291 |             text: 'message',
292 |             suggestedCodeBlocks: [{ language: 'js', code: 'console.log()' }]
293 |           }
294 |         ])
295 |       };
296 | 
297 |       const mockStmt = {
298 |         get: vi.fn().mockReturnValue(mockConversation)
299 |       };
300 |       mockDb.prepare.mockReturnValue(mockStmt);
301 | 
302 |       const result = await reader.getConversationSummary('conv1', {
303 |         includeCodeBlockCount: true
304 |       });
305 | 
306 |       expect(result?.hasCodeBlocks).toBe(true);
307 |       expect(result?.codeBlockCount).toBe(1);
308 |     });
309 | 
310 |     it('should return null for non-existent conversation', async () => {
311 |       const mockStmt = {
312 |         get: vi.fn().mockReturnValue(undefined)
313 |       };
314 |       mockDb.prepare.mockReturnValue(mockStmt);
315 | 
316 |       const result = await reader.getConversationSummary('nonexistent');
317 | 
318 |       expect(result).toBeNull();
319 |     });
320 |   });
321 | 
322 |   describe('getBubbleMessage', () => {
323 |     beforeEach(async () => {
324 |       const mockPrepare = vi.fn().mockReturnValue({
325 |         get: vi.fn().mockReturnValue({ count: 10 })
326 |       });
327 |       mockDb.prepare.mockReturnValue(mockPrepare);
328 |       await reader.connect();
329 |     });
330 | 
331 |     it('should get bubble message', async () => {
332 |       const mockBubble = {
333 |         bubbleId: 'bubble1',
334 |         type: 1,
335 |         text: 'bubble text',
336 |         relevantFiles: JSON.stringify(['file1.ts']),
337 |         suggestedCodeBlocks: JSON.stringify([]),
338 |         attachedFoldersNew: JSON.stringify(['folder1'])
339 |       };
340 | 
341 |       const mockStmt = {
342 |         get: vi.fn().mockReturnValue(mockBubble)
343 |       };
344 |       mockDb.prepare.mockReturnValue(mockStmt);
345 | 
346 |       const result = await reader.getBubbleMessage('conv1', 'bubble1');
347 | 
348 |       expect(result).toEqual({
349 |         bubbleId: 'bubble1',
350 |         type: 1,
351 |         text: 'bubble text',
352 |         relevantFiles: ['file1.ts'],
353 |         suggestedCodeBlocks: [],
354 |         attachedFoldersNew: ['folder1']
355 |       });
356 |     });
357 | 
358 |     it('should return null for non-existent bubble', async () => {
359 |       const mockStmt = {
360 |         get: vi.fn().mockReturnValue(undefined)
361 |       };
362 |       mockDb.prepare.mockReturnValue(mockStmt);
363 | 
364 |       const result = await reader.getBubbleMessage('conv1', 'nonexistent');
365 | 
366 |       expect(result).toBeNull();
367 |     });
368 |   });
369 | 
370 |   describe('searchConversations', () => {
371 |     beforeEach(async () => {
372 |       const mockPrepare = vi.fn().mockReturnValue({
373 |         get: vi.fn().mockReturnValue({ count: 10 })
374 |       });
375 |       mockDb.prepare.mockReturnValue(mockPrepare);
376 |       await reader.connect();
377 |     });
378 | 
379 |     it('should search conversations', async () => {
380 |       const mockResults = [
381 |         {
382 |           composerId: 'conv1',
383 |           text: 'conversation with search term',
384 |           conversation: JSON.stringify([
385 |             { type: 1, text: 'message with search term' }
386 |           ])
387 |         }
388 |       ];
389 | 
390 |       const mockStmt = {
391 |         all: vi.fn().mockReturnValue(mockResults)
392 |       };
393 |       mockDb.prepare.mockReturnValue(mockStmt);
394 | 
395 |       const result = await reader.searchConversations('search term');
396 | 
397 |       expect(result).toHaveLength(1);
398 |       expect(result[0].composerId).toBe('conv1');
399 |       expect(result[0].matches).toBeDefined();
400 |     });
401 | 
402 |     it('should apply search options', async () => {
403 |       const mockStmt = {
404 |         all: vi.fn().mockReturnValue([])
405 |       };
406 |       mockDb.prepare.mockReturnValue(mockStmt);
407 | 
408 |       await reader.searchConversations('query', {
409 |         maxResults: 5,
410 |         searchType: 'code',
411 |         format: 'modern'
412 |       });
413 | 
414 |       expect(mockDb.prepare).toHaveBeenCalledWith(
415 |         expect.stringContaining('LIMIT 5')
416 |       );
417 |     });
418 |   });
419 | 
420 |   describe('getConversationIdsByProject', () => {
421 |     beforeEach(async () => {
422 |       const mockPrepare = vi.fn().mockReturnValue({
423 |         get: vi.fn().mockReturnValue({ count: 10 })
424 |       });
425 |       mockDb.prepare.mockReturnValue(mockPrepare);
426 |       await reader.connect();
427 |     });
428 | 
429 |     it('should get conversations by project path', async () => {
430 |       const mockResults = [
431 |         { composerId: 'conv1', relevanceScore: 0.9 },
432 |         { composerId: 'conv2', relevanceScore: 0.7 }
433 |       ];
434 | 
435 |       const mockStmt = {
436 |         all: vi.fn().mockReturnValue(mockResults)
437 |       };
438 |       mockDb.prepare.mockReturnValue(mockStmt);
439 | 
440 |       const result = await reader.getConversationIdsByProject('/project/path');
441 | 
442 |       expect(result).toEqual(mockResults);
443 |       expect(mockDb.prepare).toHaveBeenCalledWith(
444 |         expect.stringContaining('attachedFoldersNew LIKE ?')
445 |       );
446 |     });
447 | 
448 |     it('should apply project search options', async () => {
449 |       const mockStmt = {
450 |         all: vi.fn().mockReturnValue([])
451 |       };
452 |       mockDb.prepare.mockReturnValue(mockStmt);
453 | 
454 |       await reader.getConversationIdsByProject('/project', {
455 |         filePattern: '*.ts',
456 |         limit: 10,
457 |         orderBy: 'relevance'
458 |       });
459 | 
460 |       expect(mockDb.prepare).toHaveBeenCalledWith(
461 |         expect.stringContaining('relevantFiles LIKE ?')
462 |       );
463 |     });
464 |   });
465 | 
466 |   describe('Error Handling', () => {
467 |     it('should handle database errors gracefully', async () => {
468 |       const mockStmt = {
469 |         get: vi.fn().mockImplementation(() => {
470 |           throw new Error('Database error');
471 |         })
472 |       };
473 |       mockDb.prepare.mockReturnValue(mockStmt);
474 | 
475 |       await reader.connect();
476 | 
477 |       await expect(reader.getConversationById('conv1')).rejects.toThrow('Database error');
478 |     });
479 | 
480 |     it('should handle missing database connection', async () => {
481 |       const unconnectedReader = new CursorDatabaseReader({ dbPath: '/test/cursor.db' });
482 | 
483 |       await expect(unconnectedReader.getConversationIds({})).rejects.toThrow();
484 |     });
485 |   });
486 | });
```

--------------------------------------------------------------------------------
/src/utils/errors.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { describe, it, expect } from 'vitest';
  2 | import {
  3 |   MCPError,
  4 |   DatabaseError,
  5 |   DatabaseConnectionError,
  6 |   ConversationNotFoundError,
  7 |   BubbleMessageNotFoundError,
  8 |   ValidationError,
  9 |   MissingParameterError,
 10 |   InvalidParameterError,
 11 |   FileSystemError,
 12 |   DatabasePathNotFoundError,
 13 |   ConversationParseError,
 14 |   SearchError,
 15 |   CacheError,
 16 |   isMCPError,
 17 |   getErrorInfo
 18 | } from './errors.js';
 19 | 
 20 | describe('Error Utils', () => {
 21 |   describe('MCPError', () => {
 22 |     it('should create basic MCP error', () => {
 23 |       const error = new MCPError('Test message');
 24 | 
 25 |       expect(error.message).toBe('Test message');
 26 |       expect(error.code).toBe('MCP_ERROR');
 27 |       expect(error.statusCode).toBe(500);
 28 |       expect(error.name).toBe('MCPError');
 29 |       expect(error).toBeInstanceOf(Error);
 30 |       expect(error).toBeInstanceOf(MCPError);
 31 |     });
 32 | 
 33 |     it('should create MCP error with custom code and status', () => {
 34 |       const error = new MCPError('Custom message', 'CUSTOM_CODE', 400);
 35 | 
 36 |       expect(error.message).toBe('Custom message');
 37 |       expect(error.code).toBe('CUSTOM_CODE');
 38 |       expect(error.statusCode).toBe(400);
 39 |     });
 40 | 
 41 |     it('should maintain proper stack trace', () => {
 42 |       const error = new MCPError('Test message');
 43 |       expect(error.stack).toBeDefined();
 44 |       expect(error.stack).toContain('MCPError');
 45 |     });
 46 |   });
 47 | 
 48 |   describe('DatabaseError', () => {
 49 |     it('should create database error without original error', () => {
 50 |       const error = new DatabaseError('Connection failed');
 51 | 
 52 |       expect(error.message).toBe('Database error: Connection failed');
 53 |       expect(error.code).toBe('DATABASE_ERROR');
 54 |       expect(error.statusCode).toBe(500);
 55 |       expect(error).toBeInstanceOf(MCPError);
 56 |       expect(error).toBeInstanceOf(DatabaseError);
 57 |     });
 58 | 
 59 |     it('should create database error with original error', () => {
 60 |       const originalError = new Error('Original error message');
 61 |       const error = new DatabaseError('Connection failed', originalError);
 62 | 
 63 |       expect(error.message).toBe('Database error: Connection failed. Original: Original error message');
 64 |       expect(error.code).toBe('DATABASE_ERROR');
 65 |       expect(error.stack).toContain('Caused by:');
 66 |     });
 67 | 
 68 |     it('should handle original error without stack', () => {
 69 |       const originalError = new Error('Original error message');
 70 |       originalError.stack = undefined;
 71 |       const error = new DatabaseError('Connection failed', originalError);
 72 | 
 73 |       expect(error.message).toBe('Database error: Connection failed. Original: Original error message');
 74 |     });
 75 |   });
 76 | 
 77 |   describe('DatabaseConnectionError', () => {
 78 |     it('should create database connection error', () => {
 79 |       const dbPath = '/path/to/database.db';
 80 |       const error = new DatabaseConnectionError(dbPath);
 81 | 
 82 |       expect(error.message).toBe(`Database error: Failed to connect to database at path: ${dbPath}`);
 83 |       expect(error.code).toBe('DATABASE_CONNECTION_ERROR');
 84 |       expect(error.statusCode).toBe(500);
 85 |       expect(error).toBeInstanceOf(DatabaseError);
 86 |     });
 87 | 
 88 |     it('should create database connection error with original error', () => {
 89 |       const dbPath = '/path/to/database.db';
 90 |       const originalError = new Error('Permission denied');
 91 |       const error = new DatabaseConnectionError(dbPath, originalError);
 92 | 
 93 |       expect(error.message).toContain('Failed to connect to database at path: /path/to/database.db');
 94 |       expect(error.message).toContain('Permission denied');
 95 |       expect(error.code).toBe('DATABASE_CONNECTION_ERROR');
 96 |     });
 97 |   });
 98 | 
 99 |   describe('ConversationNotFoundError', () => {
100 |     it('should create conversation not found error', () => {
101 |       const conversationId = 'conv123';
102 |       const error = new ConversationNotFoundError(conversationId);
103 | 
104 |       expect(error.message).toBe('Conversation not found: conv123');
105 |       expect(error.code).toBe('CONVERSATION_NOT_FOUND');
106 |       expect(error.statusCode).toBe(404);
107 |       expect(error.conversationId).toBe(conversationId);
108 |       expect(error).toBeInstanceOf(MCPError);
109 |     });
110 |   });
111 | 
112 |   describe('BubbleMessageNotFoundError', () => {
113 |     it('should create bubble message not found error', () => {
114 |       const composerId = 'composer123';
115 |       const bubbleId = 'bubble456';
116 |       const error = new BubbleMessageNotFoundError(composerId, bubbleId);
117 | 
118 |       expect(error.message).toBe('Bubble message not found: bubble456 in conversation composer123');
119 |       expect(error.code).toBe('BUBBLE_MESSAGE_NOT_FOUND');
120 |       expect(error.statusCode).toBe(404);
121 |       expect(error.composerId).toBe(composerId);
122 |       expect(error.bubbleId).toBe(bubbleId);
123 |       expect(error).toBeInstanceOf(MCPError);
124 |     });
125 |   });
126 | 
127 |   describe('ValidationError', () => {
128 |     it('should create validation error without field info', () => {
129 |       const error = new ValidationError('Invalid input');
130 | 
131 |       expect(error.message).toBe('Validation error: Invalid input');
132 |       expect(error.code).toBe('VALIDATION_ERROR');
133 |       expect(error.statusCode).toBe(400);
134 |       expect(error.field).toBeUndefined();
135 |       expect(error.value).toBeUndefined();
136 |       expect(error).toBeInstanceOf(MCPError);
137 |     });
138 | 
139 |     it('should create validation error with field info', () => {
140 |       const error = new ValidationError('Invalid email format', 'email', 'invalid-email');
141 | 
142 |       expect(error.message).toBe('Validation error: Invalid email format');
143 |       expect(error.code).toBe('VALIDATION_ERROR');
144 |       expect(error.field).toBe('email');
145 |       expect(error.value).toBe('invalid-email');
146 |     });
147 |   });
148 | 
149 |   describe('MissingParameterError', () => {
150 |     it('should create missing parameter error', () => {
151 |       const error = new MissingParameterError('username');
152 | 
153 |       expect(error.message).toBe('Validation error: Missing required parameter: username');
154 |       expect(error.code).toBe('MISSING_PARAMETER');
155 |       expect(error.statusCode).toBe(400);
156 |       expect(error.field).toBe('username');
157 |       expect(error).toBeInstanceOf(ValidationError);
158 |     });
159 |   });
160 | 
161 |   describe('InvalidParameterError', () => {
162 |     it('should create invalid parameter error without expected type', () => {
163 |       const error = new InvalidParameterError('age', 'not-a-number');
164 | 
165 |       expect(error.message).toBe("Validation error: Invalid parameter 'age': not-a-number");
166 |       expect(error.code).toBe('INVALID_PARAMETER');
167 |       expect(error.field).toBe('age');
168 |       expect(error.value).toBe('not-a-number');
169 |       expect(error).toBeInstanceOf(ValidationError);
170 |     });
171 | 
172 |     it('should create invalid parameter error with expected type', () => {
173 |       const error = new InvalidParameterError('age', 'not-a-number', 'number');
174 | 
175 |       expect(error.message).toBe("Validation error: Invalid parameter 'age': expected number, got string");
176 |       expect(error.code).toBe('INVALID_PARAMETER');
177 |       expect(error.field).toBe('age');
178 |       expect(error.value).toBe('not-a-number');
179 |     });
180 |   });
181 | 
182 |   describe('FileSystemError', () => {
183 |     it('should create file system error without original error', () => {
184 |       const path = '/path/to/file';
185 |       const error = new FileSystemError('File not found', path);
186 | 
187 |       expect(error.message).toBe('File system error: File not found');
188 |       expect(error.code).toBe('FILESYSTEM_ERROR');
189 |       expect(error.statusCode).toBe(500);
190 |       expect(error.path).toBe(path);
191 |       expect(error).toBeInstanceOf(MCPError);
192 |     });
193 | 
194 |     it('should create file system error with original error', () => {
195 |       const path = '/path/to/file';
196 |       const originalError = new Error('Permission denied');
197 |       const error = new FileSystemError('File not found', path, originalError);
198 | 
199 |       expect(error.message).toBe('File system error: File not found. Original: Permission denied');
200 |       expect(error.path).toBe(path);
201 |       expect(error.stack).toContain('Caused by:');
202 |     });
203 |   });
204 | 
205 |   describe('DatabasePathNotFoundError', () => {
206 |     it('should create database path not found error', () => {
207 |       const attemptedPaths = ['/path1/db', '/path2/db', '/path3/db'];
208 |       const error = new DatabasePathNotFoundError(attemptedPaths);
209 | 
210 |       expect(error.message).toBe('File system error: Could not find Cursor database. Attempted paths: /path1/db, /path2/db, /path3/db');
211 |       expect(error.code).toBe('DATABASE_PATH_NOT_FOUND');
212 |       expect(error.path).toBe('/path1/db');
213 |       expect(error).toBeInstanceOf(FileSystemError);
214 |     });
215 | 
216 |     it('should handle empty attempted paths array', () => {
217 |       const error = new DatabasePathNotFoundError([]);
218 | 
219 |       expect(error.message).toContain('Could not find Cursor database. Attempted paths: ');
220 |       expect(error.path).toBe('unknown');
221 |     });
222 |   });
223 | 
224 |   describe('ConversationParseError', () => {
225 |     it('should create conversation parse error without conversation ID', () => {
226 |       const error = new ConversationParseError('Invalid JSON format');
227 | 
228 |       expect(error.message).toBe('Parse error: Invalid JSON format');
229 |       expect(error.code).toBe('CONVERSATION_PARSE_ERROR');
230 |       expect(error.statusCode).toBe(500);
231 |       expect(error.conversationId).toBeUndefined();
232 |       expect(error).toBeInstanceOf(MCPError);
233 |     });
234 | 
235 |     it('should create conversation parse error with conversation ID', () => {
236 |       const conversationId = 'conv123';
237 |       const error = new ConversationParseError('Invalid JSON format', conversationId);
238 | 
239 |       expect(error.message).toBe('Parse error: Invalid JSON format');
240 |       expect(error.conversationId).toBe(conversationId);
241 |     });
242 | 
243 |     it('should create conversation parse error with original error', () => {
244 |       const originalError = new Error('JSON syntax error');
245 |       const error = new ConversationParseError('Invalid JSON format', 'conv123', originalError);
246 | 
247 |       expect(error.message).toBe('Parse error: Invalid JSON format. Original: JSON syntax error');
248 |       expect(error.stack).toContain('Caused by:');
249 |     });
250 |   });
251 | 
252 |   describe('SearchError', () => {
253 |     it('should create search error without original error', () => {
254 |       const query = 'test query';
255 |       const error = new SearchError('Search failed', query);
256 | 
257 |       expect(error.message).toBe('Search error: Search failed');
258 |       expect(error.code).toBe('SEARCH_ERROR');
259 |       expect(error.statusCode).toBe(500);
260 |       expect(error.query).toBe(query);
261 |       expect(error).toBeInstanceOf(MCPError);
262 |     });
263 | 
264 |     it('should create search error with original error', () => {
265 |       const query = 'test query';
266 |       const originalError = new Error('Database timeout');
267 |       const error = new SearchError('Search failed', query, originalError);
268 | 
269 |       expect(error.message).toBe('Search error: Search failed. Original: Database timeout');
270 |       expect(error.query).toBe(query);
271 |       expect(error.stack).toContain('Caused by:');
272 |     });
273 |   });
274 | 
275 |   describe('CacheError', () => {
276 |     it('should create cache error without key', () => {
277 |       const operation = 'get';
278 |       const error = new CacheError('Cache miss', operation);
279 | 
280 |       expect(error.message).toBe('Cache error: Cache miss');
281 |       expect(error.code).toBe('CACHE_ERROR');
282 |       expect(error.statusCode).toBe(500);
283 |       expect(error.operation).toBe(operation);
284 |       expect(error.key).toBeUndefined();
285 |       expect(error).toBeInstanceOf(MCPError);
286 |     });
287 | 
288 |     it('should create cache error with key', () => {
289 |       const operation = 'set';
290 |       const key = 'cache-key';
291 |       const error = new CacheError('Cache write failed', operation, key);
292 | 
293 |       expect(error.message).toBe('Cache error: Cache write failed');
294 |       expect(error.operation).toBe(operation);
295 |       expect(error.key).toBe(key);
296 |     });
297 | 
298 |     it('should create cache error with original error', () => {
299 |       const originalError = new Error('Memory full');
300 |       const error = new CacheError('Cache write failed', 'set', 'key', originalError);
301 | 
302 |       expect(error.message).toBe('Cache error: Cache write failed. Original: Memory full');
303 |       expect(error.stack).toContain('Caused by:');
304 |     });
305 |   });
306 | 
307 |   describe('isMCPError', () => {
308 |     it('should return true for MCP errors', () => {
309 |       expect(isMCPError(new MCPError('test'))).toBe(true);
310 |       expect(isMCPError(new DatabaseError('test'))).toBe(true);
311 |       expect(isMCPError(new ValidationError('test'))).toBe(true);
312 |       expect(isMCPError(new ConversationNotFoundError('test'))).toBe(true);
313 |     });
314 | 
315 |     it('should return false for non-MCP errors', () => {
316 |       expect(isMCPError(new Error('test'))).toBe(false);
317 |       expect(isMCPError(new TypeError('test'))).toBe(false);
318 |       expect(isMCPError('not an error')).toBe(false);
319 |       expect(isMCPError(null)).toBe(false);
320 |       expect(isMCPError(undefined)).toBe(false);
321 |     });
322 | 
323 |     it('should return false for objects that look like MCP errors', () => {
324 |       const fakeError = {
325 |         message: 'test',
326 |         code: 'TEST_ERROR',
327 |         statusCode: 400
328 |       };
329 |       expect(isMCPError(fakeError)).toBe(false);
330 |     });
331 |   });
332 | 
333 |   describe('getErrorInfo', () => {
334 |     it('should extract info from MCP errors', () => {
335 |       const error = new DatabaseError('Database connection failed');
336 |       const info = getErrorInfo(error);
337 | 
338 |       expect(info.message).toBe('Database error: Database connection failed');
339 |       expect(info.code).toBe('DATABASE_ERROR');
340 |       expect(info.statusCode).toBe(500);
341 |       expect(info.stack).toBeDefined();
342 |       expect(info.originalError).toBeUndefined();
343 |     });
344 | 
345 |     it('should extract info from regular errors', () => {
346 |       const error = new Error('Regular error');
347 |       const info = getErrorInfo(error);
348 | 
349 |       expect(info.message).toBe('Regular error');
350 |       expect(info.code).toBe('UNKNOWN_ERROR');
351 |       expect(info.statusCode).toBe(500);
352 |       expect(info.stack).toBeDefined();
353 |     });
354 | 
355 |     it('should handle non-error objects', () => {
356 |       const info = getErrorInfo('string error');
357 | 
358 |       expect(info.message).toBe('string error');
359 |       expect(info.code).toBe('UNKNOWN_ERROR');
360 |       expect(info.statusCode).toBe(500);
361 |       expect(info.stack).toBeUndefined();
362 |     });
363 | 
364 |     it('should handle null and undefined', () => {
365 |       expect(getErrorInfo(null).message).toBe('Unknown error occurred');
366 |       expect(getErrorInfo(undefined).message).toBe('Unknown error occurred');
367 |     });
368 | 
369 |     it('should handle objects with toString method', () => {
370 |       const obj = {
371 |         toString: () => 'Custom error message'
372 |       };
373 |       const info = getErrorInfo(obj);
374 | 
375 |       expect(info.message).toBe('Custom error message');
376 |       expect(info.code).toBe('UNKNOWN_ERROR');
377 |     });
378 | 
379 |     it('should include original error info for nested errors', () => {
380 |       const originalError = new Error('Original error');
381 |       const error = new DatabaseError('Wrapper error', originalError);
382 |       const info = getErrorInfo(error);
383 | 
384 |       expect(info.originalError).toBe('Original error');
385 |     });
386 |   });
387 | });
```

--------------------------------------------------------------------------------
/docs/use-cases.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Cursor Chat History MCP - Use Cases
  2 | 
  3 | This document provides comprehensive use-case examples for the Cursor Chat History MCP, organized by user type, scenario, and application domain.
  4 | 
  5 | ## 🎯 Personal Development & Learning
  6 | 
  7 | ### Skill Assessment & Growth Tracking
  8 | 
  9 | **Track Your Learning Journey**
 10 | ```
 11 | "Analyze my conversations over the last 6 months to identify which programming concepts I ask about most frequently"
 12 | "Find conversations where I progressed from asking basic questions to implementing complex solutions"
 13 | "Generate a timeline of my React learning based on conversation complexity over time"
 14 | ```
 15 | 
 16 | **Identify Knowledge Gaps**
 17 | ```
 18 | "Search for patterns in my debugging conversations to find recurring issues I struggle with"
 19 | "Find topics I've never discussed but are common in my project files"
 20 | "Analyze conversations where I repeatedly asked similar questions about async/await"
 21 | ```
 22 | 
 23 | **Create Personal Learning Materials**
 24 | ```
 25 | "Extract all my successful problem-solving conversations to create a personal reference guide"
 26 | "Find conversations where I learned new concepts and turn them into study notes"
 27 | "Generate flashcards from conversations about complex algorithms I've implemented"
 28 | ```
 29 | 
 30 | ### Code Quality Improvement
 31 | 
 32 | **Personal Coding Standards**
 33 | ```
 34 | "Analyze my TypeScript conversations to create my personal interface naming conventions"
 35 | "Find patterns in my code review discussions to build my quality checklist"
 36 | "Extract conversations about refactoring to create my refactoring playbook"
 37 | ```
 38 | 
 39 | **Debugging Mastery**
 40 | ```
 41 | "Create a debugging methodology from conversations where I successfully solved complex bugs"
 42 | "Find all conversations about performance issues and extract optimization techniques"
 43 | "Generate error handling patterns from my exception-related discussions"
 44 | ```
 45 | 
 46 | ## 🏢 Team & Collaboration
 47 | 
 48 | ### Onboarding & Knowledge Transfer
 49 | 
 50 | **New Developer Onboarding**
 51 | ```
 52 | "Export all conversations about our authentication system for new team member documentation"
 53 | "Find discussions about our deployment pipeline and create an onboarding guide"
 54 | "Generate FAQ from commonly asked questions in team conversations"
 55 | ```
 56 | 
 57 | **Institutional Knowledge Capture**
 58 | ```
 59 | "Extract conversations with senior developers about architectural decisions"
 60 | "Find discussions about why certain technologies were chosen over alternatives"
 61 | "Create decision logs from conversations about major technical choices"
 62 | ```
 63 | 
 64 | ### Team Standards & Best Practices
 65 | 
 66 | **Coding Standards Development**
 67 | ```
 68 | "Analyze team conversations about code reviews to create coding guidelines"
 69 | "Find discussions about naming conventions and standardize them"
 70 | "Extract security-related conversations to build security best practices"
 71 | ```
 72 | 
 73 | **Process Documentation**
 74 | ```
 75 | "Generate testing guidelines from conversations about testing strategies"
 76 | "Create deployment checklists from conversations about production issues"
 77 | "Build code review templates from successful review discussions"
 78 | ```
 79 | 
 80 | ### Knowledge Sharing
 81 | 
 82 | **Create Training Materials**
 83 | ```
 84 | "Find conversations where complex concepts were explained well and turn them into training docs"
 85 | "Extract step-by-step implementations from conversations for tutorial creation"
 86 | "Generate workshop content from conversations about hands-on learning"
 87 | ```
 88 | 
 89 | **Build Team Resources**
 90 | ```
 91 | "Create a troubleshooting database from team problem-solving conversations"
 92 | "Generate tool configuration guides from setup discussions"
 93 | "Build a team glossary from conversations about domain-specific terms"
 94 | ```
 95 | 
 96 | ## 🔧 Development Workflow Optimization
 97 | 
 98 | ### Debugging & Problem-Solving
 99 | 
100 | **Build Debugging Resources**
101 | ```
102 | "Create error code reference guides from conversations about specific errors"
103 | "Find conversations about system failures and build incident response playbooks"
104 | "Generate debugging decision trees from successful troubleshooting sessions"
105 | ```
106 | 
107 | **Performance Optimization**
108 | ```
109 | "Extract all performance-related conversations to create optimization checklists"
110 | "Find discussions about database query optimization and build query guidelines"
111 | "Generate performance monitoring guides from conversations about bottlenecks"
112 | ```
113 | 
114 | ### Tool & Technology Adoption
115 | 
116 | **Technology Evaluation**
117 | ```
118 | "Compare conversations about different frameworks to create evaluation matrices"
119 | "Find discussions about tool adoption challenges and create adoption playbooks"
120 | "Analyze conversations about migration projects to build migration guides"
121 | ```
122 | 
123 | **Configuration Management**
124 | ```
125 | "Generate setup guides from conversations about development environment configuration"
126 | "Create tool comparison documents from conversations about alternative solutions"
127 | "Build troubleshooting guides from conversations about tool-specific issues"
128 | ```
129 | 
130 | ### Process Improvement
131 | 
132 | **Workflow Analysis**
133 | ```
134 | "Analyze conversations about workflow pain points to identify improvement opportunities"
135 | "Find discussions about automation and create automation opportunity lists"
136 | "Extract conversations about time-saving techniques and build efficiency guides"
137 | ```
138 | 
139 | **Quality Assurance**
140 | ```
141 | "Generate testing strategies from conversations about QA approaches"
142 | "Create review checklists from conversations about quality issues"
143 | "Build validation frameworks from conversations about testing methodologies"
144 | ```
145 | 
146 | ## 📊 Analytics & Insights
147 | 
148 | ### Productivity Analysis
149 | 
150 | **Personal Productivity Insights**
151 | ```
152 | "Analyze conversation patterns to identify my most productive coding periods"
153 | "Find correlations between conversation topics and successful implementations"
154 | "Track how quickly I solve similar problems over time"
155 | ```
156 | 
157 | **Team Productivity Metrics**
158 | ```
159 | "Analyze team conversation patterns to identify collaboration bottlenecks"
160 | "Find conversations that led to successful outcomes vs. those that didn't"
161 | "Measure knowledge transfer effectiveness through conversation analysis"
162 | ```
163 | 
164 | ### Technology Usage Patterns
165 | 
166 | **Technology Stack Analysis**
167 | ```
168 | "Generate reports on programming language usage across all conversations"
169 | "Track the evolution of our technology stack through chat history"
170 | "Find conversations about technology decisions to understand selection criteria"
171 | ```
172 | 
173 | **Adoption Tracking**
174 | ```
175 | "Monitor new technology adoption through conversation frequency and sentiment"
176 | "Analyze conversations about learning curves for different technologies"
177 | "Track which technologies generate the most questions and support needs"
178 | ```
179 | 
180 | ### Project Insights
181 | 
182 | **Project Success Patterns**
183 | ```
184 | "Analyze conversations by project to identify success factors"
185 | "Find patterns in conversations that predict project challenges"
186 | "Generate project retrospectives based on conversation content and outcomes"
187 | ```
188 | 
189 | **Development Velocity**
190 | ```
191 | "Track feature development conversations to understand implementation patterns"
192 | "Analyze conversation complexity vs. actual implementation time"
193 | "Find conversations about estimation accuracy and improve estimation processes"
194 | ```
195 | 
196 | ## 🎨 Creative & Strategic Applications
197 | 
198 | ### Innovation & Ideation
199 | 
200 | **Idea Generation**
201 | ```
202 | "Find conversations about experimental features to identify innovation opportunities"
203 | "Extract brainstorming conversations to build idea repositories"
204 | "Analyze discussions about future improvements to generate product roadmaps"
205 | ```
206 | 
207 | **Research & Development**
208 | ```
209 | "Find conversations about proofs of concept to create experimentation frameworks"
210 | "Generate research reports from conversations about emerging technologies"
211 | "Extract conversations about competitive analysis to inform strategic decisions"
212 | ```
213 | 
214 | ### Architecture & Design
215 | 
216 | **System Design Documentation**
217 | ```
218 | "Create architecture documents from conversations about system design decisions"
219 | "Find discussions about scalability to build scaling playbooks"
220 | "Generate design pattern guides from conversations about code structure"
221 | ```
222 | 
223 | **Technical Debt Management**
224 | ```
225 | "Extract conversations about technical debt to create remediation plans"
226 | "Find discussions about refactoring priorities and create improvement roadmaps"
227 | "Generate maintenance schedules from conversations about system health"
228 | ```
229 | 
230 | ## 🔍 Advanced Search & Analysis Techniques
231 | 
232 | ### Pattern Recognition with LIKE Patterns
233 | 
234 | **Function and Method Analysis**
235 | ```
236 | "Find all React hook usage: likePattern='%useState(%' or '%useEffect(%'"
237 | "Locate API calls: likePattern='%fetch(%' or '%axios.%'"
238 | "Search for error handling: likePattern='%try {%' or '%catch (%'"
239 | "Find database queries: likePattern='%SELECT %' or '%INSERT INTO%'"
240 | ```
241 | 
242 | **File and Configuration Analysis**
243 | ```
244 | "Find configuration discussions: likePattern='%.config%' or '%package.json%'"
245 | "Locate styling conversations: likePattern='%.css%' or '%.scss%'"
246 | "Search for test files: likePattern='%.test.%' or '%.spec.%'"
247 | "Find documentation: likePattern='%.md%' or '%README%'"
248 | ```
249 | 
250 | ### Multi-Keyword Search Strategies
251 | 
252 | **Technology Combinations**
253 | ```
254 | "Find React + TypeScript discussions: keywords=['react', 'typescript'], keywordOperator='AND'"
255 | "Compare frameworks: keywords=['react', 'vue', 'angular'], keywordOperator='OR'"
256 | "Database + performance: keywords=['database', 'performance', 'optimization'], keywordOperator='AND'"
257 | ```
258 | 
259 | **Problem-Solution Patterns**
260 | ```
261 | "Error + solution combinations: keywords=['error', 'fix', 'solution'], keywordOperator='AND'"
262 | "Testing strategies: keywords=['test', 'unit', 'integration'], keywordOperator='OR'"
263 | "Security implementations: keywords=['auth', 'security', 'encryption'], keywordOperator='AND'"
264 | ```
265 | 
266 | ### Cross-Project Analysis
267 | 
268 | **Reusable Components**
269 | ```
270 | "Find conversations about components used across multiple projects"
271 | "Extract utility functions discussed in different contexts"
272 | "Identify patterns that could be abstracted into shared libraries"
273 | ```
274 | 
275 | **Consistency Analysis**
276 | ```
277 | "Compare how similar problems are solved across different projects"
278 | "Find conversations about standardization opportunities"
279 | "Analyze architectural decisions across project boundaries"
280 | ```
281 | 
282 | ## 🎓 Educational & Training Applications
283 | 
284 | ### Curriculum Development
285 | 
286 | **Course Creation**
287 | ```
288 | "Generate programming course content from beginner to advanced conversations"
289 | "Create hands-on exercises from conversations about practical implementations"
290 | "Build assessment materials from conversations about common mistakes"
291 | ```
292 | 
293 | **Tutorial Development**
294 | ```
295 | "Extract step-by-step tutorials from conversations about complex implementations"
296 | "Create video script content from detailed explanation conversations"
297 | "Generate interactive coding examples from problem-solving conversations"
298 | ```
299 | 
300 | ### Mentoring & Teaching
301 | 
302 | **Mentoring Resources**
303 | ```
304 | "Find conversations where complex concepts were explained simply"
305 | "Create mentoring templates from successful knowledge transfer conversations"
306 | "Generate coaching materials from conversations about skill development"
307 | ```
308 | 
309 | **Teaching Materials**
310 | ```
311 | "Build explanation frameworks from conversations about difficult concepts"
312 | "Create example repositories from conversations about best practices"
313 | "Generate quiz questions from conversations about common misconceptions"
314 | ```
315 | 
316 | ## 🔒 Security & Compliance
317 | 
318 | ### Security Analysis
319 | 
320 | **Vulnerability Assessment**
321 | ```
322 | "Find conversations about security vulnerabilities to create security checklists"
323 | "Extract authentication discussions to standardize auth implementations"
324 | "Generate security review templates from security-focused conversations"
325 | ```
326 | 
327 | **Incident Response**
328 | ```
329 | "Create incident response playbooks from security incident conversations"
330 | "Build security monitoring guides from conversations about threat detection"
331 | "Generate recovery procedures from conversations about security breaches"
332 | ```
333 | 
334 | ### Compliance & Governance
335 | 
336 | **Regulatory Compliance**
337 | ```
338 | "Extract conversations about GDPR compliance to create privacy guidelines"
339 | "Find discussions about data handling to build governance policies"
340 | "Generate audit documentation from compliance-related conversations"
341 | ```
342 | 
343 | **Risk Management**
344 | ```
345 | "Analyze conversations about risk assessment to improve risk processes"
346 | "Find discussions about security controls to standardize implementations"
347 | "Create risk mitigation strategies from conversations about security measures"
348 | ```
349 | 
350 | ## 🚀 Business & Product Development
351 | 
352 | ### Feature Development
353 | 
354 | **Requirements Analysis**
355 | ```
356 | "Analyze conversations about user requirements to improve gathering processes"
357 | "Find discussions about feature complexity to improve estimation accuracy"
358 | "Extract user feedback conversations to inform product decisions"
359 | ```
360 | 
361 | **Product Strategy**
362 | ```
363 | "Generate feature roadmaps from conversations about user needs"
364 | "Create market analysis from conversations about competitive features"
365 | "Build product specifications from conversations about successful implementations"
366 | ```
367 | 
368 | ### Market Research
369 | 
370 | **Competitive Intelligence**
371 | ```
372 | "Find conversations about competitor analysis to track competitive landscape"
373 | "Analyze discussions about market trends to identify opportunities"
374 | "Extract conversations about user research to inform product strategy"
375 | ```
376 | 
377 | **Technology Trends**
378 | ```
379 | "Generate technology trend reports from conversations about emerging tools"
380 | "Find discussions about industry changes to inform strategic planning"
381 | "Create innovation reports from conversations about cutting-edge implementations"
382 | ```
383 | 
384 | ## 💡 Creative Use Case Examples
385 | 
386 | ### Personal Brand Development
387 | 
388 | **Portfolio Creation**
389 | ```
390 | "Extract conversations about successful projects to create portfolio content"
391 | "Find discussions about innovative solutions to showcase problem-solving skills"
392 | "Generate case studies from conversations about complex implementations"
393 | ```
394 | 
395 | **Thought Leadership**
396 | ```
397 | "Create blog post content from conversations about industry insights"
398 | "Generate speaking topics from conversations about expertise areas"
399 | "Build technical writing samples from detailed explanation conversations"
400 | ```
401 | 
402 | ### Community Contribution
403 | 
404 | **Open Source Development**
405 | ```
406 | "Find conversations about common problems to identify open source opportunities"
407 | "Extract solutions that could benefit the broader developer community"
408 | "Generate documentation for open source projects from implementation conversations"
409 | ```
410 | 
411 | **Knowledge Sharing**
412 | ```
413 | "Create Stack Overflow answers from conversations about solved problems"
414 | "Generate tutorial content for developer communities"
415 | "Build FAQ resources from conversations about common questions"
416 | ```
417 | 
418 | ## 🔧 Implementation Tips
419 | 
420 | ### Getting Started
421 | 
422 | 1. **Start Small**: Begin with simple searches to understand your conversation patterns
423 | 2. **Use Filters**: Leverage project paths and date ranges to focus your analysis
424 | 3. **Combine Tools**: Use multiple MCP tools together for comprehensive insights
425 | 4. **Export Data**: Use export functionality for external analysis and visualization
426 | 
427 | ### Best Practices
428 | 
429 | 1. **Regular Analysis**: Set up periodic reviews of your conversation patterns
430 | 2. **Tag Important Conversations**: Use consistent keywords for easier searching
431 | 3. **Document Insights**: Keep track of valuable insights discovered through analysis
432 | 4. **Share Knowledge**: Use extracted insights to improve team collaboration
433 | 
434 | ### Advanced Techniques
435 | 
436 | 1. **Temporal Analysis**: Compare conversation patterns across different time periods
437 | 2. **Cross-Reference**: Combine conversation analysis with code metrics and project outcomes
438 | 3. **Predictive Insights**: Use historical patterns to predict future challenges and opportunities
439 | 4. **Automated Workflows**: Create scripts to regularly extract and analyze chat data
440 | 
441 | ---
442 | 
443 | *This document provides a comprehensive overview of use cases for the Cursor Chat History MCP. Each use case can be adapted and customized based on specific needs, team structures, and project requirements.*
```

--------------------------------------------------------------------------------
/src/database/parser.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { describe, it, expect, beforeEach } from 'vitest';
  2 | import { ConversationParser } from './parser.js';
  3 | 
  4 | describe('ConversationParser', () => {
  5 |   let parser: ConversationParser;
  6 | 
  7 |   beforeEach(() => {
  8 |     parser = new ConversationParser();
  9 |   });
 10 | 
 11 |   describe('parseConversationJSON', () => {
 12 |     it('should parse valid legacy conversation JSON', () => {
 13 |       const legacyConversationJson = JSON.stringify({
 14 |         composerId: 'legacy-123',
 15 |         hasLoaded: true,
 16 |         text: '',
 17 |         richText: '',
 18 |         conversation: [
 19 |           {
 20 |             type: 1,
 21 |             bubbleId: 'bubble-1',
 22 |             text: 'Hello, world!',
 23 |             relevantFiles: ['file1.ts'],
 24 |             suggestedCodeBlocks: [],
 25 |             attachedFoldersNew: []
 26 |           }
 27 |         ]
 28 |       });
 29 | 
 30 |       const result = parser.parseConversationJSON(legacyConversationJson);
 31 | 
 32 |       expect(result.composerId).toBe('legacy-123');
 33 |       expect('conversation' in result).toBe(true);
 34 |     });
 35 | 
 36 |     it('should parse valid modern conversation JSON', () => {
 37 |       const modernConversationJson = JSON.stringify({
 38 |         composerId: 'modern-123',
 39 |         _v: 2,
 40 |         hasLoaded: true,
 41 |         text: '',
 42 |         richText: '',
 43 |         fullConversationHeadersOnly: [
 44 |           {
 45 |             type: 1,
 46 |             bubbleId: 'bubble-1'
 47 |           }
 48 |         ]
 49 |       });
 50 | 
 51 |       const result = parser.parseConversationJSON(modernConversationJson);
 52 | 
 53 |       expect(result.composerId).toBe('modern-123');
 54 |       expect('_v' in result).toBe(true);
 55 |     });
 56 | 
 57 |     it('should throw error for invalid JSON', () => {
 58 |       const invalidJson = '{ invalid json }';
 59 | 
 60 |       expect(() => parser.parseConversationJSON(invalidJson))
 61 |         .toThrow('Failed to parse conversation JSON');
 62 |     });
 63 | 
 64 |     it('should throw error for missing composerId', () => {
 65 |       const invalidConversation = JSON.stringify({
 66 |         conversation: []
 67 |       });
 68 | 
 69 |       expect(() => parser.parseConversationJSON(invalidConversation))
 70 |         .toThrow('Invalid conversation format');
 71 |     });
 72 | 
 73 |     it('should throw error for invalid conversation structure', () => {
 74 |       const invalidConversation = JSON.stringify({
 75 |         composerId: 'test',
 76 |         conversation: 'not an array'
 77 |       });
 78 | 
 79 |       expect(() => parser.parseConversationJSON(invalidConversation))
 80 |         .toThrow('Invalid conversation format');
 81 |     });
 82 |   });
 83 | 
 84 |   describe('extractMessages', () => {
 85 |     it('should extract messages from legacy conversation', () => {
 86 |       const legacyConversation = {
 87 |         composerId: 'legacy-123',
 88 |         hasLoaded: true,
 89 |         text: '',
 90 |         richText: '',
 91 |         conversation: [
 92 |           {
 93 |             type: 1,
 94 |             bubbleId: 'bubble-1',
 95 |             text: 'First message',
 96 |             relevantFiles: [],
 97 |             suggestedCodeBlocks: [],
 98 |             attachedFoldersNew: []
 99 |           },
100 |           {
101 |             type: 2,
102 |             bubbleId: 'bubble-2',
103 |             text: 'Second message',
104 |             relevantFiles: [],
105 |             suggestedCodeBlocks: [],
106 |             attachedFoldersNew: []
107 |           }
108 |         ]
109 |       };
110 | 
111 |       const messages = parser.extractMessages(legacyConversation);
112 | 
113 |       expect(messages).toHaveLength(2);
114 |       expect(messages[0].text).toBe('First message');
115 |       expect(messages[1].text).toBe('Second message');
116 |     });
117 | 
118 |     it('should return empty array for modern conversation', () => {
119 |       const modernConversation = {
120 |         composerId: 'modern-123',
121 |         _v: 2,
122 |         hasLoaded: true,
123 |         text: '',
124 |         richText: '',
125 |         fullConversationHeadersOnly: [
126 |           {
127 |             type: 1,
128 |             bubbleId: 'bubble-1'
129 |           }
130 |         ]
131 |       };
132 | 
133 |       const messages = parser.extractMessages(modernConversation);
134 | 
135 |       expect(messages).toHaveLength(0);
136 |     });
137 |   });
138 | 
139 |   describe('extractCodeBlocks', () => {
140 |     it('should extract code blocks from legacy conversation', () => {
141 |       const codeBlock = {
142 |         language: 'typescript',
143 |         code: 'console.log("Hello");',
144 |         filename: 'test.ts'
145 |       };
146 | 
147 |       const legacyConversation = {
148 |         composerId: 'legacy-123',
149 |         hasLoaded: true,
150 |         text: '',
151 |         richText: '',
152 |         conversation: [
153 |           {
154 |             type: 1,
155 |             bubbleId: 'bubble-1',
156 |             text: 'Message with code',
157 |             relevantFiles: [],
158 |             suggestedCodeBlocks: [codeBlock],
159 |             attachedFoldersNew: []
160 |           }
161 |         ]
162 |       };
163 | 
164 |       const codeBlocks = parser.extractCodeBlocks(legacyConversation);
165 | 
166 |       expect(codeBlocks).toHaveLength(1);
167 |       expect(codeBlocks[0]).toEqual(codeBlock);
168 |     });
169 | 
170 |     it('should return empty array when no code blocks exist', () => {
171 |       const legacyConversation = {
172 |         composerId: 'legacy-123',
173 |         hasLoaded: true,
174 |         text: '',
175 |         richText: '',
176 |         conversation: [
177 |           {
178 |             type: 1,
179 |             bubbleId: 'bubble-1',
180 |             text: 'Message without code',
181 |             relevantFiles: [],
182 |             suggestedCodeBlocks: [],
183 |             attachedFoldersNew: []
184 |           }
185 |         ]
186 |       };
187 | 
188 |       const codeBlocks = parser.extractCodeBlocks(legacyConversation);
189 | 
190 |       expect(codeBlocks).toHaveLength(0);
191 |     });
192 | 
193 |     it('should return empty array for modern conversation', () => {
194 |       const modernConversation = {
195 |         composerId: 'modern-123',
196 |         _v: 2,
197 |         hasLoaded: true,
198 |         text: '',
199 |         richText: '',
200 |         fullConversationHeadersOnly: []
201 |       };
202 | 
203 |       const codeBlocks = parser.extractCodeBlocks(modernConversation);
204 | 
205 |       expect(codeBlocks).toHaveLength(0);
206 |     });
207 |   });
208 | 
209 |   describe('extractFileReferences', () => {
210 |     it('should extract file references from legacy conversation', () => {
211 |       const legacyConversation = {
212 |         composerId: 'legacy-123',
213 |         hasLoaded: true,
214 |         text: '',
215 |         richText: '',
216 |         conversation: [
217 |           {
218 |             type: 1,
219 |             bubbleId: 'bubble-1',
220 |             text: 'First message',
221 |             relevantFiles: ['file1.ts', 'file2.js'],
222 |             suggestedCodeBlocks: [],
223 |             attachedFoldersNew: []
224 |           },
225 |           {
226 |             type: 2,
227 |             bubbleId: 'bubble-2',
228 |             text: 'Second message',
229 |             relevantFiles: ['file3.py', 'file1.ts'], // Duplicate file1.ts
230 |             suggestedCodeBlocks: [],
231 |             attachedFoldersNew: []
232 |           }
233 |         ]
234 |       };
235 | 
236 |       const files = parser.extractFileReferences(legacyConversation);
237 | 
238 |       expect(files).toHaveLength(3);
239 |       expect(files).toContain('file1.ts');
240 |       expect(files).toContain('file2.js');
241 |       expect(files).toContain('file3.py');
242 |       // Should remove duplicates
243 |       expect(files.filter(f => f === 'file1.ts')).toHaveLength(1);
244 |     });
245 | 
246 |     it('should return empty array when no file references exist', () => {
247 |       const legacyConversation = {
248 |         composerId: 'legacy-123',
249 |         hasLoaded: true,
250 |         text: '',
251 |         richText: '',
252 |         conversation: [
253 |           {
254 |             type: 1,
255 |             bubbleId: 'bubble-1',
256 |             text: 'Message without files',
257 |             relevantFiles: [],
258 |             suggestedCodeBlocks: [],
259 |             attachedFoldersNew: []
260 |           }
261 |         ]
262 |       };
263 | 
264 |       const files = parser.extractFileReferences(legacyConversation);
265 | 
266 |       expect(files).toHaveLength(0);
267 |     });
268 |   });
269 | 
270 |   describe('extractAttachedFolders', () => {
271 |     it('should extract attached folders from legacy conversation', () => {
272 |       const legacyConversation = {
273 |         composerId: 'legacy-123',
274 |         hasLoaded: true,
275 |         text: '',
276 |         richText: '',
277 |         conversation: [
278 |           {
279 |             type: 1,
280 |             bubbleId: 'bubble-1',
281 |             text: 'First message',
282 |             relevantFiles: [],
283 |             suggestedCodeBlocks: [],
284 |             attachedFoldersNew: ['src/', 'tests/']
285 |           },
286 |           {
287 |             type: 2,
288 |             bubbleId: 'bubble-2',
289 |             text: 'Second message',
290 |             relevantFiles: [],
291 |             suggestedCodeBlocks: [],
292 |             attachedFoldersNew: ['docs/', 'src/'] // Duplicate src/
293 |           }
294 |         ]
295 |       };
296 | 
297 |       const folders = parser.extractAttachedFolders(legacyConversation);
298 | 
299 |       expect(folders).toHaveLength(3);
300 |       expect(folders).toContain('src/');
301 |       expect(folders).toContain('tests/');
302 |       expect(folders).toContain('docs/');
303 |       // Should remove duplicates
304 |       expect(folders.filter(f => f === 'src/')).toHaveLength(1);
305 |     });
306 | 
307 |     it('should return empty array when no attached folders exist', () => {
308 |       const legacyConversation = {
309 |         composerId: 'legacy-123',
310 |         hasLoaded: true,
311 |         text: '',
312 |         richText: '',
313 |         conversation: [
314 |           {
315 |             type: 1,
316 |             bubbleId: 'bubble-1',
317 |             text: 'Message without folders',
318 |             relevantFiles: [],
319 |             suggestedCodeBlocks: [],
320 |             attachedFoldersNew: []
321 |           }
322 |         ]
323 |       };
324 | 
325 |       const folders = parser.extractAttachedFolders(legacyConversation);
326 | 
327 |       expect(folders).toHaveLength(0);
328 |     });
329 |   });
330 | 
331 |   describe('extractTimestamps', () => {
332 |     it('should extract valid timestamps from legacy conversation', () => {
333 |       const legacyConversation = {
334 |         composerId: 'legacy-123',
335 |         hasLoaded: true,
336 |         text: '',
337 |         richText: '',
338 |         conversation: [
339 |           {
340 |             type: 1,
341 |             bubbleId: 'bubble-1',
342 |             text: 'First message',
343 |             relevantFiles: [],
344 |             suggestedCodeBlocks: [],
345 |             attachedFoldersNew: [],
346 |             timestamp: '2023-01-01T12:00:00Z'
347 |           },
348 |           {
349 |             type: 2,
350 |             bubbleId: 'bubble-2',
351 |             text: 'Second message',
352 |             relevantFiles: [],
353 |             suggestedCodeBlocks: [],
354 |             attachedFoldersNew: [],
355 |             timestamp: '2023-01-01T13:00:00Z'
356 |           }
357 |         ]
358 |       };
359 | 
360 |       const timestamps = parser.extractTimestamps(legacyConversation);
361 | 
362 |       expect(timestamps).toHaveLength(2);
363 |       expect(timestamps[0]).toEqual(new Date('2023-01-01T12:00:00Z'));
364 |       expect(timestamps[1]).toEqual(new Date('2023-01-01T13:00:00Z'));
365 |     });
366 | 
367 |     it('should skip invalid timestamps', () => {
368 |       const legacyConversation = {
369 |         composerId: 'legacy-123',
370 |         hasLoaded: true,
371 |         text: '',
372 |         richText: '',
373 |         conversation: [
374 |           {
375 |             type: 1,
376 |             bubbleId: 'bubble-1',
377 |             text: 'First message',
378 |             relevantFiles: [],
379 |             suggestedCodeBlocks: [],
380 |             attachedFoldersNew: [],
381 |             timestamp: 'invalid-date'
382 |           },
383 |           {
384 |             type: 2,
385 |             bubbleId: 'bubble-2',
386 |             text: 'Second message',
387 |             relevantFiles: [],
388 |             suggestedCodeBlocks: [],
389 |             attachedFoldersNew: [],
390 |             timestamp: '2023-01-01T13:00:00Z'
391 |           }
392 |         ]
393 |       };
394 | 
395 |       const timestamps = parser.extractTimestamps(legacyConversation);
396 | 
397 |       expect(timestamps).toHaveLength(1);
398 |       expect(timestamps[0]).toEqual(new Date('2023-01-01T13:00:00Z'));
399 |     });
400 |   });
401 | 
402 |   describe('getConversationMetadata', () => {
403 |     it('should return metadata for legacy conversation', () => {
404 |       const codeBlock = {
405 |         language: 'typescript',
406 |         code: 'console.log("Hello");',
407 |         filename: 'test.ts'
408 |       };
409 | 
410 |       const legacyConversation = {
411 |         composerId: 'legacy-123',
412 |         hasLoaded: true,
413 |         text: '',
414 |         richText: '',
415 |         storedSummary: 'This is a summary',
416 |         conversation: [
417 |           {
418 |             type: 1,
419 |             bubbleId: 'bubble-1',
420 |             text: 'Message with code',
421 |             relevantFiles: ['file1.ts', 'file2.js'],
422 |             suggestedCodeBlocks: [codeBlock],
423 |             attachedFoldersNew: ['src/']
424 |           }
425 |         ]
426 |       };
427 | 
428 |       const metadata = parser.getConversationMetadata(legacyConversation);
429 | 
430 |       expect(metadata.format).toBe('legacy');
431 |       expect(metadata.messageCount).toBe(1);
432 |       expect(metadata.hasCodeBlocks).toBe(true);
433 |       expect(metadata.codeBlockCount).toBe(1);
434 |       expect(metadata.fileCount).toBe(2);
435 |       expect(metadata.folderCount).toBe(1);
436 |       expect(metadata.hasStoredSummary).toBe(true);
437 |       expect(metadata.size).toBeGreaterThan(0);
438 |     });
439 | 
440 |     it('should return metadata for modern conversation', () => {
441 |       const modernConversation = {
442 |         composerId: 'modern-123',
443 |         _v: 2,
444 |         hasLoaded: true,
445 |         text: '',
446 |         richText: '',
447 |         fullConversationHeadersOnly: [
448 |           { type: 1, bubbleId: 'bubble-1' },
449 |           { type: 2, bubbleId: 'bubble-2' }
450 |         ]
451 |       };
452 | 
453 |       const metadata = parser.getConversationMetadata(modernConversation);
454 | 
455 |       expect(metadata.format).toBe('modern');
456 |       expect(metadata.messageCount).toBe(2);
457 |       expect(metadata.hasCodeBlocks).toBe(false);
458 |       expect(metadata.codeBlockCount).toBe(0);
459 |       expect(metadata.fileCount).toBe(0);
460 |       expect(metadata.folderCount).toBe(0);
461 |       expect(metadata.hasStoredSummary).toBe(false);
462 |       expect(metadata.size).toBeGreaterThan(0);
463 |     });
464 |   });
465 | 
466 |   describe('searchInConversation', () => {
467 |     it('should find matches in conversation text', () => {
468 |       const legacyConversation = {
469 |         composerId: 'legacy-123',
470 |         hasLoaded: true,
471 |         text: '',
472 |         richText: '',
473 |         conversation: [
474 |           {
475 |             type: 1,
476 |             bubbleId: 'bubble-1',
477 |             text: 'This is a test message',
478 |             relevantFiles: [],
479 |             suggestedCodeBlocks: [],
480 |             attachedFoldersNew: []
481 |           },
482 |           {
483 |             type: 2,
484 |             bubbleId: 'bubble-2',
485 |             text: 'Another test with different content',
486 |             relevantFiles: [],
487 |             suggestedCodeBlocks: [],
488 |             attachedFoldersNew: []
489 |           }
490 |         ]
491 |       };
492 | 
493 |       const results = parser.searchInConversation(legacyConversation, 'test');
494 | 
495 |       expect(results).toHaveLength(2);
496 |       expect(results[0].messageIndex).toBe(0);
497 |       expect(results[0].message.text).toBe('This is a test message');
498 |       expect(results[0].matchPositions).toContain(10); // Position of 'test'
499 |       expect(results[1].messageIndex).toBe(1);
500 |       expect(results[1].message.text).toBe('Another test with different content');
501 |     });
502 | 
503 |     it('should handle case sensitive search', () => {
504 |       const legacyConversation = {
505 |         composerId: 'legacy-123',
506 |         hasLoaded: true,
507 |         text: '',
508 |         richText: '',
509 |         conversation: [
510 |           {
511 |             type: 1,
512 |             bubbleId: 'bubble-1',
513 |             text: 'This is a Test message',
514 |             relevantFiles: [],
515 |             suggestedCodeBlocks: [],
516 |             attachedFoldersNew: []
517 |           }
518 |         ]
519 |       };
520 | 
521 |       const caseSensitiveResults = parser.searchInConversation(legacyConversation, 'test', true);
522 |       const caseInsensitiveResults = parser.searchInConversation(legacyConversation, 'test', false);
523 | 
524 |       expect(caseSensitiveResults).toHaveLength(0);
525 |       expect(caseInsensitiveResults).toHaveLength(1);
526 |     });
527 |   });
528 | 
529 |   describe('containsSummarization', () => {
530 |     it('should return true when conversation contains summarization keywords', () => {
531 |       const legacyConversation = {
532 |         composerId: 'legacy-123',
533 |         hasLoaded: true,
534 |         text: '',
535 |         richText: '',
536 |         conversation: [
537 |           {
538 |             type: 1,
539 |             bubbleId: 'bubble-1',
540 |             text: 'Please summarize this document',
541 |             relevantFiles: [],
542 |             suggestedCodeBlocks: [],
543 |             attachedFoldersNew: []
544 |           }
545 |         ]
546 |       };
547 | 
548 |       const result = parser.containsSummarization(legacyConversation);
549 | 
550 |       expect(result).toBe(true);
551 |     });
552 | 
553 |     it('should return false when conversation does not contain summarization keywords', () => {
554 |       const legacyConversation = {
555 |         composerId: 'legacy-123',
556 |         hasLoaded: true,
557 |         text: '',
558 |         richText: '',
559 |         conversation: [
560 |           {
561 |             type: 1,
562 |             bubbleId: 'bubble-1',
563 |             text: 'This is a regular message',
564 |             relevantFiles: [],
565 |             suggestedCodeBlocks: [],
566 |             attachedFoldersNew: []
567 |           }
568 |         ]
569 |       };
570 | 
571 |       const result = parser.containsSummarization(legacyConversation);
572 | 
573 |       expect(result).toBe(false);
574 |     });
575 |   });
576 | 
577 |   describe('parseBubbleMessage', () => {
578 |     it('should parse valid bubble message JSON', () => {
579 |       const bubbleMessage = {
580 |         type: 1,
581 |         bubbleId: 'bubble-123',
582 |         text: 'Hello from bubble',
583 |         relevantFiles: ['file1.ts'],
584 |         suggestedCodeBlocks: []
585 |       };
586 | 
587 |       const jsonString = JSON.stringify(bubbleMessage);
588 |       const result = parser.parseBubbleMessage(jsonString);
589 | 
590 |       expect(result).toEqual(bubbleMessage);
591 |     });
592 | 
593 |     it('should throw error for invalid bubble message JSON', () => {
594 |       const invalidJson = '{ invalid json }';
595 | 
596 |       expect(() => parser.parseBubbleMessage(invalidJson))
597 |         .toThrow('Failed to parse bubble message JSON');
598 |     });
599 | 
600 |     it('should throw error for invalid bubble message structure', () => {
601 |       const invalidBubble = {
602 |         text: 'Missing required fields'
603 |       };
604 | 
605 |       const jsonString = JSON.stringify(invalidBubble);
606 | 
607 |       expect(() => parser.parseBubbleMessage(jsonString))
608 |         .toThrow('Invalid bubble message format');
609 |     });
610 |   });
611 | });
```

--------------------------------------------------------------------------------
/.roo/rules/dev_workflow.md:
--------------------------------------------------------------------------------

```markdown
  1 | ---
  2 | description: Guide for using Task Master to manage task-driven development workflows
  3 | globs: **/*
  4 | alwaysApply: true
  5 | ---
  6 | # Task Master Development Workflow
  7 | 
  8 | This guide outlines the typical process for using Task Master to manage software development projects.
  9 | 
 10 | ## Primary Interaction: MCP Server vs. CLI
 11 | 
 12 | Task Master offers two primary ways to interact:
 13 | 
 14 | 1.  **MCP Server (Recommended for Integrated Tools)**:
 15 |     - For AI agents and integrated development environments (like Roo Code), interacting via the **MCP server is the preferred method**.
 16 |     - The MCP server exposes Task Master functionality through a set of tools (e.g., `get_tasks`, `add_subtask`).
 17 |     - This method offers better performance, structured data exchange, and richer error handling compared to CLI parsing.
 18 |     - Refer to [`mcp.md`](mdc:.roo/rules/mcp.md) for details on the MCP architecture and available tools. (Note: `mcp.md` is not present in `.roo/rules/` in this repository snapshot — the equivalent guidance lives at `.cursor/rules/mcp.mdc`.)
 19 |     - A comprehensive list and description of MCP tools and their corresponding CLI commands can be found in [`taskmaster.md`](mdc:.roo/rules/taskmaster.md).
 20 |     - **Restart the MCP server** if core logic in `scripts/modules` or MCP tool/direct function definitions change.
 21 | 
 22 | 2.  **`task-master` CLI (For Users & Fallback)**:
 23 |     - The global `task-master` command provides a user-friendly interface for direct terminal interaction.
 24 |     - It can also serve as a fallback if the MCP server is inaccessible or a specific function isn't exposed via MCP.
 25 |     - Install globally with `npm install -g task-master-ai` or use locally via `npx task-master-ai ...`.
 26 |     - The CLI commands often mirror the MCP tools (e.g., `task-master list` corresponds to `get_tasks`).
 27 |     - Refer to [`taskmaster.md`](mdc:.roo/rules/taskmaster.md) for a detailed command reference.
 28 | 
 29 | ## Standard Development Workflow Process
 30 | 
 31 | -   Start new projects by running the `initialize_project` tool / `task-master init` or `parse_prd` / `task-master parse-prd --input='<prd-file.txt>'` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)) to generate the initial `tasks.json`
 32 | -   Begin coding sessions with `get_tasks` / `task-master list` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)) to see current tasks, status, and IDs
 33 | -   Determine the next task to work on using `next_task` / `task-master next` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)).
 34 | -   Analyze task complexity with `analyze_project_complexity` / `task-master analyze-complexity --research` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)) before breaking down tasks
 35 | -   Review complexity report using `complexity_report` / `task-master complexity-report` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)).
 36 | -   Select tasks based on dependencies (all marked 'done'), priority level, and ID order
 37 | -   Clarify tasks by checking task files in tasks/ directory or asking for user input
 38 | -   View specific task details using `get_task` / `task-master show <id>` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)) to understand implementation requirements
 39 | -   Break down complex tasks using `expand_task` / `task-master expand --id=<id> --force --research` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)) with appropriate flags like `--force` (to replace existing subtasks) and `--research`.
 40 | -   Clear existing subtasks if needed using `clear_subtasks` / `task-master clear-subtasks --id=<id>` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)) before regenerating
 41 | -   Implement code following task details, dependencies, and project standards
 42 | -   Verify tasks according to test strategies before marking as complete (See [`tests.md`](mdc:.roo/rules/tests.md))
 43 | -   Mark completed tasks with `set_task_status` / `task-master set-status --id=<id> --status=done` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md))
 44 | -   Update dependent tasks when implementation differs from original plan using `update` / `task-master update --from=<id> --prompt="..."` or `update_task` / `task-master update-task --id=<id> --prompt="..."` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md))
 45 | -   Add new tasks discovered during implementation using `add_task` / `task-master add-task --prompt="..." --research` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)).
 46 | -   Add new subtasks as needed using `add_subtask` / `task-master add-subtask --parent=<id> --title="..."` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)).
 47 | -   Append notes or details to subtasks using `update_subtask` / `task-master update-subtask --id=<subtaskId> --prompt='Add implementation notes here...\nMore details...'` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)).
 48 | -   Generate task files with `generate` / `task-master generate` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)) after updating tasks.json
 49 | -   Maintain valid dependency structure with `add_dependency`/`remove_dependency` tools or `task-master add-dependency`/`remove-dependency` commands, `validate_dependencies` / `task-master validate-dependencies`, and `fix_dependencies` / `task-master fix-dependencies` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)) when needed
 50 | -   Respect dependency chains and task priorities when selecting work
 51 | -   Report progress regularly using `get_tasks` / `task-master list`
 52 | -   Reorganize tasks as needed using `move_task` / `task-master move --from=<id> --to=<id>` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)) to change task hierarchy or ordering
 53 | 
 54 | ## Task Complexity Analysis
 55 | 
 56 | -   Run `analyze_project_complexity` / `task-master analyze-complexity --research` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)) for comprehensive analysis
 57 | -   Review complexity report via `complexity_report` / `task-master complexity-report` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)) for a formatted, readable version.
 58 | -   Focus on tasks with highest complexity scores (8-10) for detailed breakdown
 59 | -   Use analysis results to determine appropriate subtask allocation
 60 | -   Note that reports are automatically used by the `expand_task` tool/command
 61 | 
 62 | ## Task Breakdown Process
 63 | 
 64 | -   Use `expand_task` / `task-master expand --id=<id>`. It automatically uses the complexity report if found, otherwise generates default number of subtasks.
 65 | -   Use `--num=<number>` to specify an explicit number of subtasks, overriding defaults or complexity report recommendations.
 66 | -   Add `--research` flag to leverage Perplexity AI for research-backed expansion.
 67 | -   Add `--force` flag to clear existing subtasks before generating new ones (default is to append).
 68 | -   Use `--prompt="<context>"` to provide additional context when needed.
 69 | -   Review and adjust generated subtasks as necessary.
 70 | -   Use `expand_all` tool or `task-master expand --all` to expand multiple pending tasks at once, respecting flags like `--force` and `--research`.
 71 | -   If subtasks need complete replacement (regardless of the `--force` flag on `expand`), clear them first with `clear_subtasks` / `task-master clear-subtasks --id=<id>`.
 72 | 
 73 | ## Implementation Drift Handling
 74 | 
 75 | -   When implementation differs significantly from planned approach
 76 | -   When future tasks need modification due to current implementation choices
 77 | -   When new dependencies or requirements emerge
 78 | -   Use `update` / `task-master update --from=<futureTaskId> --prompt='<explanation>\nUpdate context...' --research` to update multiple future tasks.
 79 | -   Use `update_task` / `task-master update-task --id=<taskId> --prompt='<explanation>\nUpdate context...' --research` to update a single specific task.
 80 | 
 81 | ## Task Status Management
 82 | 
 83 | -   Use 'pending' for tasks ready to be worked on
 84 | -   Use 'done' for completed and verified tasks
 85 | -   Use 'deferred' for postponed tasks
 86 | -   Add custom status values as needed for project-specific workflows
 87 | 
 88 | ## Task Structure Fields
 89 | 
 90 | - **id**: Unique identifier for the task (Example: `1`, `1.1`)
 91 | - **title**: Brief, descriptive title (Example: `"Initialize Repo"`)
 92 | - **description**: Concise summary of what the task involves (Example: `"Create a new repository, set up initial structure."`)
 93 | - **status**: Current state of the task (Example: `"pending"`, `"done"`, `"deferred"`)
 94 | - **dependencies**: IDs of prerequisite tasks (Example: `[1, 2.1]`)
 95 |     - Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending)
 96 |     - This helps quickly identify which prerequisite tasks are blocking work
 97 | - **priority**: Importance level (Example: `"high"`, `"medium"`, `"low"`)
 98 | - **details**: In-depth implementation instructions (Example: `"Use GitHub client ID/secret, handle callback, set session token."`) 
 99 | - **testStrategy**: Verification approach (Example: `"Deploy and call endpoint to confirm 'Hello World' response."`) 
100 | - **subtasks**: List of smaller, more specific tasks (Example: `[{"id": 1, "title": "Configure OAuth", ...}]`) 
101 | - Refer to task structure details (previously linked to `tasks.md`).
102 | 
103 | ## Configuration Management (Updated)
104 | 
105 | Taskmaster configuration is managed through two main mechanisms:
106 | 
107 | 1.  **`.taskmasterconfig` File (Primary):**
108 |     *   Located in the project root directory.
109 |     *   Stores most configuration settings: AI model selections (main, research, fallback), parameters (max tokens, temperature), logging level, default subtasks/priority, project name, etc.
110 |     *   **Managed via `task-master models --setup` command.** Do not edit manually unless you know what you are doing.
111 |     *   **View/Set specific models via `task-master models` command or `models` MCP tool.**
112 |     *   Created automatically when you run `task-master models --setup` for the first time.
113 | 
114 | 2.  **Environment Variables (`.env` / `mcp.json`):**
115 |     *   Used **only** for sensitive API keys and specific endpoint URLs.
116 |     *   Place API keys (one per provider) in a `.env` file in the project root for CLI usage.
117 |     *   For MCP/Roo Code integration, configure these keys in the `env` section of `.roo/mcp.json`.
118 |     *   Available keys/variables: See `assets/env.example` or the Configuration section in the command reference (previously linked to `taskmaster.md`).
119 | 
120 | **Important:** Non-API key settings (like model selections, `MAX_TOKENS`, `TASKMASTER_LOG_LEVEL`) are **no longer configured via environment variables**. Use the `task-master models` command (or `--setup` for interactive configuration) or the `models` MCP tool.
121 | **If AI commands FAIL in MCP** verify that the API key for the selected provider is present in the `env` section of `.roo/mcp.json`.
122 | **If AI commands FAIL in CLI** verify that the API key for the selected provider is present in the `.env` file in the root of the project.
123 | 
124 | ## Determining the Next Task
125 | 
126 | - Run `next_task` / `task-master next` to show the next task to work on.
127 | - The command identifies tasks with all dependencies satisfied
128 | - Tasks are prioritized by priority level, dependency count, and ID
129 | - The command shows comprehensive task information including:
130 |     - Basic task details and description
131 |     - Implementation details
132 |     - Subtasks (if they exist)
133 |     - Contextual suggested actions
134 | - Recommended before starting any new development work
135 | - Respects your project's dependency structure
136 | - Ensures tasks are completed in the appropriate sequence
137 | - Provides ready-to-use commands for common task actions
138 | 
139 | ## Viewing Specific Task Details
140 | 
141 | - Run `get_task` / `task-master show <id>` to view a specific task.
142 | - Use dot notation for subtasks: `task-master show 1.2` (shows subtask 2 of task 1)
143 | - Displays comprehensive information similar to the next command, but for a specific task
144 | - For parent tasks, shows all subtasks and their current status
145 | - For subtasks, shows parent task information and relationship
146 | - Provides contextual suggested actions appropriate for the specific task
147 | - Useful for examining task details before implementation or checking status
148 | 
149 | ## Managing Task Dependencies
150 | 
151 | - Use `add_dependency` / `task-master add-dependency --id=<id> --depends-on=<id>` to add a dependency.
152 | - Use `remove_dependency` / `task-master remove-dependency --id=<id> --depends-on=<id>` to remove a dependency.
153 | - The system prevents circular dependencies and duplicate dependency entries
154 | - Dependencies are checked for existence before being added or removed
155 | - Task files are automatically regenerated after dependency changes
156 | - Dependencies are visualized with status indicators in task listings and files
157 | 
158 | ## Task Reorganization
159 | 
160 | - Use `move_task` / `task-master move --from=<id> --to=<id>` to move tasks or subtasks within the hierarchy
161 | - This command supports several use cases:
162 |   - Moving a standalone task to become a subtask (e.g., `--from=5 --to=7`)
163 |   - Moving a subtask to become a standalone task (e.g., `--from=5.2 --to=7`) 
164 |   - Moving a subtask to a different parent (e.g., `--from=5.2 --to=7.3`)
165 |   - Reordering subtasks within the same parent (e.g., `--from=5.2 --to=5.4`)
166 |   - Moving a task to a new, non-existent ID position (e.g., `--from=5 --to=25`)
167 |   - Moving multiple tasks at once using comma-separated IDs (e.g., `--from=10,11,12 --to=16,17,18`)
168 | - The system includes validation to prevent data loss:
169 |   - Allows moving to non-existent IDs by creating placeholder tasks
170 |   - Prevents moving to existing task IDs that have content (to avoid overwriting)
171 |   - Validates source tasks exist before attempting to move them
172 | - The system maintains proper parent-child relationships and dependency integrity
173 | - Task files are automatically regenerated after the move operation
174 | - This provides greater flexibility in organizing and refining your task structure as project understanding evolves
175 | - This is especially useful when dealing with potential merge conflicts arising from teams creating tasks on separate branches. Solve these conflicts very easily by moving your tasks and keeping theirs.
176 | 
177 | ## Iterative Subtask Implementation
178 | 
179 | Once a task has been broken down into subtasks using `expand_task` or similar methods, follow this iterative process for implementation:
180 | 
181 | 1.  **Understand the Goal (Preparation):**
182 |     *   Use `get_task` / `task-master show <subtaskId>` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)) to thoroughly understand the specific goals and requirements of the subtask.
183 | 
184 | 2.  **Initial Exploration & Planning (Iteration 1):**
185 |     *   This is the first attempt at creating a concrete implementation plan.
186 |     *   Explore the codebase to identify the precise files, functions, and even specific lines of code that will need modification.
187 |     *   Determine the intended code changes (diffs) and their locations.
188 |     *   Gather *all* relevant details from this exploration phase.
189 | 
190 | 3.  **Log the Plan:**
191 |     *   Run `update_subtask` / `task-master update-subtask --id=<subtaskId> --prompt='<detailed plan>'`.
192 |     *   Provide the *complete and detailed* findings from the exploration phase in the prompt. Include file paths, line numbers, proposed diffs, reasoning, and any potential challenges identified. Do not omit details. The goal is to create a rich, timestamped log within the subtask's `details`.
193 | 
194 | 4.  **Verify the Plan:**
195 |     *   Run `get_task` / `task-master show <subtaskId>` again to confirm that the detailed implementation plan has been successfully appended to the subtask's details.
196 | 
197 | 5.  **Begin Implementation:**
198 |     *   Set the subtask status using `set_task_status` / `task-master set-status --id=<subtaskId> --status=in-progress`.
199 |     *   Start coding based on the logged plan.
200 | 
201 | 6.  **Refine and Log Progress (Iteration 2+):**
202 |     *   As implementation progresses, you will encounter challenges, discover nuances, or confirm successful approaches.
203 |     *   **Before appending new information**: Briefly review the *existing* details logged in the subtask (using `get_task` or recalling from context) to ensure the update adds fresh insights and avoids redundancy.
-   **Regularly** use `update_subtask` / `task-master update-subtask --id=<subtaskId> --prompt='<update details>\n- What worked...\n- What did not work...'` to append new findings. (Avoid apostrophes inside the single-quoted prompt — they terminate the shell string.)
205 |     *   **Crucially, log:**
206 |         *   What worked ("fundamental truths" discovered).
207 |         *   What didn't work and why (to avoid repeating mistakes).
208 |         *   Specific code snippets or configurations that were successful.
209 |         *   Decisions made, especially if confirmed with user input.
210 |         *   Any deviations from the initial plan and the reasoning.
211 |     *   The objective is to continuously enrich the subtask's details, creating a log of the implementation journey that helps the AI (and human developers) learn, adapt, and avoid repeating errors.
212 | 
213 | 7.  **Review & Update Rules (Post-Implementation):**
214 |     *   Once the implementation for the subtask is functionally complete, review all code changes and the relevant chat history.
215 |     *   Identify any new or modified code patterns, conventions, or best practices established during the implementation.
216 |     *   Create new or update existing rules following internal guidelines (previously linked to `cursor_rules.md` and `self_improve.md`).
217 | 
218 | 8.  **Mark Task Complete:**
219 |     *   After verifying the implementation and updating any necessary rules, mark the subtask as completed: `set_task_status` / `task-master set-status --id=<subtaskId> --status=done`.
220 | 
221 | 9.  **Commit Changes (If using Git):**
222 |     *   Stage the relevant code changes and any updated/new rule files (`git add .`).
223 |     *   Craft a comprehensive Git commit message summarizing the work done for the subtask, including both code implementation and any rule adjustments.
224 |     *   Execute the commit command directly in the terminal (e.g., `git commit -m 'feat(module): Implement feature X for subtask <subtaskId>\n\n- Details about changes...\n- Updated rule Y for pattern Z'`).
225 |     *   Consider if a Changeset is needed according to internal versioning guidelines (previously linked to `changeset.md`). If so, run `npm run changeset`, stage the generated file, and amend the commit or create a new one.
226 | 
227 | 10. **Proceed to Next Subtask:**
228 |     *   Identify the next subtask (e.g., using `next_task` / `task-master next`).
229 | 
230 | ## Code Analysis & Refactoring Techniques
231 | 
232 | - **Top-Level Function Search**:
233 |     - Useful for understanding module structure or planning refactors.
234 |     - Use grep/ripgrep to find exported functions/constants:
235 |       `rg "export (async function|function|const) \w+"` or similar patterns.
236 |     - Can help compare functions between files during migrations or identify potential naming conflicts.
237 | 
238 | ---
239 | *This workflow provides a general guideline. Adapt it based on your specific project needs and team practices.*
```

--------------------------------------------------------------------------------
/src/server.ts:
--------------------------------------------------------------------------------

```typescript
  1 | #!/usr/bin/env node
  2 | 
  3 | /*
  4 |  * WORKFLOW GUIDANCE FOR AI ASSISTANTS:
  5 |  *
  6 | 
  7 |  * **ALWAYS START WITH PROJECT FILTERING** for project-specific analysis:
  8 |  * 1. DISCOVERY: Use list_conversations with projectPath parameter to find project-specific conversations
  9 |  * 2. ANALYTICS: Use get_conversation_analytics with projectPath and ["files", "languages"] breakdowns
 10 |  *    - Files/languages breakdowns contain conversation IDs in their arrays!
 11 |  * 3. DEEP DIVE: Use get_conversation with specific conversation IDs from step 1 or 2
 12 |  * 4. ANALYSIS: Use analytics tools (find_related, extract_elements) for insights
 13 |  * 5. DATE FILTERING: Use get_system_info first when applying date filters to search_conversations
 14 |  *
 15 |  * RECOMMENDED PATTERN FOR PROJECT ANALYSIS:
 16 |  * - list_conversations(projectPath: "project-name", startDate: "YYYY-MM-DD", endDate: "YYYY-MM-DD")
 17 |  * - get_conversation_analytics(projectPath: "project-name", includeBreakdowns: ["files", "languages"])
 18 |  * - Extract conversation IDs from files/languages.conversations arrays
 19 |  * - get_conversation(conversationId: "id-from-breakdown") for each relevant conversation
 20 |  *
 21 |  * PROJECT PATH EXAMPLES:
 22 |  * - "my-app" (project name)
 23 |  * - "/Users/name/Projects/my-app" (full path)
 24 |  * - "editor-elements" (project name from path like /Users/name/Projects/editor-elements)
 25 |  */
 26 | 
 27 | import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
 28 | import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
 29 | import {
 30 |   listConversations,
 31 |   getConversation,
 32 |   searchConversations,
 33 |   getConversationsByProject
 34 | } from './tools/conversation-tools.js';
 35 | import {
 36 |   getConversationAnalytics,
 37 |   findRelatedConversations
 38 | } from './tools/analytics-tools.js';
 39 | import {
 40 |   extractConversationElements,
 41 |   exportConversationData
 42 | } from './tools/extraction-tools.js';
 43 | import { z } from 'zod';
 44 | import { formatResponse } from './utils/formatter.js';
 45 | 
 46 | const server = new McpServer({ // Single MCP server instance; every tool below is registered on it.
 47 |   name: 'cursor-chat-history-mcp',
 48 |   version: '0.1.0',
 49 | });
 50 | 
 51 | server.tool( // Tool registration: 'list_conversations' — conversation discovery/listing entry point.
 52 |   'list_conversations',
 53 |   'Lists Cursor chats with summaries, titles, and metadata ordered by recency. **HIGHLY RECOMMENDED: Use projectPath parameter to filter conversations by specific project/codebase** - this dramatically improves relevance by finding conversations that actually worked on files in that project. Returns conversation IDs for use with get_conversation tool. WORKFLOW TIP: Start with projectPath filtering for project-specific analysis, then call get_conversation with specific IDs from results. Includes AI-generated summaries by default. Supports date range filtering (YYYY-MM-DD format).',
 54 |   {
 55 |     limit: z.number().min(1).max(100).optional().default(10).describe('Maximum number of conversations to return (1-100)'),
 56 |     minLength: z.number().min(0).optional().default(100).describe('Minimum conversation length in characters to include'),
 57 |     hasCodeBlocks: z.boolean().optional().describe('Filter to conversations that contain code blocks'),
 58 |     keywords: z.array(z.string()).optional().describe('Filter conversations containing any of these exact keywords (literal text matching)'),
 59 |     projectPath: z.string().optional().describe('**RECOMMENDED** Filter conversations by project/codebase name (e.g., "my-app") or full path (e.g., "/Users/name/Projects/my-app"). This finds conversations that actually worked on files in that project, dramatically improving relevance for project-specific analysis.'),
 60 |     filePattern: z.string().optional().describe('Filter conversations mentioning files matching this pattern (e.g., "*.tsx")'),
 61 |     relevantFiles: z.array(z.string()).optional().describe('Filter conversations that reference any of these specific files'),
 62 |     startDate: z.string().optional().describe('Start date for filtering (YYYY-MM-DD). Note: Timestamps may be unreliable.'),
 63 |     endDate: z.string().optional().describe('End date for filtering (YYYY-MM-DD). Note: Timestamps may be unreliable.'),
 64 |     includeEmpty: z.boolean().optional().default(false).describe('Include conversations with no messages'),
 65 |     includeAiSummaries: z.boolean().optional().default(true).describe('Include AI-generated conversation summaries'),
 66 |     includeRelevanceScore: z.boolean().optional().default(false).describe('Include relevance scores when filtering by projectPath'),
 67 |     outputMode: z.enum(['json', 'compact-json']).optional().default('json').describe('Output format: "json" for formatted JSON (default), "compact-json" for minified JSON')
 68 |   },
 69 |   async (input) => { // Handler: input has already been validated against the zod shape above.
 70 |     try {
 71 |       if (input.projectPath && input.includeRelevanceScore) { // Relevance-scored branch: delegate to project-scoped lookup.
 72 |         const projectInput = {
 73 |           projectPath: input.projectPath,
 74 |           filePattern: input.filePattern,
 75 |           orderBy: 'recency' as const,
 76 |           limit: input.limit,
 77 |           fuzzyMatch: false
 78 |         };
 79 |         const result = await getConversationsByProject(projectInput);
 80 | 
 81 |         const transformedResult = {
 82 |           conversations: result.conversations.map(conv => ({
 83 |             ...conv,
 84 |             title: undefined, // NOTE(review): title/aiGeneratedSummary cleared here — presumably not meaningful for project-scoped results; confirm against getConversationsByProject.
 85 |             aiGeneratedSummary: undefined,
 86 |             relevanceScore: conv.relevanceScore
 87 |           })),
 88 |           totalFound: result.totalFound,
 89 |           filters: {
 90 |             limit: input.limit ?? 10, // echo the effective defaults back in the response
 91 |             minLength: input.minLength ?? 100,
 92 |             hasCodeBlocks: input.hasCodeBlocks,
 93 |             keywords: input.keywords,
 94 |             projectPath: input.projectPath,
 95 |             filePattern: input.filePattern,
 96 |             relevantFiles: input.relevantFiles,
 97 |             includeAiSummaries: input.includeAiSummaries
 98 |           }
 99 |         };
100 | 
101 |         return {
102 |           content: [{
103 |             type: 'text',
104 |             text: formatResponse(transformedResult, input.outputMode)
105 |           }]
106 |         };
107 |       } else { // Default branch: plain listing (optionally with AI summaries).
108 |         const mappedInput = {
109 |           limit: input.limit,
110 |           minLength: input.minLength,
111 |           format: 'both' as const,
112 |           hasCodeBlocks: input.hasCodeBlocks,
113 |           keywords: input.keywords,
114 |           projectPath: input.projectPath,
115 |           filePattern: input.filePattern,
116 |           relevantFiles: input.relevantFiles,
117 |           startDate: input.startDate,
118 |           endDate: input.endDate,
119 |           includeEmpty: input.includeEmpty,
120 |           includeAiSummaries: input.includeAiSummaries
121 |         };
122 | 
123 |         const result = await listConversations(mappedInput);
124 |         return {
125 |           content: [{
126 |             type: 'text',
127 |             text: formatResponse(result, input.outputMode)
128 |           }]
129 |         };
130 |       }
131 |     } catch (error) { // Errors are returned as text content rather than propagated to the caller.
132 |       return {
133 |         content: [{
134 |           type: 'text',
135 |           text: `Error: ${error instanceof Error ? error.message : 'Unknown error occurred'}`
136 |         }]
137 |       };
138 |     }
139 |   }
140 | );
141 | 
142 | server.tool( // Tool registration: 'get_conversation' — full transcript retrieval by conversation ID.
143 |   'get_conversation',
144 |   'Retrieves the complete content of a specific Cursor conversation including all messages, code blocks, file references, title, and AI summary. WORKFLOW TIP: Use conversation IDs from list_conversations, search_conversations, or analytics breakdowns (files/languages arrays contain conversation IDs). Use summaryOnly=true to get enhanced summary data without full message content when you need to conserve context.',
145 |   {
146 |     conversationId: z.string().min(1).describe('Conversation ID from list_conversations, search_conversations, or analytics breakdowns'),
147 |     summaryOnly: z.boolean().optional().default(false).describe('Return only enhanced summary data without full message content'),
148 |     outputMode: z.enum(['json', 'compact-json']).optional().default('json').describe('Output format: "json" for formatted JSON (default), "compact-json" for minified JSON')
149 |   },
150 |   async (input) => { // Handler: input has already been validated against the zod shape above.
151 |     try {
152 |       const fullInput = { // Fixed expansion flags: code blocks and file references are always included; metadata is not.
153 |         ...input,
154 |         includeCodeBlocks: true,
155 |         includeFileReferences: true,
156 |         includeMetadata: false,
157 |         resolveBubbles: true
158 |       };
159 |       const result = await getConversation(fullInput);
160 | 
161 |       return {
162 |         content: [{
163 |           type: 'text',
164 |           text: formatResponse(result, input.outputMode)
165 |         }]
166 |       };
167 |     } catch (error) { // Errors are returned as text content rather than propagated to the caller.
168 |       return {
169 |         content: [{
170 |           type: 'text',
171 |           text: `Error: ${error instanceof Error ? error.message : 'Unknown error occurred'}`
172 |         }]
173 |       };
174 |     }
175 |   }
176 | );
177 | 
178 | server.tool( // Tool registration: 'search_conversations' — exact-text search over message content.
179 |   'search_conversations',
180 |   'Searches through Cursor chat content using exact text matching (NOT semantic search) to find relevant discussions. **WARNING: For project-specific searches, use list_conversations with projectPath instead of this tool!** This tool is for searching message content, not project filtering.\n\n**WHEN TO USE THIS TOOL:**\n- Searching for specific technical terms in message content (e.g., "useState", "async/await")\n- Finding conversations mentioning specific error messages\n- Searching for code patterns or function names\n\n**WHEN NOT TO USE THIS TOOL:**\n- ❌ DON\'T use query="project-name" - use list_conversations with projectPath instead\n- ❌ DON\'T search for project names in message content\n- ❌ DON\'T use this for project-specific filtering\n\nSearch methods (all use exact/literal text matching):\n1. Simple text matching: Use query parameter for literal string matching (e.g., "react hooks")\n2. Multi-keyword: Use keywords array with keywordOperator for exact matching\n3. LIKE patterns: Advanced pattern matching with SQL wildcards (% = any chars, _ = single char)\n4. Date range: Filter by message timestamps (YYYY-MM-DD format)\n\nIMPORTANT: When using date filters, call get_system_info first to know today\'s date.\n\nExamples: likePattern="%useState(%" for function calls, keywords=["typescript","interface"] with AND operator.',
181 |   {
182 |     query: z.string().optional().describe('Exact text matching - searches for literal string occurrences in MESSAGE CONTENT (e.g., "react hooks", "useState", "error message"). ❌ DON\'T use for project names - use list_conversations with projectPath instead!'),
183 |     keywords: z.array(z.string().min(1)).optional().describe('Array of keywords for exact text matching - use with keywordOperator to find conversations with specific combinations'),
184 |     keywordOperator: z.enum(['AND', 'OR']).optional().default('OR').describe('How to combine keywords: "AND" = all keywords must be present, "OR" = any keyword can be present'),
185 |     likePattern: z.string().optional().describe('SQL LIKE pattern for advanced searches - use % for any characters, _ for single character. Examples: "%useState(%" for function calls, "%.tsx%" for file types'),
186 |     startDate: z.string().optional().describe('Start date for search (YYYY-MM-DD). Note: Timestamps may be unreliable.'),
187 |     endDate: z.string().optional().describe('End date for search (YYYY-MM-DD). Note: Timestamps may be unreliable.'),
188 |     searchType: z.enum(['all', 'project', 'files', 'code']).optional().default('all').describe('Focus search on specific content types. Use "project" for project-specific searches that leverage file path context.'),
189 |     maxResults: z.number().min(1).max(50).optional().default(10).describe('Maximum number of conversations to return'),
190 |     includeCode: z.boolean().optional().default(true).describe('Include code blocks in search results'),
191 |     outputMode: z.enum(['json', 'compact-json']).optional().default('json').describe('Output format: "json" for formatted JSON (default), "compact-json" for minified JSON')
192 |   },
193 |   async (input) => { // Handler: input has already been validated against the zod shape above.
194 |     try {
195 |       const hasSearchCriteria = (input.query && input.query.trim() !== '' && input.query.trim() !== '?') || input.keywords || input.likePattern; // a lone '?' query is treated as empty
196 |       const hasDateFilter = input.startDate || input.endDate;
197 |       const hasOtherFilters = input.searchType !== 'all';
198 | 
199 |       if (!hasSearchCriteria && !hasDateFilter && !hasOtherFilters) { // reject no-op searches early with a descriptive error
200 |         throw new Error('At least one search criteria (query, keywords, likePattern), date filter (startDate, endDate), or search type filter must be provided');
201 |       }
202 | 
203 |       const fullInput = { // Fixed search options; project-aware fuzzy/path matching only when searchType === 'project'.
204 |         ...input,
205 |         contextLines: 2,
206 |         searchBubbles: true,
207 |         format: 'both' as const,
208 |         highlightMatches: true,
209 |         projectSearch: input.searchType === 'project',
210 |         fuzzyMatch: input.searchType === 'project',
211 |         includePartialPaths: input.searchType === 'project',
212 |         includeFileContent: false,
213 |         minRelevanceScore: 0.1,
214 |         orderBy: 'recency' as const
215 |       };
216 |       const result = await searchConversations(fullInput);
217 | 
218 |       return {
219 |         content: [{
220 |           type: 'text',
221 |           text: formatResponse(result, input.outputMode)
222 |         }]
223 |       };
224 |     } catch (error) { // Errors are returned as text content rather than propagated to the caller.
225 |       return {
226 |         content: [{
227 |           type: 'text',
228 |           text: `Error: ${error instanceof Error ? error.message : 'Unknown error occurred'}`
229 |         }]
230 |       };
231 |     }
232 |   }
233 | );
234 | 
235 | server.tool( // Tool registration: 'get_conversation_analytics' — aggregate usage statistics.
236 |   'get_conversation_analytics',
237 |   'Get comprehensive analytics and statistics about Cursor chats including usage patterns, file activity, programming language distribution, and temporal trends. **BEST PRACTICE: Use projectPath parameter for project-specific analytics** - this analyzes only conversations that worked on files in that project, providing much more relevant insights for understanding coding patterns, file usage, and development activity within a specific codebase. WORKFLOW TIP: Always include "files" and "languages" in breakdowns - these contain conversation IDs in their arrays that you can immediately use with get_conversation tool. Use includeConversationDetails=true when you need the full conversation ID list and basic metadata for follow-up analysis.',
238 |   {
239 |     scope: z.enum(['all', 'recent', 'project']).optional().default('all').describe('Analysis scope: all conversations, recent only, or project-specific. Use "project" with projectPath for focused project analysis.'),
240 |     projectPath: z.string().optional().describe('**HIGHLY RECOMMENDED** Project/codebase name (e.g., "my-app") or full path for project-scoped analysis. When provided, analyzes only conversations that worked on files in that project, giving much more relevant insights about coding patterns and development activity.'),
241 |     recentDays: z.number().min(1).max(365).optional().default(30).describe('Number of recent days to analyze (1-365)'),
242 |     includeBreakdowns: z.array(z.enum(['files', 'languages', 'temporal', 'size'])).optional().default(['files', 'languages']).describe('Types of breakdowns to include in the analysis. IMPORTANT: "files" and "languages" breakdowns contain conversation IDs in their arrays - use these for follow-up analysis!'),
243 |     includeConversationDetails: z.boolean().optional().default(false).describe('Include full conversation ID list and basic metadata (increases response size significantly)'),
244 |     outputMode: z.enum(['json', 'compact-json']).optional().default('json').describe('Output format: "json" for formatted JSON (default), "compact-json" for minified JSON')
245 |   },
246 |   async (input) => { // Handler: validated input is passed through to the analytics implementation unchanged.
247 |     try {
248 |       const result = await getConversationAnalytics(input);
249 |       return {
250 |         content: [{
251 |           type: 'text',
252 |           text: formatResponse(result, input.outputMode)
253 |         }]
254 |       };
255 |     } catch (error) { // Errors are returned as text content rather than propagated to the caller.
256 |       return {
257 |         content: [{
258 |           type: 'text',
259 |           text: `Error: ${error instanceof Error ? error.message : 'Unknown error occurred'}`
260 |         }]
261 |       };
262 |     }
263 |   }
264 | );
265 | 
266 | server.tool(
267 |   'find_related_conversations',
268 |   'Find conversations related to a reference conversation based on shared files, folders, programming languages, similar size, or temporal proximity. Use this to discover related discussions, find conversations about the same codebase/project, identify similar problem-solving sessions, or trace the evolution of ideas across multiple conversations.',
269 |   {
270 |     referenceConversationId: z.string().min(1).describe('ID of the conversation to find related conversations for'),
271 |     relationshipTypes: z.array(z.enum(['files', 'folders', 'languages', 'size', 'temporal'])).optional().default(['files']).describe('Types of relationships to consider when finding related conversations'),
272 |     maxResults: z.number().min(1).max(50).optional().default(10).describe('Maximum number of related conversations to return (1-50)'),
273 |     minScore: z.number().min(0).max(1).optional().default(0.1).describe('Minimum similarity score threshold (0.0-1.0)'),
274 |     includeScoreBreakdown: z.boolean().optional().default(false).describe('Include detailed breakdown of how similarity scores were calculated'),
275 |     outputMode: z.enum(['json', 'compact-json']).optional().default('json').describe('Output format: "json" for formatted JSON (default), "compact-json" for minified JSON')
276 |   },
277 |   async (input) => {
278 |     try {
279 |       const result = await findRelatedConversations(input);
280 |       return {
281 |         content: [{
282 |           type: 'text',
283 |           text: formatResponse(result, input.outputMode)
284 |         }]
285 |       };
286 |     } catch (error) {
287 |       return {
288 |         content: [{
289 |           type: 'text',
290 |           text: `Error: ${error instanceof Error ? error.message : 'Unknown error occurred'}`
291 |         }]
292 |       };
293 |     }
294 |   }
295 | );
296 | 
297 | server.tool(
298 |   'extract_conversation_elements',
299 |   'Extract specific elements from conversations such as file references, code blocks, programming languages, folder paths, metadata, or conversation structure. Use this to build knowledge bases, analyze code patterns, extract reusable snippets, understand project file usage, or prepare data for further analysis and documentation.',
300 |   {
301 |     conversationIds: z.array(z.string()).optional().describe('Specific conversation IDs to extract elements from (if not provided, extracts from all conversations)'),
302 |     elements: z.array(z.enum(['files', 'folders', 'languages', 'codeblocks', 'metadata', 'structure'])).optional().default(['files', 'codeblocks']).describe('Types of elements to extract from conversations'),
303 |     includeContext: z.boolean().optional().default(false).describe('Include surrounding context for extracted elements'),
304 |     groupBy: z.enum(['conversation', 'element', 'none']).optional().default('conversation').describe('How to group the extracted elements in the output'),
305 |     filters: z.object({
306 |       minCodeLength: z.number().optional().describe('Minimum length for code blocks to include'),
307 |       fileExtensions: z.array(z.string()).optional().describe('Only include files with these extensions'),
308 |       languages: z.array(z.string()).optional().describe('Only include code blocks in these programming languages')
309 |     }).optional().describe('Filters to apply when extracting elements'),
310 |     outputMode: z.enum(['json', 'compact-json']).optional().default('json').describe('Output format: "json" for formatted JSON (default), "compact-json" for minified JSON')
311 |   },
312 |   async (input) => {
313 |     try {
314 |       const mappedInput = {
315 |         conversationIds: input.conversationIds,
316 |         elements: input.elements,
317 |         includeContext: input.includeContext,
318 |         groupBy: input.groupBy,
319 |         filters: input.filters
320 |       };
321 | 
322 |       const result = await extractConversationElements(mappedInput);
323 |       return {
324 |         content: [{
325 |           type: 'text',
326 |           text: formatResponse(result, input.outputMode)
327 |         }]
328 |       };
329 |     } catch (error) {
330 |       return {
331 |         content: [{
332 |           type: 'text',
333 |           text: `Error: ${error instanceof Error ? error.message : 'Unknown error occurred'}`
334 |         }]
335 |       };
336 |     }
337 |   }
338 | );
339 | 
340 | server.tool(
341 |   'export_conversation_data',
342 |   'Export chat data in various formats (JSON, CSV, Graph) for external analysis, visualization, or integration with other tools. **TIP: Use filters.projectPath to export only project-specific conversations** for focused analysis of a particular codebase. Use this to create datasets for machine learning, generate reports for stakeholders, prepare data for visualization tools like Gephi or Tableau, or backup chat data in structured formats.',
343 |   {
344 |     conversationIds: z.array(z.string()).optional().describe('Specific conversation IDs to export (if not provided, exports all conversations)'),
345 |     format: z.enum(['json', 'csv', 'graph']).optional().default('json').describe('Export format: JSON for structured data, CSV for spreadsheets, Graph for network analysis'),
346 |     includeContent: z.boolean().optional().default(false).describe('Include full conversation content in the export'),
347 |     includeRelationships: z.boolean().optional().default(false).describe('Include relationship data between conversations'),
348 |     flattenStructure: z.boolean().optional().default(false).describe('Flatten nested structures for easier processing'),
349 |     filters: z.object({
350 |       minSize: z.number().optional().describe('Minimum conversation size to include'),
351 |       hasCodeBlocks: z.boolean().optional().describe('Only include conversations with code blocks'),
352 |               projectPath: z.string().optional().describe('**RECOMMENDED** Only include conversations related to this project/codebase name or path. Dramatically improves relevance by filtering to conversations that actually worked on files in that project.')
353 |     }).optional().describe('Filters to apply when selecting conversations to export'),
354 |     outputMode: z.enum(['json', 'compact-json']).optional().default('json').describe('Output format: "json" for formatted JSON (default), "compact-json" for minified JSON')
355 |   },
356 |   async (input) => {
357 |     try {
358 |       const mappedInput = {
359 |         conversationIds: input.conversationIds,
360 |         format: input.format,
361 |         includeContent: input.includeContent,
362 |         includeRelationships: input.includeRelationships,
363 |         flattenStructure: input.flattenStructure,
364 |         filters: input.filters
365 |       };
366 | 
367 |       const result = await exportConversationData(mappedInput);
368 |       return {
369 |         content: [{
370 |           type: 'text',
371 |           text: formatResponse(result, input.outputMode)
372 |         }]
373 |       };
374 |     } catch (error) {
375 |       return {
376 |         content: [{
377 |           type: 'text',
378 |           text: `Error: ${error instanceof Error ? error.message : 'Unknown error occurred'}`
379 |         }]
380 |       };
381 |     }
382 |   }
383 | );
384 | 
385 | server.tool(
386 |   'get_system_info',
387 |   'Get system information and utilities for AI assistants. Provides current date, timezone, and other helpful context that AI assistants may not have access to. Use this when you need reference information for date filtering, time-based queries, or other system context.',
388 |   {
389 |     info: z.enum(['date', 'timezone', 'all']).optional().default('all').describe('Type of system information to retrieve: "date" for current date only, "timezone" for timezone info, "all" for everything')
390 |   },
391 |   async (input) => {
392 |     const now = new Date();
393 |     const currentDate = now.toISOString().split('T')[0];
394 |     const currentTime = now.toISOString();
395 |     const timezone = Intl.DateTimeFormat().resolvedOptions().timeZone;
396 | 
397 |     let response = '';
398 | 
399 |     if (input.info === 'date') {
400 |       response = `Current date: ${currentDate}`;
401 |     } else if (input.info === 'timezone') {
402 |       response = `Timezone: ${timezone}`;
403 |     } else {
404 |       response = [
405 |         `Current date: ${currentDate}`,
406 |         `Current time: ${currentTime}`,
407 |         `Timezone: ${timezone}`,
408 |         ``,
409 |         `Use this date information when applying date filters to search_conversations.`,
410 |         `Date format for filters: YYYY-MM-DD (e.g., "${currentDate}")`
411 |       ].join('\n');
412 |     }
413 | 
414 |     return {
415 |       content: [{
416 |         type: 'text',
417 |         text: response
418 |       }]
419 |     };
420 |   }
421 | );
422 | 
423 | const transport = new StdioServerTransport();
424 | await server.connect(transport);
425 | 
```
Page 2/3 · First · Prev · Next · Last