#
tokens: 48422/50000 29/44 files (page 1/3)
lines: on (toggle) GitHub
raw markdown copy reset
This is page 1 of 3. Use http://codebase.md/vltansky/cursor-chat-history-mcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .cursor
│   ├── mcp.json
│   └── rules
│       ├── cursor_rules.mdc
│       ├── dev_workflow.mdc
│       ├── general.mdc
│       ├── mcp.mdc
│       ├── project-overview.mdc
│       ├── self_improve.mdc
│       ├── taskmaster.mdc
│       ├── tests.mdc
│       └── typescript-patterns.mdc
├── .github
│   ├── dependabot.yml
│   └── workflows
│       └── ci.yml
├── .gitignore
├── .roo
│   ├── rules
│   │   ├── dev_workflow.md
│   │   ├── roo_rules.md
│   │   ├── self_improve.md
│   │   └── taskmaster.md
│   ├── rules-architect
│   │   └── architect-rules
│   ├── rules-ask
│   │   └── ask-rules
│   ├── rules-boomerang
│   │   └── boomerang-rules
│   ├── rules-code
│   │   └── code-rules
│   ├── rules-debug
│   │   └── debug-rules
│   └── rules-test
│       └── test-rules
├── .roomodes
├── .taskmaster
│   ├── .taskmaster
│   │   └── config.json
│   ├── config.json
│   └── reports
│       └── task-complexity-report.json
├── .taskmasterconfig
├── .windsurfrules
├── docs
│   ├── research.md
│   └── use-cases.md
├── LICENSE
├── package.json
├── README.md
├── scripts
│   └── example_prd.txt
├── src
│   ├── database
│   │   ├── parser.test.ts
│   │   ├── parser.ts
│   │   ├── reader.test.ts
│   │   ├── reader.ts
│   │   └── types.ts
│   ├── server.test.ts
│   ├── server.ts
│   ├── tools
│   │   ├── analytics-tools.ts
│   │   ├── conversation-tools.test.ts
│   │   ├── conversation-tools.ts
│   │   └── extraction-tools.ts
│   └── utils
│       ├── analytics.ts
│       ├── cache.test.ts
│       ├── cache.ts
│       ├── database-utils.test.ts
│       ├── database-utils.ts
│       ├── errors.test.ts
│       ├── errors.ts
│       ├── exporters.ts
│       ├── formatter.ts
│       ├── relationships.ts
│       ├── validation.test.ts
│       └── validation.ts
├── tsconfig.json
├── vitest.config.ts
└── yarn.lock
```

# Files

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------

```
 1 | # Dependencies
 2 | node_modules/
 3 | 
 4 | # Build output
 5 | dist/
 6 | 
 7 | # Log files
 8 | *.log
 9 | npm-debug.log*
10 | yarn-debug.log*
11 | yarn-error.log*
12 | 
13 | # Environment variables
14 | .env
15 | .env.local
16 | .env.development.local
17 | .env.test.local
18 | .env.production.local
19 | 
20 | # IDEs and editors
21 | .vscode/
22 | .idea/
23 | *.suo
24 | *.ntvs*
25 | *.njsproj
26 | *.sln
27 | *.sw?
28 | 
29 | # OS generated files
30 | .DS_Store
31 | ehthumbs.db
32 | Thumbs.db
33 | 
34 | # Added by Claude Task Master
35 | # Logs
36 | logs
37 | dev-debug.log
38 | # Dependency directories
39 | # Editor directories and files
40 | .idea
41 | .vscode
42 | # OS specific
43 | # Task files
44 | tasks.json
45 | tasks/ 
```

--------------------------------------------------------------------------------
/.taskmasterconfig:
--------------------------------------------------------------------------------

```
 1 | {
 2 |   "models": {
 3 |     "main": {
 4 |       "provider": "anthropic",
 5 |       "modelId": "claude-sonnet-4-20250514",
 6 |       "maxTokens": 120000,
 7 |       "temperature": 0.2
 8 |     },
 9 |     "research": {
10 |       "provider": "perplexity",
11 |       "modelId": "sonar-pro",
12 |       "maxTokens": 8700,
13 |       "temperature": 0.1
14 |     },
15 |     "fallback": {
16 |       "provider": "anthropic",
17 |       "modelId": "claude-3-5-sonnet-20240620",
18 |       "maxTokens": 8192,
19 |       "temperature": 0.1
20 |     }
21 |   },
22 |   "global": {
23 |     "logLevel": "info",
24 |     "debug": false,
25 |     "defaultSubtasks": 5,
26 |     "defaultPriority": "medium",
27 |     "projectName": "Taskmaster",
28 |     "ollamaBaseUrl": "http://localhost:11434/api",
29 |     "azureOpenaiBaseUrl": "https://your-endpoint.openai.azure.com/"
30 |   }
31 | }
```

--------------------------------------------------------------------------------
/.roomodes:
--------------------------------------------------------------------------------

```
 1 | {
 2 |   "customModes": [
 3 |     {
 4 |       "slug": "boomerang",
 5 |       "name": "Boomerang",
 6 |       "roleDefinition": "You are Roo, a strategic workflow orchestrator who coordinates complex tasks by delegating them to appropriate specialized modes. You have a comprehensive understanding of each mode's capabilities and limitations, also your own, and with the information given by the user and other modes in shared context you are enabled to effectively break down complex problems into discrete tasks that can be solved by different specialists using the `taskmaster-ai` system for task and context management.",
 7 |       "customInstructions": "Your role is to coordinate complex workflows by delegating tasks to specialized modes, using `taskmaster-ai` as the central hub for task definition, progress tracking, and context management. \nAs an orchestrator, you should:\n\n1. When given a complex task, use contextual information (which gets updated frequently) to break it down into logical subtasks that can be delegated to appropriate specialized modes.\n\n2. For each subtask, use the `new_task` tool to delegate. Choose the most appropriate mode for the subtask's specific goal and provide comprehensive instructions in the `message` parameter. \nThese instructions must include:\n*   All necessary context from the parent task or previous subtasks required to complete the work.\n*   A clearly defined scope, specifying exactly what the subtask should accomplish.\n*   An explicit statement that the subtask should *only* perform the work outlined in these instructions and not deviate.\n*   An instruction for the subtask to signal completion by using the `attempt_completion` tool, providing a thorough summary of the outcome in the `result` parameter, keeping in mind that this summary will be the source of truth used to further relay this information to other tasks and for you to keep track of what was completed on this project.\n\n3. Track and manage the progress of all subtasks. When a subtask is completed, acknowledge its results and determine the next steps.\n\n4. Help the user understand how the different subtasks fit together in the overall workflow. Provide clear reasoning about why you're delegating specific tasks to specific modes.\n\n5. Ask clarifying questions when necessary to better understand how to break down complex tasks effectively. If it seems complex delegate to architect to accomplish that \n\n6. Use subtasks to maintain clarity. \nIf a request significantly shifts focus or requires a different expertise (mode), consider creating a subtask rather than overloading the current one.",
 8 |       "groups": [
 9 |         "read",
10 |         "edit",
11 |         "browser",
12 |         "command",
13 |         "mcp"
14 |       ]
15 |     },
16 |     {
17 |       "slug": "architect",
18 |       "name": "Architect",
19 |       "roleDefinition": "You are Roo, an expert technical leader operating in Architect mode. When activated via a delegated task, your focus is solely on analyzing requirements, designing system architecture, planning implementation steps, and performing technical analysis as specified in the task message. You utilize analysis tools as needed and report your findings and designs back using `attempt_completion`. You do not deviate from the delegated task scope.",
20 |       "customInstructions": "1. Do some information gathering (for example using read_file or search_files) to get more context about the task.\n\n2. You should also ask the user clarifying questions to get a better understanding of the task.\n\n3. Once you've gained more context about the user's request, you should create a detailed plan for how to accomplish the task. Include Mermaid diagrams if they help make your plan clearer.\n\n4. Ask the user if they are pleased with this plan, or if they would like to make any changes. Think of this as a brainstorming session where you can discuss the task and plan the best way to accomplish it.\n\n5. Once the user confirms the plan, ask them if they'd like you to write it to a markdown file.\n\n6. Use the switch_mode tool to request that the user switch to another mode to implement the solution.",
21 |       "groups": [
22 |         "read",
23 |         ["edit", { "fileRegex": "\\.md$", "description": "Markdown files only" }],
24 |         "command",
25 |         "mcp"
26 |       ]
27 |     },
28 |     {
29 |       "slug": "ask",
30 |       "name": "Ask",
31 |       "roleDefinition": "You are Roo, a knowledgeable technical assistant.\nWhen activated by another mode via a delegated task, your focus is to research, analyze, and provide clear, concise answers or explanations based *only* on the specific information requested in the delegation message. Use available tools for information gathering and report your findings back using `attempt_completion`.",
32 |       "customInstructions": "You can analyze code, explain concepts, and access external resources. Make sure to answer the user's questions and don't rush to switch to implementing code. Include Mermaid diagrams if they help make your response clearer.",
33 |       "groups": [
34 |         "read",
35 |         "browser",
36 |         "mcp"
37 |       ]
38 |     },
39 |     {
40 |       "slug": "debug",
41 |       "name": "Debug",
42 |       "roleDefinition": "You are Roo, an expert software debugger specializing in systematic problem diagnosis and resolution. When activated by another mode, your task is to meticulously analyze the provided debugging request (potentially referencing Taskmaster tasks, logs, or metrics), use diagnostic tools as instructed to investigate the issue, identify the root cause, and report your findings and recommended next steps back via `attempt_completion`. You focus solely on diagnostics within the scope defined by the delegated task.",
43 |       "customInstructions": "Reflect on 5-7 different possible sources of the problem, distill those down to 1-2 most likely sources, and then add logs to validate your assumptions. Explicitly ask the user to confirm the diagnosis before fixing the problem.",
44 |       "groups": [
45 |         "read",
46 |         "edit",
47 |         "command",
48 |         "mcp"
49 |       ]
50 |     },
51 |     {
52 |       "slug": "test",
53 |       "name": "Test",
54 |       "roleDefinition": "You are Roo, an expert software tester. Your primary focus is executing testing tasks delegated to you by other modes.\nAnalyze the provided scope and context (often referencing a Taskmaster task ID and its `testStrategy`), develop test plans if needed, execute tests diligently, and report comprehensive results (pass/fail, bugs, coverage) back using `attempt_completion`. You operate strictly within the delegated task's boundaries.",
55 |       "customInstructions": "Focus on the `testStrategy` defined in the Taskmaster task. Develop and execute test plans accordingly. Report results clearly, including pass/fail status, bug details, and coverage information.",
56 |       "groups": [
57 |         "read",
58 |         "command",
59 |         "mcp"
60 |       ]
61 |     }
62 |   ]
63 | }
```

--------------------------------------------------------------------------------
/.windsurfrules:
--------------------------------------------------------------------------------

```
  1 | Below you will find a variety of important rules spanning:
  2 | - the dev_workflow
  3 | - the .windsurfrules document self-improvement workflow
  4 | - the template to follow when modifying or adding new sections/rules to this document.
  5 | 
  6 | ---
  7 | DEV_WORKFLOW
  8 | ---
  9 | description: Guide for using meta-development script (scripts/dev.js) to manage task-driven development workflows
 10 | globs: **/*
 11 | filesToApplyRule: **/*
 12 | alwaysApply: true
 13 | ---
 14 | 
 15 | - **Global CLI Commands**
 16 |   - Task Master now provides a global CLI through the `task-master` command
 17 |   - All functionality from `scripts/dev.js` is available through this interface
 18 |   - Install globally with `npm install -g claude-task-master` or use locally via `npx`
 19 |   - Use `task-master <command>` instead of `node scripts/dev.js <command>`
 20 |   - Examples:
 21 |     - `task-master list` instead of `node scripts/dev.js list`
 22 |     - `task-master next` instead of `node scripts/dev.js next`
 23 |     - `task-master expand --id=3` instead of `node scripts/dev.js expand --id=3`
 24 |   - All commands accept the same options as their script equivalents
 25 |   - The CLI provides additional commands like `task-master init` for project setup
 26 | 
 27 | - **Development Workflow Process**
 28 |   - Start new projects by running `task-master init` or `node scripts/dev.js parse-prd --input=<prd-file.txt>` to generate initial tasks.json
 29 |   - Begin coding sessions with `task-master list` to see current tasks, status, and IDs
 30 |   - Analyze task complexity with `task-master analyze-complexity --research` before breaking down tasks
 31 |   - Select tasks based on dependencies (all marked 'done'), priority level, and ID order
 32 |   - Clarify tasks by checking task files in tasks/ directory or asking for user input
 33 |   - View specific task details using `task-master show <id>` to understand implementation requirements
 34 |   - Break down complex tasks using `task-master expand --id=<id>` with appropriate flags
 35 |   - Clear existing subtasks if needed using `task-master clear-subtasks --id=<id>` before regenerating
 36 |   - Implement code following task details, dependencies, and project standards
 37 |   - Verify tasks according to test strategies before marking as complete
 38 |   - Mark completed tasks with `task-master set-status --id=<id> --status=done`
 39 |   - Update dependent tasks when implementation differs from original plan
 40 |   - Generate task files with `task-master generate` after updating tasks.json
 41 |   - Maintain valid dependency structure with `task-master fix-dependencies` when needed
 42 |   - Respect dependency chains and task priorities when selecting work
 43 |   - Report progress regularly using the list command
 44 | 
 45 | - **Task Complexity Analysis**
 46 |   - Run `node scripts/dev.js analyze-complexity --research` for comprehensive analysis
 47 |   - Review complexity report in scripts/task-complexity-report.json
 48 |   - Or use `node scripts/dev.js complexity-report` for a formatted, readable version of the report
 49 |   - Focus on tasks with highest complexity scores (8-10) for detailed breakdown
 50 |   - Use analysis results to determine appropriate subtask allocation
 51 |   - Note that reports are automatically used by the expand command
 52 | 
 53 | - **Task Breakdown Process**
 54 |   - For tasks with complexity analysis, use `node scripts/dev.js expand --id=<id>`
 55 |   - Otherwise use `node scripts/dev.js expand --id=<id> --subtasks=<number>`
 56 |   - Add `--research` flag to leverage Perplexity AI for research-backed expansion
 57 |   - Use `--prompt="<context>"` to provide additional context when needed
 58 |   - Review and adjust generated subtasks as necessary
 59 |   - Use `--all` flag to expand multiple pending tasks at once
 60 |   - If subtasks need regeneration, clear them first with `clear-subtasks` command
 61 | 
 62 | - **Implementation Drift Handling**
 63 |   - When implementation differs significantly from planned approach
 64 |   - When future tasks need modification due to current implementation choices
 65 |   - When new dependencies or requirements emerge
 66 |   - Call `node scripts/dev.js update --from=<futureTaskId> --prompt="<explanation>"` to update tasks.json
 67 | 
 68 | - **Task Status Management**
 69 |   - Use 'pending' for tasks ready to be worked on
 70 |   - Use 'done' for completed and verified tasks
 71 |   - Use 'deferred' for postponed tasks
 72 |   - Add custom status values as needed for project-specific workflows
 73 | 
 74 | - **Task File Format Reference**
 75 |   ```
 76 |   # Task ID: <id>
 77 |   # Title: <title>
 78 |   # Status: <status>
 79 |   # Dependencies: <comma-separated list of dependency IDs>
 80 |   # Priority: <priority>
 81 |   # Description: <brief description>
 82 |   # Details:
 83 |   <detailed implementation notes>
 84 |   
 85 |   # Test Strategy:
 86 |   <verification approach>
 87 |   ```
 88 | 
 89 | - **Command Reference: parse-prd**
 90 |   - Legacy Syntax: `node scripts/dev.js parse-prd --input=<prd-file.txt>`
 91 |   - CLI Syntax: `task-master parse-prd --input=<prd-file.txt>`
 92 |   - Description: Parses a PRD document and generates a tasks.json file with structured tasks
 93 |   - Parameters: 
 94 |     - `--input=<file>`: Path to the PRD text file (default: sample-prd.txt)
 95 |   - Example: `task-master parse-prd --input=requirements.txt`
 96 |   - Notes: Will overwrite existing tasks.json file. Use with caution.
 97 | 
 98 | - **Command Reference: update**
 99 |   - Legacy Syntax: `node scripts/dev.js update --from=<id> --prompt="<prompt>"`
100 |   - CLI Syntax: `task-master update --from=<id> --prompt="<prompt>"`
101 |   - Description: Updates tasks with ID >= specified ID based on the provided prompt
102 |   - Parameters:
103 |     - `--from=<id>`: Task ID from which to start updating (required)
104 |     - `--prompt="<text>"`: Explanation of changes or new context (required)
105 |   - Example: `task-master update --from=4 --prompt="Now we are using Express instead of Fastify."`
106 |   - Notes: Only updates tasks not marked as 'done'. Completed tasks remain unchanged.
107 | 
108 | - **Command Reference: generate**
109 |   - Legacy Syntax: `node scripts/dev.js generate`
110 |   - CLI Syntax: `task-master generate`
111 |   - Description: Generates individual task files in tasks/ directory based on tasks.json
112 |   - Parameters: 
113 |     - `--file=<path>, -f`: Use alternative tasks.json file (default: 'tasks/tasks.json')
114 |     - `--output=<dir>, -o`: Output directory (default: 'tasks')
115 |   - Example: `task-master generate`
116 |   - Notes: Overwrites existing task files. Creates tasks/ directory if needed.
117 | 
118 | - **Command Reference: set-status**
119 |   - Legacy Syntax: `node scripts/dev.js set-status --id=<id> --status=<status>`
120 |   - CLI Syntax: `task-master set-status --id=<id> --status=<status>`
121 |   - Description: Updates the status of a specific task in tasks.json
122 |   - Parameters:
123 |     - `--id=<id>`: ID of the task to update (required)
124 |     - `--status=<status>`: New status value (required)
125 |   - Example: `task-master set-status --id=3 --status=done`
126 |   - Notes: Common values are 'done', 'pending', and 'deferred', but any string is accepted.
127 | 
128 | - **Command Reference: list**
129 |   - Legacy Syntax: `node scripts/dev.js list`
130 |   - CLI Syntax: `task-master list`
131 |   - Description: Lists all tasks in tasks.json with IDs, titles, and status
132 |   - Parameters: 
133 |     - `--status=<status>, -s`: Filter by status
134 |     - `--with-subtasks`: Show subtasks for each task
135 |     - `--file=<path>, -f`: Use alternative tasks.json file (default: 'tasks/tasks.json')
136 |   - Example: `task-master list`
137 |   - Notes: Provides quick overview of project progress. Use at start of sessions.
138 | 
139 | - **Command Reference: expand**
140 |   - Legacy Syntax: `node scripts/dev.js expand --id=<id> [--num=<number>] [--research] [--prompt="<context>"]`
141 |   - CLI Syntax: `task-master expand --id=<id> [--num=<number>] [--research] [--prompt="<context>"]`
142 |   - Description: Expands a task with subtasks for detailed implementation
143 |   - Parameters:
144 |     - `--id=<id>`: ID of task to expand (required unless using --all)
145 |     - `--all`: Expand all pending tasks, prioritized by complexity
146 |     - `--num=<number>`: Number of subtasks to generate (default: from complexity report)
147 |     - `--research`: Use Perplexity AI for research-backed generation
148 |     - `--prompt="<text>"`: Additional context for subtask generation
149 |     - `--force`: Regenerate subtasks even for tasks that already have them
150 |   - Example: `task-master expand --id=3 --num=5 --research --prompt="Focus on security aspects"`
151 |   - Notes: Uses complexity report recommendations if available.
152 | 
153 | - **Command Reference: analyze-complexity**
154 |   - Legacy Syntax: `node scripts/dev.js analyze-complexity [options]`
155 |   - CLI Syntax: `task-master analyze-complexity [options]`
156 |   - Description: Analyzes task complexity and generates expansion recommendations
157 |   - Parameters:
158 |     - `--output=<file>, -o`: Output file path (default: scripts/task-complexity-report.json)
159 |     - `--model=<model>, -m`: Override LLM model to use
160 |     - `--threshold=<number>, -t`: Minimum score for expansion recommendation (default: 5)
161 |     - `--file=<path>, -f`: Use alternative tasks.json file
162 |     - `--research, -r`: Use Perplexity AI for research-backed analysis
163 |   - Example: `task-master analyze-complexity --research`
164 |   - Notes: Report includes complexity scores, recommended subtasks, and tailored prompts.
165 | 
166 | - **Command Reference: clear-subtasks**
167 |   - Legacy Syntax: `node scripts/dev.js clear-subtasks --id=<id>`
168 |   - CLI Syntax: `task-master clear-subtasks --id=<id>`
169 |   - Description: Removes subtasks from specified tasks to allow regeneration
170 |   - Parameters:
171 |     - `--id=<id>`: ID or comma-separated IDs of tasks to clear subtasks from
172 |     - `--all`: Clear subtasks from all tasks
173 |   - Examples:
174 |     - `task-master clear-subtasks --id=3`
175 |     - `task-master clear-subtasks --id=1,2,3`
176 |     - `task-master clear-subtasks --all`
177 |   - Notes: 
178 |     - Task files are automatically regenerated after clearing subtasks
179 |     - Can be combined with expand command to immediately generate new subtasks
180 |     - Works with both parent tasks and individual subtasks
181 | 
182 | - **Task Structure Fields**
183 |   - **id**: Unique identifier for the task (Example: `1`)
184 |   - **title**: Brief, descriptive title (Example: `"Initialize Repo"`)
185 |   - **description**: Concise summary of what the task involves (Example: `"Create a new repository, set up initial structure."`)
186 |   - **status**: Current state of the task (Example: `"pending"`, `"done"`, `"deferred"`)
187 |   - **dependencies**: IDs of prerequisite tasks (Example: `[1, 2]`)
188 |     - Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending)
189 |     - This helps quickly identify which prerequisite tasks are blocking work
190 |   - **priority**: Importance level (Example: `"high"`, `"medium"`, `"low"`)
191 |   - **details**: In-depth implementation instructions (Example: `"Use GitHub client ID/secret, handle callback, set session token."`)
192 |   - **testStrategy**: Verification approach (Example: `"Deploy and call endpoint to confirm 'Hello World' response."`)
193 |   - **subtasks**: List of smaller, more specific tasks (Example: `[{"id": 1, "title": "Configure OAuth", ...}]`)
194 | 
195 | - **Environment Variables Configuration**
196 |   - **ANTHROPIC_API_KEY** (Required): Your Anthropic API key for Claude (Example: `ANTHROPIC_API_KEY=sk-ant-api03-...`)
197 |   - **MODEL** (Default: `"claude-3-7-sonnet-20250219"`): Claude model to use (Example: `MODEL=claude-3-opus-20240229`)
198 |   - **MAX_TOKENS** (Default: `"4000"`): Maximum tokens for responses (Example: `MAX_TOKENS=8000`)
199 |   - **TEMPERATURE** (Default: `"0.7"`): Temperature for model responses (Example: `TEMPERATURE=0.5`)
200 |   - **DEBUG** (Default: `"false"`): Enable debug logging (Example: `DEBUG=true`)
201 |   - **TASKMASTER_LOG_LEVEL** (Default: `"info"`): Console output level (Example: `TASKMASTER_LOG_LEVEL=debug`)
202 |   - **DEFAULT_SUBTASKS** (Default: `"3"`): Default subtask count (Example: `DEFAULT_SUBTASKS=5`)
203 |   - **DEFAULT_PRIORITY** (Default: `"medium"`): Default priority (Example: `DEFAULT_PRIORITY=high`)
204 |   - **PROJECT_NAME** (Default: `"MCP SaaS MVP"`): Project name in metadata (Example: `PROJECT_NAME=My Awesome Project`)
205 |   - **PROJECT_VERSION** (Default: `"1.0.0"`): Version in metadata (Example: `PROJECT_VERSION=2.1.0`)
206 |   - **PERPLEXITY_API_KEY**: For research-backed features (Example: `PERPLEXITY_API_KEY=pplx-...`)
207 |   - **PERPLEXITY_MODEL** (Default: `"sonar-medium-online"`): Perplexity model (Example: `PERPLEXITY_MODEL=sonar-large-online`)
208 | 
209 | - **Determining the Next Task**
210 |   - Run `task-master next` to show the next task to work on
211 |   - The next command identifies tasks with all dependencies satisfied
212 |   - Tasks are prioritized by priority level, dependency count, and ID
213 |   - The command shows comprehensive task information including:
214 |     - Basic task details and description
215 |     - Implementation details
216 |     - Subtasks (if they exist)
217 |     - Contextual suggested actions
218 |   - Recommended before starting any new development work
219 |   - Respects your project's dependency structure
220 |   - Ensures tasks are completed in the appropriate sequence
221 |   - Provides ready-to-use commands for common task actions
222 | 
223 | - **Viewing Specific Task Details**
224 |   - Run `task-master show <id>` or `task-master show --id=<id>` to view a specific task
225 |   - Use dot notation for subtasks: `task-master show 1.2` (shows subtask 2 of task 1)
226 |   - Displays comprehensive information similar to the next command, but for a specific task
227 |   - For parent tasks, shows all subtasks and their current status
228 |   - For subtasks, shows parent task information and relationship
229 |   - Provides contextual suggested actions appropriate for the specific task
230 |   - Useful for examining task details before implementation or checking status
231 | 
232 | - **Managing Task Dependencies**
233 |   - Use `task-master add-dependency --id=<id> --depends-on=<id>` to add a dependency
234 |   - Use `task-master remove-dependency --id=<id> --depends-on=<id>` to remove a dependency
235 |   - The system prevents circular dependencies and duplicate dependency entries
236 |   - Dependencies are checked for existence before being added or removed
237 |   - Task files are automatically regenerated after dependency changes
238 |   - Dependencies are visualized with status indicators in task listings and files
239 | 
240 | - **Command Reference: add-dependency**
241 |   - Legacy Syntax: `node scripts/dev.js add-dependency --id=<id> --depends-on=<id>`
242 |   - CLI Syntax: `task-master add-dependency --id=<id> --depends-on=<id>`
243 |   - Description: Adds a dependency relationship between two tasks
244 |   - Parameters:
245 |     - `--id=<id>`: ID of task that will depend on another task (required)
246 |     - `--depends-on=<id>`: ID of task that will become a dependency (required)
247 |   - Example: `task-master add-dependency --id=22 --depends-on=21`
248 |   - Notes: Prevents circular dependencies and duplicates; updates task files automatically
249 | 
250 | - **Command Reference: remove-dependency**
251 |   - Legacy Syntax: `node scripts/dev.js remove-dependency --id=<id> --depends-on=<id>`
252 |   - CLI Syntax: `task-master remove-dependency --id=<id> --depends-on=<id>`
253 |   - Description: Removes a dependency relationship between two tasks
254 |   - Parameters:
255 |     - `--id=<id>`: ID of task to remove dependency from (required)
256 |     - `--depends-on=<id>`: ID of task to remove as a dependency (required)
257 |   - Example: `task-master remove-dependency --id=22 --depends-on=21`
258 |   - Notes: Checks if dependency actually exists; updates task files automatically
259 | 
260 | - **Command Reference: validate-dependencies**
261 |   - Legacy Syntax: `node scripts/dev.js validate-dependencies [options]`
262 |   - CLI Syntax: `task-master validate-dependencies [options]`
263 |   - Description: Checks for and identifies invalid dependencies in tasks.json and task files
264 |   - Parameters:
265 |     - `--file=<path>, -f`: Use alternative tasks.json file (default: 'tasks/tasks.json')
266 |   - Example: `task-master validate-dependencies`
267 |   - Notes: 
268 |     - Reports all non-existent dependencies and self-dependencies without modifying files
269 |     - Provides detailed statistics on task dependency state
270 |     - Use before fix-dependencies to audit your task structure
271 | 
272 | - **Command Reference: fix-dependencies**
273 |   - Legacy Syntax: `node scripts/dev.js fix-dependencies [options]`
274 |   - CLI Syntax: `task-master fix-dependencies [options]`
275 |   - Description: Finds and fixes all invalid dependencies in tasks.json and task files
276 |   - Parameters:
277 |     - `--file=<path>, -f`: Use alternative tasks.json file (default: 'tasks/tasks.json')
278 |   - Example: `task-master fix-dependencies`
279 |   - Notes: 
280 |     - Removes references to non-existent tasks and subtasks
281 |     - Eliminates self-dependencies (tasks depending on themselves)
282 |     - Regenerates task files with corrected dependencies
283 |     - Provides detailed report of all fixes made
284 | 
285 | - **Command Reference: complexity-report**
286 |   - Legacy Syntax: `node scripts/dev.js complexity-report [options]`
287 |   - CLI Syntax: `task-master complexity-report [options]`
288 |   - Description: Displays the task complexity analysis report in a formatted, easy-to-read way
289 |   - Parameters:
290 |     - `--file=<path>, -f`: Path to the complexity report file (default: 'scripts/task-complexity-report.json')
291 |   - Example: `task-master complexity-report`
292 |   - Notes: 
293 |     - Shows tasks organized by complexity score with recommended actions
294 |     - Provides complexity distribution statistics
295 |     - Displays ready-to-use expansion commands for complex tasks
296 |     - If no report exists, offers to generate one interactively
297 | 
298 | - **Command Reference: add-task**
299 |   - CLI Syntax: `task-master add-task [options]`
300 |   - Description: Add a new task to tasks.json using AI
301 |   - Parameters:
302 |     - `--file=<path>, -f`: Path to the tasks file (default: 'tasks/tasks.json')
303 |     - `--prompt=<text>, -p`: Description of the task to add (required)
304 |     - `--dependencies=<ids>, -d`: Comma-separated list of task IDs this task depends on
305 |     - `--priority=<priority>`: Task priority (high, medium, low) (default: 'medium')
306 |   - Example: `task-master add-task --prompt="Create user authentication using Auth0"`
307 |   - Notes: Uses AI to convert description into structured task with appropriate details
308 | 
309 | - **Command Reference: init**
310 |   - CLI Syntax: `task-master init`
311 |   - Description: Initialize a new project with Task Master structure
312 |   - Parameters: None
313 |   - Example: `task-master init`
314 |   - Notes: 
315 |     - Creates initial project structure with required files
316 |     - Prompts for project settings if not provided
317 |     - Merges with existing files when appropriate
318 |     - Can be used to bootstrap a new Task Master project quickly
319 | 
320 | - **Code Analysis & Refactoring Techniques**
321 |   - **Top-Level Function Search**
322 |     - Use grep pattern matching to find all exported functions across the codebase
323 |     - Command: `grep -E "export (function|const) \w+|function \w+\(|const \w+ = \(|module\.exports" --include="*.js" -r ./`
324 |     - Benefits:
325 |       - Quickly identify all public API functions without reading implementation details
326 |       - Compare functions between files during refactoring (e.g., monolithic to modular structure)
327 |       - Verify all expected functions exist in refactored modules
328 |       - Identify duplicate functionality or naming conflicts
329 |     - Usage examples:
330 |       - When migrating from `scripts/dev.js` to modular structure: `grep -E "function \w+\(" scripts/dev.js`
331 |       - Check function exports in a directory: `grep -E "export (function|const)" -r scripts/modules/`
332 |       - Find potential naming conflicts: `grep -E "function (get|set|create|update)\w+\(" -r ./`
333 |     - Variations:
334 |       - Add `-n` flag to include line numbers
335 |       - Add `--include="*.ts"` to filter by file extension
336 |       - Use with `| sort` to alphabetize results
337 |     - Integration with refactoring workflow:
338 |       - Start by mapping all functions in the source file
339 |       - Create target module files based on function grouping
340 |       - Verify all functions were properly migrated
341 |       - Check for any unintentional duplications or omissions
342 | 
343 | ---
344 | WINDSURF_RULES
345 | ---
346 | description: Guidelines for creating and maintaining Windsurf rules to ensure consistency and effectiveness.
347 | globs: .windsurfrules
348 | filesToApplyRule: .windsurfrules
349 | alwaysApply: true
350 | ---
351 | The below describes how you should be structuring new rule sections in this document.
352 | - **Required Rule Structure:**
353 |   ```markdown
354 |   ---
355 |   description: Clear, one-line description of what the rule enforces
356 |   globs: path/to/files/*.ext, other/path/**/*
357 |   alwaysApply: boolean
358 |   ---
359 | 
360 |   - **Main Points in Bold**
361 |     - Sub-points with details
362 |     - Examples and explanations
363 |   ```
364 | 
365 | - **Section References:**
366 |   - Use `ALL_CAPS_SECTION` to reference files
367 |   - Example: `WINDSURF_RULES`
368 | 
369 | - **Code Examples:**
370 |   - Use language-specific code blocks
371 |   ```typescript
372 |   // ✅ DO: Show good examples
373 |   const goodExample = true;
374 |   
375 |   // ❌ DON'T: Show anti-patterns
376 |   const badExample = false;
377 |   ```
378 | 
379 | - **Rule Content Guidelines:**
380 |   - Start with high-level overview
381 |   - Include specific, actionable requirements
382 |   - Show examples of correct implementation
383 |   - Reference existing code when possible
384 |   - Keep rules DRY by referencing other rules
385 | 
386 | - **Rule Maintenance:**
387 |   - Update rules when new patterns emerge
388 |   - Add examples from actual codebase
389 |   - Remove outdated patterns
390 |   - Cross-reference related rules
391 | 
392 | - **Best Practices:**
393 |   - Use bullet points for clarity
394 |   - Keep descriptions concise
395 |   - Include both DO and DON'T examples
396 |   - Reference actual code over theoretical examples
397 |   - Use consistent formatting across rules 
398 | 
399 | ---
400 | SELF_IMPROVE
401 | ---
402 | description: Guidelines for continuously improving this rules document based on emerging code patterns and best practices.
403 | globs: **/*
404 | filesToApplyRule: **/*
405 | alwaysApply: true
406 | ---
407 | 
408 | - **Rule Improvement Triggers:**
409 |   - New code patterns not covered by existing rules
410 |   - Repeated similar implementations across files
411 |   - Common error patterns that could be prevented
412 |   - New libraries or tools being used consistently
413 |   - Emerging best practices in the codebase
414 | 
415 | - **Analysis Process:**
416 |   - Compare new code with existing rules
417 |   - Identify patterns that should be standardized
418 |   - Look for references to external documentation
419 |   - Check for consistent error handling patterns
420 |   - Monitor test patterns and coverage
421 | 
422 | - **Rule Updates:**
423 |   - **Add New Rules When:**
424 |     - A new technology/pattern is used in 3+ files
425 |     - Common bugs could be prevented by a rule
426 |     - Code reviews repeatedly mention the same feedback
427 |     - New security or performance patterns emerge
428 | 
429 |   - **Modify Existing Rules When:**
430 |     - Better examples exist in the codebase
431 |     - Additional edge cases are discovered
432 |     - Related rules have been updated
433 |     - Implementation details have changed
434 | 
435 | - **Example Pattern Recognition:**
436 |   ```typescript
437 |   // If you see repeated patterns like:
438 |   const data = await prisma.user.findMany({
439 |     select: { id: true, email: true },
440 |     where: { status: 'ACTIVE' }
441 |   });
442 |   
443 |   // Consider adding a PRISMA section in the .windsurfrules:
444 |   // - Standard select fields
445 |   // - Common where conditions
446 |   // - Performance optimization patterns
447 |   ```
448 | 
449 | - **Rule Quality Checks:**
450 |   - Rules should be actionable and specific
451 |   - Examples should come from actual code
452 |   - References should be up to date
453 |   - Patterns should be consistently enforced
454 | 
455 | - **Continuous Improvement:**
456 |   - Monitor code review comments
457 |   - Track common development questions
458 |   - Update rules after major refactors
459 |   - Add links to relevant documentation
460 |   - Cross-reference related rules
461 | 
462 | - **Rule Deprecation:**
463 |   - Mark outdated patterns as deprecated
464 |   - Remove rules that no longer apply
465 |   - Update references to deprecated rules
466 |   - Document migration paths for old patterns
467 | 
468 | - **Documentation Updates:**
469 |   - Keep examples synchronized with code
470 |   - Update references to external docs
471 |   - Maintain links between related rules
472 |   - Document breaking changes
473 | 
474 | Follow WINDSURF_RULES for proper rule formatting and structure of windsurf rule sections.
```

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Cursor Chat History MCP
  2 | 
  3 | **Give AI assistants access to your Cursor chat history.**
  4 | 
  5 | A Model Context Protocol (MCP) server that allows Cursor, Claude, and other AI assistants to read and analyze your Cursor chat data. This enables personalized coding assistance based on your actual development patterns and history.
  6 | 
  7 | <a href="https://glama.ai/mcp/servers/@vltansky/cursor-conversations-mcp">
  8 |   <img width="380" height="200" src="https://glama.ai/mcp/servers/@vltansky/cursor-conversations-mcp/badge" alt="Cursor Conversations Server MCP server" />
  9 | </a>
 10 | 
 11 | ## What This Enables
 12 | 
 13 | Ask your AI assistant to:
 14 | 
 15 | - Analyze your chat history to understand your coding patterns and usage statistics
 16 | - Generate project-specific rules based on your actual development discussions
 17 | - Extract insights from past problem-solving sessions and find related conversations
 18 | - Create documentation based on real conversations about your code
 19 | - Export chat data for external analysis and visualization
 20 | - Find and apply solutions you've already worked through
 21 | 
 22 | ## Key Benefits
 23 | 
 24 | **Generate Personalized Rules**: Create coding standards based on your actual development patterns, not generic best practices.
 25 | 
 26 | **Learn from Your History**: Extract insights from past chats to improve future development.
 27 | 
 28 | **Context-Aware Assistance**: Get help that's informed by your specific projects and coding style.
 29 | 
 30 | **Pattern Recognition**: Identify recurring themes and solutions in your development work.
 31 | 
 32 | ## Quick Start
 33 | 
 34 | ### 1. Configure MCP
 35 | Add to your `.cursor/mcp.json`:
 36 | 
 37 | ```json
 38 | {
 39 |   "mcpServers": {
 40 |     "cursor-chat-history": {
 41 |       "command": "npx",
 42 |       "args": ["-y", "--package=cursor-chat-history-mcp", "cursor-chat-history-mcp"]
 43 |     }
 44 |   }
 45 | }
 46 | ```
 47 | 
 48 | ### 2. Start Using
 49 | ```
 50 | "Analyze my React conversations and create component guidelines"
 51 | "Find debugging patterns in my chat history"
 52 | "Generate TypeScript coding standards from my actual usage"
 53 | "What are the main themes in my recent coding discussions?"
 54 | ```
 55 | 
 56 | ## Available Tools
 57 | 
 58 | ### Core Tools
 59 | 
 60 | - **`list_conversations`** - Browse conversations with filtering options and optional project relevance scoring
 61 | - **`get_conversation`** - Retrieve full conversation content with code and file references
 62 | - **`search_conversations`** - Enhanced search with multi-keyword, LIKE patterns, and text search
 63 | 
 64 | ### Analytics & Data Extraction Tools
 65 | 
 66 | - **`get_conversation_analytics`** - Comprehensive analytics including usage patterns, file activity, programming language distribution, and temporal trends
 67 | - **`find_related_conversations`** - Find conversations related by shared files, folders, languages, size, or temporal proximity
 68 | - **`extract_conversation_elements`** - Extract files, code blocks, languages, metadata, and conversation structure with flexible grouping
 69 | - **`export_conversation_data`** - Export chat data in JSON, CSV, or Graph formats for external analysis and visualization
 70 | 
 71 | ## Common Use Cases
 72 | 
 73 | ### Generate Coding Rules
 74 | ```
 75 | "Create TypeScript interface naming conventions from my conversations"
 76 | "Extract error handling patterns and create guidelines"
 77 | "Find all my discussions about testing and create best practices"
 78 | ```
 79 | 
 80 | ### Extract Best Practices
 81 | ```
 82 | "Show me how I typically use React hooks in my projects"
 83 | "Find patterns in my state management discussions"
 84 | "Analyze my class inheritance usage and create guidelines"
 85 | ```
 86 | 
 87 | ### Advanced Analysis
 88 | ```
 89 | "Find conversations where I discussed specific functions or patterns"
 90 | "Search for file-specific discussions across my projects"
 91 | "Compare how I've approached similar problems over time"
 92 | ```
 93 | 
 94 | ### Create Project Documentation
 95 | ```
 96 | "Generate API documentation from my service discussions"
 97 | "Create technical docs from my auth module conversations"
 98 | ```
 99 | 
100 | ### Learn from Past Solutions
101 | ```
102 | "Find similar debugging sessions and extract solutions"
103 | "Analyze my performance optimization discussions"
104 | ```
105 | 
106 | ### Data Analysis & Insights
107 | ```
108 | "Get comprehensive analytics on my coding patterns over the last 3 months"
109 | "Export all conversations with React code to CSV for analysis"
110 | "Find conversations similar to this database migration discussion"
111 | ```
112 | 
113 | ## Privacy & Security
114 | 
115 | - **Runs locally** - Your chat data never leaves your machine
116 | - **No external services** - Direct access to your local Cursor database
117 | - **No API keys required** - No data sharing with external services
118 | - **Full control** - You decide what data to access and when
119 | 
120 | ## How It Works
121 | 
122 | **Summary-First Approach for Efficiency**
123 | 
124 | The entire system is designed to be both powerful and context-efficient:
125 | 
126 | ### **Data Access Process**
127 | 1. **Full Content Analysis**: All tools access complete chat data including:
128 |    - Complete message text and code blocks
129 |    - File references and folder paths
130 |    - Conversation metadata and titles
131 |    - AI-generated summaries
132 | 
133 | 2. **Smart Result Delivery**: Different tools provide focused outputs:
134 |    - **`list_conversations`**: Returns conversation summaries with titles and metadata
135 |    - **`search_conversations`**: Searches full content but returns only summaries with relevance scores
136 |    - **Analytics tools**: Extract insights and patterns without overwhelming detail
137 | 
138 | 3. **Summary-First Results**: Most tools return:
139 |    - Conversation summaries and titles
140 |    - Key metadata (files, folders, message count)
141 |    - AI-generated summaries when available
142 |    - Relevance scores and analytics
143 | 
144 | ### **Why This Design?**
145 | - **Context Efficiency**: Avoids overwhelming AI assistants with full message content
146 | - **Performance**: Summaries are much smaller and faster to process
147 | - **Discoverability**: Users can quickly scan results to identify relevant conversations
148 | - **Deep Dive When Needed**: Use `get_conversation` for full content of specific conversations
149 | 
150 | This approach lets you efficiently browse, search, and analyze your chat history, then dive deep only into conversations that matter for your current task.
151 | 
152 | ## Installation
153 | 
154 | ### For Development
155 | ```bash
156 | git clone https://github.com/vltansky/cursor-chat-history-mcp
157 | cd cursor-chat-history-mcp
158 | yarn install
159 | yarn build
160 | ```
161 | 
162 | ### For Use
163 | The npx configuration above handles installation automatically.
164 | 
165 | ## Tool Reference
166 | 
167 | ### Output Formats
168 | 
169 | All tools support JSON output formats via the `outputMode` parameter:
170 | 
171 | - **`json` (default)** - Formatted JSON with proper indentation for readability
172 | - **`compact-json`** - Minified JSON without formatting for minimal size
173 | 
174 | ### Core Tools
175 | 
176 | **`list_conversations`**
177 | - `limit` (default: 10) - Number of conversations to return
178 | - `includeAiSummaries` (default: true) - Include AI-generated summaries for efficient browsing
179 | - `projectPath` - Filter by project path
180 | - `includeRelevanceScore` (default: false) - Include relevance scores when filtering by projectPath
181 | - `hasCodeBlocks` - Filter conversations with/without code
182 | - `keywords` - Search by keywords
183 | - `filePattern` - Filter by file pattern
184 | 
185 | **`get_conversation`**
186 | - `conversationId` (required) - Conversation to retrieve
187 | - `summaryOnly` (default: false) - Get enhanced summary without full content to save context
188 | - `includeMetadata` (default: false) - Include additional metadata
189 | 
190 | **`search_conversations`** - Enhanced search with multiple methods
191 | - **Simple Query**: `query` - Basic text search (backward compatible)
192 | - **Multi-keyword**: `keywords` array with `keywordOperator` ('AND'/'OR')
193 | - **LIKE Patterns**: `likePattern` - SQL LIKE patterns (% = any chars, _ = single char)
194 | - `searchType` (default: 'all') - 'all', 'project', 'files', 'code'
195 | - `maxResults` (default: 10) - Maximum results
196 | - `includeCode` (default: true) - Include code blocks
197 | 
198 | ### Analytics & Data Extraction Tools
199 | 
200 | **`get_conversation_analytics`**
201 | - `scope` (default: 'all') - 'all', 'recent', 'project'
202 | - `projectPath` - Focus on specific project (required when scope='project')
203 | - `recentDays` (default: 30) - Time window for recent scope
204 | - `includeBreakdowns` (default: ['files', 'languages']) - Analysis types: 'files', 'languages', 'temporal', 'size'
205 | 
206 | **`find_related_conversations`**
207 | - `referenceConversationId` (required) - Starting conversation
208 | - `relationshipTypes` (default: ['files']) - 'files', 'folders', 'languages', 'size', 'temporal'
209 | - `maxResults` (default: 10) - Number of results
210 | - `minScore` (default: 0.1) - Minimum similarity score (0-1)
211 | - `includeScoreBreakdown` (default: false) - Show individual relationship scores
212 | 
213 | **`extract_conversation_elements`**
214 | - `conversationIds` - Specific conversations (optional, processes all if empty)
215 | - `elements` (default: ['files', 'codeblocks']) - 'files', 'folders', 'languages', 'codeblocks', 'metadata', 'structure'
216 | - `includeContext` (default: false) - Include surrounding message text
217 | - `groupBy` (default: 'conversation') - 'conversation', 'element', 'none'
218 | - `filters` - Filter by code length, file extensions, or languages
219 | 
220 | **`export_conversation_data`**
221 | - `conversationIds` - Specific conversations (optional, exports all if empty)
222 | - `format` (default: 'json') - 'json', 'csv', 'graph'
223 | - `includeContent` (default: false) - Include full message text
224 | - `includeRelationships` (default: false) - Calculate file/folder connections
225 | - `flattenStructure` (default: false) - Flatten for CSV compatibility
226 | - `filters` - Filter by size, code blocks, or project path
227 | 
228 | ## Database Paths
229 | 
230 | Auto-detected locations:
231 | - **macOS**: `~/Library/Application Support/Cursor/User/globalStorage/state.vscdb`
232 | - **Windows**: `%APPDATA%/Cursor/User/globalStorage/state.vscdb`
233 | - **Linux**: `~/.config/Cursor/User/globalStorage/state.vscdb`
234 | 
235 | ## Technical Notes
236 | 
237 | - Supports both legacy and modern Cursor conversation formats
238 | - Uses SQLite to access Cursor's chat database
239 | - Close Cursor before running to avoid database lock issues
240 | - Conversations filtered by size (>1000 bytes) to exclude empty ones
241 | - Uses ROWID for chronological ordering (UUIDs are not chronological)
242 | 
243 | ## Contributing
244 | 
245 | 1. Fork the repository
246 | 2. Create a feature branch
247 | 3. Make your changes
248 | 4. Add tests if applicable
249 | 5. Submit a pull request
250 | 
251 | ## License
252 | 
253 | MIT
```

--------------------------------------------------------------------------------
/vitest.config.ts:
--------------------------------------------------------------------------------

```typescript
// Vitest test-runner configuration for this package.
import { defineConfig } from 'vitest/config';

export default defineConfig({
  test: {
    // Expose describe/it/expect as globals so test files need no imports.
    globals: true,
    // Run tests in a Node environment (MCP server code; no DOM required).
    environment: 'node',
    // Collect *.test.* and *.spec.* files anywhere under src/.
    include: ['src/**/*.{test,spec}.{js,mjs,cjs,ts,mts,cts,jsx,tsx}'],
    // Never pick up tests from dependencies or build output.
    exclude: ['node_modules', 'dist'],
  },
});
```

--------------------------------------------------------------------------------
/.cursor/mcp.json:
--------------------------------------------------------------------------------

```json
 1 | {
 2 |     "mcpServers": {
 3 |       "cursor-chat-history-mcp": {
 4 |           "command": "node",
 5 |           "args": [
 6 |               "/Users/vladta/Projects/cursor-chat-history-mcp/dist/server.js"
 7 |           ]
 8 |       },
 9 |         "taskmaster-ai": {
10 |           "command": "npx",
11 |           "args": ["-y", "--package=task-master-ai", "task-master-ai"]
12 |         }
13 |     }
14 | }
```

--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------

```json
 1 | {
 2 |   "compilerOptions": {
 3 |     "target": "ES2022",
 4 |     "module": "NodeNext",
 5 |     "moduleResolution": "NodeNext",
 6 |     "esModuleInterop": true,
 7 |     "forceConsistentCasingInFileNames": true,
 8 |     "strict": true,
 9 |     "skipLibCheck": true,
10 |     "outDir": "dist"
11 |   },
12 |   "include": ["src/**/*"],
13 |   "exclude": ["node_modules", "**/*.test.ts", "**/*.test.js"]
14 | }
```

--------------------------------------------------------------------------------
/src/utils/formatter.ts:
--------------------------------------------------------------------------------

```typescript
 1 | export type OutputFormat = 'json' | 'compact-json';
 2 | 
 3 | export function formatResponse(data: any, format?: OutputFormat): string {
 4 |   try {
 5 |     switch (format) {
 6 |       case 'compact-json':
 7 |         return JSON.stringify(data);
 8 |       case 'json':
 9 |       default:
10 |         return JSON.stringify(data, null, 2);
11 |     }
12 |   } catch (error) {
13 |     console.error('Formatting failed, falling back to JSON:', error);
14 |     return JSON.stringify(data, null, 2);
15 |   }
16 | }
```

--------------------------------------------------------------------------------
/.taskmaster/.taskmaster/config.json:
--------------------------------------------------------------------------------

```json
 1 | {
 2 |   "models": {
 3 |     "main": {
 4 |       "provider": "anthropic",
 5 |       "modelId": "claude-3-7-sonnet-20250219",
 6 |       "maxTokens": 64000,
 7 |       "temperature": 0.2
 8 |     },
 9 |     "research": {
10 |       "provider": "perplexity",
11 |       "modelId": "sonar-pro",
12 |       "maxTokens": 8700,
13 |       "temperature": 0.1
14 |     },
15 |     "fallback": {
16 |       "provider": "anthropic",
17 |       "modelId": "claude-3-5-sonnet",
18 |       "maxTokens": 64000,
19 |       "temperature": 0.2
20 |     }
21 |   },
22 |   "global": {
23 |     "logLevel": "info",
24 |     "debug": false,
25 |     "defaultSubtasks": 5,
26 |     "defaultPriority": "medium",
27 |     "projectName": "Task Master",
28 |     "ollamaBaseURL": "http://localhost:11434/api",
29 |     "bedrockBaseURL": "https://bedrock.us-east-1.amazonaws.com",
30 |     "userId": "1234567890"
31 |   }
32 | }
```

--------------------------------------------------------------------------------
/.taskmaster/config.json:
--------------------------------------------------------------------------------

```json
 1 | {
 2 |   "models": {
 3 |     "main": {
 4 |       "provider": "anthropic",
 5 |       "modelId": "claude-sonnet-4-20250514",
 6 |       "maxTokens": 120000,
 7 |       "temperature": 0.2
 8 |     },
 9 |     "research": {
10 |       "provider": "perplexity",
11 |       "modelId": "sonar-pro",
12 |       "maxTokens": 8700,
13 |       "temperature": 0.1
14 |     },
15 |     "fallback": {
16 |       "provider": "anthropic",
17 |       "modelId": "claude-3-5-sonnet-20240620",
18 |       "maxTokens": 8192,
19 |       "temperature": 0.1
20 |     }
21 |   },
22 |   "global": {
23 |     "logLevel": "info",
24 |     "debug": false,
25 |     "defaultSubtasks": 5,
26 |     "defaultPriority": "medium",
27 |     "projectName": "Taskmaster",
28 |     "ollamaBaseURL": "http://localhost:11434/api",
29 |     "bedrockBaseURL": "https://bedrock.us-east-1.amazonaws.com",
30 |     "ollamaBaseUrl": "http://localhost:11434/api",
31 |     "azureOpenaiBaseUrl": "https://your-endpoint.openai.azure.com/",
32 |     "userId": "1234567890"
33 |   }
34 | }
```

--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------

```yaml
 1 | version: 2
 2 | updates:
 3 |   # Enable version updates for npm dependencies
 4 |   - package-ecosystem: "npm"
 5 |     directory: "/"
 6 |     schedule:
 7 |       interval: "weekly"
 8 |       day: "monday"
 9 |       time: "09:00"
10 |     open-pull-requests-limit: 10
11 |     reviewers:
12 |       - "@octocat"  # Replace with actual GitHub usernames
13 |     assignees:
14 |       - "@octocat"  # Replace with actual GitHub usernames
15 |     commit-message:
16 |       prefix: "deps"
17 |       include: "scope"
18 |     labels:
19 |       - "dependencies"
20 |       - "automated"
21 |     # Group minor and patch updates together
22 |     groups:
23 |       minor-and-patch:
24 |         patterns:
25 |           - "*"
26 |         update-types:
27 |           - "minor"
28 |           - "patch"
29 |     # Ignore specific packages if needed
30 |     ignore:
31 |       - dependency-name: "typescript"
32 |         versions: ["5.0.x"]  # Example: ignore specific versions
33 | 
34 |   # Enable version updates for GitHub Actions
35 |   - package-ecosystem: "github-actions"
36 |     directory: "/"
37 |     schedule:
38 |       interval: "weekly"
39 |       day: "monday"
40 |       time: "09:00"
41 |     open-pull-requests-limit: 5
42 |     commit-message:
43 |       prefix: "ci"
44 |       include: "scope"
45 |     labels:
46 |       - "github-actions"
47 |       - "automated"
```

--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------

```json
 1 | {
 2 |   "name": "cursor-chat-history-mcp",
 3 |   "version": "0.1.9",
 4 |   "description": "MCP server that provides AI assistants access to Cursor chat history for analysis and insights",
 5 |   "type": "module",
 6 |   "main": "dist/server.js",
 7 |   "bin": {
 8 |     "cursor-chat-history-mcp": "dist/server.js"
 9 |   },
10 |   "files": [
11 |     "dist/**/*",
12 |     "README.md",
13 |     "package.json"
14 |   ],
15 |   "scripts": {
16 |     "build": "tsc",
17 |     "watch": "tsc --watch",
18 |     "start": "node dist/server.js",
19 |     "inspector": "nodemon --watch dist --exec 'npx @modelcontextprotocol/inspector node dist/server.js'",
20 |     "test": "vitest run",
21 |     "test:ui": "vitest --ui",
22 |     "prepublishOnly": "npm run build && chmod +x dist/server.js",
23 |     "postinstall": "chmod +x dist/server.js 2>/dev/null || true"
24 |   },
25 |   "keywords": [
26 |     "mcp",
27 |     "model-context-protocol",
28 |     "cursor",
29 |     "chat history",
30 |     "conversation",
31 |     "ai-assistant",
32 |     "code-analysis",
33 |     "typescript"
34 |   ],
35 |   "author": "",
36 |   "license": "MIT",
37 |   "dependencies": {
38 |     "@modelcontextprotocol/sdk": "^1.12.1",
39 |     "better-sqlite3": "9.2.2",
40 |     "zod": "^3.22.4"
41 |   },
42 |   "devDependencies": {
43 |     "@types/better-sqlite3": "^7.6.13",
44 |     "@types/node": "^20.11.24",
45 |     "@vitest/ui": "^3.2.2",
46 |     "nodemon": "^3.1.10",
47 |     "shx": "^0.3.4",
48 |     "task-master-ai": "^0.16.1",
49 |     "ts-node": "^10.9.2",
50 |     "tsx": "^4.7.0",
51 |     "typescript": "^5.3.3",
52 |     "vitest": "^3.2.2"
53 |   }
54 | }
55 | 
```

--------------------------------------------------------------------------------
/scripts/example_prd.txt:
--------------------------------------------------------------------------------

```
 1 | <context>
 2 | # Overview  
 3 | [Provide a high-level overview of your product here. Explain what problem it solves, who it's for, and why it's valuable.]
 4 | 
 5 | # Core Features  
 6 | [List and describe the main features of your product. For each feature, include:
 7 | - What it does
 8 | - Why it's important
 9 | - How it works at a high level]
10 | 
11 | # User Experience  
12 | [Describe the user journey and experience. Include:
13 | - User personas
14 | - Key user flows
15 | - UI/UX considerations]
16 | </context>
17 | <PRD>
18 | # Technical Architecture  
19 | [Outline the technical implementation details:
20 | - System components
21 | - Data models
22 | - APIs and integrations
23 | - Infrastructure requirements]
24 | 
25 | # Development Roadmap  
26 | [Break down the development process into phases:
27 | - MVP requirements
28 | - Future enhancements
29 | - Do not think about timelines whatsoever -- all that matters is scope and detailing exactly what needs to be build in each phase so it can later be cut up into tasks]
30 | 
31 | # Logical Dependency Chain
32 | [Define the logical order of development:
33 | - Which features need to be built first (foundation)
34 | - Getting as quickly as possible to something usable/visible front end that works
35 | - Properly pacing and scoping each feature so it is atomic but can also be built upon and improved as development approaches]
36 | 
37 | # Risks and Mitigations  
38 | [Identify potential risks and how they'll be addressed:
39 | - Technical challenges
40 | - Figuring out the MVP that we can build upon
41 | - Resource constraints]
42 | 
43 | # Appendix  
44 | [Include any additional information:
45 | - Research findings
46 | - Technical specifications]
47 | </PRD>
```

--------------------------------------------------------------------------------
/.roo/rules/roo_rules.md:
--------------------------------------------------------------------------------

```markdown
 1 | ---
 2 | description: Guidelines for creating and maintaining Roo Code rules to ensure consistency and effectiveness.
 3 | globs: .roo/rules/*.md
 4 | alwaysApply: true
 5 | ---
 6 | 
 7 | - **Required Rule Structure:**
 8 |   ```markdown
 9 |   ---
10 |   description: Clear, one-line description of what the rule enforces
11 |   globs: path/to/files/*.ext, other/path/**/*
12 |   alwaysApply: boolean
13 |   ---
14 | 
15 |   - **Main Points in Bold**
16 |     - Sub-points with details
17 |     - Examples and explanations
18 |   ```
19 | 
20 | - **File References:**
21 |   - Use `[filename](mdc:path/to/file)` ([filename](mdc:filename)) to reference files
22 |   - Example: [prisma.md](mdc:.roo/rules/prisma.md) for rule references
23 |   - Example: [schema.prisma](mdc:prisma/schema.prisma) for code references
24 | 
25 | - **Code Examples:**
26 |   - Use language-specific code blocks
27 |   ```typescript
28 |   // ✅ DO: Show good examples
29 |   const goodExample = true;
30 |   
31 |   // ❌ DON'T: Show anti-patterns
32 |   const badExample = false;
33 |   ```
34 | 
35 | - **Rule Content Guidelines:**
36 |   - Start with high-level overview
37 |   - Include specific, actionable requirements
38 |   - Show examples of correct implementation
39 |   - Reference existing code when possible
40 |   - Keep rules DRY by referencing other rules
41 | 
42 | - **Rule Maintenance:**
43 |   - Update rules when new patterns emerge
44 |   - Add examples from actual codebase
45 |   - Remove outdated patterns
46 |   - Cross-reference related rules
47 | 
48 | - **Best Practices:**
49 |   - Use bullet points for clarity
50 |   - Keep descriptions concise
51 |   - Include both DO and DON'T examples
52 |   - Reference actual code over theoretical examples
53 |   - Use consistent formatting across rules 
```

--------------------------------------------------------------------------------
/.roo/rules/self_improve.md:
--------------------------------------------------------------------------------

```markdown
 1 | ---
 2 | description: Guidelines for continuously improving Roo Code rules based on emerging code patterns and best practices.
 3 | globs: **/*
 4 | alwaysApply: true
 5 | ---
 6 | 
 7 | - **Rule Improvement Triggers:**
 8 |   - New code patterns not covered by existing rules
 9 |   - Repeated similar implementations across files
10 |   - Common error patterns that could be prevented
11 |   - New libraries or tools being used consistently
12 |   - Emerging best practices in the codebase
13 | 
14 | - **Analysis Process:**
15 |   - Compare new code with existing rules
16 |   - Identify patterns that should be standardized
17 |   - Look for references to external documentation
18 |   - Check for consistent error handling patterns
19 |   - Monitor test patterns and coverage
20 | 
21 | - **Rule Updates:**
22 |   - **Add New Rules When:**
23 |     - A new technology/pattern is used in 3+ files
24 |     - Common bugs could be prevented by a rule
25 |     - Code reviews repeatedly mention the same feedback
26 |     - New security or performance patterns emerge
27 | 
28 |   - **Modify Existing Rules When:**
29 |     - Better examples exist in the codebase
30 |     - Additional edge cases are discovered
31 |     - Related rules have been updated
32 |     - Implementation details have changed
33 | 
34 | - **Example Pattern Recognition:**
35 |   ```typescript
36 |   // If you see repeated patterns like:
37 |   const data = await prisma.user.findMany({
38 |     select: { id: true, email: true },
39 |     where: { status: 'ACTIVE' }
40 |   });
41 |   
42 |   // Consider adding to [prisma.md](mdc:.roo/rules/prisma.md):
43 |   // - Standard select fields
44 |   // - Common where conditions
45 |   // - Performance optimization patterns
46 |   ```
47 | 
48 | - **Rule Quality Checks:**
49 |   - Rules should be actionable and specific
50 |   - Examples should come from actual code
51 |   - References should be up to date
52 |   - Patterns should be consistently enforced
53 | 
54 | - **Continuous Improvement:**
55 |   - Monitor code review comments
56 |   - Track common development questions
57 |   - Update rules after major refactors
58 |   - Add links to relevant documentation
59 |   - Cross-reference related rules
60 | 
61 | - **Rule Deprecation:**
62 |   - Mark outdated patterns as deprecated
63 |   - Remove rules that no longer apply
64 |   - Update references to deprecated rules
65 |   - Document migration paths for old patterns
66 | 
67 | - **Documentation Updates:**
68 |   - Keep examples synchronized with code
69 |   - Update references to external docs
70 |   - Maintain links between related rules
71 |   - Document breaking changes
72 | Follow [cursor_rules.md](mdc:.roo/rules/cursor_rules.md) for proper rule formatting and structure.
73 | 
```

--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------

```yaml
# Continuous integration pipeline. Only the `test` and `build-artifacts` jobs
# gate the pipeline; the lint/format and security steps run with
# continue-on-error and are advisory only.
name: CI

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  test:
    name: Test on Node.js ${{ matrix.node-version }}
    runs-on: ubuntu-latest

    strategy:
      matrix:
        # Run the suite against every supported Node major version.
        node-version: [18.x, 20.x, 22.x]

    steps:
    - name: Checkout code
      uses: actions/checkout@v4

    - name: Setup Node.js ${{ matrix.node-version }}
      uses: actions/setup-node@v4
      with:
        node-version: ${{ matrix.node-version }}
        cache: 'yarn'

    - name: Install dependencies
      run: yarn install --frozen-lockfile

    - name: Build project
      run: yarn build

    - name: Run tests
      run: yarn test

    # Type-check separately from the build so type errors fail CI even if the
    # build step uses a transpile-only path.
    - name: Check TypeScript compilation
      run: yarn tsc --noEmit

  lint:
    name: Lint and Format Check
    runs-on: ubuntu-latest

    steps:
    - name: Checkout code
      uses: actions/checkout@v4

    - name: Setup Node.js
      uses: actions/setup-node@v4
      with:
        node-version: '20.x'
        cache: 'yarn'

    - name: Install dependencies
      run: yarn install --frozen-lockfile

    # Both checks below auto-detect whether the tool is configured and are
    # non-blocking (continue-on-error), so they only surface warnings.
    - name: Check formatting (if prettier is configured)
      run: |
        if [ -f ".prettierrc" ] || [ -f ".prettierrc.json" ] || [ -f ".prettierrc.js" ] || grep -q "prettier" package.json; then
          yarn prettier --check .
        else
          echo "Prettier not configured, skipping format check"
        fi
      continue-on-error: true

    - name: Run ESLint (if configured)
      run: |
        if [ -f ".eslintrc.js" ] || [ -f ".eslintrc.json" ] || [ -f "eslint.config.js" ] || grep -q "eslint" package.json; then
          yarn eslint src/
        else
          echo "ESLint not configured, skipping lint check"
        fi
      continue-on-error: true

  build-artifacts:
    name: Build and Upload Artifacts
    runs-on: ubuntu-latest
    # Only publish artifacts once tests and lint have completed.
    needs: [test, lint]

    steps:
    - name: Checkout code
      uses: actions/checkout@v4

    - name: Setup Node.js
      uses: actions/setup-node@v4
      with:
        node-version: '20.x'
        cache: 'yarn'

    - name: Install dependencies
      run: yarn install --frozen-lockfile

    - name: Build project
      run: yarn build

    - name: Upload build artifacts
      uses: actions/upload-artifact@v4
      with:
        # Keyed by commit SHA so artifacts from different commits don't collide.
        name: dist-${{ github.sha }}
        path: dist/
        retention-days: 7

  security:
    name: Security Audit
    runs-on: ubuntu-latest

    steps:
    - name: Checkout code
      uses: actions/checkout@v4

    - name: Setup Node.js
      uses: actions/setup-node@v4
      with:
        node-version: '20.x'
        cache: 'yarn'

    - name: Install dependencies
      run: yarn install --frozen-lockfile

    # Advisory only: audit failures do not fail the pipeline.
    - name: Run security audit
      run: yarn audit --level moderate
      continue-on-error: true

    - name: Check for known vulnerabilities
      run: |
        if command -v npm &> /dev/null; then
          npm audit --audit-level moderate
        fi
      continue-on-error: true

--------------------------------------------------------------------------------
/src/server.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
  2 | import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
  3 | 
  4 | // Mock the MCP SDK
  5 | vi.mock('@modelcontextprotocol/sdk/server/mcp.js');
  6 | vi.mock('./tools/conversation-tools.js');
  7 | vi.mock('./database/reader.js');
  8 | 
  9 | const mockMcpServer = vi.mocked(McpServer);
 10 | 
 11 | describe('MCP Server', () => {
 12 |   let mockServer: any;
 13 | 
 14 |   beforeEach(() => {
 15 |     mockServer = {
 16 |       tool: vi.fn(),
 17 |       connect: vi.fn(),
 18 |       close: vi.fn()
 19 |     };
 20 | 
 21 |     mockMcpServer.mockImplementation(() => mockServer);
 22 |   });
 23 | 
 24 |   afterEach(() => {
 25 |     vi.clearAllMocks();
 26 |   });
 27 | 
 28 |   describe('Server Initialization', () => {
 29 |     it('should create server with correct configuration', async () => {
 30 |       // Import the server module to trigger initialization
 31 |       await import('./server.js');
 32 | 
 33 |       expect(mockMcpServer).toHaveBeenCalledWith({
 34 |         name: 'cursor-chat-history-mcp',
 35 |         version: '0.1.0'
 36 |       });
 37 |     });
 38 | 
 39 |     it('should register all conversation tools', async () => {
 40 |       await import('./server.js');
 41 | 
 42 |       // Verify that the correct tools are registered
 43 |       const expectedTools = [
 44 |         'list_conversations',
 45 |         'get_conversation',
 46 |         'search_conversations',
 47 |         'get_conversation_analytics',
 48 |         'find_related_conversations',
 49 |         'extract_conversation_elements',
 50 |         'export_conversation_data'
 51 |       ];
 52 | 
 53 |       // Check that the expected number of tools are registered
 54 |       expect(mockServer.tool).toHaveBeenCalledTimes(expectedTools.length);
 55 | 
 56 |       // Verify each tool is registered with proper parameters
 57 |       expectedTools.forEach(toolName => {
 58 |         expect(mockServer.tool).toHaveBeenCalledWith(
 59 |           toolName,
 60 |           expect.any(String),
 61 |           expect.any(Object),
 62 |           expect.any(Function)
 63 |         );
 64 |       });
 65 |     });
 66 |   });
 67 | 
 68 |   describe('Tool Registration', () => {
 69 |     it('should register tools with proper descriptions', async () => {
 70 |       await import('./server.js');
 71 | 
 72 |       const toolCalls = mockServer.tool.mock.calls;
 73 | 
 74 |       // Check that each tool has a meaningful description
 75 |       toolCalls.forEach(([toolName, description]: [string, string]) => {
 76 |         expect(typeof toolName).toBe('string');
 77 |         expect(typeof description).toBe('string');
 78 |         expect(description.length).toBeGreaterThan(10);
 79 |       });
 80 |     });
 81 | 
 82 |     it('should register tools with proper schemas', async () => {
 83 |       await import('./server.js');
 84 | 
 85 |       const toolCalls = mockServer.tool.mock.calls;
 86 | 
 87 |       // Check that each tool has a schema object
 88 |       toolCalls.forEach(([, , schema]: [string, string, any]) => {
 89 |         expect(typeof schema).toBe('object');
 90 |         expect(schema).not.toBeNull();
 91 |       });
 92 |     });
 93 | 
 94 |     it('should register tools with handler functions', async () => {
 95 |       await import('./server.js');
 96 | 
 97 |       const toolCalls = mockServer.tool.mock.calls;
 98 | 
 99 |       // Check that each tool has a handler function
100 |       toolCalls.forEach(([, , , handler]: [string, string, any, Function]) => {
101 |         expect(typeof handler).toBe('function');
102 |       });
103 |     });
104 |   });
105 | 
106 |   describe('Error Handling', () => {
107 |     it('should handle server creation errors', () => {
108 |       mockMcpServer.mockImplementation(() => {
109 |         throw new Error('Server creation failed');
110 |       });
111 | 
112 |       expect(async () => {
113 |         await import('./server.js');
114 |       }).not.toThrow();
115 |     });
116 | 
117 |     it('should handle tool registration errors', () => {
118 |       mockServer.tool.mockImplementation(() => {
119 |         throw new Error('Tool registration failed');
120 |       });
121 | 
122 |       expect(async () => {
123 |         await import('./server.js');
124 |       }).not.toThrow();
125 |     });
126 |   });
127 | });
```

--------------------------------------------------------------------------------
/src/tools/extraction-tools.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { z } from 'zod';
  2 | import { CursorDatabaseReader } from '../database/reader.js';
  3 | import type {
  4 |   ExtractedElements,
  5 |   ExportedData,
  6 |   ConversationFilters
  7 | } from '../database/types.js';
  8 | import {
  9 |   exportAsJSON,
 10 |   exportAsCSV,
 11 |   exportAsGraph,
 12 |   createExportMetadata
 13 | } from '../utils/exporters.js';
 14 | import { DatabaseError } from '../utils/errors.js';
 15 | 
// Schema definitions

/** Arguments for the extract_conversation_elements tool. */
export const extractConversationElementsSchema = z.object({
  // Explicit conversations to process; when omitted/empty, all conversations are scanned.
  conversationIds: z.array(z.string()).optional(),
  // Which element kinds to pull out of each conversation.
  elements: z.array(z.enum(['files', 'folders', 'languages', 'codeblocks', 'metadata', 'structure'])).optional().default(['files', 'codeblocks']),
  includeContext: z.boolean().optional().default(false),
  // Result shape: per-conversation, merged per element type, or one flat list.
  groupBy: z.enum(['conversation', 'element', 'none']).optional().default('conversation'),
  filters: z.object({
    minCodeLength: z.number().optional(),
    fileExtensions: z.array(z.string()).optional(),
    languages: z.array(z.string()).optional()
  }).optional()
});

/** Arguments for the export_conversation_data tool. */
export const exportConversationDataSchema = z.object({
  conversationIds: z.array(z.string()).optional(),
  format: z.enum(['json', 'csv', 'graph']).optional().default('json'),
  // When true, full conversation bodies are fetched and passed to the JSON exporter.
  includeContent: z.boolean().optional().default(false),
  // Only consulted by the graph exporter.
  includeRelationships: z.boolean().optional().default(false),
  // Only consulted by the CSV exporter.
  flattenStructure: z.boolean().optional().default(false),
  filters: z.object({
    minSize: z.number().optional(),
    hasCodeBlocks: z.boolean().optional(),
    projectPath: z.string().optional()
  }).optional()
});

export type ExtractConversationElementsInput = z.infer<typeof extractConversationElementsSchema>;
export type ExportConversationDataInput = z.infer<typeof exportConversationDataSchema>;
 44 | 
 45 | /**
 46 |  * Extract specific elements from conversations
 47 |  */
 48 | export async function extractConversationElements(
 49 |   input: ExtractConversationElementsInput
 50 | ): Promise<ExtractedElements> {
 51 |   const reader = new CursorDatabaseReader();
 52 | 
 53 |   try {
 54 |     await reader.connect();
 55 | 
 56 |     // Get conversation IDs to process
 57 |     let conversationIds = input.conversationIds;
 58 |     if (!conversationIds || conversationIds.length === 0) {
 59 |       // Get all conversation IDs if none specified
 60 |       conversationIds = await reader.getConversationIds({
 61 |         format: 'both',
 62 |         minLength: 1000
 63 |       });
 64 |     }
 65 | 
 66 |     // Extract elements from conversations
 67 |     const extractedData = await reader.extractConversationElements(
 68 |       conversationIds,
 69 |       input.elements,
 70 |       {
 71 |         includeContext: input.includeContext,
 72 |         filters: input.filters
 73 |       }
 74 |     );
 75 | 
 76 |     // Group data based on groupBy parameter
 77 |     if (input.groupBy === 'conversation') {
 78 |       return { conversations: extractedData };
 79 |     } else if (input.groupBy === 'element') {
 80 |       // Group by element type
 81 |       const groupedData: Record<string, any[]> = {};
 82 | 
 83 |       for (const elementType of input.elements) {
 84 |         groupedData[elementType] = [];
 85 | 
 86 |         for (const conversation of extractedData) {
 87 |           if (conversation.elements[elementType]) {
 88 |             if (Array.isArray(conversation.elements[elementType])) {
 89 |               groupedData[elementType].push(...conversation.elements[elementType]);
 90 |             } else {
 91 |               groupedData[elementType].push(conversation.elements[elementType]);
 92 |             }
 93 |           }
 94 |         }
 95 |       }
 96 | 
 97 |       return { conversations: groupedData } as any;
 98 |     } else {
 99 |       // Flatten all data
100 |       const flatData: any[] = [];
101 | 
102 |       for (const conversation of extractedData) {
103 |         for (const elementType of input.elements) {
104 |           if (conversation.elements[elementType]) {
105 |             if (Array.isArray(conversation.elements[elementType])) {
106 |               flatData.push(...conversation.elements[elementType].map((item: any) => ({
107 |                 ...item,
108 |                 conversationId: conversation.composerId,
109 |                 elementType
110 |               })));
111 |             } else {
112 |               flatData.push({
113 |                 ...conversation.elements[elementType],
114 |                 conversationId: conversation.composerId,
115 |                 elementType
116 |               });
117 |             }
118 |           }
119 |         }
120 |       }
121 | 
122 |       return { conversations: flatData } as any;
123 |     }
124 | 
125 |   } catch (error) {
126 |     throw new DatabaseError(`Failed to extract conversation elements: ${error instanceof Error ? error.message : 'Unknown error'}`);
127 |   } finally {
128 |     reader.close();
129 |   }
130 | }
131 | 
132 | /**
133 |  * Export conversation data in various formats
134 |  */
135 | export async function exportConversationData(
136 |   input: ExportConversationDataInput
137 | ): Promise<ExportedData> {
138 |   const reader = new CursorDatabaseReader();
139 | 
140 |   try {
141 |     await reader.connect();
142 | 
143 |     // Build filters
144 |     const filters: ConversationFilters = {
145 |       format: 'both',
146 |       minLength: input.filters?.minSize || 1000
147 |     };
148 | 
149 |     if (input.filters?.hasCodeBlocks !== undefined) {
150 |       filters.hasCodeBlocks = input.filters.hasCodeBlocks;
151 |     }
152 | 
153 |     if (input.filters?.projectPath) {
154 |       filters.projectPath = input.filters.projectPath;
155 |     }
156 | 
157 |     // Get conversation IDs to export
158 |     let conversationIds = input.conversationIds;
159 |     if (!conversationIds || conversationIds.length === 0) {
160 |       conversationIds = await reader.getConversationIds(filters);
161 |     }
162 | 
163 |     // Get conversation summaries
164 |     const summaries = await reader.getConversationSummariesForAnalytics(conversationIds);
165 | 
166 |     // Get full conversation data if needed
167 |     let conversationData: Map<string, any> | undefined;
168 |     if (input.includeContent) {
169 |       conversationData = new Map();
170 |       for (const id of conversationIds) {
171 |         try {
172 |           const conversation = await reader.getConversationById(id);
173 |           if (conversation) {
174 |             conversationData.set(id, conversation);
175 |           }
176 |         } catch (error) {
177 |           console.error(`Failed to get full conversation data for ${id}:`, error);
178 |         }
179 |       }
180 |     }
181 | 
182 |     // Export in requested format
183 |     let exportedData: any;
184 | 
185 |     switch (input.format) {
186 |       case 'json':
187 |         exportedData = exportAsJSON(summaries, input.includeContent, conversationData);
188 |         break;
189 | 
190 |       case 'csv':
191 |         exportedData = exportAsCSV(summaries, input.flattenStructure);
192 |         break;
193 | 
194 |       case 'graph':
195 |         exportedData = exportAsGraph(summaries, input.includeRelationships);
196 |         break;
197 | 
198 |       default:
199 |         exportedData = exportAsJSON(summaries, input.includeContent, conversationData);
200 |     }
201 | 
202 |     // Create metadata
203 |     const metadata = createExportMetadata(
204 |       summaries.length,
205 |       conversationIds.length,
206 |       input.filters || {}
207 |     );
208 | 
209 |     return {
210 |       format: input.format,
211 |       data: exportedData,
212 |       metadata
213 |     };
214 | 
215 |   } catch (error) {
216 |     throw new DatabaseError(`Failed to export conversation data: ${error instanceof Error ? error.message : 'Unknown error'}`);
217 |   } finally {
218 |     reader.close();
219 |   }
220 | }
```

--------------------------------------------------------------------------------
/src/tools/analytics-tools.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { z } from 'zod';
  2 | import { CursorDatabaseReader } from '../database/reader.js';
  3 | import type {
  4 |   ConversationAnalytics,
  5 |   RelatedConversationsResult,
  6 |   ConversationFilters
  7 | } from '../database/types.js';
  8 | import {
  9 |   calculateOverview,
 10 |   calculateFileBreakdown,
 11 |   calculateLanguageBreakdown,
 12 |   calculateTemporalBreakdown,
 13 |   calculateSizeDistribution
 14 | } from '../utils/analytics.js';
 15 | import {
 16 |   findRelatedConversations as findRelatedConversationsUtil,
 17 |   extractLanguagesFromCodeBlocks
 18 | } from '../utils/relationships.js';
 19 | import { DatabaseError } from '../utils/errors.js';
 20 | 
// Schema definitions

/** Arguments for the get_conversation_analytics tool. */
export const getConversationAnalyticsSchema = z.object({
  // 'all' = every conversation, 'recent' = newest subset, 'project' = filtered by projectPath.
  scope: z.enum(['all', 'recent', 'project']).optional().default('all'),
  projectPath: z.string().optional(),
  // NOTE(review): echoed back in the result's scope metadata but not used to
  // filter — the handler approximates "recent" with a newest-30% heuristic.
  recentDays: z.number().min(1).max(365).optional().default(30),
  includeBreakdowns: z.array(z.enum(['files', 'languages', 'temporal', 'size'])).optional().default(['files', 'languages']),
  // When true, per-conversation details are embedded (can grow the response significantly).
  includeConversationDetails: z.boolean().optional().default(false)
});

/** Arguments for the find_related_conversations tool. */
export const findRelatedConversationsSchema = z.object({
  referenceConversationId: z.string().min(1),
  // Signals used to compute relatedness scores.
  relationshipTypes: z.array(z.enum(['files', 'folders', 'languages', 'size', 'temporal'])).optional().default(['files']),
  maxResults: z.number().min(1).max(50).optional().default(10),
  // Minimum similarity score (0..1) for a conversation to be reported.
  minScore: z.number().min(0).max(1).optional().default(0.1),
  includeScoreBreakdown: z.boolean().optional().default(false)
});

export type GetConversationAnalyticsInput = z.infer<typeof getConversationAnalyticsSchema>;
export type FindRelatedConversationsInput = z.infer<typeof findRelatedConversationsSchema>;
 40 | 
/**
 * Get comprehensive analytics and statistics about Cursor conversations.
 *
 * @param input - validated arguments (see getConversationAnalyticsSchema)
 * @returns overview totals, the requested breakdowns, scope metadata, and
 *   (only when includeConversationDetails is true) per-conversation details
 * @throws DatabaseError when the underlying database cannot be read
 */
export async function getConversationAnalytics(
  input: GetConversationAnalyticsInput
): Promise<ConversationAnalytics> {
  const reader = new CursorDatabaseReader();

  try {
    await reader.connect();

    // Build filters based on scope
    const filters: ConversationFilters = {
      format: 'both',
      minLength: 100 // Filter out only very small conversations (reduced from 1000)
    };

    if (input.scope === 'project' && input.projectPath) {
      filters.projectPath = input.projectPath;
    }

    // Get conversation IDs
    const conversationIds = await reader.getConversationIds(filters);

    // Apply recent filter if needed
    // NOTE(review): input.recentDays is never consulted here — "recent" is
    // approximated as the newest ~30% of conversations (ROWID ordering),
    // presumably because summaries carry no timestamps. Confirm and either
    // honor recentDays or drop it from the schema. The Math.min is redundant
    // since floor(len * 0.3) <= len always.
    let filteredIds = conversationIds;
    if (input.scope === 'recent') {
      // Take the most recent conversations (ROWID ordering)
      const recentCount = Math.min(conversationIds.length, Math.floor(conversationIds.length * 0.3));
      filteredIds = conversationIds.slice(0, recentCount);
    }

    // Get conversation summaries
    const summaries = await reader.getConversationSummariesForAnalytics(filteredIds);

    // Calculate overview
    const overview = calculateOverview(summaries);

    // Calculate only the breakdowns the caller asked for; each key is present
    // only when requested.
    const breakdowns: any = {};

    if (input.includeBreakdowns.includes('files')) {
      breakdowns.files = calculateFileBreakdown(summaries);
    }

    if (input.includeBreakdowns.includes('languages')) {
      // Get conversations with code blocks for language analysis
      const conversationsWithCode = await reader.getConversationsWithCodeBlocks(filteredIds);
      breakdowns.languages = calculateLanguageBreakdown(conversationsWithCode);
    }

    if (input.includeBreakdowns.includes('temporal')) {
      breakdowns.temporal = calculateTemporalBreakdown(summaries, filteredIds);
    }

    if (input.includeBreakdowns.includes('size')) {
      breakdowns.size = calculateSizeDistribution(summaries);
    }

    return {
      overview,
      breakdowns,
      scope: {
        type: input.scope,
        projectPath: input.projectPath,
        recentDays: input.scope === 'recent' ? input.recentDays : undefined,
        totalScanned: filteredIds.length
      },
      // Only include conversation details when requested (to control response size)
      conversationIds: input.includeConversationDetails ? filteredIds : [],
      conversations: input.includeConversationDetails ? summaries.map(s => ({
        composerId: s.composerId,
        messageCount: s.messageCount,
        size: s.conversationSize,
        files: s.relevantFiles.slice(0, 2), // Top 2 files only
        hasCodeBlocks: s.codeBlockCount > 0
      })) : []
    };

  } catch (error) {
    throw new DatabaseError(`Failed to get conversation analytics: ${error instanceof Error ? error.message : 'Unknown error'}`);
  } finally {
    reader.close();
  }
}
126 | 
127 | /**
128 |  * Find conversations related to a reference conversation
129 |  */
130 | export async function findRelatedConversations(
131 |   input: FindRelatedConversationsInput
132 | ): Promise<RelatedConversationsResult> {
133 |   const reader = new CursorDatabaseReader();
134 | 
135 |   try {
136 |     await reader.connect();
137 | 
138 |     // Get reference conversation summary
139 |     const referenceSummary = await reader.getConversationSummary(input.referenceConversationId, {
140 |       includeFirstMessage: true,
141 |       includeCodeBlockCount: true,
142 |       includeFileList: true,
143 |       includeAttachedFolders: true,
144 |       maxFirstMessageLength: 150
145 |     });
146 | 
147 |     if (!referenceSummary) {
148 |       throw new DatabaseError(`Reference conversation ${input.referenceConversationId} not found`);
149 |     }
150 | 
151 |     // Get all conversation IDs for comparison
152 |     const allConversationIds = await reader.getConversationIds({
153 |       format: 'both',
154 |       minLength: 100
155 |     });
156 | 
157 |     // Get summaries for all conversations
158 |     const allSummaries = await reader.getConversationSummariesForAnalytics(allConversationIds);
159 | 
160 |     // Extract languages from reference conversation if needed
161 |     let referenceLanguages: string[] = [];
162 |     if (input.relationshipTypes.includes('languages')) {
163 |       const conversationsWithCode = await reader.getConversationsWithCodeBlocks([input.referenceConversationId]);
164 |       if (conversationsWithCode.length > 0) {
165 |         referenceLanguages = extractLanguagesFromCodeBlocks(conversationsWithCode[0].codeBlocks);
166 |       }
167 |     }
168 | 
169 |     // Find related conversations
170 |     const related = findRelatedConversationsUtil(
171 |       referenceSummary,
172 |       allSummaries,
173 |       allConversationIds,
174 |       {
175 |         relationshipTypes: input.relationshipTypes,
176 |         maxResults: input.maxResults,
177 |         minScore: input.minScore,
178 |         includeScoreBreakdown: input.includeScoreBreakdown
179 |       }
180 |     );
181 | 
182 |     return {
183 |       reference: {
184 |         composerId: referenceSummary.composerId,
185 |         files: referenceSummary.relevantFiles,
186 |         folders: referenceSummary.attachedFolders,
187 |         languages: referenceLanguages,
188 |         messageCount: referenceSummary.messageCount,
189 |         size: referenceSummary.conversationSize
190 |       },
191 |       related
192 |     };
193 | 
194 |   } catch (error) {
195 |     throw new DatabaseError(`Failed to find related conversations: ${error instanceof Error ? error.message : 'Unknown error'}`);
196 |   } finally {
197 |     reader.close();
198 |   }
199 | }
```

--------------------------------------------------------------------------------
/src/utils/validation.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Validation utilities for MCP tool parameters
  3 |  */
  4 | 
  5 | import { z } from 'zod';
  6 | import {
  7 |   ValidationError,
  8 |   MissingParameterError,
  9 |   InvalidParameterError
 10 | } from './errors.js';
 11 | 
 12 | /**
 13 |  * Validates that a required parameter is present and not empty
 14 |  */
 15 | export function validateRequired<T>(
 16 |   value: T | undefined | null,
 17 |   paramName: string
 18 | ): T {
 19 |   if (value === undefined || value === null) {
 20 |     throw new MissingParameterError(paramName);
 21 |   }
 22 | 
 23 |   if (typeof value === 'string' && value.trim() === '') {
 24 |     throw new InvalidParameterError(paramName, value, 'non-empty string');
 25 |   }
 26 | 
 27 |   return value;
 28 | }
 29 | 
 30 | /**
 31 |  * Validates that a string parameter meets minimum length requirements
 32 |  */
 33 | export function validateStringLength(
 34 |   value: string | undefined,
 35 |   paramName: string,
 36 |   minLength: number = 1,
 37 |   maxLength?: number
 38 | ): string | undefined {
 39 |   if (value === undefined) {
 40 |     return undefined;
 41 |   }
 42 | 
 43 |   if (typeof value !== 'string') {
 44 |     throw new InvalidParameterError(paramName, value, 'string');
 45 |   }
 46 | 
 47 |   if (value.length < minLength) {
 48 |     throw new InvalidParameterError(
 49 |       paramName,
 50 |       value,
 51 |       `string with at least ${minLength} characters`
 52 |     );
 53 |   }
 54 | 
 55 |   if (maxLength && value.length > maxLength) {
 56 |     throw new InvalidParameterError(
 57 |       paramName,
 58 |       value,
 59 |       `string with at most ${maxLength} characters`
 60 |     );
 61 |   }
 62 | 
 63 |   return value;
 64 | }
 65 | 
 66 | /**
 67 |  * Validates that a number parameter is within acceptable range
 68 |  */
 69 | export function validateNumberRange(
 70 |   value: number | undefined,
 71 |   paramName: string,
 72 |   min?: number,
 73 |   max?: number
 74 | ): number | undefined {
 75 |   if (value === undefined) {
 76 |     return undefined;
 77 |   }
 78 | 
 79 |   if (typeof value !== 'number' || isNaN(value)) {
 80 |     throw new InvalidParameterError(paramName, value, 'number');
 81 |   }
 82 | 
 83 |   if (min !== undefined && value < min) {
 84 |     throw new InvalidParameterError(
 85 |       paramName,
 86 |       value,
 87 |       `number >= ${min}`
 88 |     );
 89 |   }
 90 | 
 91 |   if (max !== undefined && value > max) {
 92 |     throw new InvalidParameterError(
 93 |       paramName,
 94 |       value,
 95 |       `number <= ${max}`
 96 |     );
 97 |   }
 98 | 
 99 |   return value;
100 | }
101 | 
102 | /**
103 |  * Validates that an array parameter meets length requirements
104 |  */
105 | export function validateArrayLength<T>(
106 |   value: T[] | undefined,
107 |   paramName: string,
108 |   minLength: number = 0,
109 |   maxLength?: number
110 | ): T[] | undefined {
111 |   if (value === undefined) {
112 |     return undefined;
113 |   }
114 | 
115 |   if (!Array.isArray(value)) {
116 |     throw new InvalidParameterError(paramName, value, 'array');
117 |   }
118 | 
119 |   if (value.length < minLength) {
120 |     throw new InvalidParameterError(
121 |       paramName,
122 |       value,
123 |       `array with at least ${minLength} items`
124 |     );
125 |   }
126 | 
127 |   if (maxLength && value.length > maxLength) {
128 |     throw new InvalidParameterError(
129 |       paramName,
130 |       value,
131 |       `array with at most ${maxLength} items`
132 |     );
133 |   }
134 | 
135 |   return value;
136 | }
137 | 
138 | /**
139 |  * Validates that a value is one of the allowed enum values
140 |  */
141 | export function validateEnum<T extends string>(
142 |   value: T | undefined,
143 |   paramName: string,
144 |   allowedValues: readonly T[]
145 | ): T | undefined {
146 |   if (value === undefined) {
147 |     return undefined;
148 |   }
149 | 
150 |   if (!allowedValues.includes(value)) {
151 |     throw new InvalidParameterError(
152 |       paramName,
153 |       value,
154 |       `one of: ${allowedValues.join(', ')}`
155 |     );
156 |   }
157 | 
158 |   return value;
159 | }
160 | 
161 | /**
162 |  * Validates that a conversation ID has the correct format
163 |  */
164 | export function validateConversationId(conversationId: string): string {
165 |   if (!conversationId || conversationId.trim() === '') {
166 |     throw new MissingParameterError('conversationId');
167 |   }
168 | 
169 |   validateStringLength(conversationId, 'conversationId', 1, 100);
170 | 
171 |   // Basic format validation - should be alphanumeric with possible hyphens/underscores
172 |   if (!/^[a-zA-Z0-9_-]+$/.test(conversationId)) {
173 |     throw new InvalidParameterError(
174 |       'conversationId',
175 |       conversationId,
176 |       'alphanumeric string with optional hyphens and underscores'
177 |     );
178 |   }
179 | 
180 |   return conversationId;
181 | }
182 | 
183 | /**
184 |  * Validates that a bubble ID has the correct format
185 |  */
186 | export function validateBubbleId(bubbleId: string): string {
187 |   if (!bubbleId || bubbleId.trim() === '') {
188 |     throw new MissingParameterError('bubbleId');
189 |   }
190 | 
191 |   validateStringLength(bubbleId, 'bubbleId', 1, 100);
192 | 
193 |   // Basic format validation - should be alphanumeric with possible hyphens/underscores
194 |   if (!/^[a-zA-Z0-9_-]+$/.test(bubbleId)) {
195 |     throw new InvalidParameterError(
196 |       'bubbleId',
197 |       bubbleId,
198 |       'alphanumeric string with optional hyphens and underscores'
199 |     );
200 |   }
201 | 
202 |   return bubbleId;
203 | }
204 | 
205 | /**
206 |  * Validates search query parameters
207 |  */
208 | export function validateSearchQuery(query: string): string {
209 |   if (!query || query.trim() === '') {
210 |     throw new MissingParameterError('query');
211 |   }
212 | 
213 |   validateStringLength(query, 'query', 1, 1000);
214 | 
215 |   // Ensure query is not just whitespace
216 |   if (query.trim().length === 0) {
217 |     throw new InvalidParameterError(
218 |       'query',
219 |       query,
220 |       'non-empty search query'
221 |     );
222 |   }
223 | 
224 |   return query.trim();
225 | }
226 | 
227 | /**
228 |  * Validates file path parameters
229 |  */
230 | export function validateFilePath(path: string | undefined, paramName: string): string | undefined {
231 |   if (path === undefined) {
232 |     return undefined;
233 |   }
234 | 
235 |   validateStringLength(path, paramName, 1, 1000);
236 | 
237 |   // Basic path validation - should not contain null bytes or other dangerous characters
238 |   if (path.includes('\0')) {
239 |     throw new InvalidParameterError(
240 |       paramName,
241 |       path,
242 |       'valid file path without null bytes'
243 |     );
244 |   }
245 | 
246 |   return path;
247 | }
248 | 
249 | /**
250 |  * Validates project path parameters
251 |  */
252 | export function validateProjectPath(projectPath: string): string {
253 |   if (!projectPath || projectPath.trim() === '') {
254 |     throw new MissingParameterError('projectPath');
255 |   }
256 | 
257 |   validateStringLength(projectPath, 'projectPath', 1, 1000);
258 | 
259 |   // Basic path validation
260 |   if (projectPath.includes('\0')) {
261 |     throw new InvalidParameterError(
262 |       'projectPath',
263 |       projectPath,
264 |       'valid project path without null bytes'
265 |     );
266 |   }
267 | 
268 |   return projectPath;
269 | }
270 | 
271 | /**
272 |  * Validates and sanitizes input using a Zod schema
273 |  */
274 | export function validateWithSchema<T>(
275 |   input: unknown,
276 |   schema: z.ZodSchema<T>,
277 |   context: string = 'input'
278 | ): T {
279 |   try {
280 |     return schema.parse(input);
281 |   } catch (error) {
282 |     if (error instanceof z.ZodError) {
283 |       const firstIssue = error.issues[0];
284 |       const path = firstIssue.path.join('.');
285 |       const field = path || 'root';
286 | 
287 |       throw new ValidationError(
288 |         `Validation error in ${context}: ${firstIssue.message} at ${field}`,
289 |         field,
290 |         'received' in firstIssue ? firstIssue.received : undefined
291 |       );
292 |     }
293 | 
294 |     throw new ValidationError(
295 |       `Validation error in ${context}: ${error instanceof Error ? error.message : 'Unknown error'}`
296 |     );
297 |   }
298 | }
299 | 
300 | /**
301 |  * Validates boolean parameters with proper type checking
302 |  */
303 | export function validateBoolean(
304 |   value: boolean | undefined,
305 |   paramName: string
306 | ): boolean | undefined {
307 |   if (value === undefined) {
308 |     return undefined;
309 |   }
310 | 
311 |   if (typeof value !== 'boolean') {
312 |     throw new InvalidParameterError(paramName, value, 'boolean');
313 |   }
314 | 
315 |   return value;
316 | }
317 | 
318 | /**
319 |  * Validates limit parameters commonly used in pagination
320 |  */
321 | export function validateLimit(limit: number | undefined, defaultLimit: number = 10): number {
322 |   if (limit === undefined) {
323 |     return defaultLimit;
324 |   }
325 | 
326 |   return validateNumberRange(limit, 'limit', 1, 1000) ?? defaultLimit;
327 | }
328 | 
329 | /**
330 |  * Validates offset parameters commonly used in pagination
331 |  */
332 | export function validateOffset(offset: number | undefined): number {
333 |   if (offset === undefined) {
334 |     return 0;
335 |   }
336 | 
337 |   return validateNumberRange(offset, 'offset', 0) ?? 0;
338 | }
339 | 
340 | /**
341 |  * Validates context lines parameter for search results
342 |  */
343 | export function validateContextLines(contextLines: number | undefined): number {
344 |   if (contextLines === undefined) {
345 |     return 3;
346 |   }
347 | 
348 |   return validateNumberRange(contextLines, 'contextLines', 0, 10) ?? 3;
349 | }
```

--------------------------------------------------------------------------------
/src/utils/errors.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Custom error classes for the Cursor Chat History MCP server
  3 |  */
  4 | 
  5 | /**
  6 |  * Base error class for all MCP-related errors
  7 |  */
  8 | export class MCPError extends Error {
  9 |   public readonly code: string;
 10 |   public readonly statusCode: number;
 11 | 
 12 |   constructor(message: string, code: string = 'MCP_ERROR', statusCode: number = 500) {
 13 |     super(message);
 14 |     this.name = this.constructor.name;
 15 |     this.code = code;
 16 |     this.statusCode = statusCode;
 17 | 
 18 |     // Maintains proper stack trace for where our error was thrown (only available on V8)
 19 |     if (Error.captureStackTrace) {
 20 |       Error.captureStackTrace(this, this.constructor);
 21 |     }
 22 |   }
 23 | }
 24 | 
 25 | /**
 26 |  * Error thrown when database operations fail
 27 |  */
 28 | export class DatabaseError extends MCPError {
 29 |   constructor(message: string, originalError?: Error) {
 30 |     super(
 31 |       originalError ? `Database error: ${message}. Original: ${originalError.message}` : `Database error: ${message}`,
 32 |       'DATABASE_ERROR',
 33 |       500
 34 |     );
 35 | 
 36 |     if (originalError && originalError.stack) {
 37 |       this.stack = `${this.stack}\nCaused by: ${originalError.stack}`;
 38 |     }
 39 |   }
 40 | }
 41 | 
 42 | /**
 43 |  * Error thrown when database connection fails
 44 |  */
 45 | export class DatabaseConnectionError extends DatabaseError {
 46 |   constructor(dbPath: string, originalError?: Error) {
 47 |     super(
 48 |       `Failed to connect to database at path: ${dbPath}`,
 49 |       originalError
 50 |     );
 51 |     // Override the code property by redefining it
 52 |     Object.defineProperty(this, 'code', {
 53 |       value: 'DATABASE_CONNECTION_ERROR',
 54 |       writable: false,
 55 |       enumerable: true,
 56 |       configurable: false
 57 |     });
 58 |   }
 59 | }
 60 | 
 61 | /**
 62 |  * Error thrown when a conversation is not found
 63 |  */
 64 | export class ConversationNotFoundError extends MCPError {
 65 |   public readonly conversationId: string;
 66 | 
 67 |   constructor(conversationId: string) {
 68 |     super(`Conversation not found: ${conversationId}`, 'CONVERSATION_NOT_FOUND', 404);
 69 |     this.conversationId = conversationId;
 70 |   }
 71 | }
 72 | 
 73 | /**
 74 |  * Error thrown when a bubble message is not found
 75 |  */
 76 | export class BubbleMessageNotFoundError extends MCPError {
 77 |   public readonly composerId: string;
 78 |   public readonly bubbleId: string;
 79 | 
 80 |   constructor(composerId: string, bubbleId: string) {
 81 |     super(
 82 |       `Bubble message not found: ${bubbleId} in conversation ${composerId}`,
 83 |       'BUBBLE_MESSAGE_NOT_FOUND',
 84 |       404
 85 |     );
 86 |     this.composerId = composerId;
 87 |     this.bubbleId = bubbleId;
 88 |   }
 89 | }
 90 | 
 91 | /**
 92 |  * Error thrown when input validation fails
 93 |  */
 94 | export class ValidationError extends MCPError {
 95 |   public readonly field?: string;
 96 |   public readonly value?: any;
 97 | 
 98 |   constructor(message: string, field?: string, value?: any) {
 99 |     super(`Validation error: ${message}`, 'VALIDATION_ERROR', 400);
100 |     this.field = field;
101 |     this.value = value;
102 |   }
103 | }
104 | 
105 | /**
106 |  * Error thrown when required parameters are missing
107 |  */
108 | export class MissingParameterError extends ValidationError {
109 |   constructor(parameterName: string) {
110 |     super(`Missing required parameter: ${parameterName}`, parameterName);
111 |     Object.defineProperty(this, 'code', {
112 |       value: 'MISSING_PARAMETER',
113 |       writable: false,
114 |       enumerable: true,
115 |       configurable: false
116 |     });
117 |   }
118 | }
119 | 
120 | /**
121 |  * Error thrown when parameter values are invalid
122 |  */
123 | export class InvalidParameterError extends ValidationError {
124 |   constructor(parameterName: string, value: any, expectedType?: string) {
125 |     const message = expectedType
126 |       ? `Invalid parameter '${parameterName}': expected ${expectedType}, got ${typeof value}`
127 |       : `Invalid parameter '${parameterName}': ${value}`;
128 | 
129 |     super(message, parameterName, value);
130 |     Object.defineProperty(this, 'code', {
131 |       value: 'INVALID_PARAMETER',
132 |       writable: false,
133 |       enumerable: true,
134 |       configurable: false
135 |     });
136 |   }
137 | }
138 | 
139 | /**
140 |  * Error thrown when file system operations fail
141 |  */
142 | export class FileSystemError extends MCPError {
143 |   public readonly path: string;
144 | 
145 |   constructor(message: string, path: string, originalError?: Error) {
146 |     super(
147 |       originalError ? `File system error: ${message}. Original: ${originalError.message}` : `File system error: ${message}`,
148 |       'FILESYSTEM_ERROR',
149 |       500
150 |     );
151 |     this.path = path;
152 | 
153 |     if (originalError && originalError.stack) {
154 |       this.stack = `${this.stack}\nCaused by: ${originalError.stack}`;
155 |     }
156 |   }
157 | }
158 | 
159 | /**
160 |  * Error thrown when database path cannot be detected
161 |  */
162 | export class DatabasePathNotFoundError extends FileSystemError {
163 |   constructor(attemptedPaths: string[]) {
164 |     super(
165 |       `Could not find Cursor database. Attempted paths: ${attemptedPaths.join(', ')}`,
166 |       attemptedPaths[0] || 'unknown'
167 |     );
168 |     Object.defineProperty(this, 'code', {
169 |       value: 'DATABASE_PATH_NOT_FOUND',
170 |       writable: false,
171 |       enumerable: true,
172 |       configurable: false
173 |     });
174 |   }
175 | }
176 | 
177 | /**
178 |  * Error thrown when parsing chat data fails
179 |  */
180 | export class ConversationParseError extends MCPError {
181 |   public readonly conversationId?: string;
182 | 
183 |   constructor(message: string, conversationId?: string, originalError?: Error) {
184 |     super(
185 |       originalError ? `Parse error: ${message}. Original: ${originalError.message}` : `Parse error: ${message}`,
186 |       'CONVERSATION_PARSE_ERROR',
187 |       500
188 |     );
189 |     this.conversationId = conversationId;
190 | 
191 |     if (originalError && originalError.stack) {
192 |       this.stack = `${this.stack}\nCaused by: ${originalError.stack}`;
193 |     }
194 |   }
195 | }
196 | 
197 | /**
198 |  * Error thrown when search operations fail
199 |  */
200 | export class SearchError extends MCPError {
201 |   public readonly query: string;
202 | 
203 |   constructor(message: string, query: string, originalError?: Error) {
204 |     super(
205 |       originalError ? `Search error: ${message}. Original: ${originalError.message}` : `Search error: ${message}`,
206 |       'SEARCH_ERROR',
207 |       500
208 |     );
209 |     this.query = query;
210 | 
211 |     if (originalError && originalError.stack) {
212 |       this.stack = `${this.stack}\nCaused by: ${originalError.stack}`;
213 |     }
214 |   }
215 | }
216 | 
217 | /**
218 |  * Error thrown when cache operations fail
219 |  */
220 | export class CacheError extends MCPError {
221 |   public readonly operation: string;
222 |   public readonly key?: string;
223 | 
224 |   constructor(message: string, operation: string, key?: string, originalError?: Error) {
225 |     super(
226 |       originalError ? `Cache error: ${message}. Original: ${originalError.message}` : `Cache error: ${message}`,
227 |       'CACHE_ERROR',
228 |       500
229 |     );
230 |     this.operation = operation;
231 |     this.key = key;
232 | 
233 |     if (originalError && originalError.stack) {
234 |       this.stack = `${this.stack}\nCaused by: ${originalError.stack}`;
235 |     }
236 |   }
237 | }
238 | 
239 | /**
240 |  * Utility function to check if an error is an instance of MCPError
241 |  */
242 | export function isMCPError(error: any): error is MCPError {
243 |   return error instanceof MCPError;
244 | }
245 | 
246 | /**
247 |  * Utility function to extract error information for logging
248 |  */
249 | export function getErrorInfo(error: any): {
250 |   message: string;
251 |   code: string;
252 |   statusCode: number;
253 |   stack?: string;
254 |   originalError?: string;
255 | } {
256 |   // Handle null and undefined
257 |   if (error === null || error === undefined) {
258 |     return {
259 |       message: 'Unknown error occurred',
260 |       code: 'UNKNOWN_ERROR',
261 |       statusCode: 500,
262 |     };
263 |   }
264 | 
265 |   if (isMCPError(error)) {
266 |     const result: any = {
267 |       message: error.message,
268 |       code: error.code,
269 |       statusCode: error.statusCode,
270 |       stack: error.stack,
271 |     };
272 | 
273 |     // Extract original error info for nested errors
274 |     if (error instanceof DatabaseError ||
275 |         error instanceof FileSystemError ||
276 |         error instanceof ConversationParseError ||
277 |         error instanceof SearchError ||
278 |         error instanceof CacheError) {
279 |       // Check if the error message contains "Original: " which indicates a nested error
280 |       const originalMatch = error.message.match(/Original: (.+)$/);
281 |       if (originalMatch) {
282 |         result.originalError = originalMatch[1];
283 |       }
284 |     }
285 | 
286 |     return result;
287 |   }
288 | 
289 |   if (error instanceof Error) {
290 |     return {
291 |       message: error.message,
292 |       code: 'UNKNOWN_ERROR',
293 |       statusCode: 500,
294 |       stack: error.stack,
295 |     };
296 |   }
297 | 
298 |   // Handle objects with toString method
299 |   if (error && typeof error === 'object' && typeof error.toString === 'function') {
300 |     return {
301 |       message: error.toString(),
302 |       code: 'UNKNOWN_ERROR',
303 |       statusCode: 500,
304 |     };
305 |   }
306 | 
307 |   return {
308 |     message: String(error),
309 |     code: 'UNKNOWN_ERROR',
310 |     statusCode: 500,
311 |   };
312 | }
```

--------------------------------------------------------------------------------
/.taskmaster/reports/task-complexity-report.json:
--------------------------------------------------------------------------------

```json
  1 | {
  2 |   "meta": {
  3 |     "generatedAt": "2025-06-06T21:31:13.096Z",
  4 |     "tasksAnalyzed": 15,
  5 |     "totalTasks": 15,
  6 |     "analysisCount": 15,
  7 |     "thresholdScore": 5,
  8 |     "projectName": "Taskmaster",
  9 |     "usedResearch": true
 10 |   },
 11 |   "complexityAnalysis": [
 12 |     {
 13 |       "taskId": 1,
 14 |       "taskTitle": "Add SQLite Database Dependencies",
 15 |       "complexityScore": 2,
 16 |       "recommendedSubtasks": 3,
 17 |       "expansionPrompt": "Break down the process of adding the better-sqlite3 dependency into subtasks such as: (1) Install the package, (2) Update package.json and verify version compatibility, (3) Test the installation by importing and connecting in a test file.",
 18 |       "reasoning": "This task is straightforward, involving package installation and basic verification. The main complexity lies in ensuring compatibility and confirming the dependency is correctly set up."
 19 |     },
 20 |     {
 21 |       "taskId": 2,
 22 |       "taskTitle": "Implement Database Reader Module",
 23 |       "complexityScore": 6,
 24 |       "recommendedSubtasks": 5,
 25 |       "expansionPrompt": "Expand into subtasks such as: (1) Create the DatabaseReader class skeleton, (2) Implement connection logic, (3) Add query execution methods, (4) Handle both legacy and modern formats, (5) Write unit tests with mocks.",
 26 |       "reasoning": "This task requires designing a reusable module, handling multiple data formats, and ensuring robust testing, which increases its complexity."
 27 |     },
 28 |     {
 29 |       "taskId": 3,
 30 |       "taskTitle": "Create Conversation Parser",
 31 |       "complexityScore": 6,
 32 |       "recommendedSubtasks": 5,
 33 |       "expansionPrompt": "Expand into subtasks such as: (1) Create the ConversationParser class, (2) Implement legacy format parsing, (3) Implement modern format parsing, (4) Extract messages, code blocks, files, and timestamps, (5) Write unit tests for both formats.",
 34 |       "reasoning": "Parsing and normalizing two distinct data formats, extracting multiple data types, and ensuring correctness through tests adds moderate complexity."
 35 |     },
 36 |     {
 37 |       "taskId": 4,
 38 |       "taskTitle": "Implement List Conversations Tool",
 39 |       "complexityScore": 7,
 40 |       "recommendedSubtasks": 6,
 41 |       "expansionPrompt": "Expand into subtasks such as: (1) Define tool interface and options, (2) Integrate with DatabaseReader, (3) Implement filtering logic, (4) Handle both formats, (5) Implement ordering by ROWID, (6) Write integration tests.",
 42 |       "reasoning": "This tool requires integrating multiple modules, supporting flexible filtering, and ensuring compatibility with both data formats, increasing complexity."
 43 |     },
 44 |     {
 45 |       "taskId": 5,
 46 |       "taskTitle": "Implement Get Conversation Tool",
 47 |       "complexityScore": 6,
 48 |       "recommendedSubtasks": 5,
 49 |       "expansionPrompt": "Expand into subtasks such as: (1) Define tool interface, (2) Fetch conversation data using DatabaseReader, (3) Parse data with ConversationParser, (4) Handle bubble message resolution, (5) Write integration tests.",
 50 |       "reasoning": "Fetching and parsing conversation data with format-specific logic and ensuring correct content retrieval requires careful implementation and testing."
 51 |     },
 52 |     {
 53 |       "taskId": 6,
 54 |       "taskTitle": "Implement Get Conversation Summary Tool",
 55 |       "complexityScore": 5,
 56 |       "recommendedSubtasks": 4,
 57 |       "expansionPrompt": "Expand into subtasks such as: (1) Define summary options, (2) Extract key information using DatabaseReader and ConversationParser, (3) Implement summary formatting, (4) Write unit tests for different options.",
 58 |       "reasoning": "Summarizing conversations is less complex than full retrieval but still requires handling multiple formats and customizable output."
 59 |     },
 60 |     {
 61 |       "taskId": 7,
 62 |       "taskTitle": "Implement Search Conversations Tool",
 63 |       "complexityScore": 7,
 64 |       "recommendedSubtasks": 6,
 65 |       "expansionPrompt": "Expand into subtasks such as: (1) Define search interface and options, (2) Implement SQLite LIKE-based search, (3) Support search types (all, summarization, code, files), (4) Handle both formats and bubble messages, (5) Implement context retrieval, (6) Write integration tests.",
 66 |       "reasoning": "Implementing efficient, flexible search across multiple formats and content types, with context handling, adds significant complexity."
 67 |     },
 68 |     {
 69 |       "taskId": 8,
 70 |       "taskTitle": "Implement Get Bubble Message Tool",
 71 |       "complexityScore": 4,
 72 |       "recommendedSubtasks": 4,
 73 |       "expansionPrompt": "Expand into subtasks such as: (1) Define tool interface, (2) Fetch bubble message by ID, (3) Implement error handling for missing IDs or legacy format, (4) Write unit tests.",
 74 |       "reasoning": "This is a focused retrieval task with some error handling, making it less complex than broader tools."
 75 |     },
 76 |     {
 77 |       "taskId": 9,
 78 |       "taskTitle": "Implement Get Recent Conversations Tool",
 79 |       "complexityScore": 5,
 80 |       "recommendedSubtasks": 4,
 81 |       "expansionPrompt": "Expand into subtasks such as: (1) Define tool interface and options, (2) Implement ROWID-based ordering and filtering, (3) Optimize queries with indexes, (4) Write integration tests.",
 82 |       "reasoning": "Retrieving recent conversations with filtering and ordering is moderately complex, especially with performance considerations."
 83 |     },
 84 |     {
 85 |       "taskId": 10,
 86 |       "taskTitle": "Implement Get Conversations by Project Tool",
 87 |       "complexityScore": 6,
 88 |       "recommendedSubtasks": 5,
 89 |       "expansionPrompt": "Expand into subtasks such as: (1) Define filtering options (project path, file pattern, etc.), (2) Query using SQLite JSON functions, (3) Implement ordering by recency or relevance, (4) Handle both formats, (5) Write unit tests.",
 90 |       "reasoning": "Filtering by project and files using JSON queries and supporting multiple formats increases the complexity of this tool."
 91 |     },
 92 |     {
 93 |       "taskId": 11,
 94 |       "taskTitle": "Implement Detect Conversation Format Tool",
 95 |       "complexityScore": 3,
 96 |       "recommendedSubtasks": 3,
 97 |       "expansionPrompt": "Expand into subtasks such as: (1) Implement format detection logic, (2) Handle edge cases and ambiguous formats, (3) Write unit tests.",
 98 |       "reasoning": "This is a simple detection task with some edge case handling, resulting in low complexity."
 99 |     },
100 |     {
101 |       "taskId": 12,
102 |       "taskTitle": "Implement Cross-Platform Database Path Detection",
103 |       "complexityScore": 4,
104 |       "recommendedSubtasks": 4,
105 |       "expansionPrompt": "Expand into subtasks such as: (1) Implement OS detection logic, (2) Handle path resolution for each platform, (3) Implement fallback and user-configurable paths, (4) Write unit tests for each platform.",
106 |       "reasoning": "Handling OS-specific logic and fallback mechanisms adds some complexity, but the scope is limited."
107 |     },
108 |     {
109 |       "taskId": 13,
110 |       "taskTitle": "Implement Error Handling and Validation",
111 |       "complexityScore": 8,
112 |       "recommendedSubtasks": 7,
113 |       "expansionPrompt": "Expand into subtasks such as: (1) Design custom error classes, (2) Implement error handling in database operations, (3) Integrate Zod validation in all tools, (4) Add try-catch blocks for critical operations, (5) Implement informative error messages, (6) Write unit tests for error scenarios, (7) Write integration tests for error propagation.",
114 |       "reasoning": "Comprehensive error handling and validation across all tools, with custom classes and integration, is a complex and critical task."
115 |     },
116 |     {
117 |       "taskId": 14,
118 |       "taskTitle": "Implement Caching Mechanism",
119 |       "complexityScore": 5,
120 |       "recommendedSubtasks": 4,
121 |       "expansionPrompt": "Expand into subtasks such as: (1) Design cache structure and strategy, (2) Implement caching logic for key data, (3) Implement cache invalidation, (4) Write unit tests and benchmarks.",
122 |       "reasoning": "Implementing a basic caching layer is moderately complex, especially with invalidation and performance testing."
123 |     },
124 |     {
125 |       "taskId": 15,
126 |       "taskTitle": "Update MCP Server Configuration",
127 |       "complexityScore": 7,
128 |       "recommendedSubtasks": 6,
129 |       "expansionPrompt": "Expand into subtasks such as: (1) Integrate all conversation tools into the server, (2) Remove deprecated tools, (3) Update server name and configuration, (4) Implement error handling and logging, (5) Add graceful shutdown logic, (6) Write integration tests for server endpoints.",
130 |       "reasoning": "Coordinating integration of multiple tools, updating configuration, and ensuring robust server behavior makes this a complex and multi-faceted task."
131 |     }
132 |   ]
133 | }
```

--------------------------------------------------------------------------------
/src/utils/cache.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Cache utility for improving performance of frequently accessed data
  3 |  */
  4 | 
/**
 * Configuration options for cache instances.
 * All fields are optional; defaults are applied in the Cache constructor.
 */
export interface CacheConfig {
  /** Maximum number of entries to store in cache (default: 1000) */
  maxSize?: number;
  /** Default time-to-live in milliseconds (default: 5 minutes) */
  defaultTTL?: number;
  /** Eviction policy when cache is full (default: 'lru') */
  evictionPolicy?: 'lru' | 'fifo';
  /** Whether to enable automatic cleanup of expired entries (default: true) */
  enableCleanup?: boolean;
  /** Interval for automatic cleanup in milliseconds (default: 1 minute) */
  cleanupInterval?: number;
}
 20 | 
/**
 * Cache entry with metadata. Created by Cache.set(); expiry is computed
 * eagerly (expiresAt = createdAt + ttl) so reads only compare timestamps.
 */
export interface CacheEntry<T = any> {
  /** The cached value */
  value: T;
  /** Timestamp when the entry was created (ms since epoch) */
  createdAt: number;
  /** Timestamp when the entry was last accessed */
  lastAccessedAt: number;
  /** Time-to-live in milliseconds */
  ttl?: number;
  /** Timestamp when the entry expires; absent means the entry never expires */
  expiresAt?: number;
}
 36 | 
/**
 * Cache statistics for monitoring. Returned as a snapshot copy by
 * Cache.getStats(); mutating the returned object does not affect the cache.
 */
export interface CacheStats {
  /** Total number of cache hits */
  hits: number;
  /** Total number of cache misses (includes reads of expired entries) */
  misses: number;
  /** Current number of entries in cache */
  size: number;
  /** Maximum size configured for cache */
  maxSize: number;
  /** Cache hit rate as a percentage (0-100) */
  hitRate: number;
  /** Number of entries evicted due to size limits */
  evictions: number;
  /** Number of entries expired due to TTL */
  expirations: number;
}
 56 | 
 57 | /**
 58 |  * Basic cache implementation with TTL, size limits, and eviction policies
 59 |  */
 60 | export class Cache<T = any> {
 61 |   private entries: Map<string, CacheEntry<T>> = new Map();
 62 |   private accessOrder: string[] = []; // For LRU tracking
 63 |   private insertOrder: string[] = []; // For FIFO tracking
 64 |   private config: Required<CacheConfig>;
 65 |   private stats: CacheStats;
 66 |   private cleanupTimer?: NodeJS.Timeout;
 67 | 
 68 |   constructor(config: CacheConfig = {}) {
 69 |     this.config = {
 70 |       maxSize: config.maxSize ?? 1000,
 71 |       defaultTTL: config.defaultTTL ?? 5 * 60 * 1000, // 5 minutes
 72 |       evictionPolicy: config.evictionPolicy ?? 'lru',
 73 |       enableCleanup: config.enableCleanup ?? true,
 74 |       cleanupInterval: config.cleanupInterval ?? 60 * 1000, // 1 minute
 75 |     };
 76 | 
 77 |     this.stats = {
 78 |       hits: 0,
 79 |       misses: 0,
 80 |       size: 0,
 81 |       maxSize: this.config.maxSize,
 82 |       hitRate: 0,
 83 |       evictions: 0,
 84 |       expirations: 0,
 85 |     };
 86 | 
 87 |     if (this.config.enableCleanup) {
 88 |       this.startCleanupTimer();
 89 |     }
 90 |   }
 91 | 
 92 |   /**
 93 |    * Get a value from the cache
 94 |    */
 95 |   get(key: string): T | undefined {
 96 |     const entry = this.entries.get(key);
 97 | 
 98 |     if (!entry) {
 99 |       this.stats.misses++;
100 |       this.updateHitRate();
101 |       return undefined;
102 |     }
103 | 
104 |     if (this.isExpired(entry)) {
105 |       this.delete(key);
106 |       this.stats.expirations++;
107 |       this.stats.misses++;
108 |       this.updateHitRate();
109 |       return undefined;
110 |     }
111 | 
112 |     this.updateAccessOrder(key);
113 |     this.stats.hits++;
114 |     this.updateHitRate();
115 |     return entry.value;
116 |   }
117 | 
118 |   /**
119 |    * Set a value in the cache
120 |    */
121 |   set(key: string, value: T, customTtl?: number): void {
122 |     const now = Date.now();
123 |     const ttl = customTtl ?? this.config.defaultTTL;
124 |     const expiresAt = now + ttl;
125 | 
126 |     const entry: CacheEntry<T> = {
127 |       value,
128 |       createdAt: now,
129 |       lastAccessedAt: now,
130 |       ttl,
131 |       expiresAt
132 |     };
133 | 
134 |     if (this.entries.has(key)) {
135 |       this.removeFromTrackingArrays(key);
136 |     }
137 | 
138 |     if (this.entries.size >= this.config.maxSize && !this.entries.has(key)) {
139 |       this.evictEntry();
140 |     }
141 | 
142 |     this.entries.set(key, entry);
143 |     this.accessOrder.push(key);
144 |     this.insertOrder.push(key);
145 |     this.updateSize();
146 |   }
147 | 
148 |   /**
149 |    * Delete a value from the cache
150 |    */
151 |   delete(key: string): boolean {
152 |     const existed = this.entries.delete(key);
153 |     if (existed) {
154 |       this.removeFromTrackingArrays(key);
155 |       this.updateSize();
156 |     }
157 |     return existed;
158 |   }
159 | 
160 |   /**
161 |    * Check if a key exists in the cache (without affecting access order)
162 |    */
163 |   has(key: string): boolean {
164 |     const entry = this.entries.get(key);
165 |     if (!entry) {
166 |       return false;
167 |     }
168 | 
169 |     if (this.isExpired(entry)) {
170 |       this.delete(key);
171 |       this.stats.expirations++;
172 |       return false;
173 |     }
174 | 
175 |     return true;
176 |   }
177 | 
178 |   /**
179 |    * Clear all entries from the cache
180 |    */
181 |   clear(): void {
182 |     this.entries.clear();
183 |     this.accessOrder = [];
184 |     this.insertOrder = [];
185 |     this.updateSize();
186 |   }
187 | 
188 |   /**
189 |    * Get current cache size
190 |    */
191 |   size(): number {
192 |     return this.entries.size;
193 |   }
194 | 
195 |   /**
196 |    * Get cache statistics
197 |    */
198 |   getStats(): CacheStats {
199 |     return { ...this.stats };
200 |   }
201 | 
202 |   /**
203 |    * Reset cache statistics
204 |    */
205 |   resetStats(): void {
206 |     this.stats = {
207 |       hits: 0,
208 |       misses: 0,
209 |       size: this.entries.size,
210 |       maxSize: this.config.maxSize,
211 |       hitRate: 0,
212 |       evictions: 0,
213 |       expirations: 0,
214 |     };
215 |   }
216 | 
217 |   /**
218 |    * Manually trigger cleanup of expired entries
219 |    */
220 |   cleanup(): number {
221 |     const initialSize = this.entries.size;
222 |     const now = Date.now();
223 |     const expiredKeys: string[] = [];
224 | 
225 |     for (const [key, entry] of this.entries) {
226 |       if (this.isExpired(entry)) {
227 |         expiredKeys.push(key);
228 |       }
229 |     }
230 | 
231 |     for (const key of expiredKeys) {
232 |       this.delete(key);
233 |       this.stats.expirations++;
234 |     }
235 | 
236 |     return initialSize - this.entries.size;
237 |   }
238 | 
239 |   /**
240 |    * Get all keys in the cache
241 |    */
242 |   keys(): string[] {
243 |     return Array.from(this.entries.keys());
244 |   }
245 | 
246 |   /**
247 |    * Get all values in the cache
248 |    */
249 |   values(): T[] {
250 |     return Array.from(this.entries.values()).map(entry => entry.value);
251 |   }
252 | 
253 |   /**
254 |    * Destroy the cache and cleanup resources
255 |    */
256 |   destroy(): void {
257 |     if (this.cleanupTimer) {
258 |       clearInterval(this.cleanupTimer);
259 |       this.cleanupTimer = undefined;
260 |     }
261 |     this.clear();
262 |   }
263 | 
264 |   /**
265 |    * Check if an entry has expired
266 |    */
267 |   private isExpired(entry: CacheEntry<T>): boolean {
268 |     if (!entry.expiresAt) {
269 |       return false;
270 |     }
271 |     return Date.now() > entry.expiresAt;
272 |   }
273 | 
274 |   /**
275 |    * Update access order for LRU tracking
276 |    */
277 |   private updateAccessOrder(key: string): void {
278 |     const index = this.accessOrder.indexOf(key);
279 |     if (index > -1) {
280 |       this.accessOrder.splice(index, 1);
281 |     }
282 |     this.accessOrder.push(key);
283 |   }
284 | 
285 |   /**
286 |    * Remove key from tracking arrays
287 |    */
288 |   private removeFromTrackingArrays(key: string): void {
289 |     const accessIndex = this.accessOrder.indexOf(key);
290 |     if (accessIndex > -1) {
291 |       this.accessOrder.splice(accessIndex, 1);
292 |     }
293 | 
294 |     const insertIndex = this.insertOrder.indexOf(key);
295 |     if (insertIndex > -1) {
296 |       this.insertOrder.splice(insertIndex, 1);
297 |     }
298 |   }
299 | 
300 |   /**
301 |    * Evict an entry based on the configured eviction policy
302 |    */
303 |   private evictEntry(): void {
304 |     let keyToEvict: string | undefined;
305 | 
306 |     if (this.config.evictionPolicy === 'lru') {
307 |       keyToEvict = this.accessOrder[0];
308 |     } else if (this.config.evictionPolicy === 'fifo') {
309 |       keyToEvict = this.insertOrder[0];
310 |     }
311 | 
312 |     if (keyToEvict) {
313 |       this.delete(keyToEvict);
314 |       this.stats.evictions++;
315 |     }
316 |   }
317 | 
318 |   /**
319 |    * Update cache size in stats
320 |    */
321 |   private updateSize(): void {
322 |     this.stats.size = this.entries.size;
323 |   }
324 | 
325 |   /**
326 |    * Update hit rate calculation
327 |    */
328 |   private updateHitRate(): void {
329 |     const total = this.stats.hits + this.stats.misses;
330 |     this.stats.hitRate = total > 0 ? (this.stats.hits / total) * 100 : 0;
331 |   }
332 | 
333 |   /**
334 |    * Start automatic cleanup timer
335 |    */
336 |   private startCleanupTimer(): void {
337 |     this.cleanupTimer = setInterval(() => {
338 |       this.cleanup();
339 |     }, this.config.cleanupInterval);
340 |   }
341 | }
342 | 
/**
 * Create a new cache instance with the specified configuration.
 *
 * @param config - Optional cache settings; when omitted, the Cache
 *   constructor's defaults apply.
 * @returns A fresh, empty Cache<T> instance.
 */
export function createCache<T = any>(config?: CacheConfig): Cache<T> {
  return new Cache<T>(config);
}
349 | 
/**
 * Default cache configurations for common use cases.
 *
 * Each preset bundles a maximum entry count, default TTL, eviction policy,
 * and a background cleanup interval; pass one to createCache() as-is or
 * spread it and override individual fields.
 */
export const CachePresets = {
  /** Small, fast cache for frequently accessed data */
  small: {
    maxSize: 100,
    defaultTTL: 2 * 60 * 1000, // 2 minutes
    evictionPolicy: 'lru' as const,
    enableCleanup: true,
    cleanupInterval: 30 * 1000, // 30 seconds
  },

  /** Medium cache for general purpose use */
  medium: {
    maxSize: 500,
    defaultTTL: 5 * 60 * 1000, // 5 minutes
    evictionPolicy: 'lru' as const,
    enableCleanup: true,
    cleanupInterval: 60 * 1000, // 1 minute
  },

  /** Large cache for bulk data */
  large: {
    maxSize: 2000,
    defaultTTL: 15 * 60 * 1000, // 15 minutes
    evictionPolicy: 'lru' as const,
    enableCleanup: true,
    cleanupInterval: 2 * 60 * 1000, // 2 minutes
  },

  /** Long-lived cache for stable data (FIFO: insertion age, not access, decides eviction) */
  persistent: {
    maxSize: 1000,
    defaultTTL: 60 * 60 * 1000, // 1 hour
    evictionPolicy: 'fifo' as const,
    enableCleanup: true,
    cleanupInterval: 5 * 60 * 1000, // 5 minutes
  },
} as const;
```

--------------------------------------------------------------------------------
/src/utils/analytics.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import type { ConversationSummary } from '../database/types.js';
  2 | 
  3 | /**
  4 |  * Statistical calculations for conversation analytics
  5 |  */
  6 | 
/** High-level aggregate statistics across a set of conversations. */
export interface AnalyticsOverview {
  totalConversations: number;
  totalMessages: number;
  totalCodeBlocks: number;
  // Mean of conversationSize across conversations; 0 when there are none.
  averageConversationSize: number;
  averageMessagesPerConversation: number;
  // Count of distinct files referenced across all conversations.
  totalFiles: number;
  totalFolders: number;
}

/** Per-file usage: how often a file is mentioned and by which conversations. */
export interface FileBreakdown {
  file: string;
  mentions: number;
  // composerIds of the conversations that reference this file.
  conversations: string[];
  extension: string;
  // Best-effort project root derived from the path; may be absent.
  projectPath?: string;
}

/** Per-language statistics aggregated over code blocks. */
export interface LanguageBreakdown {
  language: string;
  codeBlocks: number;
  conversations: string[];
  averageCodeLength: number;
}

/** Statistics for one ordered bucket (ROWID order is used as a time proxy). */
export interface TemporalBreakdown {
  period: string;
  conversationCount: number;
  messageCount: number;
  averageSize: number;
  conversationIds: string[];
}

/** Conversation-size distribution: raw sorted sizes, percentiles, histogram bins. */
export interface SizeDistribution {
  distribution: number[];
  percentiles: Record<string, number>;
  bins: Array<{ range: string; count: number }>;
}
 45 | 
 46 | /**
 47 |  * Calculate basic overview statistics from conversation summaries
 48 |  */
 49 | export function calculateOverview(summaries: ConversationSummary[]): AnalyticsOverview {
 50 |   const totalConversations = summaries.length;
 51 |   const totalMessages = summaries.reduce((sum, s) => sum + s.messageCount, 0);
 52 |   const totalCodeBlocks = summaries.reduce((sum, s) => sum + s.codeBlockCount, 0);
 53 |   const totalSize = summaries.reduce((sum, s) => sum + s.conversationSize, 0);
 54 | 
 55 |   // Collect unique files and folders
 56 |   const allFiles = new Set<string>();
 57 |   const allFolders = new Set<string>();
 58 | 
 59 |   summaries.forEach(summary => {
 60 |     summary.relevantFiles.forEach(file => allFiles.add(file));
 61 |     summary.attachedFolders.forEach(folder => allFolders.add(folder));
 62 |   });
 63 | 
 64 |   return {
 65 |     totalConversations,
 66 |     totalMessages,
 67 |     totalCodeBlocks,
 68 |     averageConversationSize: totalConversations > 0 ? totalSize / totalConversations : 0,
 69 |     averageMessagesPerConversation: totalConversations > 0 ? totalMessages / totalConversations : 0,
 70 |     totalFiles: allFiles.size,
 71 |     totalFolders: allFolders.size
 72 |   };
 73 | }
 74 | 
 75 | /**
 76 |  * Calculate file breakdown with mentions and conversations
 77 |  */
 78 | export function calculateFileBreakdown(summaries: ConversationSummary[]): FileBreakdown[] {
 79 |   const fileMap = new Map<string, { mentions: number; conversations: string[]; extension: string }>();
 80 | 
 81 |   for (const summary of summaries) {
 82 |     const files = new Set([
 83 |       ...(summary.relevantFiles || []),
 84 |       ...(summary.attachedFolders || [])
 85 |     ]);
 86 | 
 87 |     for (const file of files) {
 88 |       if (!fileMap.has(file)) {
 89 |         fileMap.set(file, {
 90 |           mentions: 0,
 91 |           conversations: [],
 92 |           extension: getFileExtension(file)
 93 |         });
 94 |       }
 95 | 
 96 |       const entry = fileMap.get(file)!;
 97 |       entry.mentions++;
 98 |       entry.conversations.push(summary.composerId);
 99 |     }
100 |   }
101 | 
102 |   return Array.from(fileMap.entries())
103 |     .map(([file, data]) => ({
104 |       file,
105 |       mentions: data.mentions,
106 |       conversations: data.conversations,
107 |       extension: data.extension
108 |     }))
109 |     .sort((a, b) => b.mentions - a.mentions);
110 | }
111 | 
112 | /**
113 |  * Calculate language breakdown from code blocks
114 |  */
115 | export function calculateLanguageBreakdown(
116 |   conversationsWithCode: Array<{
117 |     composerId: string;
118 |     codeBlocks: Array<{ language: string; code: string }>;
119 |   }>
120 | ): LanguageBreakdown[] {
121 |   const languageMap = new Map<string, {
122 |     codeBlocks: number;
123 |     conversations: Set<string>;
124 |     totalCodeLength: number;
125 |   }>();
126 | 
127 |   conversationsWithCode.forEach(({ composerId, codeBlocks }) => {
128 |     codeBlocks.forEach(block => {
129 |       const language = normalizeLanguage(block.language);
130 |       if (!languageMap.has(language)) {
131 |         languageMap.set(language, {
132 |           codeBlocks: 0,
133 |           conversations: new Set(),
134 |           totalCodeLength: 0
135 |         });
136 |       }
137 |       const entry = languageMap.get(language)!;
138 |       entry.codeBlocks++;
139 |       entry.conversations.add(composerId);
140 |       entry.totalCodeLength += block.code.length;
141 |     });
142 |   });
143 | 
144 |   return Array.from(languageMap.entries())
145 |     .map(([language, data]) => ({
146 |       language,
147 |       codeBlocks: data.codeBlocks,
148 |       conversations: Array.from(data.conversations),
149 |       averageCodeLength: data.codeBlocks > 0 ? data.totalCodeLength / data.codeBlocks : 0
150 |     }))
151 |     .sort((a, b) => b.codeBlocks - a.codeBlocks);
152 | }
153 | 
154 | /**
155 |  * Calculate temporal breakdown using ROWID ordering as proxy for time
156 |  */
157 | export function calculateTemporalBreakdown(
158 |   summaries: ConversationSummary[],
159 |   conversationIds: string[]
160 | ): TemporalBreakdown[] {
161 |   const totalConversations = conversationIds.length;
162 |   const binsCount = Math.min(10, Math.max(3, Math.floor(totalConversations / 10)));
163 |   const conversationsPerBin = Math.ceil(totalConversations / binsCount);
164 | 
165 |   const bins: TemporalBreakdown[] = [];
166 | 
167 |   for (let i = 0; i < binsCount; i++) {
168 |     const startIndex = i * conversationsPerBin;
169 |     const endIndex = Math.min(startIndex + conversationsPerBin, totalConversations);
170 |     const binIds = conversationIds.slice(startIndex, endIndex);
171 | 
172 |     const binSummaries = summaries.filter(s => binIds.includes(s.composerId));
173 |     const totalSize = binSummaries.reduce((sum, s) => sum + s.conversationSize, 0);
174 |     const averageSize = binSummaries.length > 0 ? totalSize / binSummaries.length : 0;
175 | 
176 |     bins.push({
177 |       period: `Period ${i + 1}`,
178 |       conversationCount: binSummaries.length,
179 |       messageCount: binSummaries.reduce((sum, s) => sum + s.messageCount, 0),
180 |       averageSize: Math.round(averageSize),
181 |       conversationIds: binIds
182 |     });
183 |   }
184 | 
185 |   return bins;
186 | }
187 | 
188 | /**
189 |  * Calculate size distribution with percentiles and bins
190 |  */
191 | export function calculateSizeDistribution(summaries: ConversationSummary[]): SizeDistribution {
192 |   const sizes = summaries.map(s => s.conversationSize).sort((a, b) => a - b);
193 | 
194 |   if (sizes.length === 0) {
195 |     return {
196 |       distribution: [],
197 |       percentiles: {},
198 |       bins: []
199 |     };
200 |   }
201 | 
202 |   // Calculate percentiles
203 |   const percentiles = {
204 |     p10: calculatePercentile(sizes, 10),
205 |     p25: calculatePercentile(sizes, 25),
206 |     p50: calculatePercentile(sizes, 50),
207 |     p75: calculatePercentile(sizes, 75),
208 |     p90: calculatePercentile(sizes, 90),
209 |     p95: calculatePercentile(sizes, 95),
210 |     p99: calculatePercentile(sizes, 99)
211 |   };
212 | 
213 |   // Create size bins
214 |   const minSize = sizes[0];
215 |   const maxSize = sizes[sizes.length - 1];
216 |   const binCount = 10;
217 |   const binSize = (maxSize - minSize) / binCount;
218 | 
219 |   const bins: Array<{ range: string; count: number }> = [];
220 |   for (let i = 0; i < binCount; i++) {
221 |     const binStart = minSize + (i * binSize);
222 |     const binEnd = i === binCount - 1 ? maxSize : binStart + binSize;
223 |     const count = sizes.filter(size => size >= binStart && size <= binEnd).length;
224 | 
225 |     bins.push({
226 |       range: `${formatSize(binStart)} - ${formatSize(binEnd)}`,
227 |       count
228 |     });
229 |   }
230 | 
231 |   return {
232 |     distribution: sizes,
233 |     percentiles,
234 |     bins
235 |   };
236 | }
237 | 
238 | /**
239 |  * Calculate percentile value from sorted array
240 |  */
241 | function calculatePercentile(sortedArray: number[], percentile: number): number {
242 |   if (sortedArray.length === 0) return 0;
243 | 
244 |   const index = (percentile / 100) * (sortedArray.length - 1);
245 |   const lower = Math.floor(index);
246 |   const upper = Math.ceil(index);
247 | 
248 |   if (lower === upper) {
249 |     return sortedArray[lower];
250 |   }
251 | 
252 |   const weight = index - lower;
253 |   return sortedArray[lower] * (1 - weight) + sortedArray[upper] * weight;
254 | }
255 | 
256 | /**
257 |  * Extract file extension from file path
258 |  */
259 | function getFileExtension(filePath: string): string {
260 |   const lastDot = filePath.lastIndexOf('.');
261 |   const lastSlash = Math.max(filePath.lastIndexOf('/'), filePath.lastIndexOf('\\'));
262 | 
263 |   if (lastDot > lastSlash && lastDot !== -1) {
264 |     return filePath.substring(lastDot + 1).toLowerCase();
265 |   }
266 | 
267 |   return '';
268 | }
269 | 
270 | /**
271 |  * Extract project path from file path (first few directories)
272 |  */
273 | function extractProjectPath(filePath: string): string | undefined {
274 |   const parts = filePath.split(/[/\\]/);
275 |   if (parts.length > 2) {
276 |     return parts.slice(0, 2).join('/');
277 |   }
278 |   return undefined;
279 | }
280 | 
281 | /**
282 |  * Normalize language names for consistency
283 |  */
284 | function normalizeLanguage(language: string): string {
285 |   const normalized = language.toLowerCase().trim();
286 | 
287 |   // Common language mappings
288 |   const mappings: Record<string, string> = {
289 |     'js': 'javascript',
290 |     'ts': 'typescript',
291 |     'jsx': 'javascript',
292 |     'tsx': 'typescript',
293 |     'py': 'python',
294 |     'rb': 'ruby',
295 |     'sh': 'shell',
296 |     'bash': 'shell',
297 |     'zsh': 'shell',
298 |     'fish': 'shell',
299 |     'yml': 'yaml',
300 |     'md': 'markdown',
301 |     'dockerfile': 'docker'
302 |   };
303 | 
304 |   return mappings[normalized] || normalized;
305 | }
306 | 
307 | /**
308 |  * Format size in human-readable format
309 |  */
310 | function formatSize(bytes: number): string {
311 |   if (bytes < 1024) return `${bytes}B`;
312 |   if (bytes < 1024 * 1024) return `${(bytes / 1024).toFixed(1)}KB`;
313 |   if (bytes < 1024 * 1024 * 1024) return `${(bytes / (1024 * 1024)).toFixed(1)}MB`;
314 |   return `${(bytes / (1024 * 1024 * 1024)).toFixed(1)}GB`;
315 |   }
```

--------------------------------------------------------------------------------
/docs/research.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Cursor Chat Storage Guide
  2 | 
  3 | ## Overview
  4 | 
  5 | Cursor stores all chat conversations in a SQLite database located in your system's application support directory. This guide explains where to find your chats and how to access them.
  6 | 
  7 | ## Storage Location
  8 | 
  9 | Your Cursor chat conversations are stored at:
 10 | 
**macOS**: `~/Library/Application Support/Cursor/User/globalStorage/state.vscdb`
(full path: `/Users/[username]/Library/Application Support/Cursor/User/globalStorage/state.vscdb`)

**Windows**: `%APPDATA%\Cursor\User\globalStorage\state.vscdb`

**Linux**: `~/.config/Cursor/User/globalStorage/state.vscdb`
 14 | 
 15 | ## Database Details
 16 | 
 17 | - **Type**: SQLite 3.x database
 18 | - **Size**: ~1.5GB (varies based on chat history)
 19 | - **Format**: Key-value store in table `cursorDiskKV`
 20 | - **Structure**: Binary BLOB values with text keys
 21 | 
 22 | ## Data Organization
 23 | 
 24 | ### Main Tables
 25 | - `cursorDiskKV` - Primary key-value storage
 26 | - `ItemTable` - Additional metadata storage
 27 | 
 28 | ### Key Types in cursorDiskKV
 29 | 
 30 | | Key Pattern | Description | Example |
 31 | |-------------|-------------|---------|
 32 | | `composerData:UUID` | Complete conversation data | `composerData:0003f899-8807-4f86-ab1b-a48f985cb580` |
 33 | | `messageRequestContext:UUID:UUID` | Message context and metadata | `messageRequestContext:013273b7-92e9-409a-816c-b671052557ea:89911dfd-87c3-4124-928f-d7c00fd7e273` |
 34 | | `bubbleId:UUID:UUID` | Individual message/bubble data | `bubbleId:00624634-f10c-4427-b2d1-52caef4e19cf:01cdaaf8-3c72-4984-8397-fb8079ad04fc` |
 35 | | `checkpointId:UUID` | Conversation checkpoints | `checkpointId:UUID` |
 36 | | `codeBlockDiff:UUID` | Code block differences | `codeBlockDiff:UUID` |
 37 | 
 38 | ## Summarization Data Storage
 39 | 
 40 | **Location**: Summarization data is stored within the conversation JSON in `composerData:UUID` records.
 41 | 
 42 | **How to Find Conversations with Summarization**:
 43 | ```sql
 44 | -- Find conversations containing summarization content
 45 | SELECT key FROM cursorDiskKV
 46 | WHERE key LIKE 'composerData:%'
 47 | AND value LIKE '%summarization%';
 48 | ```
 49 | 
 50 | **Storage Format**: The summarization content appears to be embedded within the conversation data structure, likely in:
 51 | - Message text content
 52 | - Tool call parameters and results
 53 | - Conversation metadata
 54 | 
 55 | **Example Query to Extract Summarization Content**:
 56 | ```sql
 57 | -- Get conversations with summarization and show first 500 characters
 58 | SELECT key, substr(value, 1, 500) FROM cursorDiskKV
 59 | WHERE key LIKE 'composerData:%'
 60 | AND value LIKE '%summarization%'
 61 | LIMIT 5;
 62 | ```
 63 | 
 64 | **Note**: Summarization data is not stored in a separate table but is integrated into the regular conversation flow as part of the chat history. This means summarization requests and responses are treated as regular messages within the conversation structure.
 65 | 
 66 | ## Accessing Your Chats
 67 | 
 68 | ### Using SQLite Command Line
 69 | 
 70 | 1. **Open Terminal** and navigate to the database:
 71 |    ```bash
 72 |    cd ~/Library/Application\ Support/Cursor/User/globalStorage/
 73 |    ```
 74 | 
 75 | 2. **Open the database**:
 76 |    ```bash
 77 |    sqlite3 state.vscdb
 78 |    ```
 79 | 
 80 | 3. **Basic queries**:
 81 | 
 82 |    **List all conversations:**
 83 |    ```sql
 84 |    SELECT key FROM cursorDiskKV WHERE key LIKE 'composerData:%';
 85 |    ```
 86 | 
 87 |    **Count total conversations:**
 88 |    ```sql
 89 |    SELECT COUNT(*) FROM cursorDiskKV WHERE key LIKE 'composerData:%';
 90 |    ```
 91 | 
 92 |    **Get conversation data (replace UUID with actual ID):**
 93 |    ```sql
 94 |    SELECT value FROM cursorDiskKV WHERE key = 'composerData:UUID';
 95 |    ```
 96 | 
 97 |    **Check conversation size:**
 98 |    ```sql
 99 |    SELECT key, length(value) as size_bytes
100 |    FROM cursorDiskKV
101 |    WHERE key LIKE 'composerData:%'
102 |    ORDER BY size_bytes DESC
103 |    LIMIT 10;
104 |    ```
105 | 
106 |    **⭐ Find most recent conversations (by insertion order):**
107 |    ```sql
108 |    SELECT key FROM cursorDiskKV
109 |    WHERE key LIKE 'composerData:%' AND length(value) > 5000
110 |    ORDER BY ROWID DESC LIMIT 10;
111 |    ```
112 | 
113 |    **Extract user messages from conversations:**
114 |    ```sql
115 |    SELECT value FROM cursorDiskKV
116 |    WHERE key = 'bubbleId:COMPOSER_UUID:BUBBLE_UUID';
117 |    ```
118 | 
119 | ### Data Format
120 | 
121 | Conversations are stored as JSON objects with different format versions:
122 | 
123 | #### Legacy Format (older conversations)
124 | ```json
125 | {
126 |   "composerId": "UUID",
127 |   "richText": "",
128 |   "hasLoaded": true,
129 |   "text": "",
130 |   "conversation": [
131 |     {
132 |       "type": 1,
133 |       "attachedFoldersNew": [],
134 |       "bubbleId": "UUID",
135 |       "suggestedCodeBlocks": [],
136 |       "relevantFiles": ["file1.tsx", "file2.css"],
137 |       "text": "user message content..."
138 |     }
139 |   ]
140 | }
141 | ```
142 | 
143 | #### Modern Format (newer conversations)
144 | ```json
145 | {
146 |   "_v": 3,
147 |   "composerId": "UUID",
148 |   "richText": "",
149 |   "hasLoaded": true,
150 |   "text": "",
151 |   "fullConversationHeadersOnly": [
152 |     {
153 |       "bubbleId": "UUID",
154 |       "type": 1
155 |     },
156 |     {
157 |       "bubbleId": "UUID",
158 |       "type": 2,
159 |       "serverBubbleId": "UUID"
160 |     }
161 |   ]
162 | }
163 | ```
164 | 
165 | **Key Differences:**
166 | - Modern format uses `_v` version field
167 | - Individual messages stored separately with `bubbleId:` keys
168 | - `fullConversationHeadersOnly` contains message references
169 | - Type 1 = User message, Type 2 = AI response
170 | 
171 | ## Finding Recent Conversations
172 | 
173 | ⚠️ **Important**: UUID ordering is NOT chronological! Use these methods instead:
174 | 
175 | ### Method 1: ROWID Ordering (Most Reliable)
176 | ```sql
177 | -- Get most recent conversations with content
178 | SELECT key FROM cursorDiskKV
179 | WHERE key LIKE 'composerData:%' AND length(value) > 5000
180 | ORDER BY ROWID DESC LIMIT 5;
181 | ```
182 | 
183 | ### Method 2: Extract User Messages
184 | ```bash
185 | # Get user message text from a bubble
186 | sqlite3 state.vscdb "SELECT value FROM cursorDiskKV WHERE key = 'bubbleId:COMPOSER_UUID:BUBBLE_UUID';" | grep -o '"text":"[^"]*"'
187 | ```
188 | 
189 | ### Method 3: File Modification Time
190 | ```bash
191 | # Check when database was last modified
192 | ls -la ~/Library/Application\ Support/Cursor/User/globalStorage/state.vscdb
193 | ```
194 | 
195 | ## Statistics Example
196 | 
197 | Based on a typical Cursor installation:
198 | - **Total conversations**: ~3,294
199 | - **Database size**: ~1.5GB
200 | - **Total records**: ~48,485
201 | - **Average conversation size**: ~100-400KB
202 | 
203 | ## Backup Recommendations
204 | 
205 | ### Manual Backup
206 | ```bash
207 | # Create a backup of your chat database
208 | cp ~/Library/Application\ Support/Cursor/User/globalStorage/state.vscdb ~/Desktop/cursor-chats-backup.db
209 | ```
210 | 
211 | ### Export Conversations
212 | ```bash
213 | # Export all conversation keys to a text file
214 | sqlite3 ~/Library/Application\ Support/Cursor/User/globalStorage/state.vscdb \
215 | "SELECT key FROM cursorDiskKV WHERE key LIKE 'composerData:%';" > ~/Desktop/conversation-list.txt
216 | ```
217 | 
218 | ### Export Recent Conversations with Content
219 | ```bash
220 | # Export recent conversations with their sizes
221 | sqlite3 ~/Library/Application\ Support/Cursor/User/globalStorage/state.vscdb \
222 | "SELECT key, length(value) FROM cursorDiskKV WHERE key LIKE 'composerData:%' ORDER BY ROWID DESC LIMIT 20;" > ~/Desktop/recent-conversations.txt
223 | ```
224 | 
225 | ## Important Notes
226 | 
227 | ⚠️ **Warnings:**
228 | - The database is actively used by Cursor - close Cursor before making changes
229 | - Always backup before modifying the database
230 | - The database format may change with Cursor updates
231 | - UUID-based sorting does NOT reflect chronological order
232 | 
233 | 💡 **Tips:**
234 | - Use SQLite browser tools for easier exploration
235 | - The database contains sensitive information - handle with care
236 | - Large conversations may take time to load/export
237 | - Use ROWID for finding recent conversations
238 | - Modern conversations split messages into separate bubble records
239 | 
240 | ## Troubleshooting
241 | 
242 | ### Database Locked Error
243 | If you get "database is locked" error:
244 | 1. Close Cursor completely
245 | 2. Wait a few seconds
246 | 3. Try the SQLite command again
247 | 
248 | ### File Not Found
249 | If the database file doesn't exist:
250 | - Check if Cursor has been used for chats
251 | - Verify the correct path for your OS
252 | - Look for similar `.vscdb` files in the directory
253 | 
254 | ### Empty Conversations
255 | Some conversations may appear empty because:
256 | - They use the modern format with separate bubble storage
257 | - The conversation was just started but not used
258 | - Messages are stored in `bubbleId:` keys instead of inline
259 | 
260 | ## Alternative Tools
261 | 
262 | ### SQLite Browser Applications
263 | - **DB Browser for SQLite** (Free, cross-platform)
264 | - **SQLiteStudio** (Free, cross-platform)
265 | - **Navicat for SQLite** (Paid)
266 | 
267 | ### Command Line Tools
268 | ```bash
269 | # Install sqlite3 if not available
270 | brew install sqlite3  # macOS with Homebrew
271 | 
272 | # View database schema
273 | sqlite3 state.vscdb ".schema"
274 | 
275 | # Export entire database to SQL
276 | sqlite3 state.vscdb ".dump" > backup.sql
277 | ```
278 | 
279 | ## Practical Examples
280 | 
281 | ### Find Your Last 5 Conversations
282 | ```bash
283 | # Step 1: Find recent conversation IDs
284 | sqlite3 ~/Library/Application\ Support/Cursor/User/globalStorage/state.vscdb \
285 | "SELECT key FROM cursorDiskKV WHERE key LIKE 'composerData:%' AND length(value) > 5000 ORDER BY ROWID DESC LIMIT 5;"
286 | 
287 | # Step 2: Get user message from first bubble (replace UUIDs)
288 | sqlite3 ~/Library/Application\ Support/Cursor/User/globalStorage/state.vscdb \
289 | "SELECT value FROM cursorDiskKV WHERE key = 'bubbleId:COMPOSER_UUID:FIRST_BUBBLE_UUID';" | grep -o '"text":"[^"]*"'
290 | ```
291 | 
292 | ### Search Conversations by Content
293 | ```bash
294 | # Find conversations mentioning specific terms (requires extracting JSON)
295 | sqlite3 ~/Library/Application\ Support/Cursor/User/globalStorage/state.vscdb \
296 | "SELECT key FROM cursorDiskKV WHERE key LIKE 'composerData:%' AND value LIKE '%your_search_term%';"
297 | ```
298 | 
299 | ---
300 | 
301 | *Last updated: Based on Cursor's current storage implementation with format version 3*
```

--------------------------------------------------------------------------------
/src/utils/relationships.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import type { ConversationSummary } from '../database/types.js';
  2 | 
  3 | /**
  4 |  * Relationship detection and similarity scoring algorithms
  5 |  */
  6 | 
/** Individual relationship signals between two conversations; only the requested types are populated. */
export interface RelationshipScore {
  sharedFiles?: string[];
  sharedFolders?: string[];
  sharedLanguages?: string[];
  // min(size)/max(size) in [0, 1]; 1 means identical sizes.
  sizeSimilarity?: number;
  // 1 - normalized ROWID distance, in [0, 1]; higher means closer in time.
  temporalProximity?: number;
}

/** A conversation related to the reference one, with its composite score. */
export interface RelatedConversation {
  composerId: string;
  relationshipScore: number;
  relationships: RelationshipScore;
  // First-message preview, or a fallback string when unavailable.
  summary: string;
  // Per-signal scores; populated only when includeScoreBreakdown is set.
  scoreBreakdown?: Record<string, number>;
}

/** Options controlling which signals are computed and how results are filtered. */
export interface RelationshipOptions {
  relationshipTypes: Array<'files' | 'folders' | 'languages' | 'size' | 'temporal'>;
  maxResults: number;
  // Minimum composite score required for inclusion in the results.
  minScore: number;
  includeScoreBreakdown: boolean;
}
 29 | 
 30 | /**
 31 |  * Find conversations related to a reference conversation
 32 |  */
 33 | export function findRelatedConversations(
 34 |   referenceSummary: ConversationSummary,
 35 |   allSummaries: ConversationSummary[],
 36 |   conversationIds: string[],
 37 |   options: RelationshipOptions
 38 | ): RelatedConversation[] {
 39 |   const related: RelatedConversation[] = [];
 40 | 
 41 |   // Get reference conversation index for temporal calculations
 42 |   const referenceIndex = conversationIds.indexOf(referenceSummary.composerId);
 43 | 
 44 |   for (const summary of allSummaries) {
 45 |     // Skip the reference conversation itself
 46 |     if (summary.composerId === referenceSummary.composerId) {
 47 |       continue;
 48 |     }
 49 | 
 50 |     const relationships = calculateRelationships(
 51 |       referenceSummary,
 52 |       summary,
 53 |       conversationIds,
 54 |       referenceIndex,
 55 |       options.relationshipTypes
 56 |     );
 57 | 
 58 |     const score = calculateCompositeScore(relationships, options.relationshipTypes);
 59 | 
 60 |     if (score >= options.minScore) {
 61 |       related.push({
 62 |         composerId: summary.composerId,
 63 |         relationshipScore: score,
 64 |         relationships,
 65 |         summary: summary.firstMessage || 'No preview available',
 66 |         scoreBreakdown: options.includeScoreBreakdown ?
 67 |           calculateScoreBreakdown(relationships, options.relationshipTypes) : undefined
 68 |       });
 69 |     }
 70 |   }
 71 | 
 72 |   // Sort by score and limit results
 73 |   return related
 74 |     .sort((a, b) => b.relationshipScore - a.relationshipScore)
 75 |     .slice(0, options.maxResults);
 76 | }
 77 | 
 78 | /**
 79 |  * Calculate relationships between two conversations
 80 |  */
 81 | function calculateRelationships(
 82 |   reference: ConversationSummary,
 83 |   candidate: ConversationSummary,
 84 |   conversationIds: string[],
 85 |   referenceIndex: number,
 86 |   relationshipTypes: string[]
 87 | ): RelationshipScore {
 88 |   const relationships: RelationshipScore = {};
 89 | 
 90 |   if (relationshipTypes.includes('files')) {
 91 |     relationships.sharedFiles = calculateSharedItems(
 92 |       reference.relevantFiles,
 93 |       candidate.relevantFiles
 94 |     );
 95 |   }
 96 | 
 97 |   if (relationshipTypes.includes('folders')) {
 98 |     relationships.sharedFolders = calculateSharedItems(
 99 |       reference.attachedFolders,
100 |       candidate.attachedFolders
101 |     );
102 |   }
103 | 
104 |   if (relationshipTypes.includes('languages')) {
105 |     // Extract languages from both conversations (would need code block data)
106 |     // For now, we'll use a placeholder - this would be enhanced with actual language extraction
107 |     relationships.sharedLanguages = [];
108 |   }
109 | 
110 |   if (relationshipTypes.includes('size')) {
111 |     relationships.sizeSimilarity = calculateSizeSimilarity(
112 |       reference.conversationSize,
113 |       candidate.conversationSize
114 |     );
115 |   }
116 | 
117 |   if (relationshipTypes.includes('temporal')) {
118 |     const candidateIndex = conversationIds.indexOf(candidate.composerId);
119 |     relationships.temporalProximity = calculateTemporalProximity(
120 |       referenceIndex,
121 |       candidateIndex,
122 |       conversationIds.length
123 |     );
124 |   }
125 | 
126 |   return relationships;
127 | }
128 | 
129 | /**
130 |  * Calculate shared items between two arrays
131 |  */
132 | function calculateSharedItems(array1: string[], array2: string[]): string[] {
133 |   const set1 = new Set(array1);
134 |   return array2.filter(item => set1.has(item));
135 | }
136 | 
137 | /**
138 |  * Calculate size similarity between two conversations
139 |  */
140 | function calculateSizeSimilarity(size1: number, size2: number): number {
141 |   if (size1 === 0 && size2 === 0) return 1;
142 |   if (size1 === 0 || size2 === 0) return 0;
143 | 
144 |   const maxSize = Math.max(size1, size2);
145 |   const minSize = Math.min(size1, size2);
146 | 
147 |   return minSize / maxSize;
148 | }
149 | 
150 | /**
151 |  * Calculate temporal proximity based on ROWID distance
152 |  */
153 | function calculateTemporalProximity(
154 |   index1: number,
155 |   index2: number,
156 |   totalConversations: number
157 | ): number {
158 |   if (index1 === -1 || index2 === -1) return 0;
159 | 
160 |   const distance = Math.abs(index1 - index2);
161 |   const maxDistance = totalConversations - 1;
162 | 
163 |   if (maxDistance === 0) return 1;
164 | 
165 |   // Closer conversations get higher scores
166 |   return 1 - (distance / maxDistance);
167 | }
168 | 
169 | /**
170 |  * Calculate composite score from relationships
171 |  */
172 | function calculateCompositeScore(
173 |   relationships: RelationshipScore,
174 |   relationshipTypes: string[]
175 | ): number {
176 |   let totalScore = 0;
177 |   let weightSum = 0;
178 | 
179 |   // Define weights for different relationship types
180 |   const weights = {
181 |     files: 0.4,
182 |     folders: 0.3,
183 |     languages: 0.2,
184 |     size: 0.05,
185 |     temporal: 0.05
186 |   };
187 | 
188 |   if (relationshipTypes.includes('files') && relationships.sharedFiles) {
189 |     const score = Math.min(relationships.sharedFiles.length / 5, 1); // Cap at 5 shared files
190 |     totalScore += score * weights.files;
191 |     weightSum += weights.files;
192 |   }
193 | 
194 |   if (relationshipTypes.includes('folders') && relationships.sharedFolders) {
195 |     const score = Math.min(relationships.sharedFolders.length / 3, 1); // Cap at 3 shared folders
196 |     totalScore += score * weights.folders;
197 |     weightSum += weights.folders;
198 |   }
199 | 
200 |   if (relationshipTypes.includes('languages') && relationships.sharedLanguages) {
201 |     const score = Math.min(relationships.sharedLanguages.length / 3, 1); // Cap at 3 shared languages
202 |     totalScore += score * weights.languages;
203 |     weightSum += weights.languages;
204 |   }
205 | 
206 |   if (relationshipTypes.includes('size') && relationships.sizeSimilarity !== undefined) {
207 |     totalScore += relationships.sizeSimilarity * weights.size;
208 |     weightSum += weights.size;
209 |   }
210 | 
211 |   if (relationshipTypes.includes('temporal') && relationships.temporalProximity !== undefined) {
212 |     totalScore += relationships.temporalProximity * weights.temporal;
213 |     weightSum += weights.temporal;
214 |   }
215 | 
216 |   return weightSum > 0 ? totalScore / weightSum : 0;
217 | }
218 | 
219 | /**
220 |  * Calculate individual score breakdown for debugging
221 |  */
222 | function calculateScoreBreakdown(
223 |   relationships: RelationshipScore,
224 |   relationshipTypes: string[]
225 | ): Record<string, number> {
226 |   const breakdown: Record<string, number> = {};
227 | 
228 |   if (relationshipTypes.includes('files') && relationships.sharedFiles) {
229 |     breakdown.files = Math.min(relationships.sharedFiles.length / 5, 1);
230 |   }
231 | 
232 |   if (relationshipTypes.includes('folders') && relationships.sharedFolders) {
233 |     breakdown.folders = Math.min(relationships.sharedFolders.length / 3, 1);
234 |   }
235 | 
236 |   if (relationshipTypes.includes('languages') && relationships.sharedLanguages) {
237 |     breakdown.languages = Math.min(relationships.sharedLanguages.length / 3, 1);
238 |   }
239 | 
240 |   if (relationshipTypes.includes('size') && relationships.sizeSimilarity !== undefined) {
241 |     breakdown.size = relationships.sizeSimilarity;
242 |   }
243 | 
244 |   if (relationshipTypes.includes('temporal') && relationships.temporalProximity !== undefined) {
245 |     breakdown.temporal = relationships.temporalProximity;
246 |   }
247 | 
248 |   return breakdown;
249 | }
250 | 
251 | /**
252 |  * Extract languages from code blocks in conversation data
253 |  */
254 | export function extractLanguagesFromCodeBlocks(
255 |   codeBlocks: Array<{ language: string; code: string }>
256 | ): string[] {
257 |   const languages = new Set<string>();
258 | 
259 |   codeBlocks.forEach(block => {
260 |     if (block.language && block.language.trim()) {
261 |       languages.add(normalizeLanguage(block.language));
262 |     }
263 |   });
264 | 
265 |   return Array.from(languages);
266 | }
267 | 
268 | /**
269 |  * Normalize language names for consistency
270 |  */
271 | function normalizeLanguage(language: string): string {
272 |   const normalized = language.toLowerCase().trim();
273 | 
274 |   // Common language mappings
275 |   const mappings: Record<string, string> = {
276 |     'js': 'javascript',
277 |     'ts': 'typescript',
278 |     'jsx': 'javascript',
279 |     'tsx': 'typescript',
280 |     'py': 'python',
281 |     'rb': 'ruby',
282 |     'sh': 'shell',
283 |     'bash': 'shell',
284 |     'zsh': 'shell',
285 |     'fish': 'shell',
286 |     'yml': 'yaml',
287 |     'md': 'markdown',
288 |     'dockerfile': 'docker'
289 |   };
290 | 
291 |   return mappings[normalized] || normalized;
292 | }
293 | 
294 | /**
295 |  * Calculate file overlap score between two conversations
296 |  */
297 | export function calculateFileOverlapScore(files1: string[], files2: string[]): number {
298 |   if (files1.length === 0 && files2.length === 0) return 1;
299 |   if (files1.length === 0 || files2.length === 0) return 0;
300 | 
301 |   const set1 = new Set(files1);
302 |   const intersection = files2.filter(file => set1.has(file));
303 |   const union = new Set([...files1, ...files2]);
304 | 
305 |   return intersection.length / union.size; // Jaccard similarity
306 | }
307 | 
308 | /**
309 |  * Calculate folder overlap score between two conversations
310 |  */
311 | export function calculateFolderOverlapScore(folders1: string[], folders2: string[]): number {
312 |   if (folders1.length === 0 && folders2.length === 0) return 1;
313 |   if (folders1.length === 0 || folders2.length === 0) return 0;
314 | 
315 |   const set1 = new Set(folders1);
316 |   const intersection = folders2.filter(folder => set1.has(folder));
317 |   const union = new Set([...folders1, ...folders2]);
318 | 
319 |   return intersection.length / union.size; // Jaccard similarity
320 | }
```

--------------------------------------------------------------------------------
/src/utils/exporters.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import type { ConversationSummary } from '../database/types.js';
  2 | 
  3 | /**
  4 |  * Format conversion utilities for exporting conversation data
  5 |  */
  6 | 
/** Summary metadata attached to every export (counts, timestamp, filters). */
export interface ExportMetadata {
  // Number of conversations actually included in the export.
  exportedCount: number;
  // Number of conversations that matched before any export limit applied.
  totalAvailable: number;
  // ISO-8601 timestamp of when the export was generated.
  exportTimestamp: string;
  // The filter options that produced this export, echoed back verbatim.
  filters: Record<string, any>;
}

/** A single conversation rendered as a graph node. */
export interface GraphNode {
  // Conversation composerId; unique within the graph.
  id: string;
  // Human-readable label (start of the first message, or the id as fallback).
  label: string;
  type: 'conversation';
  attributes: {
    messageCount: number;
    size: number;
    hasCodeBlocks: boolean;
    format: 'legacy' | 'modern';
    fileCount: number;
    folderCount: number;
  };
}

/** A pairwise relationship between two conversations (undirected). */
export interface GraphEdge {
  // composerIds of the two related conversations.
  source: string;
  target: string;
  type: 'shared_files' | 'shared_folders' | 'similar_size' | 'temporal_proximity';
  // Edge strength: shared-item count or a similarity score, depending on type.
  weight: number;
  attributes: {
    sharedItems?: string[];
    similarity?: number;
  };
}

/** Complete graph export: all nodes plus the edges between them. */
export interface GraphData {
  nodes: GraphNode[];
  edges: GraphEdge[];
}
 43 | 
 44 | /**
 45 |  * Export conversation data as JSON
 46 |  */
 47 | export function exportAsJSON(
 48 |   summaries: ConversationSummary[],
 49 |   includeContent: boolean,
 50 |   conversationData?: Map<string, any>
 51 | ): any {
 52 |   if (!includeContent) {
 53 |     return summaries;
 54 |   }
 55 | 
 56 |   // Include full conversation content if available
 57 |   return summaries.map(summary => ({
 58 |     ...summary,
 59 |     fullContent: conversationData?.get(summary.composerId) || null
 60 |   }));
 61 | }
 62 | 
 63 | /**
 64 |  * Export conversation data as CSV
 65 |  */
 66 | export function exportAsCSV(
 67 |   summaries: ConversationSummary[],
 68 |   flattenStructure: boolean
 69 | ): string {
 70 |   if (summaries.length === 0) {
 71 |     return 'No data to export';
 72 |   }
 73 | 
 74 |   const headers = [
 75 |     'composerId',
 76 |     'format',
 77 |     'messageCount',
 78 |     'hasCodeBlocks',
 79 |     'codeBlockCount',
 80 |     'conversationSize',
 81 |     'fileCount',
 82 |     'folderCount',
 83 |     'firstMessage',
 84 |     'relevantFiles',
 85 |     'attachedFolders'
 86 |   ];
 87 | 
 88 |   const rows = summaries.map(summary => [
 89 |     escapeCSVField(summary.composerId),
 90 |     escapeCSVField(summary.format),
 91 |     summary.messageCount.toString(),
 92 |     summary.hasCodeBlocks.toString(),
 93 |     summary.codeBlockCount.toString(),
 94 |     summary.conversationSize.toString(),
 95 |     summary.relevantFiles.length.toString(),
 96 |     summary.attachedFolders.length.toString(),
 97 |     escapeCSVField(summary.firstMessage || ''),
 98 |     escapeCSVField(flattenStructure ?
 99 |       summary.relevantFiles.join('; ') :
100 |       JSON.stringify(summary.relevantFiles)
101 |     ),
102 |     escapeCSVField(flattenStructure ?
103 |       summary.attachedFolders.join('; ') :
104 |       JSON.stringify(summary.attachedFolders)
105 |     )
106 |   ]);
107 | 
108 |   return [headers.join(','), ...rows.map(row => row.join(','))].join('\n');
109 | }
110 | 
111 | /**
112 |  * Export conversation data as graph format for visualization tools
113 |  */
114 | export function exportAsGraph(
115 |   summaries: ConversationSummary[],
116 |   includeRelationships: boolean
117 | ): GraphData {
118 |   const nodes: GraphNode[] = summaries.map(summary => ({
119 |     id: summary.composerId,
120 |     label: summary.firstMessage?.substring(0, 50) || summary.composerId,
121 |     type: 'conversation',
122 |     attributes: {
123 |       messageCount: summary.messageCount,
124 |       size: summary.conversationSize,
125 |       hasCodeBlocks: summary.hasCodeBlocks,
126 |       format: summary.format,
127 |       fileCount: summary.relevantFiles.length,
128 |       folderCount: summary.attachedFolders.length
129 |     }
130 |   }));
131 | 
132 |   const edges: GraphEdge[] = [];
133 | 
134 |   if (includeRelationships) {
135 |     // Calculate relationships between conversations
136 |     for (let i = 0; i < summaries.length; i++) {
137 |       for (let j = i + 1; j < summaries.length; j++) {
138 |         const summary1 = summaries[i];
139 |         const summary2 = summaries[j];
140 | 
141 |         // Shared files relationship
142 |         const sharedFiles = calculateSharedItems(summary1.relevantFiles, summary2.relevantFiles);
143 |         if (sharedFiles.length > 0) {
144 |           edges.push({
145 |             source: summary1.composerId,
146 |             target: summary2.composerId,
147 |             type: 'shared_files',
148 |             weight: sharedFiles.length,
149 |             attributes: {
150 |               sharedItems: sharedFiles
151 |             }
152 |           });
153 |         }
154 | 
155 |         // Shared folders relationship
156 |         const sharedFolders = calculateSharedItems(summary1.attachedFolders, summary2.attachedFolders);
157 |         if (sharedFolders.length > 0) {
158 |           edges.push({
159 |             source: summary1.composerId,
160 |             target: summary2.composerId,
161 |             type: 'shared_folders',
162 |             weight: sharedFolders.length,
163 |             attributes: {
164 |               sharedItems: sharedFolders
165 |             }
166 |           });
167 |         }
168 | 
169 |         // Size similarity relationship
170 |         const sizeSimilarity = calculateSizeSimilarity(
171 |           summary1.conversationSize,
172 |           summary2.conversationSize
173 |         );
174 |         if (sizeSimilarity > 0.7) { // Only include high similarity
175 |           edges.push({
176 |             source: summary1.composerId,
177 |             target: summary2.composerId,
178 |             type: 'similar_size',
179 |             weight: sizeSimilarity,
180 |             attributes: {
181 |               similarity: sizeSimilarity
182 |             }
183 |           });
184 |         }
185 |       }
186 |     }
187 |   }
188 | 
189 |   return { nodes, edges };
190 | }
191 | 
192 | /**
193 |  * Create export metadata
194 |  */
195 | export function createExportMetadata(
196 |   exportedCount: number,
197 |   totalAvailable: number,
198 |   filters: Record<string, any>
199 | ): ExportMetadata {
200 |   return {
201 |     exportedCount,
202 |     totalAvailable,
203 |     exportTimestamp: new Date().toISOString(),
204 |     filters
205 |   };
206 | }
207 | 
208 | /**
209 |  * Escape CSV field to handle commas, quotes, and newlines
210 |  */
211 | function escapeCSVField(field: string): string {
212 |   if (field.includes(',') || field.includes('"') || field.includes('\n')) {
213 |     return `"${field.replace(/"/g, '""')}"`;
214 |   }
215 |   return field;
216 | }
217 | 
218 | /**
219 |  * Calculate shared items between two arrays
220 |  */
221 | function calculateSharedItems(array1: string[], array2: string[]): string[] {
222 |   const set1 = new Set(array1);
223 |   return array2.filter(item => set1.has(item));
224 | }
225 | 
226 | /**
227 |  * Calculate size similarity between two conversations
228 |  */
229 | function calculateSizeSimilarity(size1: number, size2: number): number {
230 |   if (size1 === 0 && size2 === 0) return 1;
231 |   if (size1 === 0 || size2 === 0) return 0;
232 | 
233 |   const maxSize = Math.max(size1, size2);
234 |   const minSize = Math.min(size1, size2);
235 | 
236 |   return minSize / maxSize;
237 | }
238 | 
/**
 * Convert graph data to Gephi-compatible GEXF (1.2draft) XML.
 *
 * The attribute declarations in the header define numeric `id`s that each
 * node/edge `attvalue` refers back to via its `for` attribute, so the two
 * sections must stay in sync. All dynamic text is run through escapeXML;
 * numeric/boolean values are interpolated directly.
 *
 * @param graphData - Nodes and edges produced by exportAsGraph.
 * @returns The complete GEXF document as a string.
 */
export function exportAsGEXF(graphData: GraphData): string {
  const { nodes, edges } = graphData;

  // Document header: metadata plus node/edge attribute schemas.
  // NOTE: lastmodifieddate embeds the current time, so output is not
  // byte-stable across calls.
  let gexf = `<?xml version="1.0" encoding="UTF-8"?>
<gexf xmlns="http://www.gexf.net/1.2draft" version="1.2">
  <meta lastmodifieddate="${new Date().toISOString()}">
    <creator>Cursor Conversations MCP</creator>
    <description>Conversation relationship graph</description>
  </meta>
  <graph mode="static" defaultedgetype="undirected">
    <attributes class="node">
      <attribute id="0" title="messageCount" type="integer"/>
      <attribute id="1" title="size" type="integer"/>
      <attribute id="2" title="hasCodeBlocks" type="boolean"/>
      <attribute id="3" title="format" type="string"/>
      <attribute id="4" title="fileCount" type="integer"/>
      <attribute id="5" title="folderCount" type="integer"/>
    </attributes>
    <attributes class="edge">
      <attribute id="0" title="type" type="string"/>
      <attribute id="1" title="sharedItems" type="string"/>
      <attribute id="2" title="similarity" type="float"/>
    </attributes>
    <nodes>`;

  // Add nodes — attvalue "for" ids match the node attribute schema above.
  nodes.forEach(node => {
    gexf += `
      <node id="${escapeXML(node.id)}" label="${escapeXML(node.label)}">
        <attvalues>
          <attvalue for="0" value="${node.attributes.messageCount}"/>
          <attvalue for="1" value="${node.attributes.size}"/>
          <attvalue for="2" value="${node.attributes.hasCodeBlocks}"/>
          <attvalue for="3" value="${escapeXML(node.attributes.format)}"/>
          <attvalue for="4" value="${node.attributes.fileCount}"/>
          <attvalue for="5" value="${node.attributes.folderCount}"/>
        </attvalues>
      </node>`;
  });

  gexf += `
    </nodes>
    <edges>`;

  // Add edges — the array index doubles as the GEXF edge id.
  edges.forEach((edge, index) => {
    gexf += `
      <edge id="${index}" source="${escapeXML(edge.source)}" target="${escapeXML(edge.target)}" weight="${edge.weight}">
        <attvalues>
          <attvalue for="0" value="${escapeXML(edge.type)}"/>
          <attvalue for="1" value="${escapeXML(edge.attributes.sharedItems?.join(', ') || '')}"/>
          <attvalue for="2" value="${edge.attributes.similarity || 0}"/>
        </attvalues>
      </edge>`;
  });

  gexf += `
    </edges>
  </graph>
</gexf>`;

  return gexf;
}
305 | 
306 | /**
307 |  * Convert graph data to Cytoscape.js format
308 |  */
309 | export function exportAsCytoscape(graphData: GraphData): any {
310 |   const { nodes, edges } = graphData;
311 | 
312 |   return {
313 |     elements: [
314 |       ...nodes.map(node => ({
315 |         data: {
316 |           id: node.id,
317 |           label: node.label,
318 |           ...node.attributes
319 |         }
320 |       })),
321 |       ...edges.map((edge, index) => ({
322 |         data: {
323 |           id: `edge-${index}`,
324 |           source: edge.source,
325 |           target: edge.target,
326 |           weight: edge.weight,
327 |           type: edge.type,
328 |           ...edge.attributes
329 |         }
330 |       }))
331 |     ]
332 |   };
333 | }
334 | 
335 | /**
336 |  * Escape XML special characters
337 |  */
338 | function escapeXML(text: string): string {
339 |   return text
340 |     .replace(/&/g, '&amp;')
341 |     .replace(/</g, '&lt;')
342 |     .replace(/>/g, '&gt;')
343 |     .replace(/"/g, '&quot;')
344 |     .replace(/'/g, '&apos;');
345 | }
```

--------------------------------------------------------------------------------
/src/tools/conversation-tools.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
  2 | import {
  3 |   listConversations,
  4 |   getConversation,
  5 |   getConversationSummary,
  6 |   searchConversations,
  7 |   getBubbleMessage,
  8 |   getRecentConversations,
  9 |   getConversationsByProject
 10 | } from './conversation-tools.js';
 11 | import { CursorDatabaseReader } from '../database/reader.js';
 12 | import * as databaseUtils from '../utils/database-utils.js';
 13 | 
// Mock the database reader and path-detection helpers so the tools under
// test never touch a real Cursor database on disk.
vi.mock('../database/reader.js');
vi.mock('../utils/database-utils.js');

// Typed handles to the auto-mocked module members.
const mockDatabaseReader = vi.mocked(CursorDatabaseReader);
const mockDetectCursorDatabasePath = vi.mocked(databaseUtils.detectCursorDatabasePath);
 20 | 
// Unit tests for the conversation tool entry points. Each test wires a stub
// reader into the mocked CursorDatabaseReader constructor and asserts on the
// tool's returned shape and its connect/close lifecycle.
describe('Conversation Tools', () => {
  // Fresh stub reader, re-created for every test in beforeEach.
  let mockReader: any;

  beforeEach(() => {
    mockReader = {
      connect: vi.fn(),
      close: vi.fn(),
      getConversationIds: vi.fn(),
      getConversationSummary: vi.fn(),
      getConversationById: vi.fn(),
      getBubbleMessage: vi.fn(),
      searchConversations: vi.fn(),
      getConversationIdsByProject: vi.fn()
    };

    // Every `new CursorDatabaseReader()` inside the tools yields the stub.
    mockDatabaseReader.mockImplementation(() => mockReader);
    mockDetectCursorDatabasePath.mockReturnValue('/mock/path/to/cursor.db');

    // Clear environment variable
    delete process.env.CURSOR_DB_PATH;
  });

  afterEach(() => {
    vi.clearAllMocks();
  });

  describe('listConversations', () => {
    it('should list conversations with default parameters', async () => {
      const mockConversationIds = ['conv1', 'conv2'];
      const mockSummary = {
        composerId: 'conv1',
        format: 'legacy' as const,
        messageCount: 5,
        hasCodeBlocks: true,
        relevantFiles: ['file1.ts'],
        attachedFolders: ['folder1'],
        firstMessage: 'Hello world',
        conversationSize: 1000
      };

      // The same summary is returned for both ids; only counts are asserted.
      mockReader.getConversationIds.mockResolvedValue(mockConversationIds);
      mockReader.getConversationSummary.mockResolvedValue(mockSummary);

      const result = await listConversations({});

      expect(mockReader.connect).toHaveBeenCalled();
      expect(mockReader.close).toHaveBeenCalled();
      expect(result.conversations).toHaveLength(2);
      expect(result.totalFound).toBe(2);
      // Defaults applied by the tool when no filters are passed in.
      expect(result.filters.limit).toBe(1000);
      expect(result.filters.minLength).toBe(100);
    });

    it('should handle empty results', async () => {
      mockReader.getConversationIds.mockResolvedValue([]);

      const result = await listConversations({});

      expect(result.conversations).toHaveLength(0);
      expect(result.totalFound).toBe(0);
    });

    it('should always close database connection', async () => {
      // Even when the query rejects, close() must run (finally semantics).
      mockReader.getConversationIds.mockRejectedValue(new Error('Database error'));

      await expect(listConversations({})).rejects.toThrow('Database error');
      expect(mockReader.close).toHaveBeenCalled();
    });
  });

  describe('getConversation', () => {
    it('should get legacy conversation with full content', async () => {
      // Legacy format: messages live in a `conversation` array on the record.
      const mockConversation = {
        composerId: 'conv1',
        hasLoaded: true,
        text: '',
        richText: '',
        conversation: [
          {
            type: 1,
            bubbleId: 'bubble1',
            text: 'Hello',
            relevantFiles: ['file1.ts'],
            suggestedCodeBlocks: [{
              language: 'typescript',
              code: 'console.log("hello");',
              filename: 'test.ts'
            }],
            attachedFoldersNew: ['folder1']
          }
        ]
      };

      mockReader.getConversationById.mockResolvedValue(mockConversation);

      const result = await getConversation({
        conversationId: 'conv1'
      });

      expect(mockReader.connect).toHaveBeenCalled();
      expect(mockReader.getConversationById).toHaveBeenCalledWith('conv1');
      expect(result.conversation).toBeDefined();
      expect(result.conversation!.format).toBe('legacy');
      expect(result.conversation!.messageCount).toBe(1);
    });

    it('should return null for non-existent conversation', async () => {
      mockReader.getConversationById.mockResolvedValue(null);

      const result = await getConversation({
        conversationId: 'nonexistent'
      });

      expect(result.conversation).toBeNull();
    });
  });

  describe('getConversationSummary', () => {
    it('should get conversation summary', async () => {
      const mockSummary = {
        composerId: 'conv1',
        format: 'legacy' as const,
        messageCount: 5,
        hasCodeBlocks: true,
        codeBlockCount: 3,
        conversationSize: 2000,
        firstMessage: 'First message',
        relevantFiles: ['file1.ts'],
        attachedFolders: ['src']
      };

      mockReader.getConversationSummary.mockResolvedValue(mockSummary);

      const result = await getConversationSummary({
        conversationId: 'conv1'
      });

      // Summary is passed through unchanged from the reader.
      expect(result.summary).toEqual(mockSummary);
    });

    it('should return null for non-existent conversation', async () => {
      mockReader.getConversationSummary.mockResolvedValue(null);

      const result = await getConversationSummary({
        conversationId: 'nonexistent'
      });

      expect(result.summary).toBeNull();
    });
  });

  describe('searchConversations', () => {
    it('should search conversations with default options', async () => {
      const mockResults = [
        {
          composerId: 'conv1',
          format: 'legacy' as const,
          matches: [
            {
              text: 'Found text with query match',
              context: 'Context around match',
              bubbleId: 'bubble1',
              type: 1
            }
          ],
          totalMatches: 1,
          messageCount: 5,
          hasCodeBlocks: true,
          relevantFiles: ['file1.ts'],
          attachedFolders: ['src']
        }
      ];

      mockReader.searchConversations.mockResolvedValue(mockResults);

      const result = await searchConversations({
        query: 'test query'
      });

      // Verifies the tool's documented default search options.
      expect(mockReader.searchConversations).toHaveBeenCalledWith('test query', {
        includeCode: true,
        contextLines: 3,
        maxResults: 20,
        searchBubbles: true,
        searchType: 'all',
        format: 'both'
      });

      expect(result.results).toEqual(mockResults);
      expect(result.totalResults).toBe(1);
      expect(result.query).toBe('test query');
    });
  });

  describe('getBubbleMessage', () => {
    it('should get bubble message', async () => {
      const mockBubbleMessage = {
        bubbleId: 'bubble1',
        type: 1,
        text: 'Bubble message text',
        relevantFiles: ['file1.ts'],
        suggestedCodeBlocks: [],
        attachedFoldersNew: []
      };

      mockReader.getBubbleMessage.mockResolvedValue(mockBubbleMessage);

      const result = await getBubbleMessage({
        composerId: 'conv1',
        bubbleId: 'bubble1'
      });

      expect(result.bubbleMessage).toEqual(mockBubbleMessage);
    });

    it('should return null for non-existent bubble message', async () => {
      mockReader.getBubbleMessage.mockResolvedValue(null);

      const result = await getBubbleMessage({
        composerId: 'conv1',
        bubbleId: 'nonexistent'
      });

      expect(result.bubbleMessage).toBeNull();
    });
  });

  describe('getRecentConversations', () => {
    it('should get recent conversations', async () => {
      const mockConversationIds = ['conv1', 'conv2'];
      const mockSummary = {
        composerId: 'conv1',
        format: 'legacy' as const,
        messageCount: 3,
        hasCodeBlocks: false,
        relevantFiles: [],
        attachedFolders: [],
        conversationSize: 800
      };

      mockReader.getConversationIds.mockResolvedValue(mockConversationIds);
      mockReader.getConversationSummary.mockResolvedValue(mockSummary);

      const result = await getRecentConversations({});

      expect(result.conversations).toHaveLength(2);
      // Default limit of the tool when none is requested.
      expect(result.requestedLimit).toBe(10);
      expect(result.totalFound).toBe(2);
      expect(result.timestamp).toBeDefined();
    });
  });

  describe('getConversationsByProject', () => {
    it('should get conversations by project path', async () => {
      const mockResults = [
        { composerId: 'conv1', relevanceScore: 0.9 }
      ];

      const mockSummary = {
        composerId: 'conv1',
        format: 'legacy' as const,
        messageCount: 5,
        hasCodeBlocks: true,
        relevantFiles: ['src/file1.ts'],
        attachedFolders: ['/project/src'],
        conversationSize: 1500
      };

      mockReader.getConversationIdsByProject.mockResolvedValue(mockResults);
      mockReader.getConversationSummary.mockResolvedValue(mockSummary);

      const result = await getConversationsByProject({
        projectPath: '/project/src'
      });

      expect(result.conversations).toHaveLength(1);
      expect(result.totalFound).toBe(1);
      expect(result.filters.projectPath).toBe('/project/src');
    });
  });

  describe('Error Handling', () => {
    it('should handle database connection errors', async () => {
      mockReader.connect.mockRejectedValue(new Error('Connection failed'));

      // Connection failure propagates, but close() must still be attempted.
      await expect(listConversations({})).rejects.toThrow('Connection failed');
      expect(mockReader.close).toHaveBeenCalled();
    });

    it('should handle validation errors', async () => {
      // Empty conversationId should be rejected by input validation.
      const invalidInput = { conversationId: '' };

      await expect(getConversation(invalidInput as any)).rejects.toThrow();
    });
  });
});
```
Page 1/3FirstPrevNextLast