This is page 1 of 3. Use http://codebase.md/vltansky/cursor-chat-history-mcp?page={x} to view the full context.
# Directory Structure
```
├── .cursor
│   ├── mcp.json
│   └── rules
│       ├── cursor_rules.mdc
│       ├── dev_workflow.mdc
│       ├── general.mdc
│       ├── mcp.mdc
│       ├── project-overview.mdc
│       ├── self_improve.mdc
│       ├── taskmaster.mdc
│       ├── tests.mdc
│       └── typescript-patterns.mdc
├── .github
│   ├── dependabot.yml
│   └── workflows
│       └── ci.yml
├── .gitignore
├── .roo
│   ├── rules
│   │   ├── dev_workflow.md
│   │   ├── roo_rules.md
│   │   ├── self_improve.md
│   │   └── taskmaster.md
│   ├── rules-architect
│   │   └── architect-rules
│   ├── rules-ask
│   │   └── ask-rules
│   ├── rules-boomerang
│   │   └── boomerang-rules
│   ├── rules-code
│   │   └── code-rules
│   ├── rules-debug
│   │   └── debug-rules
│   └── rules-test
│       └── test-rules
├── .roomodes
├── .taskmaster
│   ├── .taskmaster
│   │   └── config.json
│   ├── config.json
│   └── reports
│       └── task-complexity-report.json
├── .taskmasterconfig
├── .windsurfrules
├── docs
│   ├── research.md
│   └── use-cases.md
├── LICENSE
├── package.json
├── README.md
├── scripts
│   └── example_prd.txt
├── src
│   ├── database
│   │   ├── parser.test.ts
│   │   ├── parser.ts
│   │   ├── reader.test.ts
│   │   ├── reader.ts
│   │   └── types.ts
│   ├── server.test.ts
│   ├── server.ts
│   ├── tools
│   │   ├── analytics-tools.ts
│   │   ├── conversation-tools.test.ts
│   │   ├── conversation-tools.ts
│   │   └── extraction-tools.ts
│   └── utils
│       ├── analytics.ts
│       ├── cache.test.ts
│       ├── cache.ts
│       ├── database-utils.test.ts
│       ├── database-utils.ts
│       ├── errors.test.ts
│       ├── errors.ts
│       ├── exporters.ts
│       ├── formatter.ts
│       ├── relationships.ts
│       ├── validation.test.ts
│       └── validation.ts
```
# Files
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
```
# Dependencies
node_modules/

# Build output
dist/

# Log files
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*

# Environment variables
.env
.env.local
.env.development.local
.env.test.local
.env.production.local

# IDEs and editors
.vscode/
.idea/
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?

# OS generated files
.DS_Store
ehthumbs.db
Thumbs.db

# Added by Claude Task Master
# Logs
logs
dev-debug.log
# Dependency directories
# Editor directories and files
.idea
.vscode
# OS specific
# Task files
tasks.json
tasks/
```
--------------------------------------------------------------------------------
/.taskmasterconfig:
--------------------------------------------------------------------------------
```
{
  "models": {
    "main": {
      "provider": "anthropic",
      "modelId": "claude-sonnet-4-20250514",
      "maxTokens": 120000,
      "temperature": 0.2
    },
    "research": {
      "provider": "perplexity",
      "modelId": "sonar-pro",
      "maxTokens": 8700,
      "temperature": 0.1
    },
    "fallback": {
      "provider": "anthropic",
      "modelId": "claude-3-5-sonnet-20240620",
      "maxTokens": 8192,
      "temperature": 0.1
    }
  },
  "global": {
    "logLevel": "info",
    "debug": false,
    "defaultSubtasks": 5,
    "defaultPriority": "medium",
    "projectName": "Taskmaster",
    "ollamaBaseUrl": "http://localhost:11434/api",
    "azureOpenaiBaseUrl": "https://your-endpoint.openai.azure.com/"
  }
}
```
--------------------------------------------------------------------------------
/.roomodes:
--------------------------------------------------------------------------------
```
{
  "customModes": [
    {
      "slug": "boomerang",
      "name": "Boomerang",
      "roleDefinition": "You are Roo, a strategic workflow orchestrator who coordinates complex tasks by delegating them to appropriate specialized modes. You have a comprehensive understanding of each mode's capabilities and limitations, including your own, and with the information given by the user and other modes in shared context you can effectively break down complex problems into discrete tasks that can be solved by different specialists using the `taskmaster-ai` system for task and context management.",
      "customInstructions": "Your role is to coordinate complex workflows by delegating tasks to specialized modes, using `taskmaster-ai` as the central hub for task definition, progress tracking, and context management.\nAs an orchestrator, you should:\n\n1. When given a complex task, use contextual information (which gets updated frequently) to break it down into logical subtasks that can be delegated to appropriate specialized modes.\n\n2. For each subtask, use the `new_task` tool to delegate. Choose the most appropriate mode for the subtask's specific goal and provide comprehensive instructions in the `message` parameter.\nThese instructions must include:\n* All necessary context from the parent task or previous subtasks required to complete the work.\n* A clearly defined scope, specifying exactly what the subtask should accomplish.\n* An explicit statement that the subtask should *only* perform the work outlined in these instructions and not deviate.\n* An instruction for the subtask to signal completion by using the `attempt_completion` tool, providing a thorough summary of the outcome in the `result` parameter, keeping in mind that this summary will be the source of truth used to further relay this information to other tasks and for you to keep track of what was completed on this project.\n\n3. Track and manage the progress of all subtasks. When a subtask is completed, acknowledge its results and determine the next steps.\n\n4. Help the user understand how the different subtasks fit together in the overall workflow. Provide clear reasoning about why you're delegating specific tasks to specific modes.\n\n5. Ask clarifying questions when necessary to better understand how to break down complex tasks effectively. If the breakdown itself seems complex, delegate it to the architect mode.\n\n6. Use subtasks to maintain clarity. If a request significantly shifts focus or requires a different expertise (mode), consider creating a subtask rather than overloading the current one.",
      "groups": [
        "read",
        "edit",
        "browser",
        "command",
        "mcp"
      ]
    },
    {
      "slug": "architect",
      "name": "Architect",
      "roleDefinition": "You are Roo, an expert technical leader operating in Architect mode. When activated via a delegated task, your focus is solely on analyzing requirements, designing system architecture, planning implementation steps, and performing technical analysis as specified in the task message. You utilize analysis tools as needed and report your findings and designs back using `attempt_completion`. You do not deviate from the delegated task scope.",
      "customInstructions": "1. Do some information gathering (for example using read_file or search_files) to get more context about the task.\n\n2. You should also ask the user clarifying questions to get a better understanding of the task.\n\n3. Once you've gained more context about the user's request, you should create a detailed plan for how to accomplish the task. Include Mermaid diagrams if they help make your plan clearer.\n\n4. Ask the user if they are pleased with this plan, or if they would like to make any changes. Think of this as a brainstorming session where you can discuss the task and plan the best way to accomplish it.\n\n5. Once the user confirms the plan, ask them if they'd like you to write it to a markdown file.\n\n6. Use the switch_mode tool to request that the user switch to another mode to implement the solution.",
      "groups": [
        "read",
        ["edit", { "fileRegex": "\\.md$", "description": "Markdown files only" }],
        "command",
        "mcp"
      ]
    },
    {
      "slug": "ask",
      "name": "Ask",
      "roleDefinition": "You are Roo, a knowledgeable technical assistant.\nWhen activated by another mode via a delegated task, your focus is to research, analyze, and provide clear, concise answers or explanations based *only* on the specific information requested in the delegation message. Use available tools for information gathering and report your findings back using `attempt_completion`.",
      "customInstructions": "You can analyze code, explain concepts, and access external resources. Make sure to answer the user's questions and don't rush to switch to implementing code. Include Mermaid diagrams if they help make your response clearer.",
      "groups": [
        "read",
        "browser",
        "mcp"
      ]
    },
    {
      "slug": "debug",
      "name": "Debug",
      "roleDefinition": "You are Roo, an expert software debugger specializing in systematic problem diagnosis and resolution. When activated by another mode, your task is to meticulously analyze the provided debugging request (potentially referencing Taskmaster tasks, logs, or metrics), use diagnostic tools as instructed to investigate the issue, identify the root cause, and report your findings and recommended next steps back via `attempt_completion`. You focus solely on diagnostics within the scope defined by the delegated task.",
      "customInstructions": "Reflect on 5-7 different possible sources of the problem, distill those down to 1-2 most likely sources, and then add logs to validate your assumptions. Explicitly ask the user to confirm the diagnosis before fixing the problem.",
      "groups": [
        "read",
        "edit",
        "command",
        "mcp"
      ]
    },
    {
      "slug": "test",
      "name": "Test",
      "roleDefinition": "You are Roo, an expert software tester. Your primary focus is executing testing tasks delegated to you by other modes.\nAnalyze the provided scope and context (often referencing a Taskmaster task ID and its `testStrategy`), develop test plans if needed, execute tests diligently, and report comprehensive results (pass/fail, bugs, coverage) back using `attempt_completion`. You operate strictly within the delegated task's boundaries.",
      "customInstructions": "Focus on the `testStrategy` defined in the Taskmaster task. Develop and execute test plans accordingly. Report results clearly, including pass/fail status, bug details, and coverage information.",
      "groups": [
        "read",
        "command",
        "mcp"
      ]
    }
  ]
}
```
--------------------------------------------------------------------------------
/.windsurfrules:
--------------------------------------------------------------------------------
```
Below you will find a variety of important rules spanning:
- the dev_workflow
- the .windsurfrules document self-improvement workflow
- the template to follow when modifying or adding new sections/rules to this document.
---
DEV_WORKFLOW
---
description: Guide for using meta-development script (scripts/dev.js) to manage task-driven development workflows
globs: **/*
filesToApplyRule: **/*
alwaysApply: true
---
- **Global CLI Commands**
  - Task Master now provides a global CLI through the `task-master` command
  - All functionality from `scripts/dev.js` is available through this interface
  - Install globally with `npm install -g claude-task-master` or use locally via `npx`
  - Use `task-master <command>` instead of `node scripts/dev.js <command>`
  - Examples:
    - `task-master list` instead of `node scripts/dev.js list`
    - `task-master next` instead of `node scripts/dev.js next`
    - `task-master expand --id=3` instead of `node scripts/dev.js expand --id=3`
  - All commands accept the same options as their script equivalents
  - The CLI provides additional commands like `task-master init` for project setup
- **Development Workflow Process**
  - Start new projects by running `task-master init` or `node scripts/dev.js parse-prd --input=<prd-file.txt>` to generate initial tasks.json
  - Begin coding sessions with `task-master list` to see current tasks, status, and IDs
  - Analyze task complexity with `task-master analyze-complexity --research` before breaking down tasks
  - Select tasks based on dependencies (all marked 'done'), priority level, and ID order
  - Clarify tasks by checking task files in tasks/ directory or asking for user input
  - View specific task details using `task-master show <id>` to understand implementation requirements
  - Break down complex tasks using `task-master expand --id=<id>` with appropriate flags
  - Clear existing subtasks if needed using `task-master clear-subtasks --id=<id>` before regenerating
  - Implement code following task details, dependencies, and project standards
  - Verify tasks according to test strategies before marking as complete
  - Mark completed tasks with `task-master set-status --id=<id> --status=done`
  - Update dependent tasks when implementation differs from original plan
  - Generate task files with `task-master generate` after updating tasks.json
  - Maintain valid dependency structure with `task-master fix-dependencies` when needed
  - Respect dependency chains and task priorities when selecting work
  - Report progress regularly using the list command
- **Task Complexity Analysis**
  - Run `node scripts/dev.js analyze-complexity --research` for comprehensive analysis
  - Review complexity report in scripts/task-complexity-report.json
  - Or use `node scripts/dev.js complexity-report` for a formatted, readable version of the report
  - Focus on tasks with highest complexity scores (8-10) for detailed breakdown
  - Use analysis results to determine appropriate subtask allocation
  - Note that reports are automatically used by the expand command
- **Task Breakdown Process**
  - For tasks with complexity analysis, use `node scripts/dev.js expand --id=<id>`
  - Otherwise use `node scripts/dev.js expand --id=<id> --subtasks=<number>`
  - Add `--research` flag to leverage Perplexity AI for research-backed expansion
  - Use `--prompt="<context>"` to provide additional context when needed
  - Review and adjust generated subtasks as necessary
  - Use `--all` flag to expand multiple pending tasks at once
  - If subtasks need regeneration, clear them first with `clear-subtasks` command
- **Implementation Drift Handling**
  - When implementation differs significantly from planned approach
  - When future tasks need modification due to current implementation choices
  - When new dependencies or requirements emerge
  - Call `node scripts/dev.js update --from=<futureTaskId> --prompt="<explanation>"` to update tasks.json
- **Task Status Management**
  - Use 'pending' for tasks ready to be worked on
  - Use 'done' for completed and verified tasks
  - Use 'deferred' for postponed tasks
  - Add custom status values as needed for project-specific workflows
- **Task File Format Reference**
  ```
  # Task ID: <id>
  # Title: <title>
  # Status: <status>
  # Dependencies: <comma-separated list of dependency IDs>
  # Priority: <priority>
  # Description: <brief description>
  # Details:
  <detailed implementation notes>
  # Test Strategy:
  <verification approach>
  ```
- **Command Reference: parse-prd**
  - Legacy Syntax: `node scripts/dev.js parse-prd --input=<prd-file.txt>`
  - CLI Syntax: `task-master parse-prd --input=<prd-file.txt>`
  - Description: Parses a PRD document and generates a tasks.json file with structured tasks
  - Parameters:
    - `--input=<file>`: Path to the PRD text file (default: sample-prd.txt)
  - Example: `task-master parse-prd --input=requirements.txt`
  - Notes: Will overwrite existing tasks.json file. Use with caution.
- **Command Reference: update**
  - Legacy Syntax: `node scripts/dev.js update --from=<id> --prompt="<prompt>"`
  - CLI Syntax: `task-master update --from=<id> --prompt="<prompt>"`
  - Description: Updates tasks with ID >= specified ID based on the provided prompt
  - Parameters:
    - `--from=<id>`: Task ID from which to start updating (required)
    - `--prompt="<text>"`: Explanation of changes or new context (required)
  - Example: `task-master update --from=4 --prompt="Now we are using Express instead of Fastify."`
  - Notes: Only updates tasks not marked as 'done'. Completed tasks remain unchanged.
- **Command Reference: generate**
  - Legacy Syntax: `node scripts/dev.js generate`
  - CLI Syntax: `task-master generate`
  - Description: Generates individual task files in tasks/ directory based on tasks.json
  - Parameters:
    - `--file=<path>, -f`: Use alternative tasks.json file (default: 'tasks/tasks.json')
    - `--output=<dir>, -o`: Output directory (default: 'tasks')
  - Example: `task-master generate`
  - Notes: Overwrites existing task files. Creates tasks/ directory if needed.
- **Command Reference: set-status**
  - Legacy Syntax: `node scripts/dev.js set-status --id=<id> --status=<status>`
  - CLI Syntax: `task-master set-status --id=<id> --status=<status>`
  - Description: Updates the status of a specific task in tasks.json
  - Parameters:
    - `--id=<id>`: ID of the task to update (required)
    - `--status=<status>`: New status value (required)
  - Example: `task-master set-status --id=3 --status=done`
  - Notes: Common values are 'done', 'pending', and 'deferred', but any string is accepted.
- **Command Reference: list**
  - Legacy Syntax: `node scripts/dev.js list`
  - CLI Syntax: `task-master list`
  - Description: Lists all tasks in tasks.json with IDs, titles, and status
  - Parameters:
    - `--status=<status>, -s`: Filter by status
    - `--with-subtasks`: Show subtasks for each task
    - `--file=<path>, -f`: Use alternative tasks.json file (default: 'tasks/tasks.json')
  - Example: `task-master list`
  - Notes: Provides quick overview of project progress. Use at start of sessions.
- **Command Reference: expand**
  - Legacy Syntax: `node scripts/dev.js expand --id=<id> [--num=<number>] [--research] [--prompt="<context>"]`
  - CLI Syntax: `task-master expand --id=<id> [--num=<number>] [--research] [--prompt="<context>"]`
  - Description: Expands a task with subtasks for detailed implementation
  - Parameters:
    - `--id=<id>`: ID of task to expand (required unless using --all)
    - `--all`: Expand all pending tasks, prioritized by complexity
    - `--num=<number>`: Number of subtasks to generate (default: from complexity report)
    - `--research`: Use Perplexity AI for research-backed generation
    - `--prompt="<text>"`: Additional context for subtask generation
    - `--force`: Regenerate subtasks even for tasks that already have them
  - Example: `task-master expand --id=3 --num=5 --research --prompt="Focus on security aspects"`
  - Notes: Uses complexity report recommendations if available.
- **Command Reference: analyze-complexity**
  - Legacy Syntax: `node scripts/dev.js analyze-complexity [options]`
  - CLI Syntax: `task-master analyze-complexity [options]`
  - Description: Analyzes task complexity and generates expansion recommendations
  - Parameters:
    - `--output=<file>, -o`: Output file path (default: scripts/task-complexity-report.json)
    - `--model=<model>, -m`: Override LLM model to use
    - `--threshold=<number>, -t`: Minimum score for expansion recommendation (default: 5)
    - `--file=<path>, -f`: Use alternative tasks.json file
    - `--research, -r`: Use Perplexity AI for research-backed analysis
  - Example: `task-master analyze-complexity --research`
  - Notes: Report includes complexity scores, recommended subtasks, and tailored prompts.
- **Command Reference: clear-subtasks**
  - Legacy Syntax: `node scripts/dev.js clear-subtasks --id=<id>`
  - CLI Syntax: `task-master clear-subtasks --id=<id>`
  - Description: Removes subtasks from specified tasks to allow regeneration
  - Parameters:
    - `--id=<id>`: ID or comma-separated IDs of tasks to clear subtasks from
    - `--all`: Clear subtasks from all tasks
  - Examples:
    - `task-master clear-subtasks --id=3`
    - `task-master clear-subtasks --id=1,2,3`
    - `task-master clear-subtasks --all`
  - Notes:
    - Task files are automatically regenerated after clearing subtasks
    - Can be combined with expand command to immediately generate new subtasks
    - Works with both parent tasks and individual subtasks
- **Task Structure Fields**
  - **id**: Unique identifier for the task (Example: `1`)
  - **title**: Brief, descriptive title (Example: `"Initialize Repo"`)
  - **description**: Concise summary of what the task involves (Example: `"Create a new repository, set up initial structure."`)
  - **status**: Current state of the task (Example: `"pending"`, `"done"`, `"deferred"`)
  - **dependencies**: IDs of prerequisite tasks (Example: `[1, 2]`)
    - Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending)
    - This helps quickly identify which prerequisite tasks are blocking work
  - **priority**: Importance level (Example: `"high"`, `"medium"`, `"low"`)
  - **details**: In-depth implementation instructions (Example: `"Use GitHub client ID/secret, handle callback, set session token."`)
  - **testStrategy**: Verification approach (Example: `"Deploy and call endpoint to confirm 'Hello World' response."`)
  - **subtasks**: List of smaller, more specific tasks (Example: `[{"id": 1, "title": "Configure OAuth", ...}]`)
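  - Taken together, the fields above describe a shape like this (an illustrative TypeScript sketch, not code from Task Master itself):
  ```typescript
  // Illustrative shape of one tasks.json entry; field names from the list above.
  interface Task {
    id: number;                          // e.g. 1
    title: string;                       // brief, descriptive title
    description: string;                 // concise summary
    status: 'pending' | 'done' | 'deferred' | string; // custom statuses allowed
    dependencies: number[];              // e.g. [1, 2]
    priority: 'high' | 'medium' | 'low';
    details: string;                     // in-depth implementation notes
    testStrategy: string;                // verification approach
    subtasks?: Task[];                   // assumed recursive; the docs only show id/title
  }
  ```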
- **Environment Variables Configuration**
  - **ANTHROPIC_API_KEY** (Required): Your Anthropic API key for Claude (Example: `ANTHROPIC_API_KEY=sk-ant-api03-...`)
  - **MODEL** (Default: `"claude-3-7-sonnet-20250219"`): Claude model to use (Example: `MODEL=claude-3-opus-20240229`)
  - **MAX_TOKENS** (Default: `"4000"`): Maximum tokens for responses (Example: `MAX_TOKENS=8000`)
  - **TEMPERATURE** (Default: `"0.7"`): Temperature for model responses (Example: `TEMPERATURE=0.5`)
  - **DEBUG** (Default: `"false"`): Enable debug logging (Example: `DEBUG=true`)
  - **TASKMASTER_LOG_LEVEL** (Default: `"info"`): Console output level (Example: `TASKMASTER_LOG_LEVEL=debug`)
  - **DEFAULT_SUBTASKS** (Default: `"3"`): Default subtask count (Example: `DEFAULT_SUBTASKS=5`)
  - **DEFAULT_PRIORITY** (Default: `"medium"`): Default priority (Example: `DEFAULT_PRIORITY=high`)
  - **PROJECT_NAME** (Default: `"MCP SaaS MVP"`): Project name in metadata (Example: `PROJECT_NAME=My Awesome Project`)
  - **PROJECT_VERSION** (Default: `"1.0.0"`): Version in metadata (Example: `PROJECT_VERSION=2.1.0`)
  - **PERPLEXITY_API_KEY**: For research-backed features (Example: `PERPLEXITY_API_KEY=pplx-...`)
  - **PERPLEXITY_MODEL** (Default: `"sonar-medium-online"`): Perplexity model (Example: `PERPLEXITY_MODEL=sonar-large-online`)
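  - Read together with their defaults, these variables map onto a loader roughly like the following (an illustrative sketch, not Task Master's actual code):
  ```typescript
  // Illustrative env loader mirroring the documented defaults above.
  const config = {
    anthropicApiKey: process.env.ANTHROPIC_API_KEY,  // required
    model: process.env.MODEL ?? 'claude-3-7-sonnet-20250219',
    maxTokens: Number(process.env.MAX_TOKENS ?? '4000'),
    temperature: Number(process.env.TEMPERATURE ?? '0.7'),
    debug: (process.env.DEBUG ?? 'false') === 'true',
    logLevel: process.env.TASKMASTER_LOG_LEVEL ?? 'info',
    defaultSubtasks: Number(process.env.DEFAULT_SUBTASKS ?? '3'),
    defaultPriority: process.env.DEFAULT_PRIORITY ?? 'medium',
    projectName: process.env.PROJECT_NAME ?? 'MCP SaaS MVP',
    perplexityApiKey: process.env.PERPLEXITY_API_KEY,  // optional, research features
    perplexityModel: process.env.PERPLEXITY_MODEL ?? 'sonar-medium-online',
  };
  ```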
- **Determining the Next Task**
  - Run `task-master next` to show the next task to work on
  - The next command identifies tasks with all dependencies satisfied
  - Tasks are prioritized by priority level, dependency count, and ID
  - The command shows comprehensive task information including:
    - Basic task details and description
    - Implementation details
    - Subtasks (if they exist)
    - Contextual suggested actions
  - Recommended before starting any new development work
  - Respects your project's dependency structure
  - Ensures tasks are completed in the appropriate sequence
  - Provides ready-to-use commands for common task actions
- **Viewing Specific Task Details**
  - Run `task-master show <id>` or `task-master show --id=<id>` to view a specific task
  - Use dot notation for subtasks: `task-master show 1.2` (shows subtask 2 of task 1)
  - Displays comprehensive information similar to the next command, but for a specific task
  - For parent tasks, shows all subtasks and their current status
  - For subtasks, shows parent task information and relationship
  - Provides contextual suggested actions appropriate for the specific task
  - Useful for examining task details before implementation or checking status
- **Managing Task Dependencies**
  - Use `task-master add-dependency --id=<id> --depends-on=<id>` to add a dependency
  - Use `task-master remove-dependency --id=<id> --depends-on=<id>` to remove a dependency
  - The system prevents circular dependencies and duplicate dependency entries
  - Dependencies are checked for existence before being added or removed
  - Task files are automatically regenerated after dependency changes
  - Dependencies are visualized with status indicators in task listings and files
- **Command Reference: add-dependency**
  - Legacy Syntax: `node scripts/dev.js add-dependency --id=<id> --depends-on=<id>`
  - CLI Syntax: `task-master add-dependency --id=<id> --depends-on=<id>`
  - Description: Adds a dependency relationship between two tasks
  - Parameters:
    - `--id=<id>`: ID of task that will depend on another task (required)
    - `--depends-on=<id>`: ID of task that will become a dependency (required)
  - Example: `task-master add-dependency --id=22 --depends-on=21`
  - Notes: Prevents circular dependencies and duplicates; updates task files automatically
- **Command Reference: remove-dependency**
  - Legacy Syntax: `node scripts/dev.js remove-dependency --id=<id> --depends-on=<id>`
  - CLI Syntax: `task-master remove-dependency --id=<id> --depends-on=<id>`
  - Description: Removes a dependency relationship between two tasks
  - Parameters:
    - `--id=<id>`: ID of task to remove dependency from (required)
    - `--depends-on=<id>`: ID of task to remove as a dependency (required)
  - Example: `task-master remove-dependency --id=22 --depends-on=21`
  - Notes: Checks if dependency actually exists; updates task files automatically
- **Command Reference: validate-dependencies**
  - Legacy Syntax: `node scripts/dev.js validate-dependencies [options]`
  - CLI Syntax: `task-master validate-dependencies [options]`
  - Description: Checks for and identifies invalid dependencies in tasks.json and task files
  - Parameters:
    - `--file=<path>, -f`: Use alternative tasks.json file (default: 'tasks/tasks.json')
  - Example: `task-master validate-dependencies`
  - Notes:
    - Reports all non-existent dependencies and self-dependencies without modifying files
    - Provides detailed statistics on task dependency state
    - Use before fix-dependencies to audit your task structure
- **Command Reference: fix-dependencies**
  - Legacy Syntax: `node scripts/dev.js fix-dependencies [options]`
  - CLI Syntax: `task-master fix-dependencies [options]`
  - Description: Finds and fixes all invalid dependencies in tasks.json and task files
  - Parameters:
    - `--file=<path>, -f`: Use alternative tasks.json file (default: 'tasks/tasks.json')
  - Example: `task-master fix-dependencies`
  - Notes:
    - Removes references to non-existent tasks and subtasks
    - Eliminates self-dependencies (tasks depending on themselves)
    - Regenerates task files with corrected dependencies
    - Provides detailed report of all fixes made
- **Command Reference: complexity-report**
  - Legacy Syntax: `node scripts/dev.js complexity-report [options]`
  - CLI Syntax: `task-master complexity-report [options]`
  - Description: Displays the task complexity analysis report in a formatted, easy-to-read way
  - Parameters:
    - `--file=<path>, -f`: Path to the complexity report file (default: 'scripts/task-complexity-report.json')
  - Example: `task-master complexity-report`
  - Notes:
    - Shows tasks organized by complexity score with recommended actions
    - Provides complexity distribution statistics
    - Displays ready-to-use expansion commands for complex tasks
    - If no report exists, offers to generate one interactively
- **Command Reference: add-task**
  - CLI Syntax: `task-master add-task [options]`
  - Description: Add a new task to tasks.json using AI
  - Parameters:
    - `--file=<path>, -f`: Path to the tasks file (default: 'tasks/tasks.json')
    - `--prompt=<text>, -p`: Description of the task to add (required)
    - `--dependencies=<ids>, -d`: Comma-separated list of task IDs this task depends on
    - `--priority=<priority>`: Task priority (high, medium, low) (default: 'medium')
  - Example: `task-master add-task --prompt="Create user authentication using Auth0"`
  - Notes: Uses AI to convert description into structured task with appropriate details
- **Command Reference: init**
  - CLI Syntax: `task-master init`
  - Description: Initialize a new project with Task Master structure
  - Parameters: None
  - Example: `task-master init`
  - Notes:
    - Creates initial project structure with required files
    - Prompts for project settings if not provided
    - Merges with existing files when appropriate
    - Can be used to bootstrap a new Task Master project quickly
- **Code Analysis & Refactoring Techniques**
  - **Top-Level Function Search**
    - Use grep pattern matching to find all exported functions across the codebase
    - Command: `grep -E "export (function|const) \w+|function \w+\(|const \w+ = \(|module\.exports" --include="*.js" -r ./`
    - Benefits:
      - Quickly identify all public API functions without reading implementation details
      - Compare functions between files during refactoring (e.g., monolithic to modular structure)
      - Verify all expected functions exist in refactored modules
      - Identify duplicate functionality or naming conflicts
    - Usage examples:
      - When migrating from `scripts/dev.js` to modular structure: `grep -E "function \w+\(" scripts/dev.js`
      - Check function exports in a directory: `grep -E "export (function|const)" scripts/modules/`
      - Find potential naming conflicts: `grep -E "function (get|set|create|update)\w+\(" -r ./`
    - Variations:
      - Add `-n` flag to include line numbers
      - Add `--include="*.ts"` to filter by file extension
      - Use with `| sort` to alphabetize results
    - Integration with refactoring workflow:
      - Start by mapping all functions in the source file
      - Create target module files based on function grouping
      - Verify all functions were properly migrated
      - Check for any unintentional duplications or omissions
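    - The same survey can be scripted in Node for cross-platform use; a rough sketch (illustrative, not part of Task Master):
    ```typescript
    // Illustrative cross-platform variant of the grep-based function survey.
    import { readFileSync, readdirSync, statSync } from 'node:fs';
    import { join } from 'node:path';

    const exportPattern = /export (?:function|const) (\w+)|function (\w+)\(/g;

    function listFunctions(dir: string): void {
      for (const entry of readdirSync(dir)) {
        const path = join(dir, entry);
        if (statSync(path).isDirectory()) {
          if (entry !== 'node_modules') listFunctions(path);
        } else if (entry.endsWith('.js') || entry.endsWith('.ts')) {
          for (const match of readFileSync(path, 'utf8').matchAll(exportPattern)) {
            console.log(`${path}: ${match[1] ?? match[2]}`);
          }
        }
      }
    }

    listFunctions('./scripts');
    ```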
---
WINDSURF_RULES
---
description: Guidelines for creating and maintaining Windsurf rules to ensure consistency and effectiveness.
globs: .windsurfrules
filesToApplyRule: .windsurfrules
alwaysApply: true
---
The following describes how you should structure new rule sections in this document.
- **Required Rule Structure:**
  ```markdown
  ---
  description: Clear, one-line description of what the rule enforces
  globs: path/to/files/*.ext, other/path/**/*
  alwaysApply: boolean
  ---
  - **Main Points in Bold**
    - Sub-points with details
    - Examples and explanations
  ```
- **Section References:**
  - Use `ALL_CAPS_SECTION` to reference files
  - Example: `WINDSURF_RULES`
- **Code Examples:**
  - Use language-specific code blocks
  ```typescript
  // ✅ DO: Show good examples
  const goodExample = true;

  // ❌ DON'T: Show anti-patterns
  const badExample = false;
  ```
- **Rule Content Guidelines:**
  - Start with high-level overview
  - Include specific, actionable requirements
  - Show examples of correct implementation
  - Reference existing code when possible
  - Keep rules DRY by referencing other rules
- **Rule Maintenance:**
  - Update rules when new patterns emerge
  - Add examples from actual codebase
  - Remove outdated patterns
  - Cross-reference related rules
- **Best Practices:**
  - Use bullet points for clarity
  - Keep descriptions concise
  - Include both DO and DON'T examples
  - Reference actual code over theoretical examples
  - Use consistent formatting across rules
---
SELF_IMPROVE
---
description: Guidelines for continuously improving this rules document based on emerging code patterns and best practices.
globs: **/*
filesToApplyRule: **/*
alwaysApply: true
---
- **Rule Improvement Triggers:**
  - New code patterns not covered by existing rules
  - Repeated similar implementations across files
  - Common error patterns that could be prevented
  - New libraries or tools being used consistently
  - Emerging best practices in the codebase
- **Analysis Process:**
  - Compare new code with existing rules
  - Identify patterns that should be standardized
  - Look for references to external documentation
  - Check for consistent error handling patterns
  - Monitor test patterns and coverage
- **Rule Updates:**
  - **Add New Rules When:**
    - A new technology/pattern is used in 3+ files
    - Common bugs could be prevented by a rule
    - Code reviews repeatedly mention the same feedback
    - New security or performance patterns emerge
  - **Modify Existing Rules When:**
    - Better examples exist in the codebase
    - Additional edge cases are discovered
    - Related rules have been updated
    - Implementation details have changed
- **Example Pattern Recognition:**
  ```typescript
  // If you see repeated patterns like:
  const data = await prisma.user.findMany({
    select: { id: true, email: true },
    where: { status: 'ACTIVE' }
  });

  // Consider adding a PRISMA section in the .windsurfrules:
  // - Standard select fields
  // - Common where conditions
  // - Performance optimization patterns
  ```
- **Rule Quality Checks:**
  - Rules should be actionable and specific
  - Examples should come from actual code
  - References should be up to date
  - Patterns should be consistently enforced
- **Continuous Improvement:**
  - Monitor code review comments
  - Track common development questions
  - Update rules after major refactors
  - Add links to relevant documentation
  - Cross-reference related rules
- **Rule Deprecation:**
  - Mark outdated patterns as deprecated
  - Remove rules that no longer apply
  - Update references to deprecated rules
  - Document migration paths for old patterns
- **Documentation Updates:**
  - Keep examples synchronized with code
  - Update references to external docs
  - Maintain links between related rules
  - Document breaking changes
Follow WINDSURF_RULES for proper rule formatting and structure of windsurf rule sections.
```
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
```markdown
# Cursor Chat History MCP
**Give AI assistants access to your Cursor chat history.**
A Model Context Protocol (MCP) server that allows Cursor, Claude, and other AI assistants to read and analyze your Cursor chat data. This enables personalized coding assistance based on your actual development patterns and history.
<a href="https://glama.ai/mcp/servers/@vltansky/cursor-conversations-mcp">
<img width="380" height="200" src="https://glama.ai/mcp/servers/@vltansky/cursor-conversations-mcp/badge" alt="Cursor Conversations Server MCP server" />
</a>
## What This Enables
Ask your AI assistant to:
- Analyze your chat history to understand your coding patterns and usage statistics
- Generate project-specific rules based on your actual development discussions
- Extract insights from past problem-solving sessions and find related conversations
- Create documentation based on real conversations about your code
- Export chat data for external analysis and visualization
- Find and apply solutions you've already worked through
## Key Benefits
**Generate Personalized Rules**: Create coding standards based on your actual development patterns, not generic best practices.
**Learn from Your History**: Extract insights from past chats to improve future development.
**Context-Aware Assistance**: Get help that's informed by your specific projects and coding style.
**Pattern Recognition**: Identify recurring themes and solutions in your development work.
## Quick Start
### 1. Configure MCP
Add to your `.cursor/mcp.json`:
```json
{
  "mcpServers": {
    "cursor-chat-history": {
      "command": "npx",
      "args": ["-y", "--package=cursor-chat-history-mcp", "cursor-chat-history-mcp"]
    }
  }
}
```
### 2. Start Using
```
"Analyze my React conversations and create component guidelines"
"Find debugging patterns in my chat history"
"Generate TypeScript coding standards from my actual usage"
"What are the main themes in my recent coding discussions?"
```
## Available Tools
### Core Tools
- **`list_conversations`** - Browse conversations with filtering options and optional project relevance scoring
- **`get_conversation`** - Retrieve full conversation content with code and file references
- **`search_conversations`** - Enhanced search with multi-keyword, LIKE patterns, and text search
### Analytics & Data Extraction Tools
- **`get_conversation_analytics`** - Comprehensive analytics including usage patterns, file activity, programming language distribution, and temporal trends
- **`find_related_conversations`** - Find conversations related by shared files, folders, languages, size, or temporal proximity
- **`extract_conversation_elements`** - Extract files, code blocks, languages, metadata, and conversation structure with flexible grouping
- **`export_conversation_data`** - Export chat data in JSON, CSV, or Graph formats for external analysis and visualization
## Common Use Cases
### Generate Coding Rules
```
"Create TypeScript interface naming conventions from my conversations"
"Extract error handling patterns and create guidelines"
"Find all my discussions about testing and create best practices"
```
### Extract Best Practices
```
"Show me how I typically use React hooks in my projects"
"Find patterns in my state management discussions"
"Analyze my class inheritance usage and create guidelines"
```
### Advanced Analysis
```
"Find conversations where I discussed specific functions or patterns"
"Search for file-specific discussions across my projects"
"Compare how I've approached similar problems over time"
```
### Create Project Documentation
```
"Generate API documentation from my service discussions"
"Create technical docs from my auth module conversations"
```
### Learn from Past Solutions
```
"Find similar debugging sessions and extract solutions"
"Analyze my performance optimization discussions"
```
### Data Analysis & Insights
```
"Get comprehensive analytics on my coding patterns over the last 3 months"
"Export all conversations with React code to CSV for analysis"
"Find conversations similar to this database migration discussion"
```
## Privacy & Security
- **Runs locally** - Your chat data never leaves your machine
- **No external services** - Direct access to your local Cursor database
- **No API keys required** - No data sharing with external services
- **Full control** - You decide what data to access and when
## How It Works
**Summary-First Approach for Efficiency**
The entire system is designed to be both powerful and context-efficient:
### **Data Access Process**
1. **Full Content Analysis**: All tools access complete chat data including:
   - Complete message text and code blocks
   - File references and folder paths
   - Conversation metadata and titles
   - AI-generated summaries
2. **Smart Result Delivery**: Different tools provide focused outputs:
   - **`list_conversations`**: Returns conversation summaries with titles and metadata
   - **`search_conversations`**: Searches full content but returns only summaries with relevance scores
   - **Analytics tools**: Extract insights and patterns without overwhelming detail
3. **Summary-First Results**: Most tools return:
   - Conversation summaries and titles
   - Key metadata (files, folders, message count)
   - AI-generated summaries when available
   - Relevance scores and analytics
### **Why This Design?**
- **Context Efficiency**: Avoids overwhelming AI assistants with full message content
- **Performance**: Summaries are much smaller and faster to process
- **Discoverability**: Users can quickly scan results to identify relevant conversations
- **Deep Dive When Needed**: Use `get_conversation` for full content of specific conversations
This approach lets you efficiently browse, search, and analyze your chat history, then dive deep only into conversations that matter for your current task.
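From a client's point of view this is a two-step flow: browse summaries first, fetch full content second. A hypothetical sketch using the TypeScript MCP SDK client (the connected `client` and the picked ID are assumptions, not code from this repo):
```typescript
// Hypothetical two-step flow; assumes `client` is a connected MCP Client
// from @modelcontextprotocol/sdk.
const summaries = await client.callTool({
  name: 'search_conversations',
  arguments: { query: 'database migration', maxResults: 5 },
});

// After scanning the summaries, dive into one conversation in full.
const full = await client.callTool({
  name: 'get_conversation',
  arguments: { conversationId: '<id-from-summaries>', includeMetadata: true },
});
```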
## Installation
### For Development
```bash
git clone https://github.com/vltansky/cursor-chat-history-mcp
cd cursor-chat-history-mcp
yarn install
yarn build
```
### For Use
The npx configuration above handles installation automatically.
## Tool Reference
### Output Formats
All tools support JSON output formats via the `outputMode` parameter:
- **`json` (default)** - Formatted JSON with proper indentation for readability
- **`compact-json`** - Minified JSON without formatting for minimal size
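Conceptually, the two modes differ only in whitespace (see `src/utils/formatter.ts` later in this dump):
```typescript
const data = { tool: 'list_conversations', count: 2 };
JSON.stringify(data, null, 2); // 'json': indented for readability
JSON.stringify(data);          // 'compact-json': minified for minimal size
```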
### Core Tools
**`list_conversations`**
- `limit` (default: 10) - Number of conversations to return
- `includeAiSummaries` (default: true) - Include AI-generated summaries for efficient browsing
- `projectPath` - Filter by project path
- `includeRelevanceScore` (default: false) - Include relevance scores when filtering by projectPath
- `hasCodeBlocks` - Filter conversations with/without code
- `keywords` - Search by keywords
- `filePattern` - Filter by file pattern
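For example, a call that browses one project's conversations containing code might pass arguments like these (values illustrative):
```typescript
const args = {
  limit: 20,
  projectPath: '/path/to/my-project', // illustrative path
  includeRelevanceScore: true,
  hasCodeBlocks: true,
  keywords: ['react', 'hooks'],
};
```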
**`get_conversation`**
- `conversationId` (required) - Conversation to retrieve
- `summaryOnly` (default: false) - Get enhanced summary without full content to save context
- `includeMetadata` (default: false) - Include additional metadata
**`search_conversations`** - Enhanced search with multiple methods
- **Simple Query**: `query` - Basic text search (backward compatible)
- **Multi-keyword**: `keywords` array with `keywordOperator` ('AND'/'OR')
- **LIKE Patterns**: `likePattern` - SQL LIKE patterns (% = any chars, _ = single char)
- `searchType` (default: 'all') - 'all', 'project', 'files', 'code'
- `maxResults` (default: 10) - Maximum results
- `includeCode` (default: true) - Include code blocks
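A hypothetical search combining these methods (values illustrative):
```typescript
const args = {
  keywords: ['useEffect', 'cleanup'],
  keywordOperator: 'AND',
  likePattern: '%useEffect(%', // % matches any run of characters
  searchType: 'code',
  maxResults: 5,
};
```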
### Analytics & Data Extraction Tools
**`get_conversation_analytics`**
- `scope` (default: 'all') - 'all', 'recent', 'project'
- `projectPath` - Focus on specific project (required when scope='project')
- `recentDays` (default: 30) - Time window for recent scope
- `includeBreakdowns` (default: ['files', 'languages']) - Analysis types: 'files', 'languages', 'temporal', 'size'
**`find_related_conversations`**
- `referenceConversationId` (required) - Starting conversation
- `relationshipTypes` (default: ['files']) - 'files', 'folders', 'languages', 'size', 'temporal'
- `maxResults` (default: 10) - Number of results
- `minScore` (default: 0.1) - Minimum similarity score (0-1)
- `includeScoreBreakdown` (default: false) - Show individual relationship scores
**`extract_conversation_elements`**
- `conversationIds` - Specific conversations (optional, processes all if empty)
- `elements` (default: ['files', 'codeblocks']) - 'files', 'folders', 'languages', 'codeblocks', 'metadata', 'structure'
- `includeContext` (default: false) - Include surrounding message text
- `groupBy` (default: 'conversation') - 'conversation', 'element', 'none'
- `filters` - Filter by code length, file extensions, or languages
**`export_conversation_data`**
- `conversationIds` - Specific conversations (optional, exports all if empty)
- `format` (default: 'json') - 'json', 'csv', 'graph'
- `includeContent` (default: false) - Include full message text
- `includeRelationships` (default: false) - Calculate file/folder connections
- `flattenStructure` (default: false) - Flatten for CSV compatibility
- `filters` - Filter by size, code blocks, or project path
## Database Paths
Auto-detected locations:
- **macOS**: `~/Library/Application Support/Cursor/User/globalStorage/state.vscdb`
- **Windows**: `%APPDATA%/Cursor/User/globalStorage/state.vscdb`
- **Linux**: `~/.config/Cursor/User/globalStorage/state.vscdb`
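Detection along these lines covers all three platforms; this is an illustrative sketch, not the actual logic in `src/database/reader.ts`:
```typescript
import { homedir } from 'node:os';
import { join } from 'node:path';

// Illustrative auto-detection of Cursor's global storage database.
function defaultCursorDbPath(): string {
  switch (process.platform) {
    case 'darwin':
      return join(homedir(), 'Library/Application Support/Cursor/User/globalStorage/state.vscdb');
    case 'win32':
      return join(process.env.APPDATA ?? '', 'Cursor/User/globalStorage/state.vscdb');
    default: // linux and others
      return join(homedir(), '.config/Cursor/User/globalStorage/state.vscdb');
  }
}
```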
## Technical Notes
- Supports both legacy and modern Cursor conversation formats
- Uses SQLite to access Cursor's chat database
- Close Cursor before running to avoid database lock issues
- Conversations filtered by size (>1000 bytes) to exclude empty ones
- Uses ROWID for chronological ordering (UUIDs are not chronological)
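The last two notes translate into a query shaped roughly like this better-sqlite3 sketch (the table and column layout of Cursor's database are assumptions here, not a documented API):
```typescript
import Database from 'better-sqlite3';

// Sketch only: open read-only, skip near-empty rows, order by insertion.
// The table name is an assumption about Cursor's internal schema.
const db = new Database('/path/to/state.vscdb', { readonly: true });
const rows = db
  .prepare(
    `SELECT rowid, key, value
     FROM cursorDiskKV              -- assumed key-value table
     WHERE length(value) > 1000     -- filter out empty conversations
     ORDER BY rowid DESC            -- ROWID is chronological; UUID keys are not
     LIMIT 10`
  )
  .all();
```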
## Contributing
1. Fork the repository
2. Create a feature branch
3. Make your changes
4. Add tests if applicable
5. Submit a pull request
## License
MIT
```
--------------------------------------------------------------------------------
/vitest.config.ts:
--------------------------------------------------------------------------------
```typescript
import { defineConfig } from 'vitest/config';

export default defineConfig({
  test: {
    globals: true,
    environment: 'node',
    include: ['src/**/*.{test,spec}.{js,mjs,cjs,ts,mts,cts,jsx,tsx}'],
    exclude: ['node_modules', 'dist'],
  },
});
```
--------------------------------------------------------------------------------
/.cursor/mcp.json:
--------------------------------------------------------------------------------
```json
{
  "mcpServers": {
    "cursor-chat-history-mcp": {
      "command": "node",
      "args": [
        "/Users/vladta/Projects/cursor-chat-history-mcp/dist/server.js"
      ]
    },
    "taskmaster-ai": {
      "command": "npx",
      "args": ["-y", "--package=task-master-ai", "task-master-ai"]
    }
  }
}
```
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
```json
{
  "compilerOptions": {
    "target": "ES2022",
    "module": "NodeNext",
    "moduleResolution": "NodeNext",
    "esModuleInterop": true,
    "forceConsistentCasingInFileNames": true,
    "strict": true,
    "skipLibCheck": true,
    "outDir": "dist"
  },
  "include": ["src/**/*"],
  "exclude": ["node_modules", "**/*.test.ts", "**/*.test.js"]
}
```
--------------------------------------------------------------------------------
/src/utils/formatter.ts:
--------------------------------------------------------------------------------
```typescript
export type OutputFormat = 'json' | 'compact-json';

// Serializes tool results either pretty-printed ('json') or minified ('compact-json').
export function formatResponse(data: any, format?: OutputFormat): string {
  try {
    switch (format) {
      case 'compact-json':
        return JSON.stringify(data);
      case 'json':
      default:
        return JSON.stringify(data, null, 2);
    }
  } catch (error) {
    // Fall back to pretty-printed JSON if formatting fails for any reason.
    console.error('Formatting failed, falling back to JSON:', error);
    return JSON.stringify(data, null, 2);
  }
}
```
--------------------------------------------------------------------------------
/.taskmaster/.taskmaster/config.json:
--------------------------------------------------------------------------------
```json
{
  "models": {
    "main": {
      "provider": "anthropic",
      "modelId": "claude-3-7-sonnet-20250219",
      "maxTokens": 64000,
      "temperature": 0.2
    },
    "research": {
      "provider": "perplexity",
      "modelId": "sonar-pro",
      "maxTokens": 8700,
      "temperature": 0.1
    },
    "fallback": {
      "provider": "anthropic",
      "modelId": "claude-3-5-sonnet",
      "maxTokens": 64000,
      "temperature": 0.2
    }
  },
  "global": {
    "logLevel": "info",
    "debug": false,
    "defaultSubtasks": 5,
    "defaultPriority": "medium",
    "projectName": "Task Master",
    "ollamaBaseURL": "http://localhost:11434/api",
    "bedrockBaseURL": "https://bedrock.us-east-1.amazonaws.com",
    "userId": "1234567890"
  }
}
```
--------------------------------------------------------------------------------
/.taskmaster/config.json:
--------------------------------------------------------------------------------
```json
{
  "models": {
    "main": {
      "provider": "anthropic",
      "modelId": "claude-sonnet-4-20250514",
      "maxTokens": 120000,
      "temperature": 0.2
    },
    "research": {
      "provider": "perplexity",
      "modelId": "sonar-pro",
      "maxTokens": 8700,
      "temperature": 0.1
    },
    "fallback": {
      "provider": "anthropic",
      "modelId": "claude-3-5-sonnet-20240620",
      "maxTokens": 8192,
      "temperature": 0.1
    }
  },
  "global": {
    "logLevel": "info",
    "debug": false,
    "defaultSubtasks": 5,
    "defaultPriority": "medium",
    "projectName": "Taskmaster",
    "ollamaBaseURL": "http://localhost:11434/api",
    "bedrockBaseURL": "https://bedrock.us-east-1.amazonaws.com",
    "ollamaBaseUrl": "http://localhost:11434/api",
    "azureOpenaiBaseUrl": "https://your-endpoint.openai.azure.com/",
    "userId": "1234567890"
  }
}
```
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
```yaml
version: 2
updates:
  # Enable version updates for npm dependencies
  - package-ecosystem: "npm"
    directory: "/"
    schedule:
      interval: "weekly"
      day: "monday"
      time: "09:00"
    open-pull-requests-limit: 10
    reviewers:
      - "@octocat" # Replace with actual GitHub usernames
    assignees:
      - "@octocat" # Replace with actual GitHub usernames
    commit-message:
      prefix: "deps"
      include: "scope"
    labels:
      - "dependencies"
      - "automated"
    # Group minor and patch updates together
    groups:
      minor-and-patch:
        patterns:
          - "*"
        update-types:
          - "minor"
          - "patch"
    # Ignore specific packages if needed
    ignore:
      - dependency-name: "typescript"
        versions: ["5.0.x"] # Example: ignore specific versions

  # Enable version updates for GitHub Actions
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
      day: "monday"
      time: "09:00"
    open-pull-requests-limit: 5
    commit-message:
      prefix: "ci"
      include: "scope"
    labels:
      - "github-actions"
      - "automated"
```
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
```json
{
  "name": "cursor-chat-history-mcp",
  "version": "0.1.9",
  "description": "MCP server that provides AI assistants access to Cursor chat history for analysis and insights",
  "type": "module",
  "main": "dist/server.js",
  "bin": {
    "cursor-chat-history-mcp": "dist/server.js"
  },
  "files": [
    "dist/**/*",
    "README.md",
    "package.json"
  ],
  "scripts": {
    "build": "tsc",
    "watch": "tsc --watch",
    "start": "node dist/server.js",
    "inspector": "nodemon --watch dist --exec 'npx @modelcontextprotocol/inspector node dist/server.js'",
    "test": "vitest run",
    "test:ui": "vitest --ui",
    "prepublishOnly": "npm run build && chmod +x dist/server.js",
    "postinstall": "chmod +x dist/server.js 2>/dev/null || true"
  },
  "keywords": [
    "mcp",
    "model-context-protocol",
    "cursor",
    "chat history",
    "conversation",
    "ai-assistant",
    "code-analysis",
    "typescript"
  ],
  "author": "",
  "license": "MIT",
  "dependencies": {
    "@modelcontextprotocol/sdk": "^1.12.1",
    "better-sqlite3": "9.2.2",
    "zod": "^3.22.4"
  },
  "devDependencies": {
    "@types/better-sqlite3": "^7.6.13",
    "@types/node": "^20.11.24",
    "@vitest/ui": "^3.2.2",
    "nodemon": "^3.1.10",
    "shx": "^0.3.4",
    "task-master-ai": "^0.16.1",
    "ts-node": "^10.9.2",
    "tsx": "^4.7.0",
    "typescript": "^5.3.3",
    "vitest": "^3.2.2"
  }
}
```
--------------------------------------------------------------------------------
/scripts/example_prd.txt:
--------------------------------------------------------------------------------
```
<context>
# Overview
[Provide a high-level overview of your product here. Explain what problem it solves, who it's for, and why it's valuable.]
# Core Features
[List and describe the main features of your product. For each feature, include:
- What it does
- Why it's important
- How it works at a high level]
# User Experience
[Describe the user journey and experience. Include:
- User personas
- Key user flows
- UI/UX considerations]
</context>
<PRD>
# Technical Architecture
[Outline the technical implementation details:
- System components
- Data models
- APIs and integrations
- Infrastructure requirements]
# Development Roadmap
[Break down the development process into phases:
- MVP requirements
- Future enhancements
- Do not think about timelines whatsoever -- all that matters is scope and detailing exactly what needs to be built in each phase so it can later be cut up into tasks]
# Logical Dependency Chain
[Define the logical order of development:
- Which features need to be built first (foundation)
- Getting as quickly as possible to a usable/visible front end that works
- Properly pacing and scoping each feature so it is atomic but can also be built upon and improved as development progresses]
# Risks and Mitigations
[Identify potential risks and how they'll be addressed:
- Technical challenges
- Figuring out the MVP that we can build upon
- Resource constraints]
# Appendix
[Include any additional information:
- Research findings
- Technical specifications]
</PRD>
```
--------------------------------------------------------------------------------
/.roo/rules/roo_rules.md:
--------------------------------------------------------------------------------
```markdown
---
description: Guidelines for creating and maintaining Roo Code rules to ensure consistency and effectiveness.
globs: .roo/rules/*.md
alwaysApply: true
---
- **Required Rule Structure:**
  ```markdown
  ---
  description: Clear, one-line description of what the rule enforces
  globs: path/to/files/*.ext, other/path/**/*
  alwaysApply: boolean
  ---
  - **Main Points in Bold**
    - Sub-points with details
    - Examples and explanations
  ```
- **File References:**
  - Use `[filename](mdc:path/to/file)` ([filename](mdc:filename)) to reference files
  - Example: [prisma.md](mdc:.roo/rules/prisma.md) for rule references
  - Example: [schema.prisma](mdc:prisma/schema.prisma) for code references
- **Code Examples:**
  - Use language-specific code blocks
  ```typescript
  // ✅ DO: Show good examples
  const goodExample = true;

  // ❌ DON'T: Show anti-patterns
  const badExample = false;
  ```
- **Rule Content Guidelines:**
  - Start with high-level overview
  - Include specific, actionable requirements
  - Show examples of correct implementation
  - Reference existing code when possible
  - Keep rules DRY by referencing other rules
- **Rule Maintenance:**
  - Update rules when new patterns emerge
  - Add examples from actual codebase
  - Remove outdated patterns
  - Cross-reference related rules
- **Best Practices:**
  - Use bullet points for clarity
  - Keep descriptions concise
  - Include both DO and DON'T examples
  - Reference actual code over theoretical examples
  - Use consistent formatting across rules
```
--------------------------------------------------------------------------------
/.roo/rules/self_improve.md:
--------------------------------------------------------------------------------
```markdown
---
description: Guidelines for continuously improving Roo Code rules based on emerging code patterns and best practices.
globs: **/*
alwaysApply: true
---
- **Rule Improvement Triggers:**
  - New code patterns not covered by existing rules
  - Repeated similar implementations across files
  - Common error patterns that could be prevented
  - New libraries or tools being used consistently
  - Emerging best practices in the codebase
- **Analysis Process:**
  - Compare new code with existing rules
  - Identify patterns that should be standardized
  - Look for references to external documentation
  - Check for consistent error handling patterns
  - Monitor test patterns and coverage
- **Rule Updates:**
  - **Add New Rules When:**
    - A new technology/pattern is used in 3+ files
    - Common bugs could be prevented by a rule
    - Code reviews repeatedly mention the same feedback
    - New security or performance patterns emerge
  - **Modify Existing Rules When:**
    - Better examples exist in the codebase
    - Additional edge cases are discovered
    - Related rules have been updated
    - Implementation details have changed
- **Example Pattern Recognition:**
  ```typescript
  // If you see repeated patterns like:
  const data = await prisma.user.findMany({
    select: { id: true, email: true },
    where: { status: 'ACTIVE' }
  });

  // Consider adding to [prisma.md](mdc:.roo/rules/prisma.md):
  // - Standard select fields
  // - Common where conditions
  // - Performance optimization patterns
  ```
- **Rule Quality Checks:**
  - Rules should be actionable and specific
  - Examples should come from actual code
  - References should be up to date
  - Patterns should be consistently enforced
- **Continuous Improvement:**
  - Monitor code review comments
  - Track common development questions
  - Update rules after major refactors
  - Add links to relevant documentation
  - Cross-reference related rules
- **Rule Deprecation:**
  - Mark outdated patterns as deprecated
  - Remove rules that no longer apply
  - Update references to deprecated rules
  - Document migration paths for old patterns
- **Documentation Updates:**
  - Keep examples synchronized with code
  - Update references to external docs
  - Maintain links between related rules
  - Document breaking changes
Follow [cursor_rules.md](mdc:.roo/rules/cursor_rules.md) for proper rule formatting and structure.
```
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
```yaml
name: CI

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  test:
    name: Test on Node.js ${{ matrix.node-version }}
    runs-on: ubuntu-latest
    strategy:
      matrix:
        node-version: [18.x, 20.x, 22.x]
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Setup Node.js ${{ matrix.node-version }}
        uses: actions/setup-node@v4
        with:
          node-version: ${{ matrix.node-version }}
          cache: 'yarn'
      - name: Install dependencies
        run: yarn install --frozen-lockfile
      - name: Build project
        run: yarn build
      - name: Run tests
        run: yarn test
      - name: Check TypeScript compilation
        run: yarn tsc --noEmit

  lint:
    name: Lint and Format Check
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20.x'
          cache: 'yarn'
      - name: Install dependencies
        run: yarn install --frozen-lockfile
      - name: Check formatting (if prettier is configured)
        run: |
          if [ -f ".prettierrc" ] || [ -f ".prettierrc.json" ] || [ -f ".prettierrc.js" ] || grep -q "prettier" package.json; then
            yarn prettier --check .
          else
            echo "Prettier not configured, skipping format check"
          fi
        continue-on-error: true
      - name: Run ESLint (if configured)
        run: |
          if [ -f ".eslintrc.js" ] || [ -f ".eslintrc.json" ] || [ -f "eslint.config.js" ] || grep -q "eslint" package.json; then
            yarn eslint src/
          else
            echo "ESLint not configured, skipping lint check"
          fi
        continue-on-error: true

  build-artifacts:
    name: Build and Upload Artifacts
    runs-on: ubuntu-latest
    needs: [test, lint]
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20.x'
          cache: 'yarn'
      - name: Install dependencies
        run: yarn install --frozen-lockfile
      - name: Build project
        run: yarn build
      - name: Upload build artifacts
        uses: actions/upload-artifact@v4
        with:
          name: dist-${{ github.sha }}
          path: dist/
          retention-days: 7

  security:
    name: Security Audit
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20.x'
          cache: 'yarn'
      - name: Install dependencies
        run: yarn install --frozen-lockfile
      - name: Run security audit
        run: yarn audit --level moderate
        continue-on-error: true
      - name: Check for known vulnerabilities
        run: |
          if command -v npm &> /dev/null; then
            npm audit --audit-level moderate
          fi
        continue-on-error: true
--------------------------------------------------------------------------------
/src/server.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
// Mock the MCP SDK
vi.mock('@modelcontextprotocol/sdk/server/mcp.js');
vi.mock('./tools/conversation-tools.js');
vi.mock('./database/reader.js');
const mockMcpServer = vi.mocked(McpServer);
describe('MCP Server', () => {
let mockServer: any;
beforeEach(() => {
// Reset the module registry so each test's dynamic import re-evaluates
// server.js instead of reusing the cached module from a previous test
vi.resetModules();
mockServer = {
tool: vi.fn(),
connect: vi.fn(),
close: vi.fn()
};
mockMcpServer.mockImplementation(() => mockServer);
});
afterEach(() => {
vi.clearAllMocks();
});
describe('Server Initialization', () => {
it('should create server with correct configuration', async () => {
// Import the server module to trigger initialization
await import('./server.js');
expect(mockMcpServer).toHaveBeenCalledWith({
name: 'cursor-chat-history-mcp',
version: '0.1.0'
});
});
it('should register all conversation tools', async () => {
await import('./server.js');
// Verify that the correct tools are registered
const expectedTools = [
'list_conversations',
'get_conversation',
'search_conversations',
'get_conversation_analytics',
'find_related_conversations',
'extract_conversation_elements',
'export_conversation_data'
];
// Check that the expected number of tools are registered
expect(mockServer.tool).toHaveBeenCalledTimes(expectedTools.length);
// Verify each tool is registered with proper parameters
expectedTools.forEach(toolName => {
expect(mockServer.tool).toHaveBeenCalledWith(
toolName,
expect.any(String),
expect.any(Object),
expect.any(Function)
);
});
});
});
describe('Tool Registration', () => {
it('should register tools with proper descriptions', async () => {
await import('./server.js');
const toolCalls = mockServer.tool.mock.calls;
// Check that each tool has a meaningful description
toolCalls.forEach(([toolName, description]: [string, string]) => {
expect(typeof toolName).toBe('string');
expect(typeof description).toBe('string');
expect(description.length).toBeGreaterThan(10);
});
});
it('should register tools with proper schemas', async () => {
await import('./server.js');
const toolCalls = mockServer.tool.mock.calls;
// Check that each tool has a schema object
toolCalls.forEach(([, , schema]: [string, string, any]) => {
expect(typeof schema).toBe('object');
expect(schema).not.toBeNull();
});
});
it('should register tools with handler functions', async () => {
await import('./server.js');
const toolCalls = mockServer.tool.mock.calls;
// Check that each tool has a handler function
toolCalls.forEach(([, , , handler]: [string, string, any, Function]) => {
expect(typeof handler).toBe('function');
});
});
});
describe('Error Handling', () => {
it('should handle server creation errors', async () => {
mockMcpServer.mockImplementation(() => {
throw new Error('Server creation failed');
});
// expect(fn).not.toThrow() only catches synchronous throws, so the async
// import must be awaited for the assertion to be meaningful
await expect(import('./server.js')).resolves.toBeDefined();
});
it('should handle tool registration errors', async () => {
mockServer.tool.mockImplementation(() => {
throw new Error('Tool registration failed');
});
await expect(import('./server.js')).resolves.toBeDefined();
});
});
});
```
--------------------------------------------------------------------------------
/src/tools/extraction-tools.ts:
--------------------------------------------------------------------------------
```typescript
import { z } from 'zod';
import { CursorDatabaseReader } from '../database/reader.js';
import type {
ExtractedElements,
ExportedData,
ConversationFilters
} from '../database/types.js';
import {
exportAsJSON,
exportAsCSV,
exportAsGraph,
createExportMetadata
} from '../utils/exporters.js';
import { DatabaseError } from '../utils/errors.js';
// Schema definitions
export const extractConversationElementsSchema = z.object({
conversationIds: z.array(z.string()).optional(),
elements: z.array(z.enum(['files', 'folders', 'languages', 'codeblocks', 'metadata', 'structure'])).optional().default(['files', 'codeblocks']),
includeContext: z.boolean().optional().default(false),
groupBy: z.enum(['conversation', 'element', 'none']).optional().default('conversation'),
filters: z.object({
minCodeLength: z.number().optional(),
fileExtensions: z.array(z.string()).optional(),
languages: z.array(z.string()).optional()
}).optional()
});
export const exportConversationDataSchema = z.object({
conversationIds: z.array(z.string()).optional(),
format: z.enum(['json', 'csv', 'graph']).optional().default('json'),
includeContent: z.boolean().optional().default(false),
includeRelationships: z.boolean().optional().default(false),
flattenStructure: z.boolean().optional().default(false),
filters: z.object({
minSize: z.number().optional(),
hasCodeBlocks: z.boolean().optional(),
projectPath: z.string().optional()
}).optional()
});
export type ExtractConversationElementsInput = z.infer<typeof extractConversationElementsSchema>;
export type ExportConversationDataInput = z.infer<typeof exportConversationDataSchema>;
/**
* Extract specific elements from conversations
*/
export async function extractConversationElements(
input: ExtractConversationElementsInput
): Promise<ExtractedElements> {
const reader = new CursorDatabaseReader();
try {
await reader.connect();
// Get conversation IDs to process
let conversationIds = input.conversationIds;
if (!conversationIds || conversationIds.length === 0) {
// Get all conversation IDs if none specified
conversationIds = await reader.getConversationIds({
format: 'both',
minLength: 1000
});
}
// Extract elements from conversations
const extractedData = await reader.extractConversationElements(
conversationIds,
input.elements,
{
includeContext: input.includeContext,
filters: input.filters
}
);
// Group data based on groupBy parameter
if (input.groupBy === 'conversation') {
return { conversations: extractedData };
} else if (input.groupBy === 'element') {
// Group by element type
const groupedData: Record<string, any[]> = {};
for (const elementType of input.elements) {
groupedData[elementType] = [];
for (const conversation of extractedData) {
if (conversation.elements[elementType]) {
if (Array.isArray(conversation.elements[elementType])) {
groupedData[elementType].push(...conversation.elements[elementType]);
} else {
groupedData[elementType].push(conversation.elements[elementType]);
}
}
}
}
return { conversations: groupedData } as any;
} else {
// Flatten all data
const flatData: any[] = [];
for (const conversation of extractedData) {
for (const elementType of input.elements) {
if (conversation.elements[elementType]) {
if (Array.isArray(conversation.elements[elementType])) {
flatData.push(...conversation.elements[elementType].map((item: any) => ({
...item,
conversationId: conversation.composerId,
elementType
})));
} else {
flatData.push({
...conversation.elements[elementType],
conversationId: conversation.composerId,
elementType
});
}
}
}
}
return { conversations: flatData } as any;
}
} catch (error) {
throw new DatabaseError(`Failed to extract conversation elements: ${error instanceof Error ? error.message : 'Unknown error'}`);
} finally {
reader.close();
}
}
/**
* Export conversation data in various formats
*/
export async function exportConversationData(
input: ExportConversationDataInput
): Promise<ExportedData> {
const reader = new CursorDatabaseReader();
try {
await reader.connect();
// Build filters
const filters: ConversationFilters = {
format: 'both',
minLength: input.filters?.minSize ?? 1000 // ?? rather than || so an explicit minSize of 0 is respected
};
if (input.filters?.hasCodeBlocks !== undefined) {
filters.hasCodeBlocks = input.filters.hasCodeBlocks;
}
if (input.filters?.projectPath) {
filters.projectPath = input.filters.projectPath;
}
// Get conversation IDs to export
let conversationIds = input.conversationIds;
if (!conversationIds || conversationIds.length === 0) {
conversationIds = await reader.getConversationIds(filters);
}
// Get conversation summaries
const summaries = await reader.getConversationSummariesForAnalytics(conversationIds);
// Get full conversation data if needed
let conversationData: Map<string, any> | undefined;
if (input.includeContent) {
conversationData = new Map();
for (const id of conversationIds) {
try {
const conversation = await reader.getConversationById(id);
if (conversation) {
conversationData.set(id, conversation);
}
} catch (error) {
console.error(`Failed to get full conversation data for ${id}:`, error);
}
}
}
// Export in requested format
let exportedData: any;
switch (input.format) {
case 'json':
exportedData = exportAsJSON(summaries, input.includeContent, conversationData);
break;
case 'csv':
exportedData = exportAsCSV(summaries, input.flattenStructure);
break;
case 'graph':
exportedData = exportAsGraph(summaries, input.includeRelationships);
break;
default:
exportedData = exportAsJSON(summaries, input.includeContent, conversationData);
}
// Create metadata
const metadata = createExportMetadata(
summaries.length,
conversationIds.length,
input.filters || {}
);
return {
format: input.format,
data: exportedData,
metadata
};
} catch (error) {
throw new DatabaseError(`Failed to export conversation data: ${error instanceof Error ? error.message : 'Unknown error'}`);
} finally {
reader.close();
}
}
```
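Below is a minimal usage sketch for the two tools above (illustrative only, not a file in the repository; the option values are placeholders). Inputs are parsed through the exported Zod schemas first so optional fields receive their defaults:
```typescript
import {
  extractConversationElements,
  extractConversationElementsSchema,
  exportConversationData,
  exportConversationDataSchema
} from './extraction-tools.js';

// Parse raw arguments through the schema so defaults like groupBy: 'conversation'
// and format: 'json' are filled in before calling the tool functions
const extracted = await extractConversationElements(
  extractConversationElementsSchema.parse({ elements: ['files', 'languages'], groupBy: 'element' })
);

// Export the same data as CSV, with nested structures flattened for spreadsheets
const exported = await exportConversationData(
  exportConversationDataSchema.parse({ format: 'csv', flattenStructure: true })
);
console.log(exported.metadata);
```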
--------------------------------------------------------------------------------
/src/tools/analytics-tools.ts:
--------------------------------------------------------------------------------
```typescript
import { z } from 'zod';
import { CursorDatabaseReader } from '../database/reader.js';
import type {
ConversationAnalytics,
RelatedConversationsResult,
ConversationFilters
} from '../database/types.js';
import {
calculateOverview,
calculateFileBreakdown,
calculateLanguageBreakdown,
calculateTemporalBreakdown,
calculateSizeDistribution
} from '../utils/analytics.js';
import {
findRelatedConversations as findRelatedConversationsUtil,
extractLanguagesFromCodeBlocks
} from '../utils/relationships.js';
import { DatabaseError } from '../utils/errors.js';
// Schema definitions
export const getConversationAnalyticsSchema = z.object({
scope: z.enum(['all', 'recent', 'project']).optional().default('all'),
projectPath: z.string().optional(),
recentDays: z.number().min(1).max(365).optional().default(30),
includeBreakdowns: z.array(z.enum(['files', 'languages', 'temporal', 'size'])).optional().default(['files', 'languages']),
includeConversationDetails: z.boolean().optional().default(false)
});
export const findRelatedConversationsSchema = z.object({
referenceConversationId: z.string().min(1),
relationshipTypes: z.array(z.enum(['files', 'folders', 'languages', 'size', 'temporal'])).optional().default(['files']),
maxResults: z.number().min(1).max(50).optional().default(10),
minScore: z.number().min(0).max(1).optional().default(0.1),
includeScoreBreakdown: z.boolean().optional().default(false)
});
export type GetConversationAnalyticsInput = z.infer<typeof getConversationAnalyticsSchema>;
export type FindRelatedConversationsInput = z.infer<typeof findRelatedConversationsSchema>;
/**
* Get comprehensive analytics and statistics about Cursor conversations
*/
export async function getConversationAnalytics(
input: GetConversationAnalyticsInput
): Promise<ConversationAnalytics> {
const reader = new CursorDatabaseReader();
try {
await reader.connect();
// Build filters based on scope
const filters: ConversationFilters = {
format: 'both',
minLength: 100 // Filter out only very small conversations
};
if (input.scope === 'project' && input.projectPath) {
filters.projectPath = input.projectPath;
}
// Get conversation IDs
const conversationIds = await reader.getConversationIds(filters);
// Apply recent filter if needed
let filteredIds = conversationIds;
if (input.scope === 'recent') {
// Approximate "recent" as the newest ~30% of conversations; timestamps are
// unavailable here, so this relies on IDs being returned in ROWID order (newest first)
const recentCount = Math.min(conversationIds.length, Math.floor(conversationIds.length * 0.3));
filteredIds = conversationIds.slice(0, recentCount);
}
// Get conversation summaries
const summaries = await reader.getConversationSummariesForAnalytics(filteredIds);
// Calculate overview
const overview = calculateOverview(summaries);
// Calculate breakdowns
const breakdowns: any = {};
if (input.includeBreakdowns.includes('files')) {
breakdowns.files = calculateFileBreakdown(summaries);
}
if (input.includeBreakdowns.includes('languages')) {
// Get conversations with code blocks for language analysis
const conversationsWithCode = await reader.getConversationsWithCodeBlocks(filteredIds);
breakdowns.languages = calculateLanguageBreakdown(conversationsWithCode);
}
if (input.includeBreakdowns.includes('temporal')) {
breakdowns.temporal = calculateTemporalBreakdown(summaries, filteredIds);
}
if (input.includeBreakdowns.includes('size')) {
breakdowns.size = calculateSizeDistribution(summaries);
}
return {
overview,
breakdowns,
scope: {
type: input.scope,
projectPath: input.projectPath,
recentDays: input.scope === 'recent' ? input.recentDays : undefined,
totalScanned: filteredIds.length
},
// Only include conversation details when requested (to control response size)
conversationIds: input.includeConversationDetails ? filteredIds : [],
conversations: input.includeConversationDetails ? summaries.map(s => ({
composerId: s.composerId,
messageCount: s.messageCount,
size: s.conversationSize,
files: s.relevantFiles.slice(0, 2), // Top 2 files only
hasCodeBlocks: s.codeBlockCount > 0
})) : []
};
} catch (error) {
throw new DatabaseError(`Failed to get conversation analytics: ${error instanceof Error ? error.message : 'Unknown error'}`);
} finally {
reader.close();
}
}
/**
* Find conversations related to a reference conversation
*/
export async function findRelatedConversations(
input: FindRelatedConversationsInput
): Promise<RelatedConversationsResult> {
const reader = new CursorDatabaseReader();
try {
await reader.connect();
// Get reference conversation summary
const referenceSummary = await reader.getConversationSummary(input.referenceConversationId, {
includeFirstMessage: true,
includeCodeBlockCount: true,
includeFileList: true,
includeAttachedFolders: true,
maxFirstMessageLength: 150
});
if (!referenceSummary) {
throw new DatabaseError(`Reference conversation ${input.referenceConversationId} not found`);
}
// Get all conversation IDs for comparison
const allConversationIds = await reader.getConversationIds({
format: 'both',
minLength: 100
});
// Get summaries for all conversations
const allSummaries = await reader.getConversationSummariesForAnalytics(allConversationIds);
// Extract languages from reference conversation if needed
let referenceLanguages: string[] = [];
if (input.relationshipTypes.includes('languages')) {
const conversationsWithCode = await reader.getConversationsWithCodeBlocks([input.referenceConversationId]);
if (conversationsWithCode.length > 0) {
referenceLanguages = extractLanguagesFromCodeBlocks(conversationsWithCode[0].codeBlocks);
}
}
// Find related conversations
const related = findRelatedConversationsUtil(
referenceSummary,
allSummaries,
allConversationIds,
{
relationshipTypes: input.relationshipTypes,
maxResults: input.maxResults,
minScore: input.minScore,
includeScoreBreakdown: input.includeScoreBreakdown
}
);
return {
reference: {
composerId: referenceSummary.composerId,
files: referenceSummary.relevantFiles,
folders: referenceSummary.attachedFolders,
languages: referenceLanguages,
messageCount: referenceSummary.messageCount,
size: referenceSummary.conversationSize
},
related
};
} catch (error) {
throw new DatabaseError(`Failed to find related conversations: ${error instanceof Error ? error.message : 'Unknown error'}`);
} finally {
reader.close();
}
}
```
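The analytics tools follow the same schema-then-call pattern; a short sketch (the project path and conversation ID are placeholders):
```typescript
import {
  getConversationAnalytics,
  getConversationAnalyticsSchema,
  findRelatedConversations,
  findRelatedConversationsSchema
} from './analytics-tools.js';

// Project-scoped analytics with the default file and language breakdowns
const analytics = await getConversationAnalytics(
  getConversationAnalyticsSchema.parse({ scope: 'project', projectPath: '/path/to/project' })
);
console.log(analytics.overview.totalConversations);

// Rank other conversations by shared files against a reference conversation
const related = await findRelatedConversations(
  findRelatedConversationsSchema.parse({ referenceConversationId: 'some-composer-id' })
);
console.log(related.related);
```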
--------------------------------------------------------------------------------
/src/utils/validation.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Validation utilities for MCP tool parameters
*/
import { z } from 'zod';
import {
ValidationError,
MissingParameterError,
InvalidParameterError
} from './errors.js';
/**
* Validates that a required parameter is present and not empty
*/
export function validateRequired<T>(
value: T | undefined | null,
paramName: string
): T {
if (value === undefined || value === null) {
throw new MissingParameterError(paramName);
}
if (typeof value === 'string' && value.trim() === '') {
throw new InvalidParameterError(paramName, value, 'non-empty string');
}
return value;
}
/**
* Validates that a string parameter meets minimum length requirements
*/
export function validateStringLength(
value: string | undefined,
paramName: string,
minLength: number = 1,
maxLength?: number
): string | undefined {
if (value === undefined) {
return undefined;
}
if (typeof value !== 'string') {
throw new InvalidParameterError(paramName, value, 'string');
}
if (value.length < minLength) {
throw new InvalidParameterError(
paramName,
value,
`string with at least ${minLength} characters`
);
}
if (maxLength !== undefined && value.length > maxLength) {
throw new InvalidParameterError(
paramName,
value,
`string with at most ${maxLength} characters`
);
}
return value;
}
/**
* Validates that a number parameter is within acceptable range
*/
export function validateNumberRange(
value: number | undefined,
paramName: string,
min?: number,
max?: number
): number | undefined {
if (value === undefined) {
return undefined;
}
if (typeof value !== 'number' || isNaN(value)) {
throw new InvalidParameterError(paramName, value, 'number');
}
if (min !== undefined && value < min) {
throw new InvalidParameterError(
paramName,
value,
`number >= ${min}`
);
}
if (max !== undefined && value > max) {
throw new InvalidParameterError(
paramName,
value,
`number <= ${max}`
);
}
return value;
}
/**
* Validates that an array parameter meets length requirements
*/
export function validateArrayLength<T>(
value: T[] | undefined,
paramName: string,
minLength: number = 0,
maxLength?: number
): T[] | undefined {
if (value === undefined) {
return undefined;
}
if (!Array.isArray(value)) {
throw new InvalidParameterError(paramName, value, 'array');
}
if (value.length < minLength) {
throw new InvalidParameterError(
paramName,
value,
`array with at least ${minLength} items`
);
}
if (maxLength !== undefined && value.length > maxLength) {
throw new InvalidParameterError(
paramName,
value,
`array with at most ${maxLength} items`
);
}
return value;
}
/**
* Validates that a value is one of the allowed enum values
*/
export function validateEnum<T extends string>(
value: T | undefined,
paramName: string,
allowedValues: readonly T[]
): T | undefined {
if (value === undefined) {
return undefined;
}
if (!allowedValues.includes(value)) {
throw new InvalidParameterError(
paramName,
value,
`one of: ${allowedValues.join(', ')}`
);
}
return value;
}
/**
* Validates that a conversation ID has the correct format
*/
export function validateConversationId(conversationId: string): string {
if (!conversationId || conversationId.trim() === '') {
throw new MissingParameterError('conversationId');
}
validateStringLength(conversationId, 'conversationId', 1, 100);
// Basic format validation - should be alphanumeric with possible hyphens/underscores
if (!/^[a-zA-Z0-9_-]+$/.test(conversationId)) {
throw new InvalidParameterError(
'conversationId',
conversationId,
'alphanumeric string with optional hyphens and underscores'
);
}
return conversationId;
}
/**
* Validates that a bubble ID has the correct format
*/
export function validateBubbleId(bubbleId: string): string {
if (!bubbleId || bubbleId.trim() === '') {
throw new MissingParameterError('bubbleId');
}
validateStringLength(bubbleId, 'bubbleId', 1, 100);
// Basic format validation - should be alphanumeric with possible hyphens/underscores
if (!/^[a-zA-Z0-9_-]+$/.test(bubbleId)) {
throw new InvalidParameterError(
'bubbleId',
bubbleId,
'alphanumeric string with optional hyphens and underscores'
);
}
return bubbleId;
}
/**
* Validates search query parameters
*/
export function validateSearchQuery(query: string): string {
if (!query || query.trim() === '') {
throw new MissingParameterError('query');
}
validateStringLength(query, 'query', 1, 1000);
// The whitespace-only case is already rejected above, so the query is
// guaranteed non-empty after trimming
return query.trim();
}
/**
* Validates file path parameters
*/
export function validateFilePath(path: string | undefined, paramName: string): string | undefined {
if (path === undefined) {
return undefined;
}
validateStringLength(path, paramName, 1, 1000);
// Basic path validation - should not contain null bytes or other dangerous characters
if (path.includes('\0')) {
throw new InvalidParameterError(
paramName,
path,
'valid file path without null bytes'
);
}
return path;
}
/**
* Validates project path parameters
*/
export function validateProjectPath(projectPath: string): string {
if (!projectPath || projectPath.trim() === '') {
throw new MissingParameterError('projectPath');
}
validateStringLength(projectPath, 'projectPath', 1, 1000);
// Basic path validation
if (projectPath.includes('\0')) {
throw new InvalidParameterError(
'projectPath',
projectPath,
'valid project path without null bytes'
);
}
return projectPath;
}
/**
* Validates and sanitizes input using a Zod schema
*/
export function validateWithSchema<T>(
input: unknown,
schema: z.ZodSchema<T>,
context: string = 'input'
): T {
try {
return schema.parse(input);
} catch (error) {
if (error instanceof z.ZodError) {
const firstIssue = error.issues[0];
const path = firstIssue.path.join('.');
const field = path || 'root';
throw new ValidationError(
`Validation error in ${context}: ${firstIssue.message} at ${field}`,
field,
'received' in firstIssue ? firstIssue.received : undefined
);
}
throw new ValidationError(
`Validation error in ${context}: ${error instanceof Error ? error.message : 'Unknown error'}`
);
}
}
/**
* Validates boolean parameters with proper type checking
*/
export function validateBoolean(
value: boolean | undefined,
paramName: string
): boolean | undefined {
if (value === undefined) {
return undefined;
}
if (typeof value !== 'boolean') {
throw new InvalidParameterError(paramName, value, 'boolean');
}
return value;
}
/**
* Validates limit parameters commonly used in pagination
*/
export function validateLimit(limit: number | undefined, defaultLimit: number = 10): number {
if (limit === undefined) {
return defaultLimit;
}
return validateNumberRange(limit, 'limit', 1, 1000) ?? defaultLimit;
}
/**
* Validates offset parameters commonly used in pagination
*/
export function validateOffset(offset: number | undefined): number {
if (offset === undefined) {
return 0;
}
return validateNumberRange(offset, 'offset', 0) ?? 0;
}
/**
* Validates context lines parameter for search results
*/
export function validateContextLines(contextLines: number | undefined): number {
if (contextLines === undefined) {
return 3;
}
return validateNumberRange(contextLines, 'contextLines', 0, 10) ?? 3;
}
```
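For illustration, a sketch of how these validators might back a paginated search handler; the handler and its schema are hypothetical, and only the imported validators come from the module above:
```typescript
import { z } from 'zod';
import {
  validateWithSchema,
  validateSearchQuery,
  validateLimit,
  validateOffset
} from './validation.js';

// Hypothetical input shape for a search-style tool
const searchInputSchema = z.object({
  query: z.string(),
  limit: z.number().optional(),
  offset: z.number().optional()
});

function parseSearchInput(raw: unknown) {
  // Structural validation first, with a named context for error messages
  const input = validateWithSchema(raw, searchInputSchema, 'search_conversations');
  return {
    query: validateSearchQuery(input.query), // trims, rejects empty/whitespace-only queries
    limit: validateLimit(input.limit),       // defaults to 10, throws outside 1..1000
    offset: validateOffset(input.offset)     // defaults to 0, must be >= 0
  };
}
```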
--------------------------------------------------------------------------------
/src/utils/errors.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Custom error classes for the Cursor Chat History MCP server
*/
/**
* Base error class for all MCP-related errors
*/
export class MCPError extends Error {
public readonly code: string;
public readonly statusCode: number;
constructor(message: string, code: string = 'MCP_ERROR', statusCode: number = 500) {
super(message);
this.name = this.constructor.name;
this.code = code;
this.statusCode = statusCode;
// Maintains proper stack trace for where our error was thrown (only available on V8)
if (Error.captureStackTrace) {
Error.captureStackTrace(this, this.constructor);
}
}
}
/**
* Error thrown when database operations fail
*/
export class DatabaseError extends MCPError {
constructor(message: string, originalError?: Error) {
super(
originalError ? `Database error: ${message}. Original: ${originalError.message}` : `Database error: ${message}`,
'DATABASE_ERROR',
500
);
if (originalError && originalError.stack) {
this.stack = `${this.stack}\nCaused by: ${originalError.stack}`;
}
}
}
/**
* Error thrown when database connection fails
*/
export class DatabaseConnectionError extends DatabaseError {
constructor(dbPath: string, originalError?: Error) {
super(
`Failed to connect to database at path: ${dbPath}`,
originalError
);
// Override the code property by redefining it
Object.defineProperty(this, 'code', {
value: 'DATABASE_CONNECTION_ERROR',
writable: false,
enumerable: true,
configurable: false
});
}
}
/**
* Error thrown when a conversation is not found
*/
export class ConversationNotFoundError extends MCPError {
public readonly conversationId: string;
constructor(conversationId: string) {
super(`Conversation not found: ${conversationId}`, 'CONVERSATION_NOT_FOUND', 404);
this.conversationId = conversationId;
}
}
/**
* Error thrown when a bubble message is not found
*/
export class BubbleMessageNotFoundError extends MCPError {
public readonly composerId: string;
public readonly bubbleId: string;
constructor(composerId: string, bubbleId: string) {
super(
`Bubble message not found: ${bubbleId} in conversation ${composerId}`,
'BUBBLE_MESSAGE_NOT_FOUND',
404
);
this.composerId = composerId;
this.bubbleId = bubbleId;
}
}
/**
* Error thrown when input validation fails
*/
export class ValidationError extends MCPError {
public readonly field?: string;
public readonly value?: any;
constructor(message: string, field?: string, value?: any) {
super(`Validation error: ${message}`, 'VALIDATION_ERROR', 400);
this.field = field;
this.value = value;
}
}
/**
* Error thrown when required parameters are missing
*/
export class MissingParameterError extends ValidationError {
constructor(parameterName: string) {
super(`Missing required parameter: ${parameterName}`, parameterName);
Object.defineProperty(this, 'code', {
value: 'MISSING_PARAMETER',
writable: false,
enumerable: true,
configurable: false
});
}
}
/**
* Error thrown when parameter values are invalid
*/
export class InvalidParameterError extends ValidationError {
constructor(parameterName: string, value: any, expectedType?: string) {
const message = expectedType
? `Invalid parameter '${parameterName}': expected ${expectedType}, got ${typeof value}`
: `Invalid parameter '${parameterName}': ${value}`;
super(message, parameterName, value);
Object.defineProperty(this, 'code', {
value: 'INVALID_PARAMETER',
writable: false,
enumerable: true,
configurable: false
});
}
}
/**
* Error thrown when file system operations fail
*/
export class FileSystemError extends MCPError {
public readonly path: string;
constructor(message: string, path: string, originalError?: Error) {
super(
originalError ? `File system error: ${message}. Original: ${originalError.message}` : `File system error: ${message}`,
'FILESYSTEM_ERROR',
500
);
this.path = path;
if (originalError && originalError.stack) {
this.stack = `${this.stack}\nCaused by: ${originalError.stack}`;
}
}
}
/**
* Error thrown when database path cannot be detected
*/
export class DatabasePathNotFoundError extends FileSystemError {
constructor(attemptedPaths: string[]) {
super(
`Could not find Cursor database. Attempted paths: ${attemptedPaths.join(', ')}`,
attemptedPaths[0] || 'unknown'
);
Object.defineProperty(this, 'code', {
value: 'DATABASE_PATH_NOT_FOUND',
writable: false,
enumerable: true,
configurable: false
});
}
}
/**
* Error thrown when parsing chat data fails
*/
export class ConversationParseError extends MCPError {
public readonly conversationId?: string;
constructor(message: string, conversationId?: string, originalError?: Error) {
super(
originalError ? `Parse error: ${message}. Original: ${originalError.message}` : `Parse error: ${message}`,
'CONVERSATION_PARSE_ERROR',
500
);
this.conversationId = conversationId;
if (originalError && originalError.stack) {
this.stack = `${this.stack}\nCaused by: ${originalError.stack}`;
}
}
}
/**
* Error thrown when search operations fail
*/
export class SearchError extends MCPError {
public readonly query: string;
constructor(message: string, query: string, originalError?: Error) {
super(
originalError ? `Search error: ${message}. Original: ${originalError.message}` : `Search error: ${message}`,
'SEARCH_ERROR',
500
);
this.query = query;
if (originalError && originalError.stack) {
this.stack = `${this.stack}\nCaused by: ${originalError.stack}`;
}
}
}
/**
* Error thrown when cache operations fail
*/
export class CacheError extends MCPError {
public readonly operation: string;
public readonly key?: string;
constructor(message: string, operation: string, key?: string, originalError?: Error) {
super(
originalError ? `Cache error: ${message}. Original: ${originalError.message}` : `Cache error: ${message}`,
'CACHE_ERROR',
500
);
this.operation = operation;
this.key = key;
if (originalError && originalError.stack) {
this.stack = `${this.stack}\nCaused by: ${originalError.stack}`;
}
}
}
/**
* Utility function to check if an error is an instance of MCPError
*/
export function isMCPError(error: any): error is MCPError {
return error instanceof MCPError;
}
/**
* Utility function to extract error information for logging
*/
export function getErrorInfo(error: any): {
message: string;
code: string;
statusCode: number;
stack?: string;
originalError?: string;
} {
// Handle null and undefined
if (error === null || error === undefined) {
return {
message: 'Unknown error occurred',
code: 'UNKNOWN_ERROR',
statusCode: 500,
};
}
if (isMCPError(error)) {
const result: any = {
message: error.message,
code: error.code,
statusCode: error.statusCode,
stack: error.stack,
};
// Extract original error info for nested errors
if (error instanceof DatabaseError ||
error instanceof FileSystemError ||
error instanceof ConversationParseError ||
error instanceof SearchError ||
error instanceof CacheError) {
// Check if the error message contains "Original: " which indicates a nested error
const originalMatch = error.message.match(/Original: (.+)$/);
if (originalMatch) {
result.originalError = originalMatch[1];
}
}
return result;
}
if (error instanceof Error) {
return {
message: error.message,
code: 'UNKNOWN_ERROR',
statusCode: 500,
stack: error.stack,
};
}
// Handle objects with toString method
if (error && typeof error === 'object' && typeof error.toString === 'function') {
return {
message: error.toString(),
code: 'UNKNOWN_ERROR',
statusCode: 500,
};
}
return {
message: String(error),
code: 'UNKNOWN_ERROR',
statusCode: 500,
};
}
```
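A sketch of the intended catch-path usage, assuming a generic async tool handler (the wrapper itself is illustrative):
```typescript
import { DatabaseError, isMCPError, getErrorInfo } from './errors.js';

async function runTool(work: () => Promise<unknown>): Promise<unknown> {
  try {
    return await work();
  } catch (error) {
    // Known MCP errors keep their specific code and status for logging
    if (isMCPError(error)) {
      const info = getErrorInfo(error);
      console.error(`[${info.code}] ${info.message} (status ${info.statusCode})`);
      throw error;
    }
    // Anything else is wrapped so callers see a consistent error shape
    throw new DatabaseError('Unexpected failure', error instanceof Error ? error : undefined);
  }
}
```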
--------------------------------------------------------------------------------
/.taskmaster/reports/task-complexity-report.json:
--------------------------------------------------------------------------------
```json
{
"meta": {
"generatedAt": "2025-06-06T21:31:13.096Z",
"tasksAnalyzed": 15,
"totalTasks": 15,
"analysisCount": 15,
"thresholdScore": 5,
"projectName": "Taskmaster",
"usedResearch": true
},
"complexityAnalysis": [
{
"taskId": 1,
"taskTitle": "Add SQLite Database Dependencies",
"complexityScore": 2,
"recommendedSubtasks": 3,
"expansionPrompt": "Break down the process of adding the better-sqlite3 dependency into subtasks such as: (1) Install the package, (2) Update package.json and verify version compatibility, (3) Test the installation by importing and connecting in a test file.",
"reasoning": "This task is straightforward, involving package installation and basic verification. The main complexity lies in ensuring compatibility and confirming the dependency is correctly set up."
},
{
"taskId": 2,
"taskTitle": "Implement Database Reader Module",
"complexityScore": 6,
"recommendedSubtasks": 5,
"expansionPrompt": "Expand into subtasks such as: (1) Create the DatabaseReader class skeleton, (2) Implement connection logic, (3) Add query execution methods, (4) Handle both legacy and modern formats, (5) Write unit tests with mocks.",
"reasoning": "This task requires designing a reusable module, handling multiple data formats, and ensuring robust testing, which increases its complexity."
},
{
"taskId": 3,
"taskTitle": "Create Conversation Parser",
"complexityScore": 6,
"recommendedSubtasks": 5,
"expansionPrompt": "Expand into subtasks such as: (1) Create the ConversationParser class, (2) Implement legacy format parsing, (3) Implement modern format parsing, (4) Extract messages, code blocks, files, and timestamps, (5) Write unit tests for both formats.",
"reasoning": "Parsing and normalizing two distinct data formats, extracting multiple data types, and ensuring correctness through tests adds moderate complexity."
},
{
"taskId": 4,
"taskTitle": "Implement List Conversations Tool",
"complexityScore": 7,
"recommendedSubtasks": 6,
"expansionPrompt": "Expand into subtasks such as: (1) Define tool interface and options, (2) Integrate with DatabaseReader, (3) Implement filtering logic, (4) Handle both formats, (5) Implement ordering by ROWID, (6) Write integration tests.",
"reasoning": "This tool requires integrating multiple modules, supporting flexible filtering, and ensuring compatibility with both data formats, increasing complexity."
},
{
"taskId": 5,
"taskTitle": "Implement Get Conversation Tool",
"complexityScore": 6,
"recommendedSubtasks": 5,
"expansionPrompt": "Expand into subtasks such as: (1) Define tool interface, (2) Fetch conversation data using DatabaseReader, (3) Parse data with ConversationParser, (4) Handle bubble message resolution, (5) Write integration tests.",
"reasoning": "Fetching and parsing conversation data with format-specific logic and ensuring correct content retrieval requires careful implementation and testing."
},
{
"taskId": 6,
"taskTitle": "Implement Get Conversation Summary Tool",
"complexityScore": 5,
"recommendedSubtasks": 4,
"expansionPrompt": "Expand into subtasks such as: (1) Define summary options, (2) Extract key information using DatabaseReader and ConversationParser, (3) Implement summary formatting, (4) Write unit tests for different options.",
"reasoning": "Summarizing conversations is less complex than full retrieval but still requires handling multiple formats and customizable output."
},
{
"taskId": 7,
"taskTitle": "Implement Search Conversations Tool",
"complexityScore": 7,
"recommendedSubtasks": 6,
"expansionPrompt": "Expand into subtasks such as: (1) Define search interface and options, (2) Implement SQLite LIKE-based search, (3) Support search types (all, summarization, code, files), (4) Handle both formats and bubble messages, (5) Implement context retrieval, (6) Write integration tests.",
"reasoning": "Implementing efficient, flexible search across multiple formats and content types, with context handling, adds significant complexity."
},
{
"taskId": 8,
"taskTitle": "Implement Get Bubble Message Tool",
"complexityScore": 4,
"recommendedSubtasks": 3,
"expansionPrompt": "Expand into subtasks such as: (1) Define tool interface, (2) Fetch bubble message by ID, (3) Implement error handling for missing IDs or legacy format, (4) Write unit tests.",
"reasoning": "This is a focused retrieval task with some error handling, making it less complex than broader tools."
},
{
"taskId": 9,
"taskTitle": "Implement Get Recent Conversations Tool",
"complexityScore": 5,
"recommendedSubtasks": 4,
"expansionPrompt": "Expand into subtasks such as: (1) Define tool interface and options, (2) Implement ROWID-based ordering and filtering, (3) Optimize queries with indexes, (4) Write integration tests.",
"reasoning": "Retrieving recent conversations with filtering and ordering is moderately complex, especially with performance considerations."
},
{
"taskId": 10,
"taskTitle": "Implement Get Conversations by Project Tool",
"complexityScore": 6,
"recommendedSubtasks": 5,
"expansionPrompt": "Expand into subtasks such as: (1) Define filtering options (project path, file pattern, etc.), (2) Query using SQLite JSON functions, (3) Implement ordering by recency or relevance, (4) Handle both formats, (5) Write unit tests.",
"reasoning": "Filtering by project and files using JSON queries and supporting multiple formats increases the complexity of this tool."
},
{
"taskId": 11,
"taskTitle": "Implement Detect Conversation Format Tool",
"complexityScore": 3,
"recommendedSubtasks": 2,
"expansionPrompt": "Expand into subtasks such as: (1) Implement format detection logic, (2) Handle edge cases and ambiguous formats, (3) Write unit tests.",
"reasoning": "This is a simple detection task with some edge case handling, resulting in low complexity."
},
{
"taskId": 12,
"taskTitle": "Implement Cross-Platform Database Path Detection",
"complexityScore": 4,
"recommendedSubtasks": 3,
"expansionPrompt": "Expand into subtasks such as: (1) Implement OS detection logic, (2) Handle path resolution for each platform, (3) Implement fallback and user-configurable paths, (4) Write unit tests for each platform.",
"reasoning": "Handling OS-specific logic and fallback mechanisms adds some complexity, but the scope is limited."
},
{
"taskId": 13,
"taskTitle": "Implement Error Handling and Validation",
"complexityScore": 8,
"recommendedSubtasks": 7,
"expansionPrompt": "Expand into subtasks such as: (1) Design custom error classes, (2) Implement error handling in database operations, (3) Integrate Zod validation in all tools, (4) Add try-catch blocks for critical operations, (5) Implement informative error messages, (6) Write unit tests for error scenarios, (7) Write integration tests for error propagation.",
"reasoning": "Comprehensive error handling and validation across all tools, with custom classes and integration, is a complex and critical task."
},
{
"taskId": 14,
"taskTitle": "Implement Caching Mechanism",
"complexityScore": 5,
"recommendedSubtasks": 4,
"expansionPrompt": "Expand into subtasks such as: (1) Design cache structure and strategy, (2) Implement caching logic for key data, (3) Implement cache invalidation, (4) Write unit tests and benchmarks.",
"reasoning": "Implementing a basic caching layer is moderately complex, especially with invalidation and performance testing."
},
{
"taskId": 15,
"taskTitle": "Update MCP Server Configuration",
"complexityScore": 7,
"recommendedSubtasks": 6,
"expansionPrompt": "Expand into subtasks such as: (1) Integrate all conversation tools into the server, (2) Remove deprecated tools, (3) Update server name and configuration, (4) Implement error handling and logging, (5) Add graceful shutdown logic, (6) Write integration tests for server endpoints.",
"reasoning": "Coordinating integration of multiple tools, updating configuration, and ensuring robust server behavior makes this a complex and multi-faceted task."
}
]
}
```
--------------------------------------------------------------------------------
/src/utils/cache.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Cache utility for improving performance of frequently accessed data
*/
/**
* Configuration options for cache instances
*/
export interface CacheConfig {
/** Maximum number of entries to store in cache */
maxSize?: number;
/** Default time-to-live in milliseconds */
defaultTTL?: number;
/** Eviction policy when cache is full */
evictionPolicy?: 'lru' | 'fifo';
/** Whether to enable automatic cleanup of expired entries */
enableCleanup?: boolean;
/** Interval for automatic cleanup in milliseconds */
cleanupInterval?: number;
}
/**
* Cache entry with metadata
*/
export interface CacheEntry<T = any> {
/** The cached value */
value: T;
/** Timestamp when the entry was created */
createdAt: number;
/** Timestamp when the entry was last accessed */
lastAccessedAt: number;
/** Time-to-live in milliseconds */
ttl?: number;
/** Timestamp when the entry expires */
expiresAt?: number;
}
/**
* Cache statistics for monitoring
*/
export interface CacheStats {
/** Total number of cache hits */
hits: number;
/** Total number of cache misses */
misses: number;
/** Current number of entries in cache */
size: number;
/** Maximum size configured for cache */
maxSize: number;
/** Cache hit rate as a percentage */
hitRate: number;
/** Number of entries evicted due to size limits */
evictions: number;
/** Number of entries expired due to TTL */
expirations: number;
}
/**
* Basic cache implementation with TTL, size limits, and eviction policies
*/
export class Cache<T = any> {
private entries: Map<string, CacheEntry<T>> = new Map();
private accessOrder: string[] = []; // For LRU tracking
private insertOrder: string[] = []; // For FIFO tracking
private config: Required<CacheConfig>;
private stats: CacheStats;
private cleanupTimer?: NodeJS.Timeout;
constructor(config: CacheConfig = {}) {
this.config = {
maxSize: config.maxSize ?? 1000,
defaultTTL: config.defaultTTL ?? 5 * 60 * 1000, // 5 minutes
evictionPolicy: config.evictionPolicy ?? 'lru',
enableCleanup: config.enableCleanup ?? true,
cleanupInterval: config.cleanupInterval ?? 60 * 1000, // 1 minute
};
this.stats = {
hits: 0,
misses: 0,
size: 0,
maxSize: this.config.maxSize,
hitRate: 0,
evictions: 0,
expirations: 0,
};
if (this.config.enableCleanup) {
this.startCleanupTimer();
}
}
/**
* Get a value from the cache
*/
get(key: string): T | undefined {
const entry = this.entries.get(key);
if (!entry) {
this.stats.misses++;
this.updateHitRate();
return undefined;
}
if (this.isExpired(entry)) {
this.delete(key);
this.stats.expirations++;
this.stats.misses++;
this.updateHitRate();
return undefined;
}
this.updateAccessOrder(key);
this.stats.hits++;
this.updateHitRate();
return entry.value;
}
/**
* Set a value in the cache
*/
set(key: string, value: T, customTtl?: number): void {
const now = Date.now();
const ttl = customTtl ?? this.config.defaultTTL;
const expiresAt = now + ttl;
const entry: CacheEntry<T> = {
value,
createdAt: now,
lastAccessedAt: now,
ttl,
expiresAt
};
if (this.entries.has(key)) {
this.removeFromTrackingArrays(key);
}
if (this.entries.size >= this.config.maxSize && !this.entries.has(key)) {
this.evictEntry();
}
this.entries.set(key, entry);
this.accessOrder.push(key);
this.insertOrder.push(key);
this.updateSize();
}
/**
* Delete a value from the cache
*/
delete(key: string): boolean {
const existed = this.entries.delete(key);
if (existed) {
this.removeFromTrackingArrays(key);
this.updateSize();
}
return existed;
}
/**
* Check if a key exists in the cache (without affecting access order)
*/
has(key: string): boolean {
const entry = this.entries.get(key);
if (!entry) {
return false;
}
if (this.isExpired(entry)) {
this.delete(key);
this.stats.expirations++;
return false;
}
return true;
}
/**
* Clear all entries from the cache
*/
clear(): void {
this.entries.clear();
this.accessOrder = [];
this.insertOrder = [];
this.updateSize();
}
/**
* Get current cache size
*/
size(): number {
return this.entries.size;
}
/**
* Get cache statistics
*/
getStats(): CacheStats {
return { ...this.stats };
}
/**
* Reset cache statistics
*/
resetStats(): void {
this.stats = {
hits: 0,
misses: 0,
size: this.entries.size,
maxSize: this.config.maxSize,
hitRate: 0,
evictions: 0,
expirations: 0,
};
}
/**
* Manually trigger cleanup of expired entries
*/
cleanup(): number {
const initialSize = this.entries.size;
const now = Date.now();
const expiredKeys: string[] = [];
for (const [key, entry] of this.entries) {
if (this.isExpired(entry)) {
expiredKeys.push(key);
}
}
for (const key of expiredKeys) {
this.delete(key);
this.stats.expirations++;
}
return initialSize - this.entries.size;
}
/**
* Get all keys in the cache
*/
keys(): string[] {
return Array.from(this.entries.keys());
}
/**
* Get all values in the cache
*/
values(): T[] {
return Array.from(this.entries.values()).map(entry => entry.value);
}
/**
* Destroy the cache and cleanup resources
*/
destroy(): void {
if (this.cleanupTimer) {
clearInterval(this.cleanupTimer);
this.cleanupTimer = undefined;
}
this.clear();
}
/**
* Check if an entry has expired
*/
private isExpired(entry: CacheEntry<T>): boolean {
if (!entry.expiresAt) {
return false;
}
return Date.now() > entry.expiresAt;
}
/**
* Update access order for LRU tracking
*/
private updateAccessOrder(key: string): void {
const index = this.accessOrder.indexOf(key);
if (index > -1) {
this.accessOrder.splice(index, 1);
}
this.accessOrder.push(key);
}
/**
* Remove key from tracking arrays
*/
private removeFromTrackingArrays(key: string): void {
const accessIndex = this.accessOrder.indexOf(key);
if (accessIndex > -1) {
this.accessOrder.splice(accessIndex, 1);
}
const insertIndex = this.insertOrder.indexOf(key);
if (insertIndex > -1) {
this.insertOrder.splice(insertIndex, 1);
}
}
/**
* Evict an entry based on the configured eviction policy
*/
private evictEntry(): void {
let keyToEvict: string | undefined;
if (this.config.evictionPolicy === 'lru') {
keyToEvict = this.accessOrder[0];
} else if (this.config.evictionPolicy === 'fifo') {
keyToEvict = this.insertOrder[0];
}
if (keyToEvict) {
this.delete(keyToEvict);
this.stats.evictions++;
}
}
/**
* Update cache size in stats
*/
private updateSize(): void {
this.stats.size = this.entries.size;
}
/**
* Update hit rate calculation
*/
private updateHitRate(): void {
const total = this.stats.hits + this.stats.misses;
this.stats.hitRate = total > 0 ? (this.stats.hits / total) * 100 : 0;
}
/**
* Start automatic cleanup timer
*/
private startCleanupTimer(): void {
this.cleanupTimer = setInterval(() => {
this.cleanup();
}, this.config.cleanupInterval);
}
}
/**
* Create a new cache instance with the specified configuration
*/
export function createCache<T = any>(config?: CacheConfig): Cache<T> {
return new Cache<T>(config);
}
/**
* Default cache configurations for common use cases
*/
export const CachePresets = {
/** Small, fast cache for frequently accessed data */
small: {
maxSize: 100,
defaultTTL: 2 * 60 * 1000, // 2 minutes
evictionPolicy: 'lru' as const,
enableCleanup: true,
cleanupInterval: 30 * 1000, // 30 seconds
},
/** Medium cache for general purpose use */
medium: {
maxSize: 500,
defaultTTL: 5 * 60 * 1000, // 5 minutes
evictionPolicy: 'lru' as const,
enableCleanup: true,
cleanupInterval: 60 * 1000, // 1 minute
},
/** Large cache for bulk data */
large: {
maxSize: 2000,
defaultTTL: 15 * 60 * 1000, // 15 minutes
evictionPolicy: 'lru' as const,
enableCleanup: true,
cleanupInterval: 2 * 60 * 1000, // 2 minutes
},
/** Long-lived cache for stable data */
persistent: {
maxSize: 1000,
defaultTTL: 60 * 60 * 1000, // 1 hour
evictionPolicy: 'fifo' as const,
enableCleanup: true,
cleanupInterval: 5 * 60 * 1000, // 5 minutes
},
} as const;
```
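Illustrative usage of the cache with one of the presets (the keys and values are placeholders):
```typescript
import { createCache, CachePresets } from './cache.js';

// Medium preset: 500 entries, 5-minute TTL, LRU eviction, 1-minute cleanup
const summaryCache = createCache<string>(CachePresets.medium);

summaryCache.set('composerData:some-uuid', 'summary text');
summaryCache.set('short-lived', 'expires quickly', 10_000); // per-entry TTL override (10s)

const hit = summaryCache.get('composerData:some-uuid'); // refreshes LRU order on access
console.log(hit, summaryCache.getStats().hitRate);

// Stop the cleanup interval when finished; otherwise the timer
// keeps the Node.js process alive until destroy() is called
summaryCache.destroy();
```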
--------------------------------------------------------------------------------
/src/utils/analytics.ts:
--------------------------------------------------------------------------------
```typescript
import type { ConversationSummary } from '../database/types.js';
/**
* Statistical calculations for conversation analytics
*/
export interface AnalyticsOverview {
totalConversations: number;
totalMessages: number;
totalCodeBlocks: number;
averageConversationSize: number;
averageMessagesPerConversation: number;
totalFiles: number;
totalFolders: number;
}
export interface FileBreakdown {
file: string;
mentions: number;
conversations: string[];
extension: string;
projectPath?: string;
}
export interface LanguageBreakdown {
language: string;
codeBlocks: number;
conversations: string[];
averageCodeLength: number;
}
export interface TemporalBreakdown {
period: string;
conversationCount: number;
messageCount: number;
averageSize: number;
conversationIds: string[];
}
export interface SizeDistribution {
distribution: number[];
percentiles: Record<string, number>;
bins: Array<{ range: string; count: number }>;
}
/**
* Calculate basic overview statistics from conversation summaries
*/
export function calculateOverview(summaries: ConversationSummary[]): AnalyticsOverview {
const totalConversations = summaries.length;
const totalMessages = summaries.reduce((sum, s) => sum + s.messageCount, 0);
const totalCodeBlocks = summaries.reduce((sum, s) => sum + s.codeBlockCount, 0);
const totalSize = summaries.reduce((sum, s) => sum + s.conversationSize, 0);
// Collect unique files and folders
const allFiles = new Set<string>();
const allFolders = new Set<string>();
summaries.forEach(summary => {
summary.relevantFiles.forEach(file => allFiles.add(file));
summary.attachedFolders.forEach(folder => allFolders.add(folder));
});
return {
totalConversations,
totalMessages,
totalCodeBlocks,
averageConversationSize: totalConversations > 0 ? totalSize / totalConversations : 0,
averageMessagesPerConversation: totalConversations > 0 ? totalMessages / totalConversations : 0,
totalFiles: allFiles.size,
totalFolders: allFolders.size
};
}
/**
* Calculate file breakdown with mentions and conversations
*/
export function calculateFileBreakdown(summaries: ConversationSummary[]): FileBreakdown[] {
const fileMap = new Map<string, { mentions: number; conversations: string[]; extension: string }>();
for (const summary of summaries) {
const files = new Set([
...(summary.relevantFiles || []),
...(summary.attachedFolders || [])
]);
for (const file of files) {
if (!fileMap.has(file)) {
fileMap.set(file, {
mentions: 0,
conversations: [],
extension: getFileExtension(file)
});
}
const entry = fileMap.get(file)!;
entry.mentions++;
entry.conversations.push(summary.composerId);
}
}
return Array.from(fileMap.entries())
.map(([file, data]) => ({
file,
mentions: data.mentions,
conversations: data.conversations,
extension: data.extension
}))
.sort((a, b) => b.mentions - a.mentions);
}
/**
* Calculate language breakdown from code blocks
*/
export function calculateLanguageBreakdown(
conversationsWithCode: Array<{
composerId: string;
codeBlocks: Array<{ language: string; code: string }>;
}>
): LanguageBreakdown[] {
const languageMap = new Map<string, {
codeBlocks: number;
conversations: Set<string>;
totalCodeLength: number;
}>();
conversationsWithCode.forEach(({ composerId, codeBlocks }) => {
codeBlocks.forEach(block => {
const language = normalizeLanguage(block.language);
if (!languageMap.has(language)) {
languageMap.set(language, {
codeBlocks: 0,
conversations: new Set(),
totalCodeLength: 0
});
}
const entry = languageMap.get(language)!;
entry.codeBlocks++;
entry.conversations.add(composerId);
entry.totalCodeLength += block.code.length;
});
});
return Array.from(languageMap.entries())
.map(([language, data]) => ({
language,
codeBlocks: data.codeBlocks,
conversations: Array.from(data.conversations),
averageCodeLength: data.codeBlocks > 0 ? data.totalCodeLength / data.codeBlocks : 0
}))
.sort((a, b) => b.codeBlocks - a.codeBlocks);
}
/**
* Calculate temporal breakdown using ROWID ordering as proxy for time
*/
export function calculateTemporalBreakdown(
summaries: ConversationSummary[],
conversationIds: string[]
): TemporalBreakdown[] {
const totalConversations = conversationIds.length;
const binsCount = Math.min(10, Math.max(3, Math.floor(totalConversations / 10)));
const conversationsPerBin = Math.ceil(totalConversations / binsCount);
const bins: TemporalBreakdown[] = [];
for (let i = 0; i < binsCount; i++) {
const startIndex = i * conversationsPerBin;
const endIndex = Math.min(startIndex + conversationsPerBin, totalConversations);
const binIds = conversationIds.slice(startIndex, endIndex);
const binSummaries = summaries.filter(s => binIds.includes(s.composerId));
const totalSize = binSummaries.reduce((sum, s) => sum + s.conversationSize, 0);
const averageSize = binSummaries.length > 0 ? totalSize / binSummaries.length : 0;
bins.push({
period: `Period ${i + 1}`,
conversationCount: binSummaries.length,
messageCount: binSummaries.reduce((sum, s) => sum + s.messageCount, 0),
averageSize: Math.round(averageSize),
conversationIds: binIds
});
}
return bins;
}
/**
* Calculate size distribution with percentiles and bins
*/
export function calculateSizeDistribution(summaries: ConversationSummary[]): SizeDistribution {
const sizes = summaries.map(s => s.conversationSize).sort((a, b) => a - b);
if (sizes.length === 0) {
return {
distribution: [],
percentiles: {},
bins: []
};
}
// Calculate percentiles
const percentiles = {
p10: calculatePercentile(sizes, 10),
p25: calculatePercentile(sizes, 25),
p50: calculatePercentile(sizes, 50),
p75: calculatePercentile(sizes, 75),
p90: calculatePercentile(sizes, 90),
p95: calculatePercentile(sizes, 95),
p99: calculatePercentile(sizes, 99)
};
// Create size bins
const minSize = sizes[0];
const maxSize = sizes[sizes.length - 1];
const binCount = 10;
const binSize = (maxSize - minSize) / binCount;
const bins: Array<{ range: string; count: number }> = [];
for (let i = 0; i < binCount; i++) {
const binStart = minSize + (i * binSize);
const isLastBin = i === binCount - 1;
const binEnd = isLastBin ? maxSize : binStart + binSize;
// Treat bins as half-open [binStart, binEnd) except the last, so a size that
// falls exactly on a boundary is not counted in two adjacent bins
const count = sizes.filter(size => size >= binStart && (isLastBin ? size <= binEnd : size < binEnd)).length;
bins.push({
range: `${formatSize(binStart)} - ${formatSize(binEnd)}`,
count
});
}
return {
distribution: sizes,
percentiles,
bins
};
}
/**
* Calculate percentile value from sorted array
*/
function calculatePercentile(sortedArray: number[], percentile: number): number {
if (sortedArray.length === 0) return 0;
const index = (percentile / 100) * (sortedArray.length - 1);
const lower = Math.floor(index);
const upper = Math.ceil(index);
if (lower === upper) {
return sortedArray[lower];
}
const weight = index - lower;
return sortedArray[lower] * (1 - weight) + sortedArray[upper] * weight;
}
/**
* Extract file extension from file path
*/
function getFileExtension(filePath: string): string {
const lastDot = filePath.lastIndexOf('.');
const lastSlash = Math.max(filePath.lastIndexOf('/'), filePath.lastIndexOf('\\'));
if (lastDot > lastSlash && lastDot !== -1) {
return filePath.substring(lastDot + 1).toLowerCase();
}
return '';
}
/**
* Extract project path from file path (first few directories)
*/
function extractProjectPath(filePath: string): string | undefined {
const parts = filePath.split(/[/\\]/);
if (parts.length > 2) {
return parts.slice(0, 2).join('/');
}
return undefined;
}
/**
* Normalize language names for consistency
*/
function normalizeLanguage(language: string): string {
const normalized = language.toLowerCase().trim();
// Common language mappings
const mappings: Record<string, string> = {
'js': 'javascript',
'ts': 'typescript',
'jsx': 'javascript',
'tsx': 'typescript',
'py': 'python',
'rb': 'ruby',
'sh': 'shell',
'bash': 'shell',
'zsh': 'shell',
'fish': 'shell',
'yml': 'yaml',
'md': 'markdown',
'dockerfile': 'docker'
};
return mappings[normalized] || normalized;
}
/**
* Format size in human-readable format
*/
function formatSize(bytes: number): string {
if (bytes < 1024) return `${bytes}B`;
if (bytes < 1024 * 1024) return `${(bytes / 1024).toFixed(1)}KB`;
if (bytes < 1024 * 1024 * 1024) return `${(bytes / (1024 * 1024)).toFixed(1)}MB`;
return `${(bytes / (1024 * 1024 * 1024)).toFixed(1)}GB`;
}
```
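To make the percentile interpolation concrete: for sorted sizes [100, 200, 400, 800], the p50 index is (50/100) × 3 = 1.5, so the result interpolates between positions 1 and 2: 200 × 0.5 + 400 × 0.5 = 300. A small sketch wiring the exported helpers together (the summaries are hand-built placeholders; the real `ConversationSummary` type may carry more fields, hence the cast):
```typescript
import { calculateOverview, calculateSizeDistribution } from './analytics.js';
import type { ConversationSummary } from '../database/types.js';

// Two minimal placeholder summaries; real ones come from the database reader
const summaries = [
  { composerId: 'a', messageCount: 4, conversationSize: 100, codeBlockCount: 1, relevantFiles: ['src/a.ts'], attachedFolders: [] },
  { composerId: 'b', messageCount: 8, conversationSize: 800, codeBlockCount: 0, relevantFiles: [], attachedFolders: ['src'] }
] as unknown as ConversationSummary[];

const overview = calculateOverview(summaries);
console.log(overview.averageMessagesPerConversation); // (4 + 8) / 2 = 6

const sizes = calculateSizeDistribution(summaries);
console.log(sizes.percentiles.p50); // 100 × 0.5 + 800 × 0.5 = 450
```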
--------------------------------------------------------------------------------
/docs/research.md:
--------------------------------------------------------------------------------
```markdown
# Cursor Chat Storage Guide
## Overview
Cursor stores all chat conversations in a SQLite database located in your system's application support directory. This guide explains where to find your chats and how to access them.
## Storage Location
Your Cursor chat conversations are stored at:
**macOS**: `~/Library/Application Support/Cursor/User/globalStorage/state.vscdb`
**Full Path**: `/Users/[username]/Library/Application Support/Cursor/User/globalStorage/state.vscdb`
## Database Details
- **Type**: SQLite 3.x database
- **Size**: ~1.5GB (varies based on chat history)
- **Format**: Key-value store in table `cursorDiskKV`
- **Structure**: Binary BLOB values with text keys
## Data Organization
### Main Tables
- `cursorDiskKV` - Primary key-value storage
- `ItemTable` - Additional metadata storage
### Key Types in cursorDiskKV
| Key Pattern | Description | Example |
|-------------|-------------|---------|
| `composerData:UUID` | Complete conversation data | `composerData:0003f899-8807-4f86-ab1b-a48f985cb580` |
| `messageRequestContext:UUID:UUID` | Message context and metadata | `messageRequestContext:013273b7-92e9-409a-816c-b671052557ea:89911dfd-87c3-4124-928f-d7c00fd7e273` |
| `bubbleId:UUID:UUID` | Individual message/bubble data | `bubbleId:00624634-f10c-4427-b2d1-52caef4e19cf:01cdaaf8-3c72-4984-8397-fb8079ad04fc` |
| `checkpointId:UUID` | Conversation checkpoints | `checkpointId:UUID` |
| `codeBlockDiff:UUID` | Code block differences | `codeBlockDiff:UUID` |
## Summarization Data Storage
**Location**: Summarization data is stored within the conversation JSON in `composerData:UUID` records.
**How to Find Conversations with Summarization**:
```sql
-- Find conversations containing summarization content
SELECT key FROM cursorDiskKV
WHERE key LIKE 'composerData:%'
AND value LIKE '%summarization%';
```
**Storage Format**: The summarization content appears to be embedded within the conversation data structure, likely in:
- Message text content
- Tool call parameters and results
- Conversation metadata
**Example Query to Extract Summarization Content**:
```sql
-- Get conversations with summarization and show first 500 characters
SELECT key, substr(value, 1, 500) FROM cursorDiskKV
WHERE key LIKE 'composerData:%'
AND value LIKE '%summarization%'
LIMIT 5;
```
**Note**: Summarization data is not stored in a separate table but is integrated into the regular conversation flow as part of the chat history. This means summarization requests and responses are treated as regular messages within the conversation structure.
## Accessing Your Chats
### Using SQLite Command Line
1. **Open Terminal** and navigate to the database:
```bash
cd ~/Library/Application\ Support/Cursor/User/globalStorage/
```
2. **Open the database**:
```bash
sqlite3 state.vscdb
```
3. **Basic queries**:
**List all conversations:**
```sql
SELECT key FROM cursorDiskKV WHERE key LIKE 'composerData:%';
```
**Count total conversations:**
```sql
SELECT COUNT(*) FROM cursorDiskKV WHERE key LIKE 'composerData:%';
```
**Get conversation data (replace UUID with actual ID):**
```sql
SELECT value FROM cursorDiskKV WHERE key = 'composerData:UUID';
```
**Check conversation size:**
```sql
SELECT key, length(value) as size_bytes
FROM cursorDiskKV
WHERE key LIKE 'composerData:%'
ORDER BY size_bytes DESC
LIMIT 10;
```
**⭐ Find most recent conversations (by insertion order):**
```sql
SELECT key FROM cursorDiskKV
WHERE key LIKE 'composerData:%' AND length(value) > 5000
ORDER BY ROWID DESC LIMIT 10;
```
**Fetch an individual message bubble (replace UUIDs with actual IDs):**
```sql
SELECT value FROM cursorDiskKV
WHERE key = 'bubbleId:COMPOSER_UUID:BUBBLE_UUID';
```
### Data Format
Conversations are stored as JSON objects with different format versions:
#### Legacy Format (older conversations)
```json
{
"composerId": "UUID",
"richText": "",
"hasLoaded": true,
"text": "",
"conversation": [
{
"type": 1,
"attachedFoldersNew": [],
"bubbleId": "UUID",
"suggestedCodeBlocks": [],
"relevantFiles": ["file1.tsx", "file2.css"],
"text": "user message content..."
}
]
}
```
#### Modern Format (newer conversations)
```json
{
"_v": 3,
"composerId": "UUID",
"richText": "",
"hasLoaded": true,
"text": "",
"fullConversationHeadersOnly": [
{
"bubbleId": "UUID",
"type": 1
},
{
"bubbleId": "UUID",
"type": 2,
"serverBubbleId": "UUID"
}
]
}
```
**Key Differences:**
- Modern format uses `_v` version field
- Individual messages stored separately with `bubbleId:` keys (see the lookup query below)
- `fullConversationHeadersOnly` contains message references
- Type 1 = User message, Type 2 = AI response
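Because modern-format messages live in separate records, you can list every bubble belonging to one conversation directly (replace `COMPOSER_UUID` with a real conversation ID):
```sql
-- List all message bubbles for a modern-format conversation
SELECT key FROM cursorDiskKV
WHERE key LIKE 'bubbleId:COMPOSER_UUID:%';
```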
## Finding Recent Conversations
⚠️ **Important**: UUID ordering is NOT chronological! Use these methods instead:
### Method 1: ROWID Ordering (Most Reliable)
```sql
-- Get most recent conversations with content
SELECT key FROM cursorDiskKV
WHERE key LIKE 'composerData:%' AND length(value) > 5000
ORDER BY ROWID DESC LIMIT 5;
```
### Method 2: Extract User Messages
```bash
# Get user message text from a bubble
sqlite3 state.vscdb "SELECT value FROM cursorDiskKV WHERE key = 'bubbleId:COMPOSER_UUID:BUBBLE_UUID';" | grep -o '"text":"[^"]*"'
```
### Method 3: File Modification Time
```bash
# Check when database was last modified
ls -la ~/Library/Application\ Support/Cursor/User/globalStorage/state.vscdb
```
## Statistics Example
Based on a typical Cursor installation:
- **Total conversations**: ~3,294
- **Database size**: ~1.5GB
- **Total records**: ~48,485
- **Average conversation size**: ~100-400KB
## Backup Recommendations
### Manual Backup
```bash
# Create a backup of your chat database
cp ~/Library/Application\ Support/Cursor/User/globalStorage/state.vscdb ~/Desktop/cursor-chats-backup.db
```
### Export Conversations
```bash
# Export all conversation keys to a text file
sqlite3 ~/Library/Application\ Support/Cursor/User/globalStorage/state.vscdb \
"SELECT key FROM cursorDiskKV WHERE key LIKE 'composerData:%';" > ~/Desktop/conversation-list.txt
```
### Export Recent Conversations with Content
```bash
# Export recent conversations with their sizes
sqlite3 ~/Library/Application\ Support/Cursor/User/globalStorage/state.vscdb \
"SELECT key, length(value) FROM cursorDiskKV WHERE key LIKE 'composerData:%' ORDER BY ROWID DESC LIMIT 20;" > ~/Desktop/recent-conversations.txt
```
## Important Notes
⚠️ **Warnings:**
- The database is actively used by Cursor - close Cursor before making changes
- Always backup before modifying the database
- The database format may change with Cursor updates
- UUID-based sorting does NOT reflect chronological order
💡 **Tips:**
- Use SQLite browser tools for easier exploration
- The database contains sensitive information - handle with care
- Large conversations may take time to load/export
- Use ROWID for finding recent conversations
- Modern conversations split messages into separate bubble records
## Troubleshooting
### Database Locked Error
If you get "database is locked" error:
1. Close Cursor completely
2. Wait a few seconds
3. Try the SQLite command again (or query a copy, as shown below)
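If closing Cursor isn't an option, query a point-in-time copy instead (very recent writes may be missing from the snapshot):
```bash
# Copy the database and query the copy to sidestep the lock
cp ~/Library/Application\ Support/Cursor/User/globalStorage/state.vscdb /tmp/state-copy.vscdb
sqlite3 /tmp/state-copy.vscdb "SELECT COUNT(*) FROM cursorDiskKV;"
```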
### File Not Found
If the database file doesn't exist:
- Check if Cursor has been used for chats
- Verify the correct path for your OS
- Look for similar `.vscdb` files in the directory
### Empty Conversations
Some conversations may appear empty because:
- They use the modern format with separate bubble storage
- The conversation was just started but not used
- Messages are stored in `bubbleId:` keys instead of inline (see the query below)
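To confirm this, count the bubble records tied to the conversation; a non-zero count means the messages exist but are stored separately (replace `COMPOSER_UUID`):
```sql
SELECT COUNT(*) FROM cursorDiskKV
WHERE key LIKE 'bubbleId:COMPOSER_UUID:%';
```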
## Alternative Tools
### SQLite Browser Applications
- **DB Browser for SQLite** (Free, cross-platform)
- **SQLiteStudio** (Free, cross-platform)
- **Navicat for SQLite** (Paid)
### Command Line Tools
```bash
# Install sqlite3 if not available
brew install sqlite3 # macOS with Homebrew
# View database schema
sqlite3 state.vscdb ".schema"
# Export entire database to SQL
sqlite3 state.vscdb ".dump" > backup.sql
```
## Practical Examples
### Find Your Last 5 Conversations
```bash
# Step 1: Find recent conversation IDs
sqlite3 ~/Library/Application\ Support/Cursor/User/globalStorage/state.vscdb \
"SELECT key FROM cursorDiskKV WHERE key LIKE 'composerData:%' AND length(value) > 5000 ORDER BY ROWID DESC LIMIT 5;"
# Step 2: Get user message from first bubble (replace UUIDs)
sqlite3 ~/Library/Application\ Support/Cursor/User/globalStorage/state.vscdb \
"SELECT value FROM cursorDiskKV WHERE key = 'bubbleId:COMPOSER_UUID:FIRST_BUBBLE_UUID';" | grep -o '"text":"[^"]*"'
```
### Search Conversations by Content
```bash
# Find conversations mentioning specific terms (substring match against the raw JSON)
sqlite3 ~/Library/Application\ Support/Cursor/User/globalStorage/state.vscdb \
"SELECT key FROM cursorDiskKV WHERE key LIKE 'composerData:%' AND value LIKE '%your_search_term%';"
```
---
*Last updated: Based on Cursor's current storage implementation with format version 3*
```
--------------------------------------------------------------------------------
/src/utils/relationships.ts:
--------------------------------------------------------------------------------
```typescript
import type { ConversationSummary } from '../database/types.js';
/**
* Relationship detection and similarity scoring algorithms
*/
export interface RelationshipScore {
sharedFiles?: string[];
sharedFolders?: string[];
sharedLanguages?: string[];
sizeSimilarity?: number;
temporalProximity?: number;
}
export interface RelatedConversation {
composerId: string;
relationshipScore: number;
relationships: RelationshipScore;
summary: string;
scoreBreakdown?: Record<string, number>;
}
export interface RelationshipOptions {
relationshipTypes: Array<'files' | 'folders' | 'languages' | 'size' | 'temporal'>;
maxResults: number;
minScore: number;
includeScoreBreakdown: boolean;
}
/**
* Find conversations related to a reference conversation
*/
export function findRelatedConversations(
referenceSummary: ConversationSummary,
allSummaries: ConversationSummary[],
conversationIds: string[],
options: RelationshipOptions
): RelatedConversation[] {
const related: RelatedConversation[] = [];
// Get reference conversation index for temporal calculations
const referenceIndex = conversationIds.indexOf(referenceSummary.composerId);
for (const summary of allSummaries) {
// Skip the reference conversation itself
if (summary.composerId === referenceSummary.composerId) {
continue;
}
const relationships = calculateRelationships(
referenceSummary,
summary,
conversationIds,
referenceIndex,
options.relationshipTypes
);
const score = calculateCompositeScore(relationships, options.relationshipTypes);
if (score >= options.minScore) {
related.push({
composerId: summary.composerId,
relationshipScore: score,
relationships,
summary: summary.firstMessage || 'No preview available',
scoreBreakdown: options.includeScoreBreakdown ?
calculateScoreBreakdown(relationships, options.relationshipTypes) : undefined
});
}
}
// Sort by score and limit results
return related
.sort((a, b) => b.relationshipScore - a.relationshipScore)
.slice(0, options.maxResults);
}
/**
* Calculate relationships between two conversations
*/
function calculateRelationships(
reference: ConversationSummary,
candidate: ConversationSummary,
conversationIds: string[],
referenceIndex: number,
relationshipTypes: string[]
): RelationshipScore {
const relationships: RelationshipScore = {};
if (relationshipTypes.includes('files')) {
relationships.sharedFiles = calculateSharedItems(
reference.relevantFiles,
candidate.relevantFiles
);
}
if (relationshipTypes.includes('folders')) {
relationships.sharedFolders = calculateSharedItems(
reference.attachedFolders,
candidate.attachedFolders
);
}
if (relationshipTypes.includes('languages')) {
// Extract languages from both conversations (would need code block data)
// For now, we'll use a placeholder - this would be enhanced with actual language extraction
relationships.sharedLanguages = [];
}
if (relationshipTypes.includes('size')) {
relationships.sizeSimilarity = calculateSizeSimilarity(
reference.conversationSize,
candidate.conversationSize
);
}
if (relationshipTypes.includes('temporal')) {
const candidateIndex = conversationIds.indexOf(candidate.composerId);
relationships.temporalProximity = calculateTemporalProximity(
referenceIndex,
candidateIndex,
conversationIds.length
);
}
return relationships;
}
/**
* Calculate shared items between two arrays
*/
function calculateSharedItems(array1: string[], array2: string[]): string[] {
const set1 = new Set(array1);
return array2.filter(item => set1.has(item));
}
/**
* Calculate size similarity between two conversations
*/
function calculateSizeSimilarity(size1: number, size2: number): number {
if (size1 === 0 && size2 === 0) return 1;
if (size1 === 0 || size2 === 0) return 0;
const maxSize = Math.max(size1, size2);
const minSize = Math.min(size1, size2);
return minSize / maxSize;
}
/**
* Calculate temporal proximity based on ROWID distance
*/
function calculateTemporalProximity(
index1: number,
index2: number,
totalConversations: number
): number {
if (index1 === -1 || index2 === -1) return 0;
const distance = Math.abs(index1 - index2);
const maxDistance = totalConversations - 1;
if (maxDistance === 0) return 1;
// Closer conversations get higher scores
return 1 - (distance / maxDistance);
}
/**
* Calculate composite score from relationships
*/
function calculateCompositeScore(
relationships: RelationshipScore,
relationshipTypes: string[]
): number {
let totalScore = 0;
let weightSum = 0;
// Define weights for different relationship types
const weights = {
files: 0.4,
folders: 0.3,
languages: 0.2,
size: 0.05,
temporal: 0.05
};
if (relationshipTypes.includes('files') && relationships.sharedFiles) {
const score = Math.min(relationships.sharedFiles.length / 5, 1); // Cap at 5 shared files
totalScore += score * weights.files;
weightSum += weights.files;
}
if (relationshipTypes.includes('folders') && relationships.sharedFolders) {
const score = Math.min(relationships.sharedFolders.length / 3, 1); // Cap at 3 shared folders
totalScore += score * weights.folders;
weightSum += weights.folders;
}
if (relationshipTypes.includes('languages') && relationships.sharedLanguages) {
const score = Math.min(relationships.sharedLanguages.length / 3, 1); // Cap at 3 shared languages
totalScore += score * weights.languages;
weightSum += weights.languages;
}
if (relationshipTypes.includes('size') && relationships.sizeSimilarity !== undefined) {
totalScore += relationships.sizeSimilarity * weights.size;
weightSum += weights.size;
}
if (relationshipTypes.includes('temporal') && relationships.temporalProximity !== undefined) {
totalScore += relationships.temporalProximity * weights.temporal;
weightSum += weights.temporal;
}
return weightSum > 0 ? totalScore / weightSum : 0;
}
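// Worked example (illustrative numbers only): with relationshipTypes = ['files', 'folders'],
// 3 shared files and 0 shared folders give a files score of min(3 / 5, 1) = 0.6, so
// totalScore = 0.6 * 0.4 + 0 * 0.3 = 0.24 and weightSum = 0.4 + 0.3 = 0.7,
// yielding a composite score of 0.24 / 0.7 ≈ 0.34.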
/**
* Calculate individual score breakdown for debugging
*/
function calculateScoreBreakdown(
relationships: RelationshipScore,
relationshipTypes: string[]
): Record<string, number> {
const breakdown: Record<string, number> = {};
if (relationshipTypes.includes('files') && relationships.sharedFiles) {
breakdown.files = Math.min(relationships.sharedFiles.length / 5, 1);
}
if (relationshipTypes.includes('folders') && relationships.sharedFolders) {
breakdown.folders = Math.min(relationships.sharedFolders.length / 3, 1);
}
if (relationshipTypes.includes('languages') && relationships.sharedLanguages) {
breakdown.languages = Math.min(relationships.sharedLanguages.length / 3, 1);
}
if (relationshipTypes.includes('size') && relationships.sizeSimilarity !== undefined) {
breakdown.size = relationships.sizeSimilarity;
}
if (relationshipTypes.includes('temporal') && relationships.temporalProximity !== undefined) {
breakdown.temporal = relationships.temporalProximity;
}
return breakdown;
}
/**
* Extract languages from code blocks in conversation data
*/
export function extractLanguagesFromCodeBlocks(
codeBlocks: Array<{ language: string; code: string }>
): string[] {
const languages = new Set<string>();
codeBlocks.forEach(block => {
if (block.language && block.language.trim()) {
languages.add(normalizeLanguage(block.language));
}
});
return Array.from(languages);
}
/**
* Normalize language names for consistency
*/
function normalizeLanguage(language: string): string {
const normalized = language.toLowerCase().trim();
// Common language mappings
const mappings: Record<string, string> = {
'js': 'javascript',
'ts': 'typescript',
'jsx': 'javascript',
'tsx': 'typescript',
'py': 'python',
'rb': 'ruby',
'sh': 'shell',
'bash': 'shell',
'zsh': 'shell',
'fish': 'shell',
'yml': 'yaml',
'md': 'markdown',
'dockerfile': 'docker'
};
return mappings[normalized] || normalized;
}
/**
* Calculate file overlap score between two conversations
*/
export function calculateFileOverlapScore(files1: string[], files2: string[]): number {
if (files1.length === 0 && files2.length === 0) return 1;
if (files1.length === 0 || files2.length === 0) return 0;
const set1 = new Set(files1);
const intersection = files2.filter(file => set1.has(file));
const union = new Set([...files1, ...files2]);
return intersection.length / union.size; // Jaccard similarity
}
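// Example (hypothetical file lists): files1 = ['a.ts', 'b.ts'] and files2 = ['b.ts', 'c.ts']
// share one file out of three unique files, so the Jaccard score is 1 / 3 ≈ 0.33.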
/**
* Calculate folder overlap score between two conversations
*/
export function calculateFolderOverlapScore(folders1: string[], folders2: string[]): number {
if (folders1.length === 0 && folders2.length === 0) return 1;
if (folders1.length === 0 || folders2.length === 0) return 0;
const set1 = new Set(folders1);
const intersection = folders2.filter(folder => set1.has(folder));
const union = new Set([...folders1, ...folders2]);
return intersection.length / union.size; // Jaccard similarity
}
```
--------------------------------------------------------------------------------
/src/utils/exporters.ts:
--------------------------------------------------------------------------------
```typescript
import type { ConversationSummary } from '../database/types.js';
/**
* Format conversion utilities for exporting conversation data
*/
export interface ExportMetadata {
exportedCount: number;
totalAvailable: number;
exportTimestamp: string;
filters: Record<string, any>;
}
export interface GraphNode {
id: string;
label: string;
type: 'conversation';
attributes: {
messageCount: number;
size: number;
hasCodeBlocks: boolean;
format: 'legacy' | 'modern';
fileCount: number;
folderCount: number;
};
}
export interface GraphEdge {
source: string;
target: string;
type: 'shared_files' | 'shared_folders' | 'similar_size' | 'temporal_proximity';
weight: number;
attributes: {
sharedItems?: string[];
similarity?: number;
};
}
export interface GraphData {
nodes: GraphNode[];
edges: GraphEdge[];
}
/**
* Export conversation data as JSON
*/
export function exportAsJSON(
summaries: ConversationSummary[],
includeContent: boolean,
conversationData?: Map<string, any>
): any {
if (!includeContent) {
return summaries;
}
// Include full conversation content if available
return summaries.map(summary => ({
...summary,
fullContent: conversationData?.get(summary.composerId) || null
}));
}
/**
* Export conversation data as CSV
*/
export function exportAsCSV(
summaries: ConversationSummary[],
flattenStructure: boolean
): string {
if (summaries.length === 0) {
return 'No data to export';
}
const headers = [
'composerId',
'format',
'messageCount',
'hasCodeBlocks',
'codeBlockCount',
'conversationSize',
'fileCount',
'folderCount',
'firstMessage',
'relevantFiles',
'attachedFolders'
];
const rows = summaries.map(summary => [
escapeCSVField(summary.composerId),
escapeCSVField(summary.format),
summary.messageCount.toString(),
summary.hasCodeBlocks.toString(),
summary.codeBlockCount.toString(),
summary.conversationSize.toString(),
summary.relevantFiles.length.toString(),
summary.attachedFolders.length.toString(),
escapeCSVField(summary.firstMessage || ''),
escapeCSVField(flattenStructure ?
summary.relevantFiles.join('; ') :
JSON.stringify(summary.relevantFiles)
),
escapeCSVField(flattenStructure ?
summary.attachedFolders.join('; ') :
JSON.stringify(summary.attachedFolders)
)
]);
return [headers.join(','), ...rows.map(row => row.join(','))].join('\n');
}
/**
* Export conversation data as graph format for visualization tools
*/
export function exportAsGraph(
summaries: ConversationSummary[],
includeRelationships: boolean
): GraphData {
const nodes: GraphNode[] = summaries.map(summary => ({
id: summary.composerId,
label: summary.firstMessage?.substring(0, 50) || summary.composerId,
type: 'conversation',
attributes: {
messageCount: summary.messageCount,
size: summary.conversationSize,
hasCodeBlocks: summary.hasCodeBlocks,
format: summary.format,
fileCount: summary.relevantFiles.length,
folderCount: summary.attachedFolders.length
}
}));
const edges: GraphEdge[] = [];
if (includeRelationships) {
// Calculate relationships between conversations
for (let i = 0; i < summaries.length; i++) {
for (let j = i + 1; j < summaries.length; j++) {
const summary1 = summaries[i];
const summary2 = summaries[j];
// Shared files relationship
const sharedFiles = calculateSharedItems(summary1.relevantFiles, summary2.relevantFiles);
if (sharedFiles.length > 0) {
edges.push({
source: summary1.composerId,
target: summary2.composerId,
type: 'shared_files',
weight: sharedFiles.length,
attributes: {
sharedItems: sharedFiles
}
});
}
// Shared folders relationship
const sharedFolders = calculateSharedItems(summary1.attachedFolders, summary2.attachedFolders);
if (sharedFolders.length > 0) {
edges.push({
source: summary1.composerId,
target: summary2.composerId,
type: 'shared_folders',
weight: sharedFolders.length,
attributes: {
sharedItems: sharedFolders
}
});
}
// Size similarity relationship
const sizeSimilarity = calculateSizeSimilarity(
summary1.conversationSize,
summary2.conversationSize
);
if (sizeSimilarity > 0.7) { // Only include high similarity
edges.push({
source: summary1.composerId,
target: summary2.composerId,
type: 'similar_size',
weight: sizeSimilarity,
attributes: {
similarity: sizeSimilarity
}
});
}
}
}
}
return { nodes, edges };
}
/**
* Create export metadata
*/
export function createExportMetadata(
exportedCount: number,
totalAvailable: number,
filters: Record<string, any>
): ExportMetadata {
return {
exportedCount,
totalAvailable,
exportTimestamp: new Date().toISOString(),
filters
};
}
/**
* Escape CSV field to handle commas, quotes, and newlines
*/
function escapeCSVField(field: string): string {
if (field.includes(',') || field.includes('"') || field.includes('\n')) {
return `"${field.replace(/"/g, '""')}"`;
}
return field;
}
/**
* Calculate shared items between two arrays
*/
function calculateSharedItems(array1: string[], array2: string[]): string[] {
const set1 = new Set(array1);
return array2.filter(item => set1.has(item));
}
/**
* Calculate size similarity between two conversations
*/
function calculateSizeSimilarity(size1: number, size2: number): number {
if (size1 === 0 && size2 === 0) return 1;
if (size1 === 0 || size2 === 0) return 0;
const maxSize = Math.max(size1, size2);
const minSize = Math.min(size1, size2);
return minSize / maxSize;
}
/**
* Convert graph data to Gephi-compatible GEXF format
*/
export function exportAsGEXF(graphData: GraphData): string {
const { nodes, edges } = graphData;
let gexf = `<?xml version="1.0" encoding="UTF-8"?>
<gexf xmlns="http://www.gexf.net/1.2draft" version="1.2">
<meta lastmodifieddate="${new Date().toISOString()}">
<creator>Cursor Conversations MCP</creator>
<description>Conversation relationship graph</description>
</meta>
<graph mode="static" defaultedgetype="undirected">
<attributes class="node">
<attribute id="0" title="messageCount" type="integer"/>
<attribute id="1" title="size" type="integer"/>
<attribute id="2" title="hasCodeBlocks" type="boolean"/>
<attribute id="3" title="format" type="string"/>
<attribute id="4" title="fileCount" type="integer"/>
<attribute id="5" title="folderCount" type="integer"/>
</attributes>
<attributes class="edge">
<attribute id="0" title="type" type="string"/>
<attribute id="1" title="sharedItems" type="string"/>
<attribute id="2" title="similarity" type="float"/>
</attributes>
<nodes>`;
// Add nodes
nodes.forEach(node => {
gexf += `
<node id="${escapeXML(node.id)}" label="${escapeXML(node.label)}">
<attvalues>
<attvalue for="0" value="${node.attributes.messageCount}"/>
<attvalue for="1" value="${node.attributes.size}"/>
<attvalue for="2" value="${node.attributes.hasCodeBlocks}"/>
<attvalue for="3" value="${escapeXML(node.attributes.format)}"/>
<attvalue for="4" value="${node.attributes.fileCount}"/>
<attvalue for="5" value="${node.attributes.folderCount}"/>
</attvalues>
</node>`;
});
gexf += `
</nodes>
<edges>`;
// Add edges
edges.forEach((edge, index) => {
gexf += `
<edge id="${index}" source="${escapeXML(edge.source)}" target="${escapeXML(edge.target)}" weight="${edge.weight}">
<attvalues>
<attvalue for="0" value="${escapeXML(edge.type)}"/>
<attvalue for="1" value="${escapeXML(edge.attributes.sharedItems?.join(', ') || '')}"/>
<attvalue for="2" value="${edge.attributes.similarity || 0}"/>
</attvalues>
</edge>`;
});
gexf += `
</edges>
</graph>
</gexf>`;
return gexf;
}
/**
* Convert graph data to Cytoscape.js format
*/
export function exportAsCytoscape(graphData: GraphData): any {
const { nodes, edges } = graphData;
return {
elements: [
...nodes.map(node => ({
data: {
id: node.id,
label: node.label,
...node.attributes
}
})),
...edges.map((edge, index) => ({
data: {
id: `edge-${index}`,
source: edge.source,
target: edge.target,
weight: edge.weight,
type: edge.type,
...edge.attributes
}
}))
]
};
}
/**
* Escape XML special characters
*/
function escapeXML(text: string): string {
  return text
    .replace(/&/g, '&amp;')
    .replace(/</g, '&lt;')
    .replace(/>/g, '&gt;')
    .replace(/"/g, '&quot;')
    .replace(/'/g, '&apos;');
}
```
--------------------------------------------------------------------------------
/src/tools/conversation-tools.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import {
listConversations,
getConversation,
getConversationSummary,
searchConversations,
getBubbleMessage,
getRecentConversations,
getConversationsByProject
} from './conversation-tools.js';
import { CursorDatabaseReader } from '../database/reader.js';
import * as databaseUtils from '../utils/database-utils.js';
// Mock the database reader
vi.mock('../database/reader.js');
vi.mock('../utils/database-utils.js');
const mockDatabaseReader = vi.mocked(CursorDatabaseReader);
const mockDetectCursorDatabasePath = vi.mocked(databaseUtils.detectCursorDatabasePath);
describe('Conversation Tools', () => {
let mockReader: any;
beforeEach(() => {
mockReader = {
connect: vi.fn(),
close: vi.fn(),
getConversationIds: vi.fn(),
getConversationSummary: vi.fn(),
getConversationById: vi.fn(),
getBubbleMessage: vi.fn(),
searchConversations: vi.fn(),
getConversationIdsByProject: vi.fn()
};
mockDatabaseReader.mockImplementation(() => mockReader);
mockDetectCursorDatabasePath.mockReturnValue('/mock/path/to/cursor.db');
// Clear environment variable
delete process.env.CURSOR_DB_PATH;
});
afterEach(() => {
vi.clearAllMocks();
});
describe('listConversations', () => {
it('should list conversations with default parameters', async () => {
const mockConversationIds = ['conv1', 'conv2'];
const mockSummary = {
composerId: 'conv1',
format: 'legacy' as const,
messageCount: 5,
hasCodeBlocks: true,
relevantFiles: ['file1.ts'],
attachedFolders: ['folder1'],
firstMessage: 'Hello world',
conversationSize: 1000
};
mockReader.getConversationIds.mockResolvedValue(mockConversationIds);
mockReader.getConversationSummary.mockResolvedValue(mockSummary);
const result = await listConversations({});
expect(mockReader.connect).toHaveBeenCalled();
expect(mockReader.close).toHaveBeenCalled();
expect(result.conversations).toHaveLength(2);
expect(result.totalFound).toBe(2);
expect(result.filters.limit).toBe(1000);
expect(result.filters.minLength).toBe(100);
});
it('should handle empty results', async () => {
mockReader.getConversationIds.mockResolvedValue([]);
const result = await listConversations({});
expect(result.conversations).toHaveLength(0);
expect(result.totalFound).toBe(0);
});
it('should always close database connection', async () => {
mockReader.getConversationIds.mockRejectedValue(new Error('Database error'));
await expect(listConversations({})).rejects.toThrow('Database error');
expect(mockReader.close).toHaveBeenCalled();
});
});
describe('getConversation', () => {
it('should get legacy conversation with full content', async () => {
const mockConversation = {
composerId: 'conv1',
hasLoaded: true,
text: '',
richText: '',
conversation: [
{
type: 1,
bubbleId: 'bubble1',
text: 'Hello',
relevantFiles: ['file1.ts'],
suggestedCodeBlocks: [{
language: 'typescript',
code: 'console.log("hello");',
filename: 'test.ts'
}],
attachedFoldersNew: ['folder1']
}
]
};
mockReader.getConversationById.mockResolvedValue(mockConversation);
const result = await getConversation({
conversationId: 'conv1'
});
expect(mockReader.connect).toHaveBeenCalled();
expect(mockReader.getConversationById).toHaveBeenCalledWith('conv1');
expect(result.conversation).toBeDefined();
expect(result.conversation!.format).toBe('legacy');
expect(result.conversation!.messageCount).toBe(1);
});
it('should return null for non-existent conversation', async () => {
mockReader.getConversationById.mockResolvedValue(null);
const result = await getConversation({
conversationId: 'nonexistent'
});
expect(result.conversation).toBeNull();
});
});
describe('getConversationSummary', () => {
it('should get conversation summary', async () => {
const mockSummary = {
composerId: 'conv1',
format: 'legacy' as const,
messageCount: 5,
hasCodeBlocks: true,
codeBlockCount: 3,
conversationSize: 2000,
firstMessage: 'First message',
relevantFiles: ['file1.ts'],
attachedFolders: ['src']
};
mockReader.getConversationSummary.mockResolvedValue(mockSummary);
const result = await getConversationSummary({
conversationId: 'conv1'
});
expect(result.summary).toEqual(mockSummary);
});
it('should return null for non-existent conversation', async () => {
mockReader.getConversationSummary.mockResolvedValue(null);
const result = await getConversationSummary({
conversationId: 'nonexistent'
});
expect(result.summary).toBeNull();
});
});
describe('searchConversations', () => {
it('should search conversations with default options', async () => {
const mockResults = [
{
composerId: 'conv1',
format: 'legacy' as const,
matches: [
{
text: 'Found text with query match',
context: 'Context around match',
bubbleId: 'bubble1',
type: 1
}
],
totalMatches: 1,
messageCount: 5,
hasCodeBlocks: true,
relevantFiles: ['file1.ts'],
attachedFolders: ['src']
}
];
mockReader.searchConversations.mockResolvedValue(mockResults);
const result = await searchConversations({
query: 'test query'
});
expect(mockReader.searchConversations).toHaveBeenCalledWith('test query', {
includeCode: true,
contextLines: 3,
maxResults: 20,
searchBubbles: true,
searchType: 'all',
format: 'both'
});
expect(result.results).toEqual(mockResults);
expect(result.totalResults).toBe(1);
expect(result.query).toBe('test query');
});
});
describe('getBubbleMessage', () => {
it('should get bubble message', async () => {
const mockBubbleMessage = {
bubbleId: 'bubble1',
type: 1,
text: 'Bubble message text',
relevantFiles: ['file1.ts'],
suggestedCodeBlocks: [],
attachedFoldersNew: []
};
mockReader.getBubbleMessage.mockResolvedValue(mockBubbleMessage);
const result = await getBubbleMessage({
composerId: 'conv1',
bubbleId: 'bubble1'
});
expect(result.bubbleMessage).toEqual(mockBubbleMessage);
});
it('should return null for non-existent bubble message', async () => {
mockReader.getBubbleMessage.mockResolvedValue(null);
const result = await getBubbleMessage({
composerId: 'conv1',
bubbleId: 'nonexistent'
});
expect(result.bubbleMessage).toBeNull();
});
});
describe('getRecentConversations', () => {
it('should get recent conversations', async () => {
const mockConversationIds = ['conv1', 'conv2'];
const mockSummary = {
composerId: 'conv1',
format: 'legacy' as const,
messageCount: 3,
hasCodeBlocks: false,
relevantFiles: [],
attachedFolders: [],
conversationSize: 800
};
mockReader.getConversationIds.mockResolvedValue(mockConversationIds);
mockReader.getConversationSummary.mockResolvedValue(mockSummary);
const result = await getRecentConversations({});
expect(result.conversations).toHaveLength(2);
expect(result.requestedLimit).toBe(10);
expect(result.totalFound).toBe(2);
expect(result.timestamp).toBeDefined();
});
});
describe('getConversationsByProject', () => {
it('should get conversations by project path', async () => {
const mockResults = [
{ composerId: 'conv1', relevanceScore: 0.9 }
];
const mockSummary = {
composerId: 'conv1',
format: 'legacy' as const,
messageCount: 5,
hasCodeBlocks: true,
relevantFiles: ['src/file1.ts'],
attachedFolders: ['/project/src'],
conversationSize: 1500
};
mockReader.getConversationIdsByProject.mockResolvedValue(mockResults);
mockReader.getConversationSummary.mockResolvedValue(mockSummary);
const result = await getConversationsByProject({
projectPath: '/project/src'
});
expect(result.conversations).toHaveLength(1);
expect(result.totalFound).toBe(1);
expect(result.filters.projectPath).toBe('/project/src');
});
});
describe('Error Handling', () => {
it('should handle database connection errors', async () => {
mockReader.connect.mockRejectedValue(new Error('Connection failed'));
await expect(listConversations({})).rejects.toThrow('Connection failed');
expect(mockReader.close).toHaveBeenCalled();
});
it('should handle validation errors', async () => {
const invalidInput = { conversationId: '' };
await expect(getConversation(invalidInput as any)).rejects.toThrow();
});
});
});
```
--------------------------------------------------------------------------------
/src/utils/database-utils.ts:
--------------------------------------------------------------------------------
```typescript
import { homedir, platform } from 'os';
import { join, resolve } from 'path';
import { existsSync } from 'fs';
import type { CursorDatabasePaths, DatabaseConfig } from '../database/types.js';
// Platform-specific database paths (lazy-loaded to support testing)
export function getCursorDatabasePaths(): CursorDatabasePaths {
return {
macOS: join(homedir(), 'Library/Application Support/Cursor/User/globalStorage/state.vscdb'),
windows: join(homedir(), 'AppData/Roaming/Cursor/User/globalStorage/state.vscdb'),
linux: join(homedir(), '.config/Cursor/User/globalStorage/state.vscdb')
};
}
/**
* Detect the current operating system
* @returns The detected operating system as a string
*/
export function detectOperatingSystem(): 'macOS' | 'windows' | 'linux' | 'unknown' {
const currentPlatform = platform();
switch (currentPlatform) {
case 'darwin':
return 'macOS';
case 'win32':
return 'windows';
case 'linux':
return 'linux';
default:
return 'unknown';
}
}
/**
* Get the default database path for a specific operating system
* @param os The operating system identifier
* @returns The default database path for the OS
*/
export function getDefaultDatabasePath(os: string): string {
const paths = getCursorDatabasePaths();
switch (os) {
case 'macOS':
case 'darwin':
return paths.macOS;
case 'windows':
case 'win32':
return paths.windows;
case 'linux':
return paths.linux;
default:
// Fallback to Linux path for unknown operating systems
return paths.linux;
}
}
/**
* Check if the database file exists at the specified path
* @param path The path to verify
* @returns Object with verification result and optional error message
*/
export function verifyDatabasePath(path: string): { exists: boolean; error?: string } {
try {
if (!path) {
return { exists: false, error: 'Database path is empty' };
}
const resolvedPath = resolve(path);
const exists = existsSync(resolvedPath);
if (!exists) {
console.warn(`Database file not found at: ${resolvedPath}`);
return {
exists: false,
error: `Database file not found at: ${resolvedPath}. Make sure Cursor is installed and has been used to create conversations.`
};
}
return { exists: true };
} catch (error) {
const errorMessage = error instanceof Error ? error.message : 'Unknown error occurred';
return {
exists: false,
error: `Error verifying database path: ${errorMessage}`
};
}
}
/**
* Get user-configured database path from environment variables or configuration
* @returns The user-configured path if found, null otherwise
*/
export function getUserConfiguredDatabasePath(): string | null {
// Check environment variable first
const envPath = process.env.CURSOR_DB_PATH;
if (envPath) {
const resolvedPath = resolve(envPath.replace(/^~/, homedir()));
const verification = verifyDatabasePath(resolvedPath);
if (verification.exists) {
return resolvedPath;
} else {
console.warn(`User-configured database path is invalid: ${verification.error}`);
}
}
return null;
}
/**
* Main function to detect the appropriate Cursor database path
* Combines all detection mechanisms with proper fallback handling
* @returns The resolved database path
* @throws Error if no valid database path can be determined
*/
export function detectDatabasePath(): string {
// 1. Check for user-configured path first
const userConfiguredPath = getUserConfiguredDatabasePath();
if (userConfiguredPath) {
return userConfiguredPath;
}
// 2. Detect OS and use default path
const os = detectOperatingSystem();
const defaultPath = getDefaultDatabasePath(os);
const resolvedPath = resolve(defaultPath);
// 3. Verify the default path exists
const verification = verifyDatabasePath(resolvedPath);
if (verification.exists) {
return resolvedPath;
}
// 4. Implement fallback mechanisms
console.warn(`Default database path verification failed: ${verification.error}`);
// Try alternative common locations as fallbacks
const fallbackPaths = getFallbackDatabasePaths(os);
for (const fallbackPath of fallbackPaths) {
const resolvedFallback = resolve(fallbackPath);
const fallbackVerification = verifyDatabasePath(resolvedFallback);
if (fallbackVerification.exists) {
console.log(`Using fallback database path: ${resolvedFallback}`);
return resolvedFallback;
}
}
// If no valid path found, throw descriptive error
throw new Error(
`Unable to locate Cursor database file. Tried:\n` +
`- User configured: ${process.env.CURSOR_DB_PATH || 'Not set'}\n` +
`- Default (${os}): ${resolvedPath}\n` +
`- Fallback paths: ${fallbackPaths.join(', ')}\n\n` +
`Please ensure Cursor is installed and has been used to create conversations, ` +
`or set the CURSOR_DB_PATH environment variable to the correct database location.`
);
}
/**
* Get fallback database paths for the given operating system
* @param os The operating system identifier
* @returns Array of fallback paths to try
*/
function getFallbackDatabasePaths(os: string): string[] {
const fallbacks: string[] = [];
switch (os) {
case 'macOS':
case 'darwin':
fallbacks.push(
join(homedir(), 'Library/Application Support/Cursor/cursor.db'),
join(homedir(), 'Library/Application Support/Cursor/User/cursor.db'),
join(homedir(), 'Library/Application Support/Cursor/state.vscdb')
);
break;
case 'windows':
case 'win32':
fallbacks.push(
join(homedir(), 'AppData/Roaming/Cursor/cursor.db'),
join(homedir(), 'AppData/Roaming/Cursor/User/cursor.db'),
join(homedir(), 'AppData/Roaming/Cursor/state.vscdb')
);
break;
case 'linux':
fallbacks.push(
join(homedir(), '.config/Cursor/cursor.db'),
join(homedir(), '.config/Cursor/User/cursor.db'),
join(homedir(), '.config/Cursor/state.vscdb')
);
break;
default:
// For unknown OS, try Linux-style paths
fallbacks.push(
join(homedir(), '.config/Cursor/cursor.db'),
join(homedir(), '.config/Cursor/User/cursor.db'),
join(homedir(), '.config/Cursor/state.vscdb')
);
}
return fallbacks;
}
/**
* Automatically detect the Cursor database path for the current platform
* @deprecated Use detectDatabasePath() instead for more robust detection
*/
export function detectCursorDatabasePath(): string {
return detectDatabasePath();
}
/**
* Validate that the database path exists and is accessible
* @deprecated Use verifyDatabasePath() instead for consistent error handling
*/
export function validateDatabasePath(dbPath: string): { valid: boolean; error?: string } {
const verification = verifyDatabasePath(dbPath);
return {
valid: verification.exists,
error: verification.error
};
}
/**
* Create default database configuration
*/
export function createDefaultDatabaseConfig(customDbPath?: string): DatabaseConfig {
const dbPath = customDbPath || detectDatabasePath();
return {
dbPath,
maxConversations: 1000,
cacheEnabled: true,
minConversationSize: 100, // Reduced from 5000 to capture more conversations
resolveBubblesAutomatically: true
};
}
/**
* Extract composer ID from a composerData key
*/
export function extractComposerIdFromKey(key: string): string | null {
const match = key.match(/^composerData:(.+)$/);
return match ? match[1] : null;
}
/**
* Extract bubble ID components from a bubbleId key
*/
export function extractBubbleIdComponents(key: string): { composerId: string; bubbleId: string } | null {
const match = key.match(/^bubbleId:([^:]+):(.+)$/);
return match ? { composerId: match[1], bubbleId: match[2] } : null;
}
/**
* Generate a bubbleId key for modern format message lookup
*/
export function generateBubbleIdKey(composerId: string, bubbleId: string): string {
return `bubbleId:${composerId}:${bubbleId}`;
}
/**
* Check if a key is a composerData key
*/
export function isComposerDataKey(key: string): boolean {
return key.startsWith('composerData:');
}
/**
* Check if a key is a bubbleId key
*/
export function isBubbleIdKey(key: string): boolean {
return key.startsWith('bubbleId:');
}
/**
* Sanitize and validate conversation size filter
*/
export function sanitizeMinConversationSize(size?: number): number {
if (typeof size !== 'number' || size < 0) {
return 100; // Default minimum size (reduced from 5000)
}
return Math.floor(size);
}
/**
* Sanitize and validate limit parameter
*/
export function sanitizeLimit(limit?: number, maxLimit: number = 1000): number {
if (typeof limit !== 'number' || limit <= 0) {
return maxLimit; // Default to max limit instead of 10
}
return Math.min(Math.floor(limit), maxLimit);
}
/**
* Create SQL LIKE pattern for file pattern matching
*/
export function createFilePatternLike(pattern: string): string {
// Escape SQL special characters and convert glob patterns
return pattern
.replace(/[%_]/g, '\\$&') // Escape SQL wildcards
.replace(/\*/g, '%') // Convert * to SQL %
.replace(/\?/g, '_'); // Convert ? to SQL _
}
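// Example: '*.test.ts' → '%.test.ts' and 'config?.json' → 'config_.json'.
// Note: the backslash-escaped '%' and '_' are only treated as literals if the
// consuming LIKE clause declares ESCAPE '\' (an assumption about the caller).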
/**
* Validate and sanitize search query
*/
export function sanitizeSearchQuery(query: string): string {
if (typeof query !== 'string') {
throw new Error('Search query must be a string');
}
const trimmed = query.trim();
if (trimmed.length === 0) {
throw new Error('Search query cannot be empty');
}
if (trimmed.length > 1000) {
throw new Error('Search query is too long (max 1000 characters)');
}
return trimmed;
}
```
--------------------------------------------------------------------------------
/src/utils/cache.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import { Cache, createCache, CachePresets } from './cache.js';
describe('Cache', () => {
let cache: Cache;
beforeEach(() => {
cache = new Cache();
});
afterEach(() => {
cache.destroy();
});
describe('Basic Operations', () => {
it('should store and retrieve values', () => {
cache.set('key1', 'value1');
expect(cache.get('key1')).toBe('value1');
});
it('should return undefined for non-existent keys', () => {
expect(cache.get('nonexistent')).toBeUndefined();
});
it('should check if key exists', () => {
cache.set('key1', 'value1');
expect(cache.has('key1')).toBe(true);
expect(cache.has('nonexistent')).toBe(false);
});
it('should delete entries', () => {
cache.set('key1', 'value1');
expect(cache.has('key1')).toBe(true);
const deleted = cache.delete('key1');
expect(deleted).toBe(true);
expect(cache.has('key1')).toBe(false);
expect(cache.get('key1')).toBeUndefined();
});
it('should return false when deleting non-existent key', () => {
const deleted = cache.delete('nonexistent');
expect(deleted).toBe(false);
});
it('should clear all entries', () => {
cache.set('key1', 'value1');
cache.set('key2', 'value2');
expect(cache.size()).toBe(2);
cache.clear();
expect(cache.size()).toBe(0);
expect(cache.has('key1')).toBe(false);
expect(cache.has('key2')).toBe(false);
});
it('should track cache size', () => {
expect(cache.size()).toBe(0);
cache.set('key1', 'value1');
expect(cache.size()).toBe(1);
cache.set('key2', 'value2');
expect(cache.size()).toBe(2);
cache.delete('key1');
expect(cache.size()).toBe(1);
});
it('should get all keys', () => {
cache.set('key1', 'value1');
cache.set('key2', 'value2');
const keys = cache.keys();
expect(keys).toContain('key1');
expect(keys).toContain('key2');
expect(keys).toHaveLength(2);
});
it('should get all values', () => {
cache.set('key1', 'value1');
cache.set('key2', 'value2');
const values = cache.values();
expect(values).toContain('value1');
expect(values).toContain('value2');
expect(values).toHaveLength(2);
});
});
describe('TTL (Time-To-Live)', () => {
beforeEach(() => {
vi.useFakeTimers();
});
afterEach(() => {
vi.useRealTimers();
});
it('should expire entries after TTL', () => {
cache = new Cache({ defaultTTL: 1000, enableCleanup: false });
cache.set('key1', 'value1');
expect(cache.get('key1')).toBe('value1');
// Advance time by 1001ms (past TTL)
vi.advanceTimersByTime(1001);
expect(cache.get('key1')).toBeUndefined();
expect(cache.has('key1')).toBe(false);
});
it('should use custom TTL for individual entries', () => {
cache = new Cache({ defaultTTL: 5000, enableCleanup: false });
cache.set('key1', 'value1', 1000); // Custom TTL of 1 second
cache.set('key2', 'value2'); // Uses default TTL of 5 seconds
// Advance time by 1001ms
vi.advanceTimersByTime(1001);
expect(cache.get('key1')).toBeUndefined(); // Should be expired
expect(cache.get('key2')).toBe('value2'); // Should still exist
});
it('should handle entries with no TTL (never expire)', () => {
cache = new Cache({ defaultTTL: 0, enableCleanup: false });
cache.set('key1', 'value1');
// Advance time significantly
vi.advanceTimersByTime(10000);
expect(cache.get('key1')).toBe('value1');
});
it('should manually cleanup expired entries', () => {
cache = new Cache({ defaultTTL: 1000, enableCleanup: false });
cache.set('key1', 'value1');
cache.set('key2', 'value2');
expect(cache.size()).toBe(2);
// Advance time past TTL
vi.advanceTimersByTime(1001);
const cleanedCount = cache.cleanup();
expect(cleanedCount).toBe(2);
expect(cache.size()).toBe(0);
});
});
describe('Size Limits and Eviction', () => {
it('should evict entries when max size is reached (LRU)', () => {
cache = new Cache({ maxSize: 2, evictionPolicy: 'lru', enableCleanup: false });
cache.set('key1', 'value1');
cache.set('key2', 'value2');
expect(cache.size()).toBe(2);
// Access key1 to make it more recently used
cache.get('key1');
// Add third entry, should evict key2 (least recently used)
cache.set('key3', 'value3');
expect(cache.size()).toBe(2);
expect(cache.has('key1')).toBe(true);
expect(cache.has('key2')).toBe(false);
expect(cache.has('key3')).toBe(true);
});
it('should evict entries when max size is reached (FIFO)', () => {
cache = new Cache({ maxSize: 2, evictionPolicy: 'fifo', enableCleanup: false });
cache.set('key1', 'value1');
cache.set('key2', 'value2');
expect(cache.size()).toBe(2);
// Access key1 (shouldn't matter for FIFO)
cache.get('key1');
// Add third entry, should evict key1 (first in)
cache.set('key3', 'value3');
expect(cache.size()).toBe(2);
expect(cache.has('key1')).toBe(false);
expect(cache.has('key2')).toBe(true);
expect(cache.has('key3')).toBe(true);
});
it('should handle updating existing keys without eviction', () => {
cache = new Cache({ maxSize: 2, enableCleanup: false });
cache.set('key1', 'value1');
cache.set('key2', 'value2');
expect(cache.size()).toBe(2);
// Update existing key
cache.set('key1', 'updated_value1');
expect(cache.size()).toBe(2);
expect(cache.get('key1')).toBe('updated_value1');
expect(cache.has('key2')).toBe(true);
});
});
describe('Statistics', () => {
it('should track hits and misses', () => {
cache.set('key1', 'value1');
// Hit
cache.get('key1');
// Miss
cache.get('nonexistent');
const stats = cache.getStats();
expect(stats.hits).toBe(1);
expect(stats.misses).toBe(1);
expect(stats.hitRate).toBe(50);
});
it('should track evictions', () => {
cache = new Cache({ maxSize: 1, enableCleanup: false });
cache.set('key1', 'value1');
cache.set('key2', 'value2'); // Should evict key1
const stats = cache.getStats();
expect(stats.evictions).toBe(1);
});
it('should track expirations', () => {
vi.useFakeTimers();
cache = new Cache({ defaultTTL: 1000, enableCleanup: false });
cache.set('key1', 'value1');
// Advance time past TTL
vi.advanceTimersByTime(1001);
// Try to access expired entry
cache.get('key1');
const stats = cache.getStats();
expect(stats.expirations).toBe(1);
vi.useRealTimers();
});
it('should reset statistics', () => {
cache.set('key1', 'value1');
cache.get('key1'); // Hit
cache.get('nonexistent'); // Miss
let stats = cache.getStats();
expect(stats.hits).toBe(1);
expect(stats.misses).toBe(1);
cache.resetStats();
stats = cache.getStats();
expect(stats.hits).toBe(0);
expect(stats.misses).toBe(0);
expect(stats.hitRate).toBe(0);
});
});
describe('Automatic Cleanup', () => {
beforeEach(() => {
vi.useFakeTimers();
});
afterEach(() => {
vi.useRealTimers();
});
it('should automatically cleanup expired entries', () => {
cache = new Cache({
defaultTTL: 1000,
enableCleanup: true,
cleanupInterval: 500
});
cache.set('key1', 'value1');
expect(cache.size()).toBe(1);
// Advance time past TTL but before cleanup interval
vi.advanceTimersByTime(1001);
expect(cache.size()).toBe(1); // Still there, cleanup hasn't run
// Advance time to trigger cleanup
vi.advanceTimersByTime(500);
expect(cache.size()).toBe(0); // Should be cleaned up
});
it('should stop cleanup timer when destroyed', () => {
cache = new Cache({ enableCleanup: true, cleanupInterval: 100 });
const clearIntervalSpy = vi.spyOn(global, 'clearInterval');
cache.destroy();
expect(clearIntervalSpy).toHaveBeenCalled();
});
});
describe('Type Safety', () => {
it('should work with typed values', () => {
interface User {
id: number;
name: string;
}
const userCache = new Cache<User>();
const user: User = { id: 1, name: 'John' };
userCache.set('user1', user);
const retrieved = userCache.get('user1');
expect(retrieved).toEqual(user);
expect(retrieved?.id).toBe(1);
expect(retrieved?.name).toBe('John');
});
});
});
describe('createCache', () => {
it('should create a cache instance', () => {
const cache = createCache({ maxSize: 100 });
expect(cache).toBeInstanceOf(Cache);
cache.set('test', 'value');
expect(cache.get('test')).toBe('value');
cache.destroy();
});
});
describe('CachePresets', () => {
it('should provide predefined configurations', () => {
expect(CachePresets.small.maxSize).toBe(100);
expect(CachePresets.medium.maxSize).toBe(500);
expect(CachePresets.large.maxSize).toBe(2000);
expect(CachePresets.persistent.maxSize).toBe(1000);
expect(CachePresets.small.evictionPolicy).toBe('lru');
expect(CachePresets.persistent.evictionPolicy).toBe('fifo');
});
it('should work with preset configurations', () => {
const cache = new Cache(CachePresets.small);
cache.set('test', 'value');
expect(cache.get('test')).toBe('value');
const stats = cache.getStats();
expect(stats.maxSize).toBe(100);
cache.destroy();
});
});
```
--------------------------------------------------------------------------------
/src/database/parser.ts:
--------------------------------------------------------------------------------
```typescript
import type {
CursorConversation,
LegacyCursorConversation,
ModernCursorConversation,
ConversationMessage,
BubbleMessage,
CodeBlock
} from './types.js';
import {
isLegacyConversation,
isModernConversation
} from './types.js';
export class ConversationParser {
/**
* Parse conversation JSON data
*/
parseConversationJSON(rawData: string): CursorConversation {
try {
const parsed = JSON.parse(rawData);
if (!this.isValidConversation(parsed)) {
throw new Error('Invalid conversation format');
}
return parsed as CursorConversation;
} catch (error) {
throw new Error(`Failed to parse conversation JSON: ${error instanceof Error ? error.message : 'Unknown error'}`);
}
}
/**
* Validate conversation structure
*/
private isValidConversation(data: any): boolean {
if (!data || typeof data !== 'object') {
return false;
}
if (typeof data.composerId !== 'string') {
return false;
}
if (Array.isArray(data.conversation)) {
return this.isValidLegacyConversation(data);
}
if (typeof data._v === 'number' && Array.isArray(data.fullConversationHeadersOnly)) {
return this.isValidModernConversation(data);
}
return false;
}
/**
* Validate legacy conversation format
*/
private isValidLegacyConversation(data: any): boolean {
if (!Array.isArray(data.conversation)) {
return false;
}
for (const message of data.conversation) {
if (!this.isValidMessage(message)) {
return false;
}
}
return true;
}
/**
* Validate modern conversation format
*/
private isValidModernConversation(data: any): boolean {
if (!Array.isArray(data.fullConversationHeadersOnly)) {
return false;
}
for (const header of data.fullConversationHeadersOnly) {
if (!this.isValidConversationHeader(header)) {
return false;
}
}
return true;
}
/**
* Validate message structure
*/
private isValidMessage(message: any): boolean {
return (
message &&
typeof message === 'object' &&
typeof message.type === 'number' &&
typeof message.bubbleId === 'string' &&
typeof message.text === 'string'
);
}
/**
* Validate conversation header structure
*/
private isValidConversationHeader(header: any): boolean {
return (
header &&
typeof header === 'object' &&
typeof header.type === 'number' &&
typeof header.bubbleId === 'string'
);
}
/**
* Extract messages from conversation (legacy format only)
*/
extractMessages(conversation: CursorConversation): ConversationMessage[] {
if (isLegacyConversation(conversation)) {
return conversation.conversation;
}
// For modern format, messages need to be resolved separately
return [];
}
/**
* Extract code blocks from conversation
*/
extractCodeBlocks(conversation: CursorConversation): CodeBlock[] {
const codeBlocks: CodeBlock[] = [];
if (isLegacyConversation(conversation)) {
for (const message of conversation.conversation) {
if (message.suggestedCodeBlocks) {
codeBlocks.push(...message.suggestedCodeBlocks);
}
}
}
return codeBlocks;
}
/**
* Extract file references from conversation
*/
extractFileReferences(conversation: CursorConversation): string[] {
const files: string[] = [];
if (isLegacyConversation(conversation)) {
for (const message of conversation.conversation) {
if (message.relevantFiles) {
files.push(...message.relevantFiles);
}
}
}
return Array.from(new Set(files));
}
/**
* Extract attached folder references from conversation
*/
extractAttachedFolders(conversation: CursorConversation): string[] {
const folders: string[] = [];
if (isLegacyConversation(conversation)) {
for (const message of conversation.conversation) {
if (message.attachedFoldersNew) {
folders.push(...message.attachedFoldersNew);
}
}
}
return Array.from(new Set(folders));
}
/**
* Extract timestamps from conversation (limited availability)
*/
extractTimestamps(conversation: CursorConversation): Date[] {
const timestamps: Date[] = [];
if (isLegacyConversation(conversation)) {
for (const message of conversation.conversation) {
if (message.timestamp) {
try {
const date = new Date(message.timestamp);
if (!isNaN(date.getTime())) {
timestamps.push(date);
}
} catch (error) {
// Skip invalid timestamps
}
}
}
}
return timestamps;
}
/**
* Get conversation metadata
*/
getConversationMetadata(conversation: CursorConversation): {
format: 'legacy' | 'modern';
messageCount: number;
hasCodeBlocks: boolean;
codeBlockCount: number;
fileCount: number;
folderCount: number;
hasStoredSummary: boolean;
size: number;
} {
const format = isLegacyConversation(conversation) ? 'legacy' : 'modern';
const size = JSON.stringify(conversation).length;
let messageCount = 0;
let codeBlockCount = 0;
let fileCount = 0;
let folderCount = 0;
if (isLegacyConversation(conversation)) {
messageCount = conversation.conversation.length;
for (const message of conversation.conversation) {
if (message.suggestedCodeBlocks) {
codeBlockCount += message.suggestedCodeBlocks.length;
}
if (message.relevantFiles) {
fileCount += message.relevantFiles.length;
}
if (message.attachedFoldersNew) {
folderCount += message.attachedFoldersNew.length;
}
}
} else if (isModernConversation(conversation)) {
messageCount = conversation.fullConversationHeadersOnly.length;
// Note: For modern format, accurate counts would require resolving bubble messages
}
const hasCodeBlocks = codeBlockCount > 0;
const hasStoredSummary = !!(conversation.text || conversation.richText || (conversation as any).storedSummary);
return {
format,
messageCount,
hasCodeBlocks,
codeBlockCount,
fileCount,
folderCount,
hasStoredSummary,
size
};
}
/**
* Extract user messages only
*/
extractUserMessages(conversation: CursorConversation): ConversationMessage[] {
if (isLegacyConversation(conversation)) {
return conversation.conversation.filter(message => message.type === 1);
}
return [];
}
/**
* Extract AI messages only
*/
extractAIMessages(conversation: CursorConversation): ConversationMessage[] {
if (isLegacyConversation(conversation)) {
return conversation.conversation.filter(message => message.type === 2);
}
return [];
}
/**
* Get first user message
*/
getFirstUserMessage(conversation: CursorConversation): ConversationMessage | null {
if (isLegacyConversation(conversation)) {
const userMessages = conversation.conversation.filter(message => message.type === 1);
return userMessages.length > 0 ? userMessages[0] : null;
}
return null;
}
/**
* Get last message
*/
getLastMessage(conversation: CursorConversation): ConversationMessage | null {
if (isLegacyConversation(conversation)) {
const messages = conversation.conversation;
return messages.length > 0 ? messages[messages.length - 1] : null;
}
return null;
}
/**
* Search for text within conversation messages
*/
searchInConversation(conversation: CursorConversation, query: string, caseSensitive: boolean = false): {
messageIndex: number;
message: ConversationMessage;
matchPositions: number[];
}[] {
const results: {
messageIndex: number;
message: ConversationMessage;
matchPositions: number[];
}[] = [];
if (isLegacyConversation(conversation)) {
const searchQuery = caseSensitive ? query : query.toLowerCase();
conversation.conversation.forEach((message, index) => {
const text = caseSensitive ? message.text : message.text.toLowerCase();
const matchPositions: number[] = [];
let position = 0;
while (position < text.length) {
const found = text.indexOf(searchQuery, position);
if (found === -1) break;
matchPositions.push(found);
position = found + 1;
}
if (matchPositions.length > 0) {
results.push({
messageIndex: index,
message,
matchPositions
});
}
});
}
return results;
}
/**
* Check if conversation contains summarization content
*/
containsSummarization(conversation: CursorConversation): boolean {
const summarizationKeywords = ['summarization', 'summarize', 'summary'];
if (isLegacyConversation(conversation)) {
for (const message of conversation.conversation) {
const text = message.text.toLowerCase();
if (summarizationKeywords.some(keyword => text.includes(keyword))) {
return true;
}
}
}
// Also check stored summary fields
const text = conversation.text?.toLowerCase() || '';
const richText = conversation.richText?.toLowerCase() || '';
return summarizationKeywords.some(keyword =>
text.includes(keyword) || richText.includes(keyword)
);
}
/**
* Parse bubble message JSON
*/
parseBubbleMessage(rawData: string): BubbleMessage {
try {
const parsed = JSON.parse(rawData);
if (!this.isValidBubbleMessage(parsed)) {
throw new Error('Invalid bubble message format');
}
return parsed as BubbleMessage;
} catch (error) {
throw new Error(`Failed to parse bubble message JSON: ${error instanceof Error ? error.message : 'Unknown error'}`);
}
}
/**
* Validate bubble message structure
*/
private isValidBubbleMessage(data: any): boolean {
return (
data &&
typeof data === 'object' &&
typeof data.type === 'number' &&
typeof data.text === 'string'
);
}
}
```
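The parser methods above operate on an already-loaded conversation object, so they compose cheaply: a metadata pass first, heavier per-message work only when warranted. A minimal usage sketch — the class name `ConversationParser`, its no-argument constructor, the `inspect` wrapper, and the search keyword are all assumptions for illustration:

```typescript
import { ConversationParser } from './parser.js';
import type { CursorConversation } from './types.js';

// `conversation` is assumed to have been loaded and JSON.parse'd already,
// e.g. from a composerData:<composerId> row in the database.
function inspect(conversation: CursorConversation): void {
  const parser = new ConversationParser();

  // Cheap metadata pass before committing to per-message work
  const meta = parser.getConversationMetadata(conversation);
  console.log(`${meta.format} conversation: ${meta.messageCount} messages, ${meta.size} bytes`);

  if (meta.format === 'legacy') {
    // Case-insensitive substring search across legacy messages
    for (const hit of parser.searchInConversation(conversation, 'refactor')) {
      console.log(`message #${hit.messageIndex}: ${hit.matchPositions.length} match(es)`);
    }
  }
}
```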
--------------------------------------------------------------------------------
/src/utils/database-utils.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { homedir, platform } from 'os';
import { existsSync } from 'fs';
import { join, resolve } from 'path';
import {
detectOperatingSystem,
getDefaultDatabasePath,
verifyDatabasePath,
getUserConfiguredDatabasePath,
detectDatabasePath,
detectCursorDatabasePath,
validateDatabasePath,
createDefaultDatabaseConfig,
getCursorDatabasePaths
} from './database-utils.js';
// Mock the os module
vi.mock('os', () => ({
platform: vi.fn(),
homedir: vi.fn()
}));
// Mock the fs module
vi.mock('fs', () => ({
existsSync: vi.fn()
}));
const mockPlatform = vi.mocked(platform);
const mockHomedir = vi.mocked(homedir);
const mockExistsSync = vi.mocked(existsSync);
describe('Database Utils', () => {
beforeEach(() => {
vi.clearAllMocks();
// Silence console output by spying on the global console object; the code
// under test calls the global console, which vi.mock('console') would not
// intercept since globals are not resolved through the module system.
vi.spyOn(console, 'warn').mockImplementation(() => {});
vi.spyOn(console, 'log').mockImplementation(() => {});
mockHomedir.mockReturnValue('/home/testuser');
delete process.env.CURSOR_DB_PATH;
});
afterEach(() => {
vi.restoreAllMocks();
});
describe('detectOperatingSystem', () => {
it('should detect macOS correctly', () => {
mockPlatform.mockReturnValue('darwin');
expect(detectOperatingSystem()).toBe('macOS');
});
it('should detect Windows correctly', () => {
mockPlatform.mockReturnValue('win32');
expect(detectOperatingSystem()).toBe('windows');
});
it('should detect Linux correctly', () => {
mockPlatform.mockReturnValue('linux');
expect(detectOperatingSystem()).toBe('linux');
});
it('should return unknown for unrecognized platforms', () => {
mockPlatform.mockReturnValue('freebsd');
expect(detectOperatingSystem()).toBe('unknown');
});
});
describe('getDefaultDatabasePath', () => {
beforeEach(() => {
mockHomedir.mockReturnValue('/home/testuser');
});
it('should return macOS path for macOS', () => {
const expected = join('/home/testuser', 'Library/Application Support/Cursor/User/globalStorage/state.vscdb');
expect(getDefaultDatabasePath('macOS')).toBe(expected);
expect(getDefaultDatabasePath('darwin')).toBe(expected);
});
it('should return Windows path for Windows', () => {
const expected = join('/home/testuser', 'AppData/Roaming/Cursor/User/globalStorage/state.vscdb');
expect(getDefaultDatabasePath('windows')).toBe(expected);
expect(getDefaultDatabasePath('win32')).toBe(expected);
});
it('should return Linux path for Linux', () => {
const expected = join('/home/testuser', '.config/Cursor/User/globalStorage/state.vscdb');
expect(getDefaultDatabasePath('linux')).toBe(expected);
});
it('should fall back to the Linux path for unknown OS', () => {
const expected = join('/home/testuser', '.config/Cursor/User/globalStorage/state.vscdb');
expect(getDefaultDatabasePath('unknown')).toBe(expected);
});
});
describe('verifyDatabasePath', () => {
it('should return exists: false for empty path', () => {
const result = verifyDatabasePath('');
expect(result.exists).toBe(false);
expect(result.error).toBe('Database path is empty');
});
it('should return exists: true when file exists', () => {
mockExistsSync.mockReturnValue(true);
const result = verifyDatabasePath('/path/to/db.vscdb');
expect(result.exists).toBe(true);
expect(result.error).toBeUndefined();
});
it('should return exists: false when file does not exist', () => {
mockExistsSync.mockReturnValue(false);
const result = verifyDatabasePath('/path/to/nonexistent.vscdb');
expect(result.exists).toBe(false);
expect(result.error).toContain('Database file not found');
});
it('should handle file system errors gracefully', () => {
mockExistsSync.mockImplementation(() => {
throw new Error('Permission denied');
});
const result = verifyDatabasePath('/path/to/db.vscdb');
expect(result.exists).toBe(false);
expect(result.error).toContain('Error verifying database path: Permission denied');
});
});
describe('getUserConfiguredDatabasePath', () => {
it('should return null when no environment variable is set', () => {
expect(getUserConfiguredDatabasePath()).toBeNull();
});
it('should return resolved path when environment variable is set and file exists', () => {
process.env.CURSOR_DB_PATH = '~/custom/path/db.vscdb';
mockHomedir.mockReturnValue('/home/testuser');
mockExistsSync.mockReturnValue(true);
const result = getUserConfiguredDatabasePath();
expect(result).toBe(resolve('/home/testuser/custom/path/db.vscdb'));
});
it('should return null when environment variable is set but file does not exist', () => {
process.env.CURSOR_DB_PATH = '~/custom/path/nonexistent.vscdb';
mockHomedir.mockReturnValue('/home/testuser');
mockExistsSync.mockReturnValue(false);
const result = getUserConfiguredDatabasePath();
expect(result).toBeNull();
});
it('should handle absolute paths correctly', () => {
process.env.CURSOR_DB_PATH = '/absolute/path/db.vscdb';
mockExistsSync.mockReturnValue(true);
const result = getUserConfiguredDatabasePath();
expect(result).toBe(resolve('/absolute/path/db.vscdb'));
});
});
describe('detectDatabasePath', () => {
beforeEach(() => {
mockHomedir.mockReturnValue('/home/testuser');
mockPlatform.mockReturnValue('linux');
});
it('should return user-configured path when available', () => {
process.env.CURSOR_DB_PATH = '/custom/path/db.vscdb';
mockExistsSync.mockReturnValue(true);
const result = detectDatabasePath();
expect(result).toBe(resolve('/custom/path/db.vscdb'));
});
it('should return default path when user config is not available but default exists', () => {
mockExistsSync.mockReturnValue(true);
const result = detectDatabasePath();
const expectedPath = resolve(join('/home/testuser', '.config/Cursor/User/globalStorage/state.vscdb'));
expect(result).toBe(expectedPath);
});
it('should try fallback paths when default does not exist', () => {
// First call (default path) returns false, second call (fallback) returns true
mockExistsSync
.mockReturnValueOnce(false) // Default path doesn't exist
.mockReturnValueOnce(true); // First fallback exists
const result = detectDatabasePath();
// Should find the first fallback path
expect(result).toBeDefined();
expect(mockExistsSync).toHaveBeenCalledTimes(2);
});
it('should throw error when no valid path is found', () => {
mockExistsSync.mockReturnValue(false); // All paths fail
expect(() => detectDatabasePath()).toThrow('Unable to locate Cursor database file');
});
it('should work correctly for macOS', () => {
mockPlatform.mockReturnValue('darwin');
mockExistsSync.mockReturnValue(true);
const result = detectDatabasePath();
const expectedPath = resolve(join('/home/testuser', 'Library/Application Support/Cursor/User/globalStorage/state.vscdb'));
expect(result).toBe(expectedPath);
});
it('should work correctly for Windows', () => {
mockPlatform.mockReturnValue('win32');
mockExistsSync.mockReturnValue(true);
const result = detectDatabasePath();
const expectedPath = resolve(join('/home/testuser', 'AppData/Roaming/Cursor/User/globalStorage/state.vscdb'));
expect(result).toBe(expectedPath);
});
});
describe('validateDatabasePath (deprecated)', () => {
it('should work as a wrapper around verifyDatabasePath', () => {
mockExistsSync.mockReturnValue(true);
const result = validateDatabasePath('/path/to/db.vscdb');
expect(result.valid).toBe(true);
expect(result.error).toBeUndefined();
});
it('should return invalid for non-existent files', () => {
mockExistsSync.mockReturnValue(false);
const result = validateDatabasePath('/path/to/nonexistent.vscdb');
expect(result.valid).toBe(false);
expect(result.error).toContain('Database file not found');
});
});
describe('detectCursorDatabasePath (deprecated)', () => {
it('should work as a wrapper around detectDatabasePath', () => {
mockPlatform.mockReturnValue('linux');
mockHomedir.mockReturnValue('/home/testuser');
mockExistsSync.mockReturnValue(true);
const result = detectCursorDatabasePath();
const expectedPath = resolve(join('/home/testuser', '.config/Cursor/User/globalStorage/state.vscdb'));
expect(result).toBe(expectedPath);
});
});
describe('createDefaultDatabaseConfig', () => {
beforeEach(() => {
mockPlatform.mockReturnValue('linux');
mockHomedir.mockReturnValue('/home/testuser');
mockExistsSync.mockReturnValue(true);
});
it('should use custom path when provided', () => {
const customPath = '/custom/path/db.vscdb';
const config = createDefaultDatabaseConfig(customPath);
expect(config.dbPath).toBe(customPath);
expect(config.maxConversations).toBe(1000);
expect(config.cacheEnabled).toBe(true);
expect(config.minConversationSize).toBe(5000);
expect(config.resolveBubblesAutomatically).toBe(true);
});
it('should detect path automatically when no custom path provided', () => {
const config = createDefaultDatabaseConfig();
const expectedPath = resolve(join('/home/testuser', '.config/Cursor/User/globalStorage/state.vscdb'));
expect(config.dbPath).toBe(expectedPath);
});
});
describe('getCursorDatabasePaths function', () => {
it('should return correct paths for all platforms', () => {
// Mock homedir for consistent testing
mockHomedir.mockReturnValue('/home/testuser');
const paths = getCursorDatabasePaths();
expect(paths.macOS).toBe(
join('/home/testuser', 'Library/Application Support/Cursor/User/globalStorage/state.vscdb')
);
expect(paths.windows).toBe(
join('/home/testuser', 'AppData/Roaming/Cursor/User/globalStorage/state.vscdb')
);
expect(paths.linux).toBe(
join('/home/testuser', '.config/Cursor/User/globalStorage/state.vscdb')
);
});
});
describe('Edge cases and error handling', () => {
it('should handle null/undefined paths gracefully', () => {
const result = verifyDatabasePath(null as any);
expect(result.exists).toBe(false);
expect(result.error).toContain('Database path is empty');
});
it('should handle environment variable with tilde expansion', () => {
process.env.CURSOR_DB_PATH = '~/Documents/cursor.db';
mockHomedir.mockReturnValue('/Users/testuser');
mockExistsSync.mockReturnValue(true);
const result = getUserConfiguredDatabasePath();
expect(result).toBe(resolve('/Users/testuser/Documents/cursor.db'));
});
it('should handle unknown operating systems with fallback', () => {
mockPlatform.mockReturnValue('aix');
mockHomedir.mockReturnValue('/home/testuser');
mockExistsSync.mockReturnValue(true);
const result = detectDatabasePath();
// Should fall back to the Linux-style path
const expectedPath = resolve(join('/home/testuser', '.config/Cursor/User/globalStorage/state.vscdb'));
expect(result).toBe(expectedPath);
});
});
});
```
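Taken together, these tests pin down a clear resolution order: an explicit `CURSOR_DB_PATH` environment variable wins (with `~` expanded against the home directory), then the OS-specific default location, then a set of fallback paths, and finally a thrown error when nothing on disk matches. A short consumer-side sketch of that contract — the logging and error handling here are illustrative, not from the source:

```typescript
import { detectDatabasePath, createDefaultDatabaseConfig } from './database-utils.js';

try {
  // Resolution order (as exercised by the tests above):
  // 1. CURSOR_DB_PATH env var, tilde-expanded, if the file exists
  // 2. the per-OS default under the user's home directory
  // 3. known fallback locations
  const dbPath = detectDatabasePath();
  const config = createDefaultDatabaseConfig(dbPath);
  console.log(`Using Cursor database at ${config.dbPath}`);
} catch {
  // detectDatabasePath throws when every candidate path is missing
  console.error('Could not locate state.vscdb; set CURSOR_DB_PATH explicitly.');
}
```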
--------------------------------------------------------------------------------
/src/database/types.ts:
--------------------------------------------------------------------------------
```typescript
// Type definitions for Cursor chat data
// Supports both legacy and modern conversation formats
export interface CursorDiskKV {
key: string;
value: string;
}
// Key patterns in the Cursor database
export type CursorKeyPatterns = {
composerData: `composerData:${string}`;
bubbleId: `bubbleId:${string}:${string}`;
messageRequestContext: `messageRequestContext:${string}:${string}`;
checkpointId: `checkpointId:${string}`;
codeBlockDiff: `codeBlockDiff:${string}`;
};
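// Illustrative key shapes (identifier segments are placeholders, not real IDs):
//   composerData:<composerId>          -> full conversation JSON (legacy or modern)
//   bubbleId:<composerId>:<bubbleId>   -> individual message body (modern format)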
// Legacy format conversation structure
export interface LegacyCursorConversation {
composerId: string;
conversation: ConversationMessage[];
hasLoaded: boolean;
text: string; // May contain conversation summary (often empty)
richText: string; // May contain formatted summary (often empty)
}
// Modern format conversation structure
export interface ModernCursorConversation {
_v: number; // Version field (e.g., 3)
composerId: string;
richText: string; // May contain formatted summary (often empty)
hasLoaded: boolean;
text: string; // May contain conversation summary (often empty)
fullConversationHeadersOnly: ConversationHeader[];
name?: string; // Conversation title (Modern format only)
latestConversationSummary?: { // AI-generated summary structure
summary: {
summary: string; // The actual AI-generated summary text
};
};
context?: { // Context information including file selections
fileSelections?: Array<{
uri: {
fsPath: string; // Full file system path
path: string; // Path (usually same as fsPath)
};
}>;
// ... other context fields may exist
};
}
// Union type for both conversation formats
export type CursorConversation = LegacyCursorConversation | ModernCursorConversation;
// Message structure for legacy format
export interface ConversationMessage {
type: number; // 1 = user, 2 = AI
bubbleId: string;
attachedFoldersNew: string[];
suggestedCodeBlocks: CodeBlock[];
relevantFiles: string[];
text: string; // Message content
timestamp?: string;
context?: { // Context information including file selections
fileSelections?: Array<{
uri: {
fsPath: string; // Full file system path
path: string; // Path (usually same as fsPath)
};
}>;
// ... other context fields may exist
};
}
// Header structure for modern format
export interface ConversationHeader {
bubbleId: string;
type: number; // 1 = user, 2 = AI
serverBubbleId?: string; // For AI responses
}
// Individual message for modern format (stored separately)
export interface BubbleMessage {
text: string; // Message content
type: number;
attachedFoldersNew?: string[];
suggestedCodeBlocks?: CodeBlock[];
relevantFiles?: string[];
timestamp?: string;
context?: { // Context information including file selections
fileSelections?: Array<{
uri: {
fsPath: string; // Full file system path
path: string; // Path (usually same as fsPath)
};
}>;
// ... other context fields may exist
};
}
// Code block structure
export interface CodeBlock {
language: string;
code: string;
filename?: string;
}
// Conversation summary data
export interface ConversationSummary {
composerId: string;
format: 'legacy' | 'modern';
messageCount: number;
hasCodeBlocks: boolean;
codeBlockCount: number;
relevantFiles: string[];
attachedFolders: string[];
firstMessage?: string; // Truncated first user message
lastMessage?: string; // Last message in conversation
storedSummary?: string; // From text field if available
storedRichText?: string; // From richText field if available
title?: string; // From 'name' field (Modern format only)
aiGeneratedSummary?: string; // From 'latestConversationSummary.summary.summary'
conversationSize: number; // Size in bytes
}
// Search result structure
export interface ConversationSearchResult {
composerId: string;
format: 'legacy' | 'modern';
matches: SearchMatch[];
relevantFiles: string[];
attachedFolders: string[];
}
export interface SearchMatch {
messageIndex?: number; // For legacy format
bubbleId?: string; // For modern format
text: string;
context: string; // Surrounding text
type: number; // 1 = user, 2 = AI
}
// Statistics structure
export interface ConversationStats {
totalConversations: number;
legacyFormatCount: number;
modernFormatCount: number;
averageConversationSize: number;
totalConversationsWithCode: number;
mostCommonFiles: Array<{ file: string; count: number }>;
mostCommonFolders: Array<{ folder: string; count: number }>;
}
// Filter options for conversation queries
export interface ConversationFilters {
dateRange?: { start: Date; end: Date }; // ⚠️ Limited - no reliable timestamps
minLength?: number; // Filter by conversation size
keywords?: string[]; // Search in conversation content
projectPath?: string; // Filter by attached folders
relevantFiles?: string[]; // Filter by specific files mentioned
filePattern?: string; // Filter by file pattern (e.g., "*.tsx")
hasCodeBlocks?: boolean; // Filter conversations with code
format?: 'legacy' | 'modern' | 'both'; // Filter by conversation format
}
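// Illustrative filter literal (values are examples only):
// const filters: ConversationFilters = {
//   keywords: ['refactor'],
//   filePattern: '*.tsx',
//   hasCodeBlocks: true,
//   format: 'modern'
// };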
// Summary options
export interface SummaryOptions {
includeFirstMessage?: boolean; // Include truncated first message
includeLastMessage?: boolean; // Include last message
maxFirstMessageLength?: number; // Max length for first message
maxLastMessageLength?: number; // Max length for last message
includeStoredSummary?: boolean; // Include text/richText fields
includeFileList?: boolean; // Include relevant files
includeCodeBlockCount?: boolean; // Count code blocks
includeAttachedFolders?: boolean; // Include attached folders
includeMetadata?: boolean; // Include metadata information
includeTitle?: boolean; // Include conversation title (Modern format)
includeAIGeneratedSummary?: boolean; // Include AI-generated summary (Modern format)
}
// Database configuration
export interface DatabaseConfig {
dbPath: string;
maxConversations?: number; // Limit for performance
cacheEnabled?: boolean; // Cache frequently accessed data
minConversationSize?: number; // Minimum size to consider valid
resolveBubblesAutomatically?: boolean; // Auto-resolve bubble messages
}
// Platform-specific database paths
export interface CursorDatabasePaths {
macOS: string;
windows: string;
linux: string;
}
// Type guards for format detection
export function isLegacyConversation(conversation: any): conversation is LegacyCursorConversation {
return conversation &&
typeof conversation.composerId === 'string' &&
Array.isArray(conversation.conversation) &&
typeof conversation._v !== 'number'; // legacy objects lack the numeric _v version field
}
export function isModernConversation(conversation: any): conversation is ModernCursorConversation {
return conversation &&
typeof conversation.composerId === 'string' &&
typeof conversation._v === 'number' &&
Array.isArray(conversation.fullConversationHeadersOnly);
}
// New types for analytics tools
export interface ConversationAnalytics {
overview: {
totalConversations: number;
totalMessages: number;
totalCodeBlocks: number;
averageConversationSize: number;
averageMessagesPerConversation: number;
totalFiles: number;
totalFolders: number;
};
breakdowns: {
files?: Array<{
file: string;
mentions: number;
conversations: string[];
extension: string;
projectPath?: string;
}>;
languages?: Array<{
language: string;
codeBlocks: number;
conversations: string[];
averageCodeLength: number;
}>;
temporal?: Array<{
period: string;
conversationCount: number;
messageCount: number;
averageSize: number;
conversationIds: string[];
}>;
size?: {
distribution: number[];
percentiles: Record<string, number>;
bins: Array<{ range: string; count: number }>;
};
};
scope: {
type: string;
projectPath?: string;
recentDays?: number;
totalScanned: number;
};
// Include conversation IDs for follow-up analysis
conversationIds: string[];
// Include basic conversation info for immediate access
conversations: Array<{
composerId: string;
messageCount: number;
size: number;
files: string[];
hasCodeBlocks: boolean;
}>;
}
export interface RelatedConversationsResult {
reference: {
composerId: string;
files: string[];
folders: string[];
languages: string[];
messageCount: number;
size: number;
};
related: Array<{
composerId: string;
relationshipScore: number;
relationships: {
sharedFiles?: string[];
sharedFolders?: string[];
sharedLanguages?: string[];
sizeSimilarity?: number;
temporalProximity?: number;
};
summary: string;
scoreBreakdown?: Record<string, number>;
}>;
}
export interface ExtractedElements {
conversations: Array<{
composerId: string;
format: 'legacy' | 'modern';
elements: {
files?: Array<{
path: string;
extension: string;
context?: string;
messageType: 'user' | 'assistant';
}>;
folders?: Array<{
path: string;
context?: string;
}>;
languages?: Array<{
language: string;
codeBlocks: number;
totalLines: number;
averageLength: number;
}>;
codeblocks?: Array<{
language: string;
code: string;
filename?: string;
lineCount: number;
messageType: 'user' | 'assistant';
context?: string;
}>;
metadata?: {
messageCount: number;
size: number;
format: 'legacy' | 'modern';
userMessages: number;
assistantMessages: number;
hasCodeBlocks: boolean;
hasFileReferences: boolean;
};
structure?: {
messageFlow: Array<{ type: 'user' | 'assistant'; length: number; hasCode: boolean }>;
conversationPattern: string;
averageMessageLength: number;
longestMessage: number;
};
};
}>;
}
export interface ExportedData {
format: string;
data: any;
metadata: {
exportedCount: number;
totalAvailable: number;
exportTimestamp: string;
filters: Record<string, any>;
};
}
```
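The two type guards at the bottom are the intended branch point for format-specific logic: legacy conversations embed their messages inline, while modern ones store only headers whose bodies live under separate `bubbleId:` keys. A short sketch using only the exports defined above:

```typescript
import {
  isLegacyConversation,
  isModernConversation,
  type CursorConversation,
} from './types.js';

function describeConversation(conversation: CursorConversation): string {
  if (isLegacyConversation(conversation)) {
    // Legacy: message objects are embedded directly in the conversation
    return `legacy conversation with ${conversation.conversation.length} messages`;
  }
  if (isModernConversation(conversation)) {
    // Modern: only headers are stored here; bodies must be resolved from
    // their bubbleId:<composerId>:<bubbleId> keys elsewhere in the store
    return `modern (v${conversation._v}) conversation with ${conversation.fullConversationHeadersOnly.length} headers`;
  }
  return 'unrecognized conversation format';
}
```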