This is page 2 of 3. Use http://codebase.md/vltansky/cursor-chat-history-mcp?page={x} to view the full context.
# Directory Structure
```
├── .cursor
│   ├── mcp.json
│   └── rules
│       ├── cursor_rules.mdc
│       ├── dev_workflow.mdc
│       ├── general.mdc
│       ├── mcp.mdc
│       ├── project-overview.mdc
│       ├── self_improve.mdc
│       ├── taskmaster.mdc
│       ├── tests.mdc
│       └── typescript-patterns.mdc
├── .github
│   ├── dependabot.yml
│   └── workflows
│       └── ci.yml
├── .gitignore
├── .roo
│   ├── rules
│   │   ├── dev_workflow.md
│   │   ├── roo_rules.md
│   │   ├── self_improve.md
│   │   └── taskmaster.md
│   ├── rules-architect
│   │   └── architect-rules
│   ├── rules-ask
│   │   └── ask-rules
│   ├── rules-boomerang
│   │   └── boomerang-rules
│   ├── rules-code
│   │   └── code-rules
│   ├── rules-debug
│   │   └── debug-rules
│   └── rules-test
│       └── test-rules
├── .roomodes
├── .taskmaster
│   ├── .taskmaster
│   │   └── config.json
│   ├── config.json
│   └── reports
│       └── task-complexity-report.json
├── .taskmasterconfig
├── .windsurfrules
├── docs
│   ├── research.md
│   └── use-cases.md
├── LICENSE
├── package.json
├── README.md
├── scripts
│   └── example_prd.txt
├── src
│   ├── database
│   │   ├── parser.test.ts
│   │   ├── parser.ts
│   │   ├── reader.test.ts
│   │   ├── reader.ts
│   │   └── types.ts
│   ├── server.test.ts
│   ├── server.ts
│   ├── tools
│   │   ├── analytics-tools.ts
│   │   ├── conversation-tools.test.ts
│   │   ├── conversation-tools.ts
│   │   └── extraction-tools.ts
│   └── utils
│       ├── analytics.ts
│       ├── cache.test.ts
│       ├── cache.ts
│       ├── database-utils.test.ts
│       ├── database-utils.ts
│       ├── errors.test.ts
│       ├── errors.ts
│       ├── exporters.ts
│       ├── formatter.ts
│       ├── relationships.ts
│       ├── validation.test.ts
│       └── validation.ts
├── tsconfig.json
├── vitest.config.ts
└── yarn.lock
```
# Files
--------------------------------------------------------------------------------
/src/utils/validation.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect } from 'vitest';
import {
  validateRequired,
  validateStringLength,
  validateNumberRange,
  validateArrayLength,
  validateEnum,
  validateConversationId,
  validateBubbleId,
  validateSearchQuery,
  validateFilePath,
  validateProjectPath,
  validateWithSchema,
  validateBoolean,
  validateLimit,
  validateOffset,
  validateContextLines
} from './validation.js';
import {
  MissingParameterError,
  InvalidParameterError,
  ValidationError
} from './errors.js';
import { z } from 'zod';

describe('Validation Utils', () => {
  describe('validateRequired', () => {
    it('should return value when present', () => {
      expect(validateRequired('test', 'param')).toBe('test');
      expect(validateRequired(123, 'param')).toBe(123);
      expect(validateRequired(false, 'param')).toBe(false);
      expect(validateRequired([], 'param')).toEqual([]);
    });

    it('should throw MissingParameterError for undefined', () => {
      expect(() => validateRequired(undefined, 'param')).toThrow(MissingParameterError);
      expect(() => validateRequired(undefined, 'param')).toThrow('Missing required parameter: param');
    });

    it('should throw MissingParameterError for null', () => {
      expect(() => validateRequired(null, 'param')).toThrow(MissingParameterError);
      expect(() => validateRequired(null, 'param')).toThrow('Missing required parameter: param');
    });

    it('should throw InvalidParameterError for empty string', () => {
      expect(() => validateRequired('', 'param')).toThrow(InvalidParameterError);
      expect(() => validateRequired(' ', 'param')).toThrow(InvalidParameterError);
    });
  });

  describe('validateStringLength', () => {
    it('should return undefined for undefined input', () => {
      expect(validateStringLength(undefined, 'param')).toBeUndefined();
    });

    it('should validate string length correctly', () => {
      expect(validateStringLength('test', 'param', 1, 10)).toBe('test');
      expect(validateStringLength('a', 'param', 1, 10)).toBe('a');
      expect(validateStringLength('1234567890', 'param', 1, 10)).toBe('1234567890');
    });

    it('should throw for non-string input', () => {
      expect(() => validateStringLength(123 as any, 'param')).toThrow(InvalidParameterError);
      expect(() => validateStringLength([] as any, 'param')).toThrow(InvalidParameterError);
    });

    it('should throw for string too short', () => {
      expect(() => validateStringLength('', 'param', 1)).toThrow(InvalidParameterError);
      expect(() => validateStringLength('ab', 'param', 3)).toThrow(InvalidParameterError);
    });

    it('should throw for string too long', () => {
      expect(() => validateStringLength('toolong', 'param', 1, 5)).toThrow(InvalidParameterError);
    });

    it('should use default minLength of 1', () => {
      expect(() => validateStringLength('', 'param')).toThrow(InvalidParameterError);
      expect(validateStringLength('a', 'param')).toBe('a');
    });
  });

  describe('validateNumberRange', () => {
    it('should return undefined for undefined input', () => {
      expect(validateNumberRange(undefined, 'param')).toBeUndefined();
    });

    it('should validate number range correctly', () => {
      expect(validateNumberRange(5, 'param', 1, 10)).toBe(5);
      expect(validateNumberRange(1, 'param', 1, 10)).toBe(1);
      expect(validateNumberRange(10, 'param', 1, 10)).toBe(10);
    });

    it('should throw for non-number input', () => {
      expect(() => validateNumberRange('5' as any, 'param')).toThrow(InvalidParameterError);
      expect(() => validateNumberRange(NaN, 'param')).toThrow(InvalidParameterError);
    });

    it('should throw for number too small', () => {
      expect(() => validateNumberRange(0, 'param', 1)).toThrow(InvalidParameterError);
      expect(() => validateNumberRange(-5, 'param', 0, 10)).toThrow(InvalidParameterError);
    });

    it('should throw for number too large', () => {
      expect(() => validateNumberRange(11, 'param', 1, 10)).toThrow(InvalidParameterError);
    });

    it('should work with only min or max specified', () => {
      expect(validateNumberRange(5, 'param', 1)).toBe(5);
      expect(validateNumberRange(5, 'param', undefined, 10)).toBe(5);
    });
  });

  describe('validateArrayLength', () => {
    it('should return undefined for undefined input', () => {
      expect(validateArrayLength(undefined, 'param')).toBeUndefined();
    });

    it('should validate array length correctly', () => {
      expect(validateArrayLength([1, 2, 3], 'param', 1, 5)).toEqual([1, 2, 3]);
      expect(validateArrayLength([], 'param', 0, 5)).toEqual([]);
      expect(validateArrayLength([1], 'param', 1, 1)).toEqual([1]);
    });

    it('should throw for non-array input', () => {
      expect(() => validateArrayLength('not array' as any, 'param')).toThrow(InvalidParameterError);
      expect(() => validateArrayLength(123 as any, 'param')).toThrow(InvalidParameterError);
    });

    it('should throw for array too short', () => {
      expect(() => validateArrayLength([], 'param', 1)).toThrow(InvalidParameterError);
      expect(() => validateArrayLength([1], 'param', 2)).toThrow(InvalidParameterError);
    });

    it('should throw for array too long', () => {
      expect(() => validateArrayLength([1, 2, 3], 'param', 0, 2)).toThrow(InvalidParameterError);
    });

    it('should use default minLength of 0', () => {
      expect(validateArrayLength([], 'param')).toEqual([]);
    });
  });

  describe('validateEnum', () => {
    const allowedValues = ['option1', 'option2', 'option3'] as const;

    it('should return undefined for undefined input', () => {
      expect(validateEnum(undefined, 'param', allowedValues)).toBeUndefined();
    });

    it('should validate enum values correctly', () => {
      expect(validateEnum('option1', 'param', allowedValues)).toBe('option1');
      expect(validateEnum('option2', 'param', allowedValues)).toBe('option2');
      expect(validateEnum('option3', 'param', allowedValues)).toBe('option3');
    });

    it('should throw for invalid enum value', () => {
      expect(() => validateEnum('invalid' as any, 'param', allowedValues)).toThrow(InvalidParameterError);
      expect(() => validateEnum('invalid' as any, 'param', allowedValues))
        .toThrow('one of: option1, option2, option3');
    });
  });

  describe('validateConversationId', () => {
    it('should validate correct conversation IDs', () => {
      expect(validateConversationId('abc123')).toBe('abc123');
      expect(validateConversationId('conversation-id')).toBe('conversation-id');
      expect(validateConversationId('conv_123')).toBe('conv_123');
      expect(validateConversationId('ABC123')).toBe('ABC123');
    });

    it('should throw for empty or missing conversation ID', () => {
      expect(() => validateConversationId('')).toThrow(MissingParameterError);
    });

    it('should throw for invalid characters', () => {
      expect(() => validateConversationId('conv@123')).toThrow(InvalidParameterError);
      expect(() => validateConversationId('conv 123')).toThrow(InvalidParameterError);
      expect(() => validateConversationId('conv.123')).toThrow(InvalidParameterError);
    });

    it('should throw for too long conversation ID', () => {
      const longId = 'a'.repeat(101);
      expect(() => validateConversationId(longId)).toThrow(InvalidParameterError);
    });
  });

  describe('validateBubbleId', () => {
    it('should validate correct bubble IDs', () => {
      expect(validateBubbleId('bubble123')).toBe('bubble123');
      expect(validateBubbleId('bubble-id')).toBe('bubble-id');
      expect(validateBubbleId('bubble_123')).toBe('bubble_123');
    });

    it('should throw for empty or missing bubble ID', () => {
      expect(() => validateBubbleId('')).toThrow(MissingParameterError);
    });

    it('should throw for invalid characters', () => {
      expect(() => validateBubbleId('bubble@123')).toThrow(InvalidParameterError);
      expect(() => validateBubbleId('bubble 123')).toThrow(InvalidParameterError);
    });
  });

  describe('validateSearchQuery', () => {
    it('should validate correct search queries', () => {
      expect(validateSearchQuery('test query')).toBe('test query');
      expect(validateSearchQuery('a')).toBe('a');
    });

    it('should throw for empty query', () => {
      expect(() => validateSearchQuery('')).toThrow(MissingParameterError);
    });

    it('should throw for too long query', () => {
      const longQuery = 'a'.repeat(1001);
      expect(() => validateSearchQuery(longQuery)).toThrow(InvalidParameterError);
    });
  });

  describe('validateFilePath', () => {
    it('should return undefined for undefined input', () => {
      expect(validateFilePath(undefined, 'param')).toBeUndefined();
    });

    it('should validate correct file paths', () => {
      expect(validateFilePath('/path/to/file.txt', 'param')).toBe('/path/to/file.txt');
      expect(validateFilePath('relative/path.js', 'param')).toBe('relative/path.js');
      expect(validateFilePath('file.ts', 'param')).toBe('file.ts');
    });

    it('should throw for empty path', () => {
      expect(() => validateFilePath('', 'param')).toThrow(InvalidParameterError);
    });

    it('should throw for too long path', () => {
      const longPath = 'a'.repeat(1001);
      expect(() => validateFilePath(longPath, 'param')).toThrow(InvalidParameterError);
    });
  });

  describe('validateProjectPath', () => {
    it('should validate correct project paths', () => {
      expect(validateProjectPath('/project/path')).toBe('/project/path');
      expect(validateProjectPath('relative/project')).toBe('relative/project');
    });

    it('should throw for empty path', () => {
      expect(() => validateProjectPath('')).toThrow(MissingParameterError);
    });

    it('should throw for too long path', () => {
      const longPath = 'a'.repeat(1001);
      expect(() => validateProjectPath(longPath)).toThrow(InvalidParameterError);
    });
  });

  describe('validateWithSchema', () => {
    const testSchema = z.object({
      name: z.string(),
      age: z.number().min(0)
    });

    it('should validate correct input', () => {
      const input = { name: 'John', age: 30 };
      expect(validateWithSchema(input, testSchema)).toEqual(input);
    });

    it('should throw ValidationError for invalid input', () => {
      const input = { name: 'John', age: -5 };
      expect(() => validateWithSchema(input, testSchema)).toThrow(ValidationError);
    });

    it('should throw ValidationError for missing fields', () => {
      const input = { name: 'John' };
      expect(() => validateWithSchema(input, testSchema)).toThrow(ValidationError);
    });

    it('should include context in error message', () => {
      const input = { name: 'John', age: -5 };
      expect(() => validateWithSchema(input, testSchema, 'user data'))
        .toThrow('Validation error in user data');
    });
  });

  describe('validateBoolean', () => {
    it('should return undefined for undefined input', () => {
      expect(validateBoolean(undefined, 'param')).toBeUndefined();
    });

    it('should validate boolean values', () => {
      expect(validateBoolean(true, 'param')).toBe(true);
      expect(validateBoolean(false, 'param')).toBe(false);
    });

    it('should throw for non-boolean input', () => {
      expect(() => validateBoolean('true' as any, 'param')).toThrow(InvalidParameterError);
      expect(() => validateBoolean(1 as any, 'param')).toThrow(InvalidParameterError);
    });
  });

  describe('validateLimit', () => {
    it('should return default limit for undefined input', () => {
      expect(validateLimit(undefined)).toBe(10);
      expect(validateLimit(undefined, 20)).toBe(20);
    });

    it('should validate correct limits', () => {
      expect(validateLimit(5)).toBe(5);
      expect(validateLimit(100)).toBe(100);
    });

    it('should throw for invalid limits', () => {
      expect(() => validateLimit(0)).toThrow(InvalidParameterError);
      expect(() => validateLimit(-5)).toThrow(InvalidParameterError);
      expect(() => validateLimit(1001)).toThrow(InvalidParameterError);
    });
  });

  describe('validateOffset', () => {
    it('should return 0 for undefined input', () => {
      expect(validateOffset(undefined)).toBe(0);
    });

    it('should validate correct offsets', () => {
      expect(validateOffset(0)).toBe(0);
      expect(validateOffset(50)).toBe(50);
    });

    it('should throw for negative offset', () => {
      expect(() => validateOffset(-1)).toThrow(InvalidParameterError);
    });
  });

  describe('validateContextLines', () => {
    it('should return 3 for undefined input', () => {
      expect(validateContextLines(undefined)).toBe(3);
    });

    it('should validate correct context lines', () => {
      expect(validateContextLines(0)).toBe(0);
      expect(validateContextLines(5)).toBe(5);
      expect(validateContextLines(10)).toBe(10);
    });

    it('should throw for invalid context lines', () => {
      expect(() => validateContextLines(-1)).toThrow(InvalidParameterError);
      expect(() => validateContextLines(11)).toThrow(InvalidParameterError);
    });
  });
});
```
--------------------------------------------------------------------------------
/src/database/reader.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import { CursorDatabaseReader } from './reader.js';
import Database from 'better-sqlite3';
// Mock better-sqlite3
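// NOTE: vi.mock calls are hoisted by Vitest to the top of the module, so this
// mock replaces better-sqlite3 before the import above is evaluated.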
vi.mock('better-sqlite3');
const mockDatabase = vi.mocked(Database);
describe('CursorDatabaseReader', () => {
  let mockDb: any;
  let reader: CursorDatabaseReader;

  beforeEach(() => {
    mockDb = {
      prepare: vi.fn(),
      close: vi.fn(),
      exec: vi.fn()
    };
    mockDatabase.mockReturnValue(mockDb);
    reader = new CursorDatabaseReader({
      dbPath: '/test/path/cursor.db',
      minConversationSize: 1000
    });
  });

  afterEach(() => {
    vi.clearAllMocks();
  });

  describe('constructor', () => {
    it('should create reader with default options', () => {
      const defaultReader = new CursorDatabaseReader({ dbPath: '/test/cursor.db' });
      expect(defaultReader).toBeDefined();
    });

    it('should create reader with custom options', () => {
      const customReader = new CursorDatabaseReader({
        dbPath: '/custom/path.db',
        minConversationSize: 5000,
        cacheEnabled: false
      });
      expect(customReader).toBeDefined();
    });
  });

  describe('connect', () => {
    it('should connect to database successfully', async () => {
      const mockPrepare = vi.fn().mockReturnValue({
        get: vi.fn().mockReturnValue({ count: 10 })
      });
      mockDb.prepare.mockReturnValue(mockPrepare);
      await reader.connect();
      expect(mockDatabase).toHaveBeenCalledWith('/test/path/cursor.db', { readonly: true });
      expect(mockDb.exec).toHaveBeenCalledWith('PRAGMA journal_mode = WAL;');
    });

    it('should handle connection errors', async () => {
      mockDatabase.mockImplementation(() => {
        throw new Error('Database connection failed');
      });
      await expect(reader.connect()).rejects.toThrow('Database connection failed');
    });

    it('should handle connection with cache disabled', async () => {
      const noCacheReader = new CursorDatabaseReader({
        dbPath: '/test/cursor.db',
        cacheEnabled: false
      });
      const mockPrepare = vi.fn().mockReturnValue({
        get: vi.fn().mockReturnValue({ count: 5 })
      });
      mockDb.prepare.mockReturnValue(mockPrepare);
      await noCacheReader.connect();
      expect(mockDatabase).toHaveBeenCalledWith('/test/cursor.db', { readonly: true });
    });
  });

  describe('close', () => {
    it('should close database connection', () => {
      reader['db'] = mockDb;
      reader.close();
      expect(mockDb.close).toHaveBeenCalled();
    });

    it('should handle close when not connected', () => {
      expect(() => reader.close()).not.toThrow();
    });
  });

  describe('getConversationIds', () => {
    beforeEach(async () => {
      const mockPrepare = vi.fn().mockReturnValue({
        get: vi.fn().mockReturnValue({ count: 10 }),
        all: vi.fn().mockReturnValue([
          { composerId: 'conv1' },
          { composerId: 'conv2' }
        ])
      });
      mockDb.prepare.mockReturnValue(mockPrepare);
      await reader.connect();
    });

    it('should get conversation IDs with default filters', async () => {
      const result = await reader.getConversationIds({});
      expect(result).toEqual(['conv1', 'conv2']);
    });

    it('should apply minLength filter', async () => {
      const mockStmt = {
        all: vi.fn().mockReturnValue([{ composerId: 'conv1' }])
      };
      mockDb.prepare.mockReturnValue(mockStmt);
      const result = await reader.getConversationIds({ minLength: 2000 });
      expect(result).toEqual(['conv1']);
      expect(mockDb.prepare).toHaveBeenCalledWith(
        expect.stringContaining('LENGTH(text) >= ?')
      );
    });

    it('should apply keywords filter', async () => {
      const mockStmt = {
        all: vi.fn().mockReturnValue([{ composerId: 'conv1' }])
      };
      mockDb.prepare.mockReturnValue(mockStmt);
      const result = await reader.getConversationIds({ keywords: ['test', 'query'] });
      expect(result).toEqual(['conv1']);
      expect(mockDb.prepare).toHaveBeenCalledWith(
        expect.stringContaining('text LIKE ?')
      );
    });

    it('should apply format filter', async () => {
      const mockStmt = {
        all: vi.fn().mockReturnValue([{ composerId: 'conv1' }])
      };
      mockDb.prepare.mockReturnValue(mockStmt);
      const result = await reader.getConversationIds({ format: 'modern' });
      expect(result).toEqual(['conv1']);
      expect(mockDb.prepare).toHaveBeenCalledWith(
        expect.stringContaining('_v IS NOT NULL')
      );
    });
  });

  describe('getConversationById', () => {
    beforeEach(async () => {
      const mockPrepare = vi.fn().mockReturnValue({
        get: vi.fn().mockReturnValue({ count: 10 })
      });
      mockDb.prepare.mockReturnValue(mockPrepare);
      await reader.connect();
    });

    it('should get conversation by ID', async () => {
      const mockConversation = {
        composerId: 'conv1',
        text: 'conversation text',
        conversation: JSON.stringify([{ type: 1, text: 'hello' }])
      };
      const mockStmt = {
        get: vi.fn().mockReturnValue(mockConversation)
      };
      mockDb.prepare.mockReturnValue(mockStmt);
      const result = await reader.getConversationById('conv1');
      expect(result).toEqual({
        composerId: 'conv1',
        text: 'conversation text',
        conversation: [{ type: 1, text: 'hello' }]
      });
      expect(mockStmt.get).toHaveBeenCalledWith('conv1');
    });

    it('should return null for non-existent conversation', async () => {
      const mockStmt = {
        get: vi.fn().mockReturnValue(undefined)
      };
      mockDb.prepare.mockReturnValue(mockStmt);
      const result = await reader.getConversationById('nonexistent');
      expect(result).toBeNull();
    });

    it('should handle JSON parsing errors gracefully', async () => {
      const mockConversation = {
        composerId: 'conv1',
        text: 'conversation text',
        conversation: 'invalid json'
      };
      const mockStmt = {
        get: vi.fn().mockReturnValue(mockConversation)
      };
      mockDb.prepare.mockReturnValue(mockStmt);
      const result = await reader.getConversationById('conv1');
      expect(result).toEqual({
        composerId: 'conv1',
        text: 'conversation text',
        conversation: []
      });
    });
  });

  describe('getConversationSummary', () => {
    beforeEach(async () => {
      const mockPrepare = vi.fn().mockReturnValue({
        get: vi.fn().mockReturnValue({ count: 10 })
      });
      mockDb.prepare.mockReturnValue(mockPrepare);
      await reader.connect();
    });

    it('should get conversation summary with default options', async () => {
      const mockConversation = {
        composerId: 'conv1',
        text: 'stored summary',
        richText: 'rich text',
        conversation: JSON.stringify([
          { type: 1, text: 'first message' },
          { type: 2, text: 'second message' }
        ])
      };
      const mockStmt = {
        get: vi.fn().mockReturnValue(mockConversation)
      };
      mockDb.prepare.mockReturnValue(mockStmt);
      const result = await reader.getConversationSummary('conv1');
      expect(result).toEqual({
        composerId: 'conv1',
        format: 'legacy',
        messageCount: 2,
        hasCodeBlocks: false,
        conversationSize: expect.any(Number),
        relevantFiles: [],
        attachedFolders: []
      });
    });

    it('should include first message when requested', async () => {
      const mockConversation = {
        composerId: 'conv1',
        conversation: JSON.stringify([
          { type: 1, text: 'This is the first message' }
        ])
      };
      const mockStmt = {
        get: vi.fn().mockReturnValue(mockConversation)
      };
      mockDb.prepare.mockReturnValue(mockStmt);
      const result = await reader.getConversationSummary('conv1', {
        includeFirstMessage: true,
        maxFirstMessageLength: 50
      });
      expect(result?.firstMessage).toBe('This is the first message');
    });

    it('should detect code blocks', async () => {
      const mockConversation = {
        composerId: 'conv1',
        conversation: JSON.stringify([
          {
            type: 1,
            text: 'message',
            suggestedCodeBlocks: [{ language: 'js', code: 'console.log()' }]
          }
        ])
      };
      const mockStmt = {
        get: vi.fn().mockReturnValue(mockConversation)
      };
      mockDb.prepare.mockReturnValue(mockStmt);
      const result = await reader.getConversationSummary('conv1', {
        includeCodeBlockCount: true
      });
      expect(result?.hasCodeBlocks).toBe(true);
      expect(result?.codeBlockCount).toBe(1);
    });

    it('should return null for non-existent conversation', async () => {
      const mockStmt = {
        get: vi.fn().mockReturnValue(undefined)
      };
      mockDb.prepare.mockReturnValue(mockStmt);
      const result = await reader.getConversationSummary('nonexistent');
      expect(result).toBeNull();
    });
  });

  describe('getBubbleMessage', () => {
    beforeEach(async () => {
      const mockPrepare = vi.fn().mockReturnValue({
        get: vi.fn().mockReturnValue({ count: 10 })
      });
      mockDb.prepare.mockReturnValue(mockPrepare);
      await reader.connect();
    });

    it('should get bubble message', async () => {
      const mockBubble = {
        bubbleId: 'bubble1',
        type: 1,
        text: 'bubble text',
        relevantFiles: JSON.stringify(['file1.ts']),
        suggestedCodeBlocks: JSON.stringify([]),
        attachedFoldersNew: JSON.stringify(['folder1'])
      };
      const mockStmt = {
        get: vi.fn().mockReturnValue(mockBubble)
      };
      mockDb.prepare.mockReturnValue(mockStmt);
      const result = await reader.getBubbleMessage('conv1', 'bubble1');
      expect(result).toEqual({
        bubbleId: 'bubble1',
        type: 1,
        text: 'bubble text',
        relevantFiles: ['file1.ts'],
        suggestedCodeBlocks: [],
        attachedFoldersNew: ['folder1']
      });
    });

    it('should return null for non-existent bubble', async () => {
      const mockStmt = {
        get: vi.fn().mockReturnValue(undefined)
      };
      mockDb.prepare.mockReturnValue(mockStmt);
      const result = await reader.getBubbleMessage('conv1', 'nonexistent');
      expect(result).toBeNull();
    });
  });

  describe('searchConversations', () => {
    beforeEach(async () => {
      const mockPrepare = vi.fn().mockReturnValue({
        get: vi.fn().mockReturnValue({ count: 10 })
      });
      mockDb.prepare.mockReturnValue(mockPrepare);
      await reader.connect();
    });

    it('should search conversations', async () => {
      const mockResults = [
        {
          composerId: 'conv1',
          text: 'conversation with search term',
          conversation: JSON.stringify([
            { type: 1, text: 'message with search term' }
          ])
        }
      ];
      const mockStmt = {
        all: vi.fn().mockReturnValue(mockResults)
      };
      mockDb.prepare.mockReturnValue(mockStmt);
      const result = await reader.searchConversations('search term');
      expect(result).toHaveLength(1);
      expect(result[0].composerId).toBe('conv1');
      expect(result[0].matches).toBeDefined();
    });

    it('should apply search options', async () => {
      const mockStmt = {
        all: vi.fn().mockReturnValue([])
      };
      mockDb.prepare.mockReturnValue(mockStmt);
      await reader.searchConversations('query', {
        maxResults: 5,
        searchType: 'code',
        format: 'modern'
      });
      expect(mockDb.prepare).toHaveBeenCalledWith(
        expect.stringContaining('LIMIT 5')
      );
    });
  });

  describe('getConversationIdsByProject', () => {
    beforeEach(async () => {
      const mockPrepare = vi.fn().mockReturnValue({
        get: vi.fn().mockReturnValue({ count: 10 })
      });
      mockDb.prepare.mockReturnValue(mockPrepare);
      await reader.connect();
    });

    it('should get conversations by project path', async () => {
      const mockResults = [
        { composerId: 'conv1', relevanceScore: 0.9 },
        { composerId: 'conv2', relevanceScore: 0.7 }
      ];
      const mockStmt = {
        all: vi.fn().mockReturnValue(mockResults)
      };
      mockDb.prepare.mockReturnValue(mockStmt);
      const result = await reader.getConversationIdsByProject('/project/path');
      expect(result).toEqual(mockResults);
      expect(mockDb.prepare).toHaveBeenCalledWith(
        expect.stringContaining('attachedFoldersNew LIKE ?')
      );
    });

    it('should apply project search options', async () => {
      const mockStmt = {
        all: vi.fn().mockReturnValue([])
      };
      mockDb.prepare.mockReturnValue(mockStmt);
      await reader.getConversationIdsByProject('/project', {
        filePattern: '*.ts',
        limit: 10,
        orderBy: 'relevance'
      });
      expect(mockDb.prepare).toHaveBeenCalledWith(
        expect.stringContaining('relevantFiles LIKE ?')
      );
    });
  });

  describe('Error Handling', () => {
    it('should handle database errors gracefully', async () => {
      const mockStmt = {
        get: vi.fn().mockImplementation(() => {
          throw new Error('Database error');
        })
      };
      mockDb.prepare.mockReturnValue(mockStmt);
      await reader.connect();
      await expect(reader.getConversationById('conv1')).rejects.toThrow('Database error');
    });

    it('should handle missing database connection', async () => {
      const unconnectedReader = new CursorDatabaseReader({ dbPath: '/test/cursor.db' });
      await expect(unconnectedReader.getConversationIds({})).rejects.toThrow();
    });
  });
});
```
--------------------------------------------------------------------------------
/src/utils/errors.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect } from 'vitest';
import {
  MCPError,
  DatabaseError,
  DatabaseConnectionError,
  ConversationNotFoundError,
  BubbleMessageNotFoundError,
  ValidationError,
  MissingParameterError,
  InvalidParameterError,
  FileSystemError,
  DatabasePathNotFoundError,
  ConversationParseError,
  SearchError,
  CacheError,
  isMCPError,
  getErrorInfo
} from './errors.js';

describe('Error Utils', () => {
  describe('MCPError', () => {
    it('should create basic MCP error', () => {
      const error = new MCPError('Test message');
      expect(error.message).toBe('Test message');
      expect(error.code).toBe('MCP_ERROR');
      expect(error.statusCode).toBe(500);
      expect(error.name).toBe('MCPError');
      expect(error).toBeInstanceOf(Error);
      expect(error).toBeInstanceOf(MCPError);
    });

    it('should create MCP error with custom code and status', () => {
      const error = new MCPError('Custom message', 'CUSTOM_CODE', 400);
      expect(error.message).toBe('Custom message');
      expect(error.code).toBe('CUSTOM_CODE');
      expect(error.statusCode).toBe(400);
    });

    it('should maintain proper stack trace', () => {
      const error = new MCPError('Test message');
      expect(error.stack).toBeDefined();
      expect(error.stack).toContain('MCPError');
    });
  });

  describe('DatabaseError', () => {
    it('should create database error without original error', () => {
      const error = new DatabaseError('Connection failed');
      expect(error.message).toBe('Database error: Connection failed');
      expect(error.code).toBe('DATABASE_ERROR');
      expect(error.statusCode).toBe(500);
      expect(error).toBeInstanceOf(MCPError);
      expect(error).toBeInstanceOf(DatabaseError);
    });

    it('should create database error with original error', () => {
      const originalError = new Error('Original error message');
      const error = new DatabaseError('Connection failed', originalError);
      expect(error.message).toBe('Database error: Connection failed. Original: Original error message');
      expect(error.code).toBe('DATABASE_ERROR');
      expect(error.stack).toContain('Caused by:');
    });

    it('should handle original error without stack', () => {
      const originalError = new Error('Original error message');
      originalError.stack = undefined;
      const error = new DatabaseError('Connection failed', originalError);
      expect(error.message).toBe('Database error: Connection failed. Original: Original error message');
    });
  });

  describe('DatabaseConnectionError', () => {
    it('should create database connection error', () => {
      const dbPath = '/path/to/database.db';
      const error = new DatabaseConnectionError(dbPath);
      expect(error.message).toBe(`Database error: Failed to connect to database at path: ${dbPath}`);
      expect(error.code).toBe('DATABASE_CONNECTION_ERROR');
      expect(error.statusCode).toBe(500);
      expect(error).toBeInstanceOf(DatabaseError);
    });

    it('should create database connection error with original error', () => {
      const dbPath = '/path/to/database.db';
      const originalError = new Error('Permission denied');
      const error = new DatabaseConnectionError(dbPath, originalError);
      expect(error.message).toContain('Failed to connect to database at path: /path/to/database.db');
      expect(error.message).toContain('Permission denied');
      expect(error.code).toBe('DATABASE_CONNECTION_ERROR');
    });
  });

  describe('ConversationNotFoundError', () => {
    it('should create conversation not found error', () => {
      const conversationId = 'conv123';
      const error = new ConversationNotFoundError(conversationId);
      expect(error.message).toBe('Conversation not found: conv123');
      expect(error.code).toBe('CONVERSATION_NOT_FOUND');
      expect(error.statusCode).toBe(404);
      expect(error.conversationId).toBe(conversationId);
      expect(error).toBeInstanceOf(MCPError);
    });
  });

  describe('BubbleMessageNotFoundError', () => {
    it('should create bubble message not found error', () => {
      const composerId = 'composer123';
      const bubbleId = 'bubble456';
      const error = new BubbleMessageNotFoundError(composerId, bubbleId);
      expect(error.message).toBe('Bubble message not found: bubble456 in conversation composer123');
      expect(error.code).toBe('BUBBLE_MESSAGE_NOT_FOUND');
      expect(error.statusCode).toBe(404);
      expect(error.composerId).toBe(composerId);
      expect(error.bubbleId).toBe(bubbleId);
      expect(error).toBeInstanceOf(MCPError);
    });
  });

  describe('ValidationError', () => {
    it('should create validation error without field info', () => {
      const error = new ValidationError('Invalid input');
      expect(error.message).toBe('Validation error: Invalid input');
      expect(error.code).toBe('VALIDATION_ERROR');
      expect(error.statusCode).toBe(400);
      expect(error.field).toBeUndefined();
      expect(error.value).toBeUndefined();
      expect(error).toBeInstanceOf(MCPError);
    });

    it('should create validation error with field info', () => {
      const error = new ValidationError('Invalid email format', 'email', 'invalid-email');
      expect(error.message).toBe('Validation error: Invalid email format');
      expect(error.code).toBe('VALIDATION_ERROR');
      expect(error.field).toBe('email');
      expect(error.value).toBe('invalid-email');
    });
  });

  describe('MissingParameterError', () => {
    it('should create missing parameter error', () => {
      const error = new MissingParameterError('username');
      expect(error.message).toBe('Validation error: Missing required parameter: username');
      expect(error.code).toBe('MISSING_PARAMETER');
      expect(error.statusCode).toBe(400);
      expect(error.field).toBe('username');
      expect(error).toBeInstanceOf(ValidationError);
    });
  });

  describe('InvalidParameterError', () => {
    it('should create invalid parameter error without expected type', () => {
      const error = new InvalidParameterError('age', 'not-a-number');
      expect(error.message).toBe("Validation error: Invalid parameter 'age': not-a-number");
      expect(error.code).toBe('INVALID_PARAMETER');
      expect(error.field).toBe('age');
      expect(error.value).toBe('not-a-number');
      expect(error).toBeInstanceOf(ValidationError);
    });

    it('should create invalid parameter error with expected type', () => {
      const error = new InvalidParameterError('age', 'not-a-number', 'number');
      expect(error.message).toBe("Validation error: Invalid parameter 'age': expected number, got string");
      expect(error.code).toBe('INVALID_PARAMETER');
      expect(error.field).toBe('age');
      expect(error.value).toBe('not-a-number');
    });
  });

  describe('FileSystemError', () => {
    it('should create file system error without original error', () => {
      const path = '/path/to/file';
      const error = new FileSystemError('File not found', path);
      expect(error.message).toBe('File system error: File not found');
      expect(error.code).toBe('FILESYSTEM_ERROR');
      expect(error.statusCode).toBe(500);
      expect(error.path).toBe(path);
      expect(error).toBeInstanceOf(MCPError);
    });

    it('should create file system error with original error', () => {
      const path = '/path/to/file';
      const originalError = new Error('Permission denied');
      const error = new FileSystemError('File not found', path, originalError);
      expect(error.message).toBe('File system error: File not found. Original: Permission denied');
      expect(error.path).toBe(path);
      expect(error.stack).toContain('Caused by:');
    });
  });

  describe('DatabasePathNotFoundError', () => {
    it('should create database path not found error', () => {
      const attemptedPaths = ['/path1/db', '/path2/db', '/path3/db'];
      const error = new DatabasePathNotFoundError(attemptedPaths);
      expect(error.message).toBe('File system error: Could not find Cursor database. Attempted paths: /path1/db, /path2/db, /path3/db');
      expect(error.code).toBe('DATABASE_PATH_NOT_FOUND');
      expect(error.path).toBe('/path1/db');
      expect(error).toBeInstanceOf(FileSystemError);
    });

    it('should handle empty attempted paths array', () => {
      const error = new DatabasePathNotFoundError([]);
      expect(error.message).toContain('Could not find Cursor database. Attempted paths: ');
      expect(error.path).toBe('unknown');
    });
  });

  describe('ConversationParseError', () => {
    it('should create conversation parse error without conversation ID', () => {
      const error = new ConversationParseError('Invalid JSON format');
      expect(error.message).toBe('Parse error: Invalid JSON format');
      expect(error.code).toBe('CONVERSATION_PARSE_ERROR');
      expect(error.statusCode).toBe(500);
      expect(error.conversationId).toBeUndefined();
      expect(error).toBeInstanceOf(MCPError);
    });

    it('should create conversation parse error with conversation ID', () => {
      const conversationId = 'conv123';
      const error = new ConversationParseError('Invalid JSON format', conversationId);
      expect(error.message).toBe('Parse error: Invalid JSON format');
      expect(error.conversationId).toBe(conversationId);
    });

    it('should create conversation parse error with original error', () => {
      const originalError = new Error('JSON syntax error');
      const error = new ConversationParseError('Invalid JSON format', 'conv123', originalError);
      expect(error.message).toBe('Parse error: Invalid JSON format. Original: JSON syntax error');
      expect(error.stack).toContain('Caused by:');
    });
  });

  describe('SearchError', () => {
    it('should create search error without original error', () => {
      const query = 'test query';
      const error = new SearchError('Search failed', query);
      expect(error.message).toBe('Search error: Search failed');
      expect(error.code).toBe('SEARCH_ERROR');
      expect(error.statusCode).toBe(500);
      expect(error.query).toBe(query);
      expect(error).toBeInstanceOf(MCPError);
    });

    it('should create search error with original error', () => {
      const query = 'test query';
      const originalError = new Error('Database timeout');
      const error = new SearchError('Search failed', query, originalError);
      expect(error.message).toBe('Search error: Search failed. Original: Database timeout');
      expect(error.query).toBe(query);
      expect(error.stack).toContain('Caused by:');
    });
  });

  describe('CacheError', () => {
    it('should create cache error without key', () => {
      const operation = 'get';
      const error = new CacheError('Cache miss', operation);
      expect(error.message).toBe('Cache error: Cache miss');
      expect(error.code).toBe('CACHE_ERROR');
      expect(error.statusCode).toBe(500);
      expect(error.operation).toBe(operation);
      expect(error.key).toBeUndefined();
      expect(error).toBeInstanceOf(MCPError);
    });

    it('should create cache error with key', () => {
      const operation = 'set';
      const key = 'cache-key';
      const error = new CacheError('Cache write failed', operation, key);
      expect(error.message).toBe('Cache error: Cache write failed');
      expect(error.operation).toBe(operation);
      expect(error.key).toBe(key);
    });

    it('should create cache error with original error', () => {
      const originalError = new Error('Memory full');
      const error = new CacheError('Cache write failed', 'set', 'key', originalError);
      expect(error.message).toBe('Cache error: Cache write failed. Original: Memory full');
      expect(error.stack).toContain('Caused by:');
    });
  });

  describe('isMCPError', () => {
    it('should return true for MCP errors', () => {
      expect(isMCPError(new MCPError('test'))).toBe(true);
      expect(isMCPError(new DatabaseError('test'))).toBe(true);
      expect(isMCPError(new ValidationError('test'))).toBe(true);
      expect(isMCPError(new ConversationNotFoundError('test'))).toBe(true);
    });

    it('should return false for non-MCP errors', () => {
      expect(isMCPError(new Error('test'))).toBe(false);
      expect(isMCPError(new TypeError('test'))).toBe(false);
      expect(isMCPError('not an error')).toBe(false);
      expect(isMCPError(null)).toBe(false);
      expect(isMCPError(undefined)).toBe(false);
    });

    it('should return false for objects that look like MCP errors', () => {
      const fakeError = {
        message: 'test',
        code: 'TEST_ERROR',
        statusCode: 400
      };
      expect(isMCPError(fakeError)).toBe(false);
    });
  });

  describe('getErrorInfo', () => {
    it('should extract info from MCP errors', () => {
      const error = new DatabaseError('Database connection failed');
      const info = getErrorInfo(error);
      expect(info.message).toBe('Database error: Database connection failed');
      expect(info.code).toBe('DATABASE_ERROR');
      expect(info.statusCode).toBe(500);
      expect(info.stack).toBeDefined();
      expect(info.originalError).toBeUndefined();
    });

    it('should extract info from regular errors', () => {
      const error = new Error('Regular error');
      const info = getErrorInfo(error);
      expect(info.message).toBe('Regular error');
      expect(info.code).toBe('UNKNOWN_ERROR');
      expect(info.statusCode).toBe(500);
      expect(info.stack).toBeDefined();
    });

    it('should handle non-error objects', () => {
      const info = getErrorInfo('string error');
      expect(info.message).toBe('string error');
      expect(info.code).toBe('UNKNOWN_ERROR');
      expect(info.statusCode).toBe(500);
      expect(info.stack).toBeUndefined();
    });

    it('should handle null and undefined', () => {
      expect(getErrorInfo(null).message).toBe('Unknown error occurred');
      expect(getErrorInfo(undefined).message).toBe('Unknown error occurred');
    });

    it('should handle objects with toString method', () => {
      const obj = {
        toString: () => 'Custom error message'
      };
      const info = getErrorInfo(obj);
      expect(info.message).toBe('Custom error message');
      expect(info.code).toBe('UNKNOWN_ERROR');
    });

    it('should include original error info for nested errors', () => {
      const originalError = new Error('Original error');
      const error = new DatabaseError('Wrapper error', originalError);
      const info = getErrorInfo(error);
      expect(info.originalError).toBe('Original error');
    });
  });
});
```
--------------------------------------------------------------------------------
/docs/use-cases.md:
--------------------------------------------------------------------------------
```markdown
# Cursor Chat History MCP - Use Cases
This document provides comprehensive use case examples for the Cursor Chat History MCP, organized by user type, scenario, and application domain.
## 🎯 Personal Development & Learning
### Skill Assessment & Growth Tracking
**Track Your Learning Journey**
```
"Analyze my conversations over the last 6 months to identify which programming concepts I ask about most frequently"
"Find conversations where I progressed from asking basic questions to implementing complex solutions"
"Generate a timeline of my React learning based on conversation complexity over time"
```
**Identify Knowledge Gaps**
```
"Search for patterns in my debugging conversations to find recurring issues I struggle with"
"Find topics I've never discussed but are common in my project files"
"Analyze conversations where I repeatedly asked similar questions about async/await"
```
**Create Personal Learning Materials**
```
"Extract all my successful problem-solving conversations to create a personal reference guide"
"Find conversations where I learned new concepts and turn them into study notes"
"Generate flashcards from conversations about complex algorithms I've implemented"
```
### Code Quality Improvement
**Personal Coding Standards**
```
"Analyze my TypeScript conversations to create my personal interface naming conventions"
"Find patterns in my code review discussions to build my quality checklist"
"Extract conversations about refactoring to create my refactoring playbook"
```
**Debugging Mastery**
```
"Create a debugging methodology from conversations where I successfully solved complex bugs"
"Find all conversations about performance issues and extract optimization techniques"
"Generate error handling patterns from my exception-related discussions"
```
## 🏢 Team & Collaboration
### Onboarding & Knowledge Transfer
**New Developer Onboarding**
```
"Export all conversations about our authentication system for new team member documentation"
"Find discussions about our deployment pipeline and create an onboarding guide"
"Generate FAQ from commonly asked questions in team conversations"
```
**Institutional Knowledge Capture**
```
"Extract conversations with senior developers about architectural decisions"
"Find discussions about why certain technologies were chosen over alternatives"
"Create decision logs from conversations about major technical choices"
```
### Team Standards & Best Practices
**Coding Standards Development**
```
"Analyze team conversations about code reviews to create coding guidelines"
"Find discussions about naming conventions and standardize them"
"Extract security-related conversations to build security best practices"
```
**Process Documentation**
```
"Generate testing guidelines from conversations about testing strategies"
"Create deployment checklists from conversations about production issues"
"Build code review templates from successful review discussions"
```
### Knowledge Sharing
**Create Training Materials**
```
"Find conversations where complex concepts were explained well and turn them into training docs"
"Extract step-by-step implementations from conversations for tutorial creation"
"Generate workshop content from conversations about hands-on learning"
```
**Build Team Resources**
```
"Create a troubleshooting database from team problem-solving conversations"
"Generate tool configuration guides from setup discussions"
"Build a team glossary from conversations about domain-specific terms"
```
## 🔧 Development Workflow Optimization
### Debugging & Problem-Solving
**Build Debugging Resources**
```
"Create error code reference guides from conversations about specific errors"
"Find conversations about system failures and build incident response playbooks"
"Generate debugging decision trees from successful troubleshooting sessions"
```
**Performance Optimization**
```
"Extract all performance-related conversations to create optimization checklists"
"Find discussions about database query optimization and build query guidelines"
"Generate performance monitoring guides from conversations about bottlenecks"
```
### Tool & Technology Adoption
**Technology Evaluation**
```
"Compare conversations about different frameworks to create evaluation matrices"
"Find discussions about tool adoption challenges and create adoption playbooks"
"Analyze conversations about migration projects to build migration guides"
```
**Configuration Management**
```
"Generate setup guides from conversations about development environment configuration"
"Create tool comparison documents from conversations about alternative solutions"
"Build troubleshooting guides from conversations about tool-specific issues"
```
### Process Improvement
**Workflow Analysis**
```
"Analyze conversations about workflow pain points to identify improvement opportunities"
"Find discussions about automation and create automation opportunity lists"
"Extract conversations about time-saving techniques and build efficiency guides"
```
**Quality Assurance**
```
"Generate testing strategies from conversations about QA approaches"
"Create review checklists from conversations about quality issues"
"Build validation frameworks from conversations about testing methodologies"
```
## 📊 Analytics & Insights
### Productivity Analysis
**Personal Productivity Insights**
```
"Analyze conversation patterns to identify my most productive coding periods"
"Find correlations between conversation topics and successful implementations"
"Track how quickly I solve similar problems over time"
```
**Team Productivity Metrics**
```
"Analyze team conversation patterns to identify collaboration bottlenecks"
"Find conversations that led to successful outcomes vs. those that didn't"
"Measure knowledge transfer effectiveness through conversation analysis"
```
### Technology Usage Patterns
**Technology Stack Analysis**
```
"Generate reports on programming language usage across all conversations"
"Track the evolution of our technology stack through chat history"
"Find conversations about technology decisions to understand selection criteria"
```
**Adoption Tracking**
```
"Monitor new technology adoption through conversation frequency and sentiment"
"Analyze conversations about learning curves for different technologies"
"Track which technologies generate the most questions and support needs"
```
### Project Insights
**Project Success Patterns**
```
"Analyze conversations by project to identify success factors"
"Find patterns in conversations that predict project challenges"
"Generate project retrospectives based on conversation content and outcomes"
```
**Development Velocity**
```
"Track feature development conversations to understand implementation patterns"
"Analyze conversation complexity vs. actual implementation time"
"Find conversations about estimation accuracy and improve estimation processes"
```
## 🎨 Creative & Strategic Applications
### Innovation & Ideation
**Idea Generation**
```
"Find conversations about experimental features to identify innovation opportunities"
"Extract brainstorming conversations to build idea repositories"
"Analyze discussions about future improvements to generate product roadmaps"
```
**Research & Development**
```
"Find conversations about proof-of-concepts to create experimentation frameworks"
"Generate research reports from conversations about emerging technologies"
"Extract conversations about competitive analysis to inform strategic decisions"
```
### Architecture & Design
**System Design Documentation**
```
"Create architecture documents from conversations about system design decisions"
"Find discussions about scalability to build scaling playbooks"
"Generate design pattern guides from conversations about code structure"
```
**Technical Debt Management**
```
"Extract conversations about technical debt to create remediation plans"
"Find discussions about refactoring priorities and create improvement roadmaps"
"Generate maintenance schedules from conversations about system health"
```
## 🔍 Advanced Search & Analysis Techniques
### Pattern Recognition with LIKE Patterns
**Function and Method Analysis**
```
"Find all React hook usage: likePattern='%useState(%' or '%useEffect(%'"
"Locate API calls: likePattern='%fetch(%' or '%axios.%'"
"Search for error handling: likePattern='%try {%' or '%catch (%'"
"Find database queries: likePattern='%SELECT %' or '%INSERT INTO%'"
```
**File and Configuration Analysis**
```
"Find configuration discussions: likePattern='%.config%' or '%package.json%'"
"Locate styling conversations: likePattern='%.css%' or '%.scss%'"
"Search for test files: likePattern='%.test.%' or '%.spec.%'"
"Find documentation: likePattern='%.md%' or '%README%'"
```
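These `likePattern` values use SQL `LIKE` syntax, where `%` matches any run of characters. Below is a minimal sketch of issuing such a search programmatically through an MCP client; the `search_conversations` tool name, the server launch command, and the exact parameter shape are assumptions based on the examples above, so adjust them to match your installed server.

```typescript
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

// Launch the MCP server over stdio (command and args are assumptions;
// use whatever entry point your .cursor/mcp.json registers).
const transport = new StdioClientTransport({
  command: "npx",
  args: ["cursor-chat-history-mcp"]
});

const client = new Client({ name: "like-pattern-demo", version: "1.0.0" });
await client.connect(transport);

// Find conversations that call useState: '%' matches any characters,
// so this matches any stored text containing "useState(".
const hooks = await client.callTool({
  name: "search_conversations", // assumed tool name
  arguments: { likePattern: "%useState(%" }
});
console.log(hooks.content);
```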
### Multi-Keyword Search Strategies
**Technology Combinations**
```
"Find React + TypeScript discussions: keywords=['react', 'typescript'], keywordOperator='AND'"
"Compare frameworks: keywords=['react', 'vue', 'angular'], keywordOperator='OR'"
"Database + performance: keywords=['database', 'performance', 'optimization'], keywordOperator='AND'"
```
**Problem-Solution Patterns**
```
"Error + solution combinations: keywords=['error', 'fix', 'solution'], keywordOperator='AND'"
"Testing strategies: keywords=['test', 'unit', 'integration'], keywordOperator='OR'"
"Security implementations: keywords=['auth', 'security', 'encryption'], keywordOperator='AND'"
```
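As a rough sketch of how these parameters compose (reusing the `client` from the previous example; the tool and parameter names remain assumptions), `keywordOperator: 'AND'` requires every keyword to match, while `'OR'` accepts any of them:

```typescript
// Conversations that mention both React and TypeScript.
const reactTs = await client.callTool({
  name: "search_conversations",
  arguments: {
    keywords: ["react", "typescript"],
    keywordOperator: "AND"
  }
});

// Conversations that mention any of the three frameworks.
const frameworks = await client.callTool({
  name: "search_conversations",
  arguments: {
    keywords: ["react", "vue", "angular"],
    keywordOperator: "OR"
  }
});
```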
### Cross-Project Analysis
**Reusable Components**
```
"Find conversations about components used across multiple projects"
"Extract utility functions discussed in different contexts"
"Identify patterns that could be abstracted into shared libraries"
```
**Consistency Analysis**
```
"Compare how similar problems are solved across different projects"
"Find conversations about standardization opportunities"
"Analyze architectural decisions across project boundaries"
```
## 🎓 Educational & Training Applications
### Curriculum Development
**Course Creation**
```
"Generate programming course content from beginner to advanced conversations"
"Create hands-on exercises from conversations about practical implementations"
"Build assessment materials from conversations about common mistakes"
```
**Tutorial Development**
```
"Extract step-by-step tutorials from conversations about complex implementations"
"Create video script content from detailed explanation conversations"
"Generate interactive coding examples from problem-solving conversations"
```
### Mentoring & Teaching
**Mentoring Resources**
```
"Find conversations where complex concepts were explained simply"
"Create mentoring templates from successful knowledge transfer conversations"
"Generate coaching materials from conversations about skill development"
```
**Teaching Materials**
```
"Build explanation frameworks from conversations about difficult concepts"
"Create example repositories from conversations about best practices"
"Generate quiz questions from conversations about common misconceptions"
```
## 🔒 Security & Compliance
### Security Analysis
**Vulnerability Assessment**
```
"Find conversations about security vulnerabilities to create security checklists"
"Extract authentication discussions to standardize auth implementations"
"Generate security review templates from security-focused conversations"
```
**Incident Response**
```
"Create incident response playbooks from security incident conversations"
"Build security monitoring guides from conversations about threat detection"
"Generate recovery procedures from conversations about security breaches"
```
### Compliance & Governance
**Regulatory Compliance**
```
"Extract conversations about GDPR compliance to create privacy guidelines"
"Find discussions about data handling to build governance policies"
"Generate audit documentation from compliance-related conversations"
```
**Risk Management**
```
"Analyze conversations about risk assessment to improve risk processes"
"Find discussions about security controls to standardize implementations"
"Create risk mitigation strategies from conversations about security measures"
```
## 🚀 Business & Product Development
### Feature Development
**Requirements Analysis**
```
"Analyze conversations about user requirements to improve gathering processes"
"Find discussions about feature complexity to improve estimation accuracy"
"Extract user feedback conversations to inform product decisions"
```
**Product Strategy**
```
"Generate feature roadmaps from conversations about user needs"
"Create market analysis from conversations about competitive features"
"Build product specifications from conversations about successful implementations"
```
### Market Research
**Competitive Intelligence**
```
"Find conversations about competitor analysis to track competitive landscape"
"Analyze discussions about market trends to identify opportunities"
"Extract conversations about user research to inform product strategy"
```
**Technology Trends**
```
"Generate technology trend reports from conversations about emerging tools"
"Find discussions about industry changes to inform strategic planning"
"Create innovation reports from conversations about cutting-edge implementations"
```
## 💡 Creative Use Case Examples
### Personal Brand Development
**Portfolio Creation**
```
"Extract conversations about successful projects to create portfolio content"
"Find discussions about innovative solutions to showcase problem-solving skills"
"Generate case studies from conversations about complex implementations"
```
**Thought Leadership**
```
"Create blog post content from conversations about industry insights"
"Generate speaking topics from conversations about expertise areas"
"Build technical writing samples from detailed explanation conversations"
```
### Community Contribution
**Open Source Development**
```
"Find conversations about common problems to identify open source opportunities"
"Extract solutions that could benefit the broader developer community"
"Generate documentation for open source projects from implementation conversations"
```
**Knowledge Sharing**
```
"Create Stack Overflow answers from conversations about solved problems"
"Generate tutorial content for developer communities"
"Build FAQ resources from conversations about common questions"
```
## 🔧 Implementation Tips
### Getting Started
1. **Start Small**: Begin with simple searches to understand your conversation patterns
2. **Use Filters**: Leverage project paths and date ranges to focus your analysis (see the sketch after this list)
3. **Combine Tools**: Use multiple MCP tools together for comprehensive insights
4. **Export Data**: Use export functionality for external analysis and visualization
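For item 2 above, a filtered listing call might look like the sketch below, reusing the `client` from the earlier search example. The `list_conversations` tool name and its `projectPath`/`limit` parameters are assumptions, so verify them against your server's tool listing first.

```typescript
// Hedged sketch: list recent conversations scoped to a single project.
// Tool and parameter names are assumptions; confirm with client.listTools().
const scoped = await client.callTool({
  name: "list_conversations",
  arguments: {
    projectPath: "/path/to/your/project", // hypothetical example path
    limit: 20
  }
});
console.log(scoped.content);
```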
### Best Practices
1. **Regular Analysis**: Set up periodic reviews of your conversation patterns
2. **Tag Important Conversations**: Use consistent keywords for easier searching
3. **Document Insights**: Keep track of valuable insights discovered through analysis
4. **Share Knowledge**: Use extracted insights to improve team collaboration
### Advanced Techniques
1. **Temporal Analysis**: Compare conversation patterns across different time periods
2. **Cross-Reference**: Combine conversation analysis with code metrics and project outcomes
3. **Predictive Insights**: Use historical patterns to predict future challenges and opportunities
4. **Automated Workflows**: Create scripts to regularly extract and analyze chat data (a minimal sketch follows)
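A minimal sketch of such an automated workflow, assuming the `client` setup from the earlier examples and an `export_conversation_data` tool (the tool name, its `format` parameter, and the shape of the result are assumptions):

```typescript
import { writeFile } from "node:fs/promises";

// Run this from cron or CI to snapshot chat data for external analysis.
// `client` is the connected MCP client from the earlier sketch.
const exported = await client.callTool({
  name: "export_conversation_data", // assumed tool name
  arguments: { format: "json" }
});

// Date-stamped output file, e.g. chat-export-2024-06-01.json
const stamp = new Date().toISOString().slice(0, 10);
await writeFile(`chat-export-${stamp}.json`, JSON.stringify(exported.content, null, 2));
```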
---
*This document provides a comprehensive overview of use cases for the Cursor Chat History MCP. Each use case can be adapted and customized based on specific needs, team structures, and project requirements.*
```
--------------------------------------------------------------------------------
/src/database/parser.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, beforeEach } from 'vitest';
import { ConversationParser } from './parser.js';
describe('ConversationParser', () => {
  let parser: ConversationParser;

  beforeEach(() => {
    parser = new ConversationParser();
  });

  describe('parseConversationJSON', () => {
    it('should parse valid legacy conversation JSON', () => {
      const legacyConversationJson = JSON.stringify({
        composerId: 'legacy-123',
        hasLoaded: true,
        text: '',
        richText: '',
        conversation: [
          {
            type: 1,
            bubbleId: 'bubble-1',
            text: 'Hello, world!',
            relevantFiles: ['file1.ts'],
            suggestedCodeBlocks: [],
            attachedFoldersNew: []
          }
        ]
      });
      const result = parser.parseConversationJSON(legacyConversationJson);
      expect(result.composerId).toBe('legacy-123');
      expect('conversation' in result).toBe(true);
    });

    it('should parse valid modern conversation JSON', () => {
      const modernConversationJson = JSON.stringify({
        composerId: 'modern-123',
        _v: 2,
        hasLoaded: true,
        text: '',
        richText: '',
        fullConversationHeadersOnly: [
          {
            type: 1,
            bubbleId: 'bubble-1'
          }
        ]
      });
      const result = parser.parseConversationJSON(modernConversationJson);
      expect(result.composerId).toBe('modern-123');
      expect('_v' in result).toBe(true);
    });

    it('should throw error for invalid JSON', () => {
      const invalidJson = '{ invalid json }';
      expect(() => parser.parseConversationJSON(invalidJson))
        .toThrow('Failed to parse conversation JSON');
    });

    it('should throw error for missing composerId', () => {
      const invalidConversation = JSON.stringify({
        conversation: []
      });
      expect(() => parser.parseConversationJSON(invalidConversation))
        .toThrow('Invalid conversation format');
    });

    it('should throw error for invalid conversation structure', () => {
      const invalidConversation = JSON.stringify({
        composerId: 'test',
        conversation: 'not an array'
      });
      expect(() => parser.parseConversationJSON(invalidConversation))
        .toThrow('Invalid conversation format');
    });
  });

  describe('extractMessages', () => {
    it('should extract messages from legacy conversation', () => {
      const legacyConversation = {
        composerId: 'legacy-123',
        hasLoaded: true,
        text: '',
        richText: '',
        conversation: [
          {
            type: 1,
            bubbleId: 'bubble-1',
            text: 'First message',
            relevantFiles: [],
            suggestedCodeBlocks: [],
            attachedFoldersNew: []
          },
          {
            type: 2,
            bubbleId: 'bubble-2',
            text: 'Second message',
            relevantFiles: [],
            suggestedCodeBlocks: [],
            attachedFoldersNew: []
          }
        ]
      };
      const messages = parser.extractMessages(legacyConversation);
      expect(messages).toHaveLength(2);
      expect(messages[0].text).toBe('First message');
      expect(messages[1].text).toBe('Second message');
    });

    it('should return empty array for modern conversation', () => {
      const modernConversation = {
        composerId: 'modern-123',
        _v: 2,
        hasLoaded: true,
        text: '',
        richText: '',
        fullConversationHeadersOnly: [
          {
            type: 1,
            bubbleId: 'bubble-1'
          }
        ]
      };
      const messages = parser.extractMessages(modernConversation);
      expect(messages).toHaveLength(0);
    });
  });

  describe('extractCodeBlocks', () => {
    it('should extract code blocks from legacy conversation', () => {
      const codeBlock = {
        language: 'typescript',
        code: 'console.log("Hello");',
        filename: 'test.ts'
      };
      const legacyConversation = {
        composerId: 'legacy-123',
        hasLoaded: true,
        text: '',
        richText: '',
        conversation: [
          {
            type: 1,
            bubbleId: 'bubble-1',
            text: 'Message with code',
            relevantFiles: [],
            suggestedCodeBlocks: [codeBlock],
            attachedFoldersNew: []
          }
        ]
      };
      const codeBlocks = parser.extractCodeBlocks(legacyConversation);
      expect(codeBlocks).toHaveLength(1);
      expect(codeBlocks[0]).toEqual(codeBlock);
    });

    it('should return empty array when no code blocks exist', () => {
      const legacyConversation = {
        composerId: 'legacy-123',
        hasLoaded: true,
        text: '',
        richText: '',
        conversation: [
          {
            type: 1,
            bubbleId: 'bubble-1',
            text: 'Message without code',
            relevantFiles: [],
            suggestedCodeBlocks: [],
            attachedFoldersNew: []
          }
        ]
      };
      const codeBlocks = parser.extractCodeBlocks(legacyConversation);
      expect(codeBlocks).toHaveLength(0);
    });

    it('should return empty array for modern conversation', () => {
      const modernConversation = {
        composerId: 'modern-123',
        _v: 2,
        hasLoaded: true,
        text: '',
        richText: '',
        fullConversationHeadersOnly: []
      };
      const codeBlocks = parser.extractCodeBlocks(modernConversation);
      expect(codeBlocks).toHaveLength(0);
    });
  });

  describe('extractFileReferences', () => {
    it('should extract file references from legacy conversation', () => {
      const legacyConversation = {
        composerId: 'legacy-123',
        hasLoaded: true,
        text: '',
        richText: '',
        conversation: [
          {
            type: 1,
            bubbleId: 'bubble-1',
            text: 'First message',
            relevantFiles: ['file1.ts', 'file2.js'],
            suggestedCodeBlocks: [],
            attachedFoldersNew: []
          },
          {
            type: 2,
            bubbleId: 'bubble-2',
            text: 'Second message',
            relevantFiles: ['file3.py', 'file1.ts'], // Duplicate file1.ts
            suggestedCodeBlocks: [],
            attachedFoldersNew: []
          }
        ]
      };
      const files = parser.extractFileReferences(legacyConversation);
      expect(files).toHaveLength(3);
      expect(files).toContain('file1.ts');
      expect(files).toContain('file2.js');
      expect(files).toContain('file3.py');
      // Should remove duplicates
      expect(files.filter(f => f === 'file1.ts')).toHaveLength(1);
    });

    it('should return empty array when no file references exist', () => {
      const legacyConversation = {
        composerId: 'legacy-123',
        hasLoaded: true,
        text: '',
        richText: '',
        conversation: [
          {
            type: 1,
            bubbleId: 'bubble-1',
            text: 'Message without files',
            relevantFiles: [],
            suggestedCodeBlocks: [],
            attachedFoldersNew: []
          }
        ]
      };
      const files = parser.extractFileReferences(legacyConversation);
      expect(files).toHaveLength(0);
    });
  });

  describe('extractAttachedFolders', () => {
    it('should extract attached folders from legacy conversation', () => {
      const legacyConversation = {
        composerId: 'legacy-123',
        hasLoaded: true,
        text: '',
richText: '',
conversation: [
{
type: 1,
bubbleId: 'bubble-1',
text: 'First message',
relevantFiles: [],
suggestedCodeBlocks: [],
attachedFoldersNew: ['src/', 'tests/']
},
{
type: 2,
bubbleId: 'bubble-2',
text: 'Second message',
relevantFiles: [],
suggestedCodeBlocks: [],
attachedFoldersNew: ['docs/', 'src/'] // Duplicate src/
}
]
};
const folders = parser.extractAttachedFolders(legacyConversation);
expect(folders).toHaveLength(3);
expect(folders).toContain('src/');
expect(folders).toContain('tests/');
expect(folders).toContain('docs/');
// Should remove duplicates
expect(folders.filter(f => f === 'src/')).toHaveLength(1);
});
it('should return empty array when no attached folders exist', () => {
const legacyConversation = {
composerId: 'legacy-123',
hasLoaded: true,
text: '',
richText: '',
conversation: [
{
type: 1,
bubbleId: 'bubble-1',
text: 'Message without folders',
relevantFiles: [],
suggestedCodeBlocks: [],
attachedFoldersNew: []
}
]
};
const folders = parser.extractAttachedFolders(legacyConversation);
expect(folders).toHaveLength(0);
});
});
describe('extractTimestamps', () => {
it('should extract valid timestamps from legacy conversation', () => {
const legacyConversation = {
composerId: 'legacy-123',
hasLoaded: true,
text: '',
richText: '',
conversation: [
{
type: 1,
bubbleId: 'bubble-1',
text: 'First message',
relevantFiles: [],
suggestedCodeBlocks: [],
attachedFoldersNew: [],
timestamp: '2023-01-01T12:00:00Z'
},
{
type: 2,
bubbleId: 'bubble-2',
text: 'Second message',
relevantFiles: [],
suggestedCodeBlocks: [],
attachedFoldersNew: [],
timestamp: '2023-01-01T13:00:00Z'
}
]
};
const timestamps = parser.extractTimestamps(legacyConversation);
expect(timestamps).toHaveLength(2);
expect(timestamps[0]).toEqual(new Date('2023-01-01T12:00:00Z'));
expect(timestamps[1]).toEqual(new Date('2023-01-01T13:00:00Z'));
});
it('should skip invalid timestamps', () => {
const legacyConversation = {
composerId: 'legacy-123',
hasLoaded: true,
text: '',
richText: '',
conversation: [
{
type: 1,
bubbleId: 'bubble-1',
text: 'First message',
relevantFiles: [],
suggestedCodeBlocks: [],
attachedFoldersNew: [],
timestamp: 'invalid-date'
},
{
type: 2,
bubbleId: 'bubble-2',
text: 'Second message',
relevantFiles: [],
suggestedCodeBlocks: [],
attachedFoldersNew: [],
timestamp: '2023-01-01T13:00:00Z'
}
]
};
const timestamps = parser.extractTimestamps(legacyConversation);
expect(timestamps).toHaveLength(1);
expect(timestamps[0]).toEqual(new Date('2023-01-01T13:00:00Z'));
});
});
describe('getConversationMetadata', () => {
it('should return metadata for legacy conversation', () => {
const codeBlock = {
language: 'typescript',
code: 'console.log("Hello");',
filename: 'test.ts'
};
const legacyConversation = {
composerId: 'legacy-123',
hasLoaded: true,
text: '',
richText: '',
storedSummary: 'This is a summary',
conversation: [
{
type: 1,
bubbleId: 'bubble-1',
text: 'Message with code',
relevantFiles: ['file1.ts', 'file2.js'],
suggestedCodeBlocks: [codeBlock],
attachedFoldersNew: ['src/']
}
]
};
const metadata = parser.getConversationMetadata(legacyConversation);
expect(metadata.format).toBe('legacy');
expect(metadata.messageCount).toBe(1);
expect(metadata.hasCodeBlocks).toBe(true);
expect(metadata.codeBlockCount).toBe(1);
expect(metadata.fileCount).toBe(2);
expect(metadata.folderCount).toBe(1);
expect(metadata.hasStoredSummary).toBe(true);
expect(metadata.size).toBeGreaterThan(0);
});
it('should return metadata for modern conversation', () => {
const modernConversation = {
composerId: 'modern-123',
_v: 2,
hasLoaded: true,
text: '',
richText: '',
fullConversationHeadersOnly: [
{ type: 1, bubbleId: 'bubble-1' },
{ type: 2, bubbleId: 'bubble-2' }
]
};
const metadata = parser.getConversationMetadata(modernConversation);
expect(metadata.format).toBe('modern');
expect(metadata.messageCount).toBe(2);
expect(metadata.hasCodeBlocks).toBe(false);
expect(metadata.codeBlockCount).toBe(0);
expect(metadata.fileCount).toBe(0);
expect(metadata.folderCount).toBe(0);
expect(metadata.hasStoredSummary).toBe(false);
expect(metadata.size).toBeGreaterThan(0);
});
});
describe('searchInConversation', () => {
it('should find matches in conversation text', () => {
const legacyConversation = {
composerId: 'legacy-123',
hasLoaded: true,
text: '',
richText: '',
conversation: [
{
type: 1,
bubbleId: 'bubble-1',
text: 'This is a test message',
relevantFiles: [],
suggestedCodeBlocks: [],
attachedFoldersNew: []
},
{
type: 2,
bubbleId: 'bubble-2',
text: 'Another test with different content',
relevantFiles: [],
suggestedCodeBlocks: [],
attachedFoldersNew: []
}
]
};
const results = parser.searchInConversation(legacyConversation, 'test');
expect(results).toHaveLength(2);
expect(results[0].messageIndex).toBe(0);
expect(results[0].message.text).toBe('This is a test message');
expect(results[0].matchPositions).toContain(10); // Position of 'test'
expect(results[1].messageIndex).toBe(1);
expect(results[1].message.text).toBe('Another test with different content');
});
it('should handle case sensitive search', () => {
const legacyConversation = {
composerId: 'legacy-123',
hasLoaded: true,
text: '',
richText: '',
conversation: [
{
type: 1,
bubbleId: 'bubble-1',
text: 'This is a Test message',
relevantFiles: [],
suggestedCodeBlocks: [],
attachedFoldersNew: []
}
]
};
const caseSensitiveResults = parser.searchInConversation(legacyConversation, 'test', true);
const caseInsensitiveResults = parser.searchInConversation(legacyConversation, 'test', false);
expect(caseSensitiveResults).toHaveLength(0);
expect(caseInsensitiveResults).toHaveLength(1);
});
});
describe('containsSummarization', () => {
it('should return true when conversation contains summarization keywords', () => {
const legacyConversation = {
composerId: 'legacy-123',
hasLoaded: true,
text: '',
richText: '',
conversation: [
{
type: 1,
bubbleId: 'bubble-1',
text: 'Please summarize this document',
relevantFiles: [],
suggestedCodeBlocks: [],
attachedFoldersNew: []
}
]
};
const result = parser.containsSummarization(legacyConversation);
expect(result).toBe(true);
});
it('should return false when conversation does not contain summarization keywords', () => {
const legacyConversation = {
composerId: 'legacy-123',
hasLoaded: true,
text: '',
richText: '',
conversation: [
{
type: 1,
bubbleId: 'bubble-1',
text: 'This is a regular message',
relevantFiles: [],
suggestedCodeBlocks: [],
attachedFoldersNew: []
}
]
};
const result = parser.containsSummarization(legacyConversation);
expect(result).toBe(false);
});
});
describe('parseBubbleMessage', () => {
it('should parse valid bubble message JSON', () => {
const bubbleMessage = {
type: 1,
bubbleId: 'bubble-123',
text: 'Hello from bubble',
relevantFiles: ['file1.ts'],
suggestedCodeBlocks: []
};
const jsonString = JSON.stringify(bubbleMessage);
const result = parser.parseBubbleMessage(jsonString);
expect(result).toEqual(bubbleMessage);
});
it('should throw error for invalid bubble message JSON', () => {
const invalidJson = '{ invalid json }';
expect(() => parser.parseBubbleMessage(invalidJson))
.toThrow('Failed to parse bubble message JSON');
});
it('should throw error for invalid bubble message structure', () => {
const invalidBubble = {
text: 'Missing required fields'
};
const jsonString = JSON.stringify(invalidBubble);
expect(() => parser.parseBubbleMessage(jsonString))
.toThrow('Invalid bubble message format');
});
});
});
```
--------------------------------------------------------------------------------
/.roo/rules/dev_workflow.md:
--------------------------------------------------------------------------------
```markdown
---
description: Guide for using Task Master to manage task-driven development workflows
globs: **/*
alwaysApply: true
---
# Task Master Development Workflow
This guide outlines the typical process for using Task Master to manage software development projects.
## Primary Interaction: MCP Server vs. CLI
Task Master offers two primary ways to interact:
1. **MCP Server (Recommended for Integrated Tools)**:
- For AI agents and integrated development environments (like Roo Code), interacting via the **MCP server is the preferred method**.
- The MCP server exposes Task Master functionality through a set of tools (e.g., `get_tasks`, `add_subtask`).
- This method offers better performance, structured data exchange, and richer error handling compared to CLI parsing.
- Refer to [`mcp.md`](mdc:.roo/rules/mcp.md) for details on the MCP architecture and available tools.
- A comprehensive list and description of MCP tools and their corresponding CLI commands can be found in [`taskmaster.md`](mdc:.roo/rules/taskmaster.md).
- **Restart the MCP server** if core logic in `scripts/modules` or MCP tool/direct function definitions change.
2. **`task-master` CLI (For Users & Fallback)**:
- The global `task-master` command provides a user-friendly interface for direct terminal interaction.
- It can also serve as a fallback if the MCP server is inaccessible or a specific function isn't exposed via MCP.
- Install globally with `npm install -g task-master-ai` or use locally via `npx task-master-ai ...`.
- The CLI commands often mirror the MCP tools (e.g., `task-master list` corresponds to `get_tasks`).
- Refer to [`taskmaster.md`](mdc:.roo/rules/taskmaster.md) for a detailed command reference.
## Standard Development Workflow Process
- Start new projects by running the `initialize_project` tool / `task-master init` or `parse_prd` / `task-master parse-prd --input='<prd-file.txt>'` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)) to generate an initial tasks.json
- Begin coding sessions with `get_tasks` / `task-master list` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)) to see current tasks, status, and IDs
- Determine the next task to work on using `next_task` / `task-master next` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)).
- Analyze task complexity with `analyze_project_complexity` / `task-master analyze-complexity --research` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)) before breaking down tasks
- Review complexity report using `complexity_report` / `task-master complexity-report` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)).
- Select tasks based on dependencies (all marked 'done'), priority level, and ID order
- Clarify tasks by checking task files in tasks/ directory or asking for user input
- View specific task details using `get_task` / `task-master show <id>` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)) to understand implementation requirements
- Break down complex tasks using `expand_task` / `task-master expand --id=<id> --force --research` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)) with appropriate flags like `--force` (to replace existing subtasks) and `--research`.
- Clear existing subtasks if needed using `clear_subtasks` / `task-master clear-subtasks --id=<id>` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)) before regenerating
- Implement code following task details, dependencies, and project standards
- Verify tasks according to test strategies before marking as complete (See [`tests.md`](mdc:.roo/rules/tests.md))
- Mark completed tasks with `set_task_status` / `task-master set-status --id=<id> --status=done` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md))
- Update dependent tasks when implementation differs from original plan using `update` / `task-master update --from=<id> --prompt="..."` or `update_task` / `task-master update-task --id=<id> --prompt="..."` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md))
- Add new tasks discovered during implementation using `add_task` / `task-master add-task --prompt="..." --research` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)).
- Add new subtasks as needed using `add_subtask` / `task-master add-subtask --parent=<id> --title="..."` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)).
- Append notes or details to subtasks using `update_subtask` / `task-master update-subtask --id=<subtaskId> --prompt='Add implementation notes here...\nMore details...'` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)).
- Generate task files with `generate` / `task-master generate` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)) after updating tasks.json
- Maintain valid dependency structure with `add_dependency`/`remove_dependency` tools or `task-master add-dependency`/`remove-dependency` commands, `validate_dependencies` / `task-master validate-dependencies`, and `fix_dependencies` / `task-master fix-dependencies` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)) when needed
- Respect dependency chains and task priorities when selecting work
- Report progress regularly using `get_tasks` / `task-master list`
- Reorganize tasks as needed using `move_task` / `task-master move --from=<id> --to=<id>` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)) to change task hierarchy or ordering
## Task Complexity Analysis
- Run `analyze_project_complexity` / `task-master analyze-complexity --research` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)) for comprehensive analysis
- Review complexity report via `complexity_report` / `task-master complexity-report` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)) for a formatted, readable version.
- Focus on tasks with highest complexity scores (8-10) for detailed breakdown
- Use analysis results to determine appropriate subtask allocation
- Note that reports are automatically used by the `expand_task` tool/command
## Task Breakdown Process
- Use `expand_task` / `task-master expand --id=<id>`. It automatically uses the complexity report if found, otherwise generates a default number of subtasks.
- Use `--num=<number>` to specify an explicit number of subtasks, overriding defaults or complexity report recommendations.
- Add `--research` flag to leverage Perplexity AI for research-backed expansion.
- Add `--force` flag to clear existing subtasks before generating new ones (default is to append).
- Use `--prompt="<context>"` to provide additional context when needed.
- Review and adjust generated subtasks as necessary.
- Use `expand_all` tool or `task-master expand --all` to expand multiple pending tasks at once, respecting flags like `--force` and `--research`.
- If subtasks need complete replacement (regardless of the `--force` flag on `expand`), clear them first with `clear_subtasks` / `task-master clear-subtasks --id=<id>`.
## Implementation Drift Handling
- When implementation differs significantly from planned approach
- When future tasks need modification due to current implementation choices
- When new dependencies or requirements emerge
- Use `update` / `task-master update --from=<futureTaskId> --prompt='<explanation>\nUpdate context...' --research` to update multiple future tasks.
- Use `update_task` / `task-master update-task --id=<taskId> --prompt='<explanation>\nUpdate context...' --research` to update a single specific task.
## Task Status Management
- Use 'pending' for tasks ready to be worked on
- Use 'done' for completed and verified tasks
- Use 'deferred' for postponed tasks
- Add custom status values as needed for project-specific workflows
## Task Structure Fields
- **id**: Unique identifier for the task (Example: `1`, `1.1`)
- **title**: Brief, descriptive title (Example: `"Initialize Repo"`)
- **description**: Concise summary of what the task involves (Example: `"Create a new repository, set up initial structure."`)
- **status**: Current state of the task (Example: `"pending"`, `"done"`, `"deferred"`)
- **dependencies**: IDs of prerequisite tasks (Example: `[1, 2.1]`)
- Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending)
- This helps quickly identify which prerequisite tasks are blocking work
- **priority**: Importance level (Example: `"high"`, `"medium"`, `"low"`)
- **details**: In-depth implementation instructions (Example: `"Use GitHub client ID/secret, handle callback, set session token."`)
- **testStrategy**: Verification approach (Example: `"Deploy and call endpoint to confirm 'Hello World' response."`)
- **subtasks**: List of smaller, more specific tasks (Example: `[{"id": 1, "title": "Configure OAuth", ...}]`)
- Refer to task structure details (previously linked to `tasks.md`); a TypeScript sketch of this shape follows below.
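As a rough illustration of these fields (a hedged sketch: field names come from the list above, while the types are inferred and not normative):
```typescript
// Hedged sketch of the task shape documented above; types are inferred from
// the examples, not taken from Taskmaster's source.
interface Task {
  id: number | string;                  // e.g. 1, or "1.1" for a subtask
  title: string;                        // brief, descriptive title
  description: string;                  // concise summary of the task
  status: string;                       // e.g. 'pending', 'done', 'deferred'; custom values allowed
  dependencies: Array<number | string>; // prerequisite task IDs, e.g. [1, "2.1"]
  priority: 'high' | 'medium' | 'low';
  details: string;                      // in-depth implementation instructions
  testStrategy: string;                 // verification approach
  subtasks: Task[];                     // smaller, more specific tasks
}
```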
## Configuration Management (Updated)
Taskmaster configuration is managed through two main mechanisms:
1. **`.taskmasterconfig` File (Primary):**
* Located in the project root directory.
* Stores most configuration settings: AI model selections (main, research, fallback), parameters (max tokens, temperature), logging level, default subtasks/priority, project name, etc.
* **Managed via `task-master models --setup` command.** Do not edit manually unless you know what you are doing.
* **View/Set specific models via `task-master models` command or `models` MCP tool.**
* Created automatically when you run `task-master models --setup` for the first time.
2. **Environment Variables (`.env` / `mcp.json`):**
* Used **only** for sensitive API keys and specific endpoint URLs.
* Place API keys (one per provider) in a `.env` file in the project root for CLI usage.
* For MCP/Roo Code integration, configure these keys in the `env` section of `.roo/mcp.json`.
* Available keys/variables: See `assets/env.example` or the Configuration section in the command reference (previously linked to `taskmaster.md`).
**Important:** Non-API key settings (like model selections, `MAX_TOKENS`, `TASKMASTER_LOG_LEVEL`) are **no longer configured via environment variables**. Use the `task-master models` command (or `--setup` for interactive configuration) or the `models` MCP tool.
**If AI commands FAIL in MCP** verify that the API key for the selected provider is present in the `env` section of `.roo/mcp.json`.
**If AI commands FAIL in CLI** verify that the API key for the selected provider is present in the `.env` file in the root of the project.
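For orientation only, the split between the two mechanisms might look like the following sketch. It is expressed as TypeScript object literals purely for illustration; the real files are JSON/dotenv, and the key names shown are assumptions based on typical Taskmaster providers, not values from this repository:
```typescript
// Hedged sketch of the configuration split described above.
// .taskmasterconfig (JSON, managed by `task-master models --setup`):
const taskmasterConfig = {
  models: { main: '<model-id>', research: '<model-id>', fallback: '<model-id>' },
  projectName: 'my-project', // placeholder
};
// .env (CLI) or the `env` block of .roo/mcp.json (MCP): API keys only.
const envKeys = {
  ANTHROPIC_API_KEY: '<secret>',  // assumed provider key name
  PERPLEXITY_API_KEY: '<secret>', // assumed; used for --research flows
};
```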
## Determining the Next Task
- Run `next_task` / `task-master next` to show the next task to work on.
- The command identifies tasks with all dependencies satisfied
- Tasks are prioritized by priority level, dependency count, and ID
- The command shows comprehensive task information including:
- Basic task details and description
- Implementation details
- Subtasks (if they exist)
- Contextual suggested actions
- Recommended before starting any new development work
- Respects your project's dependency structure
- Ensures tasks are completed in the appropriate sequence
- Provides ready-to-use commands for common task actions
## Viewing Specific Task Details
- Run `get_task` / `task-master show <id>` to view a specific task.
- Use dot notation for subtasks: `task-master show 1.2` (shows subtask 2 of task 1)
- Displays comprehensive information similar to the next command, but for a specific task
- For parent tasks, shows all subtasks and their current status
- For subtasks, shows parent task information and relationship
- Provides contextual suggested actions appropriate for the specific task
- Useful for examining task details before implementation or checking status
## Managing Task Dependencies
- Use `add_dependency` / `task-master add-dependency --id=<id> --depends-on=<id>` to add a dependency.
- Use `remove_dependency` / `task-master remove-dependency --id=<id> --depends-on=<id>` to remove a dependency.
- The system prevents circular dependencies and duplicate dependency entries
- Dependencies are checked for existence before being added or removed
- Task files are automatically regenerated after dependency changes
- Dependencies are visualized with status indicators in task listings and files
## Task Reorganization
- Use `move_task` / `task-master move --from=<id> --to=<id>` to move tasks or subtasks within the hierarchy
- This command supports several use cases:
- Moving a standalone task to become a subtask (e.g., `--from=5 --to=7`)
- Moving a subtask to become a standalone task (e.g., `--from=5.2 --to=7`)
- Moving a subtask to a different parent (e.g., `--from=5.2 --to=7.3`)
- Reordering subtasks within the same parent (e.g., `--from=5.2 --to=5.4`)
- Moving a task to a new, non-existent ID position (e.g., `--from=5 --to=25`)
- Moving multiple tasks at once using comma-separated IDs (e.g., `--from=10,11,12 --to=16,17,18`)
- The system includes validation to prevent data loss:
- Allows moving to non-existent IDs by creating placeholder tasks
- Prevents moving to existing task IDs that have content (to avoid overwriting)
- Validates source tasks exist before attempting to move them
- The system maintains proper parent-child relationships and dependency integrity
- Task files are automatically regenerated after the move operation
- This provides greater flexibility in organizing and refining your task structure as project understanding evolves
- This is especially useful for merge conflicts that arise when teams create tasks on separate branches: resolve them by moving your tasks to new IDs while keeping theirs.
## Iterative Subtask Implementation
Once a task has been broken down into subtasks using `expand_task` or similar methods, follow this iterative process for implementation:
1. **Understand the Goal (Preparation):**
* Use `get_task` / `task-master show <subtaskId>` (see [`taskmaster.md`](mdc:.roo/rules/taskmaster.md)) to thoroughly understand the specific goals and requirements of the subtask.
2. **Initial Exploration & Planning (Iteration 1):**
* This is the first attempt at creating a concrete implementation plan.
* Explore the codebase to identify the precise files, functions, and even specific lines of code that will need modification.
* Determine the intended code changes (diffs) and their locations.
* Gather *all* relevant details from this exploration phase.
3. **Log the Plan:**
* Run `update_subtask` / `task-master update-subtask --id=<subtaskId> --prompt='<detailed plan>'`.
* Provide the *complete and detailed* findings from the exploration phase in the prompt. Include file paths, line numbers, proposed diffs, reasoning, and any potential challenges identified. Do not omit details. The goal is to create a rich, timestamped log within the subtask's `details`.
4. **Verify the Plan:**
* Run `get_task` / `task-master show <subtaskId>` again to confirm that the detailed implementation plan has been successfully appended to the subtask's details.
5. **Begin Implementation:**
* Set the subtask status using `set_task_status` / `task-master set-status --id=<subtaskId> --status=in-progress`.
* Start coding based on the logged plan.
6. **Refine and Log Progress (Iteration 2+):**
* As implementation progresses, you will encounter challenges, discover nuances, or confirm successful approaches.
* **Before appending new information**: Briefly review the *existing* details logged in the subtask (using `get_task` or recalling from context) to ensure the update adds fresh insights and avoids redundancy.
* **Regularly** use `update_subtask` / `task-master update-subtask --id=<subtaskId> --prompt='<update details>\n- What worked...\n- What did not work...'` to append new findings.
* **Crucially, log:**
* What worked ("fundamental truths" discovered).
* What didn't work and why (to avoid repeating mistakes).
* Specific code snippets or configurations that were successful.
* Decisions made, especially if confirmed with user input.
* Any deviations from the initial plan and the reasoning.
* The objective is to continuously enrich the subtask's details, creating a log of the implementation journey that helps the AI (and human developers) learn, adapt, and avoid repeating errors.
7. **Review & Update Rules (Post-Implementation):**
* Once the implementation for the subtask is functionally complete, review all code changes and the relevant chat history.
* Identify any new or modified code patterns, conventions, or best practices established during the implementation.
* Create new or update existing rules following internal guidelines (previously linked to `cursor_rules.md` and `self_improve.md`).
8. **Mark Task Complete:**
* After verifying the implementation and updating any necessary rules, mark the subtask as completed: `set_task_status` / `task-master set-status --id=<subtaskId> --status=done`.
9. **Commit Changes (If using Git):**
* Stage the relevant code changes and any updated/new rule files (`git add .`).
* Craft a comprehensive Git commit message summarizing the work done for the subtask, including both code implementation and any rule adjustments.
* Execute the commit command directly in the terminal (e.g., `git commit -m 'feat(module): Implement feature X for subtask <subtaskId>\n\n- Details about changes...\n- Updated rule Y for pattern Z'`).
* Consider if a Changeset is needed according to internal versioning guidelines (previously linked to `changeset.md`). If so, run `npm run changeset`, stage the generated file, and amend the commit or create a new one.
10. **Proceed to Next Subtask:**
* Identify the next subtask (e.g., using `next_task` / `task-master next`).
## Code Analysis & Refactoring Techniques
- **Top-Level Function Search**:
- Useful for understanding module structure or planning refactors.
- Use grep/ripgrep to find exported functions/constants:
`rg "export (async function|function|const) \w+"` or similar patterns.
- Can help compare functions between files during migrations or identify potential naming conflicts.
---
*This workflow provides a general guideline. Adapt it based on your specific project needs and team practices.*
```
--------------------------------------------------------------------------------
/src/server.ts:
--------------------------------------------------------------------------------
```typescript
#!/usr/bin/env node
/*
* WORKFLOW GUIDANCE FOR AI ASSISTANTS:
*
* **ALWAYS START WITH PROJECT FILTERING** for project-specific analysis:
* 1. DISCOVERY: Use list_conversations with projectPath parameter to find project-specific conversations
* 2. ANALYTICS: Use get_conversation_analytics with projectPath and ["files", "languages"] breakdowns
* - Files/languages breakdowns contain conversation IDs in their arrays!
* 3. DEEP DIVE: Use get_conversation with specific conversation IDs from step 1 or 2
* 4. ANALYSIS: Use analytics tools (find_related, extract_elements) for insights
* 5. DATE FILTERING: Use get_system_info first when applying date filters to search_conversations
*
* RECOMMENDED PATTERN FOR PROJECT ANALYSIS:
* - list_conversations(projectPath: "project-name", startDate: "YYYY-MM-DD", endDate: "YYYY-MM-DD")
* - get_conversation_analytics(projectPath: "project-name", includeBreakdowns: ["files", "languages"])
* - Extract conversation IDs from files/languages.conversations arrays
* - get_conversation(conversationId: "id-from-breakdown") for each relevant conversation
*
* PROJECT PATH EXAMPLES:
* - "my-app" (project name)
* - "/Users/name/Projects/my-app" (full path)
* - "editor-elements" (project name from path like /Users/name/Projects/editor-elements)
*/
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import {
listConversations,
getConversation,
searchConversations,
getConversationsByProject
} from './tools/conversation-tools.js';
import {
getConversationAnalytics,
findRelatedConversations
} from './tools/analytics-tools.js';
import {
extractConversationElements,
exportConversationData
} from './tools/extraction-tools.js';
import { z } from 'zod';
import { formatResponse } from './utils/formatter.js';
const server = new McpServer({
name: 'cursor-chat-history-mcp',
version: '0.1.0',
});
server.tool(
'list_conversations',
'Lists Cursor chats with summaries, titles, and metadata ordered by recency. **HIGHLY RECOMMENDED: Use projectPath parameter to filter conversations by specific project/codebase** - this dramatically improves relevance by finding conversations that actually worked on files in that project. Returns conversation IDs for use with get_conversation tool. WORKFLOW TIP: Start with projectPath filtering for project-specific analysis, then call get_conversation with specific IDs from results. Includes AI-generated summaries by default. Supports date range filtering (YYYY-MM-DD format).',
{
limit: z.number().min(1).max(100).optional().default(10).describe('Maximum number of conversations to return (1-100)'),
minLength: z.number().min(0).optional().default(100).describe('Minimum conversation length in characters to include'),
hasCodeBlocks: z.boolean().optional().describe('Filter to conversations that contain code blocks'),
keywords: z.array(z.string()).optional().describe('Filter conversations containing any of these exact keywords (literal text matching)'),
projectPath: z.string().optional().describe('**RECOMMENDED** Filter conversations by project/codebase name (e.g., "my-app") or full path (e.g., "/Users/name/Projects/my-app"). This finds conversations that actually worked on files in that project, dramatically improving relevance for project-specific analysis.'),
filePattern: z.string().optional().describe('Filter conversations mentioning files matching this pattern (e.g., "*.tsx")'),
relevantFiles: z.array(z.string()).optional().describe('Filter conversations that reference any of these specific files'),
startDate: z.string().optional().describe('Start date for filtering (YYYY-MM-DD). Note: Timestamps may be unreliable.'),
endDate: z.string().optional().describe('End date for filtering (YYYY-MM-DD). Note: Timestamps may be unreliable.'),
includeEmpty: z.boolean().optional().default(false).describe('Include conversations with no messages'),
includeAiSummaries: z.boolean().optional().default(true).describe('Include AI-generated conversation summaries'),
includeRelevanceScore: z.boolean().optional().default(false).describe('Include relevance scores when filtering by projectPath'),
outputMode: z.enum(['json', 'compact-json']).optional().default('json').describe('Output format: "json" for formatted JSON (default), "compact-json" for minified JSON')
},
async (input) => {
try {
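// When a relevance score is requested alongside projectPath, delegate to the
// project-specific query, which returns per-conversation relevance scores.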
if (input.projectPath && input.includeRelevanceScore) {
const projectInput = {
projectPath: input.projectPath,
filePattern: input.filePattern,
orderBy: 'recency' as const,
limit: input.limit,
fuzzyMatch: false
};
const result = await getConversationsByProject(projectInput);
const transformedResult = {
conversations: result.conversations.map(conv => ({
...conv,
title: undefined,
aiGeneratedSummary: undefined,
relevanceScore: conv.relevanceScore
})),
totalFound: result.totalFound,
filters: {
limit: input.limit ?? 10,
minLength: input.minLength ?? 100,
hasCodeBlocks: input.hasCodeBlocks,
keywords: input.keywords,
projectPath: input.projectPath,
filePattern: input.filePattern,
relevantFiles: input.relevantFiles,
includeAiSummaries: input.includeAiSummaries
}
};
return {
content: [{
type: 'text',
text: formatResponse(transformedResult, input.outputMode)
}]
};
} else {
const mappedInput = {
limit: input.limit,
minLength: input.minLength,
format: 'both' as const,
hasCodeBlocks: input.hasCodeBlocks,
keywords: input.keywords,
projectPath: input.projectPath,
filePattern: input.filePattern,
relevantFiles: input.relevantFiles,
startDate: input.startDate,
endDate: input.endDate,
includeEmpty: input.includeEmpty,
includeAiSummaries: input.includeAiSummaries
};
const result = await listConversations(mappedInput);
return {
content: [{
type: 'text',
text: formatResponse(result, input.outputMode)
}]
};
}
} catch (error) {
return {
content: [{
type: 'text',
text: `Error: ${error instanceof Error ? error.message : 'Unknown error occurred'}`
}]
};
}
}
);
server.tool(
'get_conversation',
'Retrieves the complete content of a specific Cursor conversation including all messages, code blocks, file references, title, and AI summary. WORKFLOW TIP: Use conversation IDs from list_conversations, search_conversations, or analytics breakdowns (files/languages arrays contain conversation IDs). Use summaryOnly=true to get enhanced summary data without full message content when you need to conserve context.',
{
conversationId: z.string().min(1).describe('Conversation ID from list_conversations, search_conversations, or analytics breakdowns'),
summaryOnly: z.boolean().optional().default(false).describe('Return only enhanced summary data without full message content'),
outputMode: z.enum(['json', 'compact-json']).optional().default('json').describe('Output format: "json" for formatted JSON (default), "compact-json" for minified JSON')
},
async (input) => {
try {
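// Always include code blocks and file references, resolve bubble references
// into full messages, and leave metadata off to keep responses compact.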
const fullInput = {
...input,
includeCodeBlocks: true,
includeFileReferences: true,
includeMetadata: false,
resolveBubbles: true
};
const result = await getConversation(fullInput);
return {
content: [{
type: 'text',
text: formatResponse(result, input.outputMode)
}]
};
} catch (error) {
return {
content: [{
type: 'text',
text: `Error: ${error instanceof Error ? error.message : 'Unknown error occurred'}`
}]
};
}
}
);
server.tool(
'search_conversations',
'Searches through Cursor chat content using exact text matching (NOT semantic search) to find relevant discussions. **WARNING: For project-specific searches, use list_conversations with projectPath instead of this tool!** This tool is for searching message content, not project filtering.\n\n**WHEN TO USE THIS TOOL:**\n- Searching for specific technical terms in message content (e.g., "useState", "async/await")\n- Finding conversations mentioning specific error messages\n- Searching for code patterns or function names\n\n**WHEN NOT TO USE THIS TOOL:**\n- ❌ DON\'T use query="project-name" - use list_conversations with projectPath instead\n- ❌ DON\'T search for project names in message content\n- ❌ DON\'T use this for project-specific filtering\n\nSearch methods (all use exact/literal text matching):\n1. Simple text matching: Use query parameter for literal string matching (e.g., "react hooks")\n2. Multi-keyword: Use keywords array with keywordOperator for exact matching\n3. LIKE patterns: Advanced pattern matching with SQL wildcards (% = any chars, _ = single char)\n4. Date range: Filter by message timestamps (YYYY-MM-DD format)\n\nIMPORTANT: When using date filters, call get_system_info first to know today\'s date.\n\nExamples: likePattern="%useState(%" for function calls, keywords=["typescript","interface"] with AND operator.',
{
query: z.string().optional().describe('Exact text matching - searches for literal string occurrences in MESSAGE CONTENT (e.g., "react hooks", "useState", "error message"). ❌ DON\'T use for project names - use list_conversations with projectPath instead!'),
keywords: z.array(z.string().min(1)).optional().describe('Array of keywords for exact text matching - use with keywordOperator to find conversations with specific combinations'),
keywordOperator: z.enum(['AND', 'OR']).optional().default('OR').describe('How to combine keywords: "AND" = all keywords must be present, "OR" = any keyword can be present'),
likePattern: z.string().optional().describe('SQL LIKE pattern for advanced searches - use % for any characters, _ for single character. Examples: "%useState(%" for function calls, "%.tsx%" for file types'),
startDate: z.string().optional().describe('Start date for search (YYYY-MM-DD). Note: Timestamps may be unreliable.'),
endDate: z.string().optional().describe('End date for search (YYYY-MM-DD). Note: Timestamps may be unreliable.'),
searchType: z.enum(['all', 'project', 'files', 'code']).optional().default('all').describe('Focus search on specific content types. Use "project" for project-specific searches that leverage file path context.'),
maxResults: z.number().min(1).max(50).optional().default(10).describe('Maximum number of conversations to return'),
includeCode: z.boolean().optional().default(true).describe('Include code blocks in search results'),
outputMode: z.enum(['json', 'compact-json']).optional().default('json').describe('Output format: "json" for formatted JSON (default), "compact-json" for minified JSON')
},
async (input) => {
try {
const hasSearchCriteria = (input.query && input.query.trim() !== '' && input.query.trim() !== '?') || input.keywords || input.likePattern;
const hasDateFilter = input.startDate || input.endDate;
const hasOtherFilters = input.searchType !== 'all';
if (!hasSearchCriteria && !hasDateFilter && !hasOtherFilters) {
throw new Error('At least one search criteria (query, keywords, likePattern), date filter (startDate, endDate), or search type filter must be provided');
}
const fullInput = {
...input,
contextLines: 2,
searchBubbles: true,
format: 'both' as const,
highlightMatches: true,
projectSearch: input.searchType === 'project',
fuzzyMatch: input.searchType === 'project',
includePartialPaths: input.searchType === 'project',
includeFileContent: false,
minRelevanceScore: 0.1,
orderBy: 'recency' as const
};
const result = await searchConversations(fullInput);
return {
content: [{
type: 'text',
text: formatResponse(result, input.outputMode)
}]
};
} catch (error) {
return {
content: [{
type: 'text',
text: `Error: ${error instanceof Error ? error.message : 'Unknown error occurred'}`
}]
};
}
}
);
server.tool(
'get_conversation_analytics',
'Get comprehensive analytics and statistics about Cursor chats including usage patterns, file activity, programming language distribution, and temporal trends. **BEST PRACTICE: Use projectPath parameter for project-specific analytics** - this analyzes only conversations that worked on files in that project, providing much more relevant insights for understanding coding patterns, file usage, and development activity within a specific codebase. WORKFLOW TIP: Always include "files" and "languages" in breakdowns - these contain conversation IDs in their arrays that you can immediately use with get_conversation tool. Use includeConversationDetails=true when you need the full conversation ID list and basic metadata for follow-up analysis.',
{
scope: z.enum(['all', 'recent', 'project']).optional().default('all').describe('Analysis scope: all conversations, recent only, or project-specific. Use "project" with projectPath for focused project analysis.'),
projectPath: z.string().optional().describe('**HIGHLY RECOMMENDED** Project/codebase name (e.g., "my-app") or full path for project-scoped analysis. When provided, analyzes only conversations that worked on files in that project, giving much more relevant insights about coding patterns and development activity.'),
recentDays: z.number().min(1).max(365).optional().default(30).describe('Number of recent days to analyze (1-365)'),
includeBreakdowns: z.array(z.enum(['files', 'languages', 'temporal', 'size'])).optional().default(['files', 'languages']).describe('Types of breakdowns to include in the analysis. IMPORTANT: "files" and "languages" breakdowns contain conversation IDs in their arrays - use these for follow-up analysis!'),
includeConversationDetails: z.boolean().optional().default(false).describe('Include full conversation ID list and basic metadata (increases response size significantly)'),
outputMode: z.enum(['json', 'compact-json']).optional().default('json').describe('Output format: "json" for formatted JSON (default), "compact-json" for minified JSON')
},
async (input) => {
try {
const result = await getConversationAnalytics(input);
return {
content: [{
type: 'text',
text: formatResponse(result, input.outputMode)
}]
};
} catch (error) {
return {
content: [{
type: 'text',
text: `Error: ${error instanceof Error ? error.message : 'Unknown error occurred'}`
}]
};
}
}
);
server.tool(
'find_related_conversations',
'Find conversations related to a reference conversation based on shared files, folders, programming languages, similar size, or temporal proximity. Use this to discover related discussions, find conversations about the same codebase/project, identify similar problem-solving sessions, or trace the evolution of ideas across multiple conversations.',
{
referenceConversationId: z.string().min(1).describe('ID of the conversation to find related conversations for'),
relationshipTypes: z.array(z.enum(['files', 'folders', 'languages', 'size', 'temporal'])).optional().default(['files']).describe('Types of relationships to consider when finding related conversations'),
maxResults: z.number().min(1).max(50).optional().default(10).describe('Maximum number of related conversations to return (1-50)'),
minScore: z.number().min(0).max(1).optional().default(0.1).describe('Minimum similarity score threshold (0.0-1.0)'),
includeScoreBreakdown: z.boolean().optional().default(false).describe('Include detailed breakdown of how similarity scores were calculated'),
outputMode: z.enum(['json', 'compact-json']).optional().default('json').describe('Output format: "json" for formatted JSON (default), "compact-json" for minified JSON')
},
async (input) => {
try {
const result = await findRelatedConversations(input);
return {
content: [{
type: 'text',
text: formatResponse(result, input.outputMode)
}]
};
} catch (error) {
return {
content: [{
type: 'text',
text: `Error: ${error instanceof Error ? error.message : 'Unknown error occurred'}`
}]
};
}
}
);
server.tool(
'extract_conversation_elements',
'Extract specific elements from conversations such as file references, code blocks, programming languages, folder paths, metadata, or conversation structure. Use this to build knowledge bases, analyze code patterns, extract reusable snippets, understand project file usage, or prepare data for further analysis and documentation.',
{
conversationIds: z.array(z.string()).optional().describe('Specific conversation IDs to extract elements from (if not provided, extracts from all conversations)'),
elements: z.array(z.enum(['files', 'folders', 'languages', 'codeblocks', 'metadata', 'structure'])).optional().default(['files', 'codeblocks']).describe('Types of elements to extract from conversations'),
includeContext: z.boolean().optional().default(false).describe('Include surrounding context for extracted elements'),
groupBy: z.enum(['conversation', 'element', 'none']).optional().default('conversation').describe('How to group the extracted elements in the output'),
filters: z.object({
minCodeLength: z.number().optional().describe('Minimum length for code blocks to include'),
fileExtensions: z.array(z.string()).optional().describe('Only include files with these extensions'),
languages: z.array(z.string()).optional().describe('Only include code blocks in these programming languages')
}).optional().describe('Filters to apply when extracting elements'),
outputMode: z.enum(['json', 'compact-json']).optional().default('json').describe('Output format: "json" for formatted JSON (default), "compact-json" for minified JSON')
},
async (input) => {
try {
const mappedInput = {
conversationIds: input.conversationIds,
elements: input.elements,
includeContext: input.includeContext,
groupBy: input.groupBy,
filters: input.filters
};
const result = await extractConversationElements(mappedInput);
return {
content: [{
type: 'text',
text: formatResponse(result, input.outputMode)
}]
};
} catch (error) {
return {
content: [{
type: 'text',
text: `Error: ${error instanceof Error ? error.message : 'Unknown error occurred'}`
}]
};
}
}
);
server.tool(
'export_conversation_data',
'Export chat data in various formats (JSON, CSV, Graph) for external analysis, visualization, or integration with other tools. **TIP: Use filters.projectPath to export only project-specific conversations** for focused analysis of a particular codebase. Use this to create datasets for machine learning, generate reports for stakeholders, prepare data for visualization tools like Gephi or Tableau, or backup chat data in structured formats.',
{
conversationIds: z.array(z.string()).optional().describe('Specific conversation IDs to export (if not provided, exports all conversations)'),
format: z.enum(['json', 'csv', 'graph']).optional().default('json').describe('Export format: JSON for structured data, CSV for spreadsheets, Graph for network analysis'),
includeContent: z.boolean().optional().default(false).describe('Include full conversation content in the export'),
includeRelationships: z.boolean().optional().default(false).describe('Include relationship data between conversations'),
flattenStructure: z.boolean().optional().default(false).describe('Flatten nested structures for easier processing'),
filters: z.object({
minSize: z.number().optional().describe('Minimum conversation size to include'),
hasCodeBlocks: z.boolean().optional().describe('Only include conversations with code blocks'),
projectPath: z.string().optional().describe('**RECOMMENDED** Only include conversations related to this project/codebase name or path. Dramatically improves relevance by filtering to conversations that actually worked on files in that project.')
}).optional().describe('Filters to apply when selecting conversations to export'),
outputMode: z.enum(['json', 'compact-json']).optional().default('json').describe('Output format: "json" for formatted JSON (default), "compact-json" for minified JSON')
},
async (input) => {
try {
const mappedInput = {
conversationIds: input.conversationIds,
format: input.format,
includeContent: input.includeContent,
includeRelationships: input.includeRelationships,
flattenStructure: input.flattenStructure,
filters: input.filters
};
const result = await exportConversationData(mappedInput);
return {
content: [{
type: 'text',
text: formatResponse(result, input.outputMode)
}]
};
} catch (error) {
return {
content: [{
type: 'text',
text: `Error: ${error instanceof Error ? error.message : 'Unknown error occurred'}`
}]
};
}
}
);
server.tool(
'get_system_info',
'Get system information and utilities for AI assistants. Provides current date, timezone, and other helpful context that AI assistants may not have access to. Use this when you need reference information for date filtering, time-based queries, or other system context.',
{
info: z.enum(['date', 'timezone', 'all']).optional().default('all').describe('Type of system information to retrieve: "date" for current date only, "timezone" for timezone info, "all" for everything')
},
async (input) => {
const now = new Date();
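// Note: toISOString() is UTC-based, so near local midnight this date can
// differ from the local calendar date, while the timezone reported is local.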
const currentDate = now.toISOString().split('T')[0];
const currentTime = now.toISOString();
const timezone = Intl.DateTimeFormat().resolvedOptions().timeZone;
let response = '';
if (input.info === 'date') {
response = `Current date: ${currentDate}`;
} else if (input.info === 'timezone') {
response = `Timezone: ${timezone}`;
} else {
response = [
`Current date: ${currentDate}`,
`Current time: ${currentTime}`,
`Timezone: ${timezone}`,
``,
`Use this date information when applying date filters to search_conversations.`,
`Date format for filters: YYYY-MM-DD (e.g., "${currentDate}")`
].join('\n');
}
return {
content: [{
type: 'text',
text: response
}]
};
}
);
const transport = new StdioServerTransport();
await server.connect(transport);
```
--------------------------------------------------------------------------------
/.roo/rules/taskmaster.md:
--------------------------------------------------------------------------------
```markdown
---
description: Comprehensive reference for Taskmaster MCP tools and CLI commands.
globs: **/*
alwaysApply: true
---
# Taskmaster Tool & Command Reference
This document provides a detailed reference for interacting with Taskmaster, covering both the recommended MCP tools, suitable for integrations like Roo Code, and the corresponding `task-master` CLI commands, designed for direct user interaction or fallback.
**Note:** For interacting with Taskmaster programmatically or via integrated tools, using the **MCP tools is strongly recommended** due to better performance, structured data, and error handling. The CLI commands serve as a user-friendly alternative and fallback.
**Important:** Several MCP tools involve AI processing... The AI-powered tools include `parse_prd`, `analyze_project_complexity`, `update_subtask`, `update_task`, `update`, `expand_all`, `expand_task`, and `add_task`.
---
## Initialization & Setup
### 1. Initialize Project (`init`)
* **MCP Tool:** `initialize_project`
* **CLI Command:** `task-master init [options]`
* **Description:** `Set up the basic Taskmaster file structure and configuration in the current directory for a new project.`
* **Key CLI Options:**
* `--name <name>`: `Set the name for your project in Taskmaster's configuration.`
* `--description <text>`: `Provide a brief description for your project.`
* `--version <version>`: `Set the initial version for your project, e.g., '0.1.0'.`
* `-y, --yes`: `Initialize Taskmaster quickly using default settings without interactive prompts.`
* **Usage:** Run this once at the beginning of a new project.
* **MCP Variant Description:** `Set up the basic Taskmaster file structure and configuration in the current directory for a new project by running the 'task-master init' command.`
* **Key MCP Parameters/Options:**
* `projectName`: `Set the name for your project.` (CLI: `--name <name>`)
* `projectDescription`: `Provide a brief description for your project.` (CLI: `--description <text>`)
* `projectVersion`: `Set the initial version for your project, e.g., '0.1.0'.` (CLI: `--version <version>`)
* `authorName`: `Author name.` (CLI: `--author <author>`)
* `skipInstall`: `Skip installing dependencies. Default is false.` (CLI: `--skip-install`)
* `addAliases`: `Add shell aliases tm and taskmaster. Default is false.` (CLI: `--aliases`)
* `yes`: `Skip prompts and use defaults/provided arguments. Default is false.` (CLI: `-y, --yes`)
* **Usage:** Run this once at the beginning of a new project, typically via an integrated tool like Roo Code. Operates on the current working directory of the MCP server.
* **Important:** Once complete, you *MUST* parse a PRD in order to generate tasks. There will be no task files until then. The next step after initializing should be to create a PRD using the example PRD in scripts/example_prd.txt.
### 2. Parse PRD (`parse_prd`)
* **MCP Tool:** `parse_prd`
* **CLI Command:** `task-master parse-prd [file] [options]`
* **Description:** `Parse a Product Requirements Document, PRD, or text file with Taskmaster to automatically generate an initial set of tasks in tasks.json.`
* **Key Parameters/Options:**
* `input`: `Path to your PRD or requirements text file that Taskmaster should parse for tasks.` (CLI: `[file]` positional or `-i, --input <file>`)
* `output`: `Specify where Taskmaster should save the generated 'tasks.json' file. Defaults to 'tasks/tasks.json'.` (CLI: `-o, --output <file>`)
* `numTasks`: `Approximate number of top-level tasks Taskmaster should aim to generate from the document.` (CLI: `-n, --num-tasks <number>`)
* `force`: `Use this to allow Taskmaster to overwrite an existing 'tasks.json' without asking for confirmation.` (CLI: `-f, --force`)
* **Usage:** Useful for bootstrapping a project from an existing requirements document.
* **Notes:** Task Master will strictly adhere to any specific requirements mentioned in the PRD, such as libraries, database schemas, frameworks, tech stacks, etc., while filling in any gaps where the PRD isn't fully specified. Tasks are designed to provide the most direct implementation path while avoiding over-engineering.
* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress. If the user does not have a PRD, suggest discussing their idea and then use the example PRD in `scripts/example_prd.txt` as a template for creating the PRD based on their idea, for use with `parse-prd`.
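For orientation, the tool's arguments could be assembled as in this hedged sketch (parameter names from the list above; values are placeholders, except `scripts/example_prd.txt`, which is the template mentioned above):
```typescript
// Hedged sketch: arguments for the parse_prd MCP tool, mirroring the
// documented parameters. numTasks is an arbitrary placeholder.
const parsePrdArgs = {
  input: 'scripts/example_prd.txt', // PRD or requirements file to parse
  output: 'tasks/tasks.json',       // documented default destination
  numTasks: 12,                     // approximate number of top-level tasks
  force: false,                     // set true to overwrite an existing tasks.json
};
```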
---
## AI Model Configuration
### 2. Manage Models (`models`)
* **MCP Tool:** `models`
* **CLI Command:** `task-master models [options]`
* **Description:** `View the current AI model configuration or set specific models for different roles (main, research, fallback). Allows setting custom model IDs for Ollama and OpenRouter.`
* **Key MCP Parameters/Options:**
* `setMain <model_id>`: `Set the primary model ID for task generation/updates.` (CLI: `--set-main <model_id>`)
* `setResearch <model_id>`: `Set the model ID for research-backed operations.` (CLI: `--set-research <model_id>`)
* `setFallback <model_id>`: `Set the model ID to use if the primary fails.` (CLI: `--set-fallback <model_id>`)
* `ollama <boolean>`: `Indicates the set model ID is a custom Ollama model.` (CLI: `--ollama`)
* `openrouter <boolean>`: `Indicates the set model ID is a custom OpenRouter model.` (CLI: `--openrouter`)
* `listAvailableModels <boolean>`: `If true, lists available models not currently assigned to a role.` (CLI: No direct equivalent; CLI lists available automatically)
* `projectRoot <string>`: `Optional. Absolute path to the project root directory.` (CLI: Determined automatically)
* **Key CLI Options:**
* `--set-main <model_id>`: `Set the primary model.`
* `--set-research <model_id>`: `Set the research model.`
* `--set-fallback <model_id>`: `Set the fallback model.`
* `--ollama`: `Specify that the provided model ID is for Ollama (use with --set-*).`
* `--openrouter`: `Specify that the provided model ID is for OpenRouter (use with --set-*). Validates against OpenRouter API.`
* `--setup`: `Run interactive setup to configure models, including custom Ollama/OpenRouter IDs.`
* **Usage (MCP):** Call without set flags to get current config. Use `setMain`, `setResearch`, or `setFallback` with a valid model ID to update the configuration. Use `listAvailableModels: true` to get a list of unassigned models. To set a custom model, provide the model ID and set `ollama: true` or `openrouter: true`.
* **Usage (CLI):** Run without flags to view current configuration and available models. Use set flags to update specific roles. Use `--setup` for guided configuration, including custom models. To set a custom model via flags, use `--set-<role>=<model_id>` along with either `--ollama` or `--openrouter`.
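* **Example CLI (illustrative):** `task-master models --set-main <model_id>` sets the primary model (`<model_id>` is a placeholder for a supported model ID); `task-master models --setup` runs the guided configuration instead.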
* **Notes:** Configuration is stored in `.taskmasterconfig` in the project root. This command/tool modifies that file. Use `listAvailableModels` or `task-master models` to see internally supported models. OpenRouter custom models are validated against their live API. Ollama custom models are not validated live.
* **API note:** API keys for the selected AI providers (based on their models) must be present in the `mcp.json` file to be accessible in MCP context, and in the local `.env` file for the CLI to read them.
* **Model costs:** Costs for supported models are expressed in dollars: an input/output value of 3 means $3.00, and a value of 0.8 means $0.80.
* **Warning:** DO NOT MANUALLY EDIT THE .taskmasterconfig FILE. Use the included commands either in the MCP or CLI format as needed. Always prioritize MCP tools when available and use the CLI as a fallback.
---
## Task Listing & Viewing
### 3. Get Tasks (`get_tasks`)
* **MCP Tool:** `get_tasks`
* **CLI Command:** `task-master list [options]`
* **Description:** `List your Taskmaster tasks, optionally filtering by status and showing subtasks.`
* **Key Parameters/Options:**
* `status`: `Show only Taskmaster tasks matching this status, e.g., 'pending' or 'done'.` (CLI: `-s, --status <status>`)
* `withSubtasks`: `Include subtasks indented under their parent tasks in the list.` (CLI: `--with-subtasks`)
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
* **Usage:** Get an overview of the project status, often used at the start of a work session.
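* **Example CLI (illustrative):** `task-master list --status=pending --with-subtasks` shows pending tasks with their subtasks indented beneath them.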
### 4. Get Next Task (`next_task`)
* **MCP Tool:** `next_task`
* **CLI Command:** `task-master next [options]`
* **Description:** `Ask Taskmaster to show the next available task you can work on, based on status and completed dependencies.`
* **Key Parameters/Options:**
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
* **Usage:** Identify what to work on next according to the plan.
### 5. Get Task Details (`get_task`)
* **MCP Tool:** `get_task`
* **CLI Command:** `task-master show [id] [options]`
* **Description:** `Display detailed information for a specific Taskmaster task or subtask by its ID.`
* **Key Parameters/Options:**
* `id`: `Required. The ID of the Taskmaster task, e.g., '15', or subtask, e.g., '15.2', you want to view.` (CLI: `[id]` positional or `-i, --id <id>`)
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
* **Usage:** Understand the full details, implementation notes, and test strategy for a specific task before starting work.
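* **Example CLI (illustrative):** `task-master show 15` displays task 15; `task-master show 15.2` displays subtask 15.2 (IDs are placeholders).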
---
## Task Creation & Modification
### 6. Add Task (`add_task`)
* **MCP Tool:** `add_task`
* **CLI Command:** `task-master add-task [options]`
* **Description:** `Add a new task to Taskmaster by describing it; AI will structure it.`
* **Key Parameters/Options:**
* `prompt`: `Required. Describe the new task you want Taskmaster to create, e.g., "Implement user authentication using JWT".` (CLI: `-p, --prompt <text>`)
* `dependencies`: `Specify the IDs of any Taskmaster tasks that must be completed before this new one can start, e.g., '12,14'.` (CLI: `-d, --dependencies <ids>`)
* `priority`: `Set the priority for the new task: 'high', 'medium', or 'low'. Default is 'medium'.` (CLI: `--priority <priority>`)
* `research`: `Enable Taskmaster to use the research role for potentially more informed task creation.` (CLI: `-r, --research`)
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
* **Usage:** Quickly add newly identified tasks during development.
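* **Example CLI (illustrative):** `task-master add-task --prompt='Implement user authentication using JWT' --priority=high --dependencies=12,14` (the dependency IDs are placeholders).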
* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress.
### 7. Add Subtask (`add_subtask`)
* **MCP Tool:** `add_subtask`
* **CLI Command:** `task-master add-subtask [options]`
* **Description:** `Add a new subtask to a Taskmaster parent task, or convert an existing task into a subtask.`
* **Key Parameters/Options:**
* `id` / `parent`: `Required. The ID of the Taskmaster task that will be the parent.` (MCP: `id`, CLI: `-p, --parent <id>`)
* `taskId`: `Use this if you want to convert an existing top-level Taskmaster task into a subtask of the specified parent.` (CLI: `-i, --task-id <id>`)
* `title`: `Required if not using taskId. The title for the new subtask Taskmaster should create.` (CLI: `-t, --title <title>`)
* `description`: `A brief description for the new subtask.` (CLI: `-d, --description <text>`)
* `details`: `Provide implementation notes or details for the new subtask.` (CLI: `--details <text>`)
* `dependencies`: `Specify IDs of other tasks or subtasks, e.g., '15' or '16.1', that must be done before this new subtask.` (CLI: `--dependencies <ids>`)
* `status`: `Set the initial status for the new subtask. Default is 'pending'.` (CLI: `-s, --status <status>`)
* `skipGenerate`: `Prevent Taskmaster from automatically regenerating markdown task files after adding the subtask.` (CLI: `--skip-generate`)
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
* **Usage:** Break down tasks manually or reorganize existing tasks.
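* **Example CLI (illustrative):** `task-master add-subtask --parent=15 --title='Write integration tests'` creates a new subtask under task 15, while `task-master add-subtask --parent=15 --task-id=22` converts existing task 22 into a subtask of 15 (IDs and title are placeholders).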
### 8. Update Tasks (`update`)
* **MCP Tool:** `update`
* **CLI Command:** `task-master update [options]`
* **Description:** `Update multiple upcoming tasks in Taskmaster based on new context or changes, starting from a specific task ID.`
* **Key Parameters/Options:**
* `from`: `Required. The ID of the first task Taskmaster should update. All tasks with this ID or higher that are not 'done' will be considered.` (CLI: `--from <id>`)
* `prompt`: `Required. Explain the change or new context for Taskmaster to apply to the tasks, e.g., "We are now using React Query instead of Redux Toolkit for data fetching".` (CLI: `-p, --prompt <text>`)
* `research`: `Enable Taskmaster to use the research role for more informed updates. Requires appropriate API key.` (CLI: `-r, --research`)
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
* **Usage:** Handle significant implementation changes or pivots that affect multiple future tasks. Example CLI: `task-master update --from='18' --prompt='Switching to React Query.\nNeed to refactor data fetching...'`
* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress.
### 9. Update Task (`update_task`)
* **MCP Tool:** `update_task`
* **CLI Command:** `task-master update-task [options]`
* **Description:** `Modify a specific Taskmaster task or subtask by its ID, incorporating new information or changes.`
* **Key Parameters/Options:**
* `id`: `Required. The specific ID of the Taskmaster task, e.g., '15', or subtask, e.g., '15.2', you want to update.` (CLI: `-i, --id <id>`)
* `prompt`: `Required. Explain the specific changes or provide the new information Taskmaster should incorporate into this task.` (CLI: `-p, --prompt <text>`)
* `research`: `Enable Taskmaster to use the research role for more informed updates. Requires appropriate API key.` (CLI: `-r, --research`)
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
* **Usage:** Refine a specific task based on new understanding or feedback. Example CLI: `task-master update-task --id='15' --prompt='Clarification: Use PostgreSQL instead of MySQL.\nUpdate schema details...'`
* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress.
### 10. Update Subtask (`update_subtask`)
* **MCP Tool:** `update_subtask`
* **CLI Command:** `task-master update-subtask [options]`
* **Description:** `Append timestamped notes or details to a specific Taskmaster subtask without overwriting existing content. Intended for iterative implementation logging.`
* **Key Parameters/Options:**
* `id`: `Required. The specific ID of the Taskmaster subtask, e.g., '15.2', you want to add information to.` (CLI: `-i, --id <id>`)
* `prompt`: `Required. Provide the information or notes Taskmaster should append to the subtask's details. Ensure this adds *new* information not already present.` (CLI: `-p, --prompt <text>`)
* `research`: `Enable Taskmaster to use the research role for more informed updates. Requires appropriate API key.` (CLI: `-r, --research`)
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
* **Usage:** Add implementation notes, code snippets, or clarifications to a subtask during development. Before calling, review the subtask's current details to append only fresh insights, helping to build a detailed log of the implementation journey and avoid redundancy. Example CLI: `task-master update-subtask --id='15.2' --prompt='Discovered that the API requires header X.\nImplementation needs adjustment...'`
* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress.
### 11. Set Task Status (`set_task_status`)
* **MCP Tool:** `set_task_status`
* **CLI Command:** `task-master set-status [options]`
* **Description:** `Update the status of one or more Taskmaster tasks or subtasks, e.g., 'pending', 'in-progress', 'done'.`
* **Key Parameters/Options:**
* `id`: `Required. The ID(s) of the Taskmaster task(s) or subtask(s), e.g., '15', '15.2', or '16,17.1', to update.` (CLI: `-i, --id <id>`)
* `status`: `Required. The new status to set, e.g., 'done', 'pending', 'in-progress', 'review', 'cancelled'.` (CLI: `-s, --status <status>`)
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
* **Usage:** Mark progress as tasks move through the development cycle.
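* **Example CLI (illustrative):** `task-master set-status --id=15,15.2 --status=done` marks task 15 and subtask 15.2 as done (IDs are placeholders).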
### 12. Remove Task (`remove_task`)
* **MCP Tool:** `remove_task`
* **CLI Command:** `task-master remove-task [options]`
* **Description:** `Permanently remove a task or subtask from the Taskmaster tasks list.`
* **Key Parameters/Options:**
* `id`: `Required. The ID of the Taskmaster task, e.g., '5', or subtask, e.g., '5.2', to permanently remove.` (CLI: `-i, --id <id>`)
* `yes`: `Skip the confirmation prompt and immediately delete the task.` (CLI: `-y, --yes`)
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
* **Usage:** Permanently delete tasks or subtasks that are no longer needed in the project.
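* **Example CLI (illustrative):** `task-master remove-task --id=5.2 --yes` permanently deletes subtask 5.2 without a confirmation prompt (the ID is a placeholder).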
* **Notes:** Use with caution as this operation cannot be undone. Consider using 'blocked', 'cancelled', or 'deferred' status instead if you just want to exclude a task from active planning but keep it for reference. The command automatically cleans up dependency references in other tasks.
---
## Task Structure & Breakdown
### 13. Expand Task (`expand_task`)
* **MCP Tool:** `expand_task`
* **CLI Command:** `task-master expand [options]`
* **Description:** `Use Taskmaster's AI to break down a complex task into smaller, manageable subtasks. Appends subtasks by default.`
* **Key Parameters/Options:**
* `id`: `The ID of the specific Taskmaster task you want to break down into subtasks.` (CLI: `-i, --id <id>`)
* `num`: `Optional: Suggests how many subtasks Taskmaster should aim to create. Uses complexity analysis/defaults otherwise.` (CLI: `-n, --num <number>`)
* `research`: `Enable Taskmaster to use the research role for more informed subtask generation. Requires appropriate API key.` (CLI: `-r, --research`)
* `prompt`: `Optional: Provide extra context or specific instructions to Taskmaster for generating the subtasks.` (CLI: `-p, --prompt <text>`)
* `force`: `Optional: If true, clear existing subtasks before generating new ones. Default is false (append).` (CLI: `--force`)
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
* **Usage:** Generate a detailed implementation plan for a complex task before starting coding. Automatically uses complexity report recommendations if available and `num` is not specified.
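* **Example CLI (illustrative):** `task-master expand --id=8 --num=5 --research` breaks task 8 into roughly five subtasks using the research role (ID and count are placeholders).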
* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress.
### 14. Expand All Tasks (`expand_all`)
* **MCP Tool:** `expand_all`
* **CLI Command:** `task-master expand --all [options]` (Note: CLI uses the `expand` command with the `--all` flag)
* **Description:** `Tell Taskmaster to automatically expand all eligible pending/in-progress tasks based on complexity analysis or defaults. Appends subtasks by default.`
* **Key Parameters/Options:**
* `num`: `Optional: Suggests how many subtasks Taskmaster should aim to create per task.` (CLI: `-n, --num <number>`)
* `research`: `Enable research role for more informed subtask generation. Requires appropriate API key.` (CLI: `-r, --research`)
* `prompt`: `Optional: Provide extra context for Taskmaster to apply generally during expansion.` (CLI: `-p, --prompt <text>`)
* `force`: `Optional: If true, clear existing subtasks before generating new ones for each eligible task. Default is false (append).` (CLI: `--force`)
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
* **Usage:** Useful after initial task generation or complexity analysis to break down multiple tasks at once.
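* **Example CLI (illustrative):** `task-master expand --all --force --research` regenerates subtasks for every eligible task, replacing any existing breakdowns.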
* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress.
### 15. Clear Subtasks (`clear_subtasks`)
* **MCP Tool:** `clear_subtasks`
* **CLI Command:** `task-master clear-subtasks [options]`
* **Description:** `Remove all subtasks from one or more specified Taskmaster parent tasks.`
* **Key Parameters/Options:**
* `id`: `The ID(s) of the Taskmaster parent task(s) whose subtasks you want to remove, e.g., '15' or '16,18'. Required unless using 'all'.` (CLI: `-i, --id <ids>`)
* `all`: `Tell Taskmaster to remove subtasks from all parent tasks.` (CLI: `--all`)
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
* **Usage:** Used before regenerating subtasks with `expand_task` if the previous breakdown needs replacement.
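* **Example CLI (illustrative):** `task-master clear-subtasks --id=16,18` removes all subtasks from tasks 16 and 18 (IDs are placeholders); `task-master clear-subtasks --all` clears every parent task.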
### 16. Remove Subtask (`remove_subtask`)
* **MCP Tool:** `remove_subtask`
* **CLI Command:** `task-master remove-subtask [options]`
* **Description:** `Remove a subtask from its Taskmaster parent, optionally converting it into a standalone task.`
* **Key Parameters/Options:**
* `id`: `Required. The ID(s) of the Taskmaster subtask(s) to remove, e.g., '15.2' or '16.1,16.3'.` (CLI: `-i, --id <id>`)
* `convert`: `If used, Taskmaster will turn the subtask into a regular top-level task instead of deleting it.` (CLI: `-c, --convert`)
* `skipGenerate`: `Prevent Taskmaster from automatically regenerating markdown task files after removing the subtask.` (CLI: `--skip-generate`)
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
* **Usage:** Delete unnecessary subtasks or promote a subtask to a top-level task.
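* **Example CLI (illustrative):** `task-master remove-subtask --id=15.2 --convert` promotes subtask 15.2 to a standalone task instead of deleting it (the ID is a placeholder).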
### 17. Move Task (`move_task`)
* **MCP Tool:** `move_task`
* **CLI Command:** `task-master move [options]`
* **Description:** `Move a task or subtask to a new position within the task hierarchy.`
* **Key Parameters/Options:**
* `from`: `Required. ID of the task/subtask to move (e.g., "5" or "5.2"). Can be comma-separated for multiple tasks.` (CLI: `--from <id>`)
* `to`: `Required. ID of the destination (e.g., "7" or "7.3"). Must match the number of source IDs if comma-separated.` (CLI: `--to <id>`)
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
* **Usage:** Reorganize tasks by moving them within the hierarchy. Supports various scenarios like:
* Moving a task to become a subtask
* Moving a subtask to become a standalone task
* Moving a subtask to a different parent
* Reordering subtasks within the same parent
* Moving a task to a new, non-existent ID (automatically creates placeholders)
* Moving multiple tasks at once with comma-separated IDs
* **Validation Features:**
* Allows moving tasks to non-existent destination IDs (creates placeholder tasks)
* Prevents moving to existing task IDs that already have content (to avoid overwriting)
* Validates that source tasks exist before attempting to move them
* Maintains proper parent-child relationships
* **Example CLI:** `task-master move --from=5.2 --to=7.3` to move subtask 5.2 to become subtask 7.3.
* **Example Multi-Move:** `task-master move --from=10,11,12 --to=16,17,18` to move multiple tasks to new positions.
* **Common Use:** Resolving merge conflicts in tasks.json when multiple team members create tasks on different branches.
---
## Dependency Management
### 18. Add Dependency (`add_dependency`)
* **MCP Tool:** `add_dependency`
* **CLI Command:** `task-master add-dependency [options]`
* **Description:** `Define a dependency in Taskmaster, making one task a prerequisite for another.`
* **Key Parameters/Options:**
* `id`: `Required. The ID of the Taskmaster task that will depend on another.` (CLI: `-i, --id <id>`)
* `dependsOn`: `Required. The ID of the Taskmaster task that must be completed first, the prerequisite.` (CLI: `-d, --depends-on <id>`)
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <path>`)
* **Usage:** Establish the correct order of execution between tasks.
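* **Example CLI (illustrative):** `task-master add-dependency --id=22 --depends-on=21` makes task 21 a prerequisite of task 22 (IDs are placeholders).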
### 19. Remove Dependency (`remove_dependency`)
* **MCP Tool:** `remove_dependency`
* **CLI Command:** `task-master remove-dependency [options]`
* **Description:** `Remove a dependency relationship between two Taskmaster tasks.`
* **Key Parameters/Options:**
* `id`: `Required. The ID of the Taskmaster task you want to remove a prerequisite from.` (CLI: `-i, --id <id>`)
* `dependsOn`: `Required. The ID of the Taskmaster task that should no longer be a prerequisite.` (CLI: `-d, --depends-on <id>`)
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
* **Usage:** Update task relationships when the order of execution changes.
### 20. Validate Dependencies (`validate_dependencies`)
* **MCP Tool:** `validate_dependencies`
* **CLI Command:** `task-master validate-dependencies [options]`
* **Description:** `Check your Taskmaster tasks for dependency issues (like circular references or links to non-existent tasks) without making changes.`
* **Key Parameters/Options:**
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
* **Usage:** Audit the integrity of your task dependencies.
### 21. Fix Dependencies (`fix_dependencies`)
* **MCP Tool:** `fix_dependencies`
* **CLI Command:** `task-master fix-dependencies [options]`
* **Description:** `Automatically fix dependency issues (like circular references or links to non-existent tasks) in your Taskmaster tasks.`
* **Key Parameters/Options:**
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
* **Usage:** Clean up dependency errors automatically.
---
## Analysis & Reporting
### 22. Analyze Project Complexity (`analyze_project_complexity`)
* **MCP Tool:** `analyze_project_complexity`
* **CLI Command:** `task-master analyze-complexity [options]`
* **Description:** `Have Taskmaster analyze your tasks to determine their complexity and suggest which ones need to be broken down further.`
* **Key Parameters/Options:**
* `output`: `Where to save the complexity analysis report (default: 'scripts/task-complexity-report.json').` (CLI: `-o, --output <file>`)
* `threshold`: `The minimum complexity score (1-10) that should trigger a recommendation to expand a task.` (CLI: `-t, --threshold <number>`)
* `research`: `Enable research role for more accurate complexity analysis. Requires appropriate API key.` (CLI: `-r, --research`)
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
* **Usage:** Used before breaking down tasks to identify which ones need the most attention.
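* **Example CLI (illustrative):** `task-master analyze-complexity --threshold=6 --research` recommends expansion for tasks scoring 6 or above (the threshold value is a placeholder).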
* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress.
### 23. View Complexity Report (`complexity_report`)
* **MCP Tool:** `complexity_report`
* **CLI Command:** `task-master complexity-report [options]`
* **Description:** `Display the task complexity analysis report in a readable format.`
* **Key Parameters/Options:**
* `file`: `Path to the complexity report (default: 'scripts/task-complexity-report.json').` (CLI: `-f, --file <file>`)
* **Usage:** Review and understand the complexity analysis results after running analyze-complexity.
---
## File Management
### 24. Generate Task Files (`generate`)
* **MCP Tool:** `generate`
* **CLI Command:** `task-master generate [options]`
* **Description:** `Create or update individual Markdown files for each task based on your tasks.json.`
* **Key Parameters/Options:**
* `output`: `The directory where Taskmaster should save the task files (default: in a 'tasks' directory).` (CLI: `-o, --output <directory>`)
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
* **Usage:** Run this after making changes to tasks.json to keep individual task files up to date.
---
## Environment Variables Configuration (Updated)
Taskmaster primarily uses the **`.taskmasterconfig`** file (in project root) for configuration (models, parameters, logging level, etc.), managed via `task-master models --setup`.
Environment variables are used **only** for sensitive API keys related to AI providers and specific overrides like the Ollama base URL:
* **API Keys (Required for corresponding provider):**
* `ANTHROPIC_API_KEY`
* `PERPLEXITY_API_KEY`
* `OPENAI_API_KEY`
* `GOOGLE_API_KEY`
* `MISTRAL_API_KEY`
* `AZURE_OPENAI_API_KEY` (Requires `AZURE_OPENAI_ENDPOINT` too)
* `OPENROUTER_API_KEY`
* `XAI_API_KEY`
* `OLLAMA_API_KEY` (Requires `OLLAMA_BASE_URL` too)
* **Endpoints (Optional / Provider-Specific; may also be set in `.taskmasterconfig`):**
* `AZURE_OPENAI_ENDPOINT`
* `OLLAMA_BASE_URL` (Default: `http://localhost:11434/api`)
**Set API keys** in your **`.env`** file in the project root (for CLI use) or within the `env` section of your **`.roo/mcp.json`** file (for MCP/Roo Code integration). All other settings (model choice, max tokens, temperature, log level, custom endpoints) are managed in `.taskmasterconfig` via `task-master models` command or `models` MCP tool.
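For example, a `.env` entry might look like `ANTHROPIC_API_KEY=<your key>` (placeholder value); the same key would go under the `env` section of `.roo/mcp.json` for MCP use.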
---
For details on how these commands fit into the development process, see the [Development Workflow Guide](mdc:.roo/rules/dev_workflow.md).
```
--------------------------------------------------------------------------------
/src/tools/conversation-tools.ts:
--------------------------------------------------------------------------------
```typescript
import { z } from 'zod';
import { CursorDatabaseReader } from '../database/reader.js';
import { ConversationParser } from '../database/parser.js';
import type { ConversationFilters, ConversationSummary, ConversationSearchResult, BubbleMessage } from '../database/types.js';
import { detectCursorDatabasePath } from '../utils/database-utils.js';
// Input schema for list_conversations tool
export const listConversationsSchema = z.object({
limit: z.number().min(1).max(1000).optional(),
minLength: z.number().min(0).optional(),
keywords: z.array(z.string()).optional(),
hasCodeBlocks: z.boolean().optional(),
format: z.enum(['legacy', 'modern', 'both']).optional(),
includeEmpty: z.boolean().optional(),
projectPath: z.string().optional(),
filePattern: z.string().optional(),
relevantFiles: z.array(z.string()).optional(),
startDate: z.string().optional(),
endDate: z.string().optional(),
includeAiSummaries: z.boolean().optional().default(true)
});
export type ListConversationsInput = z.infer<typeof listConversationsSchema>;
// Output type for list_conversations tool
export interface ListConversationsOutput {
conversations: Array<{
composerId: string;
format: 'legacy' | 'modern';
messageCount: number;
hasCodeBlocks: boolean;
relevantFiles: string[];
attachedFolders: string[];
firstMessage?: string;
title?: string;
aiGeneratedSummary?: string;
size: number;
}>;
totalFound: number;
filters: {
limit: number;
minLength: number;
format: string;
hasCodeBlocks?: boolean;
keywords?: string[];
projectPath?: string;
filePattern?: string;
relevantFiles?: string[];
includeAiSummaries?: boolean;
};
}
/**
* List Cursor conversations with optional filters and ROWID-based ordering
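*
* @example
* // Illustrative usage; filter values are placeholders.
* const result = await listConversations({ limit: 5, hasCodeBlocks: true, keywords: ['refactor'] });
* console.log(result.totalFound, result.conversations[0]?.composerId);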
*/
export async function listConversations(input: ListConversationsInput): Promise<ListConversationsOutput> {
const validatedInput = listConversationsSchema.parse(input);
const dbPath = process.env.CURSOR_DB_PATH || detectCursorDatabasePath();
const reader = new CursorDatabaseReader({ dbPath });
try {
await reader.connect();
const filters: ConversationFilters = {
minLength: validatedInput.minLength,
format: validatedInput.format,
hasCodeBlocks: validatedInput.hasCodeBlocks,
keywords: validatedInput.keywords,
projectPath: validatedInput.projectPath,
filePattern: validatedInput.filePattern,
relevantFiles: validatedInput.relevantFiles
};
// Add date range filter if provided
if (validatedInput.startDate || validatedInput.endDate) {
const start = validatedInput.startDate ? new Date(validatedInput.startDate) : new Date('1970-01-01');
const end = validatedInput.endDate ? new Date(validatedInput.endDate) : new Date();
filters.dateRange = { start, end };
}
const conversationIds = await reader.getConversationIds(filters);
let limitedIds = conversationIds.slice(0, validatedInput.limit);
// Apply date filtering if specified (post-query filtering due to unreliable timestamps)
if (validatedInput.startDate || validatedInput.endDate) {
const filteredIds = [];
for (const composerId of limitedIds) {
try {
const conversation = await reader.getConversationById(composerId);
if (!conversation) continue;
const hasValidDate = checkConversationDateRange(
conversation,
validatedInput.startDate,
validatedInput.endDate
);
if (hasValidDate) {
filteredIds.push(composerId);
}
} catch (error) {
// Skip conversations that can't be processed
continue;
}
}
limitedIds = filteredIds;
}
const conversations = [];
for (const composerId of limitedIds) {
try {
const summary = await reader.getConversationSummary(composerId, {
includeFirstMessage: true,
maxFirstMessageLength: 150,
includeTitle: true,
includeAIGeneratedSummary: validatedInput.includeAiSummaries
});
if (summary) {
conversations.push({
composerId: summary.composerId,
format: summary.format,
messageCount: summary.messageCount,
hasCodeBlocks: summary.hasCodeBlocks,
relevantFiles: summary.relevantFiles || [],
attachedFolders: summary.attachedFolders || [],
firstMessage: summary.firstMessage,
title: summary.title,
aiGeneratedSummary: summary.aiGeneratedSummary,
size: summary.conversationSize
});
}
} catch (error) {
console.error(`Failed to get summary for conversation ${composerId}:`, error);
}
}
return {
conversations,
totalFound: conversationIds.length,
filters: {
limit: validatedInput.limit ?? 10,
minLength: validatedInput.minLength ?? 100,
format: validatedInput.format ?? 'both',
hasCodeBlocks: validatedInput.hasCodeBlocks,
keywords: validatedInput.keywords,
projectPath: validatedInput.projectPath,
filePattern: validatedInput.filePattern,
relevantFiles: validatedInput.relevantFiles,
includeAiSummaries: validatedInput.includeAiSummaries
}
};
} finally {
// Always close the database connection
reader.close();
}
}
// Input schema for get_conversation tool
export const getConversationSchema = z.object({
conversationId: z.string().min(1),
includeCodeBlocks: z.boolean().optional().default(true),
includeFileReferences: z.boolean().optional().default(true),
includeMetadata: z.boolean().optional().default(false),
resolveBubbles: z.boolean().optional().default(true),
summaryOnly: z.boolean().optional().default(false)
});
export type GetConversationInput = z.infer<typeof getConversationSchema>;
// Output type for get_conversation tool
export interface GetConversationOutput {
conversation: {
composerId: string;
format: 'legacy' | 'modern';
messageCount: number;
title?: string;
aiGeneratedSummary?: string;
messages?: Array<{
type: number;
text: string;
bubbleId: string;
relevantFiles?: string[];
attachedFolders?: string[];
codeBlocks?: Array<{
language: string;
code: string;
filename?: string;
}>;
}>;
codeBlocks?: Array<{
language: string;
code: string;
filename?: string;
}>;
relevantFiles?: string[];
attachedFolders?: string[];
metadata?: {
hasLoaded: boolean;
storedSummary?: string;
storedRichText?: string;
size: number;
};
} | null;
}
/**
* Get a specific conversation by ID with full content
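*
* @example
* // Illustrative usage; the conversation ID is a placeholder.
* const { conversation } = await getConversation({ conversationId: 'abc-123', summaryOnly: true });
* if (conversation) console.log(conversation.title, conversation.messageCount);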
*/
export async function getConversation(input: GetConversationInput): Promise<GetConversationOutput> {
// Validate input
const validatedInput = getConversationSchema.parse(input);
// Create database reader
const dbPath = process.env.CURSOR_DB_PATH || detectCursorDatabasePath();
const reader = new CursorDatabaseReader({ dbPath });
try {
// Connect to database
await reader.connect();
// If summaryOnly is requested, return enhanced summary without full content
if (validatedInput.summaryOnly) {
const summary = await reader.getConversationSummary(validatedInput.conversationId, {
includeTitle: true,
includeAIGeneratedSummary: true,
includeFirstMessage: true,
includeLastMessage: true,
maxFirstMessageLength: 200,
maxLastMessageLength: 200
});
if (!summary) {
return { conversation: null };
}
return {
conversation: {
composerId: summary.composerId,
format: summary.format,
messageCount: summary.messageCount,
title: summary.title,
aiGeneratedSummary: summary.aiGeneratedSummary,
relevantFiles: validatedInput.includeFileReferences ? summary.relevantFiles : undefined,
attachedFolders: validatedInput.includeFileReferences ? summary.attachedFolders : undefined,
metadata: validatedInput.includeMetadata ? {
hasLoaded: true,
storedSummary: summary.storedSummary,
storedRichText: summary.storedRichText,
size: summary.conversationSize
} : undefined
}
};
}
// Get conversation
const conversation = await reader.getConversationById(validatedInput.conversationId);
if (!conversation) {
return { conversation: null };
}
// Get conversation summary to extract title and AI summary
const summary = await reader.getConversationSummary(validatedInput.conversationId, {
includeTitle: true,
includeAIGeneratedSummary: true
});
// Determine format
const format = conversation.hasOwnProperty('_v') ? 'modern' : 'legacy';
// Build response based on format
if (format === 'legacy') {
const legacyConv = conversation as any;
const messages = legacyConv.conversation || [];
// Extract data
let allCodeBlocks: any[] = [];
let allRelevantFiles: string[] = [];
let allAttachedFolders: string[] = [];
const processedMessages = messages.map((msg: any) => {
if (validatedInput.includeCodeBlocks && msg.suggestedCodeBlocks) {
allCodeBlocks.push(...msg.suggestedCodeBlocks);
}
if (validatedInput.includeFileReferences) {
if (msg.relevantFiles) allRelevantFiles.push(...msg.relevantFiles);
if (msg.attachedFoldersNew) allAttachedFolders.push(...msg.attachedFoldersNew);
}
return {
type: msg.type,
text: msg.text,
bubbleId: msg.bubbleId,
relevantFiles: validatedInput.includeFileReferences ? msg.relevantFiles : undefined,
attachedFolders: validatedInput.includeFileReferences ? msg.attachedFoldersNew : undefined,
codeBlocks: validatedInput.includeCodeBlocks ? msg.suggestedCodeBlocks : undefined
};
});
allRelevantFiles = Array.from(new Set(allRelevantFiles));
allAttachedFolders = Array.from(new Set(allAttachedFolders));
return {
conversation: {
composerId: legacyConv.composerId,
format: 'legacy',
messageCount: messages.length,
title: summary?.title,
aiGeneratedSummary: summary?.aiGeneratedSummary,
messages: processedMessages,
codeBlocks: validatedInput.includeCodeBlocks ? allCodeBlocks : undefined,
relevantFiles: validatedInput.includeFileReferences ? allRelevantFiles : undefined,
attachedFolders: validatedInput.includeFileReferences ? allAttachedFolders : undefined,
metadata: validatedInput.includeMetadata ? {
hasLoaded: true,
storedSummary: legacyConv.storedSummary,
storedRichText: legacyConv.storedRichText,
size: JSON.stringify(conversation).length
} : undefined
}
};
} else {
const modernConv = conversation as any;
const headers = modernConv.fullConversationHeadersOnly || [];
if (validatedInput.resolveBubbles) {
const resolvedMessages = [];
for (const header of headers.slice(0, 10)) {
try {
const bubbleMessage = await reader.getBubbleMessage(modernConv.composerId, header.bubbleId);
if (bubbleMessage) {
resolvedMessages.push({
type: header.type,
text: bubbleMessage.text,
bubbleId: header.bubbleId,
relevantFiles: validatedInput.includeFileReferences ? bubbleMessage.relevantFiles : undefined,
attachedFolders: validatedInput.includeFileReferences ? bubbleMessage.attachedFoldersNew : undefined,
codeBlocks: validatedInput.includeCodeBlocks ? bubbleMessage.suggestedCodeBlocks : undefined
});
}
} catch (error) {
console.error(`Failed to resolve bubble ${header.bubbleId}:`, error);
}
}
return {
conversation: {
composerId: modernConv.composerId,
format: 'modern',
messageCount: headers.length,
title: summary?.title,
aiGeneratedSummary: summary?.aiGeneratedSummary,
messages: resolvedMessages,
metadata: validatedInput.includeMetadata ? {
hasLoaded: true,
storedSummary: modernConv.storedSummary,
storedRichText: modernConv.storedRichText,
size: JSON.stringify(conversation).length
} : undefined
}
};
} else {
return {
conversation: {
composerId: modernConv.composerId,
format: 'modern',
messageCount: headers.length,
title: summary?.title,
aiGeneratedSummary: summary?.aiGeneratedSummary,
metadata: validatedInput.includeMetadata ? {
hasLoaded: true,
storedSummary: modernConv.storedSummary,
storedRichText: modernConv.storedRichText,
size: JSON.stringify(conversation).length
} : undefined
}
};
}
}
} finally {
// Always close the database connection
reader.close();
}
}
// Input schema for get_conversation_summary tool
export const getConversationSummarySchema = z.object({
conversationId: z.string().min(1),
includeFirstMessage: z.boolean().optional().default(false),
includeLastMessage: z.boolean().optional().default(false),
maxFirstMessageLength: z.number().min(1).max(1000).optional().default(200),
maxLastMessageLength: z.number().min(1).max(1000).optional().default(200),
includeMetadata: z.boolean().optional().default(false)
});
export type GetConversationSummaryInput = z.infer<typeof getConversationSummarySchema>;
// Output type for get_conversation_summary tool
export interface GetConversationSummaryOutput {
summary: {
composerId: string;
format: 'legacy' | 'modern';
messageCount: number;
hasCodeBlocks: boolean;
codeBlockCount?: number;
conversationSize: number;
firstMessage?: string;
lastMessage?: string;
storedSummary?: string;
storedRichText?: string;
relevantFiles?: string[];
attachedFolders?: string[];
metadata?: {
totalCharacters: number;
averageMessageLength: number;
};
} | null;
}
/**
* Get conversation summary with optional first/last message content
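*
* @example
* // Illustrative usage; the conversation ID is a placeholder.
* const { summary } = await getConversationSummary({
*   conversationId: 'abc-123',
*   includeFirstMessage: true,
*   maxFirstMessageLength: 100
* });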
*/
export async function getConversationSummary(input: GetConversationSummaryInput): Promise<GetConversationSummaryOutput> {
const validatedInput = getConversationSummarySchema.parse(input);
const dbPath = process.env.CURSOR_DB_PATH || detectCursorDatabasePath();
const reader = new CursorDatabaseReader({ dbPath });
try {
await reader.connect();
const summary = await reader.getConversationSummary(validatedInput.conversationId, {
includeFirstMessage: validatedInput.includeFirstMessage,
includeLastMessage: validatedInput.includeLastMessage,
maxFirstMessageLength: validatedInput.maxFirstMessageLength,
maxLastMessageLength: validatedInput.maxLastMessageLength
});
if (!summary) {
return { summary: null };
}
return {
summary: {
composerId: summary.composerId,
format: summary.format,
messageCount: summary.messageCount,
hasCodeBlocks: summary.hasCodeBlocks,
codeBlockCount: summary.codeBlockCount,
conversationSize: summary.conversationSize,
firstMessage: summary.firstMessage,
lastMessage: summary.lastMessage,
storedSummary: summary.storedSummary,
storedRichText: summary.storedRichText,
relevantFiles: summary.relevantFiles,
attachedFolders: summary.attachedFolders,
metadata: validatedInput.includeMetadata ? {
totalCharacters: summary.conversationSize,
// Guard against division by zero for conversations with no messages
averageMessageLength: summary.messageCount > 0 ? Math.round(summary.conversationSize / summary.messageCount) : 0
} : undefined
}
};
} finally {
reader.close();
}
}
// Input schema for search_conversations tool
export const searchConversationsSchema = z.object({
// Simple query (existing - backward compatible)
query: z.string().optional(),
// Multi-keyword search
keywords: z.array(z.string().min(1)).optional(),
keywordOperator: z.enum(['AND', 'OR']).optional().default('OR'),
// LIKE pattern search (database-level)
likePattern: z.string().optional(),
// Date filtering
startDate: z.string().optional(),
endDate: z.string().optional(),
// Existing options
includeCode: z.boolean().optional().default(true),
contextLines: z.number().min(0).max(10).optional().default(2),
maxResults: z.number().min(1).max(100).optional().default(10),
searchBubbles: z.boolean().optional().default(true),
searchType: z.enum(['all', 'summarization', 'code', 'files', 'project']).optional().default('all'),
format: z.enum(['legacy', 'modern', 'both']).optional().default('both'),
highlightMatches: z.boolean().optional().default(true),
projectSearch: z.boolean().optional().default(false),
fuzzyMatch: z.boolean().optional().default(false),
includePartialPaths: z.boolean().optional().default(true),
includeFileContent: z.boolean().optional().default(false),
minRelevanceScore: z.number().min(0).max(1).optional().default(0.1),
orderBy: z.enum(['relevance', 'recency']).optional().default('relevance')
}).refine(
(data) => {
const hasSearchCriteria = (data.query && data.query.trim() !== '' && data.query.trim() !== '?') || data.keywords || data.likePattern;
const hasDateFilter = data.startDate || data.endDate;
const hasOtherFilters = data.searchType !== 'all';
return hasSearchCriteria || hasDateFilter || hasOtherFilters;
},
{ message: "At least one search criteria (query, keywords, likePattern), date filter (startDate, endDate), or search type filter must be provided" }
);
export type SearchConversationsInput = z.infer<typeof searchConversationsSchema>;
// Output type for search_conversations tool
export interface SearchConversationsOutput {
conversations: Array<{
composerId: string;
format: 'legacy' | 'modern';
messageCount: number;
hasCodeBlocks: boolean;
relevantFiles: string[];
attachedFolders: string[];
firstMessage?: string;
title?: string;
aiGeneratedSummary?: string;
size: number;
relevanceScore?: number;
matchDetails?: {
exactPathMatch: boolean;
partialPathMatch: boolean;
filePathMatch: boolean;
fuzzyMatch: boolean;
matchedPaths: string[];
matchedFiles: string[];
};
}>;
totalResults: number;
query: string;
searchOptions: {
includeCode: boolean;
contextLines: number;
maxResults: number;
searchBubbles: boolean;
searchType: 'all' | 'summarization' | 'code' | 'files' | 'project';
format: 'legacy' | 'modern' | 'both';
highlightMatches: boolean;
projectSearch?: boolean;
fuzzyMatch?: boolean;
includePartialPaths?: boolean;
includeFileContent?: boolean;
minRelevanceScore?: number;
orderBy?: 'relevance' | 'recency';
};
debugInfo?: {
totalConversationsScanned: number;
averageRelevanceScore: number;
matchTypeDistribution: {
exactPath: number;
partialPath: number;
filePath: number;
fuzzy: number;
};
};
}
/**
* Search conversations with enhanced multi-keyword and LIKE pattern support
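*
* @example
* // Illustrative usage; keywords are placeholders. The schema requires at least one
* // search criterion (query, keywords, likePattern), a date filter, or a non-'all' searchType.
* const results = await searchConversations({
*   keywords: ['auth', 'token'],
*   keywordOperator: 'AND',
*   maxResults: 5
* });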
*/
export async function searchConversations(input: SearchConversationsInput): Promise<SearchConversationsOutput> {
const validatedInput = searchConversationsSchema.parse(input);
const dbPath = process.env.CURSOR_DB_PATH || detectCursorDatabasePath();
const reader = new CursorDatabaseReader({ dbPath });
try {
await reader.connect();
// Determine the search query for display purposes
const displayQuery = validatedInput.query ||
(validatedInput.keywords ? validatedInput.keywords.join(` ${validatedInput.keywordOperator} `) : '') ||
validatedInput.likePattern ||
'advanced search';
if (validatedInput.projectSearch && validatedInput.query) {
// Handle project search (existing logic)
const searchOptions = {
fuzzyMatch: validatedInput.fuzzyMatch,
includePartialPaths: validatedInput.includePartialPaths,
includeFileContent: validatedInput.includeFileContent,
minRelevanceScore: validatedInput.minRelevanceScore,
orderBy: validatedInput.orderBy,
limit: validatedInput.maxResults
};
const conversationIds = await reader.getConversationIds({
format: validatedInput.format,
projectPath: validatedInput.query
});
const conversations = [];
const matchTypeDistribution = {
exactPath: 0,
partialPath: 0,
filePath: 0,
fuzzy: 0
};
let totalConversationsScanned = 0;
let totalRelevanceScore = 0;
for (const composerId of conversationIds.slice(0, validatedInput.maxResults * 2)) {
try {
totalConversationsScanned++;
const conversation = await reader.getConversationById(composerId);
if (!conversation) continue;
const format = conversation.hasOwnProperty('_v') ? 'modern' : 'legacy';
if (format === 'modern') {
const modernConv = conversation as any;
const headers = modernConv.fullConversationHeadersOnly || [];
for (const header of headers.slice(0, 5)) {
try {
const bubbleMessage = await reader.getBubbleMessage(modernConv.composerId, header.bubbleId);
if (bubbleMessage) {
(conversation as any).resolvedMessages = (conversation as any).resolvedMessages || [];
(conversation as any).resolvedMessages.push(bubbleMessage);
}
} catch (error) {
continue;
}
}
}
const relevanceResult = calculateEnhancedProjectRelevance(
conversation,
validatedInput.query,
{
fuzzyMatch: validatedInput.fuzzyMatch || false,
includePartialPaths: validatedInput.includePartialPaths || false,
includeFileContent: validatedInput.includeFileContent || false
}
);
if (relevanceResult.score >= (validatedInput.minRelevanceScore || 0.1)) {
const summary = await reader.getConversationSummary(composerId, {
includeFirstMessage: true,
maxFirstMessageLength: 150
});
if (summary) {
conversations.push({
composerId: summary.composerId,
format: summary.format,
messageCount: summary.messageCount,
hasCodeBlocks: summary.hasCodeBlocks,
relevantFiles: summary.relevantFiles || [],
attachedFolders: summary.attachedFolders || [],
firstMessage: summary.firstMessage,
size: summary.conversationSize,
relevanceScore: relevanceResult.score,
matchDetails: relevanceResult.details
});
totalRelevanceScore += relevanceResult.score;
if (relevanceResult.details.exactPathMatch) matchTypeDistribution.exactPath++;
if (relevanceResult.details.partialPathMatch) matchTypeDistribution.partialPath++;
if (relevanceResult.details.filePathMatch) matchTypeDistribution.filePath++;
if (relevanceResult.details.fuzzyMatch) matchTypeDistribution.fuzzy++;
}
}
} catch (error) {
continue;
}
}
if (validatedInput.orderBy === 'relevance') {
conversations.sort((a, b) => (b.relevanceScore || 0) - (a.relevanceScore || 0));
}
return {
conversations: conversations.slice(0, validatedInput.maxResults),
totalResults: conversations.length,
query: displayQuery,
searchOptions: {
includeCode: validatedInput.includeCode,
contextLines: validatedInput.contextLines,
maxResults: validatedInput.maxResults,
searchBubbles: validatedInput.searchBubbles,
searchType: validatedInput.searchType,
format: validatedInput.format,
highlightMatches: validatedInput.highlightMatches,
projectSearch: validatedInput.projectSearch,
fuzzyMatch: validatedInput.fuzzyMatch,
includePartialPaths: validatedInput.includePartialPaths,
includeFileContent: validatedInput.includeFileContent,
minRelevanceScore: validatedInput.minRelevanceScore,
orderBy: validatedInput.orderBy
},
debugInfo: {
totalConversationsScanned,
averageRelevanceScore: totalConversationsScanned > 0 ? totalRelevanceScore / totalConversationsScanned : 0,
matchTypeDistribution
}
};
} else {
const hasSearchCriteria = (validatedInput.query && validatedInput.query.trim() !== '' && validatedInput.query.trim() !== '?') || validatedInput.keywords || validatedInput.likePattern;
if (!hasSearchCriteria && (validatedInput.startDate || validatedInput.endDate)) {
// Date-only search: get all conversations and filter by date
const allConversationIds = await reader.getConversationIds({
format: validatedInput.format
});
const conversations = [];
for (const composerId of allConversationIds.slice(0, validatedInput.maxResults * 2)) {
try {
const conversation = await reader.getConversationById(composerId);
if (!conversation) continue;
// Apply date filtering
const hasValidDate = checkConversationDateRange(
conversation,
validatedInput.startDate,
validatedInput.endDate
);
if (!hasValidDate) continue;
const summary = await reader.getConversationSummary(composerId, {
includeFirstMessage: true,
maxFirstMessageLength: 150,
includeTitle: true,
includeAIGeneratedSummary: true
});
if (summary) {
conversations.push({
composerId: summary.composerId,
format: summary.format,
messageCount: summary.messageCount,
hasCodeBlocks: summary.hasCodeBlocks,
relevantFiles: summary.relevantFiles || [],
attachedFolders: summary.attachedFolders || [],
firstMessage: summary.firstMessage,
title: summary.title,
aiGeneratedSummary: summary.aiGeneratedSummary,
size: summary.conversationSize
});
if (conversations.length >= validatedInput.maxResults) break;
}
} catch (error) {
console.error(`Failed to process conversation ${composerId}:`, error);
}
}
return {
conversations,
totalResults: conversations.length,
query: displayQuery,
searchOptions: {
includeCode: validatedInput.includeCode,
contextLines: validatedInput.contextLines,
maxResults: validatedInput.maxResults,
searchBubbles: validatedInput.searchBubbles,
searchType: validatedInput.searchType,
format: validatedInput.format,
highlightMatches: validatedInput.highlightMatches
}
};
}
// Handle enhanced search with keywords, LIKE patterns, or simple query
const searchResults = await reader.searchConversationsEnhanced({
query: validatedInput.query,
keywords: validatedInput.keywords,
keywordOperator: validatedInput.keywordOperator,
likePattern: validatedInput.likePattern,
includeCode: validatedInput.includeCode,
contextLines: validatedInput.contextLines,
maxResults: validatedInput.maxResults,
searchBubbles: validatedInput.searchBubbles,
searchType: validatedInput.searchType === 'project' ? 'all' : validatedInput.searchType,
format: validatedInput.format,
startDate: validatedInput.startDate,
endDate: validatedInput.endDate
});
// Convert search results to conversation summaries for consistency
const conversations = [];
for (const result of searchResults) {
try {
// Apply date filtering if specified (post-query filtering due to unreliable timestamps)
if (validatedInput.startDate || validatedInput.endDate) {
const conversation = await reader.getConversationById(result.composerId);
if (!conversation) continue;
const hasValidDate = checkConversationDateRange(
conversation,
validatedInput.startDate,
validatedInput.endDate
);
if (!hasValidDate) continue;
}
const summary = await reader.getConversationSummary(result.composerId, {
includeFirstMessage: true,
maxFirstMessageLength: 150,
includeTitle: true,
includeAIGeneratedSummary: true
});
if (summary) {
conversations.push({
composerId: summary.composerId,
format: summary.format,
messageCount: summary.messageCount,
hasCodeBlocks: summary.hasCodeBlocks,
relevantFiles: summary.relevantFiles || [],
attachedFolders: summary.attachedFolders || [],
firstMessage: summary.firstMessage,
title: summary.title,
aiGeneratedSummary: summary.aiGeneratedSummary,
size: summary.conversationSize
});
}
} catch (error) {
console.error(`Failed to get summary for conversation ${result.composerId}:`, error);
}
}
return {
conversations,
totalResults: conversations.length,
query: displayQuery,
searchOptions: {
includeCode: validatedInput.includeCode,
contextLines: validatedInput.contextLines,
maxResults: validatedInput.maxResults,
searchBubbles: validatedInput.searchBubbles,
searchType: validatedInput.searchType,
format: validatedInput.format,
highlightMatches: validatedInput.highlightMatches
}
};
}
} finally {
reader.close();
}
}
// Get bubble message tool schema and types
export const getBubbleMessageSchema = z.object({
composerId: z.string().min(1).describe('The composer ID of the conversation containing the bubble message'),
bubbleId: z.string().min(1).describe('The unique bubble ID of the message to retrieve'),
includeMetadata: z.boolean().optional().default(false).describe('Include additional metadata about the bubble message'),
includeCodeBlocks: z.boolean().optional().default(true).describe('Include code blocks in the response'),
includeFileReferences: z.boolean().optional().default(true).describe('Include file references and attached folders'),
resolveReferences: z.boolean().optional().default(false).describe('Attempt to resolve file references to actual content')
});
export type GetBubbleMessageInput = z.infer<typeof getBubbleMessageSchema>;
export interface GetBubbleMessageOutput {
bubbleMessage: BubbleMessage | null;
metadata?: {
composerId: string;
bubbleId: string;
messageType: 'user' | 'assistant' | 'unknown';
hasCodeBlocks: boolean;
codeBlockCount: number;
hasFileReferences: boolean;
fileReferenceCount: number;
hasAttachedFolders: boolean;
attachedFolderCount: number;
messageLength: number;
timestamp?: string;
};
error?: string;
}
/**
* Get a specific bubble message from a modern format conversation
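*
* @example
* // Illustrative usage; both IDs are placeholders.
* const { bubbleMessage, metadata } = await getBubbleMessage({
*   composerId: 'abc-123',
*   bubbleId: 'bubble-456',
*   includeMetadata: true
* });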
*/
export async function getBubbleMessage(input: GetBubbleMessageInput): Promise<GetBubbleMessageOutput> {
// Validate input
const validatedInput = getBubbleMessageSchema.parse(input);
// Create database reader
const dbPath = process.env.CURSOR_DB_PATH || detectCursorDatabasePath();
const reader = new CursorDatabaseReader({ dbPath });
try {
// Connect to database
await reader.connect();
// Get the bubble message
const bubbleMessage = await reader.getBubbleMessage(validatedInput.composerId, validatedInput.bubbleId);
if (!bubbleMessage) {
return {
bubbleMessage: null,
error: `Bubble message not found: ${validatedInput.bubbleId} in conversation ${validatedInput.composerId}`
};
}
// Build metadata if requested
let metadata;
if (validatedInput.includeMetadata) {
const hasCodeBlocks = !!(bubbleMessage.suggestedCodeBlocks && bubbleMessage.suggestedCodeBlocks.length > 0);
const hasFileReferences = !!(bubbleMessage.relevantFiles && bubbleMessage.relevantFiles.length > 0);
const hasAttachedFolders = !!(bubbleMessage.attachedFoldersNew && bubbleMessage.attachedFoldersNew.length > 0);
const messageType: 'user' | 'assistant' | 'unknown' =
bubbleMessage.type === 0 ? 'user' :
bubbleMessage.type === 1 ? 'assistant' : 'unknown';
metadata = {
composerId: validatedInput.composerId,
bubbleId: validatedInput.bubbleId,
messageType,
hasCodeBlocks,
codeBlockCount: bubbleMessage.suggestedCodeBlocks?.length || 0,
hasFileReferences,
fileReferenceCount: bubbleMessage.relevantFiles?.length || 0,
hasAttachedFolders,
attachedFolderCount: bubbleMessage.attachedFoldersNew?.length || 0,
messageLength: bubbleMessage.text.length,
timestamp: bubbleMessage.timestamp
};
}
return {
bubbleMessage,
metadata
};
} finally {
// Always close the database connection
reader.close();
}
}
// Input schema for get_recent_conversations tool
export const getRecentConversationsSchema = z.object({
limit: z.number().min(1).max(100).optional().default(10),
includeEmpty: z.boolean().optional().default(false),
format: z.enum(['legacy', 'modern', 'both']).optional().default('both'),
includeFirstMessage: z.boolean().optional().default(true),
maxFirstMessageLength: z.number().min(10).max(500).optional().default(150),
includeMetadata: z.boolean().optional().default(false)
});
export type GetRecentConversationsInput = z.infer<typeof getRecentConversationsSchema>;
// Output type for get_recent_conversations tool
export interface GetRecentConversationsOutput {
conversations: Array<{
composerId: string;
format: 'legacy' | 'modern';
messageCount: number;
hasCodeBlocks: boolean;
relevantFiles: string[];
attachedFolders: string[];
firstMessage?: string;
size: number;
metadata?: {
hasLoaded: boolean;
totalCharacters: number;
averageMessageLength: number;
codeBlockCount: number;
fileReferenceCount: number;
attachedFolderCount: number;
};
}>;
totalFound: number;
requestedLimit: number;
timestamp: string;
}
/**
* Get recent Cursor conversations ordered by ROWID (most recent first)
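*
* @example
* // Illustrative usage with placeholder values.
* const recent = await getRecentConversations({ limit: 5, includeMetadata: true });
* console.log(recent.timestamp, recent.conversations.length);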
*/
export async function getRecentConversations(input: GetRecentConversationsInput): Promise<GetRecentConversationsOutput> {
// Validate input
const validatedInput = getRecentConversationsSchema.parse(input);
// Create database reader
const dbPath = process.env.CURSOR_DB_PATH || detectCursorDatabasePath();
const reader = new CursorDatabaseReader({
dbPath,
minConversationSize: validatedInput.includeEmpty ? 0 : 5000
});
try {
// Connect to database
await reader.connect();
// Build minimal filters for recent conversations
const filters: ConversationFilters = {
minLength: validatedInput.includeEmpty ? 0 : 5000,
format: validatedInput.format
};
// Get conversation IDs (already ordered by ROWID DESC)
const conversationIds = await reader.getConversationIds(filters);
// Limit results
const limitedIds = conversationIds.slice(0, validatedInput.limit);
// Get conversation summaries
const conversations = [];
for (const composerId of limitedIds) {
try {
const summary = await reader.getConversationSummary(composerId, {
includeFirstMessage: validatedInput.includeFirstMessage,
maxFirstMessageLength: validatedInput.maxFirstMessageLength,
includeFileList: true,
includeCodeBlockCount: true
});
if (summary) {
const conversationData: any = {
composerId: summary.composerId,
format: summary.format,
messageCount: summary.messageCount,
hasCodeBlocks: summary.hasCodeBlocks,
relevantFiles: summary.relevantFiles,
attachedFolders: summary.attachedFolders,
firstMessage: summary.firstMessage,
size: summary.conversationSize
};
// Add metadata if requested
if (validatedInput.includeMetadata) {
conversationData.metadata = {
hasLoaded: true,
totalCharacters: summary.conversationSize,
averageMessageLength: summary.messageCount > 0 ? Math.round(summary.conversationSize / summary.messageCount) : 0,
codeBlockCount: summary.codeBlockCount || 0,
fileReferenceCount: summary.relevantFiles.length,
attachedFolderCount: summary.attachedFolders.length
};
}
conversations.push(conversationData);
}
} catch (error) {
console.error(`Failed to get summary for conversation ${composerId}:`, error);
// Continue with other conversations
}
}
return {
conversations,
totalFound: conversationIds.length,
requestedLimit: validatedInput.limit,
timestamp: new Date().toISOString()
};
} finally {
// Always close the database connection
reader.close();
}
}
// Input schema for get_conversations_by_project tool
export const getConversationsByProjectSchema = z.object({
projectPath: z.string().min(1),
filePattern: z.string().optional(),
exactFilePath: z.string().optional(),
orderBy: z.enum(['recency', 'relevance']).optional().default('recency'),
limit: z.number().min(1).max(1000).optional().default(50),
fuzzyMatch: z.boolean().optional().default(false)
});
export type GetConversationsByProjectInput = z.infer<typeof getConversationsByProjectSchema>;
// Output type for get_conversations_by_project tool
export interface GetConversationsByProjectOutput {
conversations: Array<{
composerId: string;
format: 'legacy' | 'modern';
messageCount: number;
hasCodeBlocks: boolean;
relevantFiles: string[];
attachedFolders: string[];
firstMessage?: string;
size: number;
relevanceScore?: number;
}>;
totalFound: number;
filters: {
projectPath: string;
filePattern?: string;
exactFilePath?: string;
orderBy: string;
limit: number;
};
}
/**
* Get conversations filtered by project path, attached folders, or relevant files
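*
* @example
* // Illustrative usage; the project path and file pattern are placeholders.
* const byProject = await getConversationsByProject({
*   projectPath: 'my-project',
*   filePattern: '*.ts',
*   orderBy: 'relevance'
* });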
*/
export async function getConversationsByProject(input: GetConversationsByProjectInput): Promise<GetConversationsByProjectOutput> {
// Validate input
const validatedInput = getConversationsByProjectSchema.parse(input);
// Create database reader
const dbPath = process.env.CURSOR_DB_PATH || detectCursorDatabasePath();
const reader = new CursorDatabaseReader({
dbPath,
minConversationSize: 5000 // Default minimum size for project conversations
});
try {
// Connect to database
await reader.connect();
// Get conversation IDs with project-specific filtering
const conversationResults = await reader.getConversationIdsByProject(
validatedInput.projectPath,
{
filePattern: validatedInput.filePattern,
exactFilePath: validatedInput.exactFilePath,
orderBy: validatedInput.orderBy,
limit: validatedInput.limit,
format: 'both', // Support both legacy and modern formats
fuzzyMatch: validatedInput.fuzzyMatch
}
);
// Get conversation summaries
const conversations = [];
for (const result of conversationResults) {
try {
const summary = await reader.getConversationSummary(result.composerId, {
includeFirstMessage: true,
maxFirstMessageLength: 100,
includeFileList: true,
includeCodeBlockCount: true
});
if (summary) {
conversations.push({
composerId: summary.composerId,
format: summary.format,
messageCount: summary.messageCount,
hasCodeBlocks: summary.hasCodeBlocks,
relevantFiles: summary.relevantFiles,
attachedFolders: summary.attachedFolders,
firstMessage: summary.firstMessage,
size: summary.conversationSize,
relevanceScore: result.relevanceScore
});
}
} catch (error) {
console.error(`Failed to get summary for conversation ${result.composerId}:`, error);
// Continue with other conversations
}
}
return {
conversations,
totalFound: conversationResults.length,
filters: {
projectPath: validatedInput.projectPath,
filePattern: validatedInput.filePattern,
exactFilePath: validatedInput.exactFilePath,
orderBy: validatedInput.orderBy,
limit: validatedInput.limit
}
};
} finally {
// Always close the database connection
reader.close();
}
}
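// Usage sketch (hypothetical values; filePattern/exactFilePath semantics
// depend on the reader's getConversationIdsByProject implementation):
//
//   const byProject = await getConversationsByProject({
//     projectPath: 'cursor-chat-history-mcp',
//     orderBy: 'relevance',
//     limit: 20,
//     fuzzyMatch: true
//   });
//   // relevanceScore on each result is passed through from the reader and
//   // may be undefined depending on the ordering used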
// Input schema for search_conversations_by_project tool (improved project search)
export const searchConversationsByProjectSchema = z.object({
projectQuery: z.string().min(1),
fuzzyMatch: z.boolean().optional().default(true),
includePartialPaths: z.boolean().optional().default(true),
includeFileContent: z.boolean().optional().default(false),
minRelevanceScore: z.number().min(0).max(10).optional().default(1),
orderBy: z.enum(['relevance', 'recency']).optional().default('relevance'),
limit: z.number().min(1).max(1000).optional().default(50),
includeDebugInfo: z.boolean().optional().default(false)
});
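// Example: only projectQuery is required; everything else falls back to the
// defaults declared above:
//
//   searchConversationsByProjectSchema.parse({ projectQuery: 'chat-history' });
//   // => { projectQuery: 'chat-history', fuzzyMatch: true,
//   //      includePartialPaths: true, includeFileContent: false,
//   //      minRelevanceScore: 1, orderBy: 'relevance', limit: 50,
//   //      includeDebugInfo: false }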
export type SearchConversationsByProjectInput = z.infer<typeof searchConversationsByProjectSchema>;
// Output type for search_conversations_by_project tool
export interface SearchConversationsByProjectOutput {
conversations: Array<{
composerId: string;
format: 'legacy' | 'modern';
messageCount: number;
hasCodeBlocks: boolean;
relevantFiles: string[];
attachedFolders: string[];
firstMessage?: string;
size: number;
relevanceScore: number;
matchDetails?: {
exactPathMatch: boolean;
partialPathMatch: boolean;
filePathMatch: boolean;
fuzzyMatch: boolean;
matchedPaths: string[];
matchedFiles: string[];
};
}>;
totalFound: number;
searchQuery: string;
searchOptions: {
fuzzyMatch: boolean;
includePartialPaths: boolean;
includeFileContent: boolean;
minRelevanceScore: number;
orderBy: string;
limit: number;
};
debugInfo?: {
totalConversationsScanned: number;
averageRelevanceScore: number;
matchTypeDistribution: {
exactPath: number;
partialPath: number;
filePath: number;
fuzzy: number;
};
};
}
/**
* Enhanced project search with fuzzy matching and flexible path matching
*/
export async function searchConversationsByProject(input: SearchConversationsByProjectInput): Promise<SearchConversationsByProjectOutput> {
// Validate input
const validatedInput = searchConversationsByProjectSchema.parse(input);
// Create database reader
const dbPath = process.env.CURSOR_DB_PATH || detectCursorDatabasePath();
const reader = new CursorDatabaseReader({
dbPath,
minConversationSize: 1000 // Lower threshold for broader search
});
try {
// Connect to database
await reader.connect();
// Get all conversations for flexible searching
const allConversationIds = await reader.getConversationIds({
format: 'both',
minLength: 1000
});
const results: Array<{
composerId: string;
format: 'legacy' | 'modern';
messageCount: number;
hasCodeBlocks: boolean;
relevantFiles: string[];
attachedFolders: string[];
firstMessage?: string;
size: number;
relevanceScore: number;
matchDetails?: any;
}> = [];
let totalScanned = 0;
const matchTypeDistribution = {
exactPath: 0,
partialPath: 0,
filePath: 0,
fuzzy: 0
};
// Process conversations in batches to avoid memory issues
const batchSize = 100;
for (let i = 0; i < allConversationIds.length; i += batchSize) {
const batch = allConversationIds.slice(i, i + batchSize);
for (const composerId of batch) {
totalScanned++;
try {
const conversation = await reader.getConversationById(composerId);
if (!conversation) continue;
// For modern format conversations, we need to resolve bubble messages to get file paths
let enrichedConversation = conversation as any;
if (conversation.hasOwnProperty('_v')) {
// Modern format - resolve bubble messages
const headers = (conversation as any).fullConversationHeadersOnly || [];
const bubbleMessages: any[] = [];
// Resolve a few bubble messages to get file paths (limit to avoid performance issues)
const maxBubblesToResolve = Math.min(headers.length, 10);
for (let j = 0; j < maxBubblesToResolve; j++) {
const header = headers[j];
try {
const bubbleMessage = await reader.getBubbleMessage(composerId, header.bubbleId);
if (bubbleMessage) {
bubbleMessages.push(bubbleMessage);
}
} catch (error) {
// Continue with other bubbles if one fails
console.error(`Failed to resolve bubble ${header.bubbleId}:`, error);
}
}
// Add resolved messages to the conversation object for matching
enrichedConversation = {
...conversation,
messages: bubbleMessages
};
}
const matchResult = calculateEnhancedProjectRelevance(
enrichedConversation,
validatedInput.projectQuery,
{
fuzzyMatch: validatedInput.fuzzyMatch,
includePartialPaths: validatedInput.includePartialPaths,
includeFileContent: validatedInput.includeFileContent
}
);
if (matchResult.score >= validatedInput.minRelevanceScore) {
const summary = await reader.getConversationSummary(composerId, {
includeFirstMessage: true,
maxFirstMessageLength: 100,
includeFileList: true,
includeCodeBlockCount: true
});
if (summary) {
// Update match type distribution
if (matchResult.details.exactPathMatch) matchTypeDistribution.exactPath++;
if (matchResult.details.partialPathMatch) matchTypeDistribution.partialPath++;
if (matchResult.details.filePathMatch) matchTypeDistribution.filePath++;
if (matchResult.details.fuzzyMatch) matchTypeDistribution.fuzzy++;
results.push({
composerId: summary.composerId,
format: summary.format,
messageCount: summary.messageCount,
hasCodeBlocks: summary.hasCodeBlocks,
relevantFiles: summary.relevantFiles,
attachedFolders: summary.attachedFolders,
firstMessage: summary.firstMessage,
size: summary.conversationSize,
relevanceScore: matchResult.score,
matchDetails: validatedInput.includeDebugInfo ? matchResult.details : undefined
});
}
}
} catch (error) {
console.error(`Failed to process conversation ${composerId}:`, error);
// Continue with other conversations
}
}
}
// Sort by relevance when requested; for 'recency' we keep the original
// ROWID order from getConversationIds, which is already descending
// (most recent first)
if (validatedInput.orderBy === 'relevance') {
results.sort((a, b) => b.relevanceScore - a.relevanceScore);
}
const limitedResults = results.slice(0, validatedInput.limit);
const debugInfo = validatedInput.includeDebugInfo ? {
totalConversationsScanned: totalScanned,
averageRelevanceScore: results.length > 0 ? results.reduce((sum, r) => sum + r.relevanceScore, 0) / results.length : 0,
matchTypeDistribution
} : undefined;
return {
conversations: limitedResults,
totalFound: results.length,
searchQuery: validatedInput.projectQuery,
searchOptions: {
fuzzyMatch: validatedInput.fuzzyMatch,
includePartialPaths: validatedInput.includePartialPaths,
includeFileContent: validatedInput.includeFileContent,
minRelevanceScore: validatedInput.minRelevanceScore,
orderBy: validatedInput.orderBy,
limit: validatedInput.limit
},
debugInfo
};
} finally {
// Always close the database connection
reader.close();
}
}
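// Usage sketch (hypothetical query; debugInfo is only populated when
// includeDebugInfo is true):
//
//   const hits = await searchConversationsByProject({
//     projectQuery: 'chat-history',
//     includeDebugInfo: true
//   });
//   // hits.debugInfo?.matchTypeDistribution breaks the results down by
//   // exact path, partial path, file path, and fuzzy matches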
/**
* Calculate enhanced project relevance with fuzzy matching and flexible path matching
*/
function calculateEnhancedProjectRelevance(
conversation: any,
projectQuery: string,
options: {
fuzzyMatch: boolean;
includePartialPaths: boolean;
includeFileContent: boolean;
}
): {
score: number;
details: {
exactPathMatch: boolean;
partialPathMatch: boolean;
filePathMatch: boolean;
fuzzyMatch: boolean;
matchedPaths: string[];
matchedFiles: string[];
};
} {
let score = 0;
const details = {
exactPathMatch: false,
partialPathMatch: false,
filePathMatch: false,
fuzzyMatch: false,
matchedPaths: [] as string[],
matchedFiles: [] as string[]
};
const queryLower = projectQuery.toLowerCase();
const queryParts = queryLower.split(/[-_\s]+/); // Split on common separators
// Helper function for fuzzy matching
const fuzzyMatch = (text: string, query: string): number => {
const textLower = text.toLowerCase();
// Full query appears as a substring
if (textLower.includes(query)) return 10;
// Check if all query parts are present
const allPartsPresent = queryParts.every(part => textLower.includes(part));
if (allPartsPresent) return 8;
// Check for partial matches
const partialMatches = queryParts.filter(part => textLower.includes(part)).length;
if (partialMatches > 0) return (partialMatches / queryParts.length) * 6;
// Normalized Levenshtein similarity for very fuzzy matching
const similarity = calculateSimilarity(textLower, query);
if (similarity > 0.6) return similarity * 4;
return 0;
};
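// Worked example of the scoring tiers above for query "chat-history"
// (queryParts = ["chat", "history"]):
//   "src/chat-history/index.ts"  -> 10  (contains the full query)
//   "history of chat tools"      -> 8   (all parts present, not contiguous)
//   "chat.ts"                    -> 3   (1 of 2 parts: (1/2) * 6)
//   anything else                -> similarity * 4 when similarity > 0.6, else 0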
// Helper function to process files and folders
const processFiles = (files: string[], scoreMultiplier: number = 1) => {
if (!files || !Array.isArray(files)) return;
for (const file of files) {
if (typeof file === 'string') {
const fileName = file.split('/').pop() || file;
const filePath = file.toLowerCase();
const fileNameLower = fileName.toLowerCase();
// Check if file path contains project query
if (filePath.includes(queryLower)) {
score += 10 * scoreMultiplier;
details.filePathMatch = true;
details.matchedFiles.push(file);
}
// Check file name
else if (fileNameLower.includes(queryLower)) {
score += 8 * scoreMultiplier;
details.filePathMatch = true;
details.matchedFiles.push(file);
}
// Fuzzy match on file paths
else if (options.fuzzyMatch) {
const fuzzyScore = Math.max(
fuzzyMatch(file, queryLower),
fuzzyMatch(fileName, queryLower)
);
if (fuzzyScore > 0) {
score += fuzzyScore * 0.5 * scoreMultiplier; // Lower weight for file matches
details.fuzzyMatch = true;
details.matchedFiles.push(file);
}
}
}
}
};
const processFolders = (folders: string[], scoreMultiplier: number = 1) => {
if (!folders || !Array.isArray(folders)) return;
for (const folder of folders) {
if (typeof folder === 'string') {
const folderName = folder.split('/').pop() || folder; // Get last part of path
const folderLower = folder.toLowerCase();
const folderNameLower = folderName.toLowerCase();
// Exact path match
if (folderLower === queryLower || folderNameLower === queryLower) {
score += 20 * scoreMultiplier;
details.exactPathMatch = true;
details.matchedPaths.push(folder);
}
// Partial path match
else if (options.includePartialPaths && (folderLower.includes(queryLower) || folderNameLower.includes(queryLower))) {
score += 15 * scoreMultiplier;
details.partialPathMatch = true;
details.matchedPaths.push(folder);
}
// Fuzzy match
else if (options.fuzzyMatch) {
const fuzzyScore = Math.max(
fuzzyMatch(folder, queryLower),
fuzzyMatch(folderName, queryLower)
);
if (fuzzyScore > 0) {
score += fuzzyScore * scoreMultiplier;
details.fuzzyMatch = true;
details.matchedPaths.push(folder);
}
}
}
}
};
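// Worked example of the folder weights for query "my-app":
//   "/Users/me/my-app"     -> +20 (last segment equals the query exactly)
//   "/Users/me/my-app-v2"  -> +15 (partial path match, when includePartialPaths)
//   "/Users/me/myapp"      -> +8 via the fuzzy tier (both query parts
//                             "my" and "app" are present as substrings)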
// Check top-level attachedFoldersNew and relevantFiles (legacy format)
processFolders(conversation.attachedFoldersNew);
processFiles(conversation.relevantFiles);
// Check legacy conversation messages
if (conversation.conversation && Array.isArray(conversation.conversation)) {
for (const message of conversation.conversation) {
processFolders(message.attachedFoldersNew, 0.8);
processFiles(message.relevantFiles, 0.8);
// Check message content if enabled
if (options.includeFileContent && message.text && typeof message.text === 'string') {
const textLower = message.text.toLowerCase();
if (textLower.includes(queryLower)) {
score += 2; // Lower weight for content matches
}
}
}
}
// Check modern-format messages resolved from bubbles above; without this,
// modern conversations would expose no file paths to match against
if (conversation.messages && Array.isArray(conversation.messages)) {
for (const message of conversation.messages) {
processFolders(message.attachedFolders, 0.8);
processFiles(message.relevantFiles, 0.8);
// Check message content if enabled
if (options.includeFileContent && message.text && typeof message.text === 'string') {
const textLower = message.text.toLowerCase();
if (textLower.includes(queryLower)) {
score += 2; // Lower weight for content matches
}
}
}
}
// Check modern format bubbles for additional context
if (conversation._v && conversation.bubbles && Array.isArray(conversation.bubbles)) {
for (const bubble of conversation.bubbles) {
processFolders(bubble.attachedFoldersNew, 0.5);
processFiles(bubble.relevantFiles, 0.5);
}
}
return {
score: Math.max(score, 0),
details
};
}
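// End-to-end sketch: a conversation with
//   attachedFoldersNew: ['/repo/my-app'], relevantFiles: ['src/my-app/cli.ts']
// scored against projectQuery 'my-app' gets 20 (exact folder match) plus
// 10 (file path contains the query) = 30, with exactPathMatch and
// filePathMatch both true in the returned details.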
/**
* Calculate normalized string similarity from Levenshtein distance
* (1 = identical, 0 = no characters in common)
*/
function calculateSimilarity(str1: string, str2: string): number {
const longer = str1.length > str2.length ? str1 : str2;
const shorter = str1.length > str2.length ? str2 : str1;
if (longer.length === 0) return 1.0;
const editDistance = levenshteinDistance(longer, shorter);
return (longer.length - editDistance) / longer.length;
}
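// Worked example: for "kitten" vs "sitting" the edit distance is 3 and the
// longer string has length 7, so similarity = (7 - 3) / 7 ≈ 0.571, just
// under the 0.6 threshold used by the fuzzy matcher above.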
/**
* Calculate Levenshtein distance between two strings
*/
function levenshteinDistance(str1: string, str2: string): number {
const matrix: number[][] = [];
for (let i = 0; i <= str2.length; i++) {
matrix[i] = [i];
}
for (let j = 0; j <= str1.length; j++) {
matrix[0][j] = j;
}
for (let i = 1; i <= str2.length; i++) {
for (let j = 1; j <= str1.length; j++) {
if (str2.charAt(i - 1) === str1.charAt(j - 1)) {
matrix[i][j] = matrix[i - 1][j - 1];
} else {
matrix[i][j] = Math.min(
matrix[i - 1][j - 1] + 1,
matrix[i][j - 1] + 1,
matrix[i - 1][j] + 1
);
}
}
}
return matrix[str2.length][str1.length];
}
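// Example: levenshteinDistance('cat', 'cut') === 1 (one substitution), and
// levenshteinDistance('', 'abc') === 3 (three insertions); the first row and
// column of the matrix encode exactly these base cases.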
/**
* Check if a conversation falls within the specified date range
*/
function checkConversationDateRange(conversation: any, startDate?: string, endDate?: string): boolean {
if (!startDate && !endDate) return true;
const start = startDate ? new Date(startDate) : new Date('1970-01-01');
const end = endDate ? new Date(endDate) : new Date();
// Check if conversation is legacy or modern format
const isLegacy = conversation.conversation && Array.isArray(conversation.conversation);
if (isLegacy) {
// Legacy format: check timestamps in conversation.conversation array
let foundTimestamp = false;
for (const message of conversation.conversation) {
if (message.timestamp) {
foundTimestamp = true;
const messageDate = new Date(message.timestamp);
if (messageDate >= start && messageDate <= end) {
return true;
}
}
}
// Valid timestamps existed but none fell inside the range: exclude
if (foundTimestamp) {
return false;
}
} else {
// Modern format: would need to resolve bubble messages to check timestamps,
// which is too expensive here, so include all modern conversations when
// date filtering
return true;
}
// If no valid timestamps were found, include the conversation
return true;
}
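// Usage sketch (hypothetical legacy-format conversation):
//
//   checkConversationDateRange(
//     { conversation: [{ timestamp: '2024-03-01T12:00:00Z' }] },
//     '2024-01-01',
//     '2024-06-30'
//   ); // => true (the message timestamp falls inside the range)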
```