This is page 2 of 3. Use http://codebase.md/jhawkins11/task-manager-mcp?lines=true&page={x} to view the full context. # Directory Structure ``` ├── .gitignore ├── frontend │ ├── .gitignore │ ├── .npmrc │ ├── components.json │ ├── package-lock.json │ ├── package.json │ ├── postcss.config.cjs │ ├── README.md │ ├── src │ │ ├── app.d.ts │ │ ├── app.html │ │ ├── app.pcss │ │ ├── lib │ │ │ ├── components │ │ │ │ ├── ImportTasksModal.svelte │ │ │ │ ├── QuestionModal.svelte │ │ │ │ ├── TaskFormModal.svelte │ │ │ │ └── ui │ │ │ │ ├── badge │ │ │ │ │ ├── badge.svelte │ │ │ │ │ └── index.ts │ │ │ │ ├── button │ │ │ │ │ ├── button.svelte │ │ │ │ │ └── index.ts │ │ │ │ ├── card │ │ │ │ │ ├── card-content.svelte │ │ │ │ │ ├── card-description.svelte │ │ │ │ │ ├── card-footer.svelte │ │ │ │ │ ├── card-header.svelte │ │ │ │ │ ├── card-title.svelte │ │ │ │ │ ├── card.svelte │ │ │ │ │ └── index.ts │ │ │ │ ├── checkbox │ │ │ │ │ ├── checkbox.svelte │ │ │ │ │ └── index.ts │ │ │ │ ├── dialog │ │ │ │ │ ├── dialog-content.svelte │ │ │ │ │ ├── dialog-description.svelte │ │ │ │ │ ├── dialog-footer.svelte │ │ │ │ │ ├── dialog-header.svelte │ │ │ │ │ ├── dialog-overlay.svelte │ │ │ │ │ ├── dialog-portal.svelte │ │ │ │ │ ├── dialog-title.svelte │ │ │ │ │ └── index.ts │ │ │ │ ├── input │ │ │ │ │ ├── index.ts │ │ │ │ │ └── input.svelte │ │ │ │ ├── label │ │ │ │ │ ├── index.ts │ │ │ │ │ └── label.svelte │ │ │ │ ├── progress │ │ │ │ │ ├── index.ts │ │ │ │ │ └── progress.svelte │ │ │ │ ├── select │ │ │ │ │ ├── index.ts │ │ │ │ │ ├── select-content.svelte │ │ │ │ │ ├── select-group-heading.svelte │ │ │ │ │ ├── select-item.svelte │ │ │ │ │ ├── select-scroll-down-button.svelte │ │ │ │ │ ├── select-scroll-up-button.svelte │ │ │ │ │ ├── select-separator.svelte │ │ │ │ │ └── select-trigger.svelte │ │ │ │ ├── separator │ │ │ │ │ ├── index.ts │ │ │ │ │ └── separator.svelte │ │ │ │ └── textarea │ │ │ │ ├── index.ts │ │ │ │ └── textarea.svelte │ │ │ ├── index.ts │ │ │ ├── types.ts │ │ │ └── utils.ts │ │ └── 
routes
│   │       ├── +layout.server.ts
│   │       ├── +layout.svelte
│   │       └── +page.svelte
│   ├── static
│   │   └── favicon.png
│   ├── svelte.config.js
│   ├── tailwind.config.js
│   ├── tsconfig.json
│   └── vite.config.ts
├── img
│   └── ui.png
├── jest.config.js
├── package-lock.json
├── package.json
├── README.md
├── src
│   ├── config
│   │   ├── index.ts
│   │   ├── migrations.sql
│   │   └── schema.sql
│   ├── index.ts
│   ├── lib
│   │   ├── dbUtils.ts
│   │   ├── llmUtils.ts
│   │   ├── logger.ts
│   │   ├── repomixUtils.ts
│   │   ├── utils.ts
│   │   └── winstonLogger.ts
│   ├── models
│   │   └── types.ts
│   ├── server.ts
│   ├── services
│   │   ├── aiService.ts
│   │   ├── databaseService.ts
│   │   ├── planningStateService.ts
│   │   └── webSocketService.ts
│   └── tools
│       ├── adjustPlan.ts
│       ├── markTaskComplete.ts
│       ├── planFeature.ts
│       └── reviewChanges.ts
├── tests
│   ├── json-parser.test.ts
│   ├── llmUtils.unit.test.ts
│   ├── reviewChanges.integration.test.ts
│   └── setupEnv.ts
└── tsconfig.json
```

# Files

--------------------------------------------------------------------------------
/src/services/aiService.ts:
--------------------------------------------------------------------------------

```typescript
import {
  GoogleGenerativeAI,
  GenerativeModel,
  GenerateContentResult,
  GoogleGenerativeAIError,
} from '@google/generative-ai'
import OpenAI, { OpenAIError } from 'openai'
import { logToFile } from '../lib/logger'
import {
  GEMINI_API_KEY,
  OPENROUTER_API_KEY,
  GEMINI_MODEL,
  OPENROUTER_MODEL,
  REVIEW_LLM_API_KEY,
  safetySettings,
  FALLBACK_GEMINI_MODEL,
  FALLBACK_OPENROUTER_MODEL,
} from '../config'
import { z } from 'zod'
import { parseAndValidateJsonResponse } from '../lib/llmUtils'

/**
 * Result of a schema-validated model call: either the parsed, validated data
 * plus the raw provider response, or an error message with whatever raw
 * response (if any) was received.
 */
type StructuredCallResult<T extends z.ZodType, R> =
  | { success: true; data: z.infer<T>; rawResponse: R }
  | { success: false; error: string; rawResponse?: R | null }

/**
 * Class to manage AI models and provide access to them.
 *
 * OpenRouter is preferred when OPENROUTER_API_KEY is set; otherwise the
 * Google Gemini SDK is used. NOTE(review): informational logging deliberately
 * goes to console.error / logToFile — presumably stdout is reserved for
 * protocol traffic (MCP); confirm before "fixing" the log levels.
 */
class AIService {
  private genAI: GoogleGenerativeAI | null = null
  private openRouter: OpenAI | null = null
  private planningModel: GenerativeModel | undefined
  private reviewModel: GenerativeModel | undefined
  private initialized = false

  constructor() {
    this.initialize()
  }

  /**
   * Initializes exactly one provider client, preferring OpenRouter.
   * SDK construction failures are logged but not rethrown, so the service
   * object is always usable (calls will fail gracefully later).
   */
  private initialize(): void {
    // Initialize OpenRouter if API key is available
    if (OPENROUTER_API_KEY) {
      try {
        this.openRouter = new OpenAI({
          apiKey: OPENROUTER_API_KEY,
          baseURL: 'https://openrouter.ai/api/v1',
        })
        console.error(
          '[TaskServer] LOG: OpenRouter SDK initialized successfully.'
        )
      } catch (sdkError) {
        console.error(
          '[TaskServer] CRITICAL ERROR initializing OpenRouter SDK:',
          sdkError
        )
      }
    } else if (GEMINI_API_KEY) {
      try {
        this.genAI = new GoogleGenerativeAI(GEMINI_API_KEY)
        // Configure the model. Planning and review currently share GEMINI_MODEL.
        this.planningModel = this.genAI.getGenerativeModel({
          model: GEMINI_MODEL,
        })
        this.reviewModel = this.genAI.getGenerativeModel({
          model: GEMINI_MODEL,
        })
        console.error(
          '[TaskServer] LOG: Google AI SDK initialized successfully.'
        )
      } catch (sdkError) {
        console.error(
          '[TaskServer] CRITICAL ERROR initializing Google AI SDK:',
          sdkError
        )
      }
    } else {
      console.error(
        '[TaskServer] WARNING: Neither OPENROUTER_API_KEY nor GEMINI_API_KEY environment variable is set. API calls will fail.'
      )
    }

    this.initialized = true
  }

  /**
   * Gets the appropriate planning model for task planning.
   * OpenRouter wins over Gemini when both are configured.
   */
  getPlanningModel(): GenerativeModel | OpenAI | null {
    // Fix: the old log wrapped a plain string in JSON.stringify, which only
    // added spurious quotes. `void` marks the fire-and-forget promise.
    void logToFile(
      `[TaskServer] Planning model: ${this.openRouter ? 'OpenRouter' : 'Gemini'}`
    )
    return this.openRouter || this.planningModel || null
  }

  /**
   * Gets the appropriate review model for code reviews.
   */
  getReviewModel(): GenerativeModel | OpenAI | null {
    return this.openRouter || this.reviewModel || null
  }

  /**
   * Extracts the text content from an AI API result.
   * Handles both OpenRouter and Gemini responses.
   * Returns null when the response is missing, blocked, or has no text part.
   */
  extractTextFromResponse(
    result:
      | GenerateContentResult
      | OpenAI.Chat.Completions.ChatCompletion
      | undefined
  ): string | null {
    // For OpenRouter responses (OpenAI chat-completion shape)
    if (
      result &&
      'choices' in result &&
      result.choices &&
      result.choices.length > 0
    ) {
      const choice = result.choices[0]
      if (choice.message && choice.message.content) {
        return choice.message.content
      }
      return null
    }

    // For Gemini responses
    if (result && 'response' in result) {
      try {
        const response = result.response
        // A safety block yields no usable text; surface the reason in logs.
        if (response.promptFeedback?.blockReason) {
          console.error(
            `[TaskServer] Gemini response blocked: ${response.promptFeedback.blockReason}`
          )
          return null
        }
        if (response.candidates && response.candidates.length > 0) {
          const candidate = response.candidates[0]
          if (candidate.content?.parts?.[0]?.text) {
            return candidate.content.parts[0].text
          }
        }
        console.error(
          '[TaskServer] No text content found in Gemini response candidate.'
        )
        return null
      } catch (error) {
        console.error(
          '[TaskServer] Error extracting text from Gemini response:',
          error
        )
        return null
      }
    }

    return null
  }

  /**
   * Extracts and validates structured data from an AI API result.
   * Handles both OpenRouter and Gemini responses and validates against a schema.
   *
   * @param result The raw API response from either OpenRouter or Gemini
   * @param schema The Zod schema to validate against
   * @returns An object with either validated data or error information
   */
  extractStructuredResponse<T extends z.ZodType>(
    result:
      | GenerateContentResult
      | OpenAI.Chat.Completions.ChatCompletion
      | undefined,
    schema: T
  ):
    | { success: true; data: z.infer<T> }
    | { success: false; error: string; rawData: any | null } {
    // First extract text content using the shared extraction method
    const textContent = this.extractTextFromResponse(result)

    // Then parse and validate as JSON against the schema
    return parseAndValidateJsonResponse(textContent, schema)
  }

  /**
   * Makes a structured OpenRouter API call with JSON schema validation.
   * Retries once with FALLBACK_OPENROUTER_MODEL on a rate limit (429),
   * whether reported as a thrown error or inside a 200 response body.
   *
   * @param modelName The model to use for the request
   * @param messages The messages to send to the model
   * @param schema The Zod schema to validate the response against
   * @param options Additional options for the API call
   * @param isRetry Internal flag: true on the single fallback attempt
   * @returns A promise that resolves to the validated data or error information
   */
  async callOpenRouterWithSchema<T extends z.ZodType>(
    modelName: string,
    messages: Array<OpenAI.Chat.ChatCompletionMessageParam>,
    schema: T,
    options: {
      temperature?: number
      max_tokens?: number
    } = {},
    isRetry: boolean = false
  ): Promise<StructuredCallResult<T, OpenAI.Chat.Completions.ChatCompletion>> {
    if (!this.openRouter) {
      return {
        success: false,
        error: 'OpenRouter client is not initialized',
        rawResponse: null,
      }
    }

    // isRetry alone selects the fallback model; modelName is ignored then.
    const currentModel = isRetry ? FALLBACK_OPENROUTER_MODEL : modelName
    await logToFile(
      `[AIService] Calling OpenRouter model: ${currentModel}${
        isRetry ? ' (Fallback)' : ''
      }`
    )

    let response: OpenAI.Chat.Completions.ChatCompletion | null = null
    try {
      response = await this.openRouter.chat.completions.create({
        model: currentModel,
        messages,
        temperature: options.temperature ?? 0.7,
        max_tokens: options.max_tokens,
        response_format: { type: 'json_object' },
      })

      // OpenRouter can report provider errors inside an HTTP-200 body.
      const openRouterError = (response as any)?.error
      let responseBodyRateLimitDetected = false

      if (openRouterError) {
        await logToFile(
          `[AIService] OpenRouter response contains error object: ${JSON.stringify(
            openRouterError
          )}`
        )
        if (
          openRouterError.code === 429 ||
          openRouterError.status === 'RESOURCE_EXHAUSTED' ||
          (typeof openRouterError.message === 'string' &&
            openRouterError.message.includes('quota'))
        ) {
          responseBodyRateLimitDetected = true
        }
      }

      if (responseBodyRateLimitDetected && !isRetry) {
        await logToFile(
          `[AIService] Rate limit (429) detected in response body for ${currentModel}. Retrying with fallback ${FALLBACK_OPENROUTER_MODEL}...`
        )
        // Fix: both retry paths now pass the original modelName; isRetry=true
        // already forces the fallback model, so passing FALLBACK_* in one path
        // and modelName in the other (as before) was misleadingly inconsistent.
        return this.callOpenRouterWithSchema(
          modelName,
          messages,
          schema,
          options,
          true
        )
      }

      const textContent = this.extractTextFromResponse(response)
      const validationResult = parseAndValidateJsonResponse(textContent, schema)

      // A body-level error with no salvageable valid payload is terminal.
      if (openRouterError && !validationResult.success) {
        await logToFile(
          `[AIService] Non-retryable error detected in response body for ${currentModel}.`
        )
        return {
          success: false,
          error: `API response contained error: ${
            openRouterError.message || 'Unknown error'
          }`,
          rawResponse: response,
        }
      }

      if (validationResult.success) {
        return {
          success: true,
          data: validationResult.data,
          rawResponse: response,
        }
      } else {
        await logToFile(
          `[AIService] Schema validation failed for ${currentModel}: ${
            validationResult.error
          }. Raw data: ${JSON.stringify(validationResult.rawData)?.substring(
            0,
            200
          )}`
        )
        // Prefer the provider's own error message when one was present.
        const errorMessage = openRouterError?.message
          ? `API response contained error: ${openRouterError.message}`
          : validationResult.error
        return {
          success: false,
          error: errorMessage,
          rawResponse: response,
        }
      }
    } catch (error: any) {
      await logToFile(
        `[AIService] API call failed for ${currentModel}. Error: ${
          error.message
        }, Status: ${error.status || 'unknown'}`
      )

      let isRateLimitError = false
      if (error instanceof OpenAIError && (error as any).status === 429) {
        isRateLimitError = true
      } else if (error.status === 429) {
        isRateLimitError = true
      }

      if (isRateLimitError && !isRetry) {
        await logToFile(
          `[AIService] Rate limit hit (thrown error ${
            error.status || 429
          }) for ${currentModel}. Retrying with fallback ${FALLBACK_OPENROUTER_MODEL}...`
        )
        // See note above: isRetry=true selects the fallback model.
        return this.callOpenRouterWithSchema(
          modelName,
          messages,
          schema,
          options,
          true
        )
      }

      const rawErrorResponse = error?.response
      return {
        success: false,
        error: `API call failed: ${error.message}`,
        rawResponse: rawErrorResponse || null,
      }
    }
  }

  /**
   * Makes a structured Gemini API call with JSON schema validation.
   * Note: Gemini currently has limited built-in JSON schema support,
   * so we use prompt engineering to get structured output.
   *
   * @param modelName The model to use for the request
   * @param prompt The prompt to send to the model
   * @param schema The Zod schema to validate the response against
   * @param options Additional options for the API call
   * @param isRetry Internal flag: true on the single fallback attempt
   * @returns A promise that resolves to the validated data or error information
   */
  async callGeminiWithSchema<T extends z.ZodType>(
    modelName: string,
    prompt: string,
    schema: T,
    options: {
      temperature?: number
      maxOutputTokens?: number
    } = {},
    isRetry: boolean = false
  ): Promise<
    | { success: true; data: z.infer<T>; rawResponse: GenerateContentResult }
    | {
        success: false
        error: string
        rawResponse?: GenerateContentResult | null
      }
  > {
    if (!this.genAI) {
      return {
        success: false,
        error: 'Gemini client is not initialized',
        rawResponse: null,
      }
    }

    const currentModelName = isRetry ? FALLBACK_GEMINI_MODEL : modelName
    await logToFile(
      `[AIService] Calling Gemini model: ${currentModelName}${
        isRetry ? ' (Fallback)' : ''
      }`
    )

    // Steer the model toward raw JSON by appending a schema description.
    const schemaDescription = this.createSchemaDescription(schema)
    const enhancedPrompt = `${prompt}\n\nYour response must be a valid JSON object with the following structure:\n${schemaDescription}\n\nEnsure your response is valid JSON with no markdown formatting or additional text.`

    try {
      const model = this.genAI.getGenerativeModel({ model: currentModelName })
      const response = await model.generateContent({
        contents: [{ role: 'user', parts: [{ text: enhancedPrompt }] }],
        generationConfig: {
          temperature: options.temperature ?? 0.7,
          maxOutputTokens: options.maxOutputTokens,
        },
        safetySettings,
      })

      const textContent = this.extractTextFromResponse(response)
      const validationResult = parseAndValidateJsonResponse(textContent, schema)

      if (validationResult.success) {
        return {
          success: true,
          data: validationResult.data,
          rawResponse: response,
        }
      } else {
        await logToFile(
          `[AIService] Schema validation failed for ${currentModelName}: ${
            validationResult.error
          }. Raw data: ${JSON.stringify(validationResult.rawData)?.substring(
            0,
            200
          )}`
        )
        return {
          success: false,
          error: validationResult.error,
          rawResponse: response,
        }
      }
    } catch (error: any) {
      await logToFile(
        `[AIService] API call failed for ${currentModelName}. Error: ${error.message}`
      )

      let isRateLimitError = false
      if (
        error instanceof GoogleGenerativeAIError &&
        error.message.includes('RESOURCE_EXHAUSTED')
      ) {
        isRateLimitError = true
      } else if (error.status === 429) {
        isRateLimitError = true
      }

      if (isRateLimitError && !isRetry) {
        await logToFile(
          `[AIService] Rate limit hit for ${currentModelName}. Retrying with fallback model ${FALLBACK_GEMINI_MODEL}...`
        )
        // Fix: pass the original modelName for consistency with the
        // OpenRouter path; isRetry=true already selects FALLBACK_GEMINI_MODEL.
        return this.callGeminiWithSchema(
          modelName,
          prompt,
          schema,
          options,
          true
        )
      }

      return {
        success: false,
        error: `API call failed: ${error.message}`,
        rawResponse: null,
      }
    }
  }

  /**
   * Creates a human-readable description of a Zod schema for prompt engineering.
   * NOTE(review): this reads zod's private `_def` internals, which can break
   * across zod versions — confirm the pinned zod version before upgrading.
   */
  private createSchemaDescription(schema: z.ZodType): string {
    // Use the schema describe functionality to extract metadata
    const description = schema._def.description ?? 'JSON object'

    // For object schemas, extract shape information
    if (schema instanceof z.ZodObject) {
      const shape = schema._def.shape()
      const fields = Object.entries(shape).map(([key, field]) => {
        const fieldType = this.getZodTypeDescription(field as z.ZodType)
        const fieldDesc = (field as z.ZodType)._def.description || ''
        return `  "${key}": ${fieldType}${fieldDesc ? ` // ${fieldDesc}` : ''}`
      })

      return `{\n${fields.join(',\n')}\n}`
    }

    // For array schemas
    if (schema instanceof z.ZodArray) {
      const elementType = this.getZodTypeDescription(schema._def.type)
      return `[\n  ${elementType} // Array of items\n]`
    }

    // For other types
    return description
  }

  /**
   * Gets a simple description of a Zod type for schema representation.
   */
  private getZodTypeDescription(schema: z.ZodType): string {
    if (schema instanceof z.ZodString) return '"string"'
    if (schema instanceof z.ZodNumber) return 'number'
    if (schema instanceof z.ZodBoolean) return 'boolean'
    if (schema instanceof z.ZodArray) {
      const elementType = this.getZodTypeDescription(schema._def.type)
      return `[${elementType}]`
    }
    if (schema instanceof z.ZodObject) {
      const shape = schema._def.shape()
      const fields = Object.entries(shape).map(([key]) => `"${key}"`)
      return `{ ${fields.join(', ')} }`
    }
    if (schema instanceof z.ZodEnum) {
      const values = schema._def.values.map((v: string) => `"${v}"`)
      return `one of: ${values.join(' | ')}`
    }

    return 'any'
  }

  /**
   * Checks if the service is properly initialized with at least one provider.
   */
  isInitialized(): boolean {
    return this.initialized && (!!this.openRouter || !!this.planningModel)
  }
}

// Export a singleton instance
export const aiService = new AIService()
```

--------------------------------------------------------------------------------
/src/services/databaseService.ts:
--------------------------------------------------------------------------------

```typescript
import sqlite3 from 'sqlite3'
import fs from 'fs'
import path from 'path'
import { promisify } from 'util'
import { SQLITE_DB_PATH } from '../config'
import logger from '../lib/winstonLogger'

//
// Define Task type for database operations
interface Task {
  id: string
  title?: string
  description?: string
  status: 'pending' | 'in_progress' | 'completed' | 'decomposed'
  completed: boolean
  effort?: 'low' | 'medium' | 'high'
  feature_id?: string
  parent_task_id?: string
  created_at: number // unix seconds
  updated_at: number // unix seconds
  fromReview?: boolean // maps to the from_review column
}

// Define interface for partial task updates
interface TaskUpdate {
  title?: string
  description?: string
  effort?: 'low' | 'medium' | 'high'
  parent_task_id?: string
  fromReview?: boolean
}

// Define History Entry type for database operations
export interface HistoryEntry {
  id?: number
  timestamp: number
  role: 'user' | 'model' | 'tool_call' | 'tool_response'
  content: string
  feature_id: string
  task_id?: string
  action?: string
  details?: string
}

/**
 * Thin promise-based wrapper around node-sqlite3 plus the task/feature
 * persistence queries used by the server. All methods require connect()
 * to have been awaited first.
 */
class DatabaseService {
  private db: sqlite3.Database | null = null
  private dbPath: string

  constructor(dbPath: string = SQLITE_DB_PATH) {
    this.dbPath = dbPath
    try {
      this.ensureDatabaseDirectory()
    } catch (error: any) {
      // Non-fatal here: connect() will surface the real failure later.
      console.error(
        `[DatabaseService] CRITICAL: Failed to ensure database directory exists at ${path.dirname(
          this.dbPath
        )}: ${error.message}`
      )
    }
  }

  /** Creates the parent directory of the DB file if it does not exist. */
  private ensureDatabaseDirectory(): void {
    const dbDir = path.dirname(this.dbPath)
    if (!fs.existsSync(dbDir)) {
      console.log(`[DatabaseService] Creating database directory: ${dbDir}`)
      fs.mkdirSync(dbDir, { recursive: true })
    }
  }

  /** Opens the SQLite connection; no-op if already connected. */
  async connect(): Promise<void> {
    if (this.db) {
      logger.debug('[DatabaseService] Already connected.')
      return Promise.resolve()
    }
    logger.debug(`[DatabaseService] Connecting to database at: ${this.dbPath}`)
    return new Promise((resolve, reject) => {
      const verboseDb = new (sqlite3.verbose().Database)(this.dbPath, (err) => {
        if (err) {
          logger.error(`Error connecting to SQLite database: ${err.message}`, {
            stack: err.stack,
          })
          reject(
            new Error(`Error connecting to SQLite database: ${err.message}`)
          )
          return
        }
        // Only publish the handle once the open callback confirms success.
        this.db = verboseDb
        logger.debug('[DatabaseService] Database connection successful.')
        resolve()
      })
    })
  }

  /** Closes the connection; resolves immediately if none is open. */
  async close(): Promise<void> {
    logger.debug('[DatabaseService] Attempting to close database connection.')
    return new Promise((resolve, reject) => {
      if (!this.db) {
        logger.debug('[DatabaseService] No active connection to close.')
        resolve()
        return
      }
      this.db.close((err) => {
        if (err) {
          logger.error(`Error closing SQLite database: ${err.message}`, {
            stack: err.stack,
          })
          reject(new Error(`Error closing SQLite database: ${err.message}`))
          return
        }
        this.db = null
        logger.debug(
          '[DatabaseService] Database connection closed successfully.'
        )
        resolve()
      })
    })
  }

  /**
   * Runs a write statement. Resolves with the sqlite3 RunResult
   * (lastID/changes); `function` callback is required to receive it as `this`.
   */
  public async runAsync(
    sql: string,
    params: any[] = []
  ): Promise<sqlite3.RunResult> {
    if (!this.db) {
      logger.error(
        '[DatabaseService] runAsync called but database is not connected.'
      )
      throw new Error('Database is not connected')
    }
    return new Promise((resolve, reject) => {
      this.db!.run(sql, params, function (err) {
        if (err) {
          logger.error(
            `Error executing SQL: ${sql} - Params: ${JSON.stringify(
              params
            )} - Error: ${err.message}`,
            { stack: err.stack }
          )
          reject(new Error(`Error executing SQL: ${err.message}`))
        } else {
          resolve(this)
        }
      })
    })
  }

  /**
   * Executes config/schema.sql inside a transaction, rolling back on failure.
   * NOTE(review): statements are split on ';', which would break on triggers
   * or other compound statements — fine for the current simple schema.
   */
  private async runSchemaFromFile(): Promise<void> {
    const schemaPath = path.join(__dirname, '..', 'config', 'schema.sql')
    logger.info(`Attempting to run schema from: ${schemaPath}`)
    if (!fs.existsSync(schemaPath)) {
      logger.error(`Schema file not found at ${schemaPath}`)
      throw new Error(`Schema file not found at ${schemaPath}`)
    }
    logger.info(`Schema file found at ${schemaPath}`)
    const schema = fs.readFileSync(schemaPath, 'utf8')
    const statements = schema
      .split(';')
      .map((statement) => statement.trim())
      .filter((statement) => statement.length > 0)
    logger.info(`Found ${statements.length} SQL statements in schema file.`)
    if (!this.db) {
      logger.error('Database is not connected in runSchemaFromFile.')
      throw new Error('Database is not connected')
    }
    try {
      logger.info('Starting transaction for schema execution.')
      await this.runAsync('BEGIN TRANSACTION;')
      for (let i = 0; i < statements.length; i++) {
        const statement = statements[i]
        logger.debug(
          `Executing schema statement #${i + 1}: ${statement.substring(
            0,
            60
          )}...`
        )
        await this.runAsync(statement)
        logger.debug(`Successfully executed statement #${i + 1}`)
      }
      logger.info('Committing transaction for schema execution.')
      await this.runAsync('COMMIT;')
      logger.info('Schema execution committed successfully.')
    } catch (error: any) {
      logger.error(
        `Error during schema execution: ${error.message}. Rolling back transaction.`,
        { stack: error.stack }
      )
      try {
        await this.runAsync('ROLLBACK;')
        logger.info('Transaction rolled back successfully.')
      } catch (rollbackError: any) {
        logger.error(`Failed to rollback transaction: ${rollbackError.message}`)
      }
      throw new Error(`Schema execution failed: ${error.message}`)
    }
  }

  /** True if a table with the given name exists in sqlite_master. */
  async tableExists(tableName: string): Promise<boolean> {
    if (!this.db) {
      logger.error(
        '[DatabaseService] tableExists called but database is not connected.'
      )
      throw new Error('Database is not connected')
    }
    return new Promise((resolve, reject) => {
      this.db!.get(
        "SELECT name FROM sqlite_master WHERE type='table' AND name=?",
        [tableName],
        (err, row) => {
          if (err) {
            logger.error(
              `Error checking if table ${tableName} exists: ${err.message}`
            )
            reject(err)
          } else {
            resolve(!!row)
          }
        }
      )
    })
  }

  /** Connects if needed and creates the schema when tables are missing. */
  async initializeDatabase(): Promise<void> {
    if (!this.db) {
      logger.info(
        '[DatabaseService] Connecting DB within initializeDatabase...'
      )
      await this.connect()
    } else {
      logger.debug('[DatabaseService] DB already connected for initialization.')
    }
    try {
      logger.info('[DatabaseService] Checking if tables exist...')
      const tablesExist = await this.tableExists('tasks')
      logger.info(
        `[DatabaseService] 'tasks' table exists check returned: ${tablesExist}`
      )
      if (!tablesExist) {
        logger.info(
          '[DatabaseService] Initializing database schema as tables do not exist...'
        )
        await this.runSchemaFromFile()
        logger.info(
          '[DatabaseService] Database schema initialization complete.'
        )
      } else {
        logger.info(
          '[DatabaseService] Database tables already exist. Skipping schema initialization.'
        )
      }
    } catch (error: any) {
      logger.error(`Error during database initialization: ${error.message}`, {
        stack: error.stack,
      })
      console.error('Error initializing database:', error)
      throw error
    }
  }

  /** Applies schema.sql (idempotent CREATEs) then migrations.sql. */
  async runMigrations(): Promise<void> {
    if (!this.db) {
      throw new Error('Database is not connected')
    }

    try {
      // Run schema first to create tables if they don't exist
      await this.runSchemaFromFile()

      // Run migrations to update existing tables
      await this.runMigrationsFromFile()
    } catch (error) {
      console.error('Error running migrations:', error)
      throw error
    }
  }

  /**
   * Executes config/migrations.sql statement by statement.
   * "duplicate column name" errors are treated as already-applied migrations;
   * anything else is rethrown.
   */
  private async runMigrationsFromFile(): Promise<void> {
    // Use __dirname to reliably locate the file relative to the compiled JS file
    const migrationsPath = path.join(
      __dirname,
      '..',
      'config',
      'migrations.sql'
    )
    console.log(
      `[DB Service] Attempting to load migrations from: ${migrationsPath}`
    )

    if (!fs.existsSync(migrationsPath)) {
      console.log(
        `[DB Service] Migrations file not found at ${migrationsPath}, skipping migrations.`
      )
      return
    }
    console.log(
      `[DB Service] Migrations file found at ${migrationsPath}. Reading...`
    )

    const migrations = fs.readFileSync(migrationsPath, 'utf8')
    const statements = migrations
      .split(';')
      .map((statement) => statement.trim())
      .filter((statement) => statement.length > 0)

    console.log(
      `[DB Service] Executing ${statements.length} statements from migrations.sql...`
    )
    for (const statement of statements) {
      try {
        console.log(
          `[DB Service] Executing migration statement: ${statement.substring(
            0,
            100
          )}...`
        )
        await this.runAsync(statement)
      } catch (error: any) {
        // Only ignore the error if it's specifically about a duplicate column
        if (error?.message?.includes('duplicate column name')) {
          console.log(
            `[DB Service] Migration statement likely already applied (duplicate column): ${statement}`
          )
        } else {
          // Re-throw any other error during migration
          console.error(
            `[DB Service] Migration statement failed: ${statement}`,
            error
          )
          throw error
        }
      }
    }
    console.log(`[DB Service] Finished executing migration statements.`)
  }

  /** Fetches a single row, or undefined when none matches. */
  async get(sql: string, params: any[] = []): Promise<any> {
    if (!this.db) {
      throw new Error('Database is not connected')
    }

    return new Promise((resolve, reject) => {
      this.db!.get(sql, params, (err, row) => {
        if (err) {
          // Fix: reject with an Error object, not a bare string, so callers'
          // `instanceof Error` checks and stack traces work.
          reject(new Error(`Error executing SQL: ${err.message}`))
          return
        }
        resolve(row)
      })
    })
  }

  /** Fetches all matching rows (empty array when none match). */
  async all(sql: string, params: any[] = []): Promise<any[]> {
    if (!this.db) {
      throw new Error('Database is not connected')
    }

    return new Promise((resolve, reject) => {
      this.db!.all(sql, params, (err, rows) => {
        if (err) {
          // Fix: reject with an Error object, not a bare string (see get()).
          reject(new Error(`Error executing SQL: ${err.message}`))
          return
        }
        resolve(rows)
      })
    })
  }

  /** Returns all tasks for a feature, oldest first, with booleans coerced. */
  async getTasksByFeatureId(featureId: string): Promise<Task[]> {
    if (!this.db) {
      throw new Error('Database is not connected')
    }

    try {
      const rows = await this.all(
        `SELECT
          id, title, description, status,
          completed, effort, feature_id, parent_task_id,
          created_at, updated_at, from_review
        FROM tasks
        WHERE feature_id = ?
        ORDER BY created_at ASC`,
        [featureId]
      )

      // SQLite stores booleans as 0/1; normalize to real booleans.
      return rows.map((row) => ({
        ...row,
        completed: Boolean(row.completed),
        fromReview: Boolean(row.from_review),
      }))
    } catch (error) {
      console.error(`Error fetching tasks for feature ${featureId}:`, error)
      throw error
    }
  }

  /** Returns one task by id, or null when not found. */
  async getTaskById(taskId: string): Promise<Task | null> {
    if (!this.db) {
      throw new Error('Database is not connected')
    }

    try {
      const row = await this.get(
        `SELECT
          id, title, description, status,
          completed, effort, feature_id, parent_task_id,
          created_at, updated_at, from_review
        FROM tasks
        WHERE id = ?`,
        [taskId]
      )

      if (!row) {
        return null
      }

      return {
        ...row,
        completed: Boolean(row.completed),
        fromReview: Boolean(row.from_review),
      }
    } catch (error) {
      console.error(`Error fetching task ${taskId}:`, error)
      throw error
    }
  }

  /** Inserts a task row; returns the task id. Timestamps default to now. */
  async addTask(task: Task): Promise<string> {
    if (!this.db) {
      throw new Error('Database is not connected')
    }

    const now = Math.floor(Date.now() / 1000)
    // Fix: use ?? instead of || so an explicit timestamp of 0 (epoch) is kept.
    const timestamp = task.created_at ?? now

    try {
      await this.runAsync(
        `INSERT INTO tasks (
          id, title, description, status,
          completed, effort, feature_id, parent_task_id,
          created_at, updated_at, from_review
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
        [
          task.id,
          task.title || null,
          task.description || null,
          task.status,
          task.completed ? 1 : 0,
          task.effort || null,
          task.feature_id || null,
          task.parent_task_id || null,
          timestamp,
          task.updated_at ?? timestamp,
          task.fromReview ? 1 : 0,
        ]
      )

      return task.id
    } catch (error) {
      console.error('Error adding task:', error)
      throw error
    }
  }

  /**
   * Updates a task's status (and optionally its completed flag).
   * Returns true when a row was actually changed.
   */
  async updateTaskStatus(
    taskId: string,
    status: 'pending' | 'in_progress' | 'completed' | 'decomposed',
    completed?: boolean
  ): Promise<boolean> {
    if (!this.db) {
      throw new Error('Database is not connected')
    }

    const now = Math.floor(Date.now() / 1000)

    try {
      let result

      if (completed !== undefined) {
        result = await this.runAsync(
          `UPDATE tasks
          SET status = ?, completed = ?, updated_at = ?
          WHERE id = ?`,
          [status, completed ? 1 : 0, now, taskId]
        )
      } else {
        result = await this.runAsync(
          `UPDATE tasks
          SET status = ?, updated_at = ?
          WHERE id = ?`,
          [status, now, taskId]
        )
      }

      return result.changes > 0
    } catch (error) {
      console.error(`Error updating status for task ${taskId}:`, error)
      throw error
    }
  }

  async updateTaskDetails(
    taskId: string,
    updates: TaskUpdate
  ): Promise<boolean> {
    if (!this.db) {
      throw new Error('Database is not connected')
    }

    const now = Math.floor(Date.now() / 1000)

    try {
      const task = await this.getTaskById(taskId)

      if (!task) {
        return false
      }

      const updatedTask = {
        ...task,
        title: updates.title ?? task.title,
        description: updates.description ?? task.description,
        effort: updates.effort ?? task.effort,
        parent_task_id: updates.parent_task_id ??
task.parent_task_id, 521 | fromReview: 522 | updates.fromReview !== undefined 523 | ? updates.fromReview 524 | : task.fromReview, 525 | updated_at: now, 526 | } 527 | 528 | const result = await this.runAsync( 529 | `UPDATE tasks 530 | SET title = ?, description = ?, effort = ?, parent_task_id = ?, updated_at = ?, from_review = ? 531 | WHERE id = ?`, 532 | [ 533 | updatedTask.title || null, 534 | updatedTask.description || null, 535 | updatedTask.effort || null, 536 | updatedTask.parent_task_id || null, 537 | updatedTask.updated_at, 538 | updatedTask.fromReview ? 1 : 0, 539 | taskId, 540 | ] 541 | ) 542 | 543 | return result.changes > 0 544 | } catch (error) { 545 | console.error(`Error updating details for task ${taskId}:`, error) 546 | throw error 547 | } 548 | } 549 | 550 | async deleteTask(taskId: string): Promise<boolean> { 551 | if (!this.db) { 552 | throw new Error('Database is not connected') 553 | } 554 | 555 | try { 556 | // Begin transaction 557 | await this.runAsync('BEGIN TRANSACTION') 558 | 559 | try { 560 | // Delete any task relationships first 561 | await this.runAsync( 562 | 'DELETE FROM task_relationships WHERE parent_id = ? 
OR child_id = ?', 563 | [taskId, taskId] 564 | ) 565 | 566 | // Finally delete the task 567 | const result = await this.runAsync('DELETE FROM tasks WHERE id = ?', [ 568 | taskId, 569 | ]) 570 | 571 | // Commit transaction 572 | await this.runAsync('COMMIT') 573 | 574 | return result.changes > 0 575 | } catch (error) { 576 | // Rollback in case of error 577 | await this.runAsync('ROLLBACK') 578 | throw error 579 | } 580 | } catch (error) { 581 | console.error(`Error deleting task ${taskId}:`, error) 582 | throw error 583 | } 584 | } 585 | 586 | // History Entry Operations 587 | 588 | async getHistoryByFeatureId( 589 | featureId: string, 590 | limit: number = 100 591 | ): Promise<HistoryEntry[]> { 592 | if (!this.db) { 593 | throw new Error('Database is not connected') 594 | } 595 | 596 | try { 597 | const rows = await this.all( 598 | `SELECT 599 | id, timestamp, role, content, feature_id, 600 | task_id, action, details 601 | FROM history_entries 602 | WHERE feature_id = ? 603 | ORDER BY timestamp DESC 604 | LIMIT ?`, 605 | [featureId, limit] 606 | ) 607 | 608 | return rows.map((row) => ({ 609 | ...row, 610 | content: 611 | typeof row.content === 'string' 612 | ? JSON.parse(row.content) 613 | : row.content, 614 | })) 615 | } catch (error) { 616 | console.error(`Error fetching history for feature ${featureId}:`, error) 617 | throw error 618 | } 619 | } 620 | 621 | async addHistoryEntry(entry: HistoryEntry): Promise<number> { 622 | if (!this.db) { 623 | throw new Error('Database is not connected') 624 | } 625 | 626 | const now = Math.floor(Date.now() / 1000) 627 | const timestamp = entry.timestamp || now 628 | const content = 629 | typeof entry.content === 'object' 630 | ? 
JSON.stringify(entry.content) 631 | : entry.content 632 | 633 | try { 634 | const result = await this.runAsync( 635 | `INSERT INTO history_entries ( 636 | timestamp, role, content, feature_id, 637 | task_id, action, details 638 | ) VALUES (?, ?, ?, ?, ?, ?, ?)`, 639 | [ 640 | timestamp, 641 | entry.role, 642 | content, 643 | entry.feature_id, 644 | entry.task_id || null, 645 | entry.action || null, 646 | entry.details || null, 647 | ] 648 | ) 649 | 650 | return result.lastID 651 | } catch (error) { 652 | console.error('Error adding history entry:', error) 653 | throw error 654 | } 655 | } 656 | 657 | async deleteHistoryByFeatureId(featureId: string): Promise<boolean> { 658 | if (!this.db) { 659 | throw new Error('Database is not connected') 660 | } 661 | 662 | try { 663 | const result = await this.runAsync( 664 | 'DELETE FROM history_entries WHERE feature_id = ?', 665 | [featureId] 666 | ) 667 | 668 | return result.changes > 0 669 | } catch (error) { 670 | console.error(`Error deleting history for feature ${featureId}:`, error) 671 | throw error 672 | } 673 | } 674 | 675 | // Feature Management 676 | 677 | /** 678 | * Creates a new feature in the database 679 | * @param id The feature ID 680 | * @param description The feature description 681 | * @param projectPath The project path for the feature 682 | * @returns The created feature 683 | */ 684 | async createFeature( 685 | id: string, 686 | description: string, 687 | projectPath: string 688 | ): Promise<{ id: string; description: string; project_path: string }> { 689 | try { 690 | const now = Math.floor(Date.now() / 1000) 691 | 692 | await this.connect() 693 | 694 | await this.runAsync( 695 | `INSERT INTO features (id, description, project_path, created_at, updated_at) 696 | VALUES (?, ?, ?, ?, ?)`, 697 | [id, description, projectPath, now, now] 698 | ) 699 | 700 | await this.close() 701 | 702 | return { id, description, project_path: projectPath } 703 | } catch (error) { 704 | console.error(`Error creating 
feature:`, error) 705 | throw error 706 | } 707 | } 708 | 709 | /** 710 | * Gets a feature by ID 711 | * @param featureId The feature ID 712 | * @returns The feature or null if not found 713 | */ 714 | async getFeatureById(featureId: string): Promise<{ 715 | id: string 716 | description: string 717 | project_path: string | null 718 | status: string 719 | } | null> { 720 | try { 721 | const feature = await this.get( 722 | `SELECT id, description, project_path, status 723 | FROM features 724 | WHERE id = ?`, 725 | [featureId] 726 | ) 727 | 728 | return feature || null 729 | } catch (error) { 730 | console.error(`Error fetching feature ${featureId}:`, error) 731 | return null 732 | } 733 | } 734 | } 735 | 736 | export const databaseService = new DatabaseService() 737 | export default DatabaseService 738 | ``` -------------------------------------------------------------------------------- /src/tools/reviewChanges.ts: -------------------------------------------------------------------------------- ```typescript 1 | // src/tools/reviewChanges.ts 2 | 3 | import { logToFile } from '../lib/logger' // Use specific log functions 4 | import { aiService } from '../services/aiService' 5 | import { promisify } from 'util' 6 | import { exec } from 'child_process' 7 | import crypto from 'crypto' 8 | // Import the correct schema for task list output and other necessary types 9 | import { 10 | Task, 11 | ReviewResponseWithTasksSchema, // Schema for review task output 12 | PlanFeatureResponseSchema, // Schema for initial plan (if used elsewhere) 13 | type Task as AppTask, // Rename if needed to avoid conflict with local Task type/variable 14 | } from '../models/types' 15 | import { z } from 'zod' 16 | import { 17 | parseAndValidateJsonResponse, 18 | processAndFinalizePlan, // We WILL use this now 19 | } from '../lib/llmUtils' 20 | import { 21 | GIT_DIFF_MAX_BUFFER_MB, 22 | GEMINI_MODEL, // Make sure these are imported if needed directly 23 | OPENROUTER_MODEL, // Make sure these are 
imported if needed directly 24 | } from '../config' 25 | import path from 'path' 26 | import { getCodebaseContext } from '../lib/repomixUtils' 27 | import { addHistoryEntry, getHistoryForFeature } from '../lib/dbUtils' 28 | 29 | const execPromise = promisify(exec) 30 | 31 | interface ReviewChangesParams { 32 | featureId: string // Make featureId mandatory 33 | project_path?: string 34 | } 35 | 36 | // Use the standard response type 37 | interface PlanFeatureStandardResponse { 38 | status: 'completed' | 'awaiting_clarification' | 'error' 39 | message: string 40 | featureId: string 41 | taskCount?: number 42 | firstTask?: Task | { description: string; effort: string } // Allow slightly different structure if needed 43 | uiUrl?: string 44 | data?: any // For clarification details or other metadata 45 | } 46 | 47 | interface ReviewChangesResult { 48 | content: Array<{ type: 'text'; text: string }> 49 | isError?: boolean 50 | } 51 | 52 | export async function handleReviewChanges( 53 | params: ReviewChangesParams 54 | ): Promise<ReviewChangesResult> { 55 | const { featureId, project_path } = params 56 | const reviewId = crypto.randomUUID() // Unique ID for this review operation 57 | 58 | logToFile( 59 | `[TaskServer] Handling review_changes request for feature ${featureId} (Review ID: ${reviewId})` 60 | ) 61 | // Wrap initial history logging 62 | try { 63 | await addHistoryEntry(featureId, 'tool_call', { 64 | tool: 'review_changes', 65 | params, 66 | reviewId, 67 | }) 68 | } catch (historyError) { 69 | console.error( 70 | `[TaskServer] Failed to add initial history entry for review: ${historyError}` 71 | ) 72 | // Continue execution even if initial history fails 73 | } 74 | 75 | let targetDir = process.cwd() 76 | 77 | if (project_path) { 78 | // Basic check for path traversal characters 79 | if (project_path.includes('..') || project_path.includes('~')) { 80 | const errorMsg = `Error: Invalid project_path provided: ${project_path}. Path cannot contain '..' 
or '~'.` 81 | await logToFile(`[TaskServer] ${errorMsg}`) 82 | // Try to log error to history before returning 83 | try { 84 | await addHistoryEntry(featureId, 'tool_response', { 85 | tool: 'review_changes', 86 | isError: true, 87 | message: errorMsg, 88 | reviewId, 89 | step: 'invalid_path', 90 | }) 91 | } catch (historyError) { 92 | /* Ignore */ 93 | } 94 | return { content: [{ type: 'text', text: errorMsg }], isError: true } 95 | } 96 | 97 | // Resolve the path and check it's within a reasonable base (e.g., current working directory) 98 | const resolvedPath = path.resolve(project_path) 99 | const cwd = process.cwd() 100 | 101 | // This is a basic check; more robust checks might compare against a known workspace root 102 | if (!resolvedPath.startsWith(cwd)) { 103 | const errorMsg = `Error: Invalid project_path provided: ${project_path}. Path must be within the current workspace.` 104 | await logToFile(`[TaskServer] ${errorMsg}`) 105 | // Try to log error to history before returning 106 | try { 107 | await addHistoryEntry(featureId, 'tool_response', { 108 | tool: 'review_changes', 109 | isError: true, 110 | message: errorMsg, 111 | reviewId, 112 | step: 'invalid_path', 113 | }) 114 | } catch (historyError) { 115 | /* Ignore */ 116 | } 117 | return { content: [{ type: 'text', text: errorMsg }], isError: true } 118 | } 119 | targetDir = resolvedPath 120 | } 121 | 122 | try { 123 | let message: string | null = null 124 | let isError = false 125 | 126 | const reviewModel = aiService.getReviewModel() 127 | if (!reviewModel) { 128 | message = 'Error: Review model not initialized. Check API Key.' 
129 | isError = true 130 | logToFile(`[TaskServer] ${message} (Review ID: ${reviewId})`) 131 | // Wrap history logging 132 | try { 133 | await addHistoryEntry(featureId, 'tool_response', { 134 | tool: 'review_changes', 135 | isError, 136 | message, 137 | reviewId, 138 | }) 139 | } catch (historyError) { 140 | console.error( 141 | `[TaskServer] Failed to add history entry for model init failure: ${historyError}` 142 | ) 143 | } 144 | return { content: [{ type: 'text', text: message }], isError } 145 | } 146 | 147 | // --- Get Codebase Context --- (Keep as is) 148 | const { context: codebaseContext, error: contextError } = 149 | await getCodebaseContext(targetDir, reviewId) 150 | if (contextError) { 151 | message = contextError 152 | isError = true 153 | // Wrap history logging 154 | try { 155 | await addHistoryEntry(featureId, 'tool_response', { 156 | tool: 'review_changes', 157 | isError, 158 | message, 159 | reviewId, 160 | step: 'context_error', 161 | }) 162 | } catch (historyError) { 163 | console.error( 164 | `[TaskServer] Failed to add history entry for context error: ${historyError}` 165 | ) 166 | } 167 | return { content: [{ type: 'text', text: message }], isError } 168 | } 169 | // --- End Codebase Context --- 170 | 171 | // --- Git Diff Execution --- (Keep as is) 172 | let gitDiff = '' 173 | try { 174 | await logToFile( 175 | `[TaskServer] Running git diff HEAD in directory: ${targetDir}... 
(reviewId: ${reviewId})` 176 | ) 177 | // Execute git commands directly in the validated target directory 178 | const diffCmd = `git --no-pager diff HEAD` 179 | const lsFilesCmd = `git ls-files --others --exclude-standard` 180 | const execOptions = { 181 | cwd: targetDir, // Set current working directory for the command 182 | maxBuffer: GIT_DIFF_MAX_BUFFER_MB * 1024 * 1024, 183 | } 184 | 185 | const { stdout: diffStdout, stderr: diffStderr } = await execPromise( 186 | diffCmd, 187 | execOptions 188 | ) 189 | if (diffStderr) { 190 | await logToFile( 191 | `[TaskServer] git diff stderr: ${diffStderr} (reviewId: ${reviewId})` 192 | ) 193 | } 194 | let combinedDiff = diffStdout 195 | 196 | const { stdout: untrackedStdout } = await execPromise( 197 | lsFilesCmd, 198 | execOptions 199 | ) 200 | const untrackedFiles = untrackedStdout 201 | .split('\n') 202 | .map((f: string) => f.trim()) 203 | .filter((f: string) => f.length > 0) 204 | for (const file of untrackedFiles) { 205 | // Ensure the filename itself is not malicious (basic check) 206 | if (file.includes('..') || file.includes('/') || file.includes('\\')) { 207 | await logToFile( 208 | `[TaskServer] Skipping potentially unsafe untracked filename: ${file} (reviewId: ${reviewId})` 209 | ) 210 | continue 211 | } 212 | // For each untracked file, get its diff 213 | const fileDiffCmd = `git --no-pager diff --no-index /dev/null "${file}"` 214 | try { 215 | const { stdout: fileDiff } = await execPromise( 216 | fileDiffCmd, 217 | execOptions 218 | ) 219 | if (fileDiff && fileDiff.trim().length > 0) { 220 | combinedDiff += `\n\n${fileDiff}` 221 | } 222 | } catch (fileDiffErr) { 223 | await logToFile( 224 | `[TaskServer] Error getting diff for untracked file ${file}: ${fileDiffErr} (reviewId: ${reviewId})` 225 | ) 226 | } 227 | } 228 | gitDiff = combinedDiff 229 | if (!gitDiff.trim()) { 230 | message = 'No staged or untracked changes found to review.' 
231 | logToFile(`[TaskServer] ${message} (Review ID: ${reviewId})`) 232 | // Wrap history logging 233 | try { 234 | await addHistoryEntry(featureId, 'tool_response', { 235 | tool: 'review_changes', 236 | isError: false, 237 | message, 238 | reviewId, 239 | status: 'no_changes', 240 | }) 241 | } catch (historyError) { 242 | console.error( 243 | `[TaskServer] Failed to add history entry for no_changes status: ${historyError}` 244 | ) 245 | } 246 | return { content: [{ type: 'text', text: message }] } 247 | } 248 | logToFile( 249 | `[TaskServer] git diff captured (${gitDiff.length} chars). (Review ID: ${reviewId})` 250 | ) 251 | } catch (error: any) { 252 | message = `Error running git diff: ${error.message || error}` // Assign error message 253 | isError = true 254 | logToFile(`[TaskServer] ${message} (Review ID: ${reviewId})`, error) 255 | // Wrap history logging 256 | try { 257 | await addHistoryEntry(featureId, 'tool_response', { 258 | tool: 'review_changes', 259 | isError: true, 260 | message, 261 | reviewId, 262 | step: 'git_diff_error', 263 | }) 264 | } catch (historyError) { 265 | console.error( 266 | `[TaskServer] Failed to add history entry for git diff error: ${historyError}` 267 | ) 268 | } 269 | return { content: [{ type: 'text', text: message }], isError } 270 | } 271 | // --- End Git Diff --- 272 | 273 | // --- LLM Call to Generate Tasks from Review --- 274 | try { 275 | logToFile( 276 | `[TaskServer] Calling LLM for review analysis and task generation... (Review ID: ${reviewId})` 277 | ) 278 | 279 | // Fetch history to get original feature request 280 | let originalFeatureRequest = 'Original feature request not found.' 
281 | try { 282 | const history: any[] = await getHistoryForFeature(featureId, 200) // Fetch more history if needed 283 | const planFeatureCall = history.find( 284 | (entry) => 285 | entry.role === 'tool_call' && 286 | entry.content?.tool === 'plan_feature' && 287 | entry.content?.params?.feature_description 288 | ) 289 | if (planFeatureCall) { 290 | originalFeatureRequest = 291 | planFeatureCall.content.params.feature_description 292 | logToFile( 293 | `[TaskServer] Found original feature request for review context: "${originalFeatureRequest.substring( 294 | 0, 295 | 50 296 | )}..."` 297 | ) 298 | } else { 299 | logToFile( 300 | `[TaskServer] Could not find original plan_feature call in history for feature ${featureId}.` 301 | ) 302 | } 303 | } catch (historyError) { 304 | logToFile( 305 | `[TaskServer] Error fetching history to get original feature request: ${historyError}. Proceeding without it.` 306 | ) 307 | } 308 | 309 | const contextPromptPart = codebaseContext 310 | ? `\n\nCodebase Context Overview:\n\`\`\`\n${codebaseContext}\n\`\`\`\n` 311 | : '\n\n(No overall codebase context was available.)' 312 | 313 | // *** REVISED Prompt: Ask for TASKS based on checklist criteria *** 314 | const structuredPrompt = `You are a senior software engineer performing a code review. 315 | Original Feature Request Context: "${originalFeatureRequest}" 316 | 317 | Review the following code changes (git diff) and consider the overall codebase context (if provided). 318 | Your goal is to identify necessary fixes, improvements, or refactorings based on standard best practices and generate a list of actionable coding tasks for another developer to implement. 319 | 320 | \`\`\`diff 321 | ${gitDiff} 322 | \`\`\` 323 | ${contextPromptPart} 324 | **Review Criteria (Generate tasks based on these):** 325 | 1. **Functionality:** Does the change work? Are there bugs? Handle edge cases & errors? 326 | 2. **Design:** Does it fit the architecture? Is it modular/maintainable (SOLID/DRY)? 
Overly complex? 327 | 3. **Readability:** Is code clear? Are names good? Are comments needed (explaining 'why')? Style consistent? 328 | 4. **Maintainability:** Easy to modify/debug/test? Clean dependencies? 329 | 5. **Performance:** Obvious bottlenecks? 330 | 6. **Security:** Potential vulnerabilities (input validation, etc.)? 331 | 332 | **Output Format:** 333 | Respond ONLY with a single valid JSON object matching this exact schema: 334 | { 335 | "tasks": [ 336 | { 337 | "description": "string // Clear, concise description of the required coding action.", 338 | "effort": "'low' | 'medium' | 'high' // Estimated effort level." 339 | } 340 | // ... include all actionable tasks generated from the review. 341 | // If NO tasks are needed, return an empty array: "tasks": [] 342 | ] 343 | } 344 | 345 | Do NOT include summaries, commentary, or anything outside this JSON structure. Do not use markdown formatting.` 346 | 347 | let llmResponseData: { tasks: Task[] } | null = null 348 | let rawLLMResponse: any = null 349 | 350 | // Call LLM using aiService - Attempt structured output 351 | if ('chat' in reviewModel) { 352 | // OpenRouter 353 | const structuredResult = await aiService.callOpenRouterWithSchema( 354 | OPENROUTER_MODEL, // Use configured or default for review 355 | [{ role: 'user', content: structuredPrompt }], 356 | ReviewResponseWithTasksSchema, // *** USE THE CORRECT SCHEMA HERE *** 357 | { temperature: 0.5 } // Slightly higher temp might be ok for task generation 358 | ) 359 | rawLLMResponse = structuredResult.rawResponse 360 | 361 | if (structuredResult.success) { 362 | llmResponseData = structuredResult.data as { tasks: Task[] } 363 | // Wrap history logging 364 | try { 365 | await addHistoryEntry(featureId, 'model', { 366 | tool: 'review_changes', 367 | reviewId, 368 | response: llmResponseData, 369 | structured: true, 370 | }) 371 | } catch (historyError) { 372 | console.error( 373 | `[TaskServer] Failed to add history entry for successful structured 
OpenRouter response: ${historyError}` 374 | ) 375 | } 376 | } else { 377 | logToFile( 378 | `[TaskServer] Structured review task generation failed (OpenRouter): ${structuredResult.error}. Cannot reliably generate tasks from review.` 379 | ) 380 | message = `Error: AI failed to generate structured tasks based on review: ${structuredResult.error}` 381 | isError = true 382 | // Wrap history logging 383 | try { 384 | await addHistoryEntry(featureId, 'tool_response', { 385 | tool: 'review_changes', 386 | isError, 387 | message, 388 | reviewId, 389 | step: 'llm_structured_fail', 390 | }) 391 | } catch (historyError) { 392 | console.error( 393 | `[TaskServer] Failed to add history entry for OpenRouter structured fail: ${historyError}` 394 | ) 395 | } 396 | return { content: [{ type: 'text', text: message }], isError } 397 | } 398 | } else if ('generateContentStream' in reviewModel) { 399 | // Gemini 400 | const structuredResult = await aiService.callGeminiWithSchema( 401 | process.env.GEMINI_MODEL || 'gemini-1.5-flash-latest', 402 | structuredPrompt, 403 | ReviewResponseWithTasksSchema, // *** USE THE CORRECT SCHEMA HERE *** 404 | { temperature: 0.5 } 405 | ) 406 | rawLLMResponse = structuredResult.rawResponse 407 | 408 | if (structuredResult.success) { 409 | llmResponseData = structuredResult.data as { tasks: Task[] } 410 | // Wrap history logging 411 | try { 412 | await addHistoryEntry(featureId, 'model', { 413 | tool: 'review_changes', 414 | reviewId, 415 | response: llmResponseData, 416 | structured: true, 417 | }) 418 | } catch (historyError) { 419 | console.error( 420 | `[TaskServer] Failed to add history entry for successful structured Gemini response: ${historyError}` 421 | ) 422 | } 423 | } else { 424 | logToFile( 425 | `[TaskServer] Structured review task generation failed (Gemini): ${structuredResult.error}. 
Cannot reliably generate tasks from review.` 426 | ) 427 | message = `Error: AI failed to generate structured tasks based on review: ${structuredResult.error}` 428 | isError = true 429 | // Wrap history logging 430 | try { 431 | await addHistoryEntry(featureId, 'tool_response', { 432 | tool: 'review_changes', 433 | isError, 434 | message, 435 | reviewId, 436 | step: 'llm_structured_fail', 437 | }) 438 | } catch (historyError) { 439 | console.error( 440 | `[TaskServer] Failed to add history entry for Gemini structured fail: ${historyError}` 441 | ) 442 | } 443 | return { content: [{ type: 'text', text: message }], isError } 444 | } 445 | } else { 446 | message = 'Error: Review model does not support structured output.' 447 | isError = true 448 | logToFile(`[TaskServer] ${message} (Review ID: ${reviewId})`) 449 | // Wrap history logging 450 | try { 451 | await addHistoryEntry(featureId, 'tool_response', { 452 | tool: 'review_changes', 453 | isError, 454 | message, 455 | reviewId, 456 | step: 'llm_structured_fail', 457 | }) 458 | } catch (historyError) { 459 | console.error( 460 | `[TaskServer] Failed to add history entry for structured fail: ${historyError}` 461 | ) 462 | } 463 | return { content: [{ type: 'text', text: message }], isError } 464 | } 465 | 466 | // --- Process and Save Generated Tasks --- 467 | if (!llmResponseData || !llmResponseData.tasks) { 468 | message = 'Error: LLM response did not contain a valid task list.' 
469 | isError = true 470 | logToFile(`[TaskServer] ${message} (Review ID: ${reviewId})`) 471 | // Wrap history logging 472 | try { 473 | await addHistoryEntry(featureId, 'tool_response', { 474 | tool: 'review_changes', 475 | isError, 476 | message, 477 | reviewId, 478 | step: 'task_processing_error', 479 | }) 480 | } catch (historyError) { 481 | console.error( 482 | `[TaskServer] Failed to add history entry for task processing error: ${historyError}` 483 | ) 484 | } 485 | return { content: [{ type: 'text', text: message }], isError } 486 | } 487 | 488 | if (llmResponseData.tasks.length === 0) { 489 | message = 490 | 'Code review completed. No immediate action tasks were identified.' 491 | logToFile(`[TaskServer] ${message} (Review ID: ${reviewId})`) 492 | // Wrap history logging 493 | try { 494 | await addHistoryEntry(featureId, 'tool_response', { 495 | tool: 'review_changes', 496 | isError: false, 497 | message, 498 | reviewId, 499 | status: 'no_tasks_generated', 500 | }) 501 | } catch (historyError) { 502 | console.error( 503 | `[TaskServer] Failed to add history entry for no_tasks_generated status: ${historyError}` 504 | ) 505 | } 506 | return { content: [{ type: 'text', text: message }], isError: false } 507 | } 508 | 509 | // Format tasks for processing (like in planFeature) 510 | const rawPlanSteps = llmResponseData.tasks.map( 511 | (task) => `[${task.effort}] ${task.description}` 512 | ) 513 | 514 | logToFile( 515 | `[TaskServer] Generated ${rawPlanSteps.length} tasks from review. Processing... 
(Review ID: ${reviewId})` 516 | ) 517 | 518 | // Process these tasks (effort check, breakdown, save, notify) 519 | // This adds the review-generated tasks to the existing feature plan 520 | const finalTasks = await processAndFinalizePlan( 521 | rawPlanSteps, 522 | reviewModel, // Use the same model for potential breakdown 523 | featureId, 524 | true // Indicate tasks came from review context 525 | ) 526 | 527 | const taskCount = finalTasks.length // Count tasks *added* or processed 528 | const firstNewTask = finalTasks[0] // Get the first task generated by *this* review 529 | 530 | const responseData: PlanFeatureStandardResponse = { 531 | status: 'completed', // Indicates review+task generation is done 532 | // Provide a clear message indicating tasks were *added* from review 533 | message: `Code review complete. Generated ${taskCount} actionable tasks based on the review. ${ 534 | firstNewTask 535 | ? 'First new task: "' + firstNewTask.description + '"' 536 | : '' 537 | } Call 'get_next_task' with featureId '${featureId}' to continue implementation.`, 538 | featureId: featureId, 539 | taskCount: taskCount, 540 | firstTask: firstNewTask 541 | ? { 542 | description: firstNewTask.description || '', 543 | effort: firstNewTask.effort || 'medium', 544 | } 545 | : undefined, // Ensure effort is present 546 | } 547 | 548 | logToFile( 549 | `[TaskServer] Review tasks processed and saved for feature ${featureId}. 
(Review ID: ${reviewId})` 550 | ) 551 | // Wrap history logging 552 | try { 553 | await addHistoryEntry(featureId, 'tool_response', { 554 | tool: 'review_changes', 555 | isError: false, 556 | message: responseData.message, 557 | reviewId, 558 | responseData, 559 | }) 560 | } catch (historyError) { 561 | console.error( 562 | `[TaskServer] Failed to add final success history entry: ${historyError}` 563 | ) 564 | } 565 | 566 | return { 567 | content: [{ type: 'text', text: responseData.message }], 568 | isError: false, 569 | } 570 | } catch (error: any) { 571 | message = `Error occurred during review analysis API call: ${error.message}` 572 | isError = true 573 | logToFile( 574 | `[TaskServer] Error calling LLM review API (Review ID: ${reviewId})`, 575 | error 576 | ) 577 | // Wrap history logging inside the catch block 578 | try { 579 | await addHistoryEntry(featureId, 'tool_response', { 580 | tool: 'review_changes', 581 | isError, 582 | message, 583 | error: error.message, 584 | reviewId, 585 | }) 586 | } catch (historyError) { 587 | console.error( 588 | `[TaskServer] Failed to add error history entry during LLM API call failure: ${historyError}` 589 | ) 590 | } 591 | return { content: [{ type: 'text', text: message }], isError } 592 | } 593 | } catch (error: any) { 594 | // Outer catch already wraps history logging and ignores errors 595 | const errorMsg = `Error processing review_changes request: ${error.message}` 596 | logToFile(`[TaskServer] ${errorMsg} (Review ID: ${reviewId})`, error) 597 | try { 598 | await addHistoryEntry(featureId, 'tool_response', { 599 | tool: 'review_changes', 600 | isError: true, 601 | message: errorMsg, 602 | reviewId, 603 | step: 'preprocessing_error', 604 | }) 605 | } catch (historyError) { 606 | /* Ignore */ 607 | } 608 | return { content: [{ type: 'text', text: errorMsg }], isError: true } 609 | } 610 | } 611 | ``` -------------------------------------------------------------------------------- /src/services/webSocketService.ts: 
-------------------------------------------------------------------------------- ```typescript 1 | import { WebSocket, WebSocketServer } from 'ws' 2 | import { UI_PORT } from '../config' 3 | import { logToFile } from '../lib/logger' 4 | import { 5 | WebSocketMessage, 6 | WebSocketMessageType, 7 | ClientRegistrationPayload, 8 | ErrorPayload, 9 | ShowQuestionPayload, 10 | QuestionResponsePayload, 11 | PlanFeatureResponseSchema, 12 | IntermediatePlanningState, 13 | } from '../models/types' 14 | import planningStateService from '../services/planningStateService' 15 | import { aiService } from '../services/aiService' 16 | import { OPENROUTER_MODEL, GEMINI_MODEL } from '../config' 17 | import { addHistoryEntry } from '../lib/dbUtils' 18 | import crypto from 'crypto' 19 | import { 20 | processAndBreakdownTasks, 21 | ensureEffortRatings, 22 | processAndFinalizePlan, 23 | } from '../lib/llmUtils' 24 | import OpenAI from 'openai' 25 | import { GenerativeModel } from '@google/generative-ai' 26 | import { z } from 'zod' 27 | import { databaseService } from '../services/databaseService' 28 | 29 | interface WebSocketConnection { 30 | socket: WebSocket 31 | featureId?: string 32 | clientId?: string 33 | lastActivity: Date 34 | } 35 | 36 | class WebSocketService { 37 | private wss: WebSocketServer | null = null 38 | private connections: Map<WebSocket, WebSocketConnection> = new Map() 39 | private static instance: WebSocketService 40 | private isInitialized = false 41 | 42 | private constructor() {} 43 | 44 | /** 45 | * Returns the singleton instance of WebSocketService 46 | */ 47 | public static getInstance(): WebSocketService { 48 | if (!WebSocketService.instance) { 49 | WebSocketService.instance = new WebSocketService() 50 | } 51 | return WebSocketService.instance 52 | } 53 | 54 | /** 55 | * Initializes the WebSocket server using an existing HTTP server 56 | * 57 | * @param httpServer The Node.js HTTP server instance from Express 58 | */ 59 | public async initialize(httpServer: 
import('http').Server): Promise<void> { 60 | if (this.isInitialized) { 61 | await logToFile( 62 | '[WebSocketService] WebSocket server already initialized.' 63 | ) 64 | return 65 | } 66 | 67 | try { 68 | // Attach WebSocket server to the existing HTTP server 69 | this.wss = new WebSocketServer({ server: httpServer }) 70 | 71 | // Use UI_PORT for logging consistency if needed 72 | await logToFile( 73 | `[WebSocketService] WebSocket server attached to HTTP server on port ${UI_PORT}` 74 | ) 75 | 76 | this.wss.on('connection', this.handleConnection.bind(this)) 77 | this.wss.on('error', this.handleServerError.bind(this)) 78 | 79 | // Set up connection cleanup interval (runs every minute) 80 | setInterval(this.cleanupInactiveConnections.bind(this), 60000) 81 | 82 | this.isInitialized = true 83 | } catch (error) { 84 | await logToFile( 85 | `[WebSocketService] Failed to initialize WebSocket server: ${error}` 86 | ) 87 | throw error 88 | } 89 | } 90 | 91 | /** 92 | * Handles new WebSocket connections 93 | */ 94 | private handleConnection(socket: WebSocket, _request: any): void { 95 | // Create a new connection entry 96 | const connection: WebSocketConnection = { 97 | socket, 98 | lastActivity: new Date(), 99 | } 100 | 101 | this.connections.set(socket, connection) 102 | 103 | logToFile( 104 | `[WebSocketService] New client connected. 
Total connections: ${this.connections.size}` 105 | ) 106 | 107 | // Send a connection established message 108 | this.sendToSocket(socket, { 109 | type: 'connection_established', 110 | }) 111 | 112 | // Set up event listeners for the socket 113 | socket.on('message', (data: Buffer) => this.handleMessage(socket, data)) 114 | socket.on('close', () => this.handleDisconnect(socket)) 115 | socket.on('error', (error) => this.handleSocketError(socket, error)) 116 | } 117 | 118 | /** 119 | * Handles incoming WebSocket messages 120 | */ 121 | private handleMessage(socket: WebSocket, data: Buffer): void { 122 | try { 123 | // Update last activity timestamp 124 | const connection = this.connections.get(socket) 125 | if (connection) { 126 | connection.lastActivity = new Date() 127 | } 128 | 129 | // Parse the message 130 | const message = JSON.parse(data.toString()) as WebSocketMessage 131 | 132 | // Handle client registration 133 | if (message.type === 'client_registration' && message.payload) { 134 | this.handleClientRegistration( 135 | socket, 136 | message.payload as ClientRegistrationPayload 137 | ) 138 | return 139 | } 140 | 141 | // Handle question response 142 | if (message.type === 'question_response' && message.payload) { 143 | this.handleQuestionResponse( 144 | message.featureId || '', 145 | message.payload as QuestionResponsePayload 146 | ) 147 | return 148 | } 149 | 150 | // Log the message type 151 | logToFile(`[WebSocketService] Received message of type: ${message.type}`) 152 | 153 | // Additional message handling logic can be added here 154 | } catch (error) { 155 | logToFile(`[WebSocketService] Error handling message: ${error}`) 156 | this.sendToSocket(socket, { 157 | type: 'error', 158 | payload: { 159 | code: 'MESSAGE_PARSING_ERROR', 160 | message: 'Failed to parse incoming message', 161 | } as ErrorPayload, 162 | }) 163 | } 164 | } 165 | 166 | /** 167 | * Handles client registration messages 168 | */ 169 | private handleClientRegistration( 170 | socket: 
WebSocket, 171 | payload: ClientRegistrationPayload 172 | ): void { 173 | const connection = this.connections.get(socket) 174 | 175 | if (connection) { 176 | connection.featureId = payload.featureId 177 | connection.clientId = payload.clientId || `client-${Date.now()}` 178 | 179 | logToFile( 180 | `[WebSocketService] Client registered: ${connection.clientId} for feature: ${connection.featureId}` 181 | ) 182 | 183 | // Confirm registration to the client 184 | this.sendToSocket(socket, { 185 | type: 'client_registration', 186 | featureId: connection.featureId, 187 | payload: { 188 | featureId: connection.featureId, 189 | clientId: connection.clientId, 190 | }, 191 | }) 192 | } 193 | } 194 | 195 | /** 196 | * Handles socket disconnections 197 | */ 198 | private handleDisconnect(socket: WebSocket): void { 199 | const connection = this.connections.get(socket) 200 | 201 | if (connection) { 202 | logToFile( 203 | `[WebSocketService] Client disconnected: ${ 204 | connection.clientId || 'unknown' 205 | }` 206 | ) 207 | this.connections.delete(socket) 208 | } 209 | } 210 | 211 | /** 212 | * Handles socket errors 213 | */ 214 | private handleSocketError(socket: WebSocket, error: Error): void { 215 | const connection = this.connections.get(socket) 216 | 217 | logToFile( 218 | `[WebSocketService] Socket error for client ${ 219 | connection?.clientId || 'unknown' 220 | }: ${error.message}` 221 | ) 222 | 223 | // Try to send an error message to the client 224 | this.sendToSocket(socket, { 225 | type: 'error', 226 | payload: { 227 | code: 'SOCKET_ERROR', 228 | message: 'Socket error occurred', 229 | } as ErrorPayload, 230 | }) 231 | 232 | // Close the connection after an error 233 | try { 234 | socket.terminate() 235 | } catch (closeError) { 236 | logToFile(`[WebSocketService] Error closing socket: ${closeError}`) 237 | } 238 | 239 | // Remove the connection from our map 240 | this.connections.delete(socket) 241 | } 242 | 243 | /** 244 | * Handles server errors 245 | */ 246 | 
private async handleServerError(error: Error): Promise<void> { 247 | await logToFile( 248 | `[WebSocketService] WebSocket server error: ${error.message}` 249 | ) 250 | } 251 | 252 | /** 253 | * Cleans up inactive connections 254 | */ 255 | private cleanupInactiveConnections(): void { 256 | const now = new Date() 257 | const inactivityThreshold = 30 * 60 * 1000 // 30 minutes 258 | 259 | for (const [socket, connection] of this.connections.entries()) { 260 | const timeSinceLastActivity = 261 | now.getTime() - connection.lastActivity.getTime() 262 | 263 | if (timeSinceLastActivity > inactivityThreshold) { 264 | logToFile( 265 | `[WebSocketService] Closing inactive connection: ${ 266 | connection.clientId || 'unknown' 267 | }` 268 | ) 269 | 270 | try { 271 | socket.terminate() 272 | } catch (error) { 273 | logToFile( 274 | `[WebSocketService] Error terminating inactive socket: ${error}` 275 | ) 276 | } 277 | 278 | this.connections.delete(socket) 279 | } 280 | } 281 | } 282 | 283 | /** 284 | * Sends a message to all connected clients for a specific feature 285 | */ 286 | public broadcast(message: WebSocketMessage): void { 287 | if (!message.featureId) { 288 | logToFile('[WebSocketService] Cannot broadcast without featureId') 289 | return 290 | } 291 | 292 | let recipientCount = 0 293 | 294 | for (const [socket, connection] of this.connections.entries()) { 295 | // Only send to clients registered for this feature 296 | if (connection.featureId === message.featureId) { 297 | this.sendToSocket(socket, message) 298 | recipientCount++ 299 | } 300 | } 301 | 302 | logToFile( 303 | `[WebSocketService] Broadcast message of type '${message.type}' to ${recipientCount} clients for feature: ${message.featureId}` 304 | ) 305 | } 306 | 307 | /** 308 | * Sends a message to a specific socket 309 | */ 310 | private sendToSocket(socket: WebSocket, message: WebSocketMessage): void { 311 | if (socket.readyState === WebSocket.OPEN) { 312 | try { 313 | socket.send(JSON.stringify(message)) 314 
| } catch (error) { 315 | logToFile( 316 | `[WebSocketService] Error sending message to socket: ${error}` 317 | ) 318 | } 319 | } 320 | } 321 | 322 | /** 323 | * Gracefully shutdowns the WebSocket server 324 | */ 325 | public async shutdown(): Promise<void> { 326 | if (!this.wss) { 327 | return 328 | } 329 | 330 | await logToFile('[WebSocketService] Shutting down WebSocket server...') 331 | 332 | // Close all connections 333 | for (const [socket] of this.connections.entries()) { 334 | try { 335 | socket.terminate() 336 | } catch (error) { 337 | await logToFile( 338 | `[WebSocketService] Error terminating socket during shutdown: ${error}` 339 | ) 340 | } 341 | } 342 | 343 | this.connections.clear() 344 | 345 | // Close the server 346 | this.wss.close((error) => { 347 | if (error) { 348 | logToFile(`[WebSocketService] Error closing WebSocket server: ${error}`) 349 | } else { 350 | logToFile('[WebSocketService] WebSocket server closed successfully') 351 | } 352 | }) 353 | 354 | this.wss = null 355 | this.isInitialized = false 356 | } 357 | 358 | /** 359 | * Broadcasts a task update notification for a feature 360 | */ 361 | public notifyTasksUpdated(featureId: string, tasks: any): void { 362 | // Log tasks to help debug 363 | console.log( 364 | 'WebSocketService.notifyTasksUpdated - Task fromReview values:', 365 | tasks.map((t: any) => ({ id: t.id, fromReview: !!t.fromReview })) 366 | ) 367 | 368 | // Make sure fromReview is properly formatted for all tasks 369 | const formattedTasks = tasks.map((task: any) => ({ 370 | ...task, 371 | fromReview: task.fromReview === 1 ? 
true : !!task.fromReview, 372 | })) 373 | 374 | this.broadcast({ 375 | type: 'tasks_updated', 376 | featureId, 377 | payload: { 378 | tasks: formattedTasks, 379 | updatedAt: new Date().toISOString(), 380 | }, 381 | }) 382 | } 383 | 384 | /** 385 | * Broadcasts a task status change notification 386 | */ 387 | public notifyTaskStatusChanged( 388 | featureId: string, 389 | taskId: string, 390 | status: 'pending' | 'completed' | 'decomposed' 391 | ): void { 392 | this.broadcast({ 393 | type: 'status_changed', 394 | featureId, 395 | payload: { 396 | taskId, 397 | status, 398 | updatedAt: new Date().toISOString(), 399 | }, 400 | }) 401 | } 402 | 403 | /** 404 | * Broadcasts a notification when a task is created 405 | */ 406 | public notifyTaskCreated(featureId: string, task: any): void { 407 | // Make sure fromReview is properly formatted 408 | const formattedTask = { 409 | ...task, 410 | fromReview: task.fromReview === 1 ? true : !!task.fromReview, 411 | } 412 | 413 | this.broadcast({ 414 | type: 'task_created', 415 | featureId, 416 | payload: { 417 | task: formattedTask, 418 | featureId, 419 | createdAt: new Date().toISOString(), 420 | }, 421 | }) 422 | 423 | logToFile( 424 | `[WebSocketService] Broadcasted task_created for task ID: ${task.id}` 425 | ) 426 | } 427 | 428 | /** 429 | * Broadcasts a notification when a task is updated 430 | */ 431 | public notifyTaskUpdated(featureId: string, task: any): void { 432 | // Make sure fromReview is properly formatted 433 | const formattedTask = { 434 | ...task, 435 | fromReview: task.fromReview === 1 ? 
true : !!task.fromReview, 436 | } 437 | 438 | this.broadcast({ 439 | type: 'task_updated', 440 | featureId, 441 | payload: { 442 | task: formattedTask, 443 | featureId, 444 | updatedAt: new Date().toISOString(), 445 | }, 446 | }) 447 | 448 | logToFile( 449 | `[WebSocketService] Broadcasted task_updated for task ID: ${task.id}` 450 | ) 451 | } 452 | 453 | /** 454 | * Broadcasts a notification when a task is deleted 455 | */ 456 | public notifyTaskDeleted(featureId: string, taskId: string): void { 457 | this.broadcast({ 458 | type: 'task_deleted', 459 | featureId, 460 | payload: { 461 | taskId, 462 | featureId, 463 | deletedAt: new Date().toISOString(), 464 | }, 465 | }) 466 | 467 | logToFile( 468 | `[WebSocketService] Broadcasted task_deleted for task ID: ${taskId}` 469 | ) 470 | } 471 | 472 | /** 473 | * Sends a question to UI clients 474 | */ 475 | public sendQuestion( 476 | featureId: string, 477 | questionId: string, 478 | question: string, 479 | options?: string[], 480 | allowsText?: boolean 481 | ): void { 482 | try { 483 | if (!featureId || !questionId || !question) { 484 | logToFile( 485 | '[WebSocketService] Cannot send question: Missing required parameters' 486 | ) 487 | return 488 | } 489 | 490 | // Check if any clients are connected for this feature 491 | let featureClients = 0 492 | for (const connection of this.connections.values()) { 493 | if (connection.featureId === featureId) { 494 | featureClients++ 495 | } 496 | } 497 | 498 | // Log if no clients are available 499 | if (featureClients === 0) { 500 | logToFile( 501 | `[WebSocketService] Warning: Sending question ${questionId} to feature ${featureId} with no connected clients` 502 | ) 503 | } 504 | 505 | this.broadcast({ 506 | type: 'show_question', 507 | featureId, 508 | payload: { 509 | questionId, 510 | question, 511 | options, 512 | allowsText, 513 | } as ShowQuestionPayload, 514 | }) 515 | 516 | logToFile( 517 | `[WebSocketService] Sent question to ${featureClients} clients for feature 
${featureId}: ${question}` 518 | ) 519 | } catch (error: any) { 520 | logToFile(`[WebSocketService] Error sending question: ${error.message}`) 521 | } 522 | } 523 | 524 | /** 525 | * Requests a screenshot from UI clients 526 | */ 527 | public requestScreenshot( 528 | featureId: string, 529 | requestId: string, 530 | target?: string 531 | ): void { 532 | this.broadcast({ 533 | type: 'request_screenshot', 534 | featureId, 535 | payload: { 536 | requestId, 537 | target, 538 | }, 539 | }) 540 | } 541 | 542 | /** 543 | * Handles user responses to questions 544 | */ 545 | private async handleQuestionResponse( 546 | featureId: string, 547 | payload: QuestionResponsePayload 548 | ): Promise<void> { 549 | try { 550 | if (!featureId) { 551 | logToFile( 552 | '[WebSocketService] Cannot handle question response: Missing featureId' 553 | ) 554 | return 555 | } 556 | 557 | const { questionId, response } = payload 558 | 559 | if (!questionId) { 560 | logToFile( 561 | '[WebSocketService] Cannot handle question response: Missing questionId' 562 | ) 563 | this.broadcast({ 564 | type: 'error', 565 | featureId, 566 | payload: { 567 | code: 'INVALID_RESPONSE', 568 | message: 'Invalid response format: missing questionId', 569 | } as ErrorPayload, 570 | }) 571 | return 572 | } 573 | 574 | logToFile( 575 | `[WebSocketService] Received response to question ${questionId}: ${response}` 576 | ) 577 | 578 | // Get the stored planning state 579 | const state = await planningStateService.getStateByQuestionId(questionId) 580 | 581 | if (!state) { 582 | logToFile( 583 | `[WebSocketService] No planning state found for question ${questionId}` 584 | ) 585 | this.broadcast({ 586 | type: 'error', 587 | featureId, 588 | payload: { 589 | code: 'QUESTION_EXPIRED', 590 | message: 'The question session has expired or is invalid.', 591 | } as ErrorPayload, 592 | }) 593 | return 594 | } 595 | 596 | // Verify feature ID matches 597 | if (state.featureId !== featureId) { 598 | logToFile( 599 | 
`[WebSocketService] Feature ID mismatch: question belongs to ${state.featureId}, but response came from ${featureId}` 600 | ) 601 | this.broadcast({ 602 | type: 'error', 603 | featureId, 604 | payload: { 605 | code: 'FEATURE_MISMATCH', 606 | message: 607 | 'Response came from a different feature than the question.', 608 | } as ErrorPayload, 609 | }) 610 | return 611 | } 612 | 613 | // Add the response to history 614 | await addHistoryEntry(featureId, 'user', { 615 | questionId, 616 | question: state.partialResponse, 617 | response, 618 | }) 619 | 620 | // Notify UI that response is being processed 621 | this.broadcast({ 622 | type: 'status_changed', 623 | featureId, 624 | payload: { 625 | status: 'processing_response', 626 | questionId, 627 | }, 628 | }) 629 | 630 | // Resume planning/adjustment with the user's response 631 | try { 632 | const planningModel = aiService.getPlanningModel() 633 | if (!planningModel) { 634 | throw new Error('Planning model not available') 635 | } 636 | 637 | logToFile( 638 | `[WebSocketService] Resuming ${state.planningType} with user response for feature ${featureId}` 639 | ) 640 | 641 | // Fetch feature information from database 642 | await databaseService.connect() 643 | const feature = await databaseService.getFeatureById(featureId) 644 | await databaseService.close() 645 | 646 | if (!feature) { 647 | throw new Error(`Feature with ID ${featureId} not found`) 648 | } 649 | 650 | // Get previous history entries for context 651 | await databaseService.connect() 652 | const history = await databaseService.getHistoryByFeatureId( 653 | featureId, 654 | 10 655 | ) 656 | await databaseService.close() 657 | 658 | // Extract original feature description 659 | const originalDescription = 660 | feature.description || 'Unknown feature description' 661 | 662 | // Create a comprehensive follow-up prompt with complete context 663 | let followUpPrompt = `You previously received this feature request: "${originalDescription}" 664 | 665 | When 
planning this feature implementation, you asked for clarification with this question: 666 | ${state.partialResponse} 667 | 668 | The user has now provided this answer to your question: "${response}" 669 | ` 670 | 671 | // Call the LLM with the comprehensive follow-up prompt 672 | if (planningModel instanceof OpenAI) { 673 | await this.processOpenRouterResumeResponse( 674 | planningModel, 675 | followUpPrompt, 676 | state, 677 | featureId, 678 | questionId 679 | ) 680 | } else { 681 | await this.processGeminiResumeResponse( 682 | planningModel, 683 | followUpPrompt, 684 | state, 685 | featureId, 686 | questionId 687 | ) 688 | } 689 | } catch (error: any) { 690 | logToFile( 691 | `[WebSocketService] Error resuming planning: ${error.message}` 692 | ) 693 | 694 | // Notify clients of the error 695 | this.broadcast({ 696 | type: 'error', 697 | featureId, 698 | payload: { 699 | code: 'RESUME_PLANNING_FAILED', 700 | message: `Failed to process your response: ${error.message}`, 701 | } as ErrorPayload, 702 | }) 703 | 704 | // Add error history entry 705 | await addHistoryEntry(featureId, 'tool_response', { 706 | tool: 707 | state.planningType === 'feature_planning' 708 | ? 
'plan_feature' 709 | : 'adjust_plan', 710 | status: 'failed_after_clarification', 711 | error: error.message, 712 | }) 713 | } 714 | } catch (error: any) { 715 | logToFile( 716 | `[WebSocketService] Unhandled error in question response handler: ${error.message}` 717 | ) 718 | if (featureId) { 719 | this.broadcast({ 720 | type: 'error', 721 | featureId, 722 | payload: { 723 | code: 'INTERNAL_ERROR', 724 | message: 725 | 'An internal error occurred while processing your response.', 726 | } as ErrorPayload, 727 | }) 728 | } 729 | } 730 | } 731 | 732 | /** 733 | * Process OpenRouter response for resuming planning after clarification 734 | */ 735 | private async processOpenRouterResumeResponse( 736 | model: OpenAI, 737 | prompt: string, 738 | state: IntermediatePlanningState, 739 | featureId: string, 740 | questionId: string 741 | ): Promise<void> { 742 | try { 743 | logToFile( 744 | `[WebSocketService] Calling OpenRouter with follow-up prompt for feature ${featureId}` 745 | ) 746 | 747 | const result = await aiService.callOpenRouterWithSchema( 748 | OPENROUTER_MODEL, 749 | [{ role: 'user', content: prompt }], 750 | PlanFeatureResponseSchema, 751 | { temperature: 0.3, max_tokens: 4096 } 752 | ) 753 | 754 | if (result.success) { 755 | await this.processPlanningSuccess( 756 | result.data, 757 | model, 758 | state, 759 | featureId, 760 | questionId 761 | ) 762 | } else { 763 | throw new Error( 764 | `OpenRouter failed to generate a valid response: ${result.error}` 765 | ) 766 | } 767 | } catch (error: any) { 768 | logToFile( 769 | `[WebSocketService] Error in OpenRouter response processing: ${error.message}` 770 | ) 771 | throw error // Re-throw to be handled by the caller 772 | } 773 | } 774 | 775 | /** 776 | * Process Gemini response for resuming planning after clarification 777 | */ 778 | private async processGeminiResumeResponse( 779 | model: GenerativeModel, 780 | prompt: string, 781 | state: IntermediatePlanningState, 782 | featureId: string, 783 | questionId: string 
784 | ): Promise<void> { 785 | try { 786 | logToFile( 787 | `[WebSocketService] Calling Gemini with follow-up prompt for feature ${featureId}` 788 | ) 789 | 790 | const result = await aiService.callGeminiWithSchema( 791 | GEMINI_MODEL, 792 | prompt, 793 | PlanFeatureResponseSchema, 794 | { temperature: 0.3 } 795 | ) 796 | 797 | if (result.success) { 798 | await this.processPlanningSuccess( 799 | result.data, 800 | model, 801 | state, 802 | featureId, 803 | questionId 804 | ) 805 | } else { 806 | throw new Error( 807 | `Gemini failed to generate a valid response: ${result.error}` 808 | ) 809 | } 810 | } catch (error: any) { 811 | logToFile( 812 | `[WebSocketService] Error in Gemini response processing: ${error.message}` 813 | ) 814 | throw error // Re-throw to be handled by the caller 815 | } 816 | } 817 | 818 | /** 819 | * Process successful planning result after clarification 820 | */ 821 | private async processPlanningSuccess( 822 | data: z.infer<typeof PlanFeatureResponseSchema>, 823 | model: GenerativeModel | OpenAI, 824 | state: IntermediatePlanningState, 825 | featureId: string, 826 | questionId: string 827 | ): Promise<void> { 828 | try { 829 | // Check if tasks exist before processing 830 | if (!data.tasks) { 831 | logToFile( 832 | `[WebSocketService] Error: processPlanningSuccess called but response contained clarificationNeeded instead of tasks for feature ${featureId}` 833 | ) 834 | // Optionally, you could try to handle the clarification again here, but throwing seems safer 835 | throw new Error( 836 | 'processPlanningSuccess received clarification request, expected tasks.' 
837 | ) 838 | } 839 | 840 | // Process the tasks using schema data 841 | const rawPlanSteps = data.tasks.map( 842 | (task: { effort: string; description: string }) => 843 | `[${task.effort}] ${task.description}` 844 | ) 845 | 846 | // Log the result before detailed processing 847 | await addHistoryEntry(featureId, 'model', { 848 | step: 'resumed_planning_response', 849 | response: JSON.stringify(data), 850 | }) 851 | 852 | logToFile( 853 | `[WebSocketService] Got ${rawPlanSteps.length} raw tasks after clarification for feature ${featureId}` 854 | ) 855 | 856 | // Call the centralized processing function 857 | const finalTasks = await processAndFinalizePlan( 858 | rawPlanSteps, 859 | model, 860 | featureId 861 | ) 862 | 863 | logToFile( 864 | `[WebSocketService] Processed ${finalTasks.length} final tasks after clarification for feature ${featureId}` 865 | ) 866 | 867 | // Clean up the temporary state 868 | await planningStateService.clearState(questionId) 869 | 870 | // Add success history entry (notification/saving is now handled within processAndFinalizePlan) 871 | await addHistoryEntry(featureId, 'tool_response', { 872 | tool: 873 | state.planningType === 'feature_planning' 874 | ? 
'plan_feature' 875 | : 'adjust_plan', 876 | status: 'completed_after_clarification', 877 | taskCount: finalTasks.length, 878 | }) 879 | 880 | logToFile( 881 | `[WebSocketService] Successfully completed ${state.planningType} after clarification for feature ${featureId}` 882 | ) 883 | } catch (error: any) { 884 | logToFile( 885 | `[WebSocketService] Error processing successful planning result: ${error.message}` 886 | ) 887 | throw error // Re-throw to be handled by the caller 888 | } 889 | } 890 | } 891 | 892 | export default WebSocketService.getInstance() 893 | ``` -------------------------------------------------------------------------------- /src/server.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js' 2 | import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js' 3 | import { z } from 'zod' 4 | import * as fsSync from 'fs' 5 | import * as fs from 'fs/promises' 6 | import { logToFile } from './lib/logger' 7 | import logger from './lib/winstonLogger' 8 | import { handleMarkTaskComplete } from './tools/markTaskComplete' 9 | import { handlePlanFeature } from './tools/planFeature' 10 | import { handleReviewChanges } from './tools/reviewChanges' 11 | import { AdjustPlanInputSchema, AdjustPlanInput } from './models/types' 12 | import { adjustPlanHandler } from './tools/adjustPlan' 13 | import webSocketService from './services/webSocketService' 14 | import planningStateService from './services/planningStateService' 15 | // Re-add static imports 16 | import express, { Request, Response, NextFunction } from 'express' 17 | import path from 'path' 18 | import crypto from 'crypto' 19 | 20 | import { FEATURE_TASKS_DIR, UI_PORT } from './config' 21 | import { Task } from './models/types' 22 | import { detectClarificationRequest } from './lib/llmUtils' 23 | import { databaseService } from './services/databaseService' 24 | import { 
addHistoryEntry } from './lib/dbUtils' 25 | 26 | // Immediately log that we're starting up 27 | logger.info('Starting task manager server...') 28 | 29 | // --- MCP Server Setup --- 30 | logger.info('Setting up MCP Server instance...') 31 | const server = new McpServer({ 32 | name: 'task-manager-mcp', 33 | version: '0.6.3', 34 | description: 35 | 'MCP Server using Google AI SDK and repomix for planning and review.', 36 | capabilities: { 37 | tools: { listChanged: false }, 38 | }, 39 | }) 40 | logger.info('MCP Server instance created.') 41 | 42 | // --- Tool Definitions --- 43 | logger.info('Defining tools...') 44 | 45 | // New 'get_next_task' tool 46 | server.tool( 47 | 'get_next_task', 48 | { 49 | featureId: z 50 | .string() 51 | .uuid({ message: 'Valid feature ID (UUID) is required.' }), 52 | }, 53 | async (args, _extra) => { 54 | try { 55 | const { featureId } = args 56 | await logToFile( 57 | `[TaskServer] Handling get_next_task request for feature: ${featureId}` 58 | ) 59 | 60 | // 1. Read tasks for the given feature ID 61 | await databaseService.connect() 62 | const tasks = await databaseService.getTasksByFeatureId(featureId) 63 | await databaseService.close() 64 | 65 | if (tasks.length === 0) { 66 | const message = `No tasks found for feature ID ${featureId}. The feature may not exist or has not been planned yet.` 67 | await logToFile(`[TaskServer] ${message}`) 68 | return { 69 | content: [{ type: 'text', text: message }], 70 | isError: false, 71 | } 72 | } 73 | 74 | // 2. Find the first pending task in the list 75 | const nextTask = tasks.find((task) => task.status === 'pending') 76 | 77 | if (!nextTask) { 78 | const message = `All tasks have been completed for feature ${featureId}.` 79 | await logToFile(`[TaskServer] ${message}`) 80 | return { 81 | content: [{ type: 'text', text: message }], 82 | isError: false, 83 | } 84 | } 85 | 86 | // 3. 
Format response with task details 87 | // Include effort in the message if available 88 | const effortInfo = nextTask.effort ? ` (Effort: ${nextTask.effort})` : '' 89 | 90 | // Include parent info if this is a subtask 91 | let parentInfo = '' 92 | if (nextTask.parent_task_id) { 93 | // Find the parent task 94 | const parentTask = tasks.find((t) => t.id === nextTask.parent_task_id) 95 | if (parentTask) { 96 | const parentDesc = 97 | parentTask.description && parentTask.description.length > 30 98 | ? parentTask.description.substring(0, 30) + '...' 99 | : parentTask.description || '' // Use empty string if description is undefined 100 | parentInfo = ` (Subtask of: "${parentDesc}")` 101 | } else { 102 | parentInfo = ` (Subtask of parent ID: ${nextTask.parent_task_id})` // Fallback if parent not found 103 | } 104 | } 105 | 106 | // Embed ID, description, effort, and parent info in the text message 107 | const message = `Next pending task (ID: ${ 108 | nextTask.id 109 | })${effortInfo}${parentInfo}: ${ 110 | nextTask.title !== nextTask.description 111 | ? `${nextTask.title}: ${nextTask.description}` 112 | : nextTask.description 113 | }` 114 | 115 | await logToFile(`[TaskServer] Found next task: ${nextTask.id}`) 116 | 117 | return { 118 | content: [{ type: 'text', text: message }], 119 | isError: false, 120 | } 121 | } catch (error: any) { 122 | const errorMsg = `Error processing get_next_task request: ${ 123 | error instanceof Error ? error.message : String(error) 124 | }` 125 | logger.error(errorMsg) 126 | await logToFile(`[TaskServer] ${errorMsg}`) 127 | 128 | return { 129 | content: [{ type: 'text', text: errorMsg }], 130 | isError: true, 131 | } 132 | } 133 | } 134 | ) 135 | 136 | // 1. Tool: mark_task_complete 137 | server.tool( 138 | 'mark_task_complete', 139 | { 140 | task_id: z.string().uuid({ message: 'Valid task ID (UUID) is required.' }), 141 | feature_id: z 142 | .string() 143 | .uuid({ message: 'Valid feature ID (UUID) is required.' 
}), 144 | }, 145 | async (args, _extra) => { 146 | const result = await handleMarkTaskComplete(args) 147 | // Transform the content to match SDK expected format 148 | return { 149 | content: result.content.map((item) => ({ 150 | type: item.type as 'text', 151 | text: item.text, 152 | })), 153 | isError: result.isError, 154 | } 155 | } 156 | ) 157 | 158 | // 2. Tool: plan_feature 159 | server.tool( 160 | 'plan_feature', 161 | { 162 | feature_description: z.string().min(10, { 163 | message: 'Feature description must be at least 10 characters.', 164 | }), 165 | project_path: z 166 | .string() 167 | .describe( 168 | 'The absolute path to the project directory to scan with repomix. ' 169 | ), 170 | }, 171 | async (args, _extra) => { 172 | const result = await handlePlanFeature(args) 173 | // Transform the content to match SDK expected format 174 | // Since handlePlanFeature now always returns Array<{type: 'text', text: string}> 175 | // we can use a simple map. 176 | return { 177 | content: result.content.map((item) => ({ 178 | type: 'text', 179 | text: item.text, 180 | })), 181 | isError: result.isError, 182 | } 183 | } 184 | ) 185 | 186 | // 3. Tool: review_changes 187 | server.tool( 188 | 'review_changes', 189 | { 190 | project_path: z 191 | .string() 192 | .optional() 193 | .describe( 194 | 'The absolute path to the project directory where git commands should run. Defaults to the workspace root if not provided.' 195 | ), 196 | featureId: z 197 | .string() 198 | .uuid({ message: 'Valid feature ID (UUID) is required.' 
}), 199 | }, 200 | async (args, _extra) => { 201 | // Pass the project_path argument to the handler 202 | const result = await handleReviewChanges({ 203 | featureId: args.featureId, 204 | project_path: args.project_path, 205 | }) 206 | // Transform the content to match SDK expected format 207 | return { 208 | content: result.content.map((item) => ({ 209 | type: item.type as 'text', 210 | text: item.text, 211 | })), 212 | isError: result.isError, 213 | } 214 | } 215 | ) 216 | 217 | // 4. Tool: adjust_plan 218 | server.tool( 219 | 'adjust_plan', 220 | { 221 | featureId: z 222 | .string() 223 | .uuid({ message: 'Valid feature ID (UUID) is required.' }), 224 | adjustment_request: z 225 | .string() 226 | .min(1, { message: 'Adjustment request cannot be empty.' }), 227 | }, 228 | async (args: AdjustPlanInput, _extra) => { 229 | const result = await adjustPlanHandler(args) 230 | 231 | return { 232 | content: [{ type: 'text', text: result.message }], 233 | isError: result.status === 'error', 234 | } 235 | } 236 | ) 237 | 238 | logger.info('Tools defined.') 239 | 240 | // --- Error Handlers --- 241 | // Add top-level error handler for synchronous errors during load 242 | process.on('uncaughtException', (error) => { 243 | // Cannot reliably use async logToFile here 244 | logger.error('Uncaught Exception:', error) 245 | // Use synchronous append for critical errors before exit 246 | try { 247 | const logDir = process.env.LOG_DIR || './logs' 248 | const logFile = process.env.LOG_FILE || `${logDir}/debug.log` 249 | fsSync.mkdirSync(logDir, { recursive: true }) 250 | fsSync.appendFileSync( 251 | logFile, 252 | `${new Date().toISOString()} - [TaskServer] FATAL: Uncaught Exception: ${ 253 | error?.message || error 254 | }\n${error?.stack || ''}\n` 255 | ) 256 | } catch (logErr) { 257 | logger.error('Error writing uncaughtException to sync log:', logErr) 258 | } 259 | process.exit(1) 260 | }) 261 | 262 | process.on('unhandledRejection', (reason, promise) => { 263 | 
logger.error('Unhandled Rejection at:', promise, 'reason:', reason)
  // Best-effort synchronous write so the failure is recorded even though the
  // async logger may never flush before exit.
  try {
    const logDir = process.env.LOG_DIR || './logs'
    const logFile = process.env.LOG_FILE || `${logDir}/debug.log`
    fsSync.mkdirSync(logDir, { recursive: true })
    fsSync.appendFileSync(
      logFile,
      `${new Date().toISOString()} - [TaskServer] FATAL: Unhandled Rejection: ${reason}\n`
    )
  } catch (writeError) {
    logger.error('Error writing unhandledRejection to sync log:', writeError)
  }
  // NOTE(review): any unhandled rejection terminates the whole server process —
  // confirm this all-or-nothing policy is intended.
  process.exit(1)
})

/**
 * Lists known feature IDs by scanning FEATURE_TASKS_DIR for task files.
 * NOTE(review): this reads `*_mcp_tasks.json` files on disk while task data
 * elsewhere is served from the database — confirm the two stay in sync.
 */
async function listFeatures() {
  try {
    // Create the directory on first use so readdir cannot fail with ENOENT.
    await fs.mkdir(FEATURE_TASKS_DIR, { recursive: true })

    // A feature ID is a task file name minus its fixed suffix.
    const entries = await fs.readdir(FEATURE_TASKS_DIR)
    return entries
      .filter((file) => file.endsWith('_mcp_tasks.json'))
      .map((file) => file.replace('_mcp_tasks.json', ''))
  } catch (error) {
    // Swallow and return an empty list: callers treat this as "no features".
    logger.error('Error listing features:', error)
    return []
  }
}

/**
 * Maps a raw task row (as returned by databaseService) to the camelCase
 * shape the frontend expects.
 */
function formatTaskForFrontend(task: any, featureId: string) {
  return {
    id: task.id,
    title: task.title || task.description,
    description: task.description,
    status: task.status,
    completed: task.status === 'completed' || Boolean(task.completed),
    effort: task.effort,
    feature_id: featureId,
    // Convert from snake_case to camelCase for frontend compatibility
    parentTaskId: task.parent_task_id,
    // created_at arrives as epoch seconds from the DB; an already-formatted
    // camelCase value passes through untouched.
    createdAt:
      typeof task.created_at === 'number'
        ?
new Date(task.created_at * 1000).toISOString() 317 | : task.createdAt, 318 | updatedAt: 319 | typeof task.updated_at === 'number' 320 | ? new Date(task.updated_at * 1000).toISOString() 321 | : task.updatedAt, 322 | fromReview: Boolean(task.fromReview || task.from_review === 1), 323 | } 324 | } 325 | 326 | // --- Server Start --- 327 | async function main() { 328 | logger.info('Entering main function...') 329 | 330 | // Initialize database *before* starting other services 331 | try { 332 | logger.info('Initializing database...') 333 | await databaseService.initializeDatabase() 334 | logger.info('Database initialized successfully.') 335 | } catch (dbError) { 336 | logger.error( 337 | 'FATAL: Failed to initialize database. Server cannot start.', 338 | dbError 339 | ) 340 | process.exit(1) // Exit if database fails to initialize 341 | } 342 | 343 | await logToFile('[TaskServer] LOG: main() started.') 344 | logger.info('Main function started') 345 | 346 | try { 347 | // --- Express Server Setup --- Moved inside main, after MCP connect 348 | const app = express() 349 | const PORT = process.env.PORT || UI_PORT || 4999 350 | 351 | // HTTP request logging middleware 352 | app.use((req: Request, res: Response, next: NextFunction) => { 353 | const start = new Date().getTime() 354 | 355 | res.on('finish', () => { 356 | const duration = new Date().getTime() - start 357 | logger.info({ 358 | method: req.method, 359 | url: req.url, 360 | status: res.statusCode, 361 | duration: `${duration}ms`, 362 | }) 363 | }) 364 | 365 | next() 366 | }) 367 | 368 | // --- MCP Server Connection --- Moved after Express init 369 | await logToFile('[TaskServer] LOG: Creating transport...') 370 | const transport = new StdioServerTransport() 371 | await logToFile('[TaskServer] LOG: Transport created.') 372 | 373 | await logToFile('[TaskServer] LOG: Connecting server to transport...') 374 | await server.connect(transport) 375 | await logToFile( 376 | '[TaskServer] LOG: MCP Task Manager Server 
connected and running on stdio...' 377 | ) 378 | 379 | // Setup API endpoints 380 | 381 | // Get list of features 382 | app.get('/api/features', (req: Request, res: Response) => { 383 | ;(async () => { 384 | try { 385 | const featureIds = await listFeatures() 386 | res.json(featureIds) 387 | } catch (error: any) { 388 | logger.error(`Failed to fetch features: ${error?.message || error}`) 389 | await logToFile( 390 | `[TaskServer] ERROR fetching features: ${error?.message || error}` 391 | ) 392 | res.status(500).json({ error: 'Failed to fetch features' }) 393 | } 394 | })() 395 | }) 396 | 397 | // Get tasks for a specific feature 398 | app.get('/api/tasks/:featureId', (req: Request, res: Response) => { 399 | ;(async () => { 400 | const { featureId } = req.params 401 | try { 402 | await databaseService.connect() 403 | const tasks = await databaseService.getTasksByFeatureId(featureId) 404 | await databaseService.close() 405 | 406 | // Use the helper function to format tasks 407 | const formattedTasks = tasks.map((task) => 408 | formatTaskForFrontend(task, featureId) 409 | ) 410 | 411 | res.json(formattedTasks) 412 | } catch (error: any) { 413 | logger.error( 414 | `Failed to fetch tasks for feature ${featureId}: ${ 415 | error?.message || error 416 | }` 417 | ) 418 | await logToFile( 419 | `[TaskServer] ERROR fetching tasks for feature ${featureId}: ${ 420 | error?.message || error 421 | }` 422 | ) 423 | res.status(500).json({ error: 'Failed to fetch tasks' }) 424 | } 425 | })() 426 | }) 427 | 428 | // Parse JSON bodies for API requests 429 | app.use(express.json()) 430 | 431 | // POST: Create a new task 432 | app.post('/api/tasks', (req: Request, res: Response) => { 433 | ;(async () => { 434 | try { 435 | const { featureId, description, title, effort } = req.body 436 | 437 | // Use title if description is missing 438 | const taskDescription = description || title 439 | 440 | if (!featureId || !taskDescription) { 441 | return res.status(400).json({ 442 | error: 443 | 
'Missing required fields: featureId and title are required', 444 | }) 445 | } 446 | 447 | // Read existing tasks 448 | await databaseService.connect() 449 | const tasks = await databaseService.getTasksByFeatureId(featureId) 450 | 451 | // Create a new task with a UUID 452 | const now = Math.floor(Date.now() / 1000) 453 | const newTask = { 454 | id: crypto.randomUUID(), 455 | description: taskDescription, 456 | title: title || taskDescription, // Use title or derived description 457 | status: 'pending' as const, 458 | completed: false, 459 | effort: effort as 'low' | 'medium' | 'high' | undefined, 460 | feature_id: featureId, 461 | created_at: now, 462 | updated_at: now, 463 | } 464 | 465 | // Convert to DB format and add to database 466 | await databaseService.addTask(newTask) 467 | 468 | // Add the new task to the list for WS notifications 469 | tasks.push(newTask) 470 | 471 | // Notify clients via WebSocket - both general task update and specific creation event 472 | webSocketService.broadcast({ 473 | type: 'tasks_updated', 474 | featureId, 475 | payload: { 476 | tasks: tasks.map((task) => 477 | formatTaskForFrontend(task, featureId) 478 | ), 479 | updatedAt: new Date().toISOString(), 480 | }, 481 | }) 482 | 483 | // Also send a specific task created notification 484 | webSocketService.notifyTaskCreated( 485 | featureId, 486 | formatTaskForFrontend(newTask, featureId) 487 | ) 488 | 489 | await databaseService.close() 490 | res.status(201).json(formatTaskForFrontend(newTask, featureId)) 491 | } catch (error: any) { 492 | logger.error(`Failed to create task: ${error?.message || error}`) 493 | await logToFile( 494 | `[TaskServer] ERROR creating task: ${error?.message || error}` 495 | ) 496 | res.status(500).json({ error: 'Failed to create task' }) 497 | } 498 | })() 499 | }) 500 | 501 | // PUT: Update an existing task 502 | app.put('/api/tasks/:taskId', (req: Request, res: Response) => { 503 | ;(async () => { 504 | try { 505 | const { taskId } = req.params 506 | 
const { featureId, description, title, status, completed, effort } = 507 | req.body 508 | 509 | if (!featureId) { 510 | return res 511 | .status(400) 512 | .json({ error: 'Missing required field: featureId' }) 513 | } 514 | 515 | // Read existing tasks 516 | await databaseService.connect() 517 | 518 | // Check if task exists 519 | const task = await databaseService.getTaskById(taskId) 520 | 521 | if (!task) { 522 | await databaseService.close() 523 | return res.status(404).json({ error: 'Task not found' }) 524 | } 525 | 526 | // First determine what kind of update we need (status or details) 527 | if (status || completed !== undefined) { 528 | // Status update 529 | const newStatus = status || task.status 530 | const isCompleted = 531 | completed !== undefined ? completed : task.completed 532 | await databaseService.updateTaskStatus( 533 | taskId, 534 | newStatus, 535 | isCompleted 536 | ) 537 | } 538 | 539 | // If we have other fields to update, do that as well 540 | if (title || description || effort) { 541 | await databaseService.updateTaskDetails(taskId, { 542 | title: title, 543 | description: description, 544 | effort: effort as 'low' | 'medium' | 'high' | undefined, 545 | }) 546 | } 547 | 548 | // Get updated tasks for WebSocket notification 549 | const tasks = await databaseService.getTasksByFeatureId(featureId) 550 | const updatedTask = await databaseService.getTaskById(taskId) 551 | 552 | // Notify clients via WebSocket - both general task update and specific update event 553 | webSocketService.broadcast({ 554 | type: 'tasks_updated', 555 | featureId, 556 | payload: { 557 | tasks: tasks.map((task) => 558 | formatTaskForFrontend(task, featureId) 559 | ), 560 | updatedAt: new Date().toISOString(), 561 | }, 562 | }) 563 | 564 | // Send a specific task updated notification 565 | webSocketService.notifyTaskUpdated( 566 | featureId, 567 | formatTaskForFrontend(updatedTask!, featureId) 568 | ) 569 | 570 | // Also send a status change notification if the status 
was updated 571 | if (status) { 572 | webSocketService.notifyTaskStatusChanged(featureId, taskId, status) 573 | } 574 | 575 | await databaseService.close() 576 | res.json(formatTaskForFrontend(updatedTask!, featureId)) 577 | } catch (error: any) { 578 | logger.error(`Failed to update task: ${error?.message || error}`) 579 | await logToFile( 580 | `[TaskServer] ERROR updating task: ${error?.message || error}` 581 | ) 582 | res.status(500).json({ error: 'Failed to update task' }) 583 | } 584 | })() 585 | }) 586 | 587 | // DELETE: Remove a task 588 | app.delete('/api/tasks/:taskId', (req: Request, res: Response) => { 589 | ;(async () => { 590 | try { 591 | const { taskId } = req.params 592 | const { featureId } = req.query 593 | 594 | if (!featureId) { 595 | return res 596 | .status(400) 597 | .json({ error: 'Missing required query parameter: featureId' }) 598 | } 599 | 600 | // Connect to database 601 | await databaseService.connect() 602 | 603 | // Get the task before deletion for the response 604 | const task = await databaseService.getTaskById(taskId) 605 | 606 | if (!task) { 607 | await databaseService.close() 608 | return res.status(404).json({ error: 'Task not found' }) 609 | } 610 | 611 | // Delete the task 612 | const deleted = await databaseService.deleteTask(taskId) 613 | 614 | if (!deleted) { 615 | await databaseService.close() 616 | return res.status(404).json({ error: 'Failed to delete task' }) 617 | } 618 | 619 | // Get updated tasks for WebSocket notification 620 | const remainingTasks = await databaseService.getTasksByFeatureId( 621 | featureId as string 622 | ) 623 | 624 | // Notify clients via WebSocket - both general task update and specific deletion event 625 | webSocketService.broadcast({ 626 | type: 'tasks_updated', 627 | featureId: featureId as string, 628 | payload: { 629 | tasks: remainingTasks.map((task) => 630 | formatTaskForFrontend(task, featureId as string) 631 | ), 632 | updatedAt: new Date().toISOString(), 633 | }, 634 | }) 635 | 636 | 
// Send a specific task deleted notification 637 | webSocketService.notifyTaskDeleted(featureId as string, taskId) 638 | 639 | await databaseService.close() 640 | res.json({ 641 | message: 'Task deleted successfully', 642 | task: formatTaskForFrontend(task, featureId as string), 643 | }) 644 | } catch (error: any) { 645 | logger.error(`Failed to delete task: ${error?.message || error}`) 646 | await logToFile( 647 | `[TaskServer] ERROR deleting task: ${error?.message || error}` 648 | ) 649 | res.status(500).json({ error: 'Failed to delete task' }) 650 | } 651 | })() 652 | }) 653 | 654 | // Get pending question for a specific feature 655 | app.get( 656 | '/api/features/:featureId/pending-question', 657 | (req: Request, res: Response) => { 658 | ;(async () => { 659 | const { featureId } = req.params 660 | try { 661 | const state = await planningStateService.getStateByFeatureId( 662 | featureId 663 | ) 664 | if (state && state.partialResponse) { 665 | // Attempt to parse the stored partialResponse as JSON 666 | let parsedData: any 667 | try { 668 | parsedData = JSON.parse(state.partialResponse) 669 | } catch (parseError) { 670 | logToFile( 671 | `[TaskServer] Error parsing partialResponse JSON for feature ${featureId}: ${parseError}. 
Content: ${state.partialResponse}` 672 | ) 673 | res.json(null) // Cannot parse the stored state 674 | return 675 | } 676 | 677 | // Check if the parsed data contains the clarificationNeeded structure 678 | if (parsedData && parsedData.clarificationNeeded) { 679 | const clarification = parsedData.clarificationNeeded 680 | logToFile( 681 | `[TaskServer] Found pending question ${state.questionId} for feature ${featureId}` 682 | ) 683 | res.json({ 684 | questionId: state.questionId, // Use the ID from the stored state 685 | question: clarification.question, 686 | options: clarification.options, 687 | allowsText: clarification.allowsText, 688 | }) 689 | } else { 690 | logToFile( 691 | `[TaskServer] State found for feature ${featureId}, but partialResponse JSON did not contain 'clarificationNeeded'. Content: ${state.partialResponse}` 692 | ) 693 | res.json(null) // Parsed data structure unexpected 694 | } 695 | } else { 696 | logToFile( 697 | `[TaskServer] No pending question found for feature ${featureId}` 698 | ) 699 | res.json(null) // No pending question found 700 | } 701 | } catch (error: any) { 702 | logToFile( 703 | `[TaskServer] ERROR fetching pending question for feature ${featureId}: ${ 704 | error?.message || error 705 | }` 706 | ) 707 | res.status(500).json({ error: 'Failed to fetch pending question' }) 708 | } 709 | })() 710 | } 711 | ) 712 | 713 | // Default endpoint to get tasks from most recent feature 714 | app.get('/api/tasks', (req: Request, res: Response) => { 715 | ;(async () => { 716 | try { 717 | const featureIds = await listFeatures() 718 | 719 | if (featureIds.length === 0) { 720 | // Return empty array if no features exist 721 | return res.json([]) 722 | } 723 | 724 | // Sort feature IDs by creation time (using file stats) 725 | const featuresWithStats = await Promise.all( 726 | featureIds.map(async (featureId) => { 727 | const filePath = path.join( 728 | FEATURE_TASKS_DIR, 729 | `${featureId}_mcp_tasks.json` 730 | ) 731 | const stats = await 
fs.stat(filePath) 732 | return { featureId, mtime: stats.mtime } 733 | }) 734 | ) 735 | 736 | // Sort by most recent modification time 737 | featuresWithStats.sort( 738 | (a, b) => b.mtime.getTime() - a.mtime.getTime() 739 | ) 740 | 741 | // Get tasks for the most recent feature 742 | const mostRecentFeatureId = featuresWithStats[0].featureId 743 | await databaseService.connect() 744 | const tasks = await databaseService.getTasksByFeatureId( 745 | mostRecentFeatureId 746 | ) 747 | await databaseService.close() 748 | 749 | // Use the helper function to format tasks 750 | const formattedTasks = tasks.map((task) => 751 | formatTaskForFrontend(task, mostRecentFeatureId) 752 | ) 753 | 754 | res.json(formattedTasks) 755 | } catch (error: any) { 756 | await logToFile( 757 | `[TaskServer] ERROR fetching default tasks: ${ 758 | error?.message || error 759 | }` 760 | ) 761 | res.status(500).json({ error: 'Failed to fetch tasks' }) 762 | } 763 | })() 764 | }) 765 | 766 | // Serve static frontend files 767 | const staticFrontendPath = path.join(__dirname, 'frontend-ui') 768 | app.use(express.static(staticFrontendPath)) 769 | 770 | // Catch-all route to serve the SPA for any unmatched routes 771 | app.get('*', (req: Request, res: Response) => { 772 | res.sendFile(path.join(staticFrontendPath, 'index.html')) 773 | }) 774 | 775 | // Start the Express server and capture the HTTP server instance 776 | const httpServer = app.listen(PORT, () => { 777 | const url = `http://localhost:${PORT}` 778 | logger.info(`Frontend server running at ${url}`) 779 | }) 780 | 781 | // Initialize WebSocket service with the HTTP server instance 782 | try { 783 | await webSocketService.initialize(httpServer) 784 | await logToFile( 785 | '[TaskServer] LOG: WebSocket server attached to HTTP server.' 
786 | ) 787 | } catch (wsError) { 788 | await logToFile( 789 | `[TaskServer] WARN: Failed to initialize WebSocket server: ${wsError}` 790 | ) 791 | logger.error('WebSocket server initialization failed:', wsError) 792 | // Decide if this is fatal or can continue 793 | } 794 | 795 | // Handle process termination gracefully 796 | process.on('SIGINT', async () => { 797 | await logToFile( 798 | '[TaskServer] LOG: Received SIGINT. Shutting down gracefully...' 799 | ) 800 | 801 | // Shutdown WebSocket server 802 | try { 803 | await webSocketService.shutdown() 804 | await logToFile( 805 | '[TaskServer] LOG: WebSocket server shut down successfully.' 806 | ) 807 | } catch (error) { 808 | await logToFile( 809 | `[TaskServer] ERROR: Error shutting down WebSocket server: ${error}` 810 | ) 811 | } 812 | 813 | process.exit(0) 814 | }) 815 | 816 | process.on('SIGTERM', async () => { 817 | await logToFile( 818 | '[TaskServer] LOG: Received SIGTERM. Shutting down gracefully...' 819 | ) 820 | 821 | // Shutdown WebSocket server 822 | try { 823 | await webSocketService.shutdown() 824 | await logToFile( 825 | '[TaskServer] LOG: WebSocket server shut down successfully.' 
826 | ) 827 | } catch (error) { 828 | await logToFile( 829 | `[TaskServer] ERROR: Error shutting down WebSocket server: ${error}` 830 | ) 831 | } 832 | 833 | process.exit(0) 834 | }) 835 | } catch (connectError) { 836 | logger.error('CRITICAL ERROR during server.connect():', connectError) 837 | process.exit(1) 838 | } 839 | } 840 | 841 | logger.info('Script execution reaching end of top-level code.') 842 | main().catch((error) => { 843 | logger.error('CRITICAL ERROR executing main():', error) 844 | try { 845 | const logDir = process.env.LOG_DIR || './logs' 846 | const logFile = process.env.LOG_FILE || `${logDir}/debug.log` 847 | fsSync.mkdirSync(logDir, { recursive: true }) 848 | fsSync.appendFileSync( 849 | logFile, 850 | `${new Date().toISOString()} - [TaskServer] CRITICAL ERROR executing main(): ${ 851 | error?.message || error 852 | }\n${error?.stack || ''}\n` 853 | ) 854 | } catch (logErr) { 855 | logger.error('Error writing main() catch to sync log:', logErr) 856 | } 857 | process.exit(1) // Exit if main promise rejects 858 | }) 859 | ``` -------------------------------------------------------------------------------- /src/tools/planFeature.ts: -------------------------------------------------------------------------------- ```typescript 1 | import path from 'path' 2 | import crypto from 'crypto' 3 | import { 4 | Task, 5 | PlanFeatureResponseSchema, 6 | PlanningOutputSchema, 7 | PlanningTaskSchema, 8 | } from '../models/types' 9 | import { promisify } from 'util' 10 | import { aiService } from '../services/aiService' 11 | import { 12 | parseGeminiPlanResponse, 13 | extractParentTaskId, 14 | extractEffort, 15 | ensureEffortRatings, 16 | processAndBreakdownTasks, 17 | detectClarificationRequest, 18 | processAndFinalizePlan, 19 | } from '../lib/llmUtils' 20 | import { logToFile } from '../lib/logger' 21 | import fs from 'fs/promises' 22 | import { exec } from 'child_process' 23 | import * as fsSync from 'fs' 24 | import { encoding_for_model } from 'tiktoken' 25 
| import webSocketService from '../services/webSocketService' 26 | import { z } from 'zod' 27 | import { GEMINI_MODEL, OPENROUTER_MODEL, WS_PORT, UI_PORT } from '../config' 28 | import { dynamicImportDefault } from '../lib/utils' 29 | import planningStateService from '../services/planningStateService' 30 | import { databaseService } from '../services/databaseService' 31 | import { addHistoryEntry } from '../lib/dbUtils' 32 | import { getCodebaseContext } from '../lib/repomixUtils' 33 | 34 | // Promisify child_process.exec for easier async/await usage 35 | const execPromise = promisify(exec) 36 | 37 | interface PlanFeatureParams { 38 | feature_description: string 39 | project_path: string 40 | } 41 | 42 | // Revert interface to only expect text content for SDK compatibility 43 | interface PlanFeatureResult { 44 | content: Array<{ type: 'text'; text: string }> 45 | isError?: boolean 46 | } 47 | 48 | // Define a standard structure for the serialized response 49 | interface PlanFeatureStandardResponse { 50 | status: 'completed' | 'awaiting_clarification' | 'error' 51 | message: string 52 | featureId: string 53 | data?: any // For clarification details or potentially first task info 54 | uiUrl?: string // Include UI URL for convenience 55 | firstTask?: { 56 | id: string 57 | description: string 58 | effort: string 59 | } 60 | } 61 | 62 | /** 63 | * Handles the plan_feature tool request 64 | */ 65 | export async function handlePlanFeature( 66 | params: PlanFeatureParams 67 | ): Promise<PlanFeatureResult> { 68 | const { feature_description, project_path } = params 69 | 70 | // Generate a unique feature ID *first* 71 | const featureId = crypto.randomUUID() 72 | // Define UI URL early 73 | const uiUrl = `http://localhost:${UI_PORT || 4999}?featureId=${featureId}` 74 | let browserOpened = false // Flag to track if browser was opened for clarification 75 | 76 | await logToFile( 77 | `[TaskServer] Handling plan_feature request: "${feature_description}" (Path: ${ 78 | 
project_path || 'CWD' 79 | }), Feature ID: ${featureId}` 80 | ) 81 | 82 | // Define message and isError outside the try block to ensure they are always available 83 | let message: string 84 | let isError = false 85 | let task_count: number | undefined = undefined // Keep track of task count 86 | 87 | try { 88 | // Record tool call in history *early* 89 | await addHistoryEntry(featureId, 'tool_call', { 90 | tool: 'plan_feature', 91 | params: { feature_description, project_path }, 92 | }) 93 | 94 | // Create the feature record with project_path 95 | try { 96 | await databaseService.createFeature( 97 | featureId, 98 | feature_description, 99 | project_path 100 | ) 101 | await logToFile( 102 | `[TaskServer] Created feature record with ID: ${featureId}, Project Path: ${project_path}` 103 | ) 104 | } catch (featureError) { 105 | await logToFile( 106 | `[TaskServer] Error creating feature record: ${featureError}` 107 | ) 108 | // Continue even if feature creation fails - we can recover later 109 | } 110 | 111 | const planningModel = aiService.getPlanningModel() 112 | 113 | if (!planningModel) { 114 | await logToFile( 115 | '[TaskServer] Planning model not initialized (check API key).' 116 | ) 117 | message = 'Error: Planning model not initialized. Check API Key.' 
118 | isError = true 119 | 120 | // Record error in history, handling potential logging errors 121 | try { 122 | await addHistoryEntry(featureId, 'tool_response', { 123 | tool: 'plan_feature', 124 | isError: true, 125 | message, 126 | }) 127 | } catch (historyError) { 128 | console.error( 129 | `[TaskServer] Failed to add error history entry during model init failure: ${historyError}` 130 | ) 131 | } 132 | 133 | // Return the structured error object *serialized* 134 | const errorResponse: PlanFeatureStandardResponse = { 135 | status: 'error', 136 | message: message, 137 | featureId: featureId, // Include featureId even in early errors 138 | } 139 | return { 140 | content: [{ type: 'text', text: JSON.stringify(errorResponse) }], 141 | isError, 142 | } 143 | } 144 | 145 | // --- Get Codebase Context using Utility Function --- 146 | const targetDir = project_path || '.' // Keep targetDir logic 147 | const { context: codebaseContext, error: contextError } = 148 | await getCodebaseContext( 149 | targetDir, 150 | featureId // Use featureId as log context 151 | ) 152 | 153 | // Handle potential errors from getCodebaseContext 154 | if (contextError) { 155 | message = contextError // Use the user-friendly error from the utility 156 | isError = true 157 | // Record error in history, handling potential logging errors 158 | try { 159 | await addHistoryEntry(featureId, 'tool_response', { 160 | tool: 'plan_feature', 161 | isError: true, 162 | message, 163 | step: 'repomix_context_gathering', 164 | }) 165 | } catch (historyError) { 166 | console.error( 167 | `[TaskServer] Failed to add error history entry during context gathering failure: ${historyError}` 168 | ) 169 | } 170 | return { content: [{ type: 'text', text: message }], isError } 171 | } 172 | 173 | // Optional: Add token counting / compression logic here if needed, 174 | // operating on the returned `codebaseContext`. 175 | // This part is kept separate from the core getCodebaseContext utility for now. 176 | // ... 
(Token counting and compression logic could go here) 177 | 178 | // --- LLM Planning & Task Generation --- 179 | let planSteps: string[] = [] 180 | 181 | try { 182 | await logToFile('[TaskServer] Calling LLM API for planning...') 183 | const contextPromptPart = codebaseContext 184 | ? `Based on the following codebase context:\n\`\`\`\n${codebaseContext}\n\`\`\`\n\n` 185 | : 'Based on the provided feature description (no codebase context available):\n\n' 186 | 187 | // Define the structured planning prompt incorporating the new schema 188 | const structuredPlanningPrompt = `${contextPromptPart}Generate a detailed, step-by-step coding implementation plan for the feature: \"${feature_description}\". 189 | 190 | The plan should ONLY include actionable tasks a developer needs to perform within the code. Exclude steps related to project management, deployment, manual testing, documentation updates, or obtaining approvals. 191 | 192 | For each coding task, include an effort rating (low, medium, or high) based on implementation work involved. High effort tasks often require breakdown. 193 | 194 | Use these effort definitions: 195 | - Low: Simple, quick changes in one or few files, minimal logic changes. 196 | - Medium: Requires moderate development time, involves changes across several files/components, includes writing new functions/classes. 197 | - High: Significant development time, complex architectural changes, intricate algorithms, deep refactoring. 198 | 199 | **RESPONSE FORMAT:** 200 | You MUST respond with a single JSON object. 201 | 202 | **Case 1: Clarification Needed** 203 | If you require clarification before creating the plan, respond with this JSON structure: 204 | \`\`\`json 205 | {\n "clarificationNeeded": {\n "question": "Your specific question here. 
Be precise.", 206 | \n "options": ["Option A", "Option B"] // Optional: Provide options if helpful 207 | \n "allowsText": true // Optional: Set to false if only options are valid 208 | \n }\n }\`\`\` 209 | 210 | **Case 2: No Clarification Needed** 211 | If you DO NOT need clarification, respond with this JSON structure, containing a non-empty array of tasks: 212 | \`\`\`json 213 | {\n "tasks": [\n { "description": "Description of the first task", "effort": "low" | "medium" | "high" },\n { "description": "Description of the second task", "effort": "low" | "medium" | "high" },\n ...\n ]\n }\`\`\` 214 | 215 | **IMPORTANT:** Respond *only* with the valid JSON object conforming to one of the two cases described above. Do not include any introductory text, concluding remarks, or markdown formatting outside the JSON structure.` 216 | 217 | // Log truncated structured prompt for history 218 | await addHistoryEntry(featureId, 'model', { 219 | step: 'structured_planning_prompt', 220 | prompt: `Generate structured plan or clarification for: "${feature_description}" ${ 221 | codebaseContext ? '(with context)' : '(no context)' 222 | }...`, 223 | }) 224 | 225 | if ('chat' in planningModel) { 226 | // It's OpenRouter - Use structured output 227 | const structuredResult = await aiService.callOpenRouterWithSchema( 228 | OPENROUTER_MODEL, 229 | [{ role: 'user', content: structuredPlanningPrompt }], 230 | PlanFeatureResponseSchema, 231 | { temperature: 0.7 } 232 | ) 233 | 234 | logToFile( 235 | `[TaskServer] Structured result (OpenRouter): ${JSON.stringify( 236 | structuredResult 237 | )}` 238 | ) 239 | 240 | if (structuredResult.success) { 241 | // Check if clarification is needed 242 | if (structuredResult.data.clarificationNeeded) { 243 | logToFile( 244 | '[TaskServer] Clarification needed based on structured response.' 
245 | ) 246 | const clarification = structuredResult.data.clarificationNeeded 247 | 248 | // Open the browser *now* to show the question 249 | try { 250 | logToFile(`[TaskServer] Launching UI for clarification: ${uiUrl}`) 251 | const open = await dynamicImportDefault('open') 252 | await open(uiUrl) 253 | browserOpened = true // Mark browser as opened 254 | logToFile( 255 | '[TaskServer] Browser launch for clarification initiated.' 256 | ) 257 | } catch (openError: any) { 258 | logToFile( 259 | `[TaskServer] Error launching browser for clarification: ${openError.message}` 260 | ) 261 | // Continue even if browser launch fails, WS should still work if UI is open 262 | } 263 | 264 | // Store the intermediate state 265 | const questionId = 266 | await planningStateService.storeIntermediateState( 267 | featureId, 268 | structuredPlanningPrompt, 269 | JSON.stringify(structuredResult.data), 270 | 'feature_planning' 271 | ) 272 | // Send WebSocket message 273 | webSocketService.broadcast({ 274 | type: 'show_question', 275 | featureId, 276 | payload: { 277 | questionId, 278 | question: clarification.question, 279 | options: clarification.options, 280 | allowsText: clarification.allowsText, 281 | }, 282 | }) 283 | // Record in history 284 | await addHistoryEntry(featureId, 'tool_response', { 285 | tool: 'plan_feature', 286 | status: 'awaiting_clarification', 287 | questionId, 288 | }) 289 | // Return structured clarification info *serialized as text* 290 | const clarificationData = { 291 | questionId: questionId, 292 | question: clarification.question, 293 | options: clarification.options, 294 | allowsText: clarification.allowsText, 295 | } 296 | const clarificationResponse: PlanFeatureStandardResponse = { 297 | status: 'awaiting_clarification', 298 | message: `Planning paused for feature ${featureId}. User clarification needed via UI (${uiUrl}). 
Once submitted, call 'get_next_task' with featureId '${featureId}' to retrieve the first task.`, 299 | featureId: featureId, 300 | data: clarificationData, 301 | uiUrl: uiUrl, 302 | } 303 | return { 304 | // Serialize the standard response structure 305 | content: [ 306 | { type: 'text', text: JSON.stringify(clarificationResponse) }, 307 | ], 308 | isError: false, // Not an error, just waiting 309 | } 310 | } else if (structuredResult.data.tasks) { 311 | logToFile('[TaskServer] Tasks received in structured response.') 312 | // Convert the structured response to the expected format for processing 313 | planSteps = structuredResult.data.tasks.map( 314 | (task) => `[${task.effort}] ${task.description}` 315 | ) 316 | await addHistoryEntry(featureId, 'model', { 317 | step: 'structured_planning_response', 318 | response: JSON.stringify(structuredResult.data), 319 | }) 320 | } else { 321 | // Schema validation should prevent this, but handle defensively 322 | throw new Error( 323 | 'Structured response valid but contained neither tasks nor clarification.' 324 | ) 325 | } 326 | } else { 327 | // Fallback to unstructured response if structured fails 328 | console.warn( 329 | `[TaskServer] Structured planning failed: ${structuredResult.error}. Falling back to unstructured format.` 330 | ) 331 | 332 | // Use traditional prompt and formatting 333 | const unstructuredFallbackPrompt = `${contextPromptPart}Generate a detailed, step-by-step coding implementation plan for the feature: "${feature_description}". 334 | 335 | The plan should ONLY include actionable tasks a developer needs to perform within the code. Exclude steps related to project management, deployment, manual testing, documentation updates, or obtaining approvals. 336 | 337 | For each coding task, include an effort rating (low, medium, or high) based on implementation work involved. High effort tasks often require breakdown. 
338 | 339 | Use these effort definitions: 340 | - Low: Simple, quick changes in one or few files, minimal logic changes. 341 | - Medium: Requires moderate development time, involves changes across several files/components, includes writing new functions/classes. 342 | - High: Significant development time, complex architectural changes, intricate algorithms, deep refactoring. 343 | 344 | IF YOU NEED CLARIFICATION BEFORE YOU CAN PROPERLY CREATE THE PLAN: 345 | Instead of returning a task list, use the following format to ask for clarification: 346 | [CLARIFICATION_NEEDED] 347 | Your specific question here. Be precise about what information you need to proceed. 348 | Options: [Option A, Option B, Option C] (include this line only if providing multiple-choice options) 349 | MULTIPLE_CHOICE_ONLY (include this if only the listed options are valid, omit if free text is also acceptable) 350 | [END_CLARIFICATION] 351 | 352 | For example: 353 | [CLARIFICATION_NEEDED] 354 | Should the feature implementation use SQL or NoSQL database for data storage? 355 | Options: [SQL, NoSQL] 356 | [END_CLARIFICATION] 357 | 358 | IF NO CLARIFICATION IS NEEDED: 359 | **IMPORTANT: Do NOT include any introductory sentences, summaries, concluding remarks, or any text other than the formatted task list itself. 
The response must start *directly* with the first task (e.g., "[low] Add console log...").** 360 | 361 | Now, generate the list of coding tasks, ensuring each task strictly follows this format: [effort] Task Description` 362 | 363 | let result = await planningModel.chat.completions.create({ 364 | model: OPENROUTER_MODEL, 365 | messages: [{ role: 'user', content: unstructuredFallbackPrompt }], 366 | temperature: 0.7, 367 | }) 368 | // We need to get the text response here for the old parsing logic to work 369 | let responseText = aiService.extractTextFromResponse(result) 370 | 371 | if (responseText === null) { 372 | message = 373 | 'Error: Failed to get planning response from LLM or response was blocked. AI Agent: Do not try to call again. Simply alert the user.' 374 | isError = true 375 | // Record error in history, handling potential logging errors 376 | try { 377 | await addHistoryEntry(featureId, 'tool_response', { 378 | tool: 'plan_feature', 379 | isError: true, 380 | message, 381 | step: 'llm_response_processing', 382 | }) 383 | } catch (historyError) { 384 | console.error( 385 | `[TaskServer] Failed to add error history entry during LLM processing failure: ${historyError}` 386 | ) 387 | } 388 | return { content: [{ type: 'text', text: message }], isError } 389 | } 390 | 391 | // If no plan steps were extracted from structured response, try text parsing from fallback 392 | if (!planSteps.length && responseText) { 393 | logToFile( 394 | '[TaskServer] Attempting text parsing on unstructured fallback response.' 
395 | ) 396 | // IMPORTANT: Ensure parseGeminiPlanResponse ONLY extracts tasks and doesn't get confused by potential JSON remnants 397 | planSteps = parseGeminiPlanResponse(responseText) 398 | if (planSteps.length > 0) { 399 | logToFile( 400 | `[TaskServer] Extracted ${planSteps.length} tasks via text parsing.` 401 | ) 402 | } else { 403 | // If still no tasks, log error and return *serialized* 404 | message = 405 | 'Error: The planning model did not return a recognizable list of tasks.' 406 | isError = true 407 | // Record error in history, handling potential logging errors 408 | try { 409 | await addHistoryEntry(featureId, 'tool_response', { 410 | tool: 'plan_feature', 411 | isError: true, 412 | message, 413 | step: 'response_parsing', 414 | }) 415 | } catch (historyError) { 416 | console.error( 417 | `[TaskServer] Failed to add error history entry during response parsing failure: ${historyError}` 418 | ) 419 | } 420 | const errorResponse: PlanFeatureStandardResponse = { 421 | status: 'error', 422 | message: message, 423 | featureId: featureId, 424 | } 425 | return { 426 | content: [ 427 | { type: 'text', text: JSON.stringify(errorResponse) }, 428 | ], 429 | isError: true, 430 | } 431 | } 432 | } 433 | } 434 | } else { 435 | // It's Gemini - Use structured output 436 | const structuredResult = await aiService.callGeminiWithSchema( 437 | GEMINI_MODEL, 438 | structuredPlanningPrompt, 439 | PlanFeatureResponseSchema, 440 | { temperature: 0.7 } 441 | ) 442 | 443 | logToFile( 444 | `[TaskServer] Structured result (Gemini): ${JSON.stringify( 445 | structuredResult 446 | )}` 447 | ) 448 | 449 | if (structuredResult.success) { 450 | // Check if clarification is needed 451 | if (structuredResult.data.clarificationNeeded) { 452 | logToFile( 453 | '[TaskServer] Clarification needed based on structured response.' 
454 | ) 455 | const clarification = structuredResult.data.clarificationNeeded 456 | 457 | // Open the browser *now* to show the question 458 | try { 459 | logToFile(`[TaskServer] Launching UI for clarification: ${uiUrl}`) 460 | const open = await dynamicImportDefault('open') 461 | await open(uiUrl) 462 | browserOpened = true // Mark browser as opened 463 | logToFile( 464 | '[TaskServer] Browser launch for clarification initiated.' 465 | ) 466 | } catch (openError: any) { 467 | logToFile( 468 | `[TaskServer] Error launching browser for clarification: ${openError.message}` 469 | ) 470 | // Continue even if browser launch fails, WS should still work if UI is open 471 | } 472 | 473 | // Store the intermediate state 474 | const questionId = 475 | await planningStateService.storeIntermediateState( 476 | featureId, 477 | structuredPlanningPrompt, 478 | JSON.stringify(structuredResult.data), 479 | 'feature_planning' 480 | ) 481 | // Send WebSocket message 482 | webSocketService.broadcast({ 483 | type: 'show_question', 484 | featureId, 485 | payload: { 486 | questionId, 487 | question: clarification.question, 488 | options: clarification.options, 489 | allowsText: clarification.allowsText, 490 | }, 491 | }) 492 | // Record in history 493 | await addHistoryEntry(featureId, 'tool_response', { 494 | tool: 'plan_feature', 495 | status: 'awaiting_clarification', 496 | questionId, 497 | }) 498 | // Return structured clarification info *serialized as text* 499 | const clarificationData = { 500 | questionId: questionId, 501 | question: clarification.question, 502 | options: clarification.options, 503 | allowsText: clarification.allowsText, 504 | } 505 | const clarificationResponse: PlanFeatureStandardResponse = { 506 | status: 'awaiting_clarification', 507 | message: `Planning paused for feature ${featureId}. User clarification needed via UI (${uiUrl}). 
Once submitted, call 'get_next_task' with featureId '${featureId}' to retrieve the first task.`, 508 | featureId: featureId, 509 | data: clarificationData, 510 | uiUrl: uiUrl, 511 | } 512 | return { 513 | // Serialize the standard response structure 514 | content: [ 515 | { type: 'text', text: JSON.stringify(clarificationResponse) }, 516 | ], 517 | isError: false, // Not an error, just waiting 518 | } 519 | } else if (structuredResult.data.tasks) { 520 | logToFile('[TaskServer] Tasks received in structured response.') 521 | // Convert the structured response to the expected format for processing 522 | planSteps = structuredResult.data.tasks.map( 523 | (task) => `[${task.effort}] ${task.description}` 524 | ) 525 | await addHistoryEntry(featureId, 'model', { 526 | step: 'structured_planning_response', 527 | response: JSON.stringify(structuredResult.data), 528 | }) 529 | } else { 530 | // Schema validation should prevent this, but handle defensively 531 | throw new Error( 532 | 'Structured response valid but contained neither tasks nor clarification.' 533 | ) 534 | } 535 | } else { 536 | // Fallback to unstructured response if structured fails 537 | console.warn( 538 | `[TaskServer] Structured planning failed: ${structuredResult.error}. Falling back to unstructured format.` 539 | ) 540 | 541 | // Use traditional Gemini call 542 | const unstructuredFallbackPrompt = `${contextPromptPart}Generate a detailed, step-by-step coding implementation plan for the feature: "${feature_description}". 543 | 544 | Engineer it in the best way possible, considering all side effects and edge cases. Be extremely thorough and meticulous. 545 | 546 | The plan should ONLY include actionable tasks a developer needs to perform within the code. Exclude steps related to project management, deployment, manual testing, documentation updates, or obtaining approvals. 547 | 548 | For each coding task, include an effort rating (low, medium, or high) based on implementation work involved. 
High effort tasks often require breakdown. 549 | 550 | Use these effort definitions: 551 | - Low: Simple, quick changes in one or few files, minimal logic changes. 552 | - Medium: Requires moderate development time, involves changes across several files/components, includes writing new functions/classes. 553 | - High: Significant development time, complex architectural changes, intricate algorithms, deep refactoring. 554 | 555 | IF YOU NEED CLARIFICATION BEFORE YOU CAN PROPERLY CREATE THE PLAN: 556 | Instead of returning a task list, use the following format to ask for clarification: 557 | [CLARIFICATION_NEEDED] 558 | Your specific question here. Be precise about what information you need to proceed. 559 | Options: [Option A, Option B, Option C] (include this line only if providing multiple-choice options) 560 | MULTIPLE_CHOICE_ONLY (include this if only the listed options are valid, omit if free text is also acceptable) 561 | [END_CLARIFICATION] 562 | 563 | For example: 564 | [CLARIFICATION_NEEDED] 565 | Should the feature implementation use SQL or NoSQL database for data storage? 566 | Options: [SQL, NoSQL] 567 | [END_CLARIFICATION] 568 | 569 | IF NO CLARIFICATION IS NEEDED: 570 | **IMPORTANT: Do NOT include any introductory sentences, summaries, concluding remarks, or any text other than the formatted task list itself. 
The response must start *directly* with the first task (e.g., "[low] Add console log...").** 571 | 572 | Now, generate the list of coding tasks, ensuring each task strictly follows this format: [effort] Task Description` 573 | 574 | let result = await planningModel.generateContent({ 575 | contents: [ 576 | { role: 'user', parts: [{ text: unstructuredFallbackPrompt }] }, 577 | ], 578 | generationConfig: { 579 | temperature: 0.7, 580 | }, 581 | }) 582 | // We need to get the text response here for the old parsing logic to work 583 | let responseText = aiService.extractTextFromResponse(result) 584 | 585 | if (responseText === null) { 586 | message = 587 | 'Error: Failed to get planning response from LLM or response was blocked. AI Agent: Do not try to call again. Simply alert the user.' 588 | isError = true 589 | // Record error in history, handling potential logging errors 590 | try { 591 | await addHistoryEntry(featureId, 'tool_response', { 592 | tool: 'plan_feature', 593 | isError: true, 594 | message, 595 | step: 'llm_response_processing', 596 | }) 597 | } catch (historyError) { 598 | console.error( 599 | `[TaskServer] Failed to add error history entry during LLM processing failure: ${historyError}` 600 | ) 601 | } 602 | return { content: [{ type: 'text', text: message }], isError } 603 | } 604 | 605 | // If no plan steps were extracted from structured response, try text parsing from fallback 606 | if (!planSteps.length && responseText) { 607 | logToFile( 608 | '[TaskServer] Attempting text parsing on unstructured fallback response.' 
609 | ) 610 | // IMPORTANT: Ensure parseGeminiPlanResponse ONLY extracts tasks and doesn't get confused by potential JSON remnants 611 | planSteps = parseGeminiPlanResponse(responseText) 612 | if (planSteps.length > 0) { 613 | logToFile( 614 | `[TaskServer] Extracted ${planSteps.length} tasks via text parsing.` 615 | ) 616 | } else { 617 | // If still no tasks, log error and return *serialized* 618 | message = 619 | 'Error: The planning model did not return a recognizable list of tasks.' 620 | isError = true 621 | // Record error in history, handling potential logging errors 622 | try { 623 | await addHistoryEntry(featureId, 'tool_response', { 624 | tool: 'plan_feature', 625 | isError: true, 626 | message, 627 | step: 'response_parsing', 628 | }) 629 | } catch (historyError) { 630 | console.error( 631 | `[TaskServer] Failed to add error history entry during response parsing failure: ${historyError}` 632 | ) 633 | } 634 | const errorResponse: PlanFeatureStandardResponse = { 635 | status: 'error', 636 | message: message, 637 | featureId: featureId, 638 | } 639 | return { 640 | content: [ 641 | { type: 'text', text: JSON.stringify(errorResponse) }, 642 | ], 643 | isError: true, 644 | } 645 | } 646 | } 647 | } 648 | } 649 | 650 | // Process the plan steps using the centralized function 651 | const finalTasks = await processAndFinalizePlan( 652 | planSteps, // Use the extracted/parsed plan steps 653 | planningModel, 654 | featureId 655 | ) 656 | 657 | task_count = finalTasks.length 658 | 659 | message = `Successfully planned feature '${feature_description}' with ${task_count} tasks.` 660 | logToFile(`[TaskServer] ${message}`) 661 | 662 | // Record final success in history 663 | await addHistoryEntry(featureId, 'tool_response', { 664 | tool: 'plan_feature', 665 | status: 'completed', 666 | taskCount: task_count, 667 | }) 668 | } catch (error: any) { 669 | message = `Error during feature planning: ${error.message}` 670 | isError = true 671 | await logToFile(`[TaskServer] 
${message} Stack: ${error.stack}`) 672 | 673 | // Record error in history, handling potential logging errors 674 | try { 675 | await addHistoryEntry(featureId, 'tool_response', { 676 | tool: 'plan_feature', 677 | isError: true, 678 | message: error.message, 679 | step: 'planning_execution', // Indicate where the error occurred 680 | }) 681 | } catch (historyError) { 682 | console.error( 683 | `[TaskServer] Failed to add error history entry during planning execution failure: ${historyError}` 684 | ) 685 | } 686 | } 687 | } catch (outerError: any) { 688 | // Catch errors happening before LLM call (e.g., history writing) 689 | message = `[TaskServer] Unexpected error in handlePlanFeature: ${ 690 | outerError?.message || String(outerError) 691 | }` 692 | isError = true 693 | await logToFile(`${message} Stack: ${outerError?.stack}`, 'error') 694 | 695 | // Record error in history, handling potential logging errors 696 | try { 697 | await addHistoryEntry(featureId, 'tool_response', { 698 | tool: 'plan_feature', 699 | isError: true, 700 | message: outerError?.message || String(outerError), 701 | step: 'outer_catch_block', // Indicate where the error was caught 702 | errorDetails: outerError?.stack, // Include stack trace if available 703 | }) 704 | } catch (historyError: any) { 705 | // Log the failure to record history, but don't crash the main process 706 | await logToFile( 707 | `[TaskServer] Failed to record history entry for outer error: ${ 708 | historyError?.message || String(historyError) 709 | }`, 710 | 'error' 711 | ) 712 | } 713 | } 714 | 715 | // Open the UI in the browser *if not already opened for clarification* 716 | if (!browserOpened) { 717 | try { 718 | await logToFile( 719 | `[TaskServer] Planning complete/failed. 
Launching UI: ${uiUrl}` 720 | ) 721 | const open = await dynamicImportDefault('open') 722 | await open(uiUrl) 723 | await logToFile('[TaskServer] Browser launch initiated successfully') 724 | } catch (openError: any) { 725 | await logToFile( 726 | `[TaskServer] Error launching browser post-process: ${openError.message}` 727 | ) 728 | // Continue even if browser launch fails 729 | } 730 | } 731 | 732 | // Prepare the final return content *as standard response object* 733 | let responseData: PlanFeatureStandardResponse 734 | if (!isError && task_count && task_count > 0) { 735 | let firstTaskDesc: string | undefined 736 | let updatedTasks: Task[] = [] 737 | try { 738 | // Use databaseService instead of readTasks 739 | await databaseService.connect() 740 | updatedTasks = await databaseService.getTasksByFeatureId(featureId) 741 | await databaseService.close() 742 | 743 | if (updatedTasks.length > 0) { 744 | const firstTask = updatedTasks[0] 745 | // Format the first task for the return message 746 | firstTaskDesc = firstTask.description // Store first task desc 747 | } else { 748 | // Fallback if tasks array is somehow empty after successful planning 749 | firstTaskDesc = undefined 750 | } 751 | } catch (readError) { 752 | logToFile( 753 | `[TaskServer] Error reading tasks after finalization: ${readError}` 754 | ) 755 | // Fallback to the original message if reading fails 756 | firstTaskDesc = undefined 757 | } 758 | 759 | // Construct success response 760 | responseData = { 761 | status: 'completed', 762 | message: `Successfully planned ${task_count || 0} tasks.${ 763 | firstTaskDesc ? ' First task: "' + firstTaskDesc + '"' : '' 764 | }`, 765 | featureId: featureId, 766 | uiUrl: uiUrl, 767 | firstTask: 768 | updatedTasks.length > 0 769 | ? 
{ 770 | id: updatedTasks[0].id, 771 | description: updatedTasks[0].description || '', 772 | effort: updatedTasks[0].effort || 'medium', 773 | } 774 | : undefined, 775 | } 776 | } else { 777 | // Construct error or no-tasks response 778 | responseData = { 779 | status: isError ? 'error' : 'completed', // 'completed' but with 0 tasks is possible 780 | message: message, // Use the message determined earlier (could be error or success-with-0-tasks) 781 | featureId: featureId, 782 | uiUrl: uiUrl, 783 | } 784 | } 785 | 786 | // Final return structure using the standard serialized format 787 | return { 788 | content: [ 789 | { 790 | type: 'text', 791 | text: JSON.stringify(responseData), 792 | }, 793 | ], 794 | isError, // Keep isError consistent with internal state for SDK 795 | } 796 | } 797 | ```