This is page 27 of 38. Use http://codebase.md/eyaltoledano/claude-task-master?page={x} to view the full context. # Directory Structure ``` ├── .changeset │ ├── config.json │ └── README.md ├── .claude │ ├── agents │ │ ├── task-checker.md │ │ ├── task-executor.md │ │ └── task-orchestrator.md │ ├── commands │ │ ├── dedupe.md │ │ └── tm │ │ ├── add-dependency │ │ │ └── add-dependency.md │ │ ├── add-subtask │ │ │ ├── add-subtask.md │ │ │ └── convert-task-to-subtask.md │ │ ├── add-task │ │ │ └── add-task.md │ │ ├── analyze-complexity │ │ │ └── analyze-complexity.md │ │ ├── complexity-report │ │ │ └── complexity-report.md │ │ ├── expand │ │ │ ├── expand-all-tasks.md │ │ │ └── expand-task.md │ │ ├── fix-dependencies │ │ │ └── fix-dependencies.md │ │ ├── generate │ │ │ └── generate-tasks.md │ │ ├── help.md │ │ ├── init │ │ │ ├── init-project-quick.md │ │ │ └── init-project.md │ │ ├── learn.md │ │ ├── list │ │ │ ├── list-tasks-by-status.md │ │ │ ├── list-tasks-with-subtasks.md │ │ │ └── list-tasks.md │ │ ├── models │ │ │ ├── setup-models.md │ │ │ └── view-models.md │ │ ├── next │ │ │ └── next-task.md │ │ ├── parse-prd │ │ │ ├── parse-prd-with-research.md │ │ │ └── parse-prd.md │ │ ├── remove-dependency │ │ │ └── remove-dependency.md │ │ ├── remove-subtask │ │ │ └── remove-subtask.md │ │ ├── remove-subtasks │ │ │ ├── remove-all-subtasks.md │ │ │ └── remove-subtasks.md │ │ ├── remove-task │ │ │ └── remove-task.md │ │ ├── set-status │ │ │ ├── to-cancelled.md │ │ │ ├── to-deferred.md │ │ │ ├── to-done.md │ │ │ ├── to-in-progress.md │ │ │ ├── to-pending.md │ │ │ └── to-review.md │ │ ├── setup │ │ │ ├── install-taskmaster.md │ │ │ └── quick-install-taskmaster.md │ │ ├── show │ │ │ └── show-task.md │ │ ├── status │ │ │ └── project-status.md │ │ ├── sync-readme │ │ │ └── sync-readme.md │ │ ├── tm-main.md │ │ ├── update │ │ │ ├── update-single-task.md │ │ │ ├── update-task.md │ │ │ └── update-tasks-from-id.md │ │ ├── utils │ │ │ └── analyze-project.md │ │ ├── validate-dependencies │ │ │ └── validate-dependencies.md │ │ └── workflows │ │ ├── auto-implement-tasks.md │ │ ├── command-pipeline.md │ │ └── smart-workflow.md │ └── TM_COMMANDS_GUIDE.md ├── .coderabbit.yaml ├── .cursor │ ├── mcp.json │ └── rules │ ├── ai_providers.mdc │ ├── ai_services.mdc │ ├── architecture.mdc │ ├── changeset.mdc │ ├── commands.mdc │ ├── context_gathering.mdc │ ├── cursor_rules.mdc │ ├── dependencies.mdc │ ├── dev_workflow.mdc │ ├── git_workflow.mdc │ ├── glossary.mdc │ ├── mcp.mdc │ ├── new_features.mdc │ ├── self_improve.mdc │ ├── tags.mdc │ ├── taskmaster.mdc │ ├── tasks.mdc │ ├── telemetry.mdc │ ├── test_workflow.mdc │ ├── tests.mdc │ ├── ui.mdc │ └── utilities.mdc ├── .cursorignore ├── .env.example ├── .github │ ├── ISSUE_TEMPLATE │ │ ├── bug_report.md │ │ ├── enhancements---feature-requests.md │ │ └── feedback.md │ ├── PULL_REQUEST_TEMPLATE │ │ ├── bugfix.md │ │ ├── config.yml │ │ ├── feature.md │ │ └── integration.md │ ├── PULL_REQUEST_TEMPLATE.md │ ├── scripts │ │ ├── auto-close-duplicates.mjs │ │ ├── backfill-duplicate-comments.mjs │ │ ├── check-pre-release-mode.mjs │ │ ├── parse-metrics.mjs │ │ ├── release.mjs │ │ ├── tag-extension.mjs │ │ └── utils.mjs │ └── workflows │ ├── auto-close-duplicates.yml │ ├── backfill-duplicate-comments.yml │ ├── ci.yml │ ├── claude-dedupe-issues.yml │ ├── claude-docs-trigger.yml │ ├── claude-docs-updater.yml │ ├── claude-issue-triage.yml │ ├── claude.yml │ ├── extension-ci.yml │ ├── extension-release.yml │ ├── log-issue-events.yml │ ├── pre-release.yml │ ├── release-check.yml │ ├── 
release.yml │ ├── update-models-md.yml │ └── weekly-metrics-discord.yml ├── .gitignore ├── .kiro │ ├── hooks │ │ ├── tm-code-change-task-tracker.kiro.hook │ │ ├── tm-complexity-analyzer.kiro.hook │ │ ├── tm-daily-standup-assistant.kiro.hook │ │ ├── tm-git-commit-task-linker.kiro.hook │ │ ├── tm-pr-readiness-checker.kiro.hook │ │ ├── tm-task-dependency-auto-progression.kiro.hook │ │ └── tm-test-success-task-completer.kiro.hook │ ├── settings │ │ └── mcp.json │ └── steering │ ├── dev_workflow.md │ ├── kiro_rules.md │ ├── self_improve.md │ ├── taskmaster_hooks_workflow.md │ └── taskmaster.md ├── .manypkg.json ├── .mcp.json ├── .npmignore ├── .nvmrc ├── .taskmaster │ ├── CLAUDE.md │ ├── config.json │ ├── docs │ │ ├── MIGRATION-ROADMAP.md │ │ ├── prd-tm-start.txt │ │ ├── prd.txt │ │ ├── README.md │ │ ├── research │ │ │ ├── 2025-06-14_how-can-i-improve-the-scope-up-and-scope-down-comm.md │ │ │ ├── 2025-06-14_should-i-be-using-any-specific-libraries-for-this.md │ │ │ ├── 2025-06-14_test-save-functionality.md │ │ │ ├── 2025-06-14_test-the-fix-for-duplicate-saves-final-test.md │ │ │ └── 2025-08-01_do-we-need-to-add-new-commands-or-can-we-just-weap.md │ │ ├── task-template-importing-prd.txt │ │ ├── test-prd.txt │ │ └── tm-core-phase-1.txt │ ├── reports │ │ ├── task-complexity-report_cc-kiro-hooks.json │ │ ├── task-complexity-report_test-prd-tag.json │ │ ├── task-complexity-report_tm-core-phase-1.json │ │ ├── task-complexity-report.json │ │ └── tm-core-complexity.json │ ├── state.json │ ├── tasks │ │ ├── task_001_tm-start.txt │ │ ├── task_002_tm-start.txt │ │ ├── task_003_tm-start.txt │ │ ├── task_004_tm-start.txt │ │ ├── task_007_tm-start.txt │ │ └── tasks.json │ └── templates │ └── example_prd.txt ├── .vscode │ ├── extensions.json │ └── settings.json ├── apps │ ├── cli │ │ ├── CHANGELOG.md │ │ ├── package.json │ │ ├── src │ │ │ ├── commands │ │ │ │ ├── auth.command.ts │ │ │ │ ├── context.command.ts │ │ │ │ ├── list.command.ts │ │ │ │ ├── set-status.command.ts │ │ │ │ ├── show.command.ts │ │ │ │ └── start.command.ts │ │ │ ├── index.ts │ │ │ ├── ui │ │ │ │ ├── components │ │ │ │ │ ├── dashboard.component.ts │ │ │ │ │ ├── header.component.ts │ │ │ │ │ ├── index.ts │ │ │ │ │ ├── next-task.component.ts │ │ │ │ │ ├── suggested-steps.component.ts │ │ │ │ │ └── task-detail.component.ts │ │ │ │ └── index.ts │ │ │ └── utils │ │ │ ├── auto-update.ts │ │ │ └── ui.ts │ │ └── tsconfig.json │ ├── docs │ │ ├── archive │ │ │ ├── ai-client-utils-example.mdx │ │ │ ├── ai-development-workflow.mdx │ │ │ ├── command-reference.mdx │ │ │ ├── configuration.mdx │ │ │ ├── cursor-setup.mdx │ │ │ ├── examples.mdx │ │ │ └── Installation.mdx │ │ ├── best-practices │ │ │ ├── advanced-tasks.mdx │ │ │ ├── configuration-advanced.mdx │ │ │ └── index.mdx │ │ ├── capabilities │ │ │ ├── cli-root-commands.mdx │ │ │ ├── index.mdx │ │ │ ├── mcp.mdx │ │ │ └── task-structure.mdx │ │ ├── CHANGELOG.md │ │ ├── docs.json │ │ ├── favicon.svg │ │ ├── getting-started │ │ │ ├── contribute.mdx │ │ │ ├── faq.mdx │ │ │ └── quick-start │ │ │ ├── configuration-quick.mdx │ │ │ ├── execute-quick.mdx │ │ │ ├── installation.mdx │ │ │ ├── moving-forward.mdx │ │ │ ├── prd-quick.mdx │ │ │ ├── quick-start.mdx │ │ │ ├── requirements.mdx │ │ │ ├── rules-quick.mdx │ │ │ └── tasks-quick.mdx │ │ ├── introduction.mdx │ │ ├── licensing.md │ │ ├── logo │ │ │ ├── dark.svg │ │ │ ├── light.svg │ │ │ └── task-master-logo.png │ │ ├── package.json │ │ ├── README.md │ │ ├── style.css │ │ ├── vercel.json │ │ └── whats-new.mdx │ └── extension │ ├── .vscodeignore │ ├── assets │ │ 
├── banner.png │ │ ├── icon-dark.svg │ │ ├── icon-light.svg │ │ ├── icon.png │ │ ├── screenshots │ │ │ ├── kanban-board.png │ │ │ └── task-details.png │ │ └── sidebar-icon.svg │ ├── CHANGELOG.md │ ├── components.json │ ├── docs │ │ ├── extension-CI-setup.md │ │ └── extension-development-guide.md │ ├── esbuild.js │ ├── LICENSE │ ├── package.json │ ├── package.mjs │ ├── package.publish.json │ ├── README.md │ ├── src │ │ ├── components │ │ │ ├── ConfigView.tsx │ │ │ ├── constants.ts │ │ │ ├── TaskDetails │ │ │ │ ├── AIActionsSection.tsx │ │ │ │ ├── DetailsSection.tsx │ │ │ │ ├── PriorityBadge.tsx │ │ │ │ ├── SubtasksSection.tsx │ │ │ │ ├── TaskMetadataSidebar.tsx │ │ │ │ └── useTaskDetails.ts │ │ │ ├── TaskDetailsView.tsx │ │ │ ├── TaskMasterLogo.tsx │ │ │ └── ui │ │ │ ├── badge.tsx │ │ │ ├── breadcrumb.tsx │ │ │ ├── button.tsx │ │ │ ├── card.tsx │ │ │ ├── collapsible.tsx │ │ │ ├── CollapsibleSection.tsx │ │ │ ├── dropdown-menu.tsx │ │ │ ├── label.tsx │ │ │ ├── scroll-area.tsx │ │ │ ├── separator.tsx │ │ │ ├── shadcn-io │ │ │ │ └── kanban │ │ │ │ └── index.tsx │ │ │ └── textarea.tsx │ │ ├── extension.ts │ │ ├── index.ts │ │ ├── lib │ │ │ └── utils.ts │ │ ├── services │ │ │ ├── config-service.ts │ │ │ ├── error-handler.ts │ │ │ ├── notification-preferences.ts │ │ │ ├── polling-service.ts │ │ │ ├── polling-strategies.ts │ │ │ ├── sidebar-webview-manager.ts │ │ │ ├── task-repository.ts │ │ │ ├── terminal-manager.ts │ │ │ └── webview-manager.ts │ │ ├── test │ │ │ └── extension.test.ts │ │ ├── utils │ │ │ ├── configManager.ts │ │ │ ├── connectionManager.ts │ │ │ ├── errorHandler.ts │ │ │ ├── event-emitter.ts │ │ │ ├── logger.ts │ │ │ ├── mcpClient.ts │ │ │ ├── notificationPreferences.ts │ │ │ └── task-master-api │ │ │ ├── cache │ │ │ │ └── cache-manager.ts │ │ │ ├── index.ts │ │ │ ├── mcp-client.ts │ │ │ ├── transformers │ │ │ │ └── task-transformer.ts │ │ │ └── types │ │ │ └── index.ts │ │ └── webview │ │ ├── App.tsx │ │ ├── components │ │ │ ├── AppContent.tsx │ │ │ ├── EmptyState.tsx │ │ │ ├── ErrorBoundary.tsx │ │ │ ├── PollingStatus.tsx │ │ │ ├── PriorityBadge.tsx │ │ │ ├── SidebarView.tsx │ │ │ ├── TagDropdown.tsx │ │ │ ├── TaskCard.tsx │ │ │ ├── TaskEditModal.tsx │ │ │ ├── TaskMasterKanban.tsx │ │ │ ├── ToastContainer.tsx │ │ │ └── ToastNotification.tsx │ │ ├── constants │ │ │ └── index.ts │ │ ├── contexts │ │ │ └── VSCodeContext.tsx │ │ ├── hooks │ │ │ ├── useTaskQueries.ts │ │ │ ├── useVSCodeMessages.ts │ │ │ └── useWebviewHeight.ts │ │ ├── index.css │ │ ├── index.tsx │ │ ├── providers │ │ │ └── QueryProvider.tsx │ │ ├── reducers │ │ │ └── appReducer.ts │ │ ├── sidebar.tsx │ │ ├── types │ │ │ └── index.ts │ │ └── utils │ │ ├── logger.ts │ │ └── toast.ts │ └── tsconfig.json ├── assets │ ├── .windsurfrules │ ├── AGENTS.md │ ├── claude │ │ ├── agents │ │ │ ├── task-checker.md │ │ │ ├── task-executor.md │ │ │ └── task-orchestrator.md │ │ ├── commands │ │ │ └── tm │ │ │ ├── add-dependency │ │ │ │ └── add-dependency.md │ │ │ ├── add-subtask │ │ │ │ ├── add-subtask.md │ │ │ │ └── convert-task-to-subtask.md │ │ │ ├── add-task │ │ │ │ └── add-task.md │ │ │ ├── analyze-complexity │ │ │ │ └── analyze-complexity.md │ │ │ ├── clear-subtasks │ │ │ │ ├── clear-all-subtasks.md │ │ │ │ └── clear-subtasks.md │ │ │ ├── complexity-report │ │ │ │ └── complexity-report.md │ │ │ ├── expand │ │ │ │ ├── expand-all-tasks.md │ │ │ │ └── expand-task.md │ │ │ ├── fix-dependencies │ │ │ │ └── fix-dependencies.md │ │ │ ├── generate │ │ │ │ └── generate-tasks.md │ │ │ ├── help.md │ │ │ ├── init │ │ │ │ ├── 
init-project-quick.md │ │ │ │ └── init-project.md │ │ │ ├── learn.md │ │ │ ├── list │ │ │ │ ├── list-tasks-by-status.md │ │ │ │ ├── list-tasks-with-subtasks.md │ │ │ │ └── list-tasks.md │ │ │ ├── models │ │ │ │ ├── setup-models.md │ │ │ │ └── view-models.md │ │ │ ├── next │ │ │ │ └── next-task.md │ │ │ ├── parse-prd │ │ │ │ ├── parse-prd-with-research.md │ │ │ │ └── parse-prd.md │ │ │ ├── remove-dependency │ │ │ │ └── remove-dependency.md │ │ │ ├── remove-subtask │ │ │ │ └── remove-subtask.md │ │ │ ├── remove-subtasks │ │ │ │ ├── remove-all-subtasks.md │ │ │ │ └── remove-subtasks.md │ │ │ ├── remove-task │ │ │ │ └── remove-task.md │ │ │ ├── set-status │ │ │ │ ├── to-cancelled.md │ │ │ │ ├── to-deferred.md │ │ │ │ ├── to-done.md │ │ │ │ ├── to-in-progress.md │ │ │ │ ├── to-pending.md │ │ │ │ └── to-review.md │ │ │ ├── setup │ │ │ │ ├── install-taskmaster.md │ │ │ │ └── quick-install-taskmaster.md │ │ │ ├── show │ │ │ │ └── show-task.md │ │ │ ├── status │ │ │ │ └── project-status.md │ │ │ ├── sync-readme │ │ │ │ └── sync-readme.md │ │ │ ├── tm-main.md │ │ │ ├── update │ │ │ │ ├── update-single-task.md │ │ │ │ ├── update-task.md │ │ │ │ └── update-tasks-from-id.md │ │ │ ├── utils │ │ │ │ └── analyze-project.md │ │ │ ├── validate-dependencies │ │ │ │ └── validate-dependencies.md │ │ │ └── workflows │ │ │ ├── auto-implement-tasks.md │ │ │ ├── command-pipeline.md │ │ │ └── smart-workflow.md │ │ └── TM_COMMANDS_GUIDE.md │ ├── config.json │ ├── env.example │ ├── example_prd.txt │ ├── gitignore │ ├── kiro-hooks │ │ ├── tm-code-change-task-tracker.kiro.hook │ │ ├── tm-complexity-analyzer.kiro.hook │ │ ├── tm-daily-standup-assistant.kiro.hook │ │ ├── tm-git-commit-task-linker.kiro.hook │ │ ├── tm-pr-readiness-checker.kiro.hook │ │ ├── tm-task-dependency-auto-progression.kiro.hook │ │ └── tm-test-success-task-completer.kiro.hook │ ├── roocode │ │ ├── .roo │ │ │ ├── rules-architect │ │ │ │ └── architect-rules │ │ │ ├── rules-ask │ │ │ │ └── ask-rules │ │ │ ├── rules-code │ │ │ │ └── code-rules │ │ │ ├── rules-debug │ │ │ │ └── debug-rules │ │ │ ├── rules-orchestrator │ │ │ │ └── orchestrator-rules │ │ │ └── rules-test │ │ │ └── test-rules │ │ └── .roomodes │ ├── rules │ │ ├── cursor_rules.mdc │ │ ├── dev_workflow.mdc │ │ ├── self_improve.mdc │ │ ├── taskmaster_hooks_workflow.mdc │ │ └── taskmaster.mdc │ └── scripts_README.md ├── bin │ └── task-master.js ├── biome.json ├── CHANGELOG.md ├── CLAUDE.md ├── context │ ├── chats │ │ ├── add-task-dependencies-1.md │ │ └── max-min-tokens.txt.md │ ├── fastmcp-core.txt │ ├── fastmcp-docs.txt │ ├── MCP_INTEGRATION.md │ ├── mcp-js-sdk-docs.txt │ ├── mcp-protocol-repo.txt │ ├── mcp-protocol-schema-03262025.json │ └── mcp-protocol-spec.txt ├── CONTRIBUTING.md ├── docs │ ├── CLI-COMMANDER-PATTERN.md │ ├── command-reference.md │ ├── configuration.md │ ├── contributor-docs │ │ └── testing-roo-integration.md │ ├── cross-tag-task-movement.md │ ├── examples │ │ └── claude-code-usage.md │ ├── examples.md │ ├── licensing.md │ ├── mcp-provider-guide.md │ ├── mcp-provider.md │ ├── migration-guide.md │ ├── models.md │ ├── providers │ │ └── gemini-cli.md │ ├── README.md │ ├── scripts │ │ └── models-json-to-markdown.js │ ├── task-structure.md │ └── tutorial.md ├── images │ └── logo.png ├── index.js ├── jest.config.js ├── jest.resolver.cjs ├── LICENSE ├── llms-install.md ├── mcp-server │ ├── server.js │ └── src │ ├── core │ │ ├── __tests__ │ │ │ └── context-manager.test.js │ │ ├── context-manager.js │ │ ├── direct-functions │ │ │ ├── add-dependency.js │ │ │ ├── add-subtask.js │ │ 
│ ├── add-tag.js │ │ │ ├── add-task.js │ │ │ ├── analyze-task-complexity.js │ │ │ ├── cache-stats.js │ │ │ ├── clear-subtasks.js │ │ │ ├── complexity-report.js │ │ │ ├── copy-tag.js │ │ │ ├── create-tag-from-branch.js │ │ │ ├── delete-tag.js │ │ │ ├── expand-all-tasks.js │ │ │ ├── expand-task.js │ │ │ ├── fix-dependencies.js │ │ │ ├── generate-task-files.js │ │ │ ├── initialize-project.js │ │ │ ├── list-tags.js │ │ │ ├── list-tasks.js │ │ │ ├── models.js │ │ │ ├── move-task-cross-tag.js │ │ │ ├── move-task.js │ │ │ ├── next-task.js │ │ │ ├── parse-prd.js │ │ │ ├── remove-dependency.js │ │ │ ├── remove-subtask.js │ │ │ ├── remove-task.js │ │ │ ├── rename-tag.js │ │ │ ├── research.js │ │ │ ├── response-language.js │ │ │ ├── rules.js │ │ │ ├── scope-down.js │ │ │ ├── scope-up.js │ │ │ ├── set-task-status.js │ │ │ ├── show-task.js │ │ │ ├── update-subtask-by-id.js │ │ │ ├── update-task-by-id.js │ │ │ ├── update-tasks.js │ │ │ ├── use-tag.js │ │ │ └── validate-dependencies.js │ │ ├── task-master-core.js │ │ └── utils │ │ ├── env-utils.js │ │ └── path-utils.js │ ├── custom-sdk │ │ ├── errors.js │ │ ├── index.js │ │ ├── json-extractor.js │ │ ├── language-model.js │ │ ├── message-converter.js │ │ └── schema-converter.js │ ├── index.js │ ├── logger.js │ ├── providers │ │ └── mcp-provider.js │ └── tools │ ├── add-dependency.js │ ├── add-subtask.js │ ├── add-tag.js │ ├── add-task.js │ ├── analyze.js │ ├── clear-subtasks.js │ ├── complexity-report.js │ ├── copy-tag.js │ ├── delete-tag.js │ ├── expand-all.js │ ├── expand-task.js │ ├── fix-dependencies.js │ ├── generate.js │ ├── get-operation-status.js │ ├── get-task.js │ ├── get-tasks.js │ ├── index.js │ ├── initialize-project.js │ ├── list-tags.js │ ├── models.js │ ├── move-task.js │ ├── next-task.js │ ├── parse-prd.js │ ├── remove-dependency.js │ ├── remove-subtask.js │ ├── remove-task.js │ ├── rename-tag.js │ ├── research.js │ ├── response-language.js │ ├── rules.js │ ├── scope-down.js │ ├── scope-up.js │ ├── set-task-status.js │ ├── update-subtask.js │ ├── update-task.js │ ├── update.js │ ├── use-tag.js │ ├── utils.js │ └── validate-dependencies.js ├── mcp-test.js ├── output.json ├── package-lock.json ├── package.json ├── packages │ ├── build-config │ │ ├── CHANGELOG.md │ │ ├── package.json │ │ ├── src │ │ │ └── tsdown.base.ts │ │ └── tsconfig.json │ └── tm-core │ ├── .gitignore │ ├── CHANGELOG.md │ ├── docs │ │ └── listTasks-architecture.md │ ├── package.json │ ├── POC-STATUS.md │ ├── README.md │ ├── src │ │ ├── auth │ │ │ ├── auth-manager.test.ts │ │ │ ├── auth-manager.ts │ │ │ ├── config.ts │ │ │ ├── credential-store.test.ts │ │ │ ├── credential-store.ts │ │ │ ├── index.ts │ │ │ ├── oauth-service.ts │ │ │ ├── supabase-session-storage.ts │ │ │ └── types.ts │ │ ├── clients │ │ │ ├── index.ts │ │ │ └── supabase-client.ts │ │ ├── config │ │ │ ├── config-manager.spec.ts │ │ │ ├── config-manager.ts │ │ │ ├── index.ts │ │ │ └── services │ │ │ ├── config-loader.service.spec.ts │ │ │ ├── config-loader.service.ts │ │ │ ├── config-merger.service.spec.ts │ │ │ ├── config-merger.service.ts │ │ │ ├── config-persistence.service.spec.ts │ │ │ ├── config-persistence.service.ts │ │ │ ├── environment-config-provider.service.spec.ts │ │ │ ├── environment-config-provider.service.ts │ │ │ ├── index.ts │ │ │ ├── runtime-state-manager.service.spec.ts │ │ │ └── runtime-state-manager.service.ts │ │ ├── constants │ │ │ └── index.ts │ │ ├── entities │ │ │ └── task.entity.ts │ │ ├── errors │ │ │ ├── index.ts │ │ │ └── task-master-error.ts │ │ ├── executors │ │ │ ├── 
base-executor.ts │ │ │ ├── claude-executor.ts │ │ │ ├── executor-factory.ts │ │ │ ├── executor-service.ts │ │ │ ├── index.ts │ │ │ └── types.ts │ │ ├── index.ts │ │ ├── interfaces │ │ │ ├── ai-provider.interface.ts │ │ │ ├── configuration.interface.ts │ │ │ ├── index.ts │ │ │ └── storage.interface.ts │ │ ├── logger │ │ │ ├── factory.ts │ │ │ ├── index.ts │ │ │ └── logger.ts │ │ ├── mappers │ │ │ └── TaskMapper.ts │ │ ├── parser │ │ │ └── index.ts │ │ ├── providers │ │ │ ├── ai │ │ │ │ ├── base-provider.ts │ │ │ │ └── index.ts │ │ │ └── index.ts │ │ ├── repositories │ │ │ ├── supabase-task-repository.ts │ │ │ └── task-repository.interface.ts │ │ ├── services │ │ │ ├── index.ts │ │ │ ├── organization.service.ts │ │ │ ├── task-execution-service.ts │ │ │ └── task-service.ts │ │ ├── storage │ │ │ ├── api-storage.ts │ │ │ ├── file-storage │ │ │ │ ├── file-operations.ts │ │ │ │ ├── file-storage.ts │ │ │ │ ├── format-handler.ts │ │ │ │ ├── index.ts │ │ │ │ └── path-resolver.ts │ │ │ ├── index.ts │ │ │ └── storage-factory.ts │ │ ├── subpath-exports.test.ts │ │ ├── task-master-core.ts │ │ ├── types │ │ │ ├── database.types.ts │ │ │ ├── index.ts │ │ │ └── legacy.ts │ │ └── utils │ │ ├── id-generator.ts │ │ └── index.ts │ ├── tests │ │ ├── integration │ │ │ └── list-tasks.test.ts │ │ ├── mocks │ │ │ └── mock-provider.ts │ │ ├── setup.ts │ │ └── unit │ │ ├── base-provider.test.ts │ │ ├── executor.test.ts │ │ └── smoke.test.ts │ ├── tsconfig.json │ └── vitest.config.ts ├── README-task-master.md ├── README.md ├── scripts │ ├── dev.js │ ├── init.js │ ├── modules │ │ ├── ai-services-unified.js │ │ ├── commands.js │ │ ├── config-manager.js │ │ ├── dependency-manager.js │ │ ├── index.js │ │ ├── prompt-manager.js │ │ ├── supported-models.json │ │ ├── sync-readme.js │ │ ├── task-manager │ │ │ ├── add-subtask.js │ │ │ ├── add-task.js │ │ │ ├── analyze-task-complexity.js │ │ │ ├── clear-subtasks.js │ │ │ ├── expand-all-tasks.js │ │ │ ├── expand-task.js │ │ │ ├── find-next-task.js │ │ │ ├── generate-task-files.js │ │ │ ├── is-task-dependent.js │ │ │ ├── list-tasks.js │ │ │ ├── migrate.js │ │ │ ├── models.js │ │ │ ├── move-task.js │ │ │ ├── parse-prd │ │ │ │ ├── index.js │ │ │ │ ├── parse-prd-config.js │ │ │ │ ├── parse-prd-helpers.js │ │ │ │ ├── parse-prd-non-streaming.js │ │ │ │ ├── parse-prd-streaming.js │ │ │ │ └── parse-prd.js │ │ │ ├── remove-subtask.js │ │ │ ├── remove-task.js │ │ │ ├── research.js │ │ │ ├── response-language.js │ │ │ ├── scope-adjustment.js │ │ │ ├── set-task-status.js │ │ │ ├── tag-management.js │ │ │ ├── task-exists.js │ │ │ ├── update-single-task-status.js │ │ │ ├── update-subtask-by-id.js │ │ │ ├── update-task-by-id.js │ │ │ └── update-tasks.js │ │ ├── task-manager.js │ │ ├── ui.js │ │ ├── update-config-tokens.js │ │ ├── utils │ │ │ ├── contextGatherer.js │ │ │ ├── fuzzyTaskSearch.js │ │ │ └── git-utils.js │ │ └── utils.js │ ├── task-complexity-report.json │ ├── test-claude-errors.js │ └── test-claude.js ├── src │ ├── ai-providers │ │ ├── anthropic.js │ │ ├── azure.js │ │ ├── base-provider.js │ │ ├── bedrock.js │ │ ├── claude-code.js │ │ ├── custom-sdk │ │ │ ├── claude-code │ │ │ │ ├── errors.js │ │ │ │ ├── index.js │ │ │ │ ├── json-extractor.js │ │ │ │ ├── language-model.js │ │ │ │ ├── message-converter.js │ │ │ │ └── types.js │ │ │ └── grok-cli │ │ │ ├── errors.js │ │ │ ├── index.js │ │ │ ├── json-extractor.js │ │ │ ├── language-model.js │ │ │ ├── message-converter.js │ │ │ └── types.js │ │ ├── gemini-cli.js │ │ ├── google-vertex.js │ │ ├── google.js │ │ ├── grok-cli.js │ │ ├── 
groq.js │ │ ├── index.js │ │ ├── ollama.js │ │ ├── openai.js │ │ ├── openrouter.js │ │ ├── perplexity.js │ │ └── xai.js │ ├── constants │ │ ├── commands.js │ │ ├── paths.js │ │ ├── profiles.js │ │ ├── providers.js │ │ ├── rules-actions.js │ │ ├── task-priority.js │ │ └── task-status.js │ ├── profiles │ │ ├── amp.js │ │ ├── base-profile.js │ │ ├── claude.js │ │ ├── cline.js │ │ ├── codex.js │ │ ├── cursor.js │ │ ├── gemini.js │ │ ├── index.js │ │ ├── kilo.js │ │ ├── kiro.js │ │ ├── opencode.js │ │ ├── roo.js │ │ ├── trae.js │ │ ├── vscode.js │ │ ├── windsurf.js │ │ └── zed.js │ ├── progress │ │ ├── base-progress-tracker.js │ │ ├── cli-progress-factory.js │ │ ├── parse-prd-tracker.js │ │ ├── progress-tracker-builder.js │ │ └── tracker-ui.js │ ├── prompts │ │ ├── add-task.json │ │ ├── analyze-complexity.json │ │ ├── expand-task.json │ │ ├── parse-prd.json │ │ ├── README.md │ │ ├── research.json │ │ ├── schemas │ │ │ ├── parameter.schema.json │ │ │ ├── prompt-template.schema.json │ │ │ ├── README.md │ │ │ └── variant.schema.json │ │ ├── update-subtask.json │ │ ├── update-task.json │ │ └── update-tasks.json │ ├── provider-registry │ │ └── index.js │ ├── task-master.js │ ├── ui │ │ ├── confirm.js │ │ ├── indicators.js │ │ └── parse-prd.js │ └── utils │ ├── asset-resolver.js │ ├── create-mcp-config.js │ ├── format.js │ ├── getVersion.js │ ├── logger-utils.js │ ├── manage-gitignore.js │ ├── path-utils.js │ ├── profiles.js │ ├── rule-transformer.js │ ├── stream-parser.js │ └── timeout-manager.js ├── test-clean-tags.js ├── test-config-manager.js ├── test-prd.txt ├── test-tag-functions.js ├── test-version-check-full.js ├── test-version-check.js ├── tests │ ├── e2e │ │ ├── e2e_helpers.sh │ │ ├── parse_llm_output.cjs │ │ ├── run_e2e.sh │ │ ├── run_fallback_verification.sh │ │ └── test_llm_analysis.sh │ ├── fixture │ │ └── test-tasks.json │ ├── fixtures │ │ ├── .taskmasterconfig │ │ ├── sample-claude-response.js │ │ ├── sample-prd.txt │ │ └── sample-tasks.js │ ├── integration │ │ ├── claude-code-optional.test.js │ │ ├── cli │ │ │ ├── commands.test.js │ │ │ ├── complex-cross-tag-scenarios.test.js │ │ │ └── move-cross-tag.test.js │ │ ├── manage-gitignore.test.js │ │ ├── mcp-server │ │ │ └── direct-functions.test.js │ │ ├── move-task-cross-tag.integration.test.js │ │ ├── move-task-simple.integration.test.js │ │ └── profiles │ │ ├── amp-init-functionality.test.js │ │ ├── claude-init-functionality.test.js │ │ ├── cline-init-functionality.test.js │ │ ├── codex-init-functionality.test.js │ │ ├── cursor-init-functionality.test.js │ │ ├── gemini-init-functionality.test.js │ │ ├── opencode-init-functionality.test.js │ │ ├── roo-files-inclusion.test.js │ │ ├── roo-init-functionality.test.js │ │ ├── rules-files-inclusion.test.js │ │ ├── trae-init-functionality.test.js │ │ ├── vscode-init-functionality.test.js │ │ └── windsurf-init-functionality.test.js │ ├── manual │ │ ├── progress │ │ │ ├── parse-prd-analysis.js │ │ │ ├── test-parse-prd.js │ │ │ └── TESTING_GUIDE.md │ │ └── prompts │ │ ├── prompt-test.js │ │ └── README.md │ ├── README.md │ ├── setup.js │ └── unit │ ├── ai-providers │ │ ├── claude-code.test.js │ │ ├── custom-sdk │ │ │ └── claude-code │ │ │ └── language-model.test.js │ │ ├── gemini-cli.test.js │ │ ├── mcp-components.test.js │ │ └── openai.test.js │ ├── ai-services-unified.test.js │ ├── commands.test.js │ ├── config-manager.test.js │ ├── config-manager.test.mjs │ ├── dependency-manager.test.js │ ├── init.test.js │ ├── initialize-project.test.js │ ├── kebab-case-validation.test.js │ ├── 
manage-gitignore.test.js │ ├── mcp │ │ └── tools │ │ ├── __mocks__ │ │ │ └── move-task.js │ │ ├── add-task.test.js │ │ ├── analyze-complexity.test.js │ │ ├── expand-all.test.js │ │ ├── get-tasks.test.js │ │ ├── initialize-project.test.js │ │ ├── move-task-cross-tag-options.test.js │ │ ├── move-task-cross-tag.test.js │ │ └── remove-task.test.js │ ├── mcp-providers │ │ ├── mcp-components.test.js │ │ └── mcp-provider.test.js │ ├── parse-prd.test.js │ ├── profiles │ │ ├── amp-integration.test.js │ │ ├── claude-integration.test.js │ │ ├── cline-integration.test.js │ │ ├── codex-integration.test.js │ │ ├── cursor-integration.test.js │ │ ├── gemini-integration.test.js │ │ ├── kilo-integration.test.js │ │ ├── kiro-integration.test.js │ │ ├── mcp-config-validation.test.js │ │ ├── opencode-integration.test.js │ │ ├── profile-safety-check.test.js │ │ ├── roo-integration.test.js │ │ ├── rule-transformer-cline.test.js │ │ ├── rule-transformer-cursor.test.js │ │ ├── rule-transformer-gemini.test.js │ │ ├── rule-transformer-kilo.test.js │ │ ├── rule-transformer-kiro.test.js │ │ ├── rule-transformer-opencode.test.js │ │ ├── rule-transformer-roo.test.js │ │ ├── rule-transformer-trae.test.js │ │ ├── rule-transformer-vscode.test.js │ │ ├── rule-transformer-windsurf.test.js │ │ ├── rule-transformer-zed.test.js │ │ ├── rule-transformer.test.js │ │ ├── selective-profile-removal.test.js │ │ ├── subdirectory-support.test.js │ │ ├── trae-integration.test.js │ │ ├── vscode-integration.test.js │ │ ├── windsurf-integration.test.js │ │ └── zed-integration.test.js │ ├── progress │ │ └── base-progress-tracker.test.js │ ├── prompt-manager.test.js │ ├── prompts │ │ └── expand-task-prompt.test.js │ ├── providers │ │ └── provider-registry.test.js │ ├── scripts │ │ └── modules │ │ ├── commands │ │ │ ├── move-cross-tag.test.js │ │ │ └── README.md │ │ ├── dependency-manager │ │ │ ├── circular-dependencies.test.js │ │ │ ├── cross-tag-dependencies.test.js │ │ │ └── fix-dependencies-command.test.js │ │ ├── task-manager │ │ │ ├── add-subtask.test.js │ │ │ ├── add-task.test.js │ │ │ ├── analyze-task-complexity.test.js │ │ │ ├── clear-subtasks.test.js │ │ │ ├── complexity-report-tag-isolation.test.js │ │ │ ├── expand-all-tasks.test.js │ │ │ ├── expand-task.test.js │ │ │ ├── find-next-task.test.js │ │ │ ├── generate-task-files.test.js │ │ │ ├── list-tasks.test.js │ │ │ ├── move-task-cross-tag.test.js │ │ │ ├── move-task.test.js │ │ │ ├── parse-prd.test.js │ │ │ ├── remove-subtask.test.js │ │ │ ├── remove-task.test.js │ │ │ ├── research.test.js │ │ │ ├── scope-adjustment.test.js │ │ │ ├── set-task-status.test.js │ │ │ ├── setup.js │ │ │ ├── update-single-task-status.test.js │ │ │ ├── update-subtask-by-id.test.js │ │ │ ├── update-task-by-id.test.js │ │ │ └── update-tasks.test.js │ │ ├── ui │ │ │ └── cross-tag-error-display.test.js │ │ └── utils-tag-aware-paths.test.js │ ├── task-finder.test.js │ ├── task-manager │ │ ├── clear-subtasks.test.js │ │ ├── move-task.test.js │ │ ├── tag-boundary.test.js │ │ └── tag-management.test.js │ ├── task-master.test.js │ ├── ui │ │ └── indicators.test.js │ ├── ui.test.js │ ├── utils-strip-ansi.test.js │ └── utils.test.js ├── tsconfig.json ├── tsdown.config.ts └── turbo.json
```

# Files

--------------------------------------------------------------------------------
/scripts/modules/task-manager/update-task-by-id.js:
--------------------------------------------------------------------------------

```javascript
import fs from 'fs';
import path from 'path';
import chalk from 'chalk';
import boxen from 'boxen';
import Table from 'cli-table3';
import { z } from 'zod'; // Keep Zod for post-parse validation
import {
	log as consoleLog,
	readJSON,
	writeJSON,
	truncate,
	isSilentMode,
	flattenTasksWithSubtasks,
	findProjectRoot
} from '../utils.js';
import {
	getStatusWithColor,
	startLoadingIndicator,
	stopLoadingIndicator,
	displayAiUsageSummary
} from '../ui.js';
import { generateTextService } from '../ai-services-unified.js';
import {
	getDebugFlag,
	isApiKeySet,
	hasCodebaseAnalysis
} from '../config-manager.js';
import { getPromptManager } from '../prompt-manager.js';
import { ContextGatherer } from '../utils/contextGatherer.js';
import { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';

// Zod schema for post-parsing validation of the updated task object
const updatedTaskSchema = z
	.object({
		id: z.number().int(),
		title: z.string(), // Title should be preserved, but check it exists
		description: z.string(),
		status: z.string(),
		dependencies: z.array(z.union([z.number().int(), z.string()])),
		priority: z.string().nullable().default('medium'),
		details: z.string().nullable().default(''),
		testStrategy: z.string().nullable().default(''),
		subtasks: z
			.array(
				z.object({
					id: z
						.number()
						.int()
						.positive()
						.describe('Sequential subtask ID starting from 1'),
					title: z.string(),
					description: z.string(),
					status: z.string(),
					dependencies: z.array(z.number().int()).nullable().default([]),
					details: z.string().nullable().default(''),
					testStrategy: z.string().nullable().default('')
				})
			)
			.nullable()
			.default([])
	})
	.strip(); // Allows parsing even if AI adds extra fields, but validation focuses on schema
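// Illustration only (not part of the original module): a minimal object that
// passes the schema above. `.strip()` drops unknown keys, and the `.default()`
// calls fill in fields that are missing entirely, e.g. an omitted `priority`
// becomes 'medium' and omitted `subtasks` becomes [].
//
//   const check = updatedTaskSchema.safeParse({
//     id: 5,
//     title: 'Implement auth',
//     description: 'Add the login flow',
//     status: 'pending',
//     dependencies: [1, '2.1']
//   });
//   // check.success === true; check.data.priority === 'medium'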
/**
 * Parses a single updated task object from AI's text response.
 * @param {string} text - Response text from AI.
 * @param {number} expectedTaskId - The ID of the task expected.
 * @param {Function | Object} logFn - Logging function or MCP logger.
 * @param {boolean} isMCP - Flag indicating MCP context.
 * @returns {Object} Parsed and validated task object.
 * @throws {Error} If parsing or validation fails.
 */
function parseUpdatedTaskFromText(text, expectedTaskId, logFn, isMCP) {
	// Report helper consistent with the established pattern
	const report = (level, ...args) => {
		if (isMCP) {
			if (typeof logFn[level] === 'function') logFn[level](...args);
			else logFn.info(...args);
		} else if (!isSilentMode()) {
			logFn(level, ...args);
		}
	};

	report(
		'info',
		'Attempting to parse updated task object from text response...'
	);

	if (!text || text.trim() === '')
		throw new Error('AI response text is empty.');

	let cleanedResponse = text.trim();
	const originalResponseForDebug = cleanedResponse;
	let parseMethodUsed = 'raw'; // Keep track of which method worked

	// --- NEW Step 1: Try extracting between {} first ---
	const firstBraceIndex = cleanedResponse.indexOf('{');
	const lastBraceIndex = cleanedResponse.lastIndexOf('}');
	let potentialJsonFromBraces = null;

	if (firstBraceIndex !== -1 && lastBraceIndex > firstBraceIndex) {
		potentialJsonFromBraces = cleanedResponse.substring(
			firstBraceIndex,
			lastBraceIndex + 1
		);
		if (potentialJsonFromBraces.length <= 2) {
			potentialJsonFromBraces = null; // Ignore empty braces {}
		}
	}

	// If {} extraction yielded something, try parsing it immediately
	if (potentialJsonFromBraces) {
		try {
			const testParse = JSON.parse(potentialJsonFromBraces);
			// It worked! Use this as the primary cleaned response.
			cleanedResponse = potentialJsonFromBraces;
			parseMethodUsed = 'braces';
		} catch (e) {
			report(
				'info',
				'Content between {} looked promising but failed initial parse. Proceeding to other methods.'
			);
			// Reset cleanedResponse to original if brace parsing failed
			cleanedResponse = originalResponseForDebug;
		}
	}

	// --- Step 2: If brace parsing didn't work or wasn't applicable, try code block extraction ---
	if (parseMethodUsed === 'raw') {
		const codeBlockMatch = cleanedResponse.match(
			/```(?:json|javascript)?\s*([\s\S]*?)\s*```/i
		);
		if (codeBlockMatch) {
			cleanedResponse = codeBlockMatch[1].trim();
			parseMethodUsed = 'codeblock';
			report('info', 'Extracted JSON content from Markdown code block.');
		} else {
			// --- Step 3: If code block failed, try stripping prefixes ---
			const commonPrefixes = [
				'json\n',
				'javascript\n'
				// ... other prefixes ...
			];
			let prefixFound = false;
			for (const prefix of commonPrefixes) {
				if (cleanedResponse.toLowerCase().startsWith(prefix)) {
					cleanedResponse = cleanedResponse.substring(prefix.length).trim();
					parseMethodUsed = 'prefix';
					report('info', `Stripped prefix: "${prefix.trim()}"`);
					prefixFound = true;
					break;
				}
			}
			if (!prefixFound) {
				report(
					'warn',
					'Response does not appear to contain {}, code block, or known prefix. Attempting raw parse.'
				);
			}
		}
	}

	// --- Step 4: Attempt final parse ---
	let parsedTask;
	try {
		parsedTask = JSON.parse(cleanedResponse);
	} catch (parseError) {
		report('error', `Failed to parse JSON object: ${parseError.message}`);
		report(
			'error',
			`Problematic JSON string (first 500 chars): ${cleanedResponse.substring(0, 500)}`
		);
		report(
			'error',
			`Original Raw Response (first 500 chars): ${originalResponseForDebug.substring(0, 500)}`
		);
		throw new Error(
			`Failed to parse JSON response object: ${parseError.message}`
		);
	}

	if (!parsedTask || typeof parsedTask !== 'object') {
		report(
			'error',
			`Parsed content is not an object. Type: ${typeof parsedTask}`
		);
		report(
			'error',
			`Parsed content sample: ${JSON.stringify(parsedTask).substring(0, 200)}`
		);
		throw new Error('Parsed AI response is not a valid JSON object.');
	}

	// Preprocess the task to ensure subtasks have proper structure
	const preprocessedTask = {
		...parsedTask,
		status: parsedTask.status || 'pending',
		dependencies: Array.isArray(parsedTask.dependencies)
			? parsedTask.dependencies
			: [],
		details:
			typeof parsedTask.details === 'string'
				? parsedTask.details
				: String(parsedTask.details || ''),
		testStrategy:
			typeof parsedTask.testStrategy === 'string'
				? parsedTask.testStrategy
				: String(parsedTask.testStrategy || ''),
		// Ensure subtasks is an array and each subtask has required fields
		subtasks: Array.isArray(parsedTask.subtasks)
			? parsedTask.subtasks.map((subtask) => ({
					...subtask,
					title: subtask.title || '',
					description: subtask.description || '',
					status: subtask.status || 'pending',
					dependencies: Array.isArray(subtask.dependencies)
						? subtask.dependencies
						: [],
					details:
						typeof subtask.details === 'string'
							? subtask.details
							: String(subtask.details || ''),
					testStrategy:
						typeof subtask.testStrategy === 'string'
							? subtask.testStrategy
							: String(subtask.testStrategy || '')
				}))
			: []
	};

	// Validate the parsed task object using Zod
	const validationResult = updatedTaskSchema.safeParse(preprocessedTask);
	if (!validationResult.success) {
		report('error', 'Parsed task object failed Zod validation.');
		validationResult.error.errors.forEach((err) => {
			report('error', ` - Field '${err.path.join('.')}': ${err.message}`);
		});
		throw new Error(
			`AI response failed task structure validation: ${validationResult.error.message}`
		);
	}

	// Final check: ensure ID matches expected ID (AI might hallucinate)
	if (validationResult.data.id !== expectedTaskId) {
		report(
			'warn',
			`AI returned task with ID ${validationResult.data.id}, but expected ${expectedTaskId}. Overwriting ID.`
		);
		validationResult.data.id = expectedTaskId; // Enforce correct ID
	}

	report('info', 'Successfully validated updated task structure.');
	return validationResult.data; // Return the validated task data
}
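// Illustration only (not in the original file): for a reply such as
//   'Sure, here is the task: {"id": 4, "title": "Fix login", "description": "...", "status": "pending", "dependencies": []}'
// Step 1 finds the outermost {...} span and parses it directly, so the
// code-block and prefix-stripping fallbacks (Steps 2-3) never run.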
/**
 * Update a task by ID with new information using the unified AI service.
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {number} taskId - ID of the task to update
 * @param {string} prompt - Prompt for generating updated task information
 * @param {boolean} [useResearch=false] - Whether to use the research AI role.
 * @param {Object} context - Context object containing session and mcpLog.
 * @param {Object} [context.session] - Session object from MCP server.
 * @param {Object} [context.mcpLog] - MCP logger object.
 * @param {string} [context.projectRoot] - Project root path.
 * @param {string} [context.tag] - Tag for the task
 * @param {string} [outputFormat='text'] - Output format ('text' or 'json').
 * @param {boolean} [appendMode=false] - If true, append to details instead of full update.
 * @returns {Promise<Object|null>} - The updated task or null if update failed.
 */
async function updateTaskById(
	tasksPath,
	taskId,
	prompt,
	useResearch = false,
	context = {},
	outputFormat = 'text',
	appendMode = false
) {
	const { session, mcpLog, projectRoot: providedProjectRoot, tag } = context;
	const logFn = mcpLog || consoleLog;
	const isMCP = !!mcpLog;

	// Use report helper for logging
	const report = (level, ...args) => {
		if (isMCP) {
			if (typeof logFn[level] === 'function') logFn[level](...args);
			else logFn.info(...args);
		} else if (!isSilentMode()) {
			logFn(level, ...args);
		}
	};

	try {
		report('info', `Updating single task ${taskId} with prompt: "${prompt}"`);

		// --- Input Validations (Keep existing) ---
		if (!Number.isInteger(taskId) || taskId <= 0)
			throw new Error(
				`Invalid task ID: ${taskId}. Task ID must be a positive integer.`
			);
		if (!prompt || typeof prompt !== 'string' || prompt.trim() === '')
			throw new Error('Prompt cannot be empty.');
		if (useResearch && !isApiKeySet('perplexity', session)) {
			report(
				'warn',
				'Perplexity research requested but API key not set. Falling back.'
			);
			if (outputFormat === 'text')
				console.log(
					chalk.yellow('Perplexity AI not available. Falling back to main AI.')
				);
			useResearch = false;
		}
		if (!fs.existsSync(tasksPath))
			throw new Error(`Tasks file not found: ${tasksPath}`);
		// --- End Input Validations ---

		// Determine project root
		const projectRoot = providedProjectRoot || findProjectRoot();
		if (!projectRoot) {
			throw new Error('Could not determine project root directory');
		}

		// --- Task Loading and Status Check (Keep existing) ---
		const data = readJSON(tasksPath, projectRoot, tag);
		if (!data || !data.tasks)
			throw new Error(`No valid tasks found in ${tasksPath}.`);
		const taskIndex = data.tasks.findIndex((task) => task.id === taskId);
		if (taskIndex === -1) throw new Error(`Task with ID ${taskId} not found.`);
		const taskToUpdate = data.tasks[taskIndex];
		if (taskToUpdate.status === 'done' || taskToUpdate.status === 'completed') {
			report(
				'warn',
				`Task ${taskId} is already marked as done and cannot be updated`
			);

			// Only show warning box for text output (CLI)
			if (outputFormat === 'text') {
				console.log(
					boxen(
						chalk.yellow(
							`Task ${taskId} is already marked as ${taskToUpdate.status} and cannot be updated.`
						) +
							'\n\n' +
							chalk.white(
								'Completed tasks are locked to maintain consistency. To modify a completed task, you must first:'
							) +
							'\n' +
							chalk.white(
								'1. Change its status to "pending" or "in-progress"'
							) +
							'\n' +
							chalk.white('2. Then run the update-task command'),
						{ padding: 1, borderColor: 'yellow', borderStyle: 'round' }
					)
				);
			}
			return null;
		}
		// --- End Task Loading ---

		// --- Context Gathering ---
		let gatheredContext = '';
		try {
			const contextGatherer = new ContextGatherer(projectRoot, tag);
			const allTasksFlat = flattenTasksWithSubtasks(data.tasks);
			const fuzzySearch = new FuzzyTaskSearch(allTasksFlat, 'update-task');
			const searchQuery = `${taskToUpdate.title} ${taskToUpdate.description} ${prompt}`;
			const searchResults = fuzzySearch.findRelevantTasks(searchQuery, {
				maxResults: 5,
				includeSelf: true
			});
			const relevantTaskIds = fuzzySearch.getTaskIds(searchResults);
			const finalTaskIds = [
				...new Set([taskId.toString(), ...relevantTaskIds])
			];
			if (finalTaskIds.length > 0) {
				const contextResult = await contextGatherer.gather({
					tasks: finalTaskIds,
					format: 'research'
				});
				gatheredContext = contextResult.context || '';
			}
		} catch (contextError) {
			report('warn', `Could not gather context: ${contextError.message}`);
		}
		// --- End Context Gathering ---

		// --- Display Task Info (CLI Only - Keep existing) ---
		if (outputFormat === 'text') {
			// Show the task that will be updated
			const table = new Table({
				head: [
					chalk.cyan.bold('ID'),
					chalk.cyan.bold('Title'),
					chalk.cyan.bold('Status')
				],
				colWidths: [5, 60, 10]
			});
			table.push([
				taskToUpdate.id,
				truncate(taskToUpdate.title, 57),
				getStatusWithColor(taskToUpdate.status)
			]);
			console.log(
				boxen(chalk.white.bold(`Updating Task #${taskId}`), {
					padding: 1,
					borderColor: 'blue',
					borderStyle: 'round',
					margin: { top: 1, bottom: 0 }
				})
			);
			console.log(table.toString());

			// Display a message about how completed subtasks are handled
			console.log(
				boxen(
					chalk.cyan.bold('How Completed Subtasks Are Handled:') +
						'\n\n' +
						chalk.white(
							'• Subtasks marked as "done" or "completed" will be preserved\n'
						) +
						chalk.white(
							'• New subtasks will build upon what has already been completed\n'
						) +
						chalk.white(
							'• If completed work needs revision, a new subtask will be created instead of modifying done items\n'
						) +
						chalk.white(
							'• This approach maintains a clear record of completed work and new requirements'
						),
					{
						padding: 1,
						borderColor: 'blue',
						borderStyle: 'round',
						margin: { top: 1, bottom: 1 }
					}
				)
			);
		}

		// --- Build Prompts using PromptManager ---
		const promptManager = getPromptManager();
		const promptParams = {
			task: taskToUpdate,
			taskJson: JSON.stringify(taskToUpdate, null, 2),
			updatePrompt: prompt,
			appendMode: appendMode,
			useResearch: useResearch,
			currentDetails: taskToUpdate.details || '(No existing details)',
			gatheredContext: gatheredContext || '',
			hasCodebaseAnalysis: hasCodebaseAnalysis(
				useResearch,
				projectRoot,
				session
			),
			projectRoot: projectRoot
		};

		const variantKey = appendMode
			? 'append'
			: useResearch
				? 'research'
				: 'default';

		report(
			'info',
			`Loading prompt template with variant: ${variantKey}, appendMode: ${appendMode}, useResearch: ${useResearch}`
		);

		let systemPrompt;
		let userPrompt;
		try {
			const promptResult = await promptManager.loadPrompt(
				'update-task',
				promptParams,
				variantKey
			);
			report(
				'info',
				`Prompt result type: ${typeof promptResult}, keys: ${promptResult ? Object.keys(promptResult).join(', ') : 'null'}`
			);
			// Extract prompts - loadPrompt returns { systemPrompt, userPrompt, metadata }
			systemPrompt = promptResult.systemPrompt;
			userPrompt = promptResult.userPrompt;
			report(
				'info',
				`Loaded prompts - systemPrompt length: ${systemPrompt?.length}, userPrompt length: ${userPrompt?.length}`
			);
		} catch (error) {
			report('error', `Failed to load prompt template: ${error.message}`);
			throw new Error(`Failed to load prompt template: ${error.message}`);
		}

		// If prompts are still not set, throw an error
		if (!systemPrompt || !userPrompt) {
			throw new Error(
				`Failed to load prompts: systemPrompt=${!!systemPrompt}, userPrompt=${!!userPrompt}`
			);
		}
		// --- End Build Prompts ---

		let loadingIndicator = null;
		let aiServiceResponse = null;

		if (!isMCP && outputFormat === 'text') {
			loadingIndicator = startLoadingIndicator(
				useResearch ? 'Updating task with research...\n' : 'Updating task...\n'
			);
		}

		try {
			const serviceRole = useResearch ? 'research' : 'main';
			aiServiceResponse = await generateTextService({
				role: serviceRole,
				session: session,
				projectRoot: projectRoot,
				systemPrompt: systemPrompt,
				prompt: userPrompt,
				commandName: 'update-task',
				outputType: isMCP ? 'mcp' : 'cli'
			});

			if (loadingIndicator)
				stopLoadingIndicator(loadingIndicator, 'AI update complete.');

			if (appendMode) {
				// Append mode: handle as plain text
				const generatedContentString = aiServiceResponse.mainResult;
				let newlyAddedSnippet = '';

				if (generatedContentString && generatedContentString.trim()) {
					const timestamp = new Date().toISOString();
					const formattedBlock = `<info added on ${timestamp}>\n${generatedContentString.trim()}\n</info added on ${timestamp}>`;
					newlyAddedSnippet = formattedBlock;
					// Append to task details
					taskToUpdate.details =
						(taskToUpdate.details ? taskToUpdate.details + '\n' : '') +
						formattedBlock;
				} else {
					report(
						'warn',
						'AI response was empty or whitespace after trimming. Original details remain unchanged.'
					);
					newlyAddedSnippet = 'No new details were added by the AI.';
				}

				// Update description with timestamp if prompt is short
				if (prompt.length < 100) {
					if (taskToUpdate.description) {
						taskToUpdate.description += ` [Updated: ${new Date().toLocaleDateString()}]`;
					}
				}

				// Write the updated task back to file
				data.tasks[taskIndex] = taskToUpdate;
				writeJSON(tasksPath, data, projectRoot, tag);
				report('success', `Successfully appended to task ${taskId}`);

				// Display success message for CLI
				if (outputFormat === 'text') {
					console.log(
						boxen(
							chalk.green(`Successfully appended to task #${taskId}`) +
								'\n\n' +
								chalk.white.bold('Title:') +
								' ' +
								taskToUpdate.title +
								'\n\n' +
								chalk.white.bold('Newly Added Content:') +
								'\n' +
								chalk.white(newlyAddedSnippet),
							{ padding: 1, borderColor: 'green', borderStyle: 'round' }
						)
					);
				}

				// Display AI usage telemetry for CLI users
				if (outputFormat === 'text' && aiServiceResponse.telemetryData) {
					displayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');
				}

				// Return the updated task
				return {
					updatedTask: taskToUpdate,
					telemetryData: aiServiceResponse.telemetryData,
					tagInfo: aiServiceResponse.tagInfo
				};
			}
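			// Illustration only (not in the original file): in append mode each call
			// adds one timestamped block to `details`, following the template above:
			//   <info added on 2025-01-01T12:00:00.000Z>
			//   ...AI-generated text...
			//   </info added on 2025-01-01T12:00:00.000Z>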
			// Full update mode: Use mainResult (text) for parsing
			const updatedTask = parseUpdatedTaskFromText(
				aiServiceResponse.mainResult,
				taskId,
				logFn,
				isMCP
			);

			// --- Task Validation/Correction (Keep existing logic) ---
			if (!updatedTask || typeof updatedTask !== 'object')
				throw new Error('Received invalid task object from AI.');
			if (!updatedTask.title || !updatedTask.description)
				throw new Error('Updated task missing required fields.');

			// Preserve ID if AI changed it
			if (updatedTask.id !== taskId) {
				report('warn', `AI changed task ID. Restoring original ID ${taskId}.`);
				updatedTask.id = taskId;
			}

			// Preserve status if AI changed it
			if (
				updatedTask.status !== taskToUpdate.status &&
				!prompt.toLowerCase().includes('status')
			) {
				report(
					'warn',
					`AI changed task status. Restoring original status '${taskToUpdate.status}'.`
				);
				updatedTask.status = taskToUpdate.status;
			}

			// Fix subtask IDs if they exist (ensure they are numeric and sequential)
			if (updatedTask.subtasks && Array.isArray(updatedTask.subtasks)) {
				let currentSubtaskId = 1;
				updatedTask.subtasks = updatedTask.subtasks.map((subtask) => {
					// Fix AI-generated subtask IDs that might be strings or use parent ID as prefix
					const correctedSubtask = {
						...subtask,
						id: currentSubtaskId, // Override AI-generated ID with correct sequential ID
						dependencies: Array.isArray(subtask.dependencies)
							? subtask.dependencies
									.map((dep) =>
										typeof dep === 'string' ? parseInt(dep, 10) : dep
									)
									.filter(
										(depId) =>
											!Number.isNaN(depId) &&
											depId >= 1 &&
											depId < currentSubtaskId
									)
							: [],
						status: subtask.status || 'pending'
					};
					currentSubtaskId++;
					return correctedSubtask;
				});
				report(
					'info',
					`Fixed ${updatedTask.subtasks.length} subtask IDs to be sequential numeric IDs.`
				);
			}

			// Preserve completed subtasks (Keep existing logic)
			if (taskToUpdate.subtasks?.length > 0) {
				if (!updatedTask.subtasks) {
					report(
						'warn',
						'Subtasks removed by AI. Restoring original subtasks.'
					);
					updatedTask.subtasks = taskToUpdate.subtasks;
				} else {
					const completedOriginal = taskToUpdate.subtasks.filter(
						(st) => st.status === 'done' || st.status === 'completed'
					);
					completedOriginal.forEach((compSub) => {
						const updatedSub = updatedTask.subtasks.find(
							(st) => st.id === compSub.id
						);
						if (
							!updatedSub ||
							JSON.stringify(updatedSub) !== JSON.stringify(compSub)
						) {
							report(
								'warn',
								`Completed subtask ${compSub.id} was modified or removed. Restoring.`
							);
							// Remove potentially modified version
							updatedTask.subtasks = updatedTask.subtasks.filter(
								(st) => st.id !== compSub.id
							);
							// Add back original
							updatedTask.subtasks.push(compSub);
						}
					});
					// Deduplicate just in case
					const subtaskIds = new Set();
					updatedTask.subtasks = updatedTask.subtasks.filter((st) => {
						if (!subtaskIds.has(st.id)) {
							subtaskIds.add(st.id);
							return true;
						}
						report('warn', `Duplicate subtask ID ${st.id} removed.`);
						return false;
					});
				}
			}
			// --- End Task Validation/Correction ---

			// --- Update Task Data (Keep existing) ---
			data.tasks[taskIndex] = updatedTask;
			// --- End Update Task Data ---

			// --- Write File and Generate (Unchanged) ---
			writeJSON(tasksPath, data, projectRoot, tag);
			report('success', `Successfully updated task ${taskId}`);
			// await generateTaskFiles(tasksPath, path.dirname(tasksPath));
			// --- End Write File ---

			// --- Display CLI Telemetry ---
			if (outputFormat === 'text' && aiServiceResponse.telemetryData) {
				displayAiUsageSummary(aiServiceResponse.telemetryData, 'cli'); // <<< ADD display
			}

			// --- Return Success with Telemetry ---
			return {
				updatedTask: updatedTask, // Return the updated task object
				telemetryData: aiServiceResponse.telemetryData, // <<< ADD telemetryData
				tagInfo: aiServiceResponse.tagInfo
			};
		} catch (error) {
			// Catch errors from generateTextService
			if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
			report('error', `Error during AI service call: ${error.message}`);
			if (error.message.includes('API key')) {
				report('error', 'Please ensure API keys are configured correctly.');
			}
			throw error; // Re-throw error
		}
	} catch (error) {
		// General error catch
		// --- General Error Handling (Keep existing) ---
		report('error', `Error updating task: ${error.message}`);
		if (outputFormat === 'text') {
			console.error(chalk.red(`Error: ${error.message}`));
			// ... helpful hints ...
			if (getDebugFlag(session)) console.error(error);
			process.exit(1);
		} else {
			throw error; // Re-throw for MCP
		}
		return null; // Indicate failure in CLI case if process doesn't exit
		// --- End General Error Handling ---
	}
}

export default updateTaskById;
```
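For orientation, here is a minimal sketch of calling `updateTaskById` directly. The tasks-file path, task ID, and prompt below are hypothetical, and passing `'json'` as the output format makes errors throw instead of exiting the process.

```javascript
// Hypothetical caller -- a sketch, not code from this repository.
import updateTaskById from './scripts/modules/task-manager/update-task-by-id.js';

const result = await updateTaskById(
	'.taskmaster/tasks/tasks.json', // tasksPath (assumed location)
	4, // taskId
	'Switch the auth flow to refresh tokens', // prompt
	false, // useResearch
	{ projectRoot: process.cwd() }, // context: no mcpLog, so CLI-style logging
	'json', // outputFormat: avoids CLI boxes and process.exit(1) on error
	false // appendMode: full AI rewrite instead of appending to details
);

if (result) {
	console.log(`Task ${result.updatedTask.id} updated.`);
}
```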
--------------------------------------------------------------------------------
/tests/unit/dependency-manager.test.js:
--------------------------------------------------------------------------------

```javascript
/**
 * Dependency Manager module tests
 */
import { jest } from '@jest/globals';
import {
	validateTaskDependencies,
	isCircularDependency,
	removeDuplicateDependencies,
	cleanupSubtaskDependencies,
	ensureAtLeastOneIndependentSubtask,
	validateAndFixDependencies,
	canMoveWithDependencies
} from '../../scripts/modules/dependency-manager.js';
import * as utils from '../../scripts/modules/utils.js';
import { sampleTasks } from '../fixtures/sample-tasks.js';

// Mock dependencies
jest.mock('path');
jest.mock('chalk', () => ({
	green: jest.fn((text) => `<green>${text}</green>`),
	yellow: jest.fn((text) => `<yellow>${text}</yellow>`),
	red: jest.fn((text) => `<red>${text}</red>`),
	cyan: jest.fn((text) => `<cyan>${text}</cyan>`),
	bold: jest.fn((text) => `<bold>${text}</bold>`)
}));

jest.mock('boxen', () => jest.fn((text) => `[boxed: ${text}]`));

jest.mock('@anthropic-ai/sdk', () => ({
	Anthropic: jest.fn().mockImplementation(() => ({}))
}));

// Mock utils module
const mockTaskExists = jest.fn();
const mockFormatTaskId = jest.fn();
const mockFindCycles = jest.fn();
const mockLog = jest.fn();
const mockReadJSON = jest.fn();
const mockWriteJSON = jest.fn();

jest.mock('../../scripts/modules/utils.js', () => ({
	log: mockLog,
	readJSON: mockReadJSON,
	writeJSON: mockWriteJSON,
	taskExists: mockTaskExists,
	formatTaskId: mockFormatTaskId,
	findCycles: mockFindCycles
}));

jest.mock('../../scripts/modules/ui.js', () => ({
	displayBanner: jest.fn()
}));

jest.mock('../../scripts/modules/task-manager.js', () => ({
	generateTaskFiles: jest.fn()
}));

// Create a path for test files
const TEST_TASKS_PATH = 'tests/fixture/test-tasks.json';

describe('Dependency Manager Module', () => {
	beforeEach(() => {
		jest.clearAllMocks();

		// Set default implementations
		mockTaskExists.mockImplementation((tasks, id) => {
			if (Array.isArray(tasks)) {
				if (typeof id === 'string' && id.includes('.')) {
					const [taskId, subtaskId] = id.split('.').map(Number);
					const task = tasks.find((t) => t.id === taskId);
					return (
						task &&
						task.subtasks &&
						task.subtasks.some((st) => st.id === subtaskId)
					);
				}
				return tasks.some(
					(task) => task.id === (typeof id === 'string' ? parseInt(id, 10) : id)
				);
			}
			return false;
		});

		mockFormatTaskId.mockImplementation((id) => {
			if (typeof id === 'string' && id.includes('.')) {
				return id;
			}
			return parseInt(id, 10);
		});

		mockFindCycles.mockImplementation((tasks) => {
			// Simplified cycle detection for testing
			const dependencyMap = new Map();

			// Build dependency map
			tasks.forEach((task) => {
				if (task.dependencies) {
					dependencyMap.set(task.id, task.dependencies);
				}
			});

			const visited = new Set();
			const recursionStack = new Set();

			function dfs(taskId) {
				visited.add(taskId);
				recursionStack.add(taskId);

				const dependencies = dependencyMap.get(taskId) || [];
				for (const depId of dependencies) {
					if (!visited.has(depId)) {
						if (dfs(depId)) return true;
					} else if (recursionStack.has(depId)) {
						return true;
					}
				}

				recursionStack.delete(taskId);
				return false;
			}

			// Check for cycles starting from each unvisited node
			for (const taskId of dependencyMap.keys()) {
				if (!visited.has(taskId)) {
					if (dfs(taskId)) return true;
				}
			}

			return false;
		});
	});

	describe('isCircularDependency function', () => {
		test('should detect a direct circular dependency', () => {
			const tasks = [
				{ id: 1, dependencies: [2] },
				{ id: 2, dependencies: [1] }
			];
			const result = isCircularDependency(tasks, 1);
			expect(result).toBe(true);
		});

		test('should detect an indirect circular dependency', () => {
			const tasks = [
				{ id: 1, dependencies: [2] },
				{ id: 2, dependencies: [3] },
				{ id: 3, dependencies: [1] }
			];
			const result = isCircularDependency(tasks, 1);
			expect(result).toBe(true);
		});

		test('should return false for non-circular dependencies', () => {
			const tasks = [
				{ id: 1, dependencies: [2] },
				{ id: 2, dependencies: [3] },
				{ id: 3, dependencies: [] }
			];
			const result = isCircularDependency(tasks, 1);
			expect(result).toBe(false);
		});

		test('should handle a task with no dependencies', () => {
			const tasks = [
				{ id: 1, dependencies: [] },
				{ id: 2, dependencies: [1] }
			];
			const result = isCircularDependency(tasks, 1);
			expect(result).toBe(false);
		});

		test('should handle a task depending on itself', () => {
			const tasks = [{ id: 1, dependencies: [1] }];
			const result = isCircularDependency(tasks, 1);
			expect(result).toBe(true);
		});

		test('should handle subtask dependencies correctly', () => {
			const tasks = [
				{
					id: 1,
					dependencies: [],
					subtasks: [
						{ id: 1, dependencies: ['1.2'] },
						{ id: 2, dependencies: ['1.3'] },
						{ id: 3, dependencies: ['1.1'] }
					]
				}
			];
			// This creates a circular dependency: 1.1 -> 1.2 -> 1.3 -> 1.1
			const result = isCircularDependency(tasks, '1.1', ['1.3', '1.2']);
			expect(result).toBe(true);
		});

		test('should allow non-circular subtask dependencies within same parent', () => {
			const tasks = [
				{
					id: 1,
					dependencies: [],
					subtasks: [
						{ id: 1, dependencies: [] },
						{ id: 2, dependencies: ['1.1'] },
						{ id: 3, dependencies: ['1.2'] }
					]
				}
			];
			// This is a valid dependency chain: 1.3 -> 1.2 -> 1.1
			const result = isCircularDependency(tasks, '1.1', []);
			expect(result).toBe(false);
		});

		test('should properly handle dependencies between subtasks of the same parent', () => {
			const tasks = [
				{
					id: 1,
					dependencies: [],
					subtasks: [
						{ id: 1, dependencies: [] },
						{ id: 2, dependencies: ['1.1'] },
						{ id: 3, dependencies: [] }
					]
				}
			];
			// Check if adding a dependency from subtask 1.3 to 1.2 creates a circular dependency
			// This should be false as 1.3 -> 1.2 -> 1.1 is a valid chain
			mockTaskExists.mockImplementation(() => true);
			const result = isCircularDependency(tasks, '1.3', ['1.2']);
			expect(result).toBe(false);
		});

		test('should correctly detect circular dependencies in subtasks of the same parent', () => {
			const tasks = [
				{
					id: 1,
					dependencies: [],
					subtasks: [
						{ id: 1, dependencies: ['1.3'] },
						{ id: 2, dependencies: ['1.1'] },
						{ id: 3, dependencies: ['1.2'] }
					]
				}
			];
			// This creates a circular dependency: 1.1 -> 1.3 -> 1.2 -> 1.1
			mockTaskExists.mockImplementation(() => true);
			const result = isCircularDependency(tasks, '1.2', ['1.1']);
			expect(result).toBe(true);
		});
	});

	describe('validateTaskDependencies function', () => {
		test('should detect missing dependencies', () => {
			const tasks = [
				{ id: 1, dependencies: [99] }, // 99 doesn't exist
				{ id: 2, dependencies: [1] }
			];
			const result = validateTaskDependencies(tasks);
			expect(result.valid).toBe(false);
			expect(result.issues.length).toBeGreaterThan(0);
			expect(result.issues[0].type).toBe('missing');
			expect(result.issues[0].taskId).toBe(1);
			expect(result.issues[0].dependencyId).toBe(99);
		});

		test('should detect circular dependencies', () => {
			const tasks = [
				{ id: 1, dependencies: [2] },
				{ id: 2, dependencies: [1] }
			];
			const result = validateTaskDependencies(tasks);
			expect(result.valid).toBe(false);
			expect(result.issues.some((issue) => issue.type === 'circular')).toBe(
				true
			);
		});

		test('should detect self-dependencies', () => {
			const tasks = [{ id: 1, dependencies: [1] }];
			const result = validateTaskDependencies(tasks);
			expect(result.valid).toBe(false);
			expect(
				result.issues.some(
					(issue) => issue.type === 'self' && issue.taskId === 1
				)
			).toBe(true);
		});

		test('should return valid for correct dependencies', () => {
			const tasks = [
				{ id: 1, dependencies: [] },
				{ id: 2, dependencies: [1] },
				{ id: 3, dependencies: [1, 2] }
			];
			const result = validateTaskDependencies(tasks);
			expect(result.valid).toBe(true);
			expect(result.issues.length).toBe(0);
		});

		test('should handle tasks with no dependencies property', () => {
			const tasks = [
				{ id: 1 }, // Missing dependencies property
				{ id: 2, dependencies: [1] }
			];
			const result = validateTaskDependencies(tasks);
			// Should be valid since a missing dependencies property is interpreted as an empty array
			expect(result.valid).toBe(true);
		});

		test('should handle subtask dependencies correctly', () => {
			const tasks = [
				{
					id: 1,
					dependencies: [],
					subtasks: [
						{ id: 1, dependencies: [] },
						{ id: 2, dependencies: ['1.1'] }, // Valid - depends on another subtask
						{ id: 3, dependencies: ['1.2'] } // Valid - depends on another subtask
					]
				},
				{
					id: 2,
					dependencies: ['1.3'], // Valid - depends on a subtask from task 1
					subtasks: []
				}
			];

			// Set up mock to handle subtask validation
			mockTaskExists.mockImplementation((tasks, id) => {
				if (typeof id === 'string' && id.includes('.')) {
					const [taskId, subtaskId] = id.split('.').map(Number);
					const task = tasks.find((t) => t.id === taskId);
					return (
						task &&
						task.subtasks &&
						task.subtasks.some((st) => st.id === subtaskId)
					);
				}
				return tasks.some((task) => task.id === parseInt(id, 10));
			});

			const result = validateTaskDependencies(tasks);
			expect(result.valid).toBe(true);
			expect(result.issues.length).toBe(0);
		});

		test('should detect missing subtask dependencies', () => {
			const tasks = [
				{
					id: 1,
					dependencies: [],
					subtasks: [
						{ id: 1, dependencies: ['1.4'] }, // Invalid - subtask 4 doesn't exist
						{ id: 2, dependencies: ['2.1'] } // Invalid - task 2 has no subtasks
					]
				},
				{ id: 2, dependencies: [], subtasks: [] }
			];

			// Mock taskExists to correctly identify missing subtasks
			mockTaskExists.mockImplementation((taskArray, depId) => {
				if (typeof depId === 'string' && depId === '1.4') {
					return false; // Subtask 1.4 doesn't exist
				}
				if (typeof depId === 'string' && depId === '2.1') {
					return false; // Subtask 2.1 doesn't exist
				}
				return true; // All other dependencies exist
			});

			const result = validateTaskDependencies(tasks);
			expect(result.valid).toBe(false);
			expect(result.issues.length).toBeGreaterThan(0);
			// Should detect missing subtask dependencies
			expect(
				result.issues.some(
					(issue) =>
						issue.type === 'missing' &&
						String(issue.taskId) === '1.1' &&
						String(issue.dependencyId) === '1.4'
				)
			).toBe(true);
		});

		test('should detect circular dependencies between subtasks', () => {
			const tasks = [
				{
					id: 1,
					dependencies: [],
					subtasks: [
						{ id: 1, dependencies: ['1.2'] },
						{ id: 2, dependencies: ['1.1'] } // Creates a circular dependency with 1.1
					]
				}
			];

			// Mock isCircularDependency for subtasks
			mockFindCycles.mockReturnValue(true);

			const result = validateTaskDependencies(tasks);
			expect(result.valid).toBe(false);
			expect(result.issues.some((issue) => issue.type === 'circular')).toBe(
				true
			);
		});

		test('should properly validate dependencies between subtasks of the same parent', () => {
			const tasks = [
				{
					id: 23,
					dependencies: [],
					subtasks: [
						{ id: 8, dependencies: ['23.13'] },
						{ id: 10, dependencies: ['23.8'] },
						{ id: 13, dependencies: [] }
					]
				}
			];

			// Mock taskExists to validate the subtask dependencies
			mockTaskExists.mockImplementation((taskArray, id) => {
				if (typeof id === 'string') {
					if (id === '23.8' || id === '23.10' || id === '23.13') {
						return true;
					}
				}
				return false;
			});

			const result = validateTaskDependencies(tasks);
			expect(result.valid).toBe(true);
			expect(result.issues.length).toBe(0);
		});
	});

	describe('removeDuplicateDependencies function', () => {
		test('should remove duplicate dependencies from tasks', () => {
			const tasksData = {
				tasks: [
					{ id: 1, dependencies: [2, 2, 3, 3, 3] },
					{ id: 2, dependencies: [3] },
					{ id: 3, dependencies: [] }
				]
			};
			const result = removeDuplicateDependencies(tasksData);
			expect(result.tasks[0].dependencies).toEqual([2, 3]);
			expect(result.tasks[1].dependencies).toEqual([3]);
			expect(result.tasks[2].dependencies).toEqual([]);
		});

		test('should handle empty dependencies array', () => {
			const tasksData = {
				tasks: [
					{ id: 1, dependencies: [] },
					{ id: 2, dependencies: [1] }
				]
			};
			const result = removeDuplicateDependencies(tasksData);
			expect(result.tasks[0].dependencies).toEqual([]);
			expect(result.tasks[1].dependencies).toEqual([1]);
		});

		test('should handle tasks with no dependencies property', () => {
			const tasksData = {
				tasks: [
					{ id: 1 }, // No dependencies property
					{ id: 2, dependencies: [1] }
				]
			};
			const result = removeDuplicateDependencies(tasksData);
			expect(result.tasks[0]).not.toHaveProperty('dependencies');
			expect(result.tasks[1].dependencies).toEqual([1]);
		});
	});

	describe('cleanupSubtaskDependencies function', () => {
		test('should remove dependencies to non-existent subtasks', () => {
			const tasksData = {
				tasks: [
					{
						id: 1,
						dependencies: [],
						subtasks: [
							{ id: 1, dependencies: [] },
							{ id: 2, dependencies: [3] } // Dependency 3 doesn't exist
						]
					},
					{
						id: 2,
						dependencies: ['1.2'], // Valid subtask dependency
						subtasks: [
							{ id: 1, dependencies: ['1.1'] } // Valid subtask dependency
						]
					}
				]
			};

			const result = cleanupSubtaskDependencies(tasksData);

			// Should remove the invalid dependency to subtask 3
			expect(result.tasks[0].subtasks[1].dependencies).toEqual([]);

			// Should keep valid dependencies
			expect(result.tasks[1].dependencies).toEqual(['1.2']);
			expect(result.tasks[1].subtasks[0].dependencies).toEqual(['1.1']);
		});

		test('should handle tasks without subtasks', () => {
			const tasksData = {
				tasks: [
					{ id: 1, dependencies: [] },
					{ id: 2, dependencies: [1] }
				]
			};

			const result = cleanupSubtaskDependencies(tasksData);

			// Should return the original data unchanged
			expect(result).toEqual(tasksData);
		});
	});

	describe('ensureAtLeastOneIndependentSubtask function', () => {
		test('should clear dependencies of first subtask if none are independent', () => {
			const tasksData = {
				tasks: [
					{
						id: 1,
						subtasks: [
							{ id: 1, dependencies: [2] },
							{ id: 2, dependencies: [1] }
						]
					}
				]
			};

			const result = ensureAtLeastOneIndependentSubtask(tasksData);

			expect(result).toBe(true);
			expect(tasksData.tasks[0].subtasks[0].dependencies).toEqual([]);
			expect(tasksData.tasks[0].subtasks[1].dependencies).toEqual([1]);
		});

		test('should not modify tasks if at least one subtask is independent', () => {
			const tasksData = {
				tasks: [
					{
						id: 1,
						subtasks: [
							{ id: 1, dependencies: [] },
							{ id: 2, dependencies: [1] }
						]
					}
				]
			};

			const result = ensureAtLeastOneIndependentSubtask(tasksData);

			expect(result).toBe(false);
			expect(tasksData.tasks[0].subtasks[0].dependencies).toEqual([]);
			expect(tasksData.tasks[0].subtasks[1].dependencies).toEqual([1]);
		});

		test('should handle tasks without subtasks', () => {
			const tasksData = {
				tasks: [{ id: 1 }, { id: 2, dependencies: [1] }]
			};

			const result = ensureAtLeastOneIndependentSubtask(tasksData);

			expect(result).toBe(false);
			expect(tasksData).toEqual({
				tasks: [{ id: 1 }, { id: 2, dependencies: [1] }]
			});
		});

		test('should handle empty subtasks array', () => {
			const tasksData = {
				tasks: [{ id: 1, subtasks: [] }]
			};

			const result = ensureAtLeastOneIndependentSubtask(tasksData);

			expect(result).toBe(false);
			expect(tasksData).toEqual({ tasks: [{ id: 1, subtasks: [] }] });
		});
	});

	describe('validateAndFixDependencies function', () => {
		test('should fix multiple dependency issues and return true if changes made', () => {
			const tasksData = {
				tasks: [
					{
						id: 1,
						dependencies: [1, 1, 99], // Self-dependency and duplicate and invalid dependency
						subtasks: [
							{ id: 1, dependencies: [2, 2] }, // Duplicate dependencies
							{ id: 2, dependencies: [1] }
						]
					},
					{
						id: 2,
						dependencies: [1],
						subtasks: [
							{ id: 1, dependencies: [99] } // Invalid dependency
						]
					}
				]
			};

			// Mock taskExists for validating dependencies
			mockTaskExists.mockImplementation((tasks, id) => {
				// Convert id to string for comparison
				const idStr = String(id);

				// Handle subtask references (e.g., "1.2")
				if (idStr.includes('.')) {
					const [parentId, subtaskId] = idStr.split('.').map(Number);
					const task = tasks.find((t) => t.id === parentId);
					return (
						task &&
						task.subtasks &&
						task.subtasks.some((st) => st.id === subtaskId)
					);
				}

				// Handle regular task references
				const taskId = parseInt(idStr, 10);
				return taskId === 1 || taskId === 2; // Only tasks 1 and 2 exist
			});

			// Make a copy for verification that original is modified
			const originalData = JSON.parse(JSON.stringify(tasksData));

			const result = validateAndFixDependencies(tasksData);

			expect(result).toBe(true);
			// Check that data has been modified
			expect(tasksData).not.toEqual(originalData);

			// Check specific changes
			// 1. Self-dependency removed
			expect(tasksData.tasks[0].dependencies).not.toContain(1);
			// 2. Invalid dependency removed
			expect(tasksData.tasks[0].dependencies).not.toContain(99);
			// 3. Dependencies have been deduplicated
			if (tasksData.tasks[0].subtasks[0].dependencies.length > 0) {
				expect(tasksData.tasks[0].subtasks[0].dependencies).toEqual(
					expect.arrayContaining([])
				);
			}
			// 4.
Invalid subtask dependency removed expect(tasksData.tasks[1].subtasks[0].dependencies).toEqual([]); // IMPORTANT: Verify no calls to writeJSON with actual tasks.json expect(mockWriteJSON).not.toHaveBeenCalledWith( 'tasks/tasks.json', expect.anything() ); }); test('should return false if no changes needed', () => { const tasksData = { tasks: [ { id: 1, dependencies: [], subtasks: [ { id: 1, dependencies: [] }, // Already has an independent subtask { id: 2, dependencies: ['1.1'] } ] }, { id: 2, dependencies: [1] } ] }; // Mock taskExists to validate all dependencies as valid mockTaskExists.mockImplementation((tasks, id) => { // Convert id to string for comparison const idStr = String(id); // Handle subtask references if (idStr.includes('.')) { const [parentId, subtaskId] = idStr.split('.').map(Number); const task = tasks.find((t) => t.id === parentId); return ( task && task.subtasks && task.subtasks.some((st) => st.id === subtaskId) ); } // Handle regular task references const taskId = parseInt(idStr, 10); return taskId === 1 || taskId === 2; }); const originalData = JSON.parse(JSON.stringify(tasksData)); const result = validateAndFixDependencies(tasksData); expect(result).toBe(false); // Verify data is unchanged expect(tasksData).toEqual(originalData); // IMPORTANT: Verify no calls to writeJSON with actual tasks.json expect(mockWriteJSON).not.toHaveBeenCalledWith( 'tasks/tasks.json', expect.anything() ); }); test('should handle invalid input', () => { expect(validateAndFixDependencies(null)).toBe(false); expect(validateAndFixDependencies({})).toBe(false); expect(validateAndFixDependencies({ tasks: null })).toBe(false); expect(validateAndFixDependencies({ tasks: 'not an array' })).toBe(false); // IMPORTANT: Verify no calls to writeJSON with actual tasks.json expect(mockWriteJSON).not.toHaveBeenCalledWith( 'tasks/tasks.json', expect.anything() ); }); test('should save changes when tasksPath is provided', () => { const tasksData = { tasks: [ { id: 1, dependencies: [1, 1], // Self-dependency and duplicate subtasks: [ { id: 1, dependencies: [99] } // Invalid dependency ] } ] }; // Mock taskExists for this specific test mockTaskExists.mockImplementation((tasks, id) => { // Convert id to string for comparison const idStr = String(id); // Handle subtask references if (idStr.includes('.')) { const [parentId, subtaskId] = idStr.split('.').map(Number); const task = tasks.find((t) => t.id === parentId); return ( task && task.subtasks && task.subtasks.some((st) => st.id === subtaskId) ); } // Handle regular task references const taskId = parseInt(idStr, 10); return taskId === 1; // Only task 1 exists }); // Copy the original data to verify changes const originalData = JSON.parse(JSON.stringify(tasksData)); // Call the function with our test path instead of the actual tasks.json const result = validateAndFixDependencies(tasksData, TEST_TASKS_PATH); // First verify that the result is true (changes were made) expect(result).toBe(true); // Verify the data was modified expect(tasksData).not.toEqual(originalData); // IMPORTANT: Verify no calls to writeJSON with actual tasks.json expect(mockWriteJSON).not.toHaveBeenCalledWith( 'tasks/tasks.json', expect.anything() ); }); }); describe('canMoveWithDependencies', () => { it('should return canMove: false when conflicts exist', () => { const allTasks = [ { id: 1, tag: 'source', dependencies: [2], title: 'Task 1' }, { id: 2, tag: 'other', dependencies: [], title: 'Task 2' } ]; const result = canMoveWithDependencies('1', 'source', 'target', allTasks); 
expect(result.canMove).toBe(false); expect(result.conflicts).toBeDefined(); expect(result.conflicts.length).toBeGreaterThan(0); expect(result.dependentTaskIds).toBeDefined(); }); it('should return canMove: true when no conflicts exist', () => { const allTasks = [ { id: 1, tag: 'source', dependencies: [], title: 'Task 1' }, { id: 2, tag: 'target', dependencies: [], title: 'Task 2' } ]; const result = canMoveWithDependencies('1', 'source', 'target', allTasks); expect(result.canMove).toBe(true); expect(result.conflicts).toBeDefined(); expect(result.conflicts.length).toBe(0); expect(result.dependentTaskIds).toBeDefined(); expect(result.dependentTaskIds.length).toBe(0); }); it('should handle subtask lookup correctly', () => { const allTasks = [ { id: 1, tag: 'source', dependencies: [], title: 'Parent Task', subtasks: [ { id: 1, dependencies: [2], title: 'Subtask 1' } ] }, { id: 2, tag: 'other', dependencies: [], title: 'Task 2' } ]; const result = canMoveWithDependencies( '1.1', 'source', 'target', allTasks ); expect(result.canMove).toBe(false); expect(result.conflicts).toBeDefined(); expect(result.conflicts.length).toBeGreaterThan(0); }); it('should return error when task not found', () => { const allTasks = [ { id: 1, tag: 'source', dependencies: [], title: 'Task 1' } ]; const result = canMoveWithDependencies( '999', 'source', 'target', allTasks ); expect(result.canMove).toBe(false); expect(result.error).toBe('Task not found'); expect(result.dependentTaskIds).toEqual([]); expect(result.conflicts).toEqual([]); }); }); }); ``` -------------------------------------------------------------------------------- /scripts/modules/task-manager/scope-adjustment.js: -------------------------------------------------------------------------------- ```javascript /** * scope-adjustment.js * Core logic for dynamic task complexity adjustment (scope-up and scope-down) */ import { z } from 'zod'; import { log, readJSON, writeJSON, getCurrentTag, readComplexityReport, findTaskInComplexityReport } from '../utils.js'; import { generateObjectService, generateTextService } from '../ai-services-unified.js'; import { findTaskById, taskExists } from '../task-manager.js'; import analyzeTaskComplexity from './analyze-task-complexity.js'; import { findComplexityReportPath } from '../../../src/utils/path-utils.js'; /** * Valid strength levels for scope adjustments */ const VALID_STRENGTHS = ['light', 'regular', 'heavy']; /** * Statuses that should be preserved during subtask regeneration * These represent work that has been started or intentionally set by the user */ const PRESERVE_STATUSES = [ 'done', 'in-progress', 'review', 'cancelled', 'deferred', 'blocked' ]; /** * Statuses that should be regenerated during subtask regeneration * These represent work that hasn't been started yet */ const REGENERATE_STATUSES = ['pending']; /** * Validates strength parameter * @param {string} strength - The strength level to validate * @returns {boolean} True if valid, false otherwise */ export function validateStrength(strength) { return VALID_STRENGTHS.includes(strength); } /** * Re-analyzes the complexity of a single task after scope adjustment * @param {Object} task - The task to analyze * @param {string} tasksPath - Path to tasks.json * @param {Object} context - Context containing projectRoot, tag, session * @returns {Promise<number|null>} New complexity score or null if analysis failed */ async function reanalyzeTaskComplexity(task, tasksPath, context) { const { projectRoot, tag, session } = context; try { // Create a minimal tasks 
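// Illustrative usage (a sketch, not part of the original module): validating
// the strength flag up front lets callers fail fast before any AI request:
//   validateStrength('regular'); // => true
//   validateStrength('extreme'); // => false — callers surface VALID_STRENGTHS in the error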
/**
 * Re-analyzes the complexity of a single task after scope adjustment
 * @param {Object} task - The task to analyze
 * @param {string} tasksPath - Path to tasks.json
 * @param {Object} context - Context containing projectRoot, tag, session
 * @returns {Promise<number|null>} New complexity score or null if analysis failed
 */
async function reanalyzeTaskComplexity(task, tasksPath, context) {
	const { projectRoot, tag, session } = context;

	try {
		// Create a minimal tasks data structure for analysis
		const tasksForAnalysis = {
			tasks: [task],
			metadata: { analyzedAt: new Date().toISOString() }
		};

		// Find the complexity report path for this tag
		const complexityReportPath = findComplexityReportPath(
			null,
			{ projectRoot, tag },
			null
		);

		if (!complexityReportPath) {
			log('warn', 'No complexity report found - cannot re-analyze complexity');
			return null;
		}

		// Use analyze-task-complexity to re-analyze just this task
		const analysisOptions = {
			file: tasksPath,
			output: complexityReportPath,
			id: task.id.toString(), // Analyze only this specific task
			projectRoot,
			tag,
			_filteredTasksData: tasksForAnalysis, // Pass pre-filtered data
			_originalTaskCount: 1
		};

		// Run the analysis with proper context
		await analyzeTaskComplexity(analysisOptions, { session });

		// Read the updated complexity report to get the new score
		const updatedReport = readComplexityReport(complexityReportPath);
		if (updatedReport) {
			const taskAnalysis = findTaskInComplexityReport(updatedReport, task.id);
			if (taskAnalysis) {
				log(
					'info',
					`Re-analyzed task ${task.id} complexity: ${taskAnalysis.complexityScore}/10`
				);
				return taskAnalysis.complexityScore;
			}
		}

		log(
			'warn',
			`Could not find updated complexity analysis for task ${task.id}`
		);
		return null;
	} catch (error) {
		log('error', `Failed to re-analyze task complexity: ${error.message}`);
		return null;
	}
}

/**
 * Gets the current complexity score for a task from the complexity report
 * @param {number} taskId - Task ID to look up
 * @param {Object} context - Context containing projectRoot, tag
 * @returns {number|null} Current complexity score or null if not found
 */
function getCurrentComplexityScore(taskId, context) {
	const { projectRoot, tag } = context;

	try {
		// Find the complexity report path for this tag
		const complexityReportPath = findComplexityReportPath(
			null,
			{ projectRoot, tag },
			null
		);

		if (!complexityReportPath) {
			return null;
		}

		// Read the current complexity report
		const complexityReport = readComplexityReport(complexityReportPath);
		if (!complexityReport) {
			return null;
		}

		// Find this task's current complexity
		const taskAnalysis = findTaskInComplexityReport(complexityReport, taskId);
		return taskAnalysis ? taskAnalysis.complexityScore : null;
	} catch (error) {
		log('debug', `Could not read current complexity score: ${error.message}`);
		return null;
	}
}

/**
 * Regenerates subtasks for a task based on new complexity while preserving completed work
 * @param {Object} task - The updated task object
 * @param {string} tasksPath - Path to tasks.json
 * @param {Object} context - Context containing projectRoot, tag, session
 * @param {string} direction - Direction of scope change (up/down) for logging
 * @param {string} strength - Strength level ('light', 'regular', 'heavy')
 * @param {number|null} originalComplexity - Original complexity score for smarter adjustments
 * @returns {Promise<Object>} Object with updated task and regeneration info
 */
async function regenerateSubtasksForComplexity(
	task,
	tasksPath,
	context,
	direction,
	strength = 'regular',
	originalComplexity = null
) {
	const { projectRoot, tag, session } = context;

	// Check if task has subtasks
	if (
		!task.subtasks ||
		!Array.isArray(task.subtasks) ||
		task.subtasks.length === 0
	) {
		return {
			updatedTask: task,
			regenerated: false,
			preserved: 0,
			generated: 0
		};
	}

	// Identify subtasks to preserve vs regenerate
	const preservedSubtasks = task.subtasks.filter((subtask) =>
		PRESERVE_STATUSES.includes(subtask.status)
	);
	const pendingSubtasks = task.subtasks.filter((subtask) =>
		REGENERATE_STATUSES.includes(subtask.status)
	);

	// If no pending subtasks, nothing to regenerate
	if (pendingSubtasks.length === 0) {
		return {
			updatedTask: task,
			regenerated: false,
			preserved: preservedSubtasks.length,
			generated: 0
		};
	}

	// Calculate appropriate number of total subtasks based on direction,
	// strength, and original complexity
	let targetSubtaskCount;
	const preservedCount = preservedSubtasks.length;
	const currentPendingCount = pendingSubtasks.length;

	// Use original complexity to inform decisions (if available)
	const complexityFactor = originalComplexity
		? Math.max(0.5, originalComplexity / 10)
		: 1.0;
	const complexityInfo = originalComplexity
		? ` (original complexity: ${originalComplexity}/10)`
		: '';

	if (direction === 'up') {
		// Scope up: More subtasks for increased complexity
		if (strength === 'light') {
			const base = Math.max(
				5,
				preservedCount + Math.ceil(currentPendingCount * 1.1)
			);
			targetSubtaskCount = Math.ceil(base * (0.8 + 0.4 * complexityFactor));
		} else if (strength === 'regular') {
			const base = Math.max(
				6,
				preservedCount + Math.ceil(currentPendingCount * 1.3)
			);
			targetSubtaskCount = Math.ceil(base * (0.8 + 0.4 * complexityFactor));
		} else {
			// heavy
			const base = Math.max(
				8,
				preservedCount + Math.ceil(currentPendingCount * 1.6)
			);
			targetSubtaskCount = Math.ceil(base * (0.8 + 0.6 * complexityFactor));
		}
	} else {
		// Scope down: Fewer subtasks for decreased complexity
		// High complexity tasks get reduced more aggressively
		const aggressiveFactor =
			originalComplexity >= 8 ? 0.7 : originalComplexity >= 6 ? 0.85 : 1.0;

		if (strength === 'light') {
			const base = Math.max(
				3,
				preservedCount + Math.ceil(currentPendingCount * 0.8)
			);
			targetSubtaskCount = Math.ceil(base * aggressiveFactor);
		} else if (strength === 'regular') {
			const base = Math.max(
				3,
				preservedCount + Math.ceil(currentPendingCount * 0.5)
			);
			targetSubtaskCount = Math.ceil(base * aggressiveFactor);
		} else {
			// heavy
			// Heavy scope-down should be much more aggressive - aim for only core functionality
			// Very high complexity tasks (9-10) get reduced to almost nothing
			const ultraAggressiveFactor =
				originalComplexity >= 9 ? 0.3 : originalComplexity >= 7 ? 0.5 : 0.7;
			const base = Math.max(
				2,
				preservedCount + Math.ceil(currentPendingCount * 0.25)
			);
			targetSubtaskCount = Math.max(1, Math.ceil(base * ultraAggressiveFactor));
		}
	}

	log(
		'debug',
		`Complexity-aware calculation${complexityInfo}: ${currentPendingCount} pending -> ${targetSubtaskCount} total subtasks (${strength} ${direction})`
	);

	const newSubtasksNeeded = Math.max(1, targetSubtaskCount - preservedCount);
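	// Worked example (illustrative numbers, not from the source): a 'heavy'
	// scope-down with 2 preserved subtasks, 6 pending, original complexity 9:
	//   ultraAggressiveFactor = 0.3
	//   base = max(2, 2 + ceil(6 * 0.25)) = 4
	//   targetSubtaskCount = max(1, ceil(4 * 0.3)) = 2
	//   newSubtasksNeeded = max(1, 2 - 2) = 1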
	try {
		// Generate new subtasks using AI to match the new complexity level
		const systemPrompt = `You are an expert project manager who creates task breakdowns that match complexity levels.`;

		const prompt = `Based on this updated task, generate ${newSubtasksNeeded} NEW subtasks that reflect the ${direction === 'up' ? 'increased' : 'decreased'} complexity level:

**Task Title**: ${task.title}
**Task Description**: ${task.description}
**Implementation Details**: ${task.details}
**Test Strategy**: ${task.testStrategy}

**Complexity Direction**: This task was recently scoped ${direction} (${strength} strength) to ${direction === 'up' ? 'increase' : 'decrease'} complexity.
${originalComplexity ? `**Original Complexity**: ${originalComplexity}/10 - consider this when determining appropriate scope level.` : ''}
${preservedCount > 0 ? `**Preserved Subtasks**: ${preservedCount} existing subtasks with work already done will be kept.` : ''}

Generate subtasks that:
${
	direction === 'up'
		? strength === 'heavy'
			? `- Add comprehensive implementation steps with advanced features
- Include extensive error handling, validation, and edge cases
- Cover multiple integration scenarios and advanced testing
- Provide thorough documentation and optimization approaches`
			: strength === 'regular'
				? `- Add more detailed implementation steps
- Include additional error handling and validation
- Cover more edge cases and advanced features
- Provide more comprehensive testing approaches`
				: `- Add some additional implementation details
- Include basic error handling considerations
- Cover a few common edge cases
- Enhance testing approaches slightly`
		: strength === 'heavy'
			? `- Focus ONLY on absolutely essential core functionality
- Strip out ALL non-critical features (error handling, advanced testing, etc.)
- Provide only the minimum viable implementation
- Eliminate any complex integrations or advanced scenarios
- Aim for the simplest possible working solution`
			: strength === 'regular'
				? `- Focus on core functionality only
- Simplify implementation steps
- Remove non-essential features
- Streamline to basic requirements`
				: `- Focus mainly on core functionality
- Slightly simplify implementation steps
- Remove some non-essential features
- Streamline most requirements`
}

Return a JSON object with a "subtasks" array. Each subtask should have:
- id: Sequential NUMBER starting from 1 (e.g., 1, 2, 3 - NOT "1", "2", "3")
- title: Clear, specific title
- description: Detailed description
- dependencies: Array of dependency IDs as STRINGS (use format ["${task.id}.1", "${task.id}.2"] for siblings, or empty array [] for no dependencies)
- details: Implementation guidance
- status: "pending"
- testStrategy: Testing approach

IMPORTANT:
- The 'id' field must be a NUMBER, not a string!
- Dependencies must be strings, not numbers!

Ensure the JSON is valid and properly formatted.`;

		// Define subtask schema
		const subtaskSchema = z.object({
			subtasks: z.array(
				z.object({
					id: z.number().int().positive(),
					title: z.string().min(5),
					description: z.string().min(10),
					dependencies: z.array(z.string()),
					details: z.string().min(20),
					status: z.string(),
					testStrategy: z.string()
				})
			)
		});

		const aiResult = await generateObjectService({
			role: context.research ? 'research' : 'main',
			session: context.session,
			systemPrompt,
			prompt,
			schema: subtaskSchema,
			objectName: 'subtask_regeneration',
			commandName: context.commandName || `subtask-regen-${direction}`,
			outputType: context.outputType || 'cli'
		});

		const generatedSubtasks = aiResult.mainResult.subtasks || [];

		// Post-process generated subtasks to ensure defaults
		const processedGeneratedSubtasks = generatedSubtasks.map((subtask) => ({
			...subtask,
			status: subtask.status || 'pending',
			testStrategy: subtask.testStrategy || ''
		}));

		// Update task with preserved subtasks + newly generated ones
		task.subtasks = [...preservedSubtasks, ...processedGeneratedSubtasks];

		return {
			updatedTask: task,
			regenerated: true,
			preserved: preservedSubtasks.length,
			generated: processedGeneratedSubtasks.length
		};
	} catch (error) {
		log(
			'warn',
			`Failed to regenerate subtasks for task ${task.id}: ${error.message}`
		);
		// Don't fail the whole operation if subtask regeneration fails
		return {
			updatedTask: task,
			regenerated: false,
			preserved: preservedSubtasks.length,
			generated: 0,
			error: error.message
		};
	}
}

/**
 * Generates AI prompt for scope adjustment
 * @param {Object} task - The task to adjust
 * @param {string} direction - 'up' or 'down'
 * @param {string} strength - 'light', 'regular', or 'heavy'
 * @param {string} customPrompt - Optional custom instructions
 * @returns {string} The generated prompt
 */
function generateScopePrompt(task, direction, strength, customPrompt) {
	const isUp = direction === 'up';
	const strengthDescriptions = {
		light: isUp ? 'minor enhancements' : 'slight simplifications',
		regular: isUp ? 'moderate complexity increases' : 'moderate simplifications',
		heavy: isUp ? 'significant complexity additions' : 'major simplifications'
	};

	let basePrompt = `You are tasked with adjusting the complexity of a task.

CURRENT TASK:
Title: ${task.title}
Description: ${task.description}
Details: ${task.details}
Test Strategy: ${task.testStrategy || 'Not specified'}

ADJUSTMENT REQUIREMENTS:
- Direction: ${isUp ? 'INCREASE' : 'DECREASE'} complexity
- Strength: ${strength} (${strengthDescriptions[strength]})
- Preserve the core purpose and functionality of the task
- Maintain consistency with the existing task structure`;

	if (isUp) {
		basePrompt += `
- Add more detailed requirements, edge cases, or advanced features
- Include additional implementation considerations
- Enhance error handling and validation requirements
- Expand testing strategies with more comprehensive scenarios`;
	} else {
		basePrompt += `
- Focus on core functionality and essential requirements
- Remove or simplify non-essential features
- Streamline implementation details
- Simplify testing to focus on basic functionality`;
	}

	if (customPrompt) {
		basePrompt += `\n\nCUSTOM INSTRUCTIONS:\n${customPrompt}`;
	}

	basePrompt += `\n\nReturn a JSON object with the updated task containing these fields:
- title: Updated task title
- description: Updated task description
- details: Updated implementation details
- testStrategy: Updated test strategy
- priority: Task priority ('low', 'medium', or 'high')

Ensure the JSON is valid and properly formatted.`;

	return basePrompt;
}

/**
 * Adjusts task complexity using AI
 * @param {Object} task - The task to adjust
 * @param {string} direction - 'up' or 'down'
 * @param {string} strength - 'light', 'regular', or 'heavy'
 * @param {string} customPrompt - Optional custom instructions
 * @param {Object} context - Context object with projectRoot, tag, etc.
 * @returns {Promise<Object>} Updated task data and telemetry
 */
async function adjustTaskComplexity(
	task,
	direction,
	strength,
	customPrompt,
	context
) {
	const systemPrompt = `You are an expert software project manager who helps adjust task complexity while maintaining clarity and actionability.`;
	const prompt = generateScopePrompt(task, direction, strength, customPrompt);

	// Define the task schema for structured response using Zod
	const taskSchema = z.object({
		title: z
			.string()
			.min(1)
			.describe('Updated task title reflecting scope adjustment'),
		description: z
			.string()
			.min(1)
			.describe('Updated task description with adjusted scope'),
		details: z
			.string()
			.min(1)
			.describe('Updated implementation details with adjusted complexity'),
		testStrategy: z
			.string()
			.min(1)
			.describe('Updated testing approach for the adjusted scope'),
		priority: z.enum(['low', 'medium', 'high']).describe('Task priority level')
	});

	const aiResult = await generateObjectService({
		role: context.research ? 'research' : 'main',
		session: context.session,
		systemPrompt,
		prompt,
		schema: taskSchema,
		objectName: 'updated_task',
		commandName: context.commandName || `scope-${direction}`,
		outputType: context.outputType || 'cli'
	});

	const updatedTaskData = aiResult.mainResult;

	// Ensure priority has a value (in case AI didn't provide one)
	const processedTaskData = {
		...updatedTaskData,
		priority: updatedTaskData.priority || task.priority || 'medium'
	};

	return {
		updatedTask: { ...task, ...processedTaskData },
		telemetryData: aiResult.telemetryData
	};
}
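// Illustrative call (a sketch; assumes a configured provider session exists):
//   const { updatedTask, telemetryData } = await adjustTaskComplexity(
//     task, 'down', 'light', null, { projectRoot, tag, session }
//   );
// Because the return value spreads the original task first, the task keeps its
// id and subtasks; only the AI-rewritten fields (title, description, details,
// testStrategy, priority) change.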
/**
 * Increases task complexity (scope-up)
 * @param {string} tasksPath - Path to tasks.json file
 * @param {Array<number>} taskIds - Array of task IDs to scope up
 * @param {string} strength - Strength level ('light', 'regular', 'heavy')
 * @param {string} customPrompt - Optional custom instructions
 * @param {Object} context - Context object with projectRoot, tag, etc.
 * @param {string} outputFormat - Output format ('text' or 'json')
 * @returns {Promise<Object>} Results of the scope-up operation
 */
export async function scopeUpTask(
	tasksPath,
	taskIds,
	strength = 'regular',
	customPrompt = null,
	context = {},
	outputFormat = 'text'
) {
	// Validate inputs
	if (!validateStrength(strength)) {
		throw new Error(
			`Invalid strength level: ${strength}. Must be one of: ${VALID_STRENGTHS.join(', ')}`
		);
	}

	const { projectRoot = '.', tag = 'master' } = context;

	// Read tasks data
	const data = readJSON(tasksPath, projectRoot, tag);
	const tasks = data?.tasks || [];

	// Validate all task IDs exist
	for (const taskId of taskIds) {
		if (!taskExists(tasks, taskId)) {
			throw new Error(`Task with ID ${taskId} not found`);
		}
	}

	const updatedTasks = [];
	let combinedTelemetryData = null;

	// Process each task
	for (const taskId of taskIds) {
		const taskResult = findTaskById(tasks, taskId);
		const task = taskResult.task;
		if (!task) {
			throw new Error(`Task with ID ${taskId} not found`);
		}

		if (outputFormat === 'text') {
			log('info', `Scoping up task ${taskId}: ${task.title}`);
		}

		// Get original complexity score (if available)
		const originalComplexity = getCurrentComplexityScore(taskId, context);
		if (originalComplexity && outputFormat === 'text') {
			log('info', `Original complexity: ${originalComplexity}/10`);
		}

		const adjustResult = await adjustTaskComplexity(
			task,
			'up',
			strength,
			customPrompt,
			context
		);

		// Regenerate subtasks based on new complexity while preserving completed work
		const subtaskResult = await regenerateSubtasksForComplexity(
			adjustResult.updatedTask,
			tasksPath,
			context,
			'up',
			strength,
			originalComplexity
		);

		// Log subtask regeneration info if in text mode
		if (outputFormat === 'text' && subtaskResult.regenerated) {
			log(
				'info',
				`Regenerated ${subtaskResult.generated} pending subtasks (preserved ${subtaskResult.preserved} completed)`
			);
		}

		// Update task in data
		const taskIndex = data.tasks.findIndex((t) => t.id === taskId);
		if (taskIndex !== -1) {
			data.tasks[taskIndex] = subtaskResult.updatedTask;
			updatedTasks.push(subtaskResult.updatedTask);
		}

		// Re-analyze complexity after scoping (if we have a session for AI calls)
		if (context.session && originalComplexity) {
			try {
				// Write the updated task first so complexity analysis can read it
				writeJSON(tasksPath, data, projectRoot, tag);

				// Re-analyze complexity
				const newComplexity = await reanalyzeTaskComplexity(
					subtaskResult.updatedTask,
					tasksPath,
					context
				);

				if (newComplexity && outputFormat === 'text') {
					const complexityChange = newComplexity - originalComplexity;
					const arrow =
						complexityChange > 0 ? '↗️' : complexityChange < 0 ? '↘️' : '➡️';
					log(
						'info',
						`New complexity: ${originalComplexity}/10 ${arrow} ${newComplexity}/10 (${complexityChange > 0 ? '+' : ''}${complexityChange})`
					);
				}
			} catch (error) {
				if (outputFormat === 'text') {
					log('warn', `Could not re-analyze complexity: ${error.message}`);
				}
			}
		}

		// Combine telemetry data
		if (adjustResult.telemetryData) {
			if (!combinedTelemetryData) {
				combinedTelemetryData = { ...adjustResult.telemetryData };
			} else {
				// Sum up costs and tokens
				combinedTelemetryData.inputTokens +=
					adjustResult.telemetryData.inputTokens || 0;
				combinedTelemetryData.outputTokens +=
					adjustResult.telemetryData.outputTokens || 0;
				combinedTelemetryData.totalTokens +=
					adjustResult.telemetryData.totalTokens || 0;
				combinedTelemetryData.totalCost +=
					adjustResult.telemetryData.totalCost || 0;
			}
		}
	}

	// Write updated data
	writeJSON(tasksPath, data, projectRoot, tag);

	if (outputFormat === 'text') {
		log('info', `Successfully scoped up ${updatedTasks.length} task(s)`);
	}

	return {
		updatedTasks,
		telemetryData: combinedTelemetryData
	};
}

/**
 * Decreases task complexity (scope-down)
 * @param {string} tasksPath - Path to tasks.json file
 * @param {Array<number>} taskIds - Array of task IDs to scope down
 * @param {string} strength - Strength level ('light', 'regular', 'heavy')
 * @param {string} customPrompt - Optional custom instructions
 * @param {Object} context - Context object with projectRoot, tag, etc.
 * @param {string} outputFormat - Output format ('text' or 'json')
 * @returns {Promise<Object>} Results of the scope-down operation
 */
export async function scopeDownTask(
	tasksPath,
	taskIds,
	strength = 'regular',
	customPrompt = null,
	context = {},
	outputFormat = 'text'
) {
	// Validate inputs
	if (!validateStrength(strength)) {
		throw new Error(
			`Invalid strength level: ${strength}. Must be one of: ${VALID_STRENGTHS.join(', ')}`
		);
	}

	const { projectRoot = '.', tag = 'master' } = context;

	// Read tasks data
	const data = readJSON(tasksPath, projectRoot, tag);
	const tasks = data?.tasks || [];

	// Validate all task IDs exist
	for (const taskId of taskIds) {
		if (!taskExists(tasks, taskId)) {
			throw new Error(`Task with ID ${taskId} not found`);
		}
	}

	const updatedTasks = [];
	let combinedTelemetryData = null;

	// Process each task
	for (const taskId of taskIds) {
		const taskResult = findTaskById(tasks, taskId);
		const task = taskResult.task;
		if (!task) {
			throw new Error(`Task with ID ${taskId} not found`);
		}

		if (outputFormat === 'text') {
			log('info', `Scoping down task ${taskId}: ${task.title}`);
		}

		// Get original complexity score (if available)
		const originalComplexity = getCurrentComplexityScore(taskId, context);
		if (originalComplexity && outputFormat === 'text') {
			log('info', `Original complexity: ${originalComplexity}/10`);
		}

		const adjustResult = await adjustTaskComplexity(
			task,
			'down',
			strength,
			customPrompt,
			context
		);

		// Regenerate subtasks based on new complexity while preserving completed work
		const subtaskResult = await regenerateSubtasksForComplexity(
			adjustResult.updatedTask,
			tasksPath,
			context,
			'down',
			strength,
			originalComplexity
		);

		// Log subtask regeneration info if in text mode
		if (outputFormat === 'text' && subtaskResult.regenerated) {
			log(
				'info',
				`Regenerated ${subtaskResult.generated} pending subtasks (preserved ${subtaskResult.preserved} completed)`
			);
		}

		// Update task in data
		const taskIndex = data.tasks.findIndex((t) => t.id === taskId);
		if (taskIndex !== -1) {
			data.tasks[taskIndex] = subtaskResult.updatedTask;
			updatedTasks.push(subtaskResult.updatedTask);
		}

		// Re-analyze complexity after scoping (if we have a session for AI calls)
		if (context.session && originalComplexity) {
			try {
				// Write the updated task first so complexity analysis can read it
				writeJSON(tasksPath, data, projectRoot, tag);

				// Re-analyze complexity
				const newComplexity = await reanalyzeTaskComplexity(
					subtaskResult.updatedTask,
					tasksPath,
					context
				);

				if (newComplexity && outputFormat === 'text') {
					const complexityChange = newComplexity - originalComplexity;
					const arrow =
						complexityChange > 0 ? '↗️' : complexityChange < 0 ? '↘️' : '➡️';
					log(
						'info',
						`New complexity: ${originalComplexity}/10 ${arrow} ${newComplexity}/10 (${complexityChange > 0 ? '+' : ''}${complexityChange})`
					);
				}
			} catch (error) {
				if (outputFormat === 'text') {
					log('warn', `Could not re-analyze complexity: ${error.message}`);
				}
			}
		}

		// Combine telemetry data
		if (adjustResult.telemetryData) {
			if (!combinedTelemetryData) {
				combinedTelemetryData = { ...adjustResult.telemetryData };
			} else {
				// Sum up costs and tokens
				combinedTelemetryData.inputTokens +=
					adjustResult.telemetryData.inputTokens || 0;
				combinedTelemetryData.outputTokens +=
					adjustResult.telemetryData.outputTokens || 0;
				combinedTelemetryData.totalTokens +=
					adjustResult.telemetryData.totalTokens || 0;
				combinedTelemetryData.totalCost +=
					adjustResult.telemetryData.totalCost || 0;
			}
		}
	}

	// Write updated data
	writeJSON(tasksPath, data, projectRoot, tag);

	if (outputFormat === 'text') {
		log('info', `Successfully scoped down ${updatedTasks.length} task(s)`);
	}

	return {
		updatedTasks,
		telemetryData: combinedTelemetryData
	};
}
```

--------------------------------------------------------------------------------
/scripts/modules/utils/contextGatherer.js:
--------------------------------------------------------------------------------

```javascript
/**
 * contextGatherer.js
 * Comprehensive context gathering utility for Task Master AI operations
 * Supports task context, file context, project tree, and custom context
 */

import fs from 'fs';
import path from 'path';
import pkg from 'gpt-tokens';
import Fuse from 'fuse.js';
import {
	readJSON,
	findTaskById,
	truncate,
	flattenTasksWithSubtasks
} from '../utils.js';

const { encode } = pkg;

/**
 * Context Gatherer class for collecting and formatting context from various sources
 */
export class ContextGatherer {
	constructor(projectRoot, tag) {
		this.projectRoot = projectRoot;
		this.tasksPath = path.join(
			projectRoot,
			'.taskmaster',
			'tasks',
			'tasks.json'
		);
		this.tag = tag;
		this.allTasks = this._loadAllTasks();
	}

	_loadAllTasks() {
		try {
			const data = readJSON(this.tasksPath, this.projectRoot, this.tag);
			const tasks = data?.tasks || [];
			return tasks;
		} catch (error) {
			console.warn(
				`Warning: Could not load tasks for ContextGatherer: ${error.message}`
			);
			return [];
		}
	}

	/**
	 * Count tokens in a text string using gpt-tokens
	 * @param {string} text - Text to count tokens for
	 * @returns {number} Token count
	 */
	countTokens(text) {
		if (!text || typeof text !== 'string') {
			return 0;
		}
		try {
			return encode(text).length;
		} catch (error) {
			// Fallback to rough character-based estimation if tokenizer fails
			// Rough estimate: ~4 characters per token for English text
			return Math.ceil(text.length / 4);
		}
	}
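	// Illustrative behavior (sketch): countTokens degrades gracefully, so a
	// caller never has to guard the tokenizer itself.
	//   gatherer.countTokens('hello world'); // exact count from gpt-tokens
	//   gatherer.countTokens(null);          // 0 (non-string input)
	//   // tokenizer failure => ceil(text.length / 4) as a rough estimate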
	/**
	 * Main method to gather context from multiple sources
	 * @param {Object} options - Context gathering options
	 * @param {Array<string>} [options.tasks] - Task/subtask IDs to include
	 * @param {Array<string>} [options.files] - File paths to include
	 * @param {string} [options.customContext] - Additional custom context
	 * @param {boolean} [options.includeProjectTree] - Include project file tree
	 * @param {string} [options.format] - Output format: 'research', 'chat', 'system-prompt'
	 * @param {boolean} [options.includeTokenCounts] - Whether to include token breakdown
	 * @param {string} [options.semanticQuery] - A query string for semantic task searching.
	 * @param {number} [options.maxSemanticResults] - Max number of semantic results.
	 * @param {Array<number>} [options.dependencyTasks] - Array of task IDs to build dependency graphs from.
	 * @returns {Promise<Object>} Object with context string and analysis data
	 */
	async gather(options = {}) {
		const {
			tasks = [],
			files = [],
			customContext = '',
			includeProjectTree = false,
			format = 'research',
			includeTokenCounts = false,
			semanticQuery,
			maxSemanticResults = 10,
			dependencyTasks = []
		} = options;

		const contextSections = [];
		const finalTaskIds = new Set(tasks.map(String));
		let analysisData = null;
		let tokenBreakdown = null;

		// Initialize token breakdown if requested
		if (includeTokenCounts) {
			tokenBreakdown = {
				total: 0,
				customContext: null,
				tasks: [],
				files: [],
				projectTree: null
			};
		}

		// Semantic Search
		if (semanticQuery && this.allTasks.length > 0) {
			const semanticResults = this._performSemanticSearch(
				semanticQuery,
				maxSemanticResults
			);
			// Store the analysis data for UI display
			analysisData = semanticResults.analysisData;
			semanticResults.tasks.forEach((task) => {
				finalTaskIds.add(String(task.id));
			});
		}

		// Dependency Graph Analysis
		if (dependencyTasks.length > 0) {
			const dependencyResults = this._buildDependencyGraphs(dependencyTasks);
			dependencyResults.allRelatedTaskIds.forEach((id) =>
				finalTaskIds.add(String(id))
			);
			// We can format and add dependencyResults.graphVisualization later if needed
		}

		// Add custom context first
		if (customContext && customContext.trim()) {
			const formattedCustomContext = this._formatCustomContext(
				customContext,
				format
			);
			contextSections.push(formattedCustomContext);

			// Calculate tokens for custom context if requested
			if (includeTokenCounts) {
				tokenBreakdown.customContext = {
					tokens: this.countTokens(formattedCustomContext),
					characters: formattedCustomContext.length
				};
				tokenBreakdown.total += tokenBreakdown.customContext.tokens;
			}
		}

		// Gather context for the final list of tasks
		if (finalTaskIds.size > 0) {
			const taskContextResult = await this._gatherTaskContext(
				Array.from(finalTaskIds),
				format,
				includeTokenCounts
			);
			if (taskContextResult.context) {
				contextSections.push(taskContextResult.context);

				// Add task breakdown if token counting is enabled
				if (includeTokenCounts && taskContextResult.breakdown) {
					tokenBreakdown.tasks = taskContextResult.breakdown;
					const taskTokens = taskContextResult.breakdown.reduce(
						(sum, task) => sum + task.tokens,
						0
					);
					tokenBreakdown.total += taskTokens;
				}
			}
		}

		// Add file context
		if (files.length > 0) {
			const fileContextResult = await this._gatherFileContext(
				files,
				format,
				includeTokenCounts
			);
			if (fileContextResult.context) {
				contextSections.push(fileContextResult.context);

				// Add file breakdown if token counting is enabled
				if (includeTokenCounts && fileContextResult.breakdown) {
					tokenBreakdown.files = fileContextResult.breakdown;
					const fileTokens = fileContextResult.breakdown.reduce(
						(sum, file) => sum + file.tokens,
						0
					);
					tokenBreakdown.total += fileTokens;
				}
			}
		}

		// Add project tree context
		if (includeProjectTree) {
			const treeContextResult = await this._gatherProjectTreeContext(
				format,
				includeTokenCounts
			);
			if (treeContextResult.context) {
				contextSections.push(treeContextResult.context);

				// Add tree breakdown if token counting is enabled
				if (includeTokenCounts && treeContextResult.breakdown) {
					tokenBreakdown.projectTree = treeContextResult.breakdown;
					tokenBreakdown.total += treeContextResult.breakdown.tokens;
				}
			}
		}

		const finalContext = this._joinContextSections(contextSections, format);

		const result = {
			context: finalContext,
			analysisData: analysisData,
			contextSections: contextSections.length,
			finalTaskIds: Array.from(finalTaskIds)
		};

		// Only include tokenBreakdown if it was requested
		if (includeTokenCounts) {
			result.tokenBreakdown = tokenBreakdown;
		}

		return result;
	}

	_performSemanticSearch(query, maxResults) {
		const searchableTasks = this.allTasks.map((task) => {
			const dependencyTitles =
				task.dependencies?.length > 0
					? task.dependencies
							.map((depId) => this.allTasks.find((t) => t.id === depId)?.title)
							.filter(Boolean)
							.join(' ')
					: '';
			return { ...task, dependencyTitles };
		});

		// Use the exact same approach as add-task.js
		const searchOptions = {
			includeScore: true, // Return match scores
			threshold: 0.4, // Lower threshold = stricter matching (range 0-1)
			keys: [
				{ name: 'title', weight: 1.5 }, // Titles carry moderate weight
				{ name: 'description', weight: 2 }, // Descriptions are weighted more heavily
				{ name: 'details', weight: 3 }, // Details carry the highest weight
				// Search dependencies to find tasks that depend on similar things
				{ name: 'dependencyTitles', weight: 0.5 }
			],
			// Sort matches by score (lower is better)
			shouldSort: true,
			// Allow searching in nested properties
			useExtendedSearch: true,
			// Return up to 50 matches
			limit: 50
		};

		// Create search index using Fuse.js
		const fuse = new Fuse(searchableTasks, searchOptions);

		// Extract significant words and phrases from the prompt (like add-task.js does)
		const promptWords = query
			.toLowerCase()
			.replace(/[^\w\s-]/g, ' ') // Replace non-alphanumeric chars with spaces
			.split(/\s+/)
			.filter((word) => word.length > 3); // Words at least 4 chars

		// Use the user's prompt for fuzzy search
		const fuzzyResults = fuse.search(query);

		// Also search for each significant word to catch different aspects
		const wordResults = [];
		for (const word of promptWords) {
			if (word.length > 5) {
				// Only use significant words
				const results = fuse.search(word);
				if (results.length > 0) {
					wordResults.push(...results);
				}
			}
		}

		// Merge and deduplicate results
		const mergedResults = [...fuzzyResults];

		// Add word results that aren't already in fuzzyResults
		for (const wordResult of wordResults) {
			if (!mergedResults.some((r) => r.item.id === wordResult.item.id)) {
				mergedResults.push(wordResult);
			}
		}

		// Group search results by relevance
		const highRelevance = mergedResults
			.filter((result) => result.score < 0.25)
			.map((result) => result.item);
		const mediumRelevance = mergedResults
			.filter((result) => result.score >= 0.25 && result.score < 0.4)
			.map((result) => result.item);

		// Get recent tasks (newest first)
		const recentTasks = [...this.allTasks]
			.sort((a, b) => b.id - a.id)
			.slice(0, 5);

		// Combine high relevance, medium relevance, and recent tasks
		// Prioritize high relevance first
		const allRelevantTasks = [...highRelevance];

		// Add medium relevance if not already included
		for (const task of mediumRelevance) {
			if (!allRelevantTasks.some((t) => t.id === task.id)) {
				allRelevantTasks.push(task);
			}
		}

		// Add recent tasks if not already included
		for (const task of recentTasks) {
			if (!allRelevantTasks.some((t) => t.id === task.id)) {
				allRelevantTasks.push(task);
			}
		}

		// Get top N results for context
		const finalResults = allRelevantTasks.slice(0, maxResults);

		return {
			tasks: finalResults,
			analysisData: {
				highRelevance: highRelevance,
				mediumRelevance: mediumRelevance,
				recentTasks: recentTasks,
				allRelevantTasks: allRelevantTasks
			}
		};
	}
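	// Illustrative scoring (sketch): Fuse.js scores are lower-is-better, so a
	// result scoring 0.1 lands in highRelevance (< 0.25), one scoring 0.3 in
	// mediumRelevance (>= 0.25, < 0.4), and anything scoring worse than the
	// 0.4 threshold is filtered out by Fuse before the grouping happens.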
	_buildDependencyContext(taskIds) {
		const { allRelatedTaskIds, graphs, depthMap } =
			this._buildDependencyGraphs(taskIds);
		if (allRelatedTaskIds.size === 0) return '';

		const dependentTasks = Array.from(allRelatedTaskIds)
			.map((id) => this.allTasks.find((t) => t.id === id))
			.filter(Boolean)
			.sort((a, b) => (depthMap.get(a.id) || 0) - (depthMap.get(b.id) || 0));

		const uniqueDetailedTasks = dependentTasks.slice(0, 8);

		let context = `\nThis task relates to a dependency structure with ${dependentTasks.length} related tasks in the chain.`;

		const directDeps = this.allTasks.filter((t) => taskIds.includes(t.id));
		if (directDeps.length > 0) {
			context += `\n\nDirect dependencies:\n${directDeps
				.map((t) => `- Task ${t.id}: ${t.title} - ${t.description}`)
				.join('\n')}`;
		}

		const indirectDeps = dependentTasks.filter((t) => !taskIds.includes(t.id));
		if (indirectDeps.length > 0) {
			context += `\n\nIndirect dependencies (dependencies of dependencies):\n${indirectDeps
				.slice(0, 5)
				.map((t) => `- Task ${t.id}: ${t.title} - ${t.description}`)
				.join('\n')}`;
			if (indirectDeps.length > 5)
				context += `\n- ... and ${
					indirectDeps.length - 5
				} more indirect dependencies`;
		}

		context += `\n\nDetailed information about dependencies:`;
		for (const depTask of uniqueDetailedTasks) {
			const isDirect = taskIds.includes(depTask.id)
				? ' [DIRECT DEPENDENCY]'
				: '';
			context += `\n\n------ Task ${depTask.id}${isDirect}: ${depTask.title} ------\n`;
			context += `Description: ${depTask.description}\n`;
			if (depTask.dependencies?.length) {
				context += `Dependencies: ${depTask.dependencies.join(', ')}\n`;
			}
			if (depTask.details) {
				context += `Implementation Details: ${truncate(
					depTask.details,
					400
				)}\n`;
			}
		}

		if (graphs.length > 0) {
			context += '\n\nDependency Chain Visualization:';
			context += graphs
				.map((graph) => this._formatDependencyChain(graph))
				.join('');
		}

		return context;
	}

	_buildDependencyGraphs(taskIds) {
		const visited = new Set();
		const depthMap = new Map();
		const graphs = [];

		for (const id of taskIds) {
			const graph = this._buildDependencyGraph(id, visited, depthMap);
			if (graph) graphs.push(graph);
		}

		return { allRelatedTaskIds: visited, graphs, depthMap };
	}

	_buildDependencyGraph(taskId, visited, depthMap, depth = 0) {
		if (visited.has(taskId) || depth > 5) return null; // Limit recursion depth

		const task = this.allTasks.find((t) => t.id === taskId);
		if (!task) return null;

		visited.add(taskId);
		if (!depthMap.has(taskId) || depth < depthMap.get(taskId)) {
			depthMap.set(taskId, depth);
		}

		const dependencies =
			task.dependencies
				?.map((depId) =>
					this._buildDependencyGraph(depId, visited, depthMap, depth + 1)
				)
				.filter(Boolean) || [];

		return { ...task, dependencies };
	}

	_formatDependencyChain(node, prefix = '', isLast = true, depth = 0) {
		if (depth > 3) return '';

		const connector = isLast ? '└── ' : '├── ';
		let result = `${prefix}${connector}Task ${node.id}: ${node.title}`;

		if (node.dependencies?.length) {
			const childPrefix = prefix + (isLast ? '    ' : '│   ');
			result += node.dependencies
				.map((dep, index) =>
					this._formatDependencyChain(
						dep,
						childPrefix,
						index === node.dependencies.length - 1,
						depth + 1
					)
				)
				.join('');
		}

		return '\n' + result;
	}

	/**
	 * Parse task ID strings into structured format
	 * Supports formats: "15", "15.2", "16,17.1"
	 * @param {Array<string>} taskIds - Array of task ID strings
	 * @returns {Array<Object>} Parsed task identifiers
	 */
	_parseTaskIds(taskIds) {
		const parsed = [];

		for (const idStr of taskIds) {
			if (idStr.includes('.')) {
				// Subtask format: "15.2"
				const [parentId, subtaskId] = idStr.split('.');
				parsed.push({
					type: 'subtask',
					parentId: parseInt(parentId, 10),
					subtaskId: parseInt(subtaskId, 10),
					fullId: idStr
				});
			} else {
				// Task format: "15"
				parsed.push({
					type: 'task',
					taskId: parseInt(idStr, 10),
					fullId: idStr
				});
			}
		}

		return parsed;
	}

	/**
	 * Gather context from tasks and subtasks
	 * @param {Array<string>} taskIds - Task/subtask IDs
	 * @param {string} format - Output format
	 * @param {boolean} includeTokenCounts - Whether to include token breakdown
	 * @returns {Promise<Object>} Task context result with breakdown
	 */
	async _gatherTaskContext(taskIds, format, includeTokenCounts = false) {
		try {
			if (!this.allTasks || this.allTasks.length === 0) {
				return { context: null, breakdown: [] };
			}

			const parsedIds = this._parseTaskIds(taskIds);
			const contextItems = [];
			const breakdown = [];

			for (const parsed of parsedIds) {
				let formattedItem = null;
				let itemInfo = null;

				if (parsed.type === 'task') {
					const result = findTaskById(this.allTasks, parsed.taskId);
					if (result.task) {
						formattedItem = this._formatTaskForContext(result.task, format);
						itemInfo = {
							id: parsed.fullId,
							type: 'task',
							title: result.task.title,
							tokens: includeTokenCounts ? this.countTokens(formattedItem) : 0,
							characters: formattedItem.length
						};
					}
				} else if (parsed.type === 'subtask') {
					const parentResult = findTaskById(this.allTasks, parsed.parentId);
					if (parentResult.task && parentResult.task.subtasks) {
						const subtask = parentResult.task.subtasks.find(
							(st) => st.id === parsed.subtaskId
						);
						if (subtask) {
							formattedItem = this._formatSubtaskForContext(
								subtask,
								parentResult.task,
								format
							);
							itemInfo = {
								id: parsed.fullId,
								type: 'subtask',
								title: subtask.title,
								parentTitle: parentResult.task.title,
								tokens: includeTokenCounts
									? this.countTokens(formattedItem)
									: 0,
								characters: formattedItem.length
							};
						}
					}
				}

				if (formattedItem && itemInfo) {
					contextItems.push(formattedItem);
					if (includeTokenCounts) {
						breakdown.push(itemInfo);
					}
				}
			}

			if (contextItems.length === 0) {
				return { context: null, breakdown: [] };
			}

			const finalContext = this._formatTaskContextSection(contextItems, format);

			return {
				context: finalContext,
				breakdown: includeTokenCounts ? breakdown : []
			};
		} catch (error) {
			console.warn(`Warning: Could not gather task context: ${error.message}`);
			return { context: null, breakdown: [] };
		}
	}

	/**
	 * Format a task for context inclusion
	 * @param {Object} task - Task object
	 * @param {string} format - Output format
	 * @returns {string} Formatted task context
	 */
	_formatTaskForContext(task, format) {
		const sections = [];

		sections.push(`**Task ${task.id}: ${task.title}**`);
		sections.push(`Description: ${task.description}`);
		sections.push(`Status: ${task.status || 'pending'}`);
		sections.push(`Priority: ${task.priority || 'medium'}`);

		if (task.dependencies && task.dependencies.length > 0) {
			sections.push(`Dependencies: ${task.dependencies.join(', ')}`);
		}

		if (task.details) {
			const details = truncate(task.details, 500);
			sections.push(`Implementation Details: ${details}`);
		}

		if (task.testStrategy) {
			const testStrategy = truncate(task.testStrategy, 300);
			sections.push(`Test Strategy: ${testStrategy}`);
		}

		if (task.subtasks && task.subtasks.length > 0) {
			sections.push(`Subtasks: ${task.subtasks.length} subtasks defined`);
		}

		return sections.join('\n');
	}

	/**
	 * Format a subtask for context inclusion
	 * @param {Object} subtask - Subtask object
	 * @param {Object} parentTask - Parent task object
	 * @param {string} format - Output format
	 * @returns {string} Formatted subtask context
	 */
	_formatSubtaskForContext(subtask, parentTask, format) {
		const sections = [];

		sections.push(
			`**Subtask ${parentTask.id}.${subtask.id}: ${subtask.title}**`
		);
		sections.push(`Parent Task: ${parentTask.title}`);
		sections.push(`Description: ${subtask.description}`);
		sections.push(`Status: ${subtask.status || 'pending'}`);

		if (subtask.dependencies && subtask.dependencies.length > 0) {
			sections.push(`Dependencies: ${subtask.dependencies.join(', ')}`);
		}

		if (subtask.details) {
			const details = truncate(subtask.details, 500);
			sections.push(`Implementation Details: ${details}`);
		}

		return sections.join('\n');
	}
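	// Illustrative output (sketch, hypothetical task) of _formatTaskForContext:
	//   **Task 7: Add login form**
	//   Description: Build the login UI
	//   Status: pending
	//   Priority: medium
	// Dependencies, details, test strategy, and subtask count are appended
	// only when present on the task, so small tasks stay compact.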
filePath : path.join(this.projectRoot, filePath); if (!fs.existsSync(fullPath)) { continue; } const stats = fs.statSync(fullPath); if (!stats.isFile()) { continue; } // Check file size (limit to 50KB for context) if (stats.size > 50 * 1024) { continue; } const content = fs.readFileSync(fullPath, 'utf-8'); const relativePath = path.relative(this.projectRoot, fullPath); const fileData = { path: relativePath, size: stats.size, content: content, lastModified: stats.mtime }; fileContents.push(fileData); // Calculate tokens for this individual file if requested if (includeTokenCounts) { const formattedFile = this._formatSingleFileForContext( fileData, format ); breakdown.push({ path: relativePath, sizeKB: Math.round(stats.size / 1024), tokens: this.countTokens(formattedFile), characters: formattedFile.length }); } } catch (error) { console.warn( `Warning: Could not read file ${filePath}: ${error.message}` ); } } if (fileContents.length === 0) { return { context: null, breakdown: [] }; } const finalContext = this._formatFileContextSection(fileContents, format); return { context: finalContext, breakdown: includeTokenCounts ? breakdown : [] }; } /** * Generate project file tree context * @param {string} format - Output format * @param {boolean} includeTokenCounts - Whether to include token breakdown * @returns {Promise<Object>} Project tree context result with breakdown */ async _gatherProjectTreeContext(format, includeTokenCounts = false) { try { const tree = this._generateFileTree(this.projectRoot, 5); // Max depth 5 const finalContext = this._formatProjectTreeSection(tree, format); const breakdown = includeTokenCounts ? { tokens: this.countTokens(finalContext), characters: finalContext.length, fileCount: tree.fileCount || 0, dirCount: tree.dirCount || 0 } : null; return { context: finalContext, breakdown: breakdown }; } catch (error) { console.warn( `Warning: Could not generate project tree: ${error.message}` ); return { context: null, breakdown: null }; } } /** * Format a single file for context (used for token counting) * @param {Object} fileData - File data object * @param {string} format - Output format * @returns {string} Formatted file context */ _formatSingleFileForContext(fileData, format) { const header = `**File: ${fileData.path}** (${Math.round(fileData.size / 1024)}KB)`; const content = `\`\`\`\n${fileData.content}\n\`\`\``; return `${header}\n\n${content}`; } /** * Generate file tree structure * @param {string} dirPath - Directory path * @param {number} maxDepth - Maximum depth to traverse * @param {number} currentDepth - Current depth * @returns {Object} File tree structure */ _generateFileTree(dirPath, maxDepth, currentDepth = 0) { const ignoreDirs = [ '.git', 'node_modules', '.env', 'coverage', 'dist', 'build' ]; const ignoreFiles = ['.DS_Store', '.env', '.env.local', '.env.production']; if (currentDepth >= maxDepth) { return null; } try { const items = fs.readdirSync(dirPath); const tree = { name: path.basename(dirPath), type: 'directory', children: [], fileCount: 0, dirCount: 0 }; for (const item of items) { if (ignoreDirs.includes(item) || ignoreFiles.includes(item)) { continue; } const itemPath = path.join(dirPath, item); const stats = fs.statSync(itemPath); if (stats.isDirectory()) { tree.dirCount++; if (currentDepth < maxDepth - 1) { const subtree = this._generateFileTree( itemPath, maxDepth, currentDepth + 1 ); if (subtree) { tree.children.push(subtree); } } } else { tree.fileCount++; tree.children.push({ name: item, type: 'file', size: stats.size }); } } return tree; } 
catch (error) { return null; } } /** * Format custom context section * @param {string} customContext - Custom context string * @param {string} format - Output format * @returns {string} Formatted custom context */ _formatCustomContext(customContext, format) { switch (format) { case 'research': return `## Additional Context\n\n${customContext}`; case 'chat': return `**Additional Context:**\n${customContext}`; case 'system-prompt': return `Additional context: ${customContext}`; default: return customContext; } } /** * Format task context section * @param {Array<string>} taskItems - Formatted task items * @param {string} format - Output format * @returns {string} Formatted task context section */ _formatTaskContextSection(taskItems, format) { switch (format) { case 'research': return `## Task Context\n\n${taskItems.join('\n\n---\n\n')}`; case 'chat': return `**Task Context:**\n\n${taskItems.join('\n\n')}`; case 'system-prompt': return `Task context: ${taskItems.join(' | ')}`; default: return taskItems.join('\n\n'); } } /** * Format file context section * @param {Array<Object>} fileContents - File content objects * @param {string} format - Output format * @returns {string} Formatted file context section */ _formatFileContextSection(fileContents, format) { const fileItems = fileContents.map((file) => { const header = `**File: ${file.path}** (${Math.round(file.size / 1024)}KB)`; const content = `\`\`\`\n${file.content}\n\`\`\``; return `${header}\n\n${content}`; }); switch (format) { case 'research': return `## File Context\n\n${fileItems.join('\n\n---\n\n')}`; case 'chat': return `**File Context:**\n\n${fileItems.join('\n\n')}`; case 'system-prompt': return `File context: ${fileContents.map((f) => `${f.path} (${f.content.substring(0, 200)}...)`).join(' | ')}`; default: return fileItems.join('\n\n'); } } /** * Format project tree section * @param {Object} tree - File tree structure * @param {string} format - Output format * @returns {string} Formatted project tree section */ _formatProjectTreeSection(tree, format) { const treeString = this._renderFileTree(tree); switch (format) { case 'research': return `## Project Structure\n\n\`\`\`\n${treeString}\n\`\`\``; case 'chat': return `**Project Structure:**\n\`\`\`\n${treeString}\n\`\`\``; case 'system-prompt': return `Project structure: ${treeString.replace(/\n/g, ' | ')}`; default: return treeString; } } /** * Render file tree as string * @param {Object} tree - File tree structure * @param {string} prefix - Current prefix for indentation * @returns {string} Rendered tree string */ _renderFileTree(tree, prefix = '') { let result = `${prefix}${tree.name}/`; if (tree.fileCount > 0 || tree.dirCount > 0) { result += ` (${tree.fileCount} files, ${tree.dirCount} dirs)`; } result += '\n'; if (tree.children) { tree.children.forEach((child, index) => { const isLast = index === tree.children.length - 1; const childPrefix = prefix + (isLast ? '└── ' : '├── '); const nextPrefix = prefix + (isLast ? 
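	// Illustrative example (hypothetical project layout): the tree rendered
	// by _renderFileTree below looks roughly like this, with per-directory
	// file/dir counts on directory lines and files as leaves:
	//
	//   my-project/ (1 files, 1 dirs)
	//   ├── src/ (2 files, 0 dirs)
	//   │   ├── index.js
	//   │   └── utils.js
	//   └── README.md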
	/**
	 * Render file tree as string
	 * @param {Object} tree - File tree structure
	 * @param {string} prefix - Connector prefix for this node's own line
	 * @param {string} indent - Accumulated guide-line indentation for children
	 * @returns {string} Rendered tree string
	 */
	_renderFileTree(tree, prefix = '', indent = '') {
		let result = `${prefix}${tree.name}/`;
		if (tree.fileCount > 0 || tree.dirCount > 0) {
			result += ` (${tree.fileCount} files, ${tree.dirCount} dirs)`;
		}
		result += '\n';

		if (tree.children) {
			tree.children.forEach((child, index) => {
				const isLast = index === tree.children.length - 1;
				const childPrefix = indent + (isLast ? '└── ' : '├── ');
				// Children of this child are indented past its connector so the
				// guide lines stack up, rather than repeating the connector string.
				const nextIndent = indent + (isLast ? '    ' : '│   ');
				if (child.type === 'directory') {
					result += this._renderFileTree(child, childPrefix, nextIndent);
				} else {
					result += `${childPrefix}${child.name}\n`;
				}
			});
		}

		return result;
	}

	/**
	 * Join context sections based on format
	 * @param {Array<string>} sections - Context sections
	 * @param {string} format - Output format
	 * @returns {string} Joined context string
	 */
	_joinContextSections(sections, format) {
		if (sections.length === 0) {
			return '';
		}

		switch (format) {
			case 'research':
				return sections.join('\n\n---\n\n');
			case 'chat':
				return sections.join('\n\n');
			case 'system-prompt':
				return sections.join(' ');
			default:
				return sections.join('\n\n');
		}
	}
}

/**
 * Factory function to create a context gatherer instance
 * @param {string} projectRoot - Project root directory
 * @param {string} tag - Tag for the task
 * @returns {ContextGatherer} Context gatherer instance
 * @throws {Error} If tag is not provided
 */
export function createContextGatherer(projectRoot, tag) {
	if (!tag) {
		throw new Error('Tag is required');
	}
	return new ContextGatherer(projectRoot, tag);
}

export default ContextGatherer;
```
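A minimal usage sketch for the factory exported above, assuming only what this excerpt shows (`createContextGatherer`, the instance method `countTokens`, and the three output formats); the import path and argument values are illustrative:

```
// Hypothetical caller; only createContextGatherer and countTokens are
// confirmed by the excerpt above.
import { createContextGatherer } from './context-gatherer.js';

const gatherer = createContextGatherer('/path/to/project', 'master');

// countTokens is what the gatherer uses internally for per-item breakdowns;
// it can size any formatted string before it is sent to a model.
const tokens = gatherer.countTokens('Some formatted context string');
console.log(`Context size: ${tokens} tokens`);

// The _format* helpers all accept one of three formats:
// 'research' (markdown headings), 'chat' (bold labels), or
// 'system-prompt' (single-line, pipe/space separated).
```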
--------------------------------------------------------------------------------
/tests/unit/config-manager.test.mjs:
--------------------------------------------------------------------------------

```
// @ts-check
/**
 * Module to test the config-manager.js functionality
 * This file uses ES module syntax (.mjs) to properly handle imports
 */

import fs from 'fs';
import path from 'path';
import { jest } from '@jest/globals';
import { fileURLToPath } from 'url';
import { sampleTasks } from '../fixtures/sample-tasks.js';

// Disable chalk's color detection which can cause fs.readFileSync calls
process.env.FORCE_COLOR = '0';

// --- Read REAL supported-models.json data BEFORE mocks ---
const __filename = fileURLToPath(import.meta.url); // Get current file path
const __dirname = path.dirname(__filename); // Get current directory
const realSupportedModelsPath = path.resolve(
	__dirname,
	'../../scripts/modules/supported-models.json'
);
let REAL_SUPPORTED_MODELS_CONTENT;
let REAL_SUPPORTED_MODELS_DATA;
try {
	REAL_SUPPORTED_MODELS_CONTENT = fs.readFileSync(
		realSupportedModelsPath,
		'utf-8'
	);
	REAL_SUPPORTED_MODELS_DATA = JSON.parse(REAL_SUPPORTED_MODELS_CONTENT);
} catch (err) {
	console.error(
		'FATAL TEST SETUP ERROR: Could not read or parse real supported-models.json',
		err
	);
	REAL_SUPPORTED_MODELS_CONTENT = '{}'; // Default to empty object on error
	REAL_SUPPORTED_MODELS_DATA = {};
	process.exit(1); // Exit if essential test data can't be loaded
}

// --- Define Mock Function Instances ---
const mockFindProjectRoot = jest.fn();
const mockLog = jest.fn();
const mockResolveEnvVariable = jest.fn();

// --- Mock fs functions directly instead of the whole module ---
const mockExistsSync = jest.fn();
const mockReadFileSync = jest.fn();
const mockWriteFileSync = jest.fn();

// Instead of mocking the entire fs module, mock just the functions we need
fs.existsSync = mockExistsSync;
fs.readFileSync = mockReadFileSync;
fs.writeFileSync = mockWriteFileSync;

// --- Test Data (Keep as is, ensure DEFAULT_CONFIG is accurate) ---
const MOCK_PROJECT_ROOT = '/mock/project';
const MOCK_CONFIG_PATH = path.join(MOCK_PROJECT_ROOT, '.taskmasterconfig');

// Updated DEFAULT_CONFIG reflecting the implementation
const DEFAULT_CONFIG = {
	models: {
		main: {
			provider: 'anthropic',
			modelId: 'claude-3-7-sonnet-20250219',
			maxTokens: 64000,
			temperature: 0.2
		},
		research: {
			provider: 'perplexity',
			modelId: 'sonar-pro',
			maxTokens: 8700,
			temperature: 0.1
		},
		fallback: {
			provider: 'anthropic',
			modelId: 'claude-3-5-sonnet',
			maxTokens: 8192,
			temperature: 0.2
		}
	},
	global: {
		logLevel: 'info',
		debug: false,
		defaultSubtasks: 5,
		defaultPriority: 'medium',
		projectName: 'Task Master',
		ollamaBaseURL: 'http://localhost:11434/api'
	}
};

// Other test data (VALID_CUSTOM_CONFIG, PARTIAL_CONFIG, INVALID_PROVIDER_CONFIG)
const VALID_CUSTOM_CONFIG = {
	models: {
		main: {
			provider: 'openai',
			modelId: 'gpt-4o',
			maxTokens: 4096,
			temperature: 0.5
		},
		research: {
			provider: 'google',
			modelId: 'gemini-1.5-pro-latest',
			maxTokens: 8192,
			temperature: 0.3
		},
		fallback: {
			provider: 'anthropic',
			modelId: 'claude-3-opus-20240229',
			maxTokens: 100000,
			temperature: 0.4
		}
	},
	global: {
		logLevel: 'debug',
		defaultPriority: 'high',
		projectName: 'My Custom Project'
	}
};

const PARTIAL_CONFIG = {
	models: {
		main: { provider: 'openai', modelId: 'gpt-4-turbo' }
	},
	global: {
		projectName: 'Partial Project'
	}
};

const INVALID_PROVIDER_CONFIG = {
	models: {
		main: { provider: 'invalid-provider', modelId: 'some-model' },
		research: {
			provider: 'perplexity',
			modelId: 'llama-3-sonar-large-32k-online'
		}
	},
	global: {
		logLevel: 'warn'
	}
};

// Define spies globally to be restored in afterAll
let consoleErrorSpy;
let consoleWarnSpy;

beforeAll(() => {
	// Set up console spies
	consoleErrorSpy = jest.spyOn(console, 'error').mockImplementation(() => {});
	consoleWarnSpy = jest.spyOn(console, 'warn').mockImplementation(() => {});
});

afterAll(() => {
	// Restore all spies
	jest.restoreAllMocks();
});

describe('Config Manager Module', () => {
	// Declare variables for imported module
	let configManager;

	// Reset mocks before each test for isolation
	beforeEach(async () => {
		// Clear all mock calls and reset implementations between tests
		jest.clearAllMocks();
		// Reset the external mock instances for utils
		mockFindProjectRoot.mockReset();
		mockLog.mockReset();
		mockResolveEnvVariable.mockReset();
		mockExistsSync.mockReset();
		mockReadFileSync.mockReset();
		mockWriteFileSync.mockReset();

		// --- Mock Dependencies BEFORE importing the module under test ---
		// Mock the 'utils.js' module using doMock (applied at runtime)
		jest.doMock('../../scripts/modules/utils.js', () => ({
			__esModule: true, // Indicate it's an ES module mock
			findProjectRoot: mockFindProjectRoot, // Use the mock function instance
			log: mockLog, // Use the mock function instance
			resolveEnvVariable: mockResolveEnvVariable // Use the mock function instance
		}));

		// Dynamically import the module under test AFTER mocking dependencies
		configManager = await import('../../scripts/modules/config-manager.js');

		// --- Default Mock Implementations ---
		mockFindProjectRoot.mockReturnValue(MOCK_PROJECT_ROOT); // Default for utils.findProjectRoot
		mockExistsSync.mockReturnValue(true); // Assume files exist by default

		// Default readFileSync: Return REAL models content, mocked config, or throw error
		mockReadFileSync.mockImplementation((filePath) => {
			const baseName = path.basename(filePath);
			if (baseName === 'supported-models.json') {
				// Return the REAL file content stringified
				return REAL_SUPPORTED_MODELS_CONTENT;
			} else if (filePath === MOCK_CONFIG_PATH) {
				// Still mock the .taskmasterconfig reads
				return JSON.stringify(DEFAULT_CONFIG); // Default behavior
			}
			// Throw for unexpected reads - helps catch errors
			throw new Error(`Unexpected fs.readFileSync call in test: ${filePath}`);
		});

		// Default writeFileSync: Do nothing, just allow calls
		mockWriteFileSync.mockImplementation(() => {});
	});

	// --- Validation Functions ---
	describe('Validation Functions', () => {
		// Tests for validateProvider and validateProviderModelCombination
		test('validateProvider should return true for valid providers', () => {
			expect(configManager.validateProvider('openai')).toBe(true);
			expect(configManager.validateProvider('anthropic')).toBe(true);
			expect(configManager.validateProvider('google')).toBe(true);
			expect(configManager.validateProvider('perplexity')).toBe(true);
			expect(configManager.validateProvider('ollama')).toBe(true);
			expect(configManager.validateProvider('openrouter')).toBe(true);
		});

		test('validateProvider should return false for invalid providers', () => {
			expect(configManager.validateProvider('invalid-provider')).toBe(false);
			expect(configManager.validateProvider('grok')).toBe(false); // Not in mock map
			expect(configManager.validateProvider('')).toBe(false);
			expect(configManager.validateProvider(null)).toBe(false);
		});

		test('validateProviderModelCombination should validate known good combinations', () => {
			// Re-load config to ensure MODEL_MAP is populated from mock (now real data)
			configManager.getConfig(MOCK_PROJECT_ROOT, true);
			expect(
				configManager.validateProviderModelCombination('openai', 'gpt-4o')
			).toBe(true);
			expect(
				configManager.validateProviderModelCombination(
					'anthropic',
					'claude-3-5-sonnet-20241022'
				)
			).toBe(true);
		});

		test('validateProviderModelCombination should return false for known bad combinations', () => {
			// Re-load config to ensure MODEL_MAP is populated from mock (now real data)
			configManager.getConfig(MOCK_PROJECT_ROOT, true);
			expect(
				configManager.validateProviderModelCombination(
					'openai',
					'claude-3-opus-20240229'
				)
			).toBe(false);
		});

		test('validateProviderModelCombination should return false for ollama/openrouter (empty lists in map)', () => {
			// Re-load config to ensure MODEL_MAP is populated from mock (now real data)
			configManager.getConfig(MOCK_PROJECT_ROOT, true);
			expect(
				configManager.validateProviderModelCombination('ollama', 'any-model')
			).toBe(false);
			expect(
				configManager.validateProviderModelCombination(
					'openrouter',
					'any/model'
				)
			).toBe(false);
		});

		test('validateProviderModelCombination should return true for providers not in map', () => {
			// Re-load config to ensure MODEL_MAP is populated from mock (now real data)
			configManager.getConfig(MOCK_PROJECT_ROOT, true);
			// The implementation returns true if the provider isn't in the map
			expect(
				configManager.validateProviderModelCombination(
					'unknown-provider',
					'some-model'
				)
			).toBe(true);
		});
	});

	// --- getConfig Tests ---
	describe('getConfig Tests', () => {
		test('should return default config if .taskmasterconfig does not exist', () => {
			// Arrange
			mockExistsSync.mockReturnValue(false);
			// findProjectRoot mock is set in beforeEach

			// Act: Call getConfig with explicit root
			const config = configManager.getConfig(MOCK_PROJECT_ROOT, true); // Force reload

			// Assert
			expect(config).toEqual(DEFAULT_CONFIG);
			expect(mockFindProjectRoot).not.toHaveBeenCalled(); // Explicit root provided
			expect(mockExistsSync).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
			expect(mockReadFileSync).not.toHaveBeenCalled(); // No read if file doesn't exist
			expect(consoleWarnSpy).toHaveBeenCalledWith(
				expect.stringContaining('not found at provided project root')
			);
		});

		test.skip('should use findProjectRoot and return defaults if file not found', () => {
			// TODO: Fix mock interaction, findProjectRoot isn't being registered as called
			// Arrange
			mockExistsSync.mockReturnValue(false);
			// findProjectRoot mock is set in beforeEach

			// Act: Call getConfig without explicit root
			const config = configManager.getConfig(null, true); // Force reload
			// Assert
			expect(mockFindProjectRoot).toHaveBeenCalled(); // Should be called now
			expect(mockExistsSync).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
			expect(config).toEqual(DEFAULT_CONFIG);
			expect(mockReadFileSync).not.toHaveBeenCalled();
			expect(consoleWarnSpy).toHaveBeenCalledWith(
				expect.stringContaining('not found at derived root')
			); // Adjusted expected warning
		});

		test('should read and merge valid config file with defaults', () => {
			// Arrange: Override readFileSync for this test
			mockReadFileSync.mockImplementation((filePath) => {
				if (filePath === MOCK_CONFIG_PATH)
					return JSON.stringify(VALID_CUSTOM_CONFIG);
				if (path.basename(filePath) === 'supported-models.json') {
					// Provide necessary models for validation within getConfig
					return JSON.stringify({
						openai: [{ id: 'gpt-4o' }],
						google: [{ id: 'gemini-1.5-pro-latest' }],
						perplexity: [{ id: 'sonar-pro' }],
						anthropic: [
							{ id: 'claude-3-opus-20240229' },
							{ id: 'claude-3-5-sonnet' },
							{ id: 'claude-3-7-sonnet-20250219' },
							{ id: 'claude-3-5-sonnet' }
						],
						ollama: [],
						openrouter: []
					});
				}
				throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
			});
			mockExistsSync.mockReturnValue(true);
			// findProjectRoot mock set in beforeEach

			// Act
			const config = configManager.getConfig(MOCK_PROJECT_ROOT, true); // Force reload

			// Assert: Construct expected merged config
			const expectedMergedConfig = {
				models: {
					main: {
						...DEFAULT_CONFIG.models.main,
						...VALID_CUSTOM_CONFIG.models.main
					},
					research: {
						...DEFAULT_CONFIG.models.research,
						...VALID_CUSTOM_CONFIG.models.research
					},
					fallback: {
						...DEFAULT_CONFIG.models.fallback,
						...VALID_CUSTOM_CONFIG.models.fallback
					}
				},
				global: { ...DEFAULT_CONFIG.global, ...VALID_CUSTOM_CONFIG.global }
			};
			expect(config).toEqual(expectedMergedConfig);
			expect(mockExistsSync).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
			expect(mockReadFileSync).toHaveBeenCalledWith(MOCK_CONFIG_PATH, 'utf-8');
		});

		test('should merge defaults for partial config file', () => {
			// Arrange
			mockReadFileSync.mockImplementation((filePath) => {
				if (filePath === MOCK_CONFIG_PATH)
					return JSON.stringify(PARTIAL_CONFIG);
				if (path.basename(filePath) === 'supported-models.json') {
					return JSON.stringify({
						openai: [{ id: 'gpt-4-turbo' }],
						perplexity: [{ id: 'sonar-pro' }],
						anthropic: [
							{ id: 'claude-3-7-sonnet-20250219' },
							{ id: 'claude-3-5-sonnet' }
						],
						ollama: [],
						openrouter: []
					});
				}
				throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
			});
			mockExistsSync.mockReturnValue(true);
			// findProjectRoot mock set in beforeEach

			// Act
			const config = configManager.getConfig(MOCK_PROJECT_ROOT, true);

			// Assert: Construct expected merged config
			const expectedMergedConfig = {
				models: {
					main: {
						...DEFAULT_CONFIG.models.main,
						...PARTIAL_CONFIG.models.main
					},
					research: { ...DEFAULT_CONFIG.models.research },
					fallback: { ...DEFAULT_CONFIG.models.fallback }
				},
				global: { ...DEFAULT_CONFIG.global, ...PARTIAL_CONFIG.global }
			};
			expect(config).toEqual(expectedMergedConfig);
			expect(mockReadFileSync).toHaveBeenCalledWith(MOCK_CONFIG_PATH, 'utf-8');
		});

		test('should handle JSON parsing error and return defaults', () => {
			// Arrange
			mockReadFileSync.mockImplementation((filePath) => {
				if (filePath === MOCK_CONFIG_PATH) return 'invalid json';
				// Mock models read needed for initial load before parse error
				if (path.basename(filePath) === 'supported-models.json') {
					return JSON.stringify({
						anthropic: [{ id: 'claude-3-7-sonnet-20250219' }],
						perplexity: [{ id: 'sonar-pro' }],
						fallback: [{ id: 'claude-3-5-sonnet' }],
						ollama: [],
						openrouter: []
					});
				}
				throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
			});
			mockExistsSync.mockReturnValue(true);
			// findProjectRoot mock set in beforeEach

			// Act
			const config = configManager.getConfig(MOCK_PROJECT_ROOT, true);

			// Assert
			expect(config).toEqual(DEFAULT_CONFIG);
			expect(consoleErrorSpy).toHaveBeenCalledWith(
				expect.stringContaining('Error reading or parsing')
			);
		});

		test('should handle file read error and return defaults', () => {
			// Arrange
			const readError = new Error('Permission denied');
			mockReadFileSync.mockImplementation((filePath) => {
				if (filePath === MOCK_CONFIG_PATH) throw readError;
				// Mock models read needed for initial load before read error
				if (path.basename(filePath) === 'supported-models.json') {
					return JSON.stringify({
						anthropic: [{ id: 'claude-3-7-sonnet-20250219' }],
						perplexity: [{ id: 'sonar-pro' }],
						fallback: [{ id: 'claude-3-5-sonnet' }],
						ollama: [],
						openrouter: []
					});
				}
				throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
			});
			mockExistsSync.mockReturnValue(true);
			// findProjectRoot mock set in beforeEach

			// Act
			const config = configManager.getConfig(MOCK_PROJECT_ROOT, true);

			// Assert
			expect(config).toEqual(DEFAULT_CONFIG);
			expect(consoleErrorSpy).toHaveBeenCalledWith(
				expect.stringContaining(
					`Permission denied. Using default configuration.`
				)
			);
		});

		test('should validate provider and fallback to default if invalid', () => {
			// Arrange
			mockReadFileSync.mockImplementation((filePath) => {
				if (filePath === MOCK_CONFIG_PATH)
					return JSON.stringify(INVALID_PROVIDER_CONFIG);
				if (path.basename(filePath) === 'supported-models.json') {
					return JSON.stringify({
						perplexity: [{ id: 'llama-3-sonar-large-32k-online' }],
						anthropic: [
							{ id: 'claude-3-7-sonnet-20250219' },
							{ id: 'claude-3-5-sonnet' }
						],
						ollama: [],
						openrouter: []
					});
				}
				throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
			});
			mockExistsSync.mockReturnValue(true);
			// findProjectRoot mock set in beforeEach

			// Act
			const config = configManager.getConfig(MOCK_PROJECT_ROOT, true);

			// Assert
			expect(consoleWarnSpy).toHaveBeenCalledWith(
				expect.stringContaining(
					'Warning: Invalid main provider "invalid-provider"'
				)
			);
			const expectedMergedConfig = {
				models: {
					main: { ...DEFAULT_CONFIG.models.main },
					research: {
						...DEFAULT_CONFIG.models.research,
						...INVALID_PROVIDER_CONFIG.models.research
					},
					fallback: { ...DEFAULT_CONFIG.models.fallback }
				},
				global: { ...DEFAULT_CONFIG.global, ...INVALID_PROVIDER_CONFIG.global }
			};
			expect(config).toEqual(expectedMergedConfig);
		});
	});

	// --- writeConfig Tests ---
	describe('writeConfig', () => {
		test('should write valid config to file', () => {
			// Arrange (Default mocks are sufficient)
			// findProjectRoot mock set in beforeEach
			mockWriteFileSync.mockImplementation(() => {}); // Ensure it doesn't throw

			// Act
			const success = configManager.writeConfig(
				VALID_CUSTOM_CONFIG,
				MOCK_PROJECT_ROOT
			);

			// Assert
			expect(success).toBe(true);
			expect(mockWriteFileSync).toHaveBeenCalledWith(
				MOCK_CONFIG_PATH,
				JSON.stringify(VALID_CUSTOM_CONFIG, null, 2) // writeConfig stringifies
			);
			expect(consoleErrorSpy).not.toHaveBeenCalled();
		});

		test('should return false and log error if write fails', () => {
			// Arrange
			const mockWriteError = new Error('Disk full');
			mockWriteFileSync.mockImplementation(() => {
				throw mockWriteError;
			});
			// findProjectRoot mock set in beforeEach

			// Act
			const success = configManager.writeConfig(
				VALID_CUSTOM_CONFIG,
				MOCK_PROJECT_ROOT
			);

			// Assert
			expect(success).toBe(false);
			expect(mockWriteFileSync).toHaveBeenCalled();
			expect(consoleErrorSpy).toHaveBeenCalledWith(
				expect.stringContaining(`Disk full`)
			);
		});
		test.skip('should return false if project root cannot be determined', () => {
			// TODO: Fix mock interaction or function logic, returns true unexpectedly in test
			// Arrange: Override mock for this specific test
			mockFindProjectRoot.mockReturnValue(null);

			// Act: Call without explicit root
			const success = configManager.writeConfig(VALID_CUSTOM_CONFIG);

			// Assert
			expect(success).toBe(false); // Function should return false if root is null
			expect(mockFindProjectRoot).toHaveBeenCalled();
			expect(mockWriteFileSync).not.toHaveBeenCalled();
			expect(consoleErrorSpy).toHaveBeenCalledWith(
				expect.stringContaining('Could not determine project root')
			);
		});
	});

	// --- Getter Functions ---
	describe('Getter Functions', () => {
		test('getMainProvider should return provider from config', () => {
			// Arrange: Set up readFileSync to return VALID_CUSTOM_CONFIG
			mockReadFileSync.mockImplementation((filePath) => {
				if (filePath === MOCK_CONFIG_PATH)
					return JSON.stringify(VALID_CUSTOM_CONFIG);
				if (path.basename(filePath) === 'supported-models.json') {
					return JSON.stringify({
						openai: [{ id: 'gpt-4o' }],
						google: [{ id: 'gemini-1.5-pro-latest' }],
						anthropic: [
							{ id: 'claude-3-opus-20240229' },
							{ id: 'claude-3-7-sonnet-20250219' },
							{ id: 'claude-3-5-sonnet' }
						],
						perplexity: [{ id: 'sonar-pro' }],
						ollama: [],
						openrouter: []
					}); // Added perplexity
				}
				throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
			});
			mockExistsSync.mockReturnValue(true);
			// findProjectRoot mock set in beforeEach

			// Act
			const provider = configManager.getMainProvider(MOCK_PROJECT_ROOT);

			// Assert
			expect(provider).toBe(VALID_CUSTOM_CONFIG.models.main.provider);
		});

		test('getLogLevel should return logLevel from config', () => {
			// Arrange: Set up readFileSync to return VALID_CUSTOM_CONFIG
			mockReadFileSync.mockImplementation((filePath) => {
				if (filePath === MOCK_CONFIG_PATH)
					return JSON.stringify(VALID_CUSTOM_CONFIG);
				if (path.basename(filePath) === 'supported-models.json') {
					// Provide enough mock model data for validation within getConfig
					return JSON.stringify({
						openai: [{ id: 'gpt-4o' }],
						google: [{ id: 'gemini-1.5-pro-latest' }],
						anthropic: [
							{ id: 'claude-3-opus-20240229' },
							{ id: 'claude-3-7-sonnet-20250219' },
							{ id: 'claude-3-5-sonnet' }
						],
						perplexity: [{ id: 'sonar-pro' }],
						ollama: [],
						openrouter: []
					});
				}
				throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
			});
			mockExistsSync.mockReturnValue(true);
			// findProjectRoot mock set in beforeEach

			// Act
			const logLevel = configManager.getLogLevel(MOCK_PROJECT_ROOT);

			// Assert
			expect(logLevel).toBe(VALID_CUSTOM_CONFIG.global.logLevel);
		});

		// Add more tests for other getters (getResearchProvider, getProjectName, etc.)
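		// Illustrative sketch of one such getter test, assuming getProjectName
		// mirrors the getLogLevel pattern above (assumption: a getter with this
		// name and signature exists, as the comment above suggests):
		//
		// test('getProjectName should return projectName from config', () => {
		// 	mockReadFileSync.mockImplementation((filePath) => {
		// 		if (filePath === MOCK_CONFIG_PATH)
		// 			return JSON.stringify(VALID_CUSTOM_CONFIG);
		// 		if (path.basename(filePath) === 'supported-models.json')
		// 			return REAL_SUPPORTED_MODELS_CONTENT;
		// 		throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		// 	});
		// 	const projectName = configManager.getProjectName(MOCK_PROJECT_ROOT);
		// 	expect(projectName).toBe(VALID_CUSTOM_CONFIG.global.projectName);
		// });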
	});

	// --- isConfigFilePresent Tests ---
	describe('isConfigFilePresent', () => {
		test('should return true if config file exists', () => {
			mockExistsSync.mockReturnValue(true);
			// findProjectRoot mock set in beforeEach
			expect(configManager.isConfigFilePresent(MOCK_PROJECT_ROOT)).toBe(true);
			expect(mockExistsSync).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
		});

		test('should return false if config file does not exist', () => {
			mockExistsSync.mockReturnValue(false);
			// findProjectRoot mock set in beforeEach
			expect(configManager.isConfigFilePresent(MOCK_PROJECT_ROOT)).toBe(false);
			expect(mockExistsSync).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
		});

		test.skip('should use findProjectRoot if explicitRoot is not provided', () => {
			// TODO: Fix mock interaction, findProjectRoot isn't being registered as called
			mockExistsSync.mockReturnValue(true);
			// findProjectRoot mock set in beforeEach
			expect(configManager.isConfigFilePresent()).toBe(true);
			expect(mockFindProjectRoot).toHaveBeenCalled(); // Should be called now
		});
	});

	// --- getAllProviders Tests ---
	describe('getAllProviders', () => {
		test('should return list of providers from supported-models.json', () => {
			// Arrange: Ensure config is loaded with real data
			configManager.getConfig(null, true); // Force load using the mock that returns real data

			// Act
			const providers = configManager.getAllProviders();

			// Assert against the actual keys in the REAL loaded data
			const expectedProviders = Object.keys(REAL_SUPPORTED_MODELS_DATA);
			expect(providers).toEqual(expect.arrayContaining(expectedProviders));
			expect(providers.length).toBe(expectedProviders.length);
		});
	});

	// Add tests for getParametersForRole if needed

	// Note: Tests for setMainModel, setResearchModel were removed as the functions were removed in the implementation.
	// If similar setter functions exist, add tests for them following the writeConfig pattern.
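	// Illustrative sketch for the getParametersForRole test suggested above
	// (assumption: the function takes a role name plus an explicit root and
	// returns that role's maxTokens/temperature; verify the real signature
	// before enabling):
	//
	// test('getParametersForRole should return params for the main role', () => {
	// 	const params = configManager.getParametersForRole('main', MOCK_PROJECT_ROOT);
	// 	expect(params).toEqual({
	// 		maxTokens: DEFAULT_CONFIG.models.main.maxTokens,
	// 		temperature: DEFAULT_CONFIG.models.main.temperature
	// 	});
	// });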
	// --- isApiKeySet Tests ---
	describe('isApiKeySet', () => {
		const mockSession = { env: {} }; // Mock session for MCP context

		// Test cases: [providerName, envVarName, keyValue, expectedResult, testName]
		const testCases = [
			// Valid Keys
			['anthropic', 'ANTHROPIC_API_KEY', 'sk-valid-key', true, 'valid Anthropic key'],
			['openai', 'OPENAI_API_KEY', 'sk-another-valid-key', true, 'valid OpenAI key'],
			['perplexity', 'PERPLEXITY_API_KEY', 'pplx-valid', true, 'valid Perplexity key'],
			['google', 'GOOGLE_API_KEY', 'google-valid-key', true, 'valid Google key'],
			['mistral', 'MISTRAL_API_KEY', 'mistral-valid-key', true, 'valid Mistral key'],
			['openrouter', 'OPENROUTER_API_KEY', 'or-valid-key', true, 'valid OpenRouter key'],
			['xai', 'XAI_API_KEY', 'xai-valid-key', true, 'valid XAI key'],
			['azure', 'AZURE_OPENAI_API_KEY', 'azure-valid-key', true, 'valid Azure key'],
			// Ollama (special case - no key needed)
			['ollama', 'OLLAMA_API_KEY', undefined, true, 'Ollama provider (no key needed)'], // OLLAMA_API_KEY might not be in keyMap
			// Invalid / Missing Keys
			['anthropic', 'ANTHROPIC_API_KEY', undefined, false, 'missing Anthropic key'],
			['anthropic', 'ANTHROPIC_API_KEY', null, false, 'null Anthropic key'],
			['openai', 'OPENAI_API_KEY', '', false, 'empty OpenAI key'],
			['perplexity', 'PERPLEXITY_API_KEY', ' ', false, 'whitespace Perplexity key'],
			// Placeholder Keys
			['google', 'GOOGLE_API_KEY', 'YOUR_GOOGLE_API_KEY_HERE', false, 'placeholder Google key (YOUR_..._HERE)'],
			['mistral', 'MISTRAL_API_KEY', 'MISTRAL_KEY_HERE', false, 'placeholder Mistral key (..._KEY_HERE)'],
			['openrouter', 'OPENROUTER_API_KEY', 'ENTER_OPENROUTER_KEY_HERE', false, 'placeholder OpenRouter key (general ...KEY_HERE)'],
			// Unknown provider
			['unknownprovider', 'UNKNOWN_KEY', 'any-key', false, 'unknown provider']
		];

		testCases.forEach(
			([providerName, envVarName, keyValue, expectedResult, testName]) => {
				test(`should return ${expectedResult} for ${testName} (CLI context)`, () => {
					// CLI context (resolveEnvVariable uses process.env or .env via projectRoot)
					mockResolveEnvVariable.mockImplementation((key) => {
						return key === envVarName ? keyValue : undefined;
					});

					expect(
						configManager.isApiKeySet(providerName, null, MOCK_PROJECT_ROOT)
					).toBe(expectedResult);

					if (providerName !== 'ollama' && providerName !== 'unknownprovider') {
						// Ollama and unknown don't try to resolve
						expect(mockResolveEnvVariable).toHaveBeenCalledWith(
							envVarName,
							null,
							MOCK_PROJECT_ROOT
						);
					}
				});

				test(`should return ${expectedResult} for ${testName} (MCP context)`, () => {
					// MCP context (resolveEnvVariable uses session.env)
					const mcpSession = { env: { [envVarName]: keyValue } };
					mockResolveEnvVariable.mockImplementation((key, sessionArg) => {
						return sessionArg && sessionArg.env
							? sessionArg.env[key]
							: undefined;
					});

					expect(
						configManager.isApiKeySet(providerName, mcpSession, null)
					).toBe(expectedResult);

					if (providerName !== 'ollama' && providerName !== 'unknownprovider') {
						expect(mockResolveEnvVariable).toHaveBeenCalledWith(
							envVarName,
							mcpSession,
							null
						);
					}
				});
			}
		);

		test('isApiKeySet should log a warning for an unknown provider', () => {
			mockLog.mockClear(); // Clear previous log calls
			configManager.isApiKeySet('nonexistentprovider');
			expect(mockLog).toHaveBeenCalledWith(
				'warn',
				expect.stringContaining('Unknown provider name: nonexistentprovider')
			);
		});

		test('isApiKeySet should handle provider names case-insensitively for keyMap lookup', () => {
			mockResolveEnvVariable.mockReturnValue('a-valid-key');
			expect(
				configManager.isApiKeySet('Anthropic', null, MOCK_PROJECT_ROOT)
			).toBe(true);
			expect(mockResolveEnvVariable).toHaveBeenCalledWith(
				'ANTHROPIC_API_KEY',
				null,
				MOCK_PROJECT_ROOT
			);

			mockResolveEnvVariable.mockReturnValue('another-valid-key');
			expect(configManager.isApiKeySet('OPENAI', null, MOCK_PROJECT_ROOT)).toBe(
				true
			);
			expect(mockResolveEnvVariable).toHaveBeenCalledWith(
				'OPENAI_API_KEY',
				null,
				MOCK_PROJECT_ROOT
			);
		});
	});
});
```
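The test file above leans on one ESM-specific Jest idiom worth calling out: `jest.doMock` only affects modules imported *after* it runs, so the module under test must be loaded with a dynamic `import()` inside `beforeEach`. A minimal sketch of the pattern, with illustrative module paths and names:

```
import { jest } from '@jest/globals';

const mockHelper = jest.fn();
let subject; // module under test, re-imported fresh for each test

beforeEach(async () => {
	// doMock is not hoisted (unlike jest.mock), so it must be registered
	// before the import it is meant to intercept.
	jest.doMock('./helper.js', () => ({
		__esModule: true,
		helper: mockHelper
	}));

	// A dynamic import AFTER doMock binds the subject to the mocked dependency.
	subject = await import('./subject.js');
});
```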