This is page 35 of 50. Use http://codebase.md/eyaltoledano/claude-task-master?page={x} to view the full context.
# Directory Structure
```
├── .changeset
│ ├── config.json
│ └── README.md
├── .claude
│ ├── commands
│ │ └── dedupe.md
│ └── TM_COMMANDS_GUIDE.md
├── .claude-plugin
│ └── marketplace.json
├── .coderabbit.yaml
├── .cursor
│ ├── mcp.json
│ └── rules
│ ├── ai_providers.mdc
│ ├── ai_services.mdc
│ ├── architecture.mdc
│ ├── changeset.mdc
│ ├── commands.mdc
│ ├── context_gathering.mdc
│ ├── cursor_rules.mdc
│ ├── dependencies.mdc
│ ├── dev_workflow.mdc
│ ├── git_workflow.mdc
│ ├── glossary.mdc
│ ├── mcp.mdc
│ ├── new_features.mdc
│ ├── self_improve.mdc
│ ├── tags.mdc
│ ├── taskmaster.mdc
│ ├── tasks.mdc
│ ├── telemetry.mdc
│ ├── test_workflow.mdc
│ ├── tests.mdc
│ ├── ui.mdc
│ └── utilities.mdc
├── .cursorignore
├── .env.example
├── .github
│ ├── ISSUE_TEMPLATE
│ │ ├── bug_report.md
│ │ ├── enhancements---feature-requests.md
│ │ └── feedback.md
│ ├── PULL_REQUEST_TEMPLATE
│ │ ├── bugfix.md
│ │ ├── config.yml
│ │ ├── feature.md
│ │ └── integration.md
│ ├── PULL_REQUEST_TEMPLATE.md
│ ├── scripts
│ │ ├── auto-close-duplicates.mjs
│ │ ├── backfill-duplicate-comments.mjs
│ │ ├── check-pre-release-mode.mjs
│ │ ├── parse-metrics.mjs
│ │ ├── release.mjs
│ │ ├── tag-extension.mjs
│ │ ├── utils.mjs
│ │ └── validate-changesets.mjs
│ └── workflows
│ ├── auto-close-duplicates.yml
│ ├── backfill-duplicate-comments.yml
│ ├── ci.yml
│ ├── claude-dedupe-issues.yml
│ ├── claude-docs-trigger.yml
│ ├── claude-docs-updater.yml
│ ├── claude-issue-triage.yml
│ ├── claude.yml
│ ├── extension-ci.yml
│ ├── extension-release.yml
│ ├── log-issue-events.yml
│ ├── pre-release.yml
│ ├── release-check.yml
│ ├── release.yml
│ ├── update-models-md.yml
│ └── weekly-metrics-discord.yml
├── .gitignore
├── .kiro
│ ├── hooks
│ │ ├── tm-code-change-task-tracker.kiro.hook
│ │ ├── tm-complexity-analyzer.kiro.hook
│ │ ├── tm-daily-standup-assistant.kiro.hook
│ │ ├── tm-git-commit-task-linker.kiro.hook
│ │ ├── tm-pr-readiness-checker.kiro.hook
│ │ ├── tm-task-dependency-auto-progression.kiro.hook
│ │ └── tm-test-success-task-completer.kiro.hook
│ ├── settings
│ │ └── mcp.json
│ └── steering
│ ├── dev_workflow.md
│ ├── kiro_rules.md
│ ├── self_improve.md
│ ├── taskmaster_hooks_workflow.md
│ └── taskmaster.md
├── .manypkg.json
├── .mcp.json
├── .npmignore
├── .nvmrc
├── .taskmaster
│ ├── CLAUDE.md
│ ├── config.json
│ ├── docs
│ │ ├── autonomous-tdd-git-workflow.md
│ │ ├── MIGRATION-ROADMAP.md
│ │ ├── prd-tm-start.txt
│ │ ├── prd.txt
│ │ ├── README.md
│ │ ├── research
│ │ │ ├── 2025-06-14_how-can-i-improve-the-scope-up-and-scope-down-comm.md
│ │ │ ├── 2025-06-14_should-i-be-using-any-specific-libraries-for-this.md
│ │ │ ├── 2025-06-14_test-save-functionality.md
│ │ │ ├── 2025-06-14_test-the-fix-for-duplicate-saves-final-test.md
│ │ │ └── 2025-08-01_do-we-need-to-add-new-commands-or-can-we-just-weap.md
│ │ ├── task-template-importing-prd.txt
│ │ ├── tdd-workflow-phase-0-spike.md
│ │ ├── tdd-workflow-phase-1-core-rails.md
│ │ ├── tdd-workflow-phase-1-orchestrator.md
│ │ ├── tdd-workflow-phase-2-pr-resumability.md
│ │ ├── tdd-workflow-phase-3-extensibility-guardrails.md
│ │ ├── test-prd.txt
│ │ └── tm-core-phase-1.txt
│ ├── reports
│ │ ├── task-complexity-report_autonomous-tdd-git-workflow.json
│ │ ├── task-complexity-report_cc-kiro-hooks.json
│ │ ├── task-complexity-report_tdd-phase-1-core-rails.json
│ │ ├── task-complexity-report_tdd-workflow-phase-0.json
│ │ ├── task-complexity-report_test-prd-tag.json
│ │ ├── task-complexity-report_tm-core-phase-1.json
│ │ ├── task-complexity-report.json
│ │ └── tm-core-complexity.json
│ ├── state.json
│ ├── tasks
│ │ ├── task_001_tm-start.txt
│ │ ├── task_002_tm-start.txt
│ │ ├── task_003_tm-start.txt
│ │ ├── task_004_tm-start.txt
│ │ ├── task_007_tm-start.txt
│ │ └── tasks.json
│ └── templates
│ ├── example_prd_rpg.md
│ └── example_prd.md
├── .vscode
│ ├── extensions.json
│ └── settings.json
├── apps
│ ├── cli
│ │ ├── CHANGELOG.md
│ │ ├── package.json
│ │ ├── src
│ │ │ ├── command-registry.ts
│ │ │ ├── commands
│ │ │ │ ├── auth.command.ts
│ │ │ │ ├── autopilot
│ │ │ │ │ ├── abort.command.ts
│ │ │ │ │ ├── commit.command.ts
│ │ │ │ │ ├── complete.command.ts
│ │ │ │ │ ├── index.ts
│ │ │ │ │ ├── next.command.ts
│ │ │ │ │ ├── resume.command.ts
│ │ │ │ │ ├── shared.ts
│ │ │ │ │ ├── start.command.ts
│ │ │ │ │ └── status.command.ts
│ │ │ │ ├── briefs.command.ts
│ │ │ │ ├── context.command.ts
│ │ │ │ ├── export.command.ts
│ │ │ │ ├── list.command.ts
│ │ │ │ ├── models
│ │ │ │ │ ├── custom-providers.ts
│ │ │ │ │ ├── fetchers.ts
│ │ │ │ │ ├── index.ts
│ │ │ │ │ ├── prompts.ts
│ │ │ │ │ ├── setup.ts
│ │ │ │ │ └── types.ts
│ │ │ │ ├── next.command.ts
│ │ │ │ ├── set-status.command.ts
│ │ │ │ ├── show.command.ts
│ │ │ │ ├── start.command.ts
│ │ │ │ └── tags.command.ts
│ │ │ ├── index.ts
│ │ │ ├── lib
│ │ │ │ └── model-management.ts
│ │ │ ├── types
│ │ │ │ └── tag-management.d.ts
│ │ │ ├── ui
│ │ │ │ ├── components
│ │ │ │ │ ├── cardBox.component.ts
│ │ │ │ │ ├── dashboard.component.ts
│ │ │ │ │ ├── header.component.ts
│ │ │ │ │ ├── index.ts
│ │ │ │ │ ├── next-task.component.ts
│ │ │ │ │ ├── suggested-steps.component.ts
│ │ │ │ │ └── task-detail.component.ts
│ │ │ │ ├── display
│ │ │ │ │ ├── messages.ts
│ │ │ │ │ └── tables.ts
│ │ │ │ ├── formatters
│ │ │ │ │ ├── complexity-formatters.ts
│ │ │ │ │ ├── dependency-formatters.ts
│ │ │ │ │ ├── priority-formatters.ts
│ │ │ │ │ ├── status-formatters.spec.ts
│ │ │ │ │ └── status-formatters.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── layout
│ │ │ │ ├── helpers.spec.ts
│ │ │ │ └── helpers.ts
│ │ │ └── utils
│ │ │ ├── auth-helpers.ts
│ │ │ ├── auto-update.ts
│ │ │ ├── brief-selection.ts
│ │ │ ├── display-helpers.ts
│ │ │ ├── error-handler.ts
│ │ │ ├── index.ts
│ │ │ ├── project-root.ts
│ │ │ ├── task-status.ts
│ │ │ ├── ui.spec.ts
│ │ │ └── ui.ts
│ │ ├── tests
│ │ │ ├── integration
│ │ │ │ └── commands
│ │ │ │ └── autopilot
│ │ │ │ └── workflow.test.ts
│ │ │ └── unit
│ │ │ ├── commands
│ │ │ │ ├── autopilot
│ │ │ │ │ └── shared.test.ts
│ │ │ │ ├── list.command.spec.ts
│ │ │ │ └── show.command.spec.ts
│ │ │ └── ui
│ │ │ └── dashboard.component.spec.ts
│ │ ├── tsconfig.json
│ │ └── vitest.config.ts
│ ├── docs
│ │ ├── archive
│ │ │ ├── ai-client-utils-example.mdx
│ │ │ ├── ai-development-workflow.mdx
│ │ │ ├── command-reference.mdx
│ │ │ ├── configuration.mdx
│ │ │ ├── cursor-setup.mdx
│ │ │ ├── examples.mdx
│ │ │ └── Installation.mdx
│ │ ├── best-practices
│ │ │ ├── advanced-tasks.mdx
│ │ │ ├── configuration-advanced.mdx
│ │ │ └── index.mdx
│ │ ├── capabilities
│ │ │ ├── cli-root-commands.mdx
│ │ │ ├── index.mdx
│ │ │ ├── mcp.mdx
│ │ │ ├── rpg-method.mdx
│ │ │ └── task-structure.mdx
│ │ ├── CHANGELOG.md
│ │ ├── command-reference.mdx
│ │ ├── configuration.mdx
│ │ ├── docs.json
│ │ ├── favicon.svg
│ │ ├── getting-started
│ │ │ ├── api-keys.mdx
│ │ │ ├── contribute.mdx
│ │ │ ├── faq.mdx
│ │ │ └── quick-start
│ │ │ ├── configuration-quick.mdx
│ │ │ ├── execute-quick.mdx
│ │ │ ├── installation.mdx
│ │ │ ├── moving-forward.mdx
│ │ │ ├── prd-quick.mdx
│ │ │ ├── quick-start.mdx
│ │ │ ├── requirements.mdx
│ │ │ ├── rules-quick.mdx
│ │ │ └── tasks-quick.mdx
│ │ ├── introduction.mdx
│ │ ├── licensing.md
│ │ ├── logo
│ │ │ ├── dark.svg
│ │ │ ├── light.svg
│ │ │ └── task-master-logo.png
│ │ ├── package.json
│ │ ├── README.md
│ │ ├── style.css
│ │ ├── tdd-workflow
│ │ │ ├── ai-agent-integration.mdx
│ │ │ └── quickstart.mdx
│ │ ├── vercel.json
│ │ └── whats-new.mdx
│ ├── extension
│ │ ├── .vscodeignore
│ │ ├── assets
│ │ │ ├── banner.png
│ │ │ ├── icon-dark.svg
│ │ │ ├── icon-light.svg
│ │ │ ├── icon.png
│ │ │ ├── screenshots
│ │ │ │ ├── kanban-board.png
│ │ │ │ └── task-details.png
│ │ │ └── sidebar-icon.svg
│ │ ├── CHANGELOG.md
│ │ ├── components.json
│ │ ├── docs
│ │ │ ├── extension-CI-setup.md
│ │ │ └── extension-development-guide.md
│ │ ├── esbuild.js
│ │ ├── LICENSE
│ │ ├── package.json
│ │ ├── package.mjs
│ │ ├── package.publish.json
│ │ ├── README.md
│ │ ├── src
│ │ │ ├── components
│ │ │ │ ├── ConfigView.tsx
│ │ │ │ ├── constants.ts
│ │ │ │ ├── TaskDetails
│ │ │ │ │ ├── AIActionsSection.tsx
│ │ │ │ │ ├── DetailsSection.tsx
│ │ │ │ │ ├── PriorityBadge.tsx
│ │ │ │ │ ├── SubtasksSection.tsx
│ │ │ │ │ ├── TaskMetadataSidebar.tsx
│ │ │ │ │ └── useTaskDetails.ts
│ │ │ │ ├── TaskDetailsView.tsx
│ │ │ │ ├── TaskMasterLogo.tsx
│ │ │ │ └── ui
│ │ │ │ ├── badge.tsx
│ │ │ │ ├── breadcrumb.tsx
│ │ │ │ ├── button.tsx
│ │ │ │ ├── card.tsx
│ │ │ │ ├── collapsible.tsx
│ │ │ │ ├── CollapsibleSection.tsx
│ │ │ │ ├── dropdown-menu.tsx
│ │ │ │ ├── label.tsx
│ │ │ │ ├── scroll-area.tsx
│ │ │ │ ├── separator.tsx
│ │ │ │ ├── shadcn-io
│ │ │ │ │ └── kanban
│ │ │ │ │ └── index.tsx
│ │ │ │ └── textarea.tsx
│ │ │ ├── extension.ts
│ │ │ ├── index.ts
│ │ │ ├── lib
│ │ │ │ └── utils.ts
│ │ │ ├── services
│ │ │ │ ├── config-service.ts
│ │ │ │ ├── error-handler.ts
│ │ │ │ ├── notification-preferences.ts
│ │ │ │ ├── polling-service.ts
│ │ │ │ ├── polling-strategies.ts
│ │ │ │ ├── sidebar-webview-manager.ts
│ │ │ │ ├── task-repository.ts
│ │ │ │ ├── terminal-manager.ts
│ │ │ │ └── webview-manager.ts
│ │ │ ├── test
│ │ │ │ └── extension.test.ts
│ │ │ ├── utils
│ │ │ │ ├── configManager.ts
│ │ │ │ ├── connectionManager.ts
│ │ │ │ ├── errorHandler.ts
│ │ │ │ ├── event-emitter.ts
│ │ │ │ ├── logger.ts
│ │ │ │ ├── mcpClient.ts
│ │ │ │ ├── notificationPreferences.ts
│ │ │ │ └── task-master-api
│ │ │ │ ├── cache
│ │ │ │ │ └── cache-manager.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── mcp-client.ts
│ │ │ │ ├── transformers
│ │ │ │ │ └── task-transformer.ts
│ │ │ │ └── types
│ │ │ │ └── index.ts
│ │ │ └── webview
│ │ │ ├── App.tsx
│ │ │ ├── components
│ │ │ │ ├── AppContent.tsx
│ │ │ │ ├── EmptyState.tsx
│ │ │ │ ├── ErrorBoundary.tsx
│ │ │ │ ├── PollingStatus.tsx
│ │ │ │ ├── PriorityBadge.tsx
│ │ │ │ ├── SidebarView.tsx
│ │ │ │ ├── TagDropdown.tsx
│ │ │ │ ├── TaskCard.tsx
│ │ │ │ ├── TaskEditModal.tsx
│ │ │ │ ├── TaskMasterKanban.tsx
│ │ │ │ ├── ToastContainer.tsx
│ │ │ │ └── ToastNotification.tsx
│ │ │ ├── constants
│ │ │ │ └── index.ts
│ │ │ ├── contexts
│ │ │ │ └── VSCodeContext.tsx
│ │ │ ├── hooks
│ │ │ │ ├── useTaskQueries.ts
│ │ │ │ ├── useVSCodeMessages.ts
│ │ │ │ └── useWebviewHeight.ts
│ │ │ ├── index.css
│ │ │ ├── index.tsx
│ │ │ ├── providers
│ │ │ │ └── QueryProvider.tsx
│ │ │ ├── reducers
│ │ │ │ └── appReducer.ts
│ │ │ ├── sidebar.tsx
│ │ │ ├── types
│ │ │ │ └── index.ts
│ │ │ └── utils
│ │ │ ├── logger.ts
│ │ │ └── toast.ts
│ │ └── tsconfig.json
│ └── mcp
│ ├── CHANGELOG.md
│ ├── package.json
│ ├── src
│ │ ├── index.ts
│ │ ├── shared
│ │ │ ├── types.ts
│ │ │ └── utils.ts
│ │ └── tools
│ │ ├── autopilot
│ │ │ ├── abort.tool.ts
│ │ │ ├── commit.tool.ts
│ │ │ ├── complete.tool.ts
│ │ │ ├── finalize.tool.ts
│ │ │ ├── index.ts
│ │ │ ├── next.tool.ts
│ │ │ ├── resume.tool.ts
│ │ │ ├── start.tool.ts
│ │ │ └── status.tool.ts
│ │ ├── README-ZOD-V3.md
│ │ └── tasks
│ │ ├── get-task.tool.ts
│ │ ├── get-tasks.tool.ts
│ │ └── index.ts
│ ├── tsconfig.json
│ └── vitest.config.ts
├── assets
│ ├── .windsurfrules
│ ├── AGENTS.md
│ ├── claude
│ │ └── TM_COMMANDS_GUIDE.md
│ ├── config.json
│ ├── env.example
│ ├── example_prd_rpg.txt
│ ├── example_prd.txt
│ ├── GEMINI.md
│ ├── gitignore
│ ├── kiro-hooks
│ │ ├── tm-code-change-task-tracker.kiro.hook
│ │ ├── tm-complexity-analyzer.kiro.hook
│ │ ├── tm-daily-standup-assistant.kiro.hook
│ │ ├── tm-git-commit-task-linker.kiro.hook
│ │ ├── tm-pr-readiness-checker.kiro.hook
│ │ ├── tm-task-dependency-auto-progression.kiro.hook
│ │ └── tm-test-success-task-completer.kiro.hook
│ ├── roocode
│ │ ├── .roo
│ │ │ ├── rules-architect
│ │ │ │ └── architect-rules
│ │ │ ├── rules-ask
│ │ │ │ └── ask-rules
│ │ │ ├── rules-code
│ │ │ │ └── code-rules
│ │ │ ├── rules-debug
│ │ │ │ └── debug-rules
│ │ │ ├── rules-orchestrator
│ │ │ │ └── orchestrator-rules
│ │ │ └── rules-test
│ │ │ └── test-rules
│ │ └── .roomodes
│ ├── rules
│ │ ├── cursor_rules.mdc
│ │ ├── dev_workflow.mdc
│ │ ├── self_improve.mdc
│ │ ├── taskmaster_hooks_workflow.mdc
│ │ └── taskmaster.mdc
│ └── scripts_README.md
├── bin
│ └── task-master.js
├── biome.json
├── CHANGELOG.md
├── CLAUDE_CODE_PLUGIN.md
├── CLAUDE.md
├── context
│ ├── chats
│ │ ├── add-task-dependencies-1.md
│ │ └── max-min-tokens.txt.md
│ ├── fastmcp-core.txt
│ ├── fastmcp-docs.txt
│ ├── MCP_INTEGRATION.md
│ ├── mcp-js-sdk-docs.txt
│ ├── mcp-protocol-repo.txt
│ ├── mcp-protocol-schema-03262025.json
│ └── mcp-protocol-spec.txt
├── CONTRIBUTING.md
├── docs
│ ├── claude-code-integration.md
│ ├── CLI-COMMANDER-PATTERN.md
│ ├── command-reference.md
│ ├── configuration.md
│ ├── contributor-docs
│ │ ├── testing-roo-integration.md
│ │ └── worktree-setup.md
│ ├── cross-tag-task-movement.md
│ ├── examples
│ │ ├── claude-code-usage.md
│ │ └── codex-cli-usage.md
│ ├── examples.md
│ ├── licensing.md
│ ├── mcp-provider-guide.md
│ ├── mcp-provider.md
│ ├── migration-guide.md
│ ├── models.md
│ ├── providers
│ │ ├── codex-cli.md
│ │ └── gemini-cli.md
│ ├── README.md
│ ├── scripts
│ │ └── models-json-to-markdown.js
│ ├── task-structure.md
│ └── tutorial.md
├── images
│ ├── hamster-hiring.png
│ └── logo.png
├── index.js
├── jest.config.js
├── jest.resolver.cjs
├── LICENSE
├── llms-install.md
├── mcp-server
│ ├── server.js
│ └── src
│ ├── core
│ │ ├── __tests__
│ │ │ └── context-manager.test.js
│ │ ├── context-manager.js
│ │ ├── direct-functions
│ │ │ ├── add-dependency.js
│ │ │ ├── add-subtask.js
│ │ │ ├── add-tag.js
│ │ │ ├── add-task.js
│ │ │ ├── analyze-task-complexity.js
│ │ │ ├── cache-stats.js
│ │ │ ├── clear-subtasks.js
│ │ │ ├── complexity-report.js
│ │ │ ├── copy-tag.js
│ │ │ ├── create-tag-from-branch.js
│ │ │ ├── delete-tag.js
│ │ │ ├── expand-all-tasks.js
│ │ │ ├── expand-task.js
│ │ │ ├── fix-dependencies.js
│ │ │ ├── generate-task-files.js
│ │ │ ├── initialize-project.js
│ │ │ ├── list-tags.js
│ │ │ ├── models.js
│ │ │ ├── move-task-cross-tag.js
│ │ │ ├── move-task.js
│ │ │ ├── next-task.js
│ │ │ ├── parse-prd.js
│ │ │ ├── remove-dependency.js
│ │ │ ├── remove-subtask.js
│ │ │ ├── remove-task.js
│ │ │ ├── rename-tag.js
│ │ │ ├── research.js
│ │ │ ├── response-language.js
│ │ │ ├── rules.js
│ │ │ ├── scope-down.js
│ │ │ ├── scope-up.js
│ │ │ ├── set-task-status.js
│ │ │ ├── update-subtask-by-id.js
│ │ │ ├── update-task-by-id.js
│ │ │ ├── update-tasks.js
│ │ │ ├── use-tag.js
│ │ │ └── validate-dependencies.js
│ │ ├── task-master-core.js
│ │ └── utils
│ │ ├── env-utils.js
│ │ └── path-utils.js
│ ├── custom-sdk
│ │ ├── errors.js
│ │ ├── index.js
│ │ ├── json-extractor.js
│ │ ├── language-model.js
│ │ ├── message-converter.js
│ │ └── schema-converter.js
│ ├── index.js
│ ├── logger.js
│ ├── providers
│ │ └── mcp-provider.js
│ └── tools
│ ├── add-dependency.js
│ ├── add-subtask.js
│ ├── add-tag.js
│ ├── add-task.js
│ ├── analyze.js
│ ├── clear-subtasks.js
│ ├── complexity-report.js
│ ├── copy-tag.js
│ ├── delete-tag.js
│ ├── expand-all.js
│ ├── expand-task.js
│ ├── fix-dependencies.js
│ ├── generate.js
│ ├── get-operation-status.js
│ ├── index.js
│ ├── initialize-project.js
│ ├── list-tags.js
│ ├── models.js
│ ├── move-task.js
│ ├── next-task.js
│ ├── parse-prd.js
│ ├── README-ZOD-V3.md
│ ├── remove-dependency.js
│ ├── remove-subtask.js
│ ├── remove-task.js
│ ├── rename-tag.js
│ ├── research.js
│ ├── response-language.js
│ ├── rules.js
│ ├── scope-down.js
│ ├── scope-up.js
│ ├── set-task-status.js
│ ├── tool-registry.js
│ ├── update-subtask.js
│ ├── update-task.js
│ ├── update.js
│ ├── use-tag.js
│ ├── utils.js
│ └── validate-dependencies.js
├── mcp-test.js
├── output.json
├── package-lock.json
├── package.json
├── packages
│ ├── ai-sdk-provider-grok-cli
│ │ ├── CHANGELOG.md
│ │ ├── package.json
│ │ ├── README.md
│ │ ├── src
│ │ │ ├── errors.test.ts
│ │ │ ├── errors.ts
│ │ │ ├── grok-cli-language-model.ts
│ │ │ ├── grok-cli-provider.test.ts
│ │ │ ├── grok-cli-provider.ts
│ │ │ ├── index.ts
│ │ │ ├── json-extractor.test.ts
│ │ │ ├── json-extractor.ts
│ │ │ ├── message-converter.test.ts
│ │ │ ├── message-converter.ts
│ │ │ └── types.ts
│ │ └── tsconfig.json
│ ├── build-config
│ │ ├── CHANGELOG.md
│ │ ├── package.json
│ │ ├── src
│ │ │ └── tsdown.base.ts
│ │ └── tsconfig.json
│ ├── claude-code-plugin
│ │ ├── .claude-plugin
│ │ │ └── plugin.json
│ │ ├── .gitignore
│ │ ├── agents
│ │ │ ├── task-checker.md
│ │ │ ├── task-executor.md
│ │ │ └── task-orchestrator.md
│ │ ├── CHANGELOG.md
│ │ ├── commands
│ │ │ ├── add-dependency.md
│ │ │ ├── add-subtask.md
│ │ │ ├── add-task.md
│ │ │ ├── analyze-complexity.md
│ │ │ ├── analyze-project.md
│ │ │ ├── auto-implement-tasks.md
│ │ │ ├── command-pipeline.md
│ │ │ ├── complexity-report.md
│ │ │ ├── convert-task-to-subtask.md
│ │ │ ├── expand-all-tasks.md
│ │ │ ├── expand-task.md
│ │ │ ├── fix-dependencies.md
│ │ │ ├── generate-tasks.md
│ │ │ ├── help.md
│ │ │ ├── init-project-quick.md
│ │ │ ├── init-project.md
│ │ │ ├── install-taskmaster.md
│ │ │ ├── learn.md
│ │ │ ├── list-tasks-by-status.md
│ │ │ ├── list-tasks-with-subtasks.md
│ │ │ ├── list-tasks.md
│ │ │ ├── next-task.md
│ │ │ ├── parse-prd-with-research.md
│ │ │ ├── parse-prd.md
│ │ │ ├── project-status.md
│ │ │ ├── quick-install-taskmaster.md
│ │ │ ├── remove-all-subtasks.md
│ │ │ ├── remove-dependency.md
│ │ │ ├── remove-subtask.md
│ │ │ ├── remove-subtasks.md
│ │ │ ├── remove-task.md
│ │ │ ├── setup-models.md
│ │ │ ├── show-task.md
│ │ │ ├── smart-workflow.md
│ │ │ ├── sync-readme.md
│ │ │ ├── tm-main.md
│ │ │ ├── to-cancelled.md
│ │ │ ├── to-deferred.md
│ │ │ ├── to-done.md
│ │ │ ├── to-in-progress.md
│ │ │ ├── to-pending.md
│ │ │ ├── to-review.md
│ │ │ ├── update-single-task.md
│ │ │ ├── update-task.md
│ │ │ ├── update-tasks-from-id.md
│ │ │ ├── validate-dependencies.md
│ │ │ └── view-models.md
│ │ ├── mcp.json
│ │ └── package.json
│ ├── tm-bridge
│ │ ├── CHANGELOG.md
│ │ ├── package.json
│ │ ├── README.md
│ │ ├── src
│ │ │ ├── add-tag-bridge.ts
│ │ │ ├── bridge-types.ts
│ │ │ ├── bridge-utils.ts
│ │ │ ├── expand-bridge.ts
│ │ │ ├── index.ts
│ │ │ ├── tags-bridge.ts
│ │ │ ├── update-bridge.ts
│ │ │ └── use-tag-bridge.ts
│ │ └── tsconfig.json
│ └── tm-core
│ ├── .gitignore
│ ├── CHANGELOG.md
│ ├── docs
│ │ └── listTasks-architecture.md
│ ├── package.json
│ ├── POC-STATUS.md
│ ├── README.md
│ ├── src
│ │ ├── common
│ │ │ ├── constants
│ │ │ │ ├── index.ts
│ │ │ │ ├── paths.ts
│ │ │ │ └── providers.ts
│ │ │ ├── errors
│ │ │ │ ├── index.ts
│ │ │ │ └── task-master-error.ts
│ │ │ ├── interfaces
│ │ │ │ ├── configuration.interface.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── storage.interface.ts
│ │ │ ├── logger
│ │ │ │ ├── factory.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── logger.spec.ts
│ │ │ │ └── logger.ts
│ │ │ ├── mappers
│ │ │ │ ├── TaskMapper.test.ts
│ │ │ │ └── TaskMapper.ts
│ │ │ ├── types
│ │ │ │ ├── database.types.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── legacy.ts
│ │ │ │ └── repository-types.ts
│ │ │ └── utils
│ │ │ ├── git-utils.ts
│ │ │ ├── id-generator.ts
│ │ │ ├── index.ts
│ │ │ ├── path-helpers.ts
│ │ │ ├── path-normalizer.spec.ts
│ │ │ ├── path-normalizer.ts
│ │ │ ├── project-root-finder.spec.ts
│ │ │ ├── project-root-finder.ts
│ │ │ ├── run-id-generator.spec.ts
│ │ │ └── run-id-generator.ts
│ │ ├── index.ts
│ │ ├── modules
│ │ │ ├── ai
│ │ │ │ ├── index.ts
│ │ │ │ ├── interfaces
│ │ │ │ │ └── ai-provider.interface.ts
│ │ │ │ └── providers
│ │ │ │ ├── base-provider.ts
│ │ │ │ └── index.ts
│ │ │ ├── auth
│ │ │ │ ├── auth-domain.spec.ts
│ │ │ │ ├── auth-domain.ts
│ │ │ │ ├── config.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── managers
│ │ │ │ │ ├── auth-manager.spec.ts
│ │ │ │ │ └── auth-manager.ts
│ │ │ │ ├── services
│ │ │ │ │ ├── context-store.ts
│ │ │ │ │ ├── oauth-service.ts
│ │ │ │ │ ├── organization.service.ts
│ │ │ │ │ ├── supabase-session-storage.spec.ts
│ │ │ │ │ └── supabase-session-storage.ts
│ │ │ │ └── types.ts
│ │ │ ├── briefs
│ │ │ │ ├── briefs-domain.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── services
│ │ │ │ │ └── brief-service.ts
│ │ │ │ ├── types.ts
│ │ │ │ └── utils
│ │ │ │ └── url-parser.ts
│ │ │ ├── commands
│ │ │ │ └── index.ts
│ │ │ ├── config
│ │ │ │ ├── config-domain.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── managers
│ │ │ │ │ ├── config-manager.spec.ts
│ │ │ │ │ └── config-manager.ts
│ │ │ │ └── services
│ │ │ │ ├── config-loader.service.spec.ts
│ │ │ │ ├── config-loader.service.ts
│ │ │ │ ├── config-merger.service.spec.ts
│ │ │ │ ├── config-merger.service.ts
│ │ │ │ ├── config-persistence.service.spec.ts
│ │ │ │ ├── config-persistence.service.ts
│ │ │ │ ├── environment-config-provider.service.spec.ts
│ │ │ │ ├── environment-config-provider.service.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── runtime-state-manager.service.spec.ts
│ │ │ │ └── runtime-state-manager.service.ts
│ │ │ ├── dependencies
│ │ │ │ └── index.ts
│ │ │ ├── execution
│ │ │ │ ├── executors
│ │ │ │ │ ├── base-executor.ts
│ │ │ │ │ ├── claude-executor.ts
│ │ │ │ │ └── executor-factory.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── services
│ │ │ │ │ └── executor-service.ts
│ │ │ │ └── types.ts
│ │ │ ├── git
│ │ │ │ ├── adapters
│ │ │ │ │ ├── git-adapter.test.ts
│ │ │ │ │ └── git-adapter.ts
│ │ │ │ ├── git-domain.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── services
│ │ │ │ ├── branch-name-generator.spec.ts
│ │ │ │ ├── branch-name-generator.ts
│ │ │ │ ├── commit-message-generator.test.ts
│ │ │ │ ├── commit-message-generator.ts
│ │ │ │ ├── scope-detector.test.ts
│ │ │ │ ├── scope-detector.ts
│ │ │ │ ├── template-engine.test.ts
│ │ │ │ └── template-engine.ts
│ │ │ ├── integration
│ │ │ │ ├── clients
│ │ │ │ │ ├── index.ts
│ │ │ │ │ └── supabase-client.ts
│ │ │ │ ├── integration-domain.ts
│ │ │ │ └── services
│ │ │ │ ├── export.service.ts
│ │ │ │ ├── task-expansion.service.ts
│ │ │ │ └── task-retrieval.service.ts
│ │ │ ├── reports
│ │ │ │ ├── index.ts
│ │ │ │ ├── managers
│ │ │ │ │ └── complexity-report-manager.ts
│ │ │ │ └── types.ts
│ │ │ ├── storage
│ │ │ │ ├── adapters
│ │ │ │ │ ├── activity-logger.ts
│ │ │ │ │ ├── api-storage.ts
│ │ │ │ │ └── file-storage
│ │ │ │ │ ├── file-operations.ts
│ │ │ │ │ ├── file-storage.ts
│ │ │ │ │ ├── format-handler.ts
│ │ │ │ │ ├── index.ts
│ │ │ │ │ └── path-resolver.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── services
│ │ │ │ │ └── storage-factory.ts
│ │ │ │ └── utils
│ │ │ │ └── api-client.ts
│ │ │ ├── tasks
│ │ │ │ ├── entities
│ │ │ │ │ └── task.entity.ts
│ │ │ │ ├── parser
│ │ │ │ │ └── index.ts
│ │ │ │ ├── repositories
│ │ │ │ │ ├── supabase
│ │ │ │ │ │ ├── dependency-fetcher.ts
│ │ │ │ │ │ ├── index.ts
│ │ │ │ │ │ └── supabase-repository.ts
│ │ │ │ │ └── task-repository.interface.ts
│ │ │ │ ├── services
│ │ │ │ │ ├── preflight-checker.service.ts
│ │ │ │ │ ├── tag.service.ts
│ │ │ │ │ ├── task-execution-service.ts
│ │ │ │ │ ├── task-loader.service.ts
│ │ │ │ │ └── task-service.ts
│ │ │ │ └── tasks-domain.ts
│ │ │ ├── ui
│ │ │ │ └── index.ts
│ │ │ └── workflow
│ │ │ ├── managers
│ │ │ │ ├── workflow-state-manager.spec.ts
│ │ │ │ └── workflow-state-manager.ts
│ │ │ ├── orchestrators
│ │ │ │ ├── workflow-orchestrator.test.ts
│ │ │ │ └── workflow-orchestrator.ts
│ │ │ ├── services
│ │ │ │ ├── test-result-validator.test.ts
│ │ │ │ ├── test-result-validator.ts
│ │ │ │ ├── test-result-validator.types.ts
│ │ │ │ ├── workflow-activity-logger.ts
│ │ │ │ └── workflow.service.ts
│ │ │ ├── types.ts
│ │ │ └── workflow-domain.ts
│ │ ├── subpath-exports.test.ts
│ │ ├── tm-core.ts
│ │ └── utils
│ │ └── time.utils.ts
│ ├── tests
│ │ ├── auth
│ │ │ └── auth-refresh.test.ts
│ │ ├── integration
│ │ │ ├── auth-token-refresh.test.ts
│ │ │ ├── list-tasks.test.ts
│ │ │ └── storage
│ │ │ └── activity-logger.test.ts
│ │ ├── mocks
│ │ │ └── mock-provider.ts
│ │ ├── setup.ts
│ │ └── unit
│ │ ├── base-provider.test.ts
│ │ ├── executor.test.ts
│ │ └── smoke.test.ts
│ ├── tsconfig.json
│ └── vitest.config.ts
├── README-task-master.md
├── README.md
├── scripts
│ ├── create-worktree.sh
│ ├── dev.js
│ ├── init.js
│ ├── list-worktrees.sh
│ ├── modules
│ │ ├── ai-services-unified.js
│ │ ├── bridge-utils.js
│ │ ├── commands.js
│ │ ├── config-manager.js
│ │ ├── dependency-manager.js
│ │ ├── index.js
│ │ ├── prompt-manager.js
│ │ ├── supported-models.json
│ │ ├── sync-readme.js
│ │ ├── task-manager
│ │ │ ├── add-subtask.js
│ │ │ ├── add-task.js
│ │ │ ├── analyze-task-complexity.js
│ │ │ ├── clear-subtasks.js
│ │ │ ├── expand-all-tasks.js
│ │ │ ├── expand-task.js
│ │ │ ├── find-next-task.js
│ │ │ ├── generate-task-files.js
│ │ │ ├── is-task-dependent.js
│ │ │ ├── list-tasks.js
│ │ │ ├── migrate.js
│ │ │ ├── models.js
│ │ │ ├── move-task.js
│ │ │ ├── parse-prd
│ │ │ │ ├── index.js
│ │ │ │ ├── parse-prd-config.js
│ │ │ │ ├── parse-prd-helpers.js
│ │ │ │ ├── parse-prd-non-streaming.js
│ │ │ │ ├── parse-prd-streaming.js
│ │ │ │ └── parse-prd.js
│ │ │ ├── remove-subtask.js
│ │ │ ├── remove-task.js
│ │ │ ├── research.js
│ │ │ ├── response-language.js
│ │ │ ├── scope-adjustment.js
│ │ │ ├── set-task-status.js
│ │ │ ├── tag-management.js
│ │ │ ├── task-exists.js
│ │ │ ├── update-single-task-status.js
│ │ │ ├── update-subtask-by-id.js
│ │ │ ├── update-task-by-id.js
│ │ │ └── update-tasks.js
│ │ ├── task-manager.js
│ │ ├── ui.js
│ │ ├── update-config-tokens.js
│ │ ├── utils
│ │ │ ├── contextGatherer.js
│ │ │ ├── fuzzyTaskSearch.js
│ │ │ └── git-utils.js
│ │ └── utils.js
│ ├── task-complexity-report.json
│ ├── test-claude-errors.js
│ └── test-claude.js
├── sonar-project.properties
├── src
│ ├── ai-providers
│ │ ├── anthropic.js
│ │ ├── azure.js
│ │ ├── base-provider.js
│ │ ├── bedrock.js
│ │ ├── claude-code.js
│ │ ├── codex-cli.js
│ │ ├── gemini-cli.js
│ │ ├── google-vertex.js
│ │ ├── google.js
│ │ ├── grok-cli.js
│ │ ├── groq.js
│ │ ├── index.js
│ │ ├── lmstudio.js
│ │ ├── ollama.js
│ │ ├── openai-compatible.js
│ │ ├── openai.js
│ │ ├── openrouter.js
│ │ ├── perplexity.js
│ │ ├── xai.js
│ │ ├── zai-coding.js
│ │ └── zai.js
│ ├── constants
│ │ ├── commands.js
│ │ ├── paths.js
│ │ ├── profiles.js
│ │ ├── rules-actions.js
│ │ ├── task-priority.js
│ │ └── task-status.js
│ ├── profiles
│ │ ├── amp.js
│ │ ├── base-profile.js
│ │ ├── claude.js
│ │ ├── cline.js
│ │ ├── codex.js
│ │ ├── cursor.js
│ │ ├── gemini.js
│ │ ├── index.js
│ │ ├── kilo.js
│ │ ├── kiro.js
│ │ ├── opencode.js
│ │ ├── roo.js
│ │ ├── trae.js
│ │ ├── vscode.js
│ │ ├── windsurf.js
│ │ └── zed.js
│ ├── progress
│ │ ├── base-progress-tracker.js
│ │ ├── cli-progress-factory.js
│ │ ├── parse-prd-tracker.js
│ │ ├── progress-tracker-builder.js
│ │ └── tracker-ui.js
│ ├── prompts
│ │ ├── add-task.json
│ │ ├── analyze-complexity.json
│ │ ├── expand-task.json
│ │ ├── parse-prd.json
│ │ ├── README.md
│ │ ├── research.json
│ │ ├── schemas
│ │ │ ├── parameter.schema.json
│ │ │ ├── prompt-template.schema.json
│ │ │ ├── README.md
│ │ │ └── variant.schema.json
│ │ ├── update-subtask.json
│ │ ├── update-task.json
│ │ └── update-tasks.json
│ ├── provider-registry
│ │ └── index.js
│ ├── schemas
│ │ ├── add-task.js
│ │ ├── analyze-complexity.js
│ │ ├── base-schemas.js
│ │ ├── expand-task.js
│ │ ├── parse-prd.js
│ │ ├── registry.js
│ │ ├── update-subtask.js
│ │ ├── update-task.js
│ │ └── update-tasks.js
│ ├── task-master.js
│ ├── ui
│ │ ├── confirm.js
│ │ ├── indicators.js
│ │ └── parse-prd.js
│ └── utils
│ ├── asset-resolver.js
│ ├── create-mcp-config.js
│ ├── format.js
│ ├── getVersion.js
│ ├── logger-utils.js
│ ├── manage-gitignore.js
│ ├── path-utils.js
│ ├── profiles.js
│ ├── rule-transformer.js
│ ├── stream-parser.js
│ └── timeout-manager.js
├── test-clean-tags.js
├── test-config-manager.js
├── test-prd.txt
├── test-tag-functions.js
├── test-version-check-full.js
├── test-version-check.js
├── tests
│ ├── e2e
│ │ ├── e2e_helpers.sh
│ │ ├── parse_llm_output.cjs
│ │ ├── run_e2e.sh
│ │ ├── run_fallback_verification.sh
│ │ └── test_llm_analysis.sh
│ ├── fixtures
│ │ ├── .taskmasterconfig
│ │ ├── sample-claude-response.js
│ │ ├── sample-prd.txt
│ │ └── sample-tasks.js
│ ├── helpers
│ │ └── tool-counts.js
│ ├── integration
│ │ ├── claude-code-error-handling.test.js
│ │ ├── claude-code-optional.test.js
│ │ ├── cli
│ │ │ ├── commands.test.js
│ │ │ ├── complex-cross-tag-scenarios.test.js
│ │ │ └── move-cross-tag.test.js
│ │ ├── manage-gitignore.test.js
│ │ ├── mcp-server
│ │ │ └── direct-functions.test.js
│ │ ├── move-task-cross-tag.integration.test.js
│ │ ├── move-task-simple.integration.test.js
│ │ ├── profiles
│ │ │ ├── amp-init-functionality.test.js
│ │ │ ├── claude-init-functionality.test.js
│ │ │ ├── cline-init-functionality.test.js
│ │ │ ├── codex-init-functionality.test.js
│ │ │ ├── cursor-init-functionality.test.js
│ │ │ ├── gemini-init-functionality.test.js
│ │ │ ├── opencode-init-functionality.test.js
│ │ │ ├── roo-files-inclusion.test.js
│ │ │ ├── roo-init-functionality.test.js
│ │ │ ├── rules-files-inclusion.test.js
│ │ │ ├── trae-init-functionality.test.js
│ │ │ ├── vscode-init-functionality.test.js
│ │ │ └── windsurf-init-functionality.test.js
│ │ └── providers
│ │ └── temperature-support.test.js
│ ├── manual
│ │ ├── progress
│ │ │ ├── parse-prd-analysis.js
│ │ │ ├── test-parse-prd.js
│ │ │ └── TESTING_GUIDE.md
│ │ └── prompts
│ │ ├── prompt-test.js
│ │ └── README.md
│ ├── README.md
│ ├── setup.js
│ └── unit
│ ├── ai-providers
│ │ ├── base-provider.test.js
│ │ ├── claude-code.test.js
│ │ ├── codex-cli.test.js
│ │ ├── gemini-cli.test.js
│ │ ├── lmstudio.test.js
│ │ ├── mcp-components.test.js
│ │ ├── openai-compatible.test.js
│ │ ├── openai.test.js
│ │ ├── provider-registry.test.js
│ │ ├── zai-coding.test.js
│ │ ├── zai-provider.test.js
│ │ ├── zai-schema-introspection.test.js
│ │ └── zai.test.js
│ ├── ai-services-unified.test.js
│ ├── commands.test.js
│ ├── config-manager.test.js
│ ├── config-manager.test.mjs
│ ├── dependency-manager.test.js
│ ├── init.test.js
│ ├── initialize-project.test.js
│ ├── kebab-case-validation.test.js
│ ├── manage-gitignore.test.js
│ ├── mcp
│ │ └── tools
│ │ ├── __mocks__
│ │ │ └── move-task.js
│ │ ├── add-task.test.js
│ │ ├── analyze-complexity.test.js
│ │ ├── expand-all.test.js
│ │ ├── get-tasks.test.js
│ │ ├── initialize-project.test.js
│ │ ├── move-task-cross-tag-options.test.js
│ │ ├── move-task-cross-tag.test.js
│ │ ├── remove-task.test.js
│ │ └── tool-registration.test.js
│ ├── mcp-providers
│ │ ├── mcp-components.test.js
│ │ └── mcp-provider.test.js
│ ├── parse-prd.test.js
│ ├── profiles
│ │ ├── amp-integration.test.js
│ │ ├── claude-integration.test.js
│ │ ├── cline-integration.test.js
│ │ ├── codex-integration.test.js
│ │ ├── cursor-integration.test.js
│ │ ├── gemini-integration.test.js
│ │ ├── kilo-integration.test.js
│ │ ├── kiro-integration.test.js
│ │ ├── mcp-config-validation.test.js
│ │ ├── opencode-integration.test.js
│ │ ├── profile-safety-check.test.js
│ │ ├── roo-integration.test.js
│ │ ├── rule-transformer-cline.test.js
│ │ ├── rule-transformer-cursor.test.js
│ │ ├── rule-transformer-gemini.test.js
│ │ ├── rule-transformer-kilo.test.js
│ │ ├── rule-transformer-kiro.test.js
│ │ ├── rule-transformer-opencode.test.js
│ │ ├── rule-transformer-roo.test.js
│ │ ├── rule-transformer-trae.test.js
│ │ ├── rule-transformer-vscode.test.js
│ │ ├── rule-transformer-windsurf.test.js
│ │ ├── rule-transformer-zed.test.js
│ │ ├── rule-transformer.test.js
│ │ ├── selective-profile-removal.test.js
│ │ ├── subdirectory-support.test.js
│ │ ├── trae-integration.test.js
│ │ ├── vscode-integration.test.js
│ │ ├── windsurf-integration.test.js
│ │ └── zed-integration.test.js
│ ├── progress
│ │ └── base-progress-tracker.test.js
│ ├── prompt-manager.test.js
│ ├── prompts
│ │ ├── expand-task-prompt.test.js
│ │ └── prompt-migration.test.js
│ ├── scripts
│ │ └── modules
│ │ ├── commands
│ │ │ ├── move-cross-tag.test.js
│ │ │ └── README.md
│ │ ├── dependency-manager
│ │ │ ├── circular-dependencies.test.js
│ │ │ ├── cross-tag-dependencies.test.js
│ │ │ └── fix-dependencies-command.test.js
│ │ ├── task-manager
│ │ │ ├── add-subtask.test.js
│ │ │ ├── add-task.test.js
│ │ │ ├── analyze-task-complexity.test.js
│ │ │ ├── clear-subtasks.test.js
│ │ │ ├── complexity-report-tag-isolation.test.js
│ │ │ ├── expand-all-tasks.test.js
│ │ │ ├── expand-task.test.js
│ │ │ ├── find-next-task.test.js
│ │ │ ├── generate-task-files.test.js
│ │ │ ├── list-tasks.test.js
│ │ │ ├── models-baseurl.test.js
│ │ │ ├── move-task-cross-tag.test.js
│ │ │ ├── move-task.test.js
│ │ │ ├── parse-prd-schema.test.js
│ │ │ ├── parse-prd.test.js
│ │ │ ├── remove-subtask.test.js
│ │ │ ├── remove-task.test.js
│ │ │ ├── research.test.js
│ │ │ ├── scope-adjustment.test.js
│ │ │ ├── set-task-status.test.js
│ │ │ ├── setup.js
│ │ │ ├── update-single-task-status.test.js
│ │ │ ├── update-subtask-by-id.test.js
│ │ │ ├── update-task-by-id.test.js
│ │ │ └── update-tasks.test.js
│ │ ├── ui
│ │ │ └── cross-tag-error-display.test.js
│ │ └── utils-tag-aware-paths.test.js
│ ├── task-finder.test.js
│ ├── task-manager
│ │ ├── clear-subtasks.test.js
│ │ ├── move-task.test.js
│ │ ├── tag-boundary.test.js
│ │ └── tag-management.test.js
│ ├── task-master.test.js
│ ├── ui
│ │ └── indicators.test.js
│ ├── ui.test.js
│ ├── utils-strip-ansi.test.js
│ └── utils.test.js
├── tsconfig.json
├── tsdown.config.ts
├── turbo.json
└── update-task-migration-plan.md
```
# Files
--------------------------------------------------------------------------------
/docs/configuration.md:
--------------------------------------------------------------------------------
```markdown
# Configuration
Taskmaster uses two primary methods for configuration:
1. **`.taskmaster/config.json` File (Recommended - New Structure)**
- This JSON file stores most configuration settings, including AI model selections, parameters, logging levels, and project defaults.
- **Location:** This file is created in the `.taskmaster/` directory when you run the `task-master models --setup` interactive setup or initialize a new project with `task-master init`.
- **Migration:** Existing projects with `.taskmasterconfig` in the root will continue to work, but should be migrated to the new structure using `task-master migrate`.
- **Management:** Use the `task-master models --setup` command (or `models` MCP tool) to interactively create and manage this file. You can also set specific models directly using `task-master models --set-<role>=<model_id>`, adding `--ollama` or `--openrouter` flags for custom models. Manual editing is possible but not recommended unless you understand the structure.
- **Example Structure:**
```json
{
  "models": {
    "main": {
      "provider": "anthropic",
      "modelId": "claude-3-7-sonnet-20250219",
      "maxTokens": 64000,
      "temperature": 0.2,
      "baseURL": "https://api.anthropic.com/v1"
    },
    "research": {
      "provider": "perplexity",
      "modelId": "sonar-pro",
      "maxTokens": 8700,
      "temperature": 0.1,
      "baseURL": "https://api.perplexity.ai/v1"
    },
    "fallback": {
      "provider": "anthropic",
      "modelId": "claude-3-5-sonnet",
      "maxTokens": 64000,
      "temperature": 0.2
    }
  },
  "global": {
    "logLevel": "info",
    "debug": false,
    "defaultNumTasks": 10,
    "defaultSubtasks": 5,
    "defaultPriority": "medium",
    "defaultTag": "master",
    "projectName": "Your Project Name",
    "ollamaBaseURL": "http://localhost:11434/api",
    "azureBaseURL": "https://your-endpoint.openai.azure.com/openai/deployments",
    "vertexProjectId": "your-gcp-project-id",
    "vertexLocation": "us-central1",
    "responseLanguage": "English"
  }
}
```
> For MCP-specific setup and troubleshooting, see [Provider-Specific Configuration](#provider-specific-configuration).
2. **Legacy `.taskmasterconfig` File (Backward Compatibility)**
- For projects that haven't migrated to the new structure yet.
- **Location:** Project root directory.
- **Migration:** Use `task-master migrate` to move this to `.taskmaster/config.json`.
- **Deprecation:** While still supported, you'll see warnings encouraging migration to the new structure.
## MCP Tool Loading Configuration
### TASK_MASTER_TOOLS Environment Variable
The `TASK_MASTER_TOOLS` environment variable controls which tools are loaded by the Task Master MCP server. This allows you to optimize token usage based on your workflow needs.
> Note
> Prefer setting `TASK_MASTER_TOOLS` in your MCP client's `env` block (e.g., `.cursor/mcp.json`) or in CI/deployment env. The `.env` file is reserved for API keys/endpoints; avoid persisting non-secret settings there.
#### Configuration Options
- **`all`** (default): Loads all 36 available tools (~21,000 tokens)
- Best for: Users who need the complete feature set
- Use when: Working with complex projects requiring all Task Master features
- Backward compatibility: This is the default to maintain compatibility with existing installations
- **`standard`**: Loads 15 commonly used tools (~10,000 tokens, 50% reduction)
- Best for: Regular task management workflows
- Tools included: All core tools plus project initialization, complexity analysis, task generation, and more
- Use when: You need a balanced set of features with reduced token usage
- **`core`** (or `lean`): Loads 7 essential tools (~5,000 tokens, 70% reduction)
- Best for: Daily development with minimal token overhead
- Tools included: `get_tasks`, `next_task`, `get_task`, `set_task_status`, `update_subtask`, `parse_prd`, `expand_task`
- Use when: Working in large contexts where token usage is critical
- Note: "lean" is an alias for "core" (same tools, token estimate and recommended use). You can refer to it as either "core" or "lean" when configuring.
- **Custom list**: Comma-separated list of specific tool names
- Best for: Specialized workflows requiring specific tools
- Example: `"get_tasks,next_task,set_task_status"`
- Use when: You know exactly which tools you need
#### How to Configure
1. **In MCP configuration files** (`.cursor/mcp.json`, `.vscode/mcp.json`, etc.) - **Recommended**:
```jsonc
{
  "mcpServers": {
    "task-master-ai": {
      "env": {
        "TASK_MASTER_TOOLS": "standard", // Set tool loading mode
        // API keys can still use .env for security
      }
    }
  }
}
```
2. **Via Claude Code CLI**:
```bash
claude mcp add task-master-ai --scope user \
--env TASK_MASTER_TOOLS="core" \
-- npx -y task-master-ai@latest
```
3. **In CI/deployment environment variables**:
```bash
export TASK_MASTER_TOOLS="standard"
node mcp-server/server.js
```
#### Tool Loading Behavior
- When `TASK_MASTER_TOOLS` is unset or empty, the system defaults to `"all"`
- Invalid tool names in a user-specified list are ignored (a warning is emitted for each)
- If every tool name in a custom list is invalid, the system falls back to `"all"`
- Tool names are case-insensitive (e.g., `"CORE"`, `"core"`, and `"Core"` are treated identically)
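For illustration only, the custom-list rules above could be expressed roughly as the following sketch; the function name and the `KNOWN_TOOLS` set are assumptions, not the actual server code:
```typescript
// Hypothetical sketch of the documented TASK_MASTER_TOOLS custom-list behavior.
const KNOWN_TOOLS = new Set([
  'get_tasks',
  'next_task',
  'get_task',
  'set_task_status'
  // ...remaining tool names
]);

function resolveCustomToolList(raw: string): string[] | 'all' {
  // Tool names are case-insensitive, so normalize before matching
  const requested = raw
    .split(',')
    .map((name) => name.trim().toLowerCase())
    .filter(Boolean);
  for (const name of requested) {
    if (!KNOWN_TOOLS.has(name)) {
      console.warn(`Ignoring unknown tool: ${name}`); // invalid names are ignored with a warning
    }
  }
  const valid = requested.filter((name) => KNOWN_TOOLS.has(name));
  // If every requested name is invalid, fall back to loading all tools
  return valid.length > 0 ? valid : 'all';
}
```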
## Environment Variables (`.env` file or MCP `env` block - For API Keys Only)
- Used **exclusively** for sensitive API keys and specific endpoint URLs.
- **Location:**
- For CLI usage: Create a `.env` file in your project root.
- For MCP/Cursor usage: Configure keys in the `env` section of your `.cursor/mcp.json` file.
- **Required API Keys (Depending on configured providers):**
- `ANTHROPIC_API_KEY`: Your Anthropic API key.
- `PERPLEXITY_API_KEY`: Your Perplexity API key.
- `OPENAI_API_KEY`: Your OpenAI API key.
- `GOOGLE_API_KEY`: Your Google API key (also used for Vertex AI provider).
- `MISTRAL_API_KEY`: Your Mistral API key.
- `AZURE_OPENAI_API_KEY`: Your Azure OpenAI API key (also requires `AZURE_OPENAI_ENDPOINT`).
- `OPENROUTER_API_KEY`: Your OpenRouter API key.
- `XAI_API_KEY`: Your X-AI API key.
- **Optional Endpoint Overrides:**
- **Per-role `baseURL` in `.taskmasterconfig`:** You can add a `baseURL` property to any model role (`main`, `research`, `fallback`) to override the default API endpoint for that provider. If omitted, the provider's standard endpoint is used.
- **Environment Variable Overrides (`<PROVIDER>_BASE_URL`):** For greater flexibility, especially with third-party services, you can set an environment variable like `OPENAI_BASE_URL` or `MISTRAL_BASE_URL`. This will override any `baseURL` set in the configuration file for that provider. This is the recommended way to connect to OpenAI-compatible APIs.
- `AZURE_OPENAI_ENDPOINT`: Required if using Azure OpenAI key (can also be set as `baseURL` for the Azure model role).
- `OLLAMA_BASE_URL`: Override the default Ollama API URL (Default: `http://localhost:11434/api`).
- `VERTEX_PROJECT_ID`: Your Google Cloud project ID for Vertex AI. Required when using the 'vertex' provider.
- `VERTEX_LOCATION`: Google Cloud region for Vertex AI (e.g., 'us-central1'). Default is 'us-central1'.
- `GOOGLE_APPLICATION_CREDENTIALS`: Path to service account credentials JSON file for Google Cloud auth (alternative to API key for Vertex AI).
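To make the endpoint-override precedence concrete (environment variable, then per-role `baseURL` from the config file, then the provider's standard endpoint), here is a minimal sketch; the function name and the defaults map are assumptions for illustration, not Task Master's actual resolver:
```typescript
// Hypothetical sketch of the documented endpoint-override precedence.
const PROVIDER_DEFAULT_URLS: Record<string, string | undefined> = {
  openai: 'https://api.openai.com/v1' // illustrative entry only
};

interface RoleModelConfig {
  provider: string;
  modelId: string;
  baseURL?: string; // optional per-role override in .taskmaster/config.json
}

function resolveBaseURL(role: RoleModelConfig): string | undefined {
  // e.g. OPENAI_BASE_URL or MISTRAL_BASE_URL; the provider-id-to-variable mapping is assumed here
  const envOverride = process.env[`${role.provider.toUpperCase()}_BASE_URL`];
  return envOverride ?? role.baseURL ?? PROVIDER_DEFAULT_URLS[role.provider];
}
```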
**Important:** Settings like model ID selections (`main`, `research`, `fallback`), `maxTokens`, `temperature`, `logLevel`, `defaultSubtasks`, `defaultPriority`, and `projectName` are **managed in `.taskmaster/config.json`** (or `.taskmasterconfig` for unmigrated projects), not environment variables.
## Tagged Task Lists Configuration (v0.17+)
Taskmaster includes a tagged task lists system for multi-context task management.
### Global Tag Settings
```json
"global": {
"defaultTag": "master"
}
```
- **`defaultTag`** (string): Default tag context for new operations (default: "master")
### Git Integration
Task Master provides manual git integration through the `--from-branch` option:
- **Manual Tag Creation**: Use `task-master add-tag --from-branch` to create a tag based on your current git branch name
- **User Control**: No automatic tag switching - you control when and how tags are created
- **Flexible Workflow**: Supports any git workflow without imposing rigid branch-tag mappings
## State Management File
Taskmaster uses `.taskmaster/state.json` to track tagged system runtime information:
```json
{
"currentTag": "master",
"lastSwitched": "2025-06-11T20:26:12.598Z",
"migrationNoticeShown": true
}
```
- **`currentTag`**: Currently active tag context
- **`lastSwitched`**: Timestamp of last tag switch
- **`migrationNoticeShown`**: Whether migration notice has been displayed
This file is automatically created during tagged system migration and should not be manually edited.
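For reference, the shape of this file can be described roughly as the following TypeScript interface (an illustration derived from the example above, not a type exported by Taskmaster):
```typescript
// Approximate shape of .taskmaster/state.json
interface TaskMasterState {
  currentTag: string; // currently active tag context
  lastSwitched: string; // ISO-8601 timestamp of the last tag switch
  migrationNoticeShown: boolean; // whether the migration notice has been displayed
}
```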
## Example `.env` File (for API Keys)
```
# Required API keys for providers configured in .taskmaster/config.json
ANTHROPIC_API_KEY=sk-ant-api03-your-key-here
PERPLEXITY_API_KEY=pplx-your-key-here
# OPENAI_API_KEY=sk-your-key-here
# GOOGLE_API_KEY=AIzaSy...
# AZURE_OPENAI_API_KEY=your-azure-openai-api-key-here
# etc.
# Optional Endpoint Overrides
# Use a specific provider's base URL, e.g., for an OpenAI-compatible API
# OPENAI_BASE_URL=https://api.third-party.com/v1
#
# Azure OpenAI Configuration
# AZURE_OPENAI_ENDPOINT=https://your-resource-name.openai.azure.com/ or https://your-endpoint-name.cognitiveservices.azure.com/openai/deployments
# OLLAMA_BASE_URL=http://custom-ollama-host:11434/api
# Google Vertex AI Configuration (Required if using 'vertex' provider)
# VERTEX_PROJECT_ID=your-gcp-project-id
```
## Troubleshooting
### Configuration Errors
- If Task Master reports errors about missing configuration or cannot find the config file, run `task-master models --setup` in your project root to create or repair the file.
- For new projects, config will be created at `.taskmaster/config.json`. For legacy projects, you may want to use `task-master migrate` to move to the new structure.
- Ensure API keys are correctly placed in your `.env` file (for CLI) or `.cursor/mcp.json` (for MCP) and are valid for the providers selected in your config file.
### If `task-master init` doesn't respond:
Try running it with Node directly:
```bash
node node_modules/claude-task-master/scripts/init.js
```
Or clone the repository and run:
```bash
git clone https://github.com/eyaltoledano/claude-task-master.git
cd claude-task-master
node scripts/init.js
```
## Provider-Specific Configuration
### MCP (Model Context Protocol) Provider
1. **Prerequisites**:
- An active MCP session with sampling capability
- MCP client with sampling support (e.g. VS Code)
- No API keys required (uses session-based authentication)
2. **Configuration**:
```json
{
  "models": {
    "main": {
      "provider": "mcp",
      "modelId": "mcp-sampling"
    },
    "research": {
      "provider": "mcp",
      "modelId": "mcp-sampling"
    }
  }
}
```
3. **Available Model IDs**:
- `mcp-sampling` - General text generation using MCP client sampling (supports all roles)
- `claude-3-5-sonnet-20241022` - High-performance model for general tasks (supports all roles)
- `claude-3-opus-20240229` - Enhanced reasoning model for complex tasks (supports all roles)
4. **Features**:
- ✅ **Text Generation**: Standard AI text generation via MCP sampling
- ✅ **Object Generation**: Full schema-driven structured output generation
- ✅ **PRD Parsing**: Parse Product Requirements Documents into structured tasks
- ✅ **Task Creation**: AI-powered task creation with validation
- ✅ **Session Management**: Automatic session detection and context handling
- ✅ **Error Recovery**: Robust error handling and fallback mechanisms
5. **Usage Requirements**:
- Must be running in an MCP context (session must be available)
- Session must provide `clientCapabilities.sampling` capability
6. **Best Practices**:
- Always configure a non-MCP fallback provider
- Use `mcp` for main/research roles when in MCP environments
- Test sampling capability before production use
7. **Setup Commands**:
```bash
# Set MCP provider for main role
task-master models set-main --provider mcp --model claude-3-5-sonnet-20241022
# Set MCP provider for research role
task-master models set-research --provider mcp --model claude-3-opus-20240229
# Verify configuration
task-master models list
```
8. **Troubleshooting**:
- "MCP provider requires session context" → Ensure running in MCP environment
- See the [MCP Provider Guide](./mcp-provider-guide.md) for detailed troubleshooting
### MCP Timeout Configuration
Long-running AI operations in taskmaster-ai can exceed the default 60-second MCP timeout. Operations like `parse_prd`, `expand_task`, `research`, and `analyze_project_complexity` may take 2-5 minutes to complete.
#### Adding Timeout Configuration
Add a `timeout` parameter to your MCP configuration to extend the timeout limit. The timeout configuration works identically across MCP clients including Cursor, Windsurf, and RooCode:
```json
{
  "mcpServers": {
    "task-master-ai": {
      "command": "npx",
      "args": ["-y", "--package=task-master-ai", "task-master-ai"],
      "timeout": 300,
      "env": {
        "ANTHROPIC_API_KEY": "your-anthropic-api-key"
      }
    }
  }
}
```
**Configuration Details:**
- **`timeout: 300`** - Sets timeout to 300 seconds (5 minutes)
- **Value range**: 1-3600 seconds (1 second to 1 hour)
- **Recommended**: 300 seconds provides sufficient time for most AI operations
- **Format**: Integer value in seconds (not milliseconds)
#### Automatic Setup
When adding taskmaster rules for supported editors, the timeout configuration is automatically included:
```bash
# Automatically includes timeout configuration
task-master rules add cursor
task-master rules add roo
task-master rules add windsurf
task-master rules add vscode
```
#### Troubleshooting Timeouts
If you're still experiencing timeout errors:
1. **Verify configuration**: Check that `timeout: 300` is present in your MCP config
2. **Restart editor**: Restart your editor after making configuration changes
3. **Increase timeout**: For very complex operations, try `timeout: 600` (10 minutes)
4. **Check API keys**: Ensure required API keys are properly configured
**Expected behavior:**
- **Before fix**: Operations fail after 60 seconds with `MCP request timed out after 60000ms`
- **After fix**: Operations complete successfully within the configured timeout limit
### Google Vertex AI Configuration
Google Vertex AI is Google Cloud's enterprise AI platform and requires specific configuration:
1. **Prerequisites**:
- A Google Cloud account with Vertex AI API enabled
- Either a Google API key with Vertex AI permissions OR a service account with appropriate roles
- A Google Cloud project ID
2. **Authentication Options**:
- **API Key**: Set the `GOOGLE_API_KEY` environment variable
- **Service Account**: Set `GOOGLE_APPLICATION_CREDENTIALS` to point to your service account JSON file
3. **Required Configuration**:
- Set `VERTEX_PROJECT_ID` to your Google Cloud project ID
- Set `VERTEX_LOCATION` to your preferred Google Cloud region (default: us-central1)
4. **Example Setup**:
```bash
# In .env file
GOOGLE_API_KEY=AIzaSyXXXXXXXXXXXXXXXXXXXXXXXXX
VERTEX_PROJECT_ID=my-gcp-project-123
VERTEX_LOCATION=us-central1
```
Or using service account:
```bash
# In .env file
GOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account.json
VERTEX_PROJECT_ID=my-gcp-project-123
VERTEX_LOCATION=us-central1
```
5. **In .taskmaster/config.json**:
```json
"global": {
"vertexProjectId": "my-gcp-project-123",
"vertexLocation": "us-central1"
}
```
### Azure OpenAI Configuration
Azure OpenAI provides enterprise-grade OpenAI models through Microsoft's Azure cloud platform and requires specific configuration:
1. **Prerequisites**:
- An Azure account with an active subscription
- Azure OpenAI service resource created in the Azure portal
- Azure OpenAI API key and endpoint URL
- Deployed models (e.g., gpt-4o, gpt-4o-mini, gpt-4.1, etc.) in your Azure OpenAI resource
2. **Authentication**:
- Set the `AZURE_OPENAI_API_KEY` environment variable with your Azure OpenAI API key
- Configure the endpoint URL using one of the methods below
3. **Configuration Options**:
**Option 1: Using Global Azure Base URL (affects all Azure models)**
```json
// In .taskmaster/config.json
{
  "models": {
    "main": {
      "provider": "azure",
      "modelId": "gpt-4o",
      "maxTokens": 16000,
      "temperature": 0.7
    },
    "fallback": {
      "provider": "azure",
      "modelId": "gpt-4o-mini",
      "maxTokens": 10000,
      "temperature": 0.7
    }
  },
  "global": {
    "azureBaseURL": "https://your-resource-name.openai.azure.com/openai/deployments"
  }
}
```
**Option 2: Using Per-Model Base URLs (recommended for flexibility)**
```json
// In .taskmaster/config.json
{
  "models": {
    "main": {
      "provider": "azure",
      "modelId": "gpt-4o",
      "maxTokens": 16000,
      "temperature": 0.7,
      "baseURL": "https://your-resource-name.openai.azure.com/openai/deployments"
    },
    "research": {
      "provider": "perplexity",
      "modelId": "sonar-pro",
      "maxTokens": 8700,
      "temperature": 0.1
    },
    "fallback": {
      "provider": "azure",
      "modelId": "gpt-4o-mini",
      "maxTokens": 10000,
      "temperature": 0.7,
      "baseURL": "https://your-resource-name.openai.azure.com/openai/deployments"
    }
  }
}
```
4. **Environment Variables**:
```bash
# In .env file
AZURE_OPENAI_API_KEY=your-azure-openai-api-key-here
# Optional: Override endpoint for all Azure models
AZURE_OPENAI_ENDPOINT=https://your-resource-name.openai.azure.com/openai/deployments
```
5. **Important Notes**:
- **Model Deployment Names**: The `modelId` in your configuration should match the **deployment name** you created in Azure OpenAI Studio, not the underlying model name
- **Base URL Priority**: Per-model `baseURL` settings override the global `azureBaseURL` setting
- **Endpoint Format**: When using per-model `baseURL`, use the full path including `/openai/deployments`
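As a minimal sketch of that priority rule (the function name is assumed; this is not the actual resolution code):
```typescript
// Hypothetical: a per-model baseURL wins over the global azureBaseURL setting.
// Both values are full deployment paths, e.g.
// "https://your-resource-name.openai.azure.com/openai/deployments"
function resolveAzureDeploymentsURL(
  model: { baseURL?: string },
  globalConfig: { azureBaseURL?: string }
): string | undefined {
  return model.baseURL ?? globalConfig.azureBaseURL;
}
```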
6. **Troubleshooting**:
**"Resource not found" errors:**
- Ensure your `baseURL` includes the full path: `https://your-resource-name.openai.azure.com/openai/deployments`
- Verify that your deployment name in `modelId` exactly matches what's configured in Azure OpenAI Studio
- Check that your Azure OpenAI resource is in the correct region and properly deployed
**Authentication errors:**
- Verify your `AZURE_OPENAI_API_KEY` is correct and has not expired
- Ensure your Azure OpenAI resource has the necessary permissions
- Check that your subscription has not been suspended or reached quota limits
**Model availability errors:**
- Confirm the model is deployed in your Azure OpenAI resource
- Verify the deployment name matches your configuration exactly (case-sensitive)
- Ensure the model deployment is in a "Succeeded" state in Azure OpenAI Studio
- Ensure you're not getting rate limited by `maxTokens`; maintain an appropriate Tokens per Minute (TPM) rate limit in your deployment
### Codex CLI Provider
The Codex CLI provider integrates Task Master with OpenAI's Codex CLI, allowing you to use ChatGPT subscription models via OAuth authentication.
1. **Prerequisites**:
- Node.js >= 18
- Codex CLI >= 0.42.0 (>= 0.44.0 recommended)
- ChatGPT subscription: Plus, Pro, Business, Edu, or Enterprise (for OAuth access to GPT-5 models)
2. **Installation**:
```bash
npm install -g @openai/codex
```
3. **Authentication** (OAuth - Primary Method):
```bash
codex login
```
This will open a browser window for OAuth authentication with your ChatGPT account. Once authenticated, Task Master will automatically use these credentials.
4. **Optional API Key Method**:
While OAuth is the primary and recommended authentication method, you can optionally set an OpenAI API key:
```bash
# In .env file
OPENAI_API_KEY=sk-your-openai-api-key-here
```
**Note**: The API key will only be injected if explicitly provided. OAuth is always preferred.
5. **Configuration**:
```json
// In .taskmaster/config.json
{
  "models": {
    "main": {
      "provider": "codex-cli",
      "modelId": "gpt-5-codex",
      "maxTokens": 128000,
      "temperature": 0.2
    },
    "fallback": {
      "provider": "codex-cli",
      "modelId": "gpt-5",
      "maxTokens": 128000,
      "temperature": 0.2
    }
  },
  "codexCli": {
    "allowNpx": true,
    "skipGitRepoCheck": true,
    "approvalMode": "on-failure",
    "sandboxMode": "workspace-write"
  }
}
```
6. **Available Models**:
- `gpt-5` - Latest GPT-5 model (272K max input, 128K max output)
- `gpt-5-codex` - GPT-5 optimized for agentic software engineering (272K max input, 128K max output)
7. **Codex CLI Settings (`codexCli` section)**:
The `codexCli` section in your configuration file supports the following options:
- **`allowNpx`** (boolean, default: `false`): Allow fallback to `npx @openai/codex` if CLI not found on PATH
- **`skipGitRepoCheck`** (boolean, default: `false`): Skip git repository safety check (recommended for CI/non-repo usage)
- **`approvalMode`** (string): Control command execution approval
- `"untrusted"`: Require approval for all commands
- `"on-failure"`: Only require approval after a command fails (default)
- `"on-request"`: Approve only when explicitly requested
- `"never"`: Never require approval (not recommended)
- **`sandboxMode`** (string): Control filesystem access
- `"read-only"`: Read-only access
- `"workspace-write"`: Allow writes to workspace (default)
- `"danger-full-access"`: Full filesystem access (use with caution)
- **`codexPath`** (string, optional): Custom path to codex CLI executable
- **`cwd`** (string, optional): Working directory for Codex CLI execution
- **`fullAuto`** (boolean, optional): Fully automatic mode (equivalent to `--full-auto` flag)
- **`dangerouslyBypassApprovalsAndSandbox`** (boolean, optional): Bypass all safety checks (dangerous!)
- **`color`** (string, optional): Color handling - `"always"`, `"never"`, or `"auto"`
- **`outputLastMessageFile`** (string, optional): Write last agent message to specified file
- **`verbose`** (boolean, optional): Enable verbose logging
- **`env`** (object, optional): Additional environment variables for Codex CLI
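Taken together, the options above could be summarized as roughly the following TypeScript shape (an illustrative sketch, not a type exported by Task Master):
```typescript
// Approximate shape of the codexCli section in .taskmaster/config.json
interface CodexCliSettings {
  allowNpx?: boolean; // default: false
  skipGitRepoCheck?: boolean; // default: false
  approvalMode?: 'untrusted' | 'on-failure' | 'on-request' | 'never';
  sandboxMode?: 'read-only' | 'workspace-write' | 'danger-full-access';
  codexPath?: string;
  cwd?: string;
  fullAuto?: boolean;
  dangerouslyBypassApprovalsAndSandbox?: boolean;
  color?: 'always' | 'never' | 'auto';
  outputLastMessageFile?: string;
  verbose?: boolean;
  env?: Record<string, string>;
  commandSpecific?: Record<string, Partial<CodexCliSettings>>; // per-command overrides, see below
}
```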
8. **Command-Specific Settings** (optional):
You can override settings for specific Task Master commands:
```json
{
  "codexCli": {
    "allowNpx": true,
    "approvalMode": "on-failure",
    "commandSpecific": {
      "parse-prd": {
        "approvalMode": "never",
        "verbose": true
      },
      "expand": {
        "sandboxMode": "read-only"
      }
    }
  }
}
```
9. **Codebase Features**:
The Codex CLI provider is codebase-capable, meaning it can analyze and interact with your project files. Codebase analysis features are automatically enabled when using `codex-cli` as your provider and `enableCodebaseAnalysis` is set to `true` in your global configuration (default).
10. **Setup Commands**:
```bash
# Set Codex CLI for main role
task-master models --set-main gpt-5-codex --codex-cli
# Set Codex CLI for fallback role
task-master models --set-fallback gpt-5 --codex-cli
# Verify configuration
task-master models
```
11. **Troubleshooting**:
**"codex: command not found" error:**
- Install Codex CLI globally: `npm install -g @openai/codex`
- Verify installation: `codex --version`
- Alternatively, enable `allowNpx: true` in your codexCli configuration
**"Not logged in" errors:**
- Run `codex login` to authenticate with your ChatGPT account
- Verify authentication status: `codex` (opens interactive CLI)
**"Old version" warnings:**
- Check version: `codex --version`
- Upgrade: `npm install -g @openai/codex@latest`
- Minimum version: 0.42.0, recommended: >= 0.44.0
**"Model not available" errors:**
- Only `gpt-5` and `gpt-5-codex` are available via OAuth subscription
- Verify your ChatGPT subscription is active
- For other OpenAI models, use the standard `openai` provider with an API key
**API key not being used:**
- API key is only injected when explicitly provided
- OAuth authentication is always preferred
- If you want to use an API key, ensure `OPENAI_API_KEY` is set in your `.env` file
12. **Important Notes**:
- OAuth subscription required for model access (no API key needed for basic operation)
- Limited to OAuth-available models only (`gpt-5` and `gpt-5-codex`)
- Pricing information is not available for OAuth models (shows as "Unknown" in cost calculations)
- See [Codex CLI Provider Documentation](./providers/codex-cli.md) for more details
```
--------------------------------------------------------------------------------
/packages/tm-core/src/modules/storage/adapters/api-storage.ts:
--------------------------------------------------------------------------------
```typescript
/**
* @fileoverview API-based storage implementation using repository pattern
* This provides storage via repository abstraction for flexibility
*/
import type { SupabaseClient } from '@supabase/supabase-js';
import {
ERROR_CODES,
TaskMasterError
} from '../../../common/errors/task-master-error.js';
import type {
IStorage,
LoadTasksOptions,
StorageStats,
UpdateStatusResult
} from '../../../common/interfaces/storage.interface.js';
import { getLogger } from '../../../common/logger/factory.js';
import type {
Task,
TaskMetadata,
TaskStatus,
TaskTag
} from '../../../common/types/index.js';
import { AuthManager } from '../../auth/managers/auth-manager.js';
import { BriefsDomain } from '../../briefs/briefs-domain.js';
import {
type ExpandTaskResult,
TaskExpansionService
} from '../../integration/services/task-expansion.service.js';
import { TaskRetrievalService } from '../../integration/services/task-retrieval.service.js';
import { SupabaseRepository } from '../../tasks/repositories/supabase/index.js';
import type { TaskRepository } from '../../tasks/repositories/task-repository.interface.js';
import { ApiClient } from '../utils/api-client.js';
/**
* API storage configuration
*/
export interface ApiStorageConfig {
/** Supabase client instance */
supabaseClient?: SupabaseClient;
/** Custom repository implementation */
repository?: TaskRepository;
/** Project ID for scoping */
projectId: string;
/** Enable request retries */
enableRetry?: boolean;
/** Maximum retry attempts */
maxRetries?: number;
}
/**
* Response from the update task with prompt API endpoint
*/
interface UpdateTaskWithPromptResponse {
success: boolean;
task: {
id: string;
displayId: string | null;
title: string;
description: string | null;
status: string;
priority: string | null;
};
message: string;
}
/**
* ApiStorage implementation using repository pattern
* Provides flexibility to swap between different backend implementations
*/
export class ApiStorage implements IStorage {
private readonly repository: TaskRepository;
private readonly projectId: string;
private readonly enableRetry: boolean;
private readonly maxRetries: number;
private initialized = false;
private tagsCache: Map<string, TaskTag> = new Map();
private apiClient?: ApiClient;
private expansionService?: TaskExpansionService;
private retrievalService?: TaskRetrievalService;
private readonly logger = getLogger('ApiStorage');
constructor(config: ApiStorageConfig) {
this.validateConfig(config);
// Use provided repository or create Supabase repository
if (config.repository) {
this.repository = config.repository;
} else if (config.supabaseClient) {
// TODO: SupabaseRepository doesn't implement all TaskRepository methods yet
// Cast for now until full implementation is complete
this.repository = new SupabaseRepository(
config.supabaseClient
) as unknown as TaskRepository;
} else {
throw new TaskMasterError(
'Either repository or supabaseClient must be provided',
ERROR_CODES.MISSING_CONFIGURATION
);
}
this.projectId = config.projectId;
this.enableRetry = config.enableRetry ?? true;
this.maxRetries = config.maxRetries ?? 3;
}
/**
* Validate API storage configuration
*/
private validateConfig(config: ApiStorageConfig): void {
if (!config.projectId) {
throw new TaskMasterError(
'Project ID is required for API storage',
ERROR_CODES.MISSING_CONFIGURATION
);
}
if (!config.repository && !config.supabaseClient) {
throw new TaskMasterError(
'Either repository or supabaseClient must be provided',
ERROR_CODES.MISSING_CONFIGURATION
);
}
}
/**
* Initialize the API storage
*/
async initialize(): Promise<void> {
if (this.initialized) return;
try {
// Load initial tags
await this.loadTagsIntoCache();
this.initialized = true;
} catch (error) {
throw new TaskMasterError(
'Failed to initialize API storage',
ERROR_CODES.STORAGE_ERROR,
{ operation: 'initialize' },
error as Error
);
}
}
/**
* Get the storage type
*/
getStorageType(): 'api' {
return 'api';
}
/**
* Get the current brief name
* @returns The brief name if a brief is selected, null otherwise
*/
getCurrentBriefName(): string | null {
const authManager = AuthManager.getInstance();
const context = authManager.getContext();
return context?.briefName || null;
}
/**
* Get all briefs (tags) with detailed statistics including task counts
* In API storage, tags are called "briefs"
* Delegates to BriefsDomain for brief statistics calculation
*/
async getTagsWithStats(): Promise<{
tags: Array<{
name: string;
isCurrent: boolean;
taskCount: number;
completedTasks: number;
statusBreakdown: Record<string, number>;
subtaskCounts?: {
totalSubtasks: number;
subtasksByStatus: Record<string, number>;
};
created?: string;
description?: string;
status?: string;
briefId?: string;
}>;
currentTag: string | null;
totalTags: number;
}> {
await this.ensureInitialized();
try {
// Delegate to BriefsDomain which owns brief operations
const briefsDomain = new BriefsDomain();
return await briefsDomain.getBriefsWithStats(
this.repository,
this.projectId
);
} catch (error) {
throw new TaskMasterError(
'Failed to get tags with stats from API',
ERROR_CODES.STORAGE_ERROR,
{ operation: 'getTagsWithStats' },
error as Error
);
}
}
/**
* Load tags into cache
* In our API-based system, "tags" represent briefs
*/
private async loadTagsIntoCache(): Promise<void> {
try {
const authManager = AuthManager.getInstance();
const context = authManager.getContext();
// If we have a selected brief, create a virtual "tag" for it
if (context?.briefId) {
// Create a virtual tag representing the current brief
const briefTag: TaskTag = {
name: context.briefId,
tasks: [], // Will be populated when tasks are loaded
metadata: {
briefId: context.briefId,
briefName: context.briefName,
organizationId: context.orgId
}
};
this.tagsCache.clear();
this.tagsCache.set(context.briefId, briefTag);
}
} catch (error) {
// If no brief is selected, that's okay - user needs to select one first
console.debug('No brief selected, starting with empty cache');
}
}
/**
* Load tasks from API
* In our system, the tag parameter represents a brief ID
*/
async loadTasks(tag?: string, options?: LoadTasksOptions): Promise<Task[]> {
await this.ensureInitialized();
try {
const context =
AuthManager.getInstance().ensureBriefSelected('loadTasks');
// Load tasks from the current brief context with filters pushed to repository
const tasks = await this.retryOperation(() =>
this.repository.getTasks(this.projectId, options)
);
// Update the tag cache with the loaded task IDs
const briefTag = this.tagsCache.get(context.briefId);
if (briefTag) {
briefTag.tasks = tasks.map((task) => task.id);
}
return tasks;
} catch (error) {
this.wrapError(error, 'Failed to load tasks from API', {
operation: 'loadTasks',
tag,
context: 'brief-based loading'
});
}
}
/**
* Save tasks to API
*/
async saveTasks(tasks: Task[], tag?: string): Promise<void> {
await this.ensureInitialized();
try {
if (tag) {
// Update tag with task IDs
const tagData = this.tagsCache.get(tag) || {
name: tag,
tasks: [],
metadata: {}
};
tagData.tasks = tasks.map((t) => t.id);
// Save or update tag
if (this.tagsCache.has(tag)) {
await this.repository.updateTag(this.projectId, tag, tagData);
} else {
await this.repository.createTag(this.projectId, tagData);
}
this.tagsCache.set(tag, tagData);
}
// Save tasks using bulk operation
await this.retryOperation(() =>
this.repository.bulkCreateTasks(this.projectId, tasks)
);
} catch (error) {
throw new TaskMasterError(
'Failed to save tasks to API',
ERROR_CODES.STORAGE_ERROR,
{ operation: 'saveTasks', tag, taskCount: tasks.length },
error as Error
);
}
}
/**
* Load a single task by ID (supports UUID or display ID like HAM-123)
*/
async loadTask(taskId: string, tag?: string): Promise<Task | null> {
await this.ensureInitialized();
try {
const retrievalService = this.getRetrievalService();
return await this.retryOperation(() => retrievalService.getTask(taskId));
} catch (error) {
this.wrapError(error, 'Failed to load task from API', {
operation: 'loadTask',
taskId,
tag
});
}
}
/**
* Save a single task
*/
async saveTask(task: Task, tag?: string): Promise<void> {
await this.ensureInitialized();
try {
// Check if task exists
const existing = await this.repository.getTask(this.projectId, task.id);
if (existing) {
await this.retryOperation(() =>
this.repository.updateTask(this.projectId, task.id, task)
);
} else {
await this.retryOperation(() =>
this.repository.createTask(this.projectId, task)
);
}
// Update tag if specified
if (tag) {
const tagData = this.tagsCache.get(tag);
if (tagData && !tagData.tasks.includes(task.id)) {
tagData.tasks.push(task.id);
await this.repository.updateTag(this.projectId, tag, tagData);
}
}
} catch (error) {
throw new TaskMasterError(
'Failed to save task to API',
ERROR_CODES.STORAGE_ERROR,
{ operation: 'saveTask', taskId: task.id, tag },
error as Error
);
}
}
/**
* Delete a task
*/
async deleteTask(taskId: string, tag?: string): Promise<void> {
await this.ensureInitialized();
try {
await this.retryOperation(() =>
this.repository.deleteTask(this.projectId, taskId)
);
// Remove from tag if specified
if (tag) {
const tagData = this.tagsCache.get(tag);
if (tagData) {
tagData.tasks = tagData.tasks.filter((id) => id !== taskId);
await this.repository.updateTag(this.projectId, tag, tagData);
}
}
} catch (error) {
throw new TaskMasterError(
'Failed to delete task from API',
ERROR_CODES.STORAGE_ERROR,
{ operation: 'deleteTask', taskId, tag },
error as Error
);
}
}
/**
* List available tags (briefs in our system)
*/
async listTags(): Promise<string[]> {
await this.ensureInitialized();
try {
const authManager = AuthManager.getInstance();
const context = authManager.getContext();
// In our API-based system, we only have one "tag" at a time - the current brief
if (context?.briefId) {
// Ensure the current brief is in our cache
await this.loadTagsIntoCache();
return [context.briefId];
}
// No brief selected, return empty array
return [];
} catch (error) {
throw new TaskMasterError(
'Failed to list tags from API',
ERROR_CODES.STORAGE_ERROR,
{ operation: 'listTags' },
error as Error
);
}
}
/**
* Load metadata
*/
async loadMetadata(tag?: string): Promise<TaskMetadata | null> {
await this.ensureInitialized();
try {
if (tag) {
const tagData = this.tagsCache.get(tag);
return (tagData?.metadata as TaskMetadata) || null;
}
// Return global metadata if no tag specified
// This could be stored in a special system tag
const systemTag = await this.repository.getTag(this.projectId, '_system');
return (systemTag?.metadata as TaskMetadata) || null;
} catch (error) {
throw new TaskMasterError(
'Failed to load metadata from API',
ERROR_CODES.STORAGE_ERROR,
{ operation: 'loadMetadata', tag },
error as Error
);
}
}
/**
* Save metadata
*/
async saveMetadata(metadata: TaskMetadata, tag?: string): Promise<void> {
await this.ensureInitialized();
try {
if (tag) {
const tagData = this.tagsCache.get(tag) || {
name: tag,
tasks: [],
metadata: {}
};
tagData.metadata = metadata as any;
if (this.tagsCache.has(tag)) {
await this.repository.updateTag(this.projectId, tag, tagData);
} else {
await this.repository.createTag(this.projectId, tagData);
}
this.tagsCache.set(tag, tagData);
} else {
// Save to system tag
const systemTag: TaskTag = {
name: '_system',
tasks: [],
metadata: metadata as any
};
const existing = await this.repository.getTag(
this.projectId,
'_system'
);
if (existing) {
await this.repository.updateTag(this.projectId, '_system', systemTag);
} else {
await this.repository.createTag(this.projectId, systemTag);
}
}
} catch (error) {
throw new TaskMasterError(
'Failed to save metadata to API',
ERROR_CODES.STORAGE_ERROR,
{ operation: 'saveMetadata', tag },
error as Error
);
}
}
/**
* Check if storage exists
*/
async exists(): Promise<boolean> {
try {
await this.initialize();
return true;
} catch {
return false;
}
}
/**
* Append tasks to existing storage
*/
async appendTasks(tasks: Task[], tag?: string): Promise<void> {
await this.ensureInitialized();
try {
// Use bulk create - repository should handle duplicates
await this.retryOperation(() =>
this.repository.bulkCreateTasks(this.projectId, tasks)
);
// Update tag if specified
if (tag) {
const tagData = this.tagsCache.get(tag) || {
name: tag,
tasks: [],
metadata: {}
};
const newTaskIds = tasks.map((t) => t.id);
tagData.tasks = [...new Set([...tagData.tasks, ...newTaskIds])];
if (this.tagsCache.has(tag)) {
await this.repository.updateTag(this.projectId, tag, tagData);
} else {
await this.repository.createTag(this.projectId, tagData);
}
this.tagsCache.set(tag, tagData);
}
} catch (error) {
throw new TaskMasterError(
'Failed to append tasks to API',
ERROR_CODES.STORAGE_ERROR,
{ operation: 'appendTasks', tag, taskCount: tasks.length },
error as Error
);
}
}
/**
* Update a specific task
*/
async updateTask(
taskId: string,
updates: Partial<Task>,
tag?: string
): Promise<void> {
await this.ensureInitialized();
try {
await this.retryOperation(() =>
this.repository.updateTask(this.projectId, taskId, updates)
);
} catch (error) {
throw new TaskMasterError(
'Failed to update task via API',
ERROR_CODES.STORAGE_ERROR,
{ operation: 'updateTask', taskId, tag },
error as Error
);
}
}
/**
* Update task with AI-powered prompt
* Sends prompt to backend for server-side AI processing
*/
async updateTaskWithPrompt(
taskId: string,
prompt: string,
tag?: string,
options?: { useResearch?: boolean; mode?: 'append' | 'update' | 'rewrite' }
): Promise<void> {
await this.ensureInitialized();
const mode = options?.mode ?? 'append';
try {
// Use the API client - all auth, error handling, etc. is centralized
const apiClient = this.getApiClient();
const result = await apiClient.patch<UpdateTaskWithPromptResponse>(
`/ai/api/v1/tasks/${taskId}/prompt`,
{ prompt, mode }
);
if (!result.success) {
// API returned success: false
throw new Error(
result.message ||
`Update failed for task ${taskId}. The server did not provide details.`
);
}
// Log success with task details
this.logger.info(
`Successfully updated task ${result.task.displayId || result.task.id} using AI prompt (mode: ${mode})`
);
this.logger.info(` Title: ${result.task.title}`);
this.logger.info(` Status: ${result.task.status}`);
if (result.message) {
this.logger.info(` ${result.message}`);
}
} catch (error) {
// If it's already a TaskMasterError, just add context and re-throw
if (error instanceof TaskMasterError) {
throw error.withContext({
operation: 'updateTaskWithPrompt',
taskId,
tag,
promptLength: prompt.length,
mode
});
}
// For other errors, wrap them
const errorMessage =
error instanceof Error ? error.message : String(error);
throw new TaskMasterError(
errorMessage,
ERROR_CODES.STORAGE_ERROR,
{
operation: 'updateTaskWithPrompt',
taskId,
tag,
promptLength: prompt.length,
mode
},
error as Error
);
}
}
/**
* Expand task into subtasks with AI-powered generation
* Sends task to backend for server-side AI processing
*/
async expandTaskWithPrompt(
taskId: string,
_tag?: string,
options?: {
numSubtasks?: number;
useResearch?: boolean;
additionalContext?: string;
force?: boolean;
}
): Promise<ExpandTaskResult> {
await this.ensureInitialized();
const expansionService = this.getExpansionService();
return await expansionService.expandTask(taskId, options);
}
/**
* Update task or subtask status by ID - for API storage
*/
async updateTaskStatus(
taskId: string,
newStatus: TaskStatus,
tag?: string
): Promise<UpdateStatusResult> {
await this.ensureInitialized();
try {
AuthManager.getInstance().ensureBriefSelected('updateTaskStatus');
const existingTask = await this.retryOperation(() =>
this.repository.getTask(this.projectId, taskId)
);
if (!existingTask) {
throw new Error(`Task ${taskId} not found`);
}
const oldStatus = existingTask.status;
if (oldStatus === newStatus) {
return {
success: true,
oldStatus,
newStatus,
taskId
};
}
// Update the task/subtask status
await this.retryOperation(() =>
this.repository.updateTask(this.projectId, taskId, {
status: newStatus,
updatedAt: new Date().toISOString()
})
);
// Note: Parent status auto-adjustment is handled by the backend API service
// which has its own business logic for managing task relationships
return {
success: true,
oldStatus,
newStatus,
taskId
};
} catch (error) {
this.wrapError(error, 'Failed to update task status via API', {
operation: 'updateTaskStatus',
taskId,
newStatus,
tag
});
}
}
/**
* Get all available tags
*/
async getAllTags(): Promise<string[]> {
return this.listTags();
}
/**
* Create a new tag (brief)
* Not supported with API storage - users must create briefs via web interface
*/
async createTag(
tagName: string,
_options?: { copyFrom?: string; description?: string }
): Promise<void> {
throw new TaskMasterError(
'Tag creation is not supported with API storage. Please create briefs through Hamster Studio.',
ERROR_CODES.NOT_IMPLEMENTED,
{ storageType: 'api', operation: 'createTag', tagName }
);
}
/**
* Delete all tasks for a tag
*/
async deleteTag(tag: string): Promise<void> {
await this.ensureInitialized();
try {
await this.retryOperation(() =>
this.repository.deleteTag(this.projectId, tag)
);
this.tagsCache.delete(tag);
} catch (error) {
throw new TaskMasterError(
'Failed to delete tag via API',
ERROR_CODES.STORAGE_ERROR,
{ operation: 'deleteTag', tag },
error as Error
);
}
}
/**
* Rename a tag
*/
async renameTag(oldTag: string, newTag: string): Promise<void> {
await this.ensureInitialized();
try {
const tagData = this.tagsCache.get(oldTag);
if (!tagData) {
throw new Error(`Tag ${oldTag} not found`);
}
// Create new tag with same data
const newTagData = { ...tagData, name: newTag };
await this.repository.createTag(this.projectId, newTagData);
// Delete old tag
await this.repository.deleteTag(this.projectId, oldTag);
// Update cache
this.tagsCache.delete(oldTag);
this.tagsCache.set(newTag, newTagData);
} catch (error) {
throw new TaskMasterError(
'Failed to rename tag via API',
ERROR_CODES.STORAGE_ERROR,
{ operation: 'renameTag', oldTag, newTag },
error as Error
);
}
}
/**
* Copy a tag
*/
async copyTag(sourceTag: string, targetTag: string): Promise<void> {
await this.ensureInitialized();
try {
const sourceData = this.tagsCache.get(sourceTag);
if (!sourceData) {
throw new Error(`Source tag ${sourceTag} not found`);
}
// Create new tag with copied data
const targetData = { ...sourceData, name: targetTag };
await this.repository.createTag(this.projectId, targetData);
// Update cache
this.tagsCache.set(targetTag, targetData);
} catch (error) {
throw new TaskMasterError(
'Failed to copy tag via API',
ERROR_CODES.STORAGE_ERROR,
{ operation: 'copyTag', sourceTag, targetTag },
error as Error
);
}
}
/**
* Get storage statistics
*/
async getStats(): Promise<StorageStats> {
await this.ensureInitialized();
try {
const tasks = await this.repository.getTasks(this.projectId);
const tags = await this.repository.getTags(this.projectId);
const tagStats = tags.map((tag) => ({
tag: tag.name,
taskCount: tag.tasks.length,
lastModified: new Date().toISOString() // TODO: Get actual last modified from tag data
}));
return {
totalTasks: tasks.length,
totalTags: tags.length,
storageSize: 0, // Not applicable for API storage
lastModified: new Date().toISOString(),
tagStats
};
} catch (error) {
throw new TaskMasterError(
'Failed to get stats from API',
ERROR_CODES.STORAGE_ERROR,
{ operation: 'getStats' },
error as Error
);
}
}
/**
* Create backup
*/
async backup(): Promise<string> {
await this.ensureInitialized();
try {
// Export all data
await this.repository.getTasks(this.projectId);
await this.repository.getTags(this.projectId);
// TODO: In a real implementation, this would:
// 1. Create backup data structure with tasks and tags
// 2. Save the backup to a storage service
// For now, return a backup identifier
return `backup-${this.projectId}-${Date.now()}`;
} catch (error) {
throw new TaskMasterError(
'Failed to create backup via API',
ERROR_CODES.STORAGE_ERROR,
{ operation: 'backup' },
error as Error
);
}
}
/**
* Restore from backup
*/
async restore(backupId: string): Promise<void> {
await this.ensureInitialized();
// This would restore from a backup service
// Implementation depends on backup strategy
throw new TaskMasterError(
'Restore not implemented for API storage',
ERROR_CODES.NOT_IMPLEMENTED,
{ operation: 'restore', backupId }
);
}
/**
* Clear all data
*/
async clear(): Promise<void> {
await this.ensureInitialized();
try {
// Delete all tasks
const tasks = await this.repository.getTasks(this.projectId);
if (tasks.length > 0) {
await this.repository.bulkDeleteTasks(
this.projectId,
tasks.map((t) => t.id)
);
}
// Delete all tags
const tags = await this.repository.getTags(this.projectId);
for (const tag of tags) {
await this.repository.deleteTag(this.projectId, tag.name);
}
// Clear cache
this.tagsCache.clear();
} catch (error) {
throw new TaskMasterError(
'Failed to clear data via API',
ERROR_CODES.STORAGE_ERROR,
{ operation: 'clear' },
error as Error
);
}
}
/**
* Close connection
*/
async close(): Promise<void> {
this.initialized = false;
this.tagsCache.clear();
}
/**
* Ensure storage is initialized
*/
private async ensureInitialized(): Promise<void> {
if (!this.initialized) {
await this.initialize();
}
}
/**
* Get or create API client instance with auth
*/
private getApiClient(): ApiClient {
if (!this.apiClient) {
const apiEndpoint =
process.env.TM_BASE_DOMAIN || process.env.TM_PUBLIC_BASE_DOMAIN;
if (!apiEndpoint) {
throw new TaskMasterError(
'API endpoint not configured. Please set TM_PUBLIC_BASE_DOMAIN environment variable.',
ERROR_CODES.MISSING_CONFIGURATION,
{ operation: 'getApiClient' }
);
}
const context =
AuthManager.getInstance().ensureBriefSelected('getApiClient');
const authManager = AuthManager.getInstance();
this.apiClient = new ApiClient({
baseUrl: apiEndpoint,
authManager,
accountId: context.orgId
});
}
return this.apiClient;
}
/**
* Get or create TaskExpansionService instance
*/
private getExpansionService(): TaskExpansionService {
if (!this.expansionService) {
const apiClient = this.getApiClient();
const authManager = AuthManager.getInstance();
this.expansionService = new TaskExpansionService(
this.repository,
this.projectId,
apiClient,
authManager
);
}
return this.expansionService;
}
/**
* Get or create TaskRetrievalService instance
*/
private getRetrievalService(): TaskRetrievalService {
if (!this.retrievalService) {
const apiClient = this.getApiClient();
const authManager = AuthManager.getInstance();
this.retrievalService = new TaskRetrievalService(
this.repository,
this.projectId,
apiClient,
authManager
);
}
return this.retrievalService;
}
/**
* Retry an operation with exponential backoff
*/
private async retryOperation<T>(
operation: () => Promise<T>,
attempt = 1
): Promise<T> {
try {
return await operation();
} catch (error) {
if (this.enableRetry && attempt < this.maxRetries) {
const delay = Math.pow(2, attempt) * 1000;
await new Promise((resolve) => setTimeout(resolve, delay));
return this.retryOperation(operation, attempt + 1);
}
throw error;
}
}
/**
* Wrap an error unless it's already a NO_BRIEF_SELECTED error
*/
private wrapError(
error: unknown,
message: string,
context: Record<string, unknown>
): never {
// If it's already a NO_BRIEF_SELECTED error, don't wrap it
if (
error instanceof TaskMasterError &&
error.is(ERROR_CODES.NO_BRIEF_SELECTED)
) {
throw error;
}
throw new TaskMasterError(
message,
ERROR_CODES.STORAGE_ERROR,
context,
error as Error
);
}
}
```
--------------------------------------------------------------------------------
/docs/models.md:
--------------------------------------------------------------------------------
```markdown
# Available Models as of November 18, 2025
## Main Models
| Provider | Model Name | SWE Score | Input Cost | Output Cost |
| ----------- | ---------------------------------------------- | --------- | ---------- | ----------- |
| anthropic | claude-sonnet-4-20250514 | 0.727 | 3 | 15 |
| anthropic | claude-opus-4-20250514 | 0.725 | 15 | 75 |
| anthropic | claude-3-7-sonnet-20250219 | 0.623 | 3 | 15 |
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
| anthropic | claude-sonnet-4-5-20250929 | 0.73 | 3 | 15 |
| anthropic | claude-haiku-4-5-20251001 | 0.45 | 1 | 5 |
| claude-code | opus | 0.725 | 0 | 0 |
| claude-code | sonnet | 0.727 | 0 | 0 |
| claude-code | haiku | 0.45 | 0 | 0 |
| codex-cli | gpt-5 | 0.749 | 0 | 0 |
| codex-cli | gpt-5-codex | 0.749 | 0 | 0 |
| mcp | mcp-sampling | — | 0 | 0 |
| gemini-cli | gemini-3-pro-preview | 0.762 | 0 | 0 |
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
| grok-cli | grok-4-latest | 0.7 | 0 | 0 |
| grok-cli | grok-3-latest | 0.65 | 0 | 0 |
| grok-cli | grok-3-fast | 0.6 | 0 | 0 |
| grok-cli | grok-3-mini-fast | 0.55 | 0 | 0 |
| openai | gpt-4o | 0.332 | 2.5 | 10 |
| openai | o1 | 0.489 | 15 | 60 |
| openai | o3 | 0.5 | 2 | 8 |
| openai | o3-mini | 0.493 | 1.1 | 4.4 |
| openai | o4-mini | 0.45 | 1.1 | 4.4 |
| openai | o1-mini | 0.4 | 1.1 | 4.4 |
| openai | o1-pro | — | 150 | 600 |
| openai | gpt-4-5-preview | 0.38 | 75 | 150 |
| openai | gpt-4-1-mini | — | 0.4 | 1.6 |
| openai | gpt-4-1-nano | — | 0.1 | 0.4 |
| openai | gpt-4o-mini | 0.3 | 0.15 | 0.6 |
| openai | gpt-5 | 0.749 | 5 | 20 |
| google | gemini-3-pro-preview | 0.762 | 2 | 12 |
| google | gemini-2.5-pro-preview-05-06 | 0.638 | — | — |
| google | gemini-2.5-pro-preview-03-25 | 0.638 | — | — |
| google | gemini-2.5-flash-preview-04-17 | 0.604 | — | — |
| google | gemini-2.0-flash | 0.518 | 0.15 | 0.6 |
| google | gemini-2.0-flash-lite | — | — | — |
| xai | grok-3 | — | 3 | 15 |
| xai | grok-3-fast | — | 5 | 25 |
| xai | grok-4 | — | 3 | 15 |
| groq | moonshotai/kimi-k2-instruct | 0.66 | 1 | 3 |
| groq | llama-3.3-70b-versatile | 0.55 | 0.59 | 0.79 |
| groq | llama-3.1-8b-instant | 0.32 | 0.05 | 0.08 |
| groq | llama-4-scout | 0.45 | 0.11 | 0.34 |
| groq | llama-4-maverick | 0.52 | 0.5 | 0.77 |
| groq | mixtral-8x7b-32768 | 0.35 | 0.24 | 0.24 |
| groq | qwen-qwq-32b-preview | 0.4 | 0.18 | 0.18 |
| groq | deepseek-r1-distill-llama-70b | 0.52 | 0.75 | 0.99 |
| groq | gemma2-9b-it | 0.3 | 0.2 | 0.2 |
| groq | whisper-large-v3 | — | 0.11 | 0 |
| perplexity | sonar-pro | — | 3 | 15 |
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
| openrouter | google/gemini-2.5-flash-preview-05-20 | — | 0.15 | 0.6 |
| openrouter | google/gemini-2.5-flash-preview-05-20:thinking | — | 0.15 | 3.5 |
| openrouter | google/gemini-2.5-pro-exp-03-25 | — | 0 | 0 |
| openrouter | deepseek/deepseek-chat-v3-0324 | — | 0.27 | 1.1 |
| openrouter | openai/gpt-4.1 | — | 2 | 8 |
| openrouter | openai/gpt-4.1-mini | — | 0.4 | 1.6 |
| openrouter | openai/gpt-4.1-nano | — | 0.1 | 0.4 |
| openrouter | openai/o3 | — | 10 | 40 |
| openrouter | openai/codex-mini | — | 1.5 | 6 |
| openrouter | openai/gpt-4o-mini | — | 0.15 | 0.6 |
| openrouter | openai/o4-mini | 0.45 | 1.1 | 4.4 |
| openrouter | openai/o4-mini-high | — | 1.1 | 4.4 |
| openrouter | openai/o1-pro | — | 150 | 600 |
| openrouter | meta-llama/llama-3.3-70b-instruct | — | 120 | 600 |
| openrouter | meta-llama/llama-4-maverick | — | 0.18 | 0.6 |
| openrouter | meta-llama/llama-4-scout | — | 0.08 | 0.3 |
| openrouter | qwen/qwen-max | — | 1.6 | 6.4 |
| openrouter | qwen/qwen-turbo | — | 0.05 | 0.2 |
| openrouter | qwen/qwen3-235b-a22b | — | 0.14 | 2 |
| openrouter | mistralai/mistral-small-3.1-24b-instruct | — | 0.1 | 0.3 |
| openrouter | mistralai/devstral-small | — | 0.1 | 0.3 |
| openrouter | mistralai/mistral-nemo | — | 0.03 | 0.07 |
| zai | glm-4.6 | 0.68 | 0.6 | 2.2 |
| zai | glm-4.5 | 0.65 | 0.6 | 2.2 |
| zai | glm-4.5-air | 0.62 | 0.2 | 1.1 |
| zai-coding | glm-4.6 | 0.68 | 0 | 0 |
| zai-coding | glm-4.5 | 0.65 | 0 | 0 |
| zai-coding | glm-4.5-air | 0.62 | 0 | 0 |
| ollama | gpt-oss:latest | 0.607 | 0 | 0 |
| ollama | gpt-oss:20b | 0.607 | 0 | 0 |
| ollama | gpt-oss:120b | 0.624 | 0 | 0 |
| ollama | devstral:latest | — | 0 | 0 |
| ollama | qwen3:latest | — | 0 | 0 |
| ollama | qwen3:14b | — | 0 | 0 |
| ollama | qwen3:32b | — | 0 | 0 |
| ollama | mistral-small3.1:latest | — | 0 | 0 |
| ollama | llama3.3:latest | — | 0 | 0 |
| ollama | phi4:latest | — | 0 | 0 |
| azure | gpt-4o | 0.332 | 2.5 | 10 |
| azure | gpt-4o-mini | 0.3 | 0.15 | 0.6 |
| azure | gpt-4-1 | — | 2 | 10 |
| bedrock | us.anthropic.claude-3-haiku-20240307-v1:0 | 0.4 | 0.25 | 1.25 |
| bedrock | us.anthropic.claude-3-opus-20240229-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-3-5-sonnet-20240620-v1:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-5-sonnet-20241022-v2:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-7-sonnet-20250219-v1:0 | 0.623 | 3 | 15 |
| bedrock | us.anthropic.claude-3-5-haiku-20241022-v1:0 | 0.4 | 0.8 | 4 |
| bedrock | us.anthropic.claude-opus-4-20250514-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-sonnet-4-20250514-v1:0 | 0.727 | 3 | 15 |
## Research Models
| Provider | Model Name | SWE Score | Input Cost | Output Cost |
| ----------- | -------------------------------------------- | --------- | ---------- | ----------- |
| claude-code | opus | 0.725 | 0 | 0 |
| claude-code | sonnet | 0.727 | 0 | 0 |
| claude-code | haiku | 0.45 | 0 | 0 |
| codex-cli | gpt-5 | 0.749 | 0 | 0 |
| codex-cli | gpt-5-codex | 0.749 | 0 | 0 |
| mcp | mcp-sampling | — | 0 | 0 |
| gemini-cli | gemini-3-pro-preview | 0.762 | 0 | 0 |
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
| grok-cli | grok-4-latest | 0.7 | 0 | 0 |
| grok-cli | grok-3-latest | 0.65 | 0 | 0 |
| grok-cli | grok-3-fast | 0.6 | 0 | 0 |
| grok-cli | grok-3-mini-fast | 0.55 | 0 | 0 |
| openai | gpt-4o-search-preview | 0.33 | 2.5 | 10 |
| openai | gpt-4o-mini-search-preview | 0.3 | 0.15 | 0.6 |
| google | gemini-3-pro-preview | 0.762 | 2 | 12 |
| xai | grok-3 | — | 3 | 15 |
| xai | grok-3-fast | — | 5 | 25 |
| xai | grok-4 | — | 3 | 15 |
| groq | llama-3.3-70b-versatile | 0.55 | 0.59 | 0.79 |
| groq | llama-4-scout | 0.45 | 0.11 | 0.34 |
| groq | llama-4-maverick | 0.52 | 0.5 | 0.77 |
| groq | qwen-qwq-32b-preview | 0.4 | 0.18 | 0.18 |
| groq | deepseek-r1-distill-llama-70b | 0.52 | 0.75 | 0.99 |
| perplexity | sonar-pro | — | 3 | 15 |
| perplexity | sonar | — | 1 | 1 |
| perplexity | sonar-deep-research | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
| zai | glm-4.6 | 0.68 | 0.6 | 2.2 |
| zai | glm-4.5 | 0.65 | 0.6 | 2.2 |
| zai | glm-4.5-air | 0.62 | 0.2 | 1.1 |
| zai-coding | glm-4.6 | 0.68 | 0 | 0 |
| zai-coding | glm-4.5 | 0.65 | 0 | 0 |
| zai-coding | glm-4.5-air | 0.62 | 0 | 0 |
| bedrock | us.anthropic.claude-3-opus-20240229-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-3-5-sonnet-20240620-v1:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-5-sonnet-20241022-v2:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-7-sonnet-20250219-v1:0 | 0.623 | 3 | 15 |
| bedrock | us.anthropic.claude-opus-4-20250514-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-sonnet-4-20250514-v1:0 | 0.727 | 3 | 15 |
| bedrock | us.deepseek.r1-v1:0 | — | 1.35 | 5.4 |
## Fallback Models
| Provider | Model Name | SWE Score | Input Cost | Output Cost |
| ----------- | ---------------------------------------------- | --------- | ---------- | ----------- |
| anthropic | claude-sonnet-4-20250514 | 0.727 | 3 | 15 |
| anthropic | claude-opus-4-20250514 | 0.725 | 15 | 75 |
| anthropic | claude-3-7-sonnet-20250219 | 0.623 | 3 | 15 |
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
| anthropic | claude-sonnet-4-5-20250929 | 0.73 | 3 | 15 |
| anthropic | claude-haiku-4-5-20251001 | 0.45 | 1 | 5 |
| claude-code | opus | 0.725 | 0 | 0 |
| claude-code | sonnet | 0.727 | 0 | 0 |
| claude-code | haiku | 0.45 | 0 | 0 |
| codex-cli | gpt-5 | 0.749 | 0 | 0 |
| codex-cli | gpt-5-codex | 0.749 | 0 | 0 |
| mcp | mcp-sampling | — | 0 | 0 |
| gemini-cli | gemini-3-pro-preview | 0.762 | 0 | 0 |
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
| grok-cli | grok-4-latest | 0.7 | 0 | 0 |
| grok-cli | grok-3-latest | 0.65 | 0 | 0 |
| grok-cli | grok-3-fast | 0.6 | 0 | 0 |
| grok-cli | grok-3-mini-fast | 0.55 | 0 | 0 |
| openai | gpt-4o | 0.332 | 2.5 | 10 |
| openai | o3 | 0.5 | 2 | 8 |
| openai | o4-mini | 0.45 | 1.1 | 4.4 |
| openai | gpt-5 | 0.749 | 5 | 20 |
| google | gemini-3-pro-preview | 0.762 | 2 | 12 |
| google | gemini-2.5-pro-preview-05-06 | 0.638 | — | — |
| google | gemini-2.5-pro-preview-03-25 | 0.638 | — | — |
| google | gemini-2.5-flash-preview-04-17 | 0.604 | — | — |
| google | gemini-2.0-flash | 0.518 | 0.15 | 0.6 |
| google | gemini-2.0-flash-lite | — | — | — |
| xai | grok-3 | — | 3 | 15 |
| xai | grok-3-fast | — | 5 | 25 |
| xai | grok-4 | — | 3 | 15 |
| groq | moonshotai/kimi-k2-instruct | 0.66 | 1 | 3 |
| groq | llama-3.3-70b-versatile | 0.55 | 0.59 | 0.79 |
| groq | llama-3.1-8b-instant | 0.32 | 0.05 | 0.08 |
| groq | llama-4-scout | 0.45 | 0.11 | 0.34 |
| groq | llama-4-maverick | 0.52 | 0.5 | 0.77 |
| groq | mixtral-8x7b-32768 | 0.35 | 0.24 | 0.24 |
| groq | qwen-qwq-32b-preview | 0.4 | 0.18 | 0.18 |
| groq | gemma2-9b-it | 0.3 | 0.2 | 0.2 |
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
| openrouter | google/gemini-2.5-flash-preview-05-20 | — | 0.15 | 0.6 |
| openrouter | google/gemini-2.5-flash-preview-05-20:thinking | — | 0.15 | 3.5 |
| openrouter | google/gemini-2.5-pro-exp-03-25 | — | 0 | 0 |
| openrouter | openai/gpt-4.1 | — | 2 | 8 |
| openrouter | openai/gpt-4.1-mini | — | 0.4 | 1.6 |
| openrouter | openai/gpt-4.1-nano | — | 0.1 | 0.4 |
| openrouter | openai/o3 | — | 10 | 40 |
| openrouter | openai/codex-mini | — | 1.5 | 6 |
| openrouter | openai/gpt-4o-mini | — | 0.15 | 0.6 |
| openrouter | openai/o4-mini | 0.45 | 1.1 | 4.4 |
| openrouter | openai/o4-mini-high | — | 1.1 | 4.4 |
| openrouter | openai/o1-pro | — | 150 | 600 |
| openrouter | meta-llama/llama-3.3-70b-instruct | — | 120 | 600 |
| openrouter | meta-llama/llama-4-maverick | — | 0.18 | 0.6 |
| openrouter | meta-llama/llama-4-scout | — | 0.08 | 0.3 |
| openrouter | qwen/qwen-max | — | 1.6 | 6.4 |
| openrouter | qwen/qwen-turbo | — | 0.05 | 0.2 |
| openrouter | qwen/qwen3-235b-a22b | — | 0.14 | 2 |
| openrouter | mistralai/mistral-small-3.1-24b-instruct | — | 0.1 | 0.3 |
| openrouter | mistralai/mistral-nemo | — | 0.03 | 0.07 |
| zai | glm-4.6 | 0.68 | 0.6 | 2.2 |
| zai | glm-4.5 | 0.65 | 0.6 | 2.2 |
| zai | glm-4.5-air | 0.62 | 0.2 | 1.1 |
| zai-coding | glm-4.6 | 0.68 | 0 | 0 |
| zai-coding | glm-4.5 | 0.65 | 0 | 0 |
| zai-coding | glm-4.5-air | 0.62 | 0 | 0 |
| ollama | gpt-oss:latest | 0.607 | 0 | 0 |
| ollama | gpt-oss:20b | 0.607 | 0 | 0 |
| ollama | gpt-oss:120b | 0.624 | 0 | 0 |
| ollama | devstral:latest | — | 0 | 0 |
| ollama | qwen3:latest | — | 0 | 0 |
| ollama | qwen3:14b | — | 0 | 0 |
| ollama | qwen3:32b | — | 0 | 0 |
| ollama | mistral-small3.1:latest | — | 0 | 0 |
| ollama | llama3.3:latest | — | 0 | 0 |
| ollama | phi4:latest | — | 0 | 0 |
| azure | gpt-4o | 0.332 | 2.5 | 10 |
| azure | gpt-4o-mini | 0.3 | 0.15 | 0.6 |
| azure | gpt-4-1 | — | 2 | 10 |
| bedrock | us.anthropic.claude-3-haiku-20240307-v1:0 | 0.4 | 0.25 | 1.25 |
| bedrock | us.anthropic.claude-3-opus-20240229-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-3-5-sonnet-20240620-v1:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-5-sonnet-20241022-v2:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-7-sonnet-20250219-v1:0 | 0.623 | 3 | 15 |
| bedrock | us.anthropic.claude-3-5-haiku-20241022-v1:0 | 0.4 | 0.8 | 4 |
| bedrock | us.anthropic.claude-opus-4-20250514-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-sonnet-4-20250514-v1:0 | 0.727 | 3 | 15 |
## Unsupported Models
| Provider | Model Name | Reason |
| ---------- | --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| openrouter | deepseek/deepseek-chat-v3-0324:free | Free OpenRouter models are not supported due to severe rate limits, lack of tool use support, and other reliability issues that make them impractical for production use. |
| openrouter | mistralai/mistral-small-3.1-24b-instruct:free | Free OpenRouter models are not supported due to severe rate limits, lack of tool use support, and other reliability issues that make them impractical for production use. |
| openrouter | thudm/glm-4-32b:free | Free OpenRouter models are not supported due to severe rate limits, lack of tool use support, and other reliability issues that make them impractical for production use. |
```
--------------------------------------------------------------------------------
/scripts/modules/task-manager/scope-adjustment.js:
--------------------------------------------------------------------------------
```javascript
/**
* scope-adjustment.js
* Core logic for dynamic task complexity adjustment (scope-up and scope-down)
*/
import { z } from 'zod';
import {
log,
readJSON,
writeJSON,
getCurrentTag,
readComplexityReport,
findTaskInComplexityReport
} from '../utils.js';
import {
generateObjectService,
generateTextService
} from '../ai-services-unified.js';
import { findTaskById, taskExists } from '../task-manager.js';
import analyzeTaskComplexity from './analyze-task-complexity.js';
import { findComplexityReportPath } from '../../../src/utils/path-utils.js';
/**
* Valid strength levels for scope adjustments
*/
const VALID_STRENGTHS = ['light', 'regular', 'heavy'];
/**
* Statuses that should be preserved during subtask regeneration
* These represent work that has been started or intentionally set by the user
*/
const PRESERVE_STATUSES = [
'done',
'in-progress',
'review',
'cancelled',
'deferred',
'blocked'
];
/**
* Statuses that should be regenerated during subtask regeneration
* These represent work that hasn't been started yet
*/
const REGENERATE_STATUSES = ['pending'];
/**
* Validates strength parameter
* @param {string} strength - The strength level to validate
* @returns {boolean} True if valid, false otherwise
*/
export function validateStrength(strength) {
return VALID_STRENGTHS.includes(strength);
}
/**
* Re-analyzes the complexity of a single task after scope adjustment
* @param {Object} task - The task to analyze
* @param {string} tasksPath - Path to tasks.json
* @param {Object} context - Context containing projectRoot, tag, session
* @returns {Promise<number|null>} New complexity score or null if analysis failed
*/
async function reanalyzeTaskComplexity(task, tasksPath, context) {
const { projectRoot, tag, session } = context;
try {
// Create a minimal tasks data structure for analysis
const tasksForAnalysis = {
tasks: [task],
metadata: { analyzedAt: new Date().toISOString() }
};
// Find the complexity report path for this tag
const complexityReportPath = findComplexityReportPath(
null,
{ projectRoot, tag },
null
);
if (!complexityReportPath) {
log('warn', 'No complexity report found - cannot re-analyze complexity');
return null;
}
// Use analyze-task-complexity to re-analyze just this task
const analysisOptions = {
file: tasksPath,
output: complexityReportPath,
id: task.id.toString(), // Analyze only this specific task
projectRoot,
tag,
_filteredTasksData: tasksForAnalysis, // Pass pre-filtered data
_originalTaskCount: 1
};
// Run the analysis with proper context
await analyzeTaskComplexity(analysisOptions, { session });
// Read the updated complexity report to get the new score
const updatedReport = readComplexityReport(complexityReportPath);
if (updatedReport) {
const taskAnalysis = findTaskInComplexityReport(updatedReport, task.id);
if (taskAnalysis) {
log(
'info',
`Re-analyzed task ${task.id} complexity: ${taskAnalysis.complexityScore}/10`
);
return taskAnalysis.complexityScore;
}
}
log(
'warn',
`Could not find updated complexity analysis for task ${task.id}`
);
return null;
} catch (error) {
log('error', `Failed to re-analyze task complexity: ${error.message}`);
return null;
}
}
/**
* Gets the current complexity score for a task from the complexity report
* @param {number} taskId - Task ID to look up
* @param {Object} context - Context containing projectRoot, tag
* @returns {number|null} Current complexity score or null if not found
*/
function getCurrentComplexityScore(taskId, context) {
const { projectRoot, tag } = context;
try {
// Find the complexity report path for this tag
const complexityReportPath = findComplexityReportPath(
null,
{ projectRoot, tag },
null
);
if (!complexityReportPath) {
return null;
}
// Read the current complexity report
const complexityReport = readComplexityReport(complexityReportPath);
if (!complexityReport) {
return null;
}
// Find this task's current complexity
const taskAnalysis = findTaskInComplexityReport(complexityReport, taskId);
return taskAnalysis ? taskAnalysis.complexityScore : null;
} catch (error) {
log('debug', `Could not read current complexity score: ${error.message}`);
return null;
}
}
/**
* Regenerates subtasks for a task based on new complexity while preserving completed work
* @param {Object} task - The updated task object
* @param {string} tasksPath - Path to tasks.json
* @param {Object} context - Context containing projectRoot, tag, session
* @param {string} direction - Direction of scope change (up/down) for logging
* @param {string} strength - Strength level ('light', 'regular', 'heavy')
* @param {number|null} originalComplexity - Original complexity score for smarter adjustments
* @returns {Promise<Object>} Object with updated task and regeneration info
*/
async function regenerateSubtasksForComplexity(
task,
tasksPath,
context,
direction,
strength = 'regular',
originalComplexity = null
) {
const { projectRoot, tag, session } = context;
// Check if task has subtasks
if (
!task.subtasks ||
!Array.isArray(task.subtasks) ||
task.subtasks.length === 0
) {
return {
updatedTask: task,
regenerated: false,
preserved: 0,
generated: 0
};
}
// Identify subtasks to preserve vs regenerate
const preservedSubtasks = task.subtasks.filter((subtask) =>
PRESERVE_STATUSES.includes(subtask.status)
);
const pendingSubtasks = task.subtasks.filter((subtask) =>
REGENERATE_STATUSES.includes(subtask.status)
);
// If no pending subtasks, nothing to regenerate
if (pendingSubtasks.length === 0) {
return {
updatedTask: task,
regenerated: false,
preserved: preservedSubtasks.length,
generated: 0
};
}
// Calculate appropriate number of total subtasks based on direction, complexity, strength, and original complexity
let targetSubtaskCount;
const preservedCount = preservedSubtasks.length;
const currentPendingCount = pendingSubtasks.length;
// Use original complexity to inform decisions (if available)
const complexityFactor = originalComplexity
? Math.max(0.5, originalComplexity / 10)
: 1.0;
const complexityInfo = originalComplexity
? ` (original complexity: ${originalComplexity}/10)`
: '';
if (direction === 'up') {
// Scope up: More subtasks for increased complexity
if (strength === 'light') {
const base = Math.max(
5,
preservedCount + Math.ceil(currentPendingCount * 1.1)
);
targetSubtaskCount = Math.ceil(base * (0.8 + 0.4 * complexityFactor));
} else if (strength === 'regular') {
const base = Math.max(
6,
preservedCount + Math.ceil(currentPendingCount * 1.3)
);
targetSubtaskCount = Math.ceil(base * (0.8 + 0.4 * complexityFactor));
} else {
// heavy
const base = Math.max(
8,
preservedCount + Math.ceil(currentPendingCount * 1.6)
);
targetSubtaskCount = Math.ceil(base * (0.8 + 0.6 * complexityFactor));
}
} else {
// Scope down: Fewer subtasks for decreased complexity
// High complexity tasks get reduced more aggressively
const aggressiveFactor =
originalComplexity >= 8 ? 0.7 : originalComplexity >= 6 ? 0.85 : 1.0;
if (strength === 'light') {
const base = Math.max(
3,
preservedCount + Math.ceil(currentPendingCount * 0.8)
);
targetSubtaskCount = Math.ceil(base * aggressiveFactor);
} else if (strength === 'regular') {
const base = Math.max(
3,
preservedCount + Math.ceil(currentPendingCount * 0.5)
);
targetSubtaskCount = Math.ceil(base * aggressiveFactor);
} else {
// heavy
// Heavy scope-down should be much more aggressive - aim for only core functionality
// Very high complexity tasks (9-10) get reduced to almost nothing
const ultraAggressiveFactor =
originalComplexity >= 9 ? 0.3 : originalComplexity >= 7 ? 0.5 : 0.7;
const base = Math.max(
2,
preservedCount + Math.ceil(currentPendingCount * 0.25)
);
targetSubtaskCount = Math.max(1, Math.ceil(base * ultraAggressiveFactor));
}
}
log(
'debug',
`Complexity-aware subtask calculation${complexityInfo}: ${currentPendingCount} pending -> ${targetSubtaskCount} total subtasks (${strength} ${direction})`
);
const newSubtasksNeeded = Math.max(1, targetSubtaskCount - preservedCount);
try {
// Generate new subtasks using AI to match the new complexity level
const systemPrompt = `You are an expert project manager who creates task breakdowns that match complexity levels.`;
const prompt = `Based on this updated task, generate ${newSubtasksNeeded} NEW subtasks that reflect the ${direction === 'up' ? 'increased' : 'decreased'} complexity level:
**Task Title**: ${task.title}
**Task Description**: ${task.description}
**Implementation Details**: ${task.details}
**Test Strategy**: ${task.testStrategy}
**Complexity Direction**: This task was recently scoped ${direction} (${strength} strength) to ${direction === 'up' ? 'increase' : 'decrease'} complexity.
${originalComplexity ? `**Original Complexity**: ${originalComplexity}/10 - consider this when determining appropriate scope level.` : ''}
${preservedCount > 0 ? `**Preserved Subtasks**: ${preservedCount} existing subtasks with work already done will be kept.` : ''}
Generate subtasks that:
${
direction === 'up'
? strength === 'heavy'
? `- Add comprehensive implementation steps with advanced features
- Include extensive error handling, validation, and edge cases
- Cover multiple integration scenarios and advanced testing
- Provide thorough documentation and optimization approaches`
: strength === 'regular'
? `- Add more detailed implementation steps
- Include additional error handling and validation
- Cover more edge cases and advanced features
- Provide more comprehensive testing approaches`
: `- Add some additional implementation details
- Include basic error handling considerations
- Cover a few common edge cases
- Enhance testing approaches slightly`
: strength === 'heavy'
? `- Focus ONLY on absolutely essential core functionality
- Strip out ALL non-critical features (error handling, advanced testing, etc.)
- Provide only the minimum viable implementation
- Eliminate any complex integrations or advanced scenarios
- Aim for the simplest possible working solution`
: strength === 'regular'
? `- Focus on core functionality only
- Simplify implementation steps
- Remove non-essential features
- Streamline to basic requirements`
: `- Focus mainly on core functionality
- Slightly simplify implementation steps
- Remove some non-essential features
- Streamline most requirements`
}
Return a JSON object with a "subtasks" array. Each subtask should have:
- id: Sequential NUMBER starting from 1 (e.g., 1, 2, 3 - NOT "1", "2", "3")
- title: Clear, specific title
- description: Detailed description
- dependencies: Array of dependency IDs as STRINGS (use format ["${task.id}.1", "${task.id}.2"] for siblings, or empty array [] for no dependencies)
- details: Implementation guidance
- status: "pending"
- testStrategy: Testing approach
IMPORTANT:
- The 'id' field must be a NUMBER, not a string!
- Dependencies must be strings, not numbers!
Ensure the JSON is valid and properly formatted.`;
// Define subtask schema
const subtaskSchema = z.object({
subtasks: z.array(
z.object({
id: z.int().positive(),
title: z.string().min(5),
description: z.string().min(10),
dependencies: z.array(z.string()),
details: z.string().min(20),
status: z.string(),
testStrategy: z.string()
})
)
});
const aiResult = await generateObjectService({
role: context.research ? 'research' : 'main',
session: context.session,
systemPrompt,
prompt,
schema: subtaskSchema,
objectName: 'subtask_regeneration',
commandName: context.commandName || `subtask-regen-${direction}`,
outputType: context.outputType || 'cli'
});
const generatedSubtasks = aiResult.mainResult.subtasks || [];
// Post-process generated subtasks to ensure defaults
const processedGeneratedSubtasks = generatedSubtasks.map((subtask) => ({
...subtask,
status: subtask.status || 'pending',
testStrategy: subtask.testStrategy || ''
}));
// Ensure new subtasks have unique sequential IDs after the preserved ones
const maxPreservedId = preservedSubtasks.reduce(
(max, st) => Math.max(max, st.id || 0),
0
);
let nextId = maxPreservedId + 1;
const idMapping = new Map();
const normalizedGeneratedSubtasks = processedGeneratedSubtasks
.map((st) => {
const originalId = st.id;
const newId = nextId++;
idMapping.set(originalId, newId);
return {
...st,
id: newId
};
})
.map((st) => ({
...st,
dependencies: (st.dependencies || []).map((dep) => {
if (typeof dep !== 'string' || !dep.startsWith(`${task.id}.`)) {
return dep;
}
const [, siblingIdPart] = dep.split('.');
const originalSiblingId = Number.parseInt(siblingIdPart, 10);
const remappedSiblingId = idMapping.get(originalSiblingId);
return remappedSiblingId ? `${task.id}.${remappedSiblingId}` : dep;
})
}));
// Update task with preserved subtasks + newly generated ones
task.subtasks = [...preservedSubtasks, ...normalizedGeneratedSubtasks];
return {
updatedTask: task,
regenerated: true,
preserved: preservedSubtasks.length,
generated: normalizedGeneratedSubtasks.length
};
} catch (error) {
log(
'warn',
`Failed to regenerate subtasks for task ${task.id}: ${error.message}`
);
// Don't fail the whole operation if subtask regeneration fails
return {
updatedTask: task,
regenerated: false,
preserved: preservedSubtasks.length,
generated: 0,
error: error.message
};
}
}
/**
* Generates AI prompt for scope adjustment
* @param {Object} task - The task to adjust
* @param {string} direction - 'up' or 'down'
* @param {string} strength - 'light', 'regular', or 'heavy'
* @param {string} customPrompt - Optional custom instructions
* @returns {string} The generated prompt
*/
function generateScopePrompt(task, direction, strength, customPrompt) {
const isUp = direction === 'up';
const strengthDescriptions = {
light: isUp ? 'minor enhancements' : 'slight simplifications',
regular: isUp
? 'moderate complexity increases'
: 'moderate simplifications',
heavy: isUp ? 'significant complexity additions' : 'major simplifications'
};
let basePrompt = `You are tasked with adjusting the complexity of a task.
CURRENT TASK:
Title: ${task.title}
Description: ${task.description}
Details: ${task.details}
Test Strategy: ${task.testStrategy || 'Not specified'}
ADJUSTMENT REQUIREMENTS:
- Direction: ${isUp ? 'INCREASE' : 'DECREASE'} complexity
- Strength: ${strength} (${strengthDescriptions[strength]})
- Preserve the core purpose and functionality of the task
- Maintain consistency with the existing task structure`;
if (isUp) {
basePrompt += `
- Add more detailed requirements, edge cases, or advanced features
- Include additional implementation considerations
- Enhance error handling and validation requirements
- Expand testing strategies with more comprehensive scenarios`;
} else {
basePrompt += `
- Focus on core functionality and essential requirements
- Remove or simplify non-essential features
- Streamline implementation details
- Simplify testing to focus on basic functionality`;
}
if (customPrompt) {
basePrompt += `\n\nCUSTOM INSTRUCTIONS:\n${customPrompt}`;
}
basePrompt += `\n\nReturn a JSON object with the updated task containing these fields:
- title: Updated task title
- description: Updated task description
- details: Updated implementation details
- testStrategy: Updated test strategy
- priority: Task priority ('low', 'medium', or 'high')
Ensure the JSON is valid and properly formatted.`;
return basePrompt;
}
/**
* Adjusts task complexity using AI
* @param {Object} task - The task to adjust
* @param {string} direction - 'up' or 'down'
* @param {string} strength - 'light', 'regular', or 'heavy'
* @param {string} customPrompt - Optional custom instructions
* @param {Object} context - Context object with projectRoot, tag, etc.
* @returns {Promise<Object>} Updated task data and telemetry
*/
async function adjustTaskComplexity(
task,
direction,
strength,
customPrompt,
context
) {
const systemPrompt = `You are an expert software project manager who helps adjust task complexity while maintaining clarity and actionability.`;
const prompt = generateScopePrompt(task, direction, strength, customPrompt);
// Define the task schema for structured response using Zod
const taskSchema = z.object({
title: z
.string()
.min(1)
.describe('Updated task title reflecting scope adjustment'),
description: z
.string()
.min(1)
.describe('Updated task description with adjusted scope'),
details: z
.string()
.min(1)
.describe('Updated implementation details with adjusted complexity'),
testStrategy: z
.string()
.min(1)
.describe('Updated testing approach for the adjusted scope'),
priority: z.enum(['low', 'medium', 'high']).describe('Task priority level')
});
const aiResult = await generateObjectService({
role: context.research ? 'research' : 'main',
session: context.session,
systemPrompt,
prompt,
schema: taskSchema,
objectName: 'updated_task',
commandName: context.commandName || `scope-${direction}`,
outputType: context.outputType || 'cli'
});
const updatedTaskData = aiResult.mainResult;
// Ensure priority has a value (in case AI didn't provide one)
const processedTaskData = {
...updatedTaskData,
priority: updatedTaskData.priority || task.priority || 'medium'
};
return {
updatedTask: {
...task,
...processedTaskData
},
telemetryData: aiResult.telemetryData
};
}
/**
* Increases task complexity (scope-up)
* @param {string} tasksPath - Path to tasks.json file
* @param {Array<number>} taskIds - Array of task IDs to scope up
* @param {string} strength - Strength level ('light', 'regular', 'heavy')
* @param {string} customPrompt - Optional custom instructions
* @param {Object} context - Context object with projectRoot, tag, etc.
* @param {string} outputFormat - Output format ('text' or 'json')
* @returns {Promise<Object>} Results of the scope-up operation
*/
export async function scopeUpTask(
tasksPath,
taskIds,
strength = 'regular',
customPrompt = null,
context = {},
outputFormat = 'text'
) {
// Validate inputs
if (!validateStrength(strength)) {
throw new Error(
`Invalid strength level: ${strength}. Must be one of: ${VALID_STRENGTHS.join(', ')}`
);
}
const { projectRoot = '.', tag = 'master' } = context;
// Read tasks data
const data = readJSON(tasksPath, projectRoot, tag);
const tasks = data?.tasks || [];
// Validate all task IDs exist
for (const taskId of taskIds) {
if (!taskExists(tasks, taskId)) {
throw new Error(`Task with ID ${taskId} not found`);
}
}
const updatedTasks = [];
let combinedTelemetryData = null;
// Process each task
for (const taskId of taskIds) {
const taskResult = findTaskById(tasks, taskId);
const task = taskResult.task;
if (!task) {
throw new Error(`Task with ID ${taskId} not found`);
}
if (outputFormat === 'text') {
log('info', `Scoping up task ${taskId}: ${task.title}`);
}
// Get original complexity score (if available)
const originalComplexity = getCurrentComplexityScore(taskId, context);
if (originalComplexity && outputFormat === 'text') {
log('info', `Original complexity: ${originalComplexity}/10`);
}
const adjustResult = await adjustTaskComplexity(
task,
'up',
strength,
customPrompt,
context
);
// Regenerate subtasks based on new complexity while preserving completed work
const subtaskResult = await regenerateSubtasksForComplexity(
adjustResult.updatedTask,
tasksPath,
context,
'up',
strength,
originalComplexity
);
// Log subtask regeneration info if in text mode
if (outputFormat === 'text' && subtaskResult.regenerated) {
log(
'info',
`Regenerated ${subtaskResult.generated} pending subtasks (preserved ${subtaskResult.preserved} completed)`
);
}
// Update task in data
const taskIndex = data.tasks.findIndex((t) => t.id === taskId);
if (taskIndex !== -1) {
data.tasks[taskIndex] = subtaskResult.updatedTask;
updatedTasks.push(subtaskResult.updatedTask);
}
// Re-analyze complexity after scoping (if we have a session for AI calls)
if (context.session && originalComplexity) {
try {
// Write the updated task first so complexity analysis can read it
writeJSON(tasksPath, data, projectRoot, tag);
// Re-analyze complexity
const newComplexity = await reanalyzeTaskComplexity(
subtaskResult.updatedTask,
tasksPath,
context
);
if (newComplexity && outputFormat === 'text') {
const complexityChange = newComplexity - originalComplexity;
const arrow =
complexityChange > 0 ? '↗️' : complexityChange < 0 ? '↘️' : '➡️';
log(
'info',
`New complexity: ${originalComplexity}/10 ${arrow} ${newComplexity}/10 (${complexityChange > 0 ? '+' : ''}${complexityChange})`
);
}
} catch (error) {
if (outputFormat === 'text') {
log('warn', `Could not re-analyze complexity: ${error.message}`);
}
}
}
// Combine telemetry data
if (adjustResult.telemetryData) {
if (!combinedTelemetryData) {
combinedTelemetryData = { ...adjustResult.telemetryData };
} else {
// Sum up costs and tokens
combinedTelemetryData.inputTokens +=
adjustResult.telemetryData.inputTokens || 0;
combinedTelemetryData.outputTokens +=
adjustResult.telemetryData.outputTokens || 0;
combinedTelemetryData.totalTokens +=
adjustResult.telemetryData.totalTokens || 0;
combinedTelemetryData.totalCost +=
adjustResult.telemetryData.totalCost || 0;
}
}
}
// Write updated data
writeJSON(tasksPath, data, projectRoot, tag);
if (outputFormat === 'text') {
log('info', `Successfully scoped up ${updatedTasks.length} task(s)`);
}
return {
updatedTasks,
telemetryData: combinedTelemetryData
};
}
/**
* Decreases task complexity (scope-down)
* @param {string} tasksPath - Path to tasks.json file
* @param {Array<number>} taskIds - Array of task IDs to scope down
* @param {string} strength - Strength level ('light', 'regular', 'heavy')
* @param {string} customPrompt - Optional custom instructions
* @param {Object} context - Context object with projectRoot, tag, etc.
* @param {string} outputFormat - Output format ('text' or 'json')
* @returns {Promise<Object>} Results of the scope-down operation
*/
export async function scopeDownTask(
tasksPath,
taskIds,
strength = 'regular',
customPrompt = null,
context = {},
outputFormat = 'text'
) {
// Validate inputs
if (!validateStrength(strength)) {
throw new Error(
`Invalid strength level: ${strength}. Must be one of: ${VALID_STRENGTHS.join(', ')}`
);
}
const { projectRoot = '.', tag = 'master' } = context;
// Read tasks data
const data = readJSON(tasksPath, projectRoot, tag);
const tasks = data?.tasks || [];
// Validate all task IDs exist
for (const taskId of taskIds) {
if (!taskExists(tasks, taskId)) {
throw new Error(`Task with ID ${taskId} not found`);
}
}
const updatedTasks = [];
let combinedTelemetryData = null;
// Process each task
for (const taskId of taskIds) {
const taskResult = findTaskById(tasks, taskId);
const task = taskResult.task;
if (!task) {
throw new Error(`Task with ID ${taskId} not found`);
}
if (outputFormat === 'text') {
log('info', `Scoping down task ${taskId}: ${task.title}`);
}
// Get original complexity score (if available)
const originalComplexity = getCurrentComplexityScore(taskId, context);
if (originalComplexity && outputFormat === 'text') {
log('info', `Original complexity: ${originalComplexity}/10`);
}
const adjustResult = await adjustTaskComplexity(
task,
'down',
strength,
customPrompt,
context
);
// Regenerate subtasks based on new complexity while preserving completed work
const subtaskResult = await regenerateSubtasksForComplexity(
adjustResult.updatedTask,
tasksPath,
context,
'down',
strength,
originalComplexity
);
// Log subtask regeneration info if in text mode
if (outputFormat === 'text' && subtaskResult.regenerated) {
log(
'info',
`Regenerated ${subtaskResult.generated} pending subtasks (preserved ${subtaskResult.preserved} completed)`
);
}
// Update task in data
const taskIndex = data.tasks.findIndex((t) => t.id === taskId);
if (taskIndex !== -1) {
data.tasks[taskIndex] = subtaskResult.updatedTask;
updatedTasks.push(subtaskResult.updatedTask);
}
// Re-analyze complexity after scoping (if we have a session for AI calls)
if (context.session && originalComplexity) {
try {
// Write the updated task first so complexity analysis can read it
writeJSON(tasksPath, data, projectRoot, tag);
// Re-analyze complexity
const newComplexity = await reanalyzeTaskComplexity(
subtaskResult.updatedTask,
tasksPath,
context
);
if (newComplexity && outputFormat === 'text') {
const complexityChange = newComplexity - originalComplexity;
const arrow =
complexityChange > 0 ? '↗️' : complexityChange < 0 ? '↘️' : '➡️';
log(
'info',
`New complexity: ${originalComplexity}/10 ${arrow} ${newComplexity}/10 (${complexityChange > 0 ? '+' : ''}${complexityChange})`
);
}
} catch (error) {
if (outputFormat === 'text') {
log('warn', `Could not re-analyze complexity: ${error.message}`);
}
}
}
// Combine telemetry data
if (adjustResult.telemetryData) {
if (!combinedTelemetryData) {
combinedTelemetryData = { ...adjustResult.telemetryData };
} else {
// Sum up costs and tokens
combinedTelemetryData.inputTokens +=
adjustResult.telemetryData.inputTokens || 0;
combinedTelemetryData.outputTokens +=
adjustResult.telemetryData.outputTokens || 0;
combinedTelemetryData.totalTokens +=
adjustResult.telemetryData.totalTokens || 0;
combinedTelemetryData.totalCost +=
adjustResult.telemetryData.totalCost || 0;
}
}
}
// Write updated data
writeJSON(tasksPath, data, projectRoot, tag);
if (outputFormat === 'text') {
log('info', `Successfully scoped down ${updatedTasks.length} task(s)`);
}
return {
updatedTasks,
telemetryData: combinedTelemetryData
};
}
```
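For orientation, here is a minimal usage sketch of the two exported functions above. The import path, task IDs, and custom prompt are illustrative placeholders; the strength levels (`light`, `regular`, `heavy`), output formats (`text`, `json`), and return shape (`updatedTasks`, `telemetryData`) follow the implementation above.

```javascript
// Illustrative sketch only — the import path and arguments are placeholders.
import { scopeUpTask, scopeDownTask } from './scope-adjustment.js';

const tasksPath = '.taskmaster/tasks/tasks.json';
const context = { projectRoot: '.', tag: 'master' };

// Broaden tasks 4 and 7 with the default strength, logging progress as text
const upResult = await scopeUpTask(tasksPath, [4, 7], 'regular', null, context, 'text');
console.log(`Scoped up ${upResult.updatedTasks.length} task(s)`);

// Narrow task 9 aggressively, with custom instructions and no console logging
const downResult = await scopeDownTask(
	tasksPath,
	[9],
	'heavy',
	'Drop the optional caching layer',
	context,
	'json'
);
console.log(downResult.telemetryData);
```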
--------------------------------------------------------------------------------
/scripts/modules/utils/contextGatherer.js:
--------------------------------------------------------------------------------
```javascript
/**
* contextGatherer.js
* Comprehensive context gathering utility for Task Master AI operations
* Supports task context, file context, project tree, and custom context
*/
import fs from 'fs';
import path from 'path';
import pkg from 'gpt-tokens';
import Fuse from 'fuse.js';
import {
readJSON,
findTaskById,
truncate,
flattenTasksWithSubtasks
} from '../utils.js';
const { encode } = pkg;
/**
* Context Gatherer class for collecting and formatting context from various sources
*/
export class ContextGatherer {
constructor(projectRoot, tag) {
this.projectRoot = projectRoot;
this.tasksPath = path.join(
projectRoot,
'.taskmaster',
'tasks',
'tasks.json'
);
this.tag = tag;
this.allTasks = this._loadAllTasks();
}
_loadAllTasks() {
try {
const data = readJSON(this.tasksPath, this.projectRoot, this.tag);
const tasks = data?.tasks || [];
return tasks;
} catch (error) {
console.warn(
`Warning: Could not load tasks for ContextGatherer: ${error.message}`
);
return [];
}
}
/**
* Count tokens in a text string using gpt-tokens
* @param {string} text - Text to count tokens for
* @returns {number} Token count
*/
countTokens(text) {
if (!text || typeof text !== 'string') {
return 0;
}
try {
return encode(text).length;
} catch (error) {
// Fallback to rough character-based estimation if tokenizer fails
// Rough estimate: ~4 characters per token for English text
return Math.ceil(text.length / 4);
}
}
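	/*
	 * Illustrative note on the fallback above (not exercised when gpt-tokens is
	 * available): a 400-character string that fails to tokenize is estimated at
	 * Math.ceil(400 / 4) = 100 tokens.
	 */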
/**
* Main method to gather context from multiple sources
* @param {Object} options - Context gathering options
* @param {Array<string>} [options.tasks] - Task/subtask IDs to include
* @param {Array<string>} [options.files] - File paths to include
* @param {string} [options.customContext] - Additional custom context
* @param {boolean} [options.includeProjectTree] - Include project file tree
* @param {string} [options.format] - Output format: 'research', 'chat', 'system-prompt'
* @param {boolean} [options.includeTokenCounts] - Whether to include token breakdown
* @param {string} [options.semanticQuery] - A query string for semantic task searching.
* @param {number} [options.maxSemanticResults] - Max number of semantic results.
* @param {Array<number>} [options.dependencyTasks] - Array of task IDs to build dependency graphs from.
* @returns {Promise<Object>} Object with context string and analysis data
*/
async gather(options = {}) {
const {
tasks = [],
files = [],
customContext = '',
includeProjectTree = false,
format = 'research',
includeTokenCounts = false,
semanticQuery,
maxSemanticResults = 10,
dependencyTasks = []
} = options;
const contextSections = [];
const finalTaskIds = new Set(tasks.map(String));
let analysisData = null;
let tokenBreakdown = null;
// Initialize token breakdown if requested
if (includeTokenCounts) {
tokenBreakdown = {
total: 0,
customContext: null,
tasks: [],
files: [],
projectTree: null
};
}
// Semantic Search
if (semanticQuery && this.allTasks.length > 0) {
const semanticResults = this._performSemanticSearch(
semanticQuery,
maxSemanticResults
);
// Store the analysis data for UI display
analysisData = semanticResults.analysisData;
semanticResults.tasks.forEach((task) => {
finalTaskIds.add(String(task.id));
});
}
// Dependency Graph Analysis
if (dependencyTasks.length > 0) {
const dependencyResults = this._buildDependencyGraphs(dependencyTasks);
dependencyResults.allRelatedTaskIds.forEach((id) =>
finalTaskIds.add(String(id))
);
			// The graphs in dependencyResults could later be formatted (e.g. via _formatDependencyChain) and added here if needed
}
// Add custom context first
if (customContext && customContext.trim()) {
const formattedCustomContext = this._formatCustomContext(
customContext,
format
);
contextSections.push(formattedCustomContext);
// Calculate tokens for custom context if requested
if (includeTokenCounts) {
tokenBreakdown.customContext = {
tokens: this.countTokens(formattedCustomContext),
characters: formattedCustomContext.length
};
tokenBreakdown.total += tokenBreakdown.customContext.tokens;
}
}
// Gather context for the final list of tasks
if (finalTaskIds.size > 0) {
const taskContextResult = await this._gatherTaskContext(
Array.from(finalTaskIds),
format,
includeTokenCounts
);
if (taskContextResult.context) {
contextSections.push(taskContextResult.context);
// Add task breakdown if token counting is enabled
if (includeTokenCounts && taskContextResult.breakdown) {
tokenBreakdown.tasks = taskContextResult.breakdown;
const taskTokens = taskContextResult.breakdown.reduce(
(sum, task) => sum + task.tokens,
0
);
tokenBreakdown.total += taskTokens;
}
}
}
// Add file context
if (files.length > 0) {
const fileContextResult = await this._gatherFileContext(
files,
format,
includeTokenCounts
);
if (fileContextResult.context) {
contextSections.push(fileContextResult.context);
// Add file breakdown if token counting is enabled
if (includeTokenCounts && fileContextResult.breakdown) {
tokenBreakdown.files = fileContextResult.breakdown;
const fileTokens = fileContextResult.breakdown.reduce(
(sum, file) => sum + file.tokens,
0
);
tokenBreakdown.total += fileTokens;
}
}
}
// Add project tree context
if (includeProjectTree) {
const treeContextResult = await this._gatherProjectTreeContext(
format,
includeTokenCounts
);
if (treeContextResult.context) {
contextSections.push(treeContextResult.context);
// Add tree breakdown if token counting is enabled
if (includeTokenCounts && treeContextResult.breakdown) {
tokenBreakdown.projectTree = treeContextResult.breakdown;
tokenBreakdown.total += treeContextResult.breakdown.tokens;
}
}
}
const finalContext = this._joinContextSections(contextSections, format);
const result = {
context: finalContext,
analysisData: analysisData,
contextSections: contextSections.length,
finalTaskIds: Array.from(finalTaskIds)
};
// Only include tokenBreakdown if it was requested
if (includeTokenCounts) {
result.tokenBreakdown = tokenBreakdown;
}
return result;
}
_performSemanticSearch(query, maxResults) {
const searchableTasks = this.allTasks.map((task) => {
const dependencyTitles =
task.dependencies?.length > 0
? task.dependencies
.map((depId) => this.allTasks.find((t) => t.id === depId)?.title)
.filter(Boolean)
.join(' ')
: '';
return { ...task, dependencyTitles };
});
// Use the exact same approach as add-task.js
const searchOptions = {
includeScore: true, // Return match scores
threshold: 0.4, // Lower threshold = stricter matching (range 0-1)
keys: [
				{ name: 'title', weight: 1.5 }, // Titles get the lowest of the text-field weights
				{ name: 'description', weight: 2 }, // Descriptions are weighted more heavily
				{ name: 'details', weight: 3 }, // Implementation details carry the most weight
// Search dependencies to find tasks that depend on similar things
{ name: 'dependencyTitles', weight: 0.5 }
],
// Sort matches by score (lower is better)
shouldSort: true,
			// Enable Fuse's extended (unix-like) search syntax
useExtendedSearch: true,
// Return up to 50 matches
limit: 50
};
// Create search index using Fuse.js
const fuse = new Fuse(searchableTasks, searchOptions);
// Extract significant words and phrases from the prompt (like add-task.js does)
const promptWords = query
.toLowerCase()
			.replace(/[^\w\s-]/g, ' ') // Strip punctuation (keep word chars, whitespace, hyphens)
.split(/\s+/)
.filter((word) => word.length > 3); // Words at least 4 chars
// Use the user's prompt for fuzzy search
const fuzzyResults = fuse.search(query);
// Also search for each significant word to catch different aspects
const wordResults = [];
for (const word of promptWords) {
if (word.length > 5) {
// Only use significant words
const results = fuse.search(word);
if (results.length > 0) {
wordResults.push(...results);
}
}
}
// Merge and deduplicate results
const mergedResults = [...fuzzyResults];
// Add word results that aren't already in fuzzyResults
for (const wordResult of wordResults) {
if (!mergedResults.some((r) => r.item.id === wordResult.item.id)) {
mergedResults.push(wordResult);
}
}
// Group search results by relevance
const highRelevance = mergedResults
.filter((result) => result.score < 0.25)
.map((result) => result.item);
const mediumRelevance = mergedResults
.filter((result) => result.score >= 0.25 && result.score < 0.4)
.map((result) => result.item);
// Get recent tasks (newest first)
const recentTasks = [...this.allTasks]
.sort((a, b) => b.id - a.id)
.slice(0, 5);
// Combine high relevance, medium relevance, and recent tasks
// Prioritize high relevance first
const allRelevantTasks = [...highRelevance];
// Add medium relevance if not already included
for (const task of mediumRelevance) {
if (!allRelevantTasks.some((t) => t.id === task.id)) {
allRelevantTasks.push(task);
}
}
// Add recent tasks if not already included
for (const task of recentTasks) {
if (!allRelevantTasks.some((t) => t.id === task.id)) {
allRelevantTasks.push(task);
}
}
// Get top N results for context
const finalResults = allRelevantTasks.slice(0, maxResults);
return {
tasks: finalResults,
analysisData: {
highRelevance: highRelevance,
mediumRelevance: mediumRelevance,
recentTasks: recentTasks,
allRelevantTasks: allRelevantTasks
}
};
}
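	/*
	 * Note on the relevance buckets above: Fuse.js scores run from 0 (perfect
	 * match) to 1 (no match), so lower is better. A result scored 0.1 lands in
	 * highRelevance (< 0.25), one scored 0.3 in mediumRelevance (0.25–0.4), and
	 * results scoring worse than the configured 0.4 threshold are not returned
	 * by the search at all.
	 */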
_buildDependencyContext(taskIds) {
const { allRelatedTaskIds, graphs, depthMap } =
this._buildDependencyGraphs(taskIds);
if (allRelatedTaskIds.size === 0) return '';
const dependentTasks = Array.from(allRelatedTaskIds)
.map((id) => this.allTasks.find((t) => t.id === id))
.filter(Boolean)
.sort((a, b) => (depthMap.get(a.id) || 0) - (depthMap.get(b.id) || 0));
const uniqueDetailedTasks = dependentTasks.slice(0, 8);
let context = `\nThis task relates to a dependency structure with ${dependentTasks.length} related tasks in the chain.`;
const directDeps = this.allTasks.filter((t) => taskIds.includes(t.id));
if (directDeps.length > 0) {
context += `\n\nDirect dependencies:\n${directDeps
.map((t) => `- Task ${t.id}: ${t.title} - ${t.description}`)
.join('\n')}`;
}
const indirectDeps = dependentTasks.filter((t) => !taskIds.includes(t.id));
if (indirectDeps.length > 0) {
context += `\n\nIndirect dependencies (dependencies of dependencies):\n${indirectDeps
.slice(0, 5)
.map((t) => `- Task ${t.id}: ${t.title} - ${t.description}`)
.join('\n')}`;
if (indirectDeps.length > 5)
context += `\n- ... and ${
indirectDeps.length - 5
} more indirect dependencies`;
}
context += `\n\nDetailed information about dependencies:`;
for (const depTask of uniqueDetailedTasks) {
const isDirect = taskIds.includes(depTask.id)
? ' [DIRECT DEPENDENCY]'
: '';
context += `\n\n------ Task ${depTask.id}${isDirect}: ${depTask.title} ------\n`;
context += `Description: ${depTask.description}\n`;
if (depTask.dependencies?.length) {
context += `Dependencies: ${depTask.dependencies.join(', ')}\n`;
}
if (depTask.details) {
context += `Implementation Details: ${truncate(
depTask.details,
400
)}\n`;
}
}
if (graphs.length > 0) {
context += '\n\nDependency Chain Visualization:';
context += graphs
.map((graph) => this._formatDependencyChain(graph))
.join('');
}
return context;
}
_buildDependencyGraphs(taskIds) {
const visited = new Set();
const depthMap = new Map();
const graphs = [];
for (const id of taskIds) {
const graph = this._buildDependencyGraph(id, visited, depthMap);
if (graph) graphs.push(graph);
}
return { allRelatedTaskIds: visited, graphs, depthMap };
}
_buildDependencyGraph(taskId, visited, depthMap, depth = 0) {
if (visited.has(taskId) || depth > 5) return null; // Limit recursion depth
const task = this.allTasks.find((t) => t.id === taskId);
if (!task) return null;
visited.add(taskId);
if (!depthMap.has(taskId) || depth < depthMap.get(taskId)) {
depthMap.set(taskId, depth);
}
const dependencies =
task.dependencies
?.map((depId) =>
this._buildDependencyGraph(depId, visited, depthMap, depth + 1)
)
.filter(Boolean) || [];
return { ...task, dependencies };
}
_formatDependencyChain(node, prefix = '', isLast = true, depth = 0) {
if (depth > 3) return '';
const connector = isLast ? '└── ' : '├── ';
let result = `${prefix}${connector}Task ${node.id}: ${node.title}`;
if (node.dependencies?.length) {
const childPrefix = prefix + (isLast ? ' ' : '│ ');
result += node.dependencies
.map((dep, index) =>
this._formatDependencyChain(
dep,
childPrefix,
index === node.dependencies.length - 1,
depth + 1
)
)
.join('');
}
return '\n' + result;
}
/**
* Parse task ID strings into structured format
	 * Supports plain task IDs ("15") and dotted subtask IDs ("15.2"); comma-separated lists must be split into individual IDs before calling
* @param {Array<string>} taskIds - Array of task ID strings
* @returns {Array<Object>} Parsed task identifiers
*/
_parseTaskIds(taskIds) {
const parsed = [];
for (const idStr of taskIds) {
if (idStr.includes('.')) {
// Subtask format: "15.2"
const [parentId, subtaskId] = idStr.split('.');
parsed.push({
type: 'subtask',
parentId: parseInt(parentId, 10),
subtaskId: parseInt(subtaskId, 10),
fullId: idStr
});
} else {
// Task format: "15"
parsed.push({
type: 'task',
taskId: parseInt(idStr, 10),
fullId: idStr
});
}
}
return parsed;
}
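	/*
	 * Illustrative example (not executed):
	 *   _parseTaskIds(['15', '15.2']) =>
	 *   [
	 *     { type: 'task', taskId: 15, fullId: '15' },
	 *     { type: 'subtask', parentId: 15, subtaskId: 2, fullId: '15.2' }
	 *   ]
	 */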
/**
* Gather context from tasks and subtasks
* @param {Array<string>} taskIds - Task/subtask IDs
* @param {string} format - Output format
* @param {boolean} includeTokenCounts - Whether to include token breakdown
* @returns {Promise<Object>} Task context result with breakdown
*/
async _gatherTaskContext(taskIds, format, includeTokenCounts = false) {
try {
if (!this.allTasks || this.allTasks.length === 0) {
return { context: null, breakdown: [] };
}
const parsedIds = this._parseTaskIds(taskIds);
const contextItems = [];
const breakdown = [];
for (const parsed of parsedIds) {
let formattedItem = null;
let itemInfo = null;
if (parsed.type === 'task') {
const result = findTaskById(this.allTasks, parsed.taskId);
if (result.task) {
formattedItem = this._formatTaskForContext(result.task, format);
itemInfo = {
id: parsed.fullId,
type: 'task',
title: result.task.title,
tokens: includeTokenCounts ? this.countTokens(formattedItem) : 0,
characters: formattedItem.length
};
}
} else if (parsed.type === 'subtask') {
const parentResult = findTaskById(this.allTasks, parsed.parentId);
if (parentResult.task && parentResult.task.subtasks) {
const subtask = parentResult.task.subtasks.find(
(st) => st.id === parsed.subtaskId
);
if (subtask) {
formattedItem = this._formatSubtaskForContext(
subtask,
parentResult.task,
format
);
itemInfo = {
id: parsed.fullId,
type: 'subtask',
title: subtask.title,
parentTitle: parentResult.task.title,
tokens: includeTokenCounts
? this.countTokens(formattedItem)
: 0,
characters: formattedItem.length
};
}
}
}
if (formattedItem && itemInfo) {
contextItems.push(formattedItem);
if (includeTokenCounts) {
breakdown.push(itemInfo);
}
}
}
if (contextItems.length === 0) {
return { context: null, breakdown: [] };
}
const finalContext = this._formatTaskContextSection(contextItems, format);
return {
context: finalContext,
breakdown: includeTokenCounts ? breakdown : []
};
} catch (error) {
console.warn(`Warning: Could not gather task context: ${error.message}`);
return { context: null, breakdown: [] };
}
}
/**
* Format a task for context inclusion
* @param {Object} task - Task object
* @param {string} format - Output format
* @returns {string} Formatted task context
*/
_formatTaskForContext(task, format) {
const sections = [];
sections.push(`**Task ${task.id}: ${task.title}**`);
sections.push(`Description: ${task.description}`);
sections.push(`Status: ${task.status || 'pending'}`);
sections.push(`Priority: ${task.priority || 'medium'}`);
if (task.dependencies && task.dependencies.length > 0) {
sections.push(`Dependencies: ${task.dependencies.join(', ')}`);
}
if (task.details) {
const details = truncate(task.details, 500);
sections.push(`Implementation Details: ${details}`);
}
if (task.testStrategy) {
const testStrategy = truncate(task.testStrategy, 300);
sections.push(`Test Strategy: ${testStrategy}`);
}
if (task.subtasks && task.subtasks.length > 0) {
sections.push(`Subtasks: ${task.subtasks.length} subtasks defined`);
}
return sections.join('\n');
}
/**
* Format a subtask for context inclusion
* @param {Object} subtask - Subtask object
* @param {Object} parentTask - Parent task object
* @param {string} format - Output format
* @returns {string} Formatted subtask context
*/
_formatSubtaskForContext(subtask, parentTask, format) {
const sections = [];
sections.push(
`**Subtask ${parentTask.id}.${subtask.id}: ${subtask.title}**`
);
sections.push(`Parent Task: ${parentTask.title}`);
sections.push(`Description: ${subtask.description}`);
sections.push(`Status: ${subtask.status || 'pending'}`);
if (subtask.dependencies && subtask.dependencies.length > 0) {
sections.push(`Dependencies: ${subtask.dependencies.join(', ')}`);
}
if (subtask.details) {
const details = truncate(subtask.details, 500);
sections.push(`Implementation Details: ${details}`);
}
return sections.join('\n');
}
/**
* Gather context from files
* @param {Array<string>} filePaths - File paths to read
* @param {string} format - Output format
* @param {boolean} includeTokenCounts - Whether to include token breakdown
* @returns {Promise<Object>} File context result with breakdown
*/
async _gatherFileContext(filePaths, format, includeTokenCounts = false) {
const fileContents = [];
const breakdown = [];
for (const filePath of filePaths) {
try {
const fullPath = path.isAbsolute(filePath)
? filePath
: path.join(this.projectRoot, filePath);
if (!fs.existsSync(fullPath)) {
continue;
}
const stats = fs.statSync(fullPath);
if (!stats.isFile()) {
continue;
}
// Check file size (limit to 50KB for context)
if (stats.size > 50 * 1024) {
continue;
}
const content = fs.readFileSync(fullPath, 'utf-8');
const relativePath = path.relative(this.projectRoot, fullPath);
const fileData = {
path: relativePath,
size: stats.size,
content: content,
lastModified: stats.mtime
};
fileContents.push(fileData);
// Calculate tokens for this individual file if requested
if (includeTokenCounts) {
const formattedFile = this._formatSingleFileForContext(
fileData,
format
);
breakdown.push({
path: relativePath,
sizeKB: Math.round(stats.size / 1024),
tokens: this.countTokens(formattedFile),
characters: formattedFile.length
});
}
} catch (error) {
console.warn(
`Warning: Could not read file ${filePath}: ${error.message}`
);
}
}
if (fileContents.length === 0) {
return { context: null, breakdown: [] };
}
const finalContext = this._formatFileContextSection(fileContents, format);
return {
context: finalContext,
breakdown: includeTokenCounts ? breakdown : []
};
}
/**
* Generate project file tree context
* @param {string} format - Output format
* @param {boolean} includeTokenCounts - Whether to include token breakdown
* @returns {Promise<Object>} Project tree context result with breakdown
*/
async _gatherProjectTreeContext(format, includeTokenCounts = false) {
try {
const tree = this._generateFileTree(this.projectRoot, 5); // Max depth 5
const finalContext = this._formatProjectTreeSection(tree, format);
const breakdown = includeTokenCounts
? {
tokens: this.countTokens(finalContext),
characters: finalContext.length,
fileCount: tree.fileCount || 0,
dirCount: tree.dirCount || 0
}
: null;
return {
context: finalContext,
breakdown: breakdown
};
} catch (error) {
console.warn(
`Warning: Could not generate project tree: ${error.message}`
);
return { context: null, breakdown: null };
}
}
/**
* Format a single file for context (used for token counting)
* @param {Object} fileData - File data object
* @param {string} format - Output format
* @returns {string} Formatted file context
*/
_formatSingleFileForContext(fileData, format) {
const header = `**File: ${fileData.path}** (${Math.round(fileData.size / 1024)}KB)`;
const content = `\`\`\`\n${fileData.content}\n\`\`\``;
return `${header}\n\n${content}`;
}
/**
* Generate file tree structure
* @param {string} dirPath - Directory path
* @param {number} maxDepth - Maximum depth to traverse
* @param {number} currentDepth - Current depth
* @returns {Object} File tree structure
*/
_generateFileTree(dirPath, maxDepth, currentDepth = 0) {
const ignoreDirs = [
'.git',
'node_modules',
'.env',
'coverage',
'dist',
'build'
];
const ignoreFiles = ['.DS_Store', '.env', '.env.local', '.env.production'];
if (currentDepth >= maxDepth) {
return null;
}
try {
const items = fs.readdirSync(dirPath);
const tree = {
name: path.basename(dirPath),
type: 'directory',
children: [],
fileCount: 0,
dirCount: 0
};
for (const item of items) {
if (ignoreDirs.includes(item) || ignoreFiles.includes(item)) {
continue;
}
const itemPath = path.join(dirPath, item);
const stats = fs.statSync(itemPath);
if (stats.isDirectory()) {
tree.dirCount++;
if (currentDepth < maxDepth - 1) {
const subtree = this._generateFileTree(
itemPath,
maxDepth,
currentDepth + 1
);
if (subtree) {
tree.children.push(subtree);
}
}
} else {
tree.fileCount++;
tree.children.push({
name: item,
type: 'file',
size: stats.size
});
}
}
return tree;
} catch (error) {
return null;
}
}
/**
* Format custom context section
* @param {string} customContext - Custom context string
* @param {string} format - Output format
* @returns {string} Formatted custom context
*/
_formatCustomContext(customContext, format) {
switch (format) {
case 'research':
return `## Additional Context\n\n${customContext}`;
case 'chat':
return `**Additional Context:**\n${customContext}`;
case 'system-prompt':
return `Additional context: ${customContext}`;
default:
return customContext;
}
}
/**
* Format task context section
* @param {Array<string>} taskItems - Formatted task items
* @param {string} format - Output format
* @returns {string} Formatted task context section
*/
_formatTaskContextSection(taskItems, format) {
switch (format) {
case 'research':
return `## Task Context\n\n${taskItems.join('\n\n---\n\n')}`;
case 'chat':
return `**Task Context:**\n\n${taskItems.join('\n\n')}`;
case 'system-prompt':
return `Task context: ${taskItems.join(' | ')}`;
default:
return taskItems.join('\n\n');
}
}
/**
* Format file context section
* @param {Array<Object>} fileContents - File content objects
* @param {string} format - Output format
* @returns {string} Formatted file context section
*/
_formatFileContextSection(fileContents, format) {
const fileItems = fileContents.map((file) => {
const header = `**File: ${file.path}** (${Math.round(file.size / 1024)}KB)`;
const content = `\`\`\`\n${file.content}\n\`\`\``;
return `${header}\n\n${content}`;
});
switch (format) {
case 'research':
return `## File Context\n\n${fileItems.join('\n\n---\n\n')}`;
case 'chat':
return `**File Context:**\n\n${fileItems.join('\n\n')}`;
case 'system-prompt':
return `File context: ${fileContents.map((f) => `${f.path} (${f.content.substring(0, 200)}...)`).join(' | ')}`;
default:
return fileItems.join('\n\n');
}
}
/**
* Format project tree section
* @param {Object} tree - File tree structure
* @param {string} format - Output format
* @returns {string} Formatted project tree section
*/
_formatProjectTreeSection(tree, format) {
const treeString = this._renderFileTree(tree);
switch (format) {
case 'research':
return `## Project Structure\n\n\`\`\`\n${treeString}\n\`\`\``;
case 'chat':
return `**Project Structure:**\n\`\`\`\n${treeString}\n\`\`\``;
case 'system-prompt':
return `Project structure: ${treeString.replace(/\n/g, ' | ')}`;
default:
return treeString;
}
}
/**
* Render file tree as string
* @param {Object} tree - File tree structure
	 * @param {string} prefix - Prefix printed before this node's own line
	 * @param {string} childIndent - Accumulated indentation for this node's children
	 * @returns {string} Rendered tree string
	 */
	_renderFileTree(tree, prefix = '', childIndent = '') {
		let result = `${prefix}${tree.name}/`;
		if (tree.fileCount > 0 || tree.dirCount > 0) {
			result += ` (${tree.fileCount} files, ${tree.dirCount} dirs)`;
		}
		result += '\n';
		if (tree.children) {
			tree.children.forEach((child, index) => {
				const isLast = index === tree.children.length - 1;
				const childPrefix = childIndent + (isLast ? '└── ' : '├── ');
				const nextIndent = childIndent + (isLast ? '    ' : '│   ');
				if (child.type === 'directory') {
					// Pass the connector prefix for the child's own line and the plain
					// continuation indent for its descendants, so connectors are not
					// repeated at deeper nesting levels
					result += this._renderFileTree(child, childPrefix, nextIndent);
				} else {
					result += `${childPrefix}${child.name}\n`;
				}
			});
		}
		return result;
	}
/**
* Join context sections based on format
* @param {Array<string>} sections - Context sections
* @param {string} format - Output format
* @returns {string} Joined context string
*/
_joinContextSections(sections, format) {
if (sections.length === 0) {
return '';
}
switch (format) {
case 'research':
return sections.join('\n\n---\n\n');
case 'chat':
return sections.join('\n\n');
case 'system-prompt':
return sections.join(' ');
default:
return sections.join('\n\n');
}
}
}
/**
* Factory function to create a context gatherer instance
* @param {string} projectRoot - Project root directory
 * @param {string} tag - Tag context whose tasks should be loaded
* @returns {ContextGatherer} Context gatherer instance
* @throws {Error} If tag is not provided
*/
export function createContextGatherer(projectRoot, tag) {
if (!tag) {
throw new Error('Tag is required');
}
return new ContextGatherer(projectRoot, tag);
}
export default ContextGatherer;
```
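For orientation, a minimal usage sketch of the gatherer above. The import path, task IDs, file list, and query are illustrative placeholders; the option names and the result fields (`context`, `tokenBreakdown`, `finalTaskIds`) follow the `gather()` implementation.

```javascript
// Illustrative sketch only — the import path and arguments are placeholders.
import { createContextGatherer } from './contextGatherer.js';

const gatherer = createContextGatherer('/path/to/project', 'master');

const { context, tokenBreakdown, finalTaskIds } = await gatherer.gather({
	tasks: ['15', '15.2'], // explicit task and subtask IDs
	files: ['src/index.js'], // files under the project root (skipped if > 50KB)
	semanticQuery: 'authentication flow', // fuzzy-match related tasks via Fuse.js
	includeProjectTree: true,
	format: 'research',
	includeTokenCounts: true
});

console.log(`Context spans ${finalTaskIds.length} task(s), ~${tokenBreakdown.total} tokens`);
```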