This is page 38 of 50. Use http://codebase.md/eyaltoledano/claude-task-master?lines=false&page={x} to view the full context.

# Directory Structure

```
├── .changeset
│   ├── config.json
│   └── README.md
├── .claude
│   ├── commands
│   │   └── dedupe.md
│   └── TM_COMMANDS_GUIDE.md
├── .claude-plugin
│   └── marketplace.json
├── .coderabbit.yaml
├── .cursor
│   ├── mcp.json
│   └── rules
│       ├── ai_providers.mdc
│       ├── ai_services.mdc
│       ├── architecture.mdc
│       ├── changeset.mdc
│       ├── commands.mdc
│       ├── context_gathering.mdc
│       ├── cursor_rules.mdc
│       ├── dependencies.mdc
│       ├── dev_workflow.mdc
│       ├── git_workflow.mdc
│       ├── glossary.mdc
│       ├── mcp.mdc
│       ├── new_features.mdc
│       ├── self_improve.mdc
│       ├── tags.mdc
│       ├── taskmaster.mdc
│       ├── tasks.mdc
│       ├── telemetry.mdc
│       ├── test_workflow.mdc
│       ├── tests.mdc
│       ├── ui.mdc
│       └── utilities.mdc
├── .cursorignore
├── .env.example
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── enhancements---feature-requests.md
│   │   └── feedback.md
│   ├── PULL_REQUEST_TEMPLATE
│   │   ├── bugfix.md
│   │   ├── config.yml
│   │   ├── feature.md
│   │   └── integration.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── scripts
│   │   ├── auto-close-duplicates.mjs
│   │   ├── backfill-duplicate-comments.mjs
│   │   ├── check-pre-release-mode.mjs
│   │   ├── parse-metrics.mjs
│   │   ├── release.mjs
│   │   ├── tag-extension.mjs
│   │   ├── utils.mjs
│   │   └── validate-changesets.mjs
│   └── workflows
│       ├── auto-close-duplicates.yml
│       ├── backfill-duplicate-comments.yml
│       ├── ci.yml
│       ├── claude-dedupe-issues.yml
│       ├── claude-docs-trigger.yml
│       ├── claude-docs-updater.yml
│       ├── claude-issue-triage.yml
│       ├── claude.yml
│       ├── extension-ci.yml
│       ├── extension-release.yml
│       ├── log-issue-events.yml
│       ├── pre-release.yml
│       ├── release-check.yml
│       ├── release.yml
│       ├── update-models-md.yml
│       └── weekly-metrics-discord.yml
├── .gitignore
├── .kiro
│   ├── hooks
│   │   ├── tm-code-change-task-tracker.kiro.hook
│   │   ├── tm-complexity-analyzer.kiro.hook
│   │   ├── tm-daily-standup-assistant.kiro.hook
│   │   ├── tm-git-commit-task-linker.kiro.hook
│   │   ├── tm-pr-readiness-checker.kiro.hook
│   │   ├── tm-task-dependency-auto-progression.kiro.hook
│   │   └── tm-test-success-task-completer.kiro.hook
│   ├── settings
│   │   └── mcp.json
│   └── steering
│       ├── dev_workflow.md
│       ├── kiro_rules.md
│       ├── self_improve.md
│       ├── taskmaster_hooks_workflow.md
│       └── taskmaster.md
├── .manypkg.json
├── .mcp.json
├── .npmignore
├── .nvmrc
├── .taskmaster
│   ├── CLAUDE.md
│   ├── config.json
│   ├── docs
│   │   ├── autonomous-tdd-git-workflow.md
│   │   ├── MIGRATION-ROADMAP.md
│   │   ├── prd-tm-start.txt
│   │   ├── prd.txt
│   │   ├── README.md
│   │   ├── research
│   │   │   ├── 2025-06-14_how-can-i-improve-the-scope-up-and-scope-down-comm.md
│   │   │   ├── 2025-06-14_should-i-be-using-any-specific-libraries-for-this.md
│   │   │   ├── 2025-06-14_test-save-functionality.md
│   │   │   ├── 2025-06-14_test-the-fix-for-duplicate-saves-final-test.md
│   │   │   └── 2025-08-01_do-we-need-to-add-new-commands-or-can-we-just-weap.md
│   │   ├── task-template-importing-prd.txt
│   │   ├── tdd-workflow-phase-0-spike.md
│   │   ├── tdd-workflow-phase-1-core-rails.md
│   │   ├── tdd-workflow-phase-1-orchestrator.md
│   │   ├── tdd-workflow-phase-2-pr-resumability.md
│   │   ├── tdd-workflow-phase-3-extensibility-guardrails.md
│   │   ├── test-prd.txt
│   │   └── tm-core-phase-1.txt
│   ├── reports
│   │   ├── task-complexity-report_autonomous-tdd-git-workflow.json
│   │   ├── task-complexity-report_cc-kiro-hooks.json
│   │   ├── task-complexity-report_tdd-phase-1-core-rails.json
│   │   ├── task-complexity-report_tdd-workflow-phase-0.json
│   │   ├── task-complexity-report_test-prd-tag.json
│   │   ├── task-complexity-report_tm-core-phase-1.json
│   │   ├── task-complexity-report.json
│   │   └── tm-core-complexity.json
│   ├── state.json
│   ├── tasks
│   │   ├── task_001_tm-start.txt
│   │   ├── task_002_tm-start.txt
│   │   ├── task_003_tm-start.txt
│   │   ├── task_004_tm-start.txt
│   │   ├── task_007_tm-start.txt
│   │   └── tasks.json
│   └── templates
│       ├── example_prd_rpg.md
│       └── example_prd.md
├── .vscode
│   ├── extensions.json
│   └── settings.json
├── apps
│   ├── cli
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── src
│   │   │   ├── command-registry.ts
│   │   │   ├── commands
│   │   │   │   ├── auth.command.ts
│   │   │   │   ├── autopilot
│   │   │   │   │   ├── abort.command.ts
│   │   │   │   │   ├── commit.command.ts
│   │   │   │   │   ├── complete.command.ts
│   │   │   │   │   ├── index.ts
│   │   │   │   │   ├── next.command.ts
│   │   │   │   │   ├── resume.command.ts
│   │   │   │   │   ├── shared.ts
│   │   │   │   │   ├── start.command.ts
│   │   │   │   │   └── status.command.ts
│   │   │   │   ├── briefs.command.ts
│   │   │   │   ├── context.command.ts
│   │   │   │   ├── export.command.ts
│   │   │   │   ├── list.command.ts
│   │   │   │   ├── models
│   │   │   │   │   ├── custom-providers.ts
│   │   │   │   │   ├── fetchers.ts
│   │   │   │   │   ├── index.ts
│   │   │   │   │   ├── prompts.ts
│   │   │   │   │   ├── setup.ts
│   │   │   │   │   └── types.ts
│   │   │   │   ├── next.command.ts
│   │   │   │   ├── set-status.command.ts
│   │   │   │   ├── show.command.ts
│   │   │   │   ├── start.command.ts
│   │   │   │   └── tags.command.ts
│   │   │   ├── index.ts
│   │   │   ├── lib
│   │   │   │   └── model-management.ts
│   │   │   ├── types
│   │   │   │   └── tag-management.d.ts
│   │   │   ├── ui
│   │   │   │   ├── components
│   │   │   │   │   ├── cardBox.component.ts
│   │   │   │   │   ├── dashboard.component.ts
│   │   │   │   │   ├── header.component.ts
│   │   │   │   │   ├── index.ts
│   │   │   │   │   ├── next-task.component.ts
│   │   │   │   │   ├── suggested-steps.component.ts
│   │   │   │   │   └── task-detail.component.ts
│   │   │   │   ├── display
│   │   │   │   │   ├── messages.ts
│   │   │   │   │   └── tables.ts
│   │   │   │   ├── formatters
│   │   │   │   │   ├── complexity-formatters.ts
│   │   │   │   │   ├── dependency-formatters.ts
│   │   │   │   │   ├── priority-formatters.ts
│   │   │   │   │   ├── status-formatters.spec.ts
│   │   │   │   │   └── status-formatters.ts
│   │   │   │   ├── index.ts
│   │   │   │   └── layout
│   │   │   │       ├── helpers.spec.ts
│   │   │   │       └── helpers.ts
│   │   │   └── utils
│   │   │       ├── auth-helpers.ts
│   │   │       ├── auto-update.ts
│   │   │       ├── brief-selection.ts
│   │   │       ├── display-helpers.ts
│   │   │       ├── error-handler.ts
│   │   │       ├── index.ts
│   │   │       ├── project-root.ts
│   │   │       ├── task-status.ts
│   │   │       ├── ui.spec.ts
│   │   │       └── ui.ts
│   │   ├── tests
│   │   │   ├── integration
│   │   │   │   └── commands
│   │   │   │       └── autopilot
│   │   │   │           └── workflow.test.ts
│   │   │   └── unit
│   │   │       ├── commands
│   │   │       │   ├── autopilot
│   │   │       │   │   └── shared.test.ts
│   │   │       │   ├── list.command.spec.ts
│   │   │       │   └── show.command.spec.ts
│   │   │       └── ui
│   │   │           └── dashboard.component.spec.ts
│   │   ├── tsconfig.json
│   │   └── vitest.config.ts
│   ├── docs
│   │   ├── archive
│   │   │   ├── ai-client-utils-example.mdx
│   │   │   ├── ai-development-workflow.mdx
│   │   │   ├── command-reference.mdx
│   │   │   ├── configuration.mdx
│   │   │   ├── cursor-setup.mdx
│   │   │   ├── examples.mdx
│   │   │   └── Installation.mdx
│   │   ├── best-practices
│   │   │   ├── advanced-tasks.mdx
│   │   │   ├── configuration-advanced.mdx
│   │   │   └── index.mdx
│   │   ├── capabilities
│   │   │   ├── cli-root-commands.mdx
│   │   │   ├── index.mdx
│   │   │   ├── mcp.mdx
│   │   │   ├── rpg-method.mdx
│   │   │   └── task-structure.mdx
│   │   ├── CHANGELOG.md
│   │   ├── command-reference.mdx
│   │   ├── configuration.mdx
│   │   ├── docs.json
│   │   ├── favicon.svg
│   │   ├── getting-started
│   │   │   ├── api-keys.mdx
│   │   │   ├── contribute.mdx
│   │   │   ├── faq.mdx
│   │   │   └── quick-start
│   │   │       ├── configuration-quick.mdx
│   │   │       ├── execute-quick.mdx
│   │   │       ├── installation.mdx
│   │   │       ├── moving-forward.mdx
│   │   │       ├── prd-quick.mdx
│   │   │       ├── quick-start.mdx
│   │   │       ├── requirements.mdx
│   │   │       ├── rules-quick.mdx
│   │   │       └── tasks-quick.mdx
│   │   ├── introduction.mdx
│   │   ├── licensing.md
│   │   ├── logo
│   │   │   ├── dark.svg
│   │   │   ├── light.svg
│   │   │   └── task-master-logo.png
│   │   ├── package.json
│   │   ├── README.md
│   │   ├── style.css
│   │   ├── tdd-workflow
│   │   │   ├── ai-agent-integration.mdx
│   │   │   └── quickstart.mdx
│   │   ├── vercel.json
│   │   └── whats-new.mdx
│   ├── extension
│   │   ├── .vscodeignore
│   │   ├── assets
│   │   │   ├── banner.png
│   │   │   ├── icon-dark.svg
│   │   │   ├── icon-light.svg
│   │   │   ├── icon.png
│   │   │   ├── screenshots
│   │   │   │   ├── kanban-board.png
│   │   │   │   └── task-details.png
│   │   │   └── sidebar-icon.svg
│   │   ├── CHANGELOG.md
│   │   ├── components.json
│   │   ├── docs
│   │   │   ├── extension-CI-setup.md
│   │   │   └── extension-development-guide.md
│   │   ├── esbuild.js
│   │   ├── LICENSE
│   │   ├── package.json
│   │   ├── package.mjs
│   │   ├── package.publish.json
│   │   ├── README.md
│   │   ├── src
│   │   │   ├── components
│   │   │   │   ├── ConfigView.tsx
│   │   │   │   ├── constants.ts
│   │   │   │   ├── TaskDetails
│   │   │   │   │   ├── AIActionsSection.tsx
│   │   │   │   │   ├── DetailsSection.tsx
│   │   │   │   │   ├── PriorityBadge.tsx
│   │   │   │   │   ├── SubtasksSection.tsx
│   │   │   │   │   ├── TaskMetadataSidebar.tsx
│   │   │   │   │   └── useTaskDetails.ts
│   │   │   │   ├── TaskDetailsView.tsx
│   │   │   │   ├── TaskMasterLogo.tsx
│   │   │   │   └── ui
│   │   │   │       ├── badge.tsx
│   │   │   │       ├── breadcrumb.tsx
│   │   │   │       ├── button.tsx
│   │   │   │       ├── card.tsx
│   │   │   │       ├── collapsible.tsx
│   │   │   │       ├── CollapsibleSection.tsx
│   │   │   │       ├── dropdown-menu.tsx
│   │   │   │       ├── label.tsx
│   │   │   │       ├── scroll-area.tsx
│   │   │   │       ├── separator.tsx
│   │   │   │       ├── shadcn-io
│   │   │   │       │   └── kanban
│   │   │   │       │       └── index.tsx
│   │   │   │       └── textarea.tsx
│   │   │   ├── extension.ts
│   │   │   ├── index.ts
│   │   │   ├── lib
│   │   │   │   └── utils.ts
│   │   │   ├── services
│   │   │   │   ├── config-service.ts
│   │   │   │   ├── error-handler.ts
│   │   │   │   ├── notification-preferences.ts
│   │   │   │   ├── polling-service.ts
│   │   │   │   ├── polling-strategies.ts
│   │   │   │   ├── sidebar-webview-manager.ts
│   │   │   │   ├── task-repository.ts
│   │   │   │   ├── terminal-manager.ts
│   │   │   │   └── webview-manager.ts
│   │   │   ├── test
│   │   │   │   └── extension.test.ts
│   │   │   ├── utils
│   │   │   │   ├── configManager.ts
│   │   │   │   ├── connectionManager.ts
│   │   │   │   ├── errorHandler.ts
│   │   │   │   ├── event-emitter.ts
│   │   │   │   ├── logger.ts
│   │   │   │   ├── mcpClient.ts
│   │   │   │   ├── notificationPreferences.ts
│   │   │   │   └── task-master-api
│   │   │   │       ├── cache
│   │   │   │       │   └── cache-manager.ts
│   │   │   │       ├── index.ts
│   │   │   │       ├── mcp-client.ts
│   │   │   │       ├── transformers
│   │   │   │       │   └── task-transformer.ts
│   │   │   │       └── types
│   │   │   │           └── index.ts
│   │   │   └── webview
│   │   │       ├── App.tsx
│   │   │       ├── components
│   │   │       │   ├── AppContent.tsx
│   │   │       │   ├── EmptyState.tsx
│   │   │       │   ├── ErrorBoundary.tsx
│   │   │       │   ├── PollingStatus.tsx
│   │   │       │   ├── PriorityBadge.tsx
│   │   │       │   ├── SidebarView.tsx
│   │   │       │   ├── TagDropdown.tsx
│   │   │       │   ├── TaskCard.tsx
│   │   │       │   ├── TaskEditModal.tsx
│   │   │       │   ├── TaskMasterKanban.tsx
│   │   │       │   ├── ToastContainer.tsx
│   │   │       │   └── ToastNotification.tsx
│   │   │       ├── constants
│   │   │       │   └── index.ts
│   │   │       ├── contexts
│   │   │       │   └── VSCodeContext.tsx
│   │   │       ├── hooks
│   │   │       │   ├── useTaskQueries.ts
│   │   │       │   ├── useVSCodeMessages.ts
│   │   │       │   └── useWebviewHeight.ts
│   │   │       ├── index.css
│   │   │       ├── index.tsx
│   │   │       ├── providers
│   │   │       │   └── QueryProvider.tsx
│   │   │       ├── reducers
│   │   │       │   └── appReducer.ts
│   │   │       ├── sidebar.tsx
│   │   │       ├── types
│   │   │       │   └── index.ts
│   │   │       └── utils
│   │   │           ├── logger.ts
│   │   │           └── toast.ts
│   │   └── tsconfig.json
│   └── mcp
│       ├── CHANGELOG.md
│       ├── package.json
│       ├── src
│       │   ├── index.ts
│       │   ├── shared
│       │   │   ├── types.ts
│       │   │   └── utils.ts
│       │   └── tools
│       │       ├── autopilot
│       │       │   ├── abort.tool.ts
│       │       │   ├── commit.tool.ts
│       │       │   ├── complete.tool.ts
│       │       │   ├── finalize.tool.ts
│       │       │   ├── index.ts
│       │       │   ├── next.tool.ts
│       │       │   ├── resume.tool.ts
│       │       │   ├── start.tool.ts
│       │       │   └── status.tool.ts
│       │       ├── README-ZOD-V3.md
│       │       └── tasks
│       │           ├── get-task.tool.ts
│       │           ├── get-tasks.tool.ts
│       │           └── index.ts
│       ├── tsconfig.json
│       └── vitest.config.ts
├── assets
│   ├── .windsurfrules
│   ├── AGENTS.md
│   ├── claude
│   │   └── TM_COMMANDS_GUIDE.md
│   ├── config.json
│   ├── env.example
│   ├── example_prd_rpg.txt
│   ├── example_prd.txt
│   ├── GEMINI.md
│   ├── gitignore
│   ├── kiro-hooks
│   │   ├── tm-code-change-task-tracker.kiro.hook
│   │   ├── tm-complexity-analyzer.kiro.hook
│   │   ├── tm-daily-standup-assistant.kiro.hook
│   │   ├── tm-git-commit-task-linker.kiro.hook
│   │   ├── tm-pr-readiness-checker.kiro.hook
│   │   ├── tm-task-dependency-auto-progression.kiro.hook
│   │   └── tm-test-success-task-completer.kiro.hook
│   ├── roocode
│   │   ├── .roo
│   │   │   ├── rules-architect
│   │   │   │   └── architect-rules
│   │   │   ├── rules-ask
│   │   │   │   └── ask-rules
│   │   │   ├── rules-code
│   │   │   │   └── code-rules
│   │   │   ├── rules-debug
│   │   │   │   └── debug-rules
│   │   │   ├── rules-orchestrator
│   │   │   │   └── orchestrator-rules
│   │   │   └── rules-test
│   │   │       └── test-rules
│   │   └── .roomodes
│   ├── rules
│   │   ├── cursor_rules.mdc
│   │   ├── dev_workflow.mdc
│   │   ├── self_improve.mdc
│   │   ├── taskmaster_hooks_workflow.mdc
│   │   └── taskmaster.mdc
│   └── scripts_README.md
├── bin
│   └── task-master.js
├── biome.json
├── CHANGELOG.md
├── CLAUDE_CODE_PLUGIN.md
├── CLAUDE.md
├── context
│   ├── chats
│   │   ├── add-task-dependencies-1.md
│   │   └── max-min-tokens.txt.md
│   ├── fastmcp-core.txt
│   ├── fastmcp-docs.txt
│   ├── MCP_INTEGRATION.md
│   ├── mcp-js-sdk-docs.txt
│   ├── mcp-protocol-repo.txt
│   ├── mcp-protocol-schema-03262025.json
│   └── mcp-protocol-spec.txt
├── CONTRIBUTING.md
├── docs
│   ├── claude-code-integration.md
│   ├── CLI-COMMANDER-PATTERN.md
│   ├── command-reference.md
│   ├── configuration.md
│   ├── contributor-docs
│   │   ├── testing-roo-integration.md
│   │   └── worktree-setup.md
│   ├── cross-tag-task-movement.md
│   ├── examples
│   │   ├── claude-code-usage.md
│   │   └── codex-cli-usage.md
│   ├── examples.md
│   ├── licensing.md
│   ├── mcp-provider-guide.md
│   ├── mcp-provider.md
│   ├── migration-guide.md
│   ├── models.md
│   ├── providers
│   │   ├── codex-cli.md
│   │   └── gemini-cli.md
│   ├── README.md
│   ├── scripts
│   │   └── models-json-to-markdown.js
│   ├── task-structure.md
│   └── tutorial.md
├── images
│   ├── hamster-hiring.png
│   └── logo.png
├── index.js
├── jest.config.js
├── jest.resolver.cjs
├── LICENSE
├── llms-install.md
├── mcp-server
│   ├── server.js
│   └── src
│       ├── core
│       │   ├── __tests__
│       │   │   └── context-manager.test.js
│       │   ├── context-manager.js
│       │   ├── direct-functions
│       │   │   ├── add-dependency.js
│       │   │   ├── add-subtask.js
│       │   │   ├── add-tag.js
│       │   │   ├── add-task.js
│       │   │   ├── analyze-task-complexity.js
│       │   │   ├── cache-stats.js
│       │   │   ├── clear-subtasks.js
│       │   │   ├── complexity-report.js
│       │   │   ├── copy-tag.js
│       │   │   ├── create-tag-from-branch.js
│       │   │   ├── delete-tag.js
│       │   │   ├── expand-all-tasks.js
│       │   │   ├── expand-task.js
│       │   │   ├── fix-dependencies.js
│       │   │   ├── generate-task-files.js
│       │   │   ├── initialize-project.js
│       │   │   ├── list-tags.js
│       │   │   ├── models.js
│       │   │   ├── move-task-cross-tag.js
│       │   │   ├── move-task.js
│       │   │   ├── next-task.js
│       │   │   ├── parse-prd.js
│       │   │   ├── remove-dependency.js
│       │   │   ├── remove-subtask.js
│       │   │   ├── remove-task.js
│       │   │   ├── rename-tag.js
│       │   │   ├── research.js
│       │   │   ├── response-language.js
│       │   │   ├── rules.js
│       │   │   ├── scope-down.js
│       │   │   ├── scope-up.js
│       │   │   ├── set-task-status.js
│       │   │   ├── update-subtask-by-id.js
│       │   │   ├── update-task-by-id.js
│       │   │   ├── update-tasks.js
│       │   │   ├── use-tag.js
│       │   │   └── validate-dependencies.js
│       │   ├── task-master-core.js
│       │   └── utils
│       │       ├── env-utils.js
│       │       └── path-utils.js
│       ├── custom-sdk
│       │   ├── errors.js
│       │   ├── index.js
│       │   ├── json-extractor.js
│       │   ├── language-model.js
│       │   ├── message-converter.js
│       │   └── schema-converter.js
│       ├── index.js
│       ├── logger.js
│       ├── providers
│       │   └── mcp-provider.js
│       └── tools
│           ├── add-dependency.js
│           ├── add-subtask.js
│           ├── add-tag.js
│           ├── add-task.js
│           ├── analyze.js
│           ├── clear-subtasks.js
│           ├── complexity-report.js
│           ├── copy-tag.js
│           ├── delete-tag.js
│           ├── expand-all.js
│           ├── expand-task.js
│           ├── fix-dependencies.js
│           ├── generate.js
│           ├── get-operation-status.js
│           ├── index.js
│           ├── initialize-project.js
│           ├── list-tags.js
│           ├── models.js
│           ├── move-task.js
│           ├── next-task.js
│           ├── parse-prd.js
│           ├── README-ZOD-V3.md
│           ├── remove-dependency.js
│           ├── remove-subtask.js
│           ├── remove-task.js
│           ├── rename-tag.js
│           ├── research.js
│           ├── response-language.js
│           ├── rules.js
│           ├── scope-down.js
│           ├── scope-up.js
│           ├── set-task-status.js
│           ├── tool-registry.js
│           ├── update-subtask.js
│           ├── update-task.js
│           ├── update.js
│           ├── use-tag.js
│           ├── utils.js
│           └── validate-dependencies.js
├── mcp-test.js
├── output.json
├── package-lock.json
├── package.json
├── packages
│   ├── ai-sdk-provider-grok-cli
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── README.md
│   │   ├── src
│   │   │   ├── errors.test.ts
│   │   │   ├── errors.ts
│   │   │   ├── grok-cli-language-model.ts
│   │   │   ├── grok-cli-provider.test.ts
│   │   │   ├── grok-cli-provider.ts
│   │   │   ├── index.ts
│   │   │   ├── json-extractor.test.ts
│   │   │   ├── json-extractor.ts
│   │   │   ├── message-converter.test.ts
│   │   │   ├── message-converter.ts
│   │   │   └── types.ts
│   │   └── tsconfig.json
│   ├── build-config
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── src
│   │   │   └── tsdown.base.ts
│   │   └── tsconfig.json
│   ├── claude-code-plugin
│   │   ├── .claude-plugin
│   │   │   └── plugin.json
│   │   ├── .gitignore
│   │   ├── agents
│   │   │   ├── task-checker.md
│   │   │   ├── task-executor.md
│   │   │   └── task-orchestrator.md
│   │   ├── CHANGELOG.md
│   │   ├── commands
│   │   │   ├── add-dependency.md
│   │   │   ├── add-subtask.md
│   │   │   ├── add-task.md
│   │   │   ├── analyze-complexity.md
│   │   │   ├── analyze-project.md
│   │   │   ├── auto-implement-tasks.md
│   │   │   ├── command-pipeline.md
│   │   │   ├── complexity-report.md
│   │   │   ├── convert-task-to-subtask.md
│   │   │   ├── expand-all-tasks.md
│   │   │   ├── expand-task.md
│   │   │   ├── fix-dependencies.md
│   │   │   ├── generate-tasks.md
│   │   │   ├── help.md
│   │   │   ├── init-project-quick.md
│   │   │   ├── init-project.md
│   │   │   ├── install-taskmaster.md
│   │   │   ├── learn.md
│   │   │   ├── list-tasks-by-status.md
│   │   │   ├── list-tasks-with-subtasks.md
│   │   │   ├── list-tasks.md
│   │   │   ├── next-task.md
│   │   │   ├── parse-prd-with-research.md
│   │   │   ├── parse-prd.md
│   │   │   ├── project-status.md
│   │   │   ├── quick-install-taskmaster.md
│   │   │   ├── remove-all-subtasks.md
│   │   │   ├── remove-dependency.md
│   │   │   ├── remove-subtask.md
│   │   │   ├── remove-subtasks.md
│   │   │   ├── remove-task.md
│   │   │   ├── setup-models.md
│   │   │   ├── show-task.md
│   │   │   ├── smart-workflow.md
│   │   │   ├── sync-readme.md
│   │   │   ├── tm-main.md
│   │   │   ├── to-cancelled.md
│   │   │   ├── to-deferred.md
│   │   │   ├── to-done.md
│   │   │   ├── to-in-progress.md
│   │   │   ├── to-pending.md
│   │   │   ├── to-review.md
│   │   │   ├── update-single-task.md
│   │   │   ├── update-task.md
│   │   │   ├── update-tasks-from-id.md
│   │   │   ├── validate-dependencies.md
│   │   │   └── view-models.md
│   │   ├── mcp.json
│   │   └── package.json
│   ├── tm-bridge
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── README.md
│   │   ├── src
│   │   │   ├── add-tag-bridge.ts
│   │   │   ├── bridge-types.ts
│   │   │   ├── bridge-utils.ts
│   │   │   ├── expand-bridge.ts
│   │   │   ├── index.ts
│   │   │   ├── tags-bridge.ts
│   │   │   ├── update-bridge.ts
│   │   │   └── use-tag-bridge.ts
│   │   └── tsconfig.json
│   └── tm-core
│       ├── .gitignore
│       ├── CHANGELOG.md
│       ├── docs
│       │   └── listTasks-architecture.md
│       ├── package.json
│       ├── POC-STATUS.md
│       ├── README.md
│       ├── src
│       │   ├── common
│       │   │   ├── constants
│       │   │   │   ├── index.ts
│       │   │   │   ├── paths.ts
│       │   │   │   └── providers.ts
│       │   │   ├── errors
│       │   │   │   ├── index.ts
│       │   │   │   └── task-master-error.ts
│       │   │   ├── interfaces
│       │   │   │   ├── configuration.interface.ts
│       │   │   │   ├── index.ts
│       │   │   │   └── storage.interface.ts
│       │   │   ├── logger
│       │   │   │   ├── factory.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── logger.spec.ts
│       │   │   │   └── logger.ts
│       │   │   ├── mappers
│       │   │   │   ├── TaskMapper.test.ts
│       │   │   │   └── TaskMapper.ts
│       │   │   ├── types
│       │   │   │   ├── database.types.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── legacy.ts
│       │   │   │   └── repository-types.ts
│       │   │   └── utils
│       │   │       ├── git-utils.ts
│       │   │       ├── id-generator.ts
│       │   │       ├── index.ts
│       │   │       ├── path-helpers.ts
│       │   │       ├── path-normalizer.spec.ts
│       │   │       ├── path-normalizer.ts
│       │   │       ├── project-root-finder.spec.ts
│       │   │       ├── project-root-finder.ts
│       │   │       ├── run-id-generator.spec.ts
│       │   │       └── run-id-generator.ts
│       │   ├── index.ts
│       │   ├── modules
│       │   │   ├── ai
│       │   │   │   ├── index.ts
│       │   │   │   ├── interfaces
│       │   │   │   │   └── ai-provider.interface.ts
│       │   │   │   └── providers
│       │   │   │       ├── base-provider.ts
│       │   │   │       └── index.ts
│       │   │   ├── auth
│       │   │   │   ├── auth-domain.spec.ts
│       │   │   │   ├── auth-domain.ts
│       │   │   │   ├── config.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── managers
│       │   │   │   │   ├── auth-manager.spec.ts
│       │   │   │   │   └── auth-manager.ts
│       │   │   │   ├── services
│       │   │   │   │   ├── context-store.ts
│       │   │   │   │   ├── oauth-service.ts
│       │   │   │   │   ├── organization.service.ts
│       │   │   │   │   ├── supabase-session-storage.spec.ts
│       │   │   │   │   └── supabase-session-storage.ts
│       │   │   │   └── types.ts
│       │   │   ├── briefs
│       │   │   │   ├── briefs-domain.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── services
│       │   │   │   │   └── brief-service.ts
│       │   │   │   ├── types.ts
│       │   │   │   └── utils
│       │   │   │       └── url-parser.ts
│       │   │   ├── commands
│       │   │   │   └── index.ts
│       │   │   ├── config
│       │   │   │   ├── config-domain.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── managers
│       │   │   │   │   ├── config-manager.spec.ts
│       │   │   │   │   └── config-manager.ts
│       │   │   │   └── services
│       │   │   │       ├── config-loader.service.spec.ts
│       │   │   │       ├── config-loader.service.ts
│       │   │   │       ├── config-merger.service.spec.ts
│       │   │   │       ├── config-merger.service.ts
│       │   │   │       ├── config-persistence.service.spec.ts
│       │   │   │       ├── config-persistence.service.ts
│       │   │   │       ├── environment-config-provider.service.spec.ts
│       │   │   │       ├── environment-config-provider.service.ts
│       │   │   │       ├── index.ts
│       │   │   │       ├── runtime-state-manager.service.spec.ts
│       │   │   │       └── runtime-state-manager.service.ts
│       │   │   ├── dependencies
│       │   │   │   └── index.ts
│       │   │   ├── execution
│       │   │   │   ├── executors
│       │   │   │   │   ├── base-executor.ts
│       │   │   │   │   ├── claude-executor.ts
│       │   │   │   │   └── executor-factory.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── services
│       │   │   │   │   └── executor-service.ts
│       │   │   │   └── types.ts
│       │   │   ├── git
│       │   │   │   ├── adapters
│       │   │   │   │   ├── git-adapter.test.ts
│       │   │   │   │   └── git-adapter.ts
│       │   │   │   ├── git-domain.ts
│       │   │   │   ├── index.ts
│       │   │   │   └── services
│       │   │   │       ├── branch-name-generator.spec.ts
│       │   │   │       ├── branch-name-generator.ts
│       │   │   │       ├── commit-message-generator.test.ts
│       │   │   │       ├── commit-message-generator.ts
│       │   │   │       ├── scope-detector.test.ts
│       │   │   │       ├── scope-detector.ts
│       │   │   │       ├── template-engine.test.ts
│       │   │   │       └── template-engine.ts
│       │   │   ├── integration
│       │   │   │   ├── clients
│       │   │   │   │   ├── index.ts
│       │   │   │   │   └── supabase-client.ts
│       │   │   │   ├── integration-domain.ts
│       │   │   │   └── services
│       │   │   │       ├── export.service.ts
│       │   │   │       ├── task-expansion.service.ts
│       │   │   │       └── task-retrieval.service.ts
│       │   │   ├── reports
│       │   │   │   ├── index.ts
│       │   │   │   ├── managers
│       │   │   │   │   └── complexity-report-manager.ts
│       │   │   │   └── types.ts
│       │   │   ├── storage
│       │   │   │   ├── adapters
│       │   │   │   │   ├── activity-logger.ts
│       │   │   │   │   ├── api-storage.ts
│       │   │   │   │   └── file-storage
│       │   │   │   │       ├── file-operations.ts
│       │   │   │   │       ├── file-storage.ts
│       │   │   │   │       ├── format-handler.ts
│       │   │   │   │       ├── index.ts
│       │   │   │   │       └── path-resolver.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── services
│       │   │   │   │   └── storage-factory.ts
│       │   │   │   └── utils
│       │   │   │       └── api-client.ts
│       │   │   ├── tasks
│       │   │   │   ├── entities
│       │   │   │   │   └── task.entity.ts
│       │   │   │   ├── parser
│       │   │   │   │   └── index.ts
│       │   │   │   ├── repositories
│       │   │   │   │   ├── supabase
│       │   │   │   │   │   ├── dependency-fetcher.ts
│       │   │   │   │   │   ├── index.ts
│       │   │   │   │   │   └── supabase-repository.ts
│       │   │   │   │   └── task-repository.interface.ts
│       │   │   │   ├── services
│       │   │   │   │   ├── preflight-checker.service.ts
│       │   │   │   │   ├── tag.service.ts
│       │   │   │   │   ├── task-execution-service.ts
│       │   │   │   │   ├── task-loader.service.ts
│       │   │   │   │   └── task-service.ts
│       │   │   │   └── tasks-domain.ts
│       │   │   ├── ui
│       │   │   │   └── index.ts
│       │   │   └── workflow
│       │   │       ├── managers
│       │   │       │   ├── workflow-state-manager.spec.ts
│       │   │       │   └── workflow-state-manager.ts
│       │   │       ├── orchestrators
│       │   │       │   ├── workflow-orchestrator.test.ts
│       │   │       │   └── workflow-orchestrator.ts
│       │   │       ├── services
│       │   │       │   ├── test-result-validator.test.ts
│       │   │       │   ├── test-result-validator.ts
│       │   │       │   ├── test-result-validator.types.ts
│       │   │       │   ├── workflow-activity-logger.ts
│       │   │       │   └── workflow.service.ts
│       │   │       ├── types.ts
│       │   │       └── workflow-domain.ts
│       │   ├── subpath-exports.test.ts
│       │   ├── tm-core.ts
│       │   └── utils
│       │       └── time.utils.ts
│       ├── tests
│       │   ├── auth
│       │   │   └── auth-refresh.test.ts
│       │   ├── integration
│       │   │   ├── auth-token-refresh.test.ts
│       │   │   ├── list-tasks.test.ts
│       │   │   └── storage
│       │   │       └── activity-logger.test.ts
│       │   ├── mocks
│       │   │   └── mock-provider.ts
│       │   ├── setup.ts
│       │   └── unit
│       │       ├── base-provider.test.ts
│       │       ├── executor.test.ts
│       │       └── smoke.test.ts
│       ├── tsconfig.json
│       └── vitest.config.ts
├── README-task-master.md
├── README.md
├── scripts
│   ├── create-worktree.sh
│   ├── dev.js
│   ├── init.js
│   ├── list-worktrees.sh
│   ├── modules
│   │   ├── ai-services-unified.js
│   │   ├── bridge-utils.js
│   │   ├── commands.js
│   │   ├── config-manager.js
│   │   ├── dependency-manager.js
│   │   ├── index.js
│   │   ├── prompt-manager.js
│   │   ├── supported-models.json
│   │   ├── sync-readme.js
│   │   ├── task-manager
│   │   │   ├── add-subtask.js
│   │   │   ├── add-task.js
│   │   │   ├── analyze-task-complexity.js
│   │   │   ├── clear-subtasks.js
│   │   │   ├── expand-all-tasks.js
│   │   │   ├── expand-task.js
│   │   │   ├── find-next-task.js
│   │   │   ├── generate-task-files.js
│   │   │   ├── is-task-dependent.js
│   │   │   ├── list-tasks.js
│   │   │   ├── migrate.js
│   │   │   ├── models.js
│   │   │   ├── move-task.js
│   │   │   ├── parse-prd
│   │   │   │   ├── index.js
│   │   │   │   ├── parse-prd-config.js
│   │   │   │   ├── parse-prd-helpers.js
│   │   │   │   ├── parse-prd-non-streaming.js
│   │   │   │   ├── parse-prd-streaming.js
│   │   │   │   └── parse-prd.js
│   │   │   ├── remove-subtask.js
│   │   │   ├── remove-task.js
│   │   │   ├── research.js
│   │   │   ├── response-language.js
│   │   │   ├── scope-adjustment.js
│   │   │   ├── set-task-status.js
│   │   │   ├── tag-management.js
│   │   │   ├── task-exists.js
│   │   │   ├── update-single-task-status.js
│   │   │   ├── update-subtask-by-id.js
│   │   │   ├── update-task-by-id.js
│   │   │   └── update-tasks.js
│   │   ├── task-manager.js
│   │   ├── ui.js
│   │   ├── update-config-tokens.js
│   │   ├── utils
│   │   │   ├── contextGatherer.js
│   │   │   ├── fuzzyTaskSearch.js
│   │   │   └── git-utils.js
│   │   └── utils.js
│   ├── task-complexity-report.json
│   ├── test-claude-errors.js
│   └── test-claude.js
├── sonar-project.properties
├── src
│   ├── ai-providers
│   │   ├── anthropic.js
│   │   ├── azure.js
│   │   ├── base-provider.js
│   │   ├── bedrock.js
│   │   ├── claude-code.js
│   │   ├── codex-cli.js
│   │   ├── gemini-cli.js
│   │   ├── google-vertex.js
│   │   ├── google.js
│   │   ├── grok-cli.js
│   │   ├── groq.js
│   │   ├── index.js
│   │   ├── lmstudio.js
│   │   ├── ollama.js
│   │   ├── openai-compatible.js
│   │   ├── openai.js
│   │   ├── openrouter.js
│   │   ├── perplexity.js
│   │   ├── xai.js
│   │   ├── zai-coding.js
│   │   └── zai.js
│   ├── constants
│   │   ├── commands.js
│   │   ├── paths.js
│   │   ├── profiles.js
│   │   ├── rules-actions.js
│   │   ├── task-priority.js
│   │   └── task-status.js
│   ├── profiles
│   │   ├── amp.js
│   │   ├── base-profile.js
│   │   ├── claude.js
│   │   ├── cline.js
│   │   ├── codex.js
│   │   ├── cursor.js
│   │   ├── gemini.js
│   │   ├── index.js
│   │   ├── kilo.js
│   │   ├── kiro.js
│   │   ├── opencode.js
│   │   ├── roo.js
│   │   ├── trae.js
│   │   ├── vscode.js
│   │   ├── windsurf.js
│   │   └── zed.js
│   ├── progress
│   │   ├── base-progress-tracker.js
│   │   ├── cli-progress-factory.js
│   │   ├── parse-prd-tracker.js
│   │   ├── progress-tracker-builder.js
│   │   └── tracker-ui.js
│   ├── prompts
│   │   ├── add-task.json
│   │   ├── analyze-complexity.json
│   │   ├── expand-task.json
│   │   ├── parse-prd.json
│   │   ├── README.md
│   │   ├── research.json
│   │   ├── schemas
│   │   │   ├── parameter.schema.json
│   │   │   ├── prompt-template.schema.json
│   │   │   ├── README.md
│   │   │   └── variant.schema.json
│   │   ├── update-subtask.json
│   │   ├── update-task.json
│   │   └── update-tasks.json
│   ├── provider-registry
│   │   └── index.js
│   ├── schemas
│   │   ├── add-task.js
│   │   ├── analyze-complexity.js
│   │   ├── base-schemas.js
│   │   ├── expand-task.js
│   │   ├── parse-prd.js
│   │   ├── registry.js
│   │   ├── update-subtask.js
│   │   ├── update-task.js
│   │   └── update-tasks.js
│   ├── task-master.js
│   ├── ui
│   │   ├── confirm.js
│   │   ├── indicators.js
│   │   └── parse-prd.js
│   └── utils
│       ├── asset-resolver.js
│       ├── create-mcp-config.js
│       ├── format.js
│       ├── getVersion.js
│       ├── logger-utils.js
│       ├── manage-gitignore.js
│       ├── path-utils.js
│       ├── profiles.js
│       ├── rule-transformer.js
│       ├── stream-parser.js
│       └── timeout-manager.js
├── test-clean-tags.js
├── test-config-manager.js
├── test-prd.txt
├── test-tag-functions.js
├── test-version-check-full.js
├── test-version-check.js
├── tests
│   ├── e2e
│   │   ├── e2e_helpers.sh
│   │   ├── parse_llm_output.cjs
│   │   ├── run_e2e.sh
│   │   ├── run_fallback_verification.sh
│   │   └── test_llm_analysis.sh
│   ├── fixtures
│   │   ├── .taskmasterconfig
│   │   ├── sample-claude-response.js
│   │   ├── sample-prd.txt
│   │   └── sample-tasks.js
│   ├── helpers
│   │   └── tool-counts.js
│   ├── integration
│   │   ├── claude-code-error-handling.test.js
│   │   ├── claude-code-optional.test.js
│   │   ├── cli
│   │   │   ├── commands.test.js
│   │   │   ├── complex-cross-tag-scenarios.test.js
│   │   │   └── move-cross-tag.test.js
│   │   ├── manage-gitignore.test.js
│   │   ├── mcp-server
│   │   │   └── direct-functions.test.js
│   │   ├── move-task-cross-tag.integration.test.js
│   │   ├── move-task-simple.integration.test.js
│   │   ├── profiles
│   │   │   ├── amp-init-functionality.test.js
│   │   │   ├── claude-init-functionality.test.js
│   │   │   ├── cline-init-functionality.test.js
│   │   │   ├── codex-init-functionality.test.js
│   │   │   ├── cursor-init-functionality.test.js
│   │   │   ├── gemini-init-functionality.test.js
│   │   │   ├── opencode-init-functionality.test.js
│   │   │   ├── roo-files-inclusion.test.js
│   │   │   ├── roo-init-functionality.test.js
│   │   │   ├── rules-files-inclusion.test.js
│   │   │   ├── trae-init-functionality.test.js
│   │   │   ├── vscode-init-functionality.test.js
│   │   │   └── windsurf-init-functionality.test.js
│   │   └── providers
│   │       └── temperature-support.test.js
│   ├── manual
│   │   ├── progress
│   │   │   ├── parse-prd-analysis.js
│   │   │   ├── test-parse-prd.js
│   │   │   └── TESTING_GUIDE.md
│   │   └── prompts
│   │       ├── prompt-test.js
│   │       └── README.md
│   ├── README.md
│   ├── setup.js
│   └── unit
│       ├── ai-providers
│       │   ├── base-provider.test.js
│       │   ├── claude-code.test.js
│       │   ├── codex-cli.test.js
│       │   ├── gemini-cli.test.js
│       │   ├── lmstudio.test.js
│       │   ├── mcp-components.test.js
│       │   ├── openai-compatible.test.js
│       │   ├── openai.test.js
│       │   ├── provider-registry.test.js
│       │   ├── zai-coding.test.js
│       │   ├── zai-provider.test.js
│       │   ├── zai-schema-introspection.test.js
│       │   └── zai.test.js
│       ├── ai-services-unified.test.js
│       ├── commands.test.js
│       ├── config-manager.test.js
│       ├── config-manager.test.mjs
│       ├── dependency-manager.test.js
│       ├── init.test.js
│       ├── initialize-project.test.js
│       ├── kebab-case-validation.test.js
│       ├── manage-gitignore.test.js
│       ├── mcp
│       │   └── tools
│       │       ├── __mocks__
│       │       │   └── move-task.js
│       │       ├── add-task.test.js
│       │       ├── analyze-complexity.test.js
│       │       ├── expand-all.test.js
│       │       ├── get-tasks.test.js
│       │       ├── initialize-project.test.js
│       │       ├── move-task-cross-tag-options.test.js
│       │       ├── move-task-cross-tag.test.js
│       │       ├── remove-task.test.js
│       │       └── tool-registration.test.js
│       ├── mcp-providers
│       │   ├── mcp-components.test.js
│       │   └── mcp-provider.test.js
│       ├── parse-prd.test.js
│       ├── profiles
│       │   ├── amp-integration.test.js
│       │   ├── claude-integration.test.js
│       │   ├── cline-integration.test.js
│       │   ├── codex-integration.test.js
│       │   ├── cursor-integration.test.js
│       │   ├── gemini-integration.test.js
│       │   ├── kilo-integration.test.js
│       │   ├── kiro-integration.test.js
│       │   ├── mcp-config-validation.test.js
│       │   ├── opencode-integration.test.js
│       │   ├── profile-safety-check.test.js
│       │   ├── roo-integration.test.js
│       │   ├── rule-transformer-cline.test.js
│       │   ├── rule-transformer-cursor.test.js
│       │   ├── rule-transformer-gemini.test.js
│       │   ├── rule-transformer-kilo.test.js
│       │   ├── rule-transformer-kiro.test.js
│       │   ├── rule-transformer-opencode.test.js
│       │   ├── rule-transformer-roo.test.js
│       │   ├── rule-transformer-trae.test.js
│       │   ├── rule-transformer-vscode.test.js
│       │   ├── rule-transformer-windsurf.test.js
│       │   ├── rule-transformer-zed.test.js
│       │   ├── rule-transformer.test.js
│       │   ├── selective-profile-removal.test.js
│       │   ├── subdirectory-support.test.js
│       │   ├── trae-integration.test.js
│       │   ├── vscode-integration.test.js
│       │   ├── windsurf-integration.test.js
│       │   └── zed-integration.test.js
│       ├── progress
│       │   └── base-progress-tracker.test.js
│       ├── prompt-manager.test.js
│       ├── prompts
│       │   ├── expand-task-prompt.test.js
│       │   └── prompt-migration.test.js
│       ├── scripts
│       │   └── modules
│       │       ├── commands
│       │       │   ├── move-cross-tag.test.js
│       │       │   └── README.md
│       │       ├── dependency-manager
│       │       │   ├── circular-dependencies.test.js
│       │       │   ├── cross-tag-dependencies.test.js
│       │       │   └── fix-dependencies-command.test.js
│       │       ├── task-manager
│       │       │   ├── add-subtask.test.js
│       │       │   ├── add-task.test.js
│       │       │   ├── analyze-task-complexity.test.js
│       │       │   ├── clear-subtasks.test.js
│       │       │   ├── complexity-report-tag-isolation.test.js
│       │       │   ├── expand-all-tasks.test.js
│       │       │   ├── expand-task.test.js
│       │       │   ├── find-next-task.test.js
│       │       │   ├── generate-task-files.test.js
│       │       │   ├── list-tasks.test.js
│       │       │   ├── models-baseurl.test.js
│       │       │   ├── move-task-cross-tag.test.js
│       │       │   ├── move-task.test.js
│       │       │   ├── parse-prd-schema.test.js
│       │       │   ├── parse-prd.test.js
│       │       │   ├── remove-subtask.test.js
│       │       │   ├── remove-task.test.js
│       │       │   ├── research.test.js
│       │       │   ├── scope-adjustment.test.js
│       │       │   ├── set-task-status.test.js
│       │       │   ├── setup.js
│       │       │   ├── update-single-task-status.test.js
│       │       │   ├── update-subtask-by-id.test.js
│       │       │   ├── update-task-by-id.test.js
│       │       │   └── update-tasks.test.js
│       │       ├── ui
│       │       │   └── cross-tag-error-display.test.js
│       │       └── utils-tag-aware-paths.test.js
│       ├── task-finder.test.js
│       ├── task-manager
│       │   ├── clear-subtasks.test.js
│       │   ├── move-task.test.js
│       │   ├── tag-boundary.test.js
│       │   └── tag-management.test.js
│       ├── task-master.test.js
│       ├── ui
│       │   └── indicators.test.js
│       ├── ui.test.js
│       ├── utils-strip-ansi.test.js
│       └── utils.test.js
├── tsconfig.json
├── tsdown.config.ts
├── turbo.json
└── update-task-migration-plan.md
```

# Files

--------------------------------------------------------------------------------
/tests/unit/ai-services-unified.test.js:
--------------------------------------------------------------------------------

```javascript
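// Unit tests for the unified AI service layer in
// scripts/modules/ai-services-unified.js. Every dependency (config-manager, the
// AI provider classes, and the shared utils module) is mocked before the module
// is imported, so the role-based provider selection and fallback chain can be
// exercised without real API calls.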
import { jest } from '@jest/globals';

// Mock config-manager
const mockGetMainProvider = jest.fn();
const mockGetMainModelId = jest.fn();
const mockGetResearchProvider = jest.fn();
const mockGetResearchModelId = jest.fn();
const mockGetFallbackProvider = jest.fn();
const mockGetFallbackModelId = jest.fn();
const mockGetParametersForRole = jest.fn();
const mockGetResponseLanguage = jest.fn();
const mockGetUserId = jest.fn();
const mockGetDebugFlag = jest.fn();
const mockIsApiKeySet = jest.fn();

// --- Mock MODEL_MAP Data ---
// Provide a simplified structure sufficient for cost calculation tests
const mockModelMap = {
	anthropic: [
		{
			id: 'test-main-model',
			cost_per_1m_tokens: { input: 3, output: 15, currency: 'USD' }
		},
		{
			id: 'test-fallback-model',
			cost_per_1m_tokens: { input: 3, output: 15, currency: 'USD' }
		}
	],
	perplexity: [
		{
			id: 'test-research-model',
			cost_per_1m_tokens: { input: 1, output: 1, currency: 'USD' }
		}
	],
	openai: [
		{
			id: 'test-openai-model',
			cost_per_1m_tokens: { input: 2, output: 6, currency: 'USD' }
		}
	]
	// Add other providers/models if needed for specific tests
};
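// Rates above are per 1M tokens, so a call that used 10 input and 20 output
// tokens on 'test-main-model' would presumably be costed by the telemetry layer
// as (10 / 1e6) * 3 + (20 / 1e6) * 15 USD.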
const mockGetBaseUrlForRole = jest.fn();
const mockGetAllProviders = jest.fn();
const mockGetOllamaBaseURL = jest.fn();
const mockGetAzureBaseURL = jest.fn();
const mockGetBedrockBaseURL = jest.fn();
const mockGetVertexProjectId = jest.fn();
const mockGetVertexLocation = jest.fn();
const mockGetAvailableModels = jest.fn();
const mockValidateProvider = jest.fn();
const mockValidateProviderModelCombination = jest.fn();
const mockGetConfig = jest.fn();
const mockWriteConfig = jest.fn();
const mockIsConfigFilePresent = jest.fn();
const mockGetMcpApiKeyStatus = jest.fn();
const mockGetMainMaxTokens = jest.fn();
const mockGetMainTemperature = jest.fn();
const mockGetResearchMaxTokens = jest.fn();
const mockGetResearchTemperature = jest.fn();
const mockGetFallbackMaxTokens = jest.fn();
const mockGetFallbackTemperature = jest.fn();
const mockGetLogLevel = jest.fn();
const mockGetDefaultNumTasks = jest.fn();
const mockGetDefaultSubtasks = jest.fn();
const mockGetDefaultPriority = jest.fn();
const mockGetProjectName = jest.fn();

jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({
	// Core config access
	getConfig: mockGetConfig,
	writeConfig: mockWriteConfig,
	isConfigFilePresent: mockIsConfigFilePresent,
	ConfigurationError: class ConfigurationError extends Error {
		constructor(message) {
			super(message);
			this.name = 'ConfigurationError';
		}
	},

	// Validation
	validateProvider: mockValidateProvider,
	validateProviderModelCombination: mockValidateProviderModelCombination,
	VALID_PROVIDERS: ['anthropic', 'perplexity', 'openai', 'google'],
	MODEL_MAP: mockModelMap,
	getAvailableModels: mockGetAvailableModels,

	// Role-specific getters
	getMainProvider: mockGetMainProvider,
	getMainModelId: mockGetMainModelId,
	getMainMaxTokens: mockGetMainMaxTokens,
	getMainTemperature: mockGetMainTemperature,
	getResearchProvider: mockGetResearchProvider,
	getResearchModelId: mockGetResearchModelId,
	getResearchMaxTokens: mockGetResearchMaxTokens,
	getResearchTemperature: mockGetResearchTemperature,
	getFallbackProvider: mockGetFallbackProvider,
	getFallbackModelId: mockGetFallbackModelId,
	getFallbackMaxTokens: mockGetFallbackMaxTokens,
	getFallbackTemperature: mockGetFallbackTemperature,
	getParametersForRole: mockGetParametersForRole,
	getResponseLanguage: mockGetResponseLanguage,
	getUserId: mockGetUserId,
	getDebugFlag: mockGetDebugFlag,
	getBaseUrlForRole: mockGetBaseUrlForRole,

	// Global settings
	getLogLevel: mockGetLogLevel,
	getDefaultNumTasks: mockGetDefaultNumTasks,
	getDefaultSubtasks: mockGetDefaultSubtasks,
	getDefaultPriority: mockGetDefaultPriority,
	getProjectName: mockGetProjectName,

	// API Key and provider functions
	isApiKeySet: mockIsApiKeySet,
	getAllProviders: mockGetAllProviders,
	getOllamaBaseURL: mockGetOllamaBaseURL,
	getAzureBaseURL: mockGetAzureBaseURL,
	getBedrockBaseURL: mockGetBedrockBaseURL,
	getVertexProjectId: mockGetVertexProjectId,
	getVertexLocation: mockGetVertexLocation,
	getMcpApiKeyStatus: mockGetMcpApiKeyStatus,

	// Providers without API keys
	providersWithoutApiKeys: ['ollama', 'bedrock', 'gemini-cli', 'codex-cli']
}));
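// jest.unstable_mockModule replaces the entire module, so the factory above must
// supply every named export the code under test pulls from config-manager.js;
// a name left out here would surface as a missing export when the module is
// dynamically imported below.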

// Mock AI Provider Classes with proper methods
const mockAnthropicProvider = {
	generateText: jest.fn(),
	streamText: jest.fn(),
	generateObject: jest.fn(),
	getRequiredApiKeyName: jest.fn(() => 'ANTHROPIC_API_KEY'),
	isRequiredApiKey: jest.fn(() => true)
};

const mockPerplexityProvider = {
	generateText: jest.fn(),
	streamText: jest.fn(),
	generateObject: jest.fn(),
	getRequiredApiKeyName: jest.fn(() => 'PERPLEXITY_API_KEY'),
	isRequiredApiKey: jest.fn(() => true)
};

const mockOpenAIProvider = {
	generateText: jest.fn(),
	streamText: jest.fn(),
	generateObject: jest.fn(),
	getRequiredApiKeyName: jest.fn(() => 'OPENAI_API_KEY'),
	isRequiredApiKey: jest.fn(() => true)
};

const mockOllamaProvider = {
	generateText: jest.fn(),
	streamText: jest.fn(),
	generateObject: jest.fn(),
	getRequiredApiKeyName: jest.fn(() => null),
	isRequiredApiKey: jest.fn(() => false)
};

// Codex CLI mock provider instance
const mockCodexProvider = {
	generateText: jest.fn(),
	streamText: jest.fn(),
	generateObject: jest.fn(),
	getRequiredApiKeyName: jest.fn(() => 'OPENAI_API_KEY'),
	isRequiredApiKey: jest.fn(() => false)
};

// Claude Code mock provider instance
const mockClaudeProvider = {
	generateText: jest.fn(),
	streamText: jest.fn(),
	generateObject: jest.fn(),
	getRequiredApiKeyName: jest.fn(() => 'CLAUDE_CODE_API_KEY'),
	isRequiredApiKey: jest.fn(() => false)
};

// Mock the provider classes to return our mock instances
jest.unstable_mockModule('../../src/ai-providers/index.js', () => ({
	AnthropicAIProvider: jest.fn(() => mockAnthropicProvider),
	PerplexityAIProvider: jest.fn(() => mockPerplexityProvider),
	GoogleAIProvider: jest.fn(() => ({
		generateText: jest.fn(),
		streamText: jest.fn(),
		generateObject: jest.fn(),
		getRequiredApiKeyName: jest.fn(() => 'GOOGLE_GENERATIVE_AI_API_KEY'),
		isRequiredApiKey: jest.fn(() => true)
	})),
	OpenAIProvider: jest.fn(() => mockOpenAIProvider),
	XAIProvider: jest.fn(() => ({
		generateText: jest.fn(),
		streamText: jest.fn(),
		generateObject: jest.fn(),
		getRequiredApiKeyName: jest.fn(() => 'XAI_API_KEY'),
		isRequiredApiKey: jest.fn(() => true)
	})),
	GroqProvider: jest.fn(() => ({
		generateText: jest.fn(),
		streamText: jest.fn(),
		generateObject: jest.fn(),
		getRequiredApiKeyName: jest.fn(() => 'GROQ_API_KEY'),
		isRequiredApiKey: jest.fn(() => true)
	})),
	OpenRouterAIProvider: jest.fn(() => ({
		generateText: jest.fn(),
		streamText: jest.fn(),
		generateObject: jest.fn(),
		getRequiredApiKeyName: jest.fn(() => 'OPENROUTER_API_KEY'),
		isRequiredApiKey: jest.fn(() => true)
	})),
	OllamaAIProvider: jest.fn(() => mockOllamaProvider),
	BedrockAIProvider: jest.fn(() => ({
		generateText: jest.fn(),
		streamText: jest.fn(),
		generateObject: jest.fn(),
		getRequiredApiKeyName: jest.fn(() => 'AWS_ACCESS_KEY_ID'),
		isRequiredApiKey: jest.fn(() => false)
	})),
	AzureProvider: jest.fn(() => ({
		generateText: jest.fn(),
		streamText: jest.fn(),
		generateObject: jest.fn(),
		getRequiredApiKeyName: jest.fn(() => 'AZURE_API_KEY'),
		isRequiredApiKey: jest.fn(() => true)
	})),
	VertexAIProvider: jest.fn(() => ({
		generateText: jest.fn(),
		streamText: jest.fn(),
		generateObject: jest.fn(),
		getRequiredApiKeyName: jest.fn(() => null),
		isRequiredApiKey: jest.fn(() => false)
	})),
	ClaudeCodeProvider: jest.fn(() => mockClaudeProvider),
	GeminiCliProvider: jest.fn(() => ({
		generateText: jest.fn(),
		streamText: jest.fn(),
		generateObject: jest.fn(),
		getRequiredApiKeyName: jest.fn(() => 'GEMINI_API_KEY'),
		isRequiredApiKey: jest.fn(() => false)
	})),
	CodexCliProvider: jest.fn(() => mockCodexProvider),
	GrokCliProvider: jest.fn(() => ({
		generateText: jest.fn(),
		streamText: jest.fn(),
		generateObject: jest.fn(),
		getRequiredApiKeyName: jest.fn(() => 'XAI_API_KEY'),
		isRequiredApiKey: jest.fn(() => false)
	})),
	OpenAICompatibleProvider: jest.fn(() => ({
		generateText: jest.fn(),
		streamText: jest.fn(),
		generateObject: jest.fn(),
		getRequiredApiKeyName: jest.fn(() => 'OPENAI_COMPATIBLE_API_KEY'),
		isRequiredApiKey: jest.fn(() => true)
	})),
	ZAIProvider: jest.fn(() => ({
		generateText: jest.fn(),
		streamText: jest.fn(),
		generateObject: jest.fn(),
		getRequiredApiKeyName: jest.fn(() => 'ZAI_API_KEY'),
		isRequiredApiKey: jest.fn(() => true)
	})),
	ZAICodingProvider: jest.fn(() => ({
		generateText: jest.fn(),
		streamText: jest.fn(),
		generateObject: jest.fn(),
		getRequiredApiKeyName: jest.fn(() => 'ZAI_API_KEY'),
		isRequiredApiKey: jest.fn(() => true)
	})),
	LMStudioProvider: jest.fn(() => ({
		generateText: jest.fn(),
		streamText: jest.fn(),
		generateObject: jest.fn(),
		getRequiredApiKeyName: jest.fn(() => 'LMSTUDIO_API_KEY'),
		isRequiredApiKey: jest.fn(() => false)
	}))
}));
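// Note: only the shared instances above (mockAnthropicProvider, mockPerplexityProvider,
// mockOpenAIProvider, mockOllamaProvider, mockCodexProvider, mockClaudeProvider) are
// referenced by the tests; the inline factories cover providers that must exist for
// the module to load but are never exercised directly.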

// Mock the utils module (logger, API key resolver, findProjectRoot, and assorted helpers)
const mockLog = jest.fn();
const mockResolveEnvVariable = jest.fn();
const mockFindProjectRoot = jest.fn();
const mockIsSilentMode = jest.fn();
const mockLogAiUsage = jest.fn();
const mockFindCycles = jest.fn();
const mockFormatTaskId = jest.fn();
const mockTaskExists = jest.fn();
const mockFindTaskById = jest.fn();
const mockTruncate = jest.fn();
const mockToKebabCase = jest.fn();
const mockDetectCamelCaseFlags = jest.fn();
const mockDisableSilentMode = jest.fn();
const mockEnableSilentMode = jest.fn();
const mockGetTaskManager = jest.fn();
const mockAddComplexityToTask = jest.fn();
const mockReadJSON = jest.fn();
const mockWriteJSON = jest.fn();
const mockSanitizePrompt = jest.fn();
const mockReadComplexityReport = jest.fn();
const mockFindTaskInComplexityReport = jest.fn();
const mockAggregateTelemetry = jest.fn();
const mockGetCurrentTag = jest.fn(() => 'master');
const mockResolveTag = jest.fn(() => 'master');
const mockGetTasksForTag = jest.fn(() => []);

jest.unstable_mockModule('../../scripts/modules/utils.js', () => ({
	LOG_LEVELS: { error: 0, warn: 1, info: 2, debug: 3 },
	log: mockLog,
	resolveEnvVariable: mockResolveEnvVariable,
	findProjectRoot: mockFindProjectRoot,
	isSilentMode: mockIsSilentMode,
	logAiUsage: mockLogAiUsage,
	findCycles: mockFindCycles,
	formatTaskId: mockFormatTaskId,
	taskExists: mockTaskExists,
	findTaskById: mockFindTaskById,
	truncate: mockTruncate,
	toKebabCase: mockToKebabCase,
	detectCamelCaseFlags: mockDetectCamelCaseFlags,
	disableSilentMode: mockDisableSilentMode,
	enableSilentMode: mockEnableSilentMode,
	getTaskManager: mockGetTaskManager,
	addComplexityToTask: mockAddComplexityToTask,
	readJSON: mockReadJSON,
	writeJSON: mockWriteJSON,
	sanitizePrompt: mockSanitizePrompt,
	readComplexityReport: mockReadComplexityReport,
	findTaskInComplexityReport: mockFindTaskInComplexityReport,
	aggregateTelemetry: mockAggregateTelemetry,
	getCurrentTag: mockGetCurrentTag,
	resolveTag: mockResolveTag,
	getTasksForTag: mockGetTasksForTag
}));

// Import the module under test AFTER all mocks are registered:
// jest.unstable_mockModule only affects modules loaded afterwards, so a static
// top-level import would bypass the mocks.
const { generateTextService } = await import(
	'../../scripts/modules/ai-services-unified.js'
);

describe('Unified AI Services', () => {
	const fakeProjectRoot = '/fake/project/root'; // Define for reuse

	beforeEach(() => {
		// Reset recorded calls on every mock before each test
		jest.clearAllMocks();

		// Set default mock behaviors
		mockGetMainProvider.mockReturnValue('anthropic');
		mockGetMainModelId.mockReturnValue('test-main-model');
		mockGetResearchProvider.mockReturnValue('perplexity');
		mockGetResearchModelId.mockReturnValue('test-research-model');
		mockGetFallbackProvider.mockReturnValue('anthropic');
		mockGetFallbackModelId.mockReturnValue('test-fallback-model');
		mockGetParametersForRole.mockImplementation((role) => {
			if (role === 'main') return { maxTokens: 100, temperature: 0.5 };
			if (role === 'research') return { maxTokens: 200, temperature: 0.3 };
			if (role === 'fallback') return { maxTokens: 150, temperature: 0.6 };
			return { maxTokens: 100, temperature: 0.5 }; // Default
		});
		mockGetResponseLanguage.mockReturnValue('English');
		mockResolveEnvVariable.mockImplementation((key) => {
			if (key === 'ANTHROPIC_API_KEY') return 'mock-anthropic-key';
			if (key === 'PERPLEXITY_API_KEY') return 'mock-perplexity-key';
			if (key === 'OPENAI_API_KEY') return 'mock-openai-key';
			if (key === 'OLLAMA_API_KEY') return 'mock-ollama-key';
			return null;
		});

		// Set a default behavior for the new mock
		mockFindProjectRoot.mockReturnValue(fakeProjectRoot);
		mockGetDebugFlag.mockReturnValue(false);
		mockGetUserId.mockReturnValue('test-user-id'); // Add default mock for getUserId
		mockIsApiKeySet.mockReturnValue(true); // Default to true for most tests
		mockGetBaseUrlForRole.mockReturnValue(null); // Default to no base URL
	});

	describe('generateTextService', () => {
		test('should use main provider/model and succeed', async () => {
			mockAnthropicProvider.generateText.mockResolvedValue({
				text: 'Main provider response',
				usage: { inputTokens: 10, outputTokens: 20, totalTokens: 30 }
			});

			const params = {
				role: 'main',
				session: { env: {} },
				systemPrompt: 'System',
				prompt: 'Test'
			};
			const result = await generateTextService(params);

			expect(result.mainResult).toBe('Main provider response');
			expect(result).toHaveProperty('telemetryData');
			expect(mockGetMainProvider).toHaveBeenCalledWith(fakeProjectRoot);
			expect(mockGetMainModelId).toHaveBeenCalledWith(fakeProjectRoot);
			expect(mockGetParametersForRole).toHaveBeenCalledWith(
				'main',
				fakeProjectRoot
			);
			expect(mockAnthropicProvider.generateText).toHaveBeenCalledTimes(1);
			expect(mockPerplexityProvider.generateText).not.toHaveBeenCalled();
		});

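		// Fallback behaviour under test below: the service walks roles in order
		// (main -> fallback -> research). Main and fallback both resolve to the
		// anthropic mock here, so a failed main call is retried on the same mock
		// instance before perplexity (research) is consulted.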
		test('should fall back to fallback provider if main fails', async () => {
			const mainError = new Error('Main provider failed');
			mockAnthropicProvider.generateText
				.mockRejectedValueOnce(mainError)
				.mockResolvedValueOnce({
					text: 'Fallback provider response',
					usage: { inputTokens: 15, outputTokens: 25, totalTokens: 40 }
				});

			const explicitRoot = '/explicit/test/root';
			const params = {
				role: 'main',
				prompt: 'Fallback test',
				projectRoot: explicitRoot
			};
			const result = await generateTextService(params);

			expect(result.mainResult).toBe('Fallback provider response');
			expect(result).toHaveProperty('telemetryData');
			expect(mockGetMainProvider).toHaveBeenCalledWith(explicitRoot);
			expect(mockGetFallbackProvider).toHaveBeenCalledWith(explicitRoot);
			expect(mockGetParametersForRole).toHaveBeenCalledWith(
				'main',
				explicitRoot
			);
			expect(mockGetParametersForRole).toHaveBeenCalledWith(
				'fallback',
				explicitRoot
			);

			expect(mockAnthropicProvider.generateText).toHaveBeenCalledTimes(2);
			expect(mockPerplexityProvider.generateText).not.toHaveBeenCalled();
			expect(mockLog).toHaveBeenCalledWith(
				'error',
				expect.stringContaining('Service call failed for role main')
			);
			expect(mockLog).toHaveBeenCalledWith(
				'debug',
				expect.stringContaining('New AI service call with role: fallback')
			);
		});

		test('should fall back to research provider if main and fallback fail', async () => {
			const mainError = new Error('Main failed');
			const fallbackError = new Error('Fallback failed');
			mockAnthropicProvider.generateText
				.mockRejectedValueOnce(mainError)
				.mockRejectedValueOnce(fallbackError);
			mockPerplexityProvider.generateText.mockResolvedValue({
				text: 'Research provider response',
				usage: { inputTokens: 20, outputTokens: 30, totalTokens: 50 }
			});

			const params = { role: 'main', prompt: 'Research fallback test' };
			const result = await generateTextService(params);

			expect(result.mainResult).toBe('Research provider response');
			expect(result).toHaveProperty('telemetryData');
			expect(mockGetMainProvider).toHaveBeenCalledWith(fakeProjectRoot);
			expect(mockGetFallbackProvider).toHaveBeenCalledWith(fakeProjectRoot);
			expect(mockGetResearchProvider).toHaveBeenCalledWith(fakeProjectRoot);
			expect(mockGetParametersForRole).toHaveBeenCalledWith(
				'main',
				fakeProjectRoot
			);
			expect(mockGetParametersForRole).toHaveBeenCalledWith(
				'fallback',
				fakeProjectRoot
			);
			expect(mockGetParametersForRole).toHaveBeenCalledWith(
				'research',
				fakeProjectRoot
			);

			expect(mockAnthropicProvider.generateText).toHaveBeenCalledTimes(2);
			expect(mockPerplexityProvider.generateText).toHaveBeenCalledTimes(1);
			expect(mockLog).toHaveBeenCalledWith(
				'error',
				expect.stringContaining('Service call failed for role fallback')
			);
			expect(mockLog).toHaveBeenCalledWith(
				'debug',
				expect.stringContaining('New AI service call with role: research')
			);
		});

		test('should throw error if all providers in sequence fail', async () => {
			mockAnthropicProvider.generateText.mockRejectedValue(
				new Error('Anthropic failed')
			);
			mockPerplexityProvider.generateText.mockRejectedValue(
				new Error('Perplexity failed')
			);

			const params = { role: 'main', prompt: 'All fail test' };

			await expect(generateTextService(params)).rejects.toThrow(
				'Perplexity failed' // Error from the last attempt (research)
			);

			expect(mockAnthropicProvider.generateText).toHaveBeenCalledTimes(2); // main, fallback
			expect(mockPerplexityProvider.generateText).toHaveBeenCalledTimes(1); // research
		});

		test('should handle retryable errors correctly', async () => {
			const retryableError = new Error('Rate limit');
			mockAnthropicProvider.generateText
				.mockRejectedValueOnce(retryableError) // Fails once
				.mockResolvedValueOnce({
					// Succeeds on retry
					text: 'Success after retry',
					usage: { inputTokens: 5, outputTokens: 10, totalTokens: 15 }
				});

			const params = { role: 'main', prompt: 'Retry success test' };
			const result = await generateTextService(params);

			expect(result.mainResult).toBe('Success after retry');
			expect(result).toHaveProperty('telemetryData');
			expect(mockAnthropicProvider.generateText).toHaveBeenCalledTimes(2); // Initial + 1 retry
			expect(mockLog).toHaveBeenCalledWith(
				'info',
				expect.stringContaining(
					'Something went wrong on the provider side. Retrying'
				)
			);
		});

		test('should use default project root or handle null if findProjectRoot returns null', async () => {
			mockFindProjectRoot.mockReturnValue(null); // Simulate not finding root
			mockAnthropicProvider.generateText.mockResolvedValue({
				text: 'Response with no root',
				usage: { inputTokens: 1, outputTokens: 1, totalTokens: 2 }
			});

			const params = { role: 'main', prompt: 'No root test' }; // No explicit root passed
			await generateTextService(params);

			expect(mockGetMainProvider).toHaveBeenCalledWith(null);
			expect(mockGetParametersForRole).toHaveBeenCalledWith('main', null);
			expect(mockAnthropicProvider.generateText).toHaveBeenCalledTimes(1);
		});

		test('should use configured responseLanguage in system prompt', async () => {
			mockGetResponseLanguage.mockReturnValue('中文');
			mockAnthropicProvider.generateText.mockResolvedValue('中文回复');

			const params = {
				role: 'main',
				systemPrompt: 'You are an assistant',
				prompt: 'Hello'
			};
			await generateTextService(params);

			expect(mockAnthropicProvider.generateText).toHaveBeenCalledWith(
				expect.objectContaining({
					messages: [
						{
							role: 'system',
							content: expect.stringContaining('Always respond in 中文')
						},
						{ role: 'user', content: 'Hello' }
					]
				})
			);
			expect(mockGetResponseLanguage).toHaveBeenCalledWith(fakeProjectRoot);
		});

		test('should pass custom projectRoot to getResponseLanguage', async () => {
			const customRoot = '/custom/project/root';
			mockGetResponseLanguage.mockReturnValue('Español');
			mockAnthropicProvider.generateText.mockResolvedValue(
				'Respuesta en Español'
			);

			const params = {
				role: 'main',
				systemPrompt: 'You are an assistant',
				prompt: 'Hello',
				projectRoot: customRoot
			};
			await generateTextService(params);

			expect(mockGetResponseLanguage).toHaveBeenCalledWith(customRoot);
			expect(mockAnthropicProvider.generateText).toHaveBeenCalledWith(
				expect.objectContaining({
					messages: [
						{
							role: 'system',
							content: expect.stringContaining('Always respond in Español')
						},
						{ role: 'user', content: 'Hello' }
					]
				})
			);
		});

		// Add more tests for edge cases:
		// - Missing API keys (should throw from _resolveApiKey)
		// - Unsupported provider configured (should skip and log)
		// - Missing provider/model config for a role (should skip and log)
		// - Missing prompt
		// - Different initial roles (research, fallback)
		// - generateObjectService (mock schema, check object result)
		// - streamTextService (more complex to test, might need stream helpers)
		test('should skip provider with missing API key and try next in fallback sequence', async () => {
			// Mock anthropic to throw API key error
			mockAnthropicProvider.generateText.mockRejectedValue(
				new Error(
					"Required API key ANTHROPIC_API_KEY for provider 'anthropic' is not set in environment, session, or .env file."
				)
			);

			// Mock perplexity text response (since we'll skip anthropic)
			mockPerplexityProvider.generateText.mockResolvedValue({
				text: 'Perplexity response (skipped to research)',
				usage: { inputTokens: 20, outputTokens: 30, totalTokens: 50 }
			});

			const params = {
				role: 'main',
				prompt: 'Skip main provider test',
				session: { env: {} }
			};

			const result = await generateTextService(params);

			// Should have gotten the perplexity response
			expect(result.mainResult).toBe(
				'Perplexity response (skipped to research)'
			);

			// Should log an error for the failed provider
			expect(mockLog).toHaveBeenCalledWith(
				'error',
				expect.stringContaining(`Service call failed for role main`)
			);

			// Should attempt to call anthropic provider first
			expect(mockAnthropicProvider.generateText).toHaveBeenCalled();

			// Should call perplexity provider after anthropic fails
			expect(mockPerplexityProvider.generateText).toHaveBeenCalledTimes(1);
		});

		test('should skip multiple providers with missing API keys and use first available', async () => {
			// Define different providers for testing multiple skips
			mockGetFallbackProvider.mockReturnValue('openai'); // Different from main
			mockGetFallbackModelId.mockReturnValue('test-openai-model');

			// Mock providers to throw API key errors (simulating _resolveApiKey behavior)
			mockAnthropicProvider.generateText.mockRejectedValue(
				new Error(
					"Required API key ANTHROPIC_API_KEY for provider 'anthropic' is not set in environment, session, or .env file."
				)
			);
			mockOpenAIProvider.generateText.mockRejectedValue(
				new Error(
					"Required API key OPENAI_API_KEY for provider 'openai' is not set in environment, session, or .env file."
				)
			);

			// Mock perplexity text response (since we'll skip to research)
			mockPerplexityProvider.generateText.mockResolvedValue({
				text: 'Research response after skipping main and fallback',
				usage: { inputTokens: 20, outputTokens: 30, totalTokens: 50 }
			});

			const params = {
				role: 'main',
				prompt: 'Skip multiple providers test',
				session: { env: {} }
			};

			const result = await generateTextService(params);

			// Should have gotten the perplexity (research) response
			expect(result.mainResult).toBe(
				'Research response after skipping main and fallback'
			);

			// Should log errors for both skipped providers
			expect(mockLog).toHaveBeenCalledWith(
				'error',
				expect.stringContaining(`Service call failed for role main`)
			);
			expect(mockLog).toHaveBeenCalledWith(
				'error',
				expect.stringContaining(`Service call failed for role fallback`)
			);

			// Should call all providers in sequence until one succeeds
			expect(mockAnthropicProvider.generateText).toHaveBeenCalled();
			expect(mockOpenAIProvider.generateText).toHaveBeenCalled();

			// Should call perplexity provider which succeeds
			expect(mockPerplexityProvider.generateText).toHaveBeenCalledTimes(1);
		});

		test('should throw error if all providers in sequence have missing API keys', async () => {
			// Mock all providers to throw API key errors
			mockAnthropicProvider.generateText.mockRejectedValue(
				new Error(
					"Required API key ANTHROPIC_API_KEY for provider 'anthropic' is not set in environment, session, or .env file."
				)
			);
			mockPerplexityProvider.generateText.mockRejectedValue(
				new Error(
					"Required API key PERPLEXITY_API_KEY for provider 'perplexity' is not set in environment, session, or .env file."
				)
			);

			const params = {
				role: 'main',
				prompt: 'All API keys missing test',
				session: { env: {} }
			};

			// Should throw error since all providers would fail
			await expect(generateTextService(params)).rejects.toThrow(
				"Required API key PERPLEXITY_API_KEY for provider 'perplexity' is not set"
			);

			// Should log errors for all failed providers
			expect(mockLog).toHaveBeenCalledWith(
				'error',
				expect.stringContaining(`Service call failed for role main`)
			);
			expect(mockLog).toHaveBeenCalledWith(
				'error',
				expect.stringContaining(`Service call failed for role fallback`)
			);
			expect(mockLog).toHaveBeenCalledWith(
				'error',
				expect.stringContaining(`Service call failed for role research`)
			);

			// Should log final error
			expect(mockLog).toHaveBeenCalledWith(
				'error',
				expect.stringContaining(
					'All roles in the sequence [main, fallback, research] failed.'
				)
			);

			// Should attempt to call all providers in sequence
			expect(mockAnthropicProvider.generateText).toHaveBeenCalled();
			expect(mockPerplexityProvider.generateText).toHaveBeenCalled();
		});

		test('should not check API key for Ollama provider and try to use it', async () => {
			// Setup: Set main provider to ollama
			mockGetMainProvider.mockReturnValue('ollama');
			mockGetMainModelId.mockReturnValue('llama3');

			// Ollama requires no API key, so mock isApiKeySet to false up front
			// to prove the provider is still used without one
			mockIsApiKeySet.mockReturnValue(false);

			// Mock Ollama text generation to succeed
			mockOllamaProvider.generateText.mockResolvedValue({
				text: 'Ollama response (no API key required)',
				usage: { inputTokens: 10, outputTokens: 10, totalTokens: 20 }
			});

			const params = {
				role: 'main',
				prompt: 'Ollama special case test',
				session: { env: {} }
			};

			const result = await generateTextService(params);

			// Should have gotten the Ollama response
			expect(result.mainResult).toBe('Ollama response (no API key required)');

			// isApiKeySet shouldn't gate Ollama: the provider call above succeeded
			// even though isApiKeySet was mocked to false before the service call,
			// confirming the key check is skipped for the ollama provider

			// Should call Ollama provider
			expect(mockOllamaProvider.generateText).toHaveBeenCalledTimes(1);
		});

		test('should correctly use the provided session for API key resolution', async () => {
			// Mock custom session object with env vars
			const customSession = { env: { ANTHROPIC_API_KEY: 'session-api-key' } };

			// Mock the anthropic response - if API key resolution works, this will be called
			mockAnthropicProvider.generateText.mockResolvedValue({
				text: 'Anthropic response with session key',
				usage: { inputTokens: 10, outputTokens: 10, totalTokens: 20 }
			});

			const params = {
				role: 'main',
				prompt: 'Session API key test',
				session: customSession
			};

			const result = await generateTextService(params);

			// Should have successfully resolved API key from session and called provider
			expect(mockAnthropicProvider.generateText).toHaveBeenCalled();

			// Should have gotten the anthropic response
			expect(result.mainResult).toBe('Anthropic response with session key');
		});

		// --- Codex CLI specific tests ---
		test('should use codex-cli provider without API key (OAuth)', async () => {
			// Arrange codex-cli as main provider
			mockGetMainProvider.mockReturnValue('codex-cli');
			mockGetMainModelId.mockReturnValue('gpt-5-codex');
			mockGetParametersForRole.mockReturnValue({
				maxTokens: 128000,
				temperature: 1
			});
			mockGetResponseLanguage.mockReturnValue('English');
			// No API key in env
			mockResolveEnvVariable.mockReturnValue(null);
			// Mock codex generateText response
			mockCodexProvider.generateText.mockResolvedValueOnce({
				text: 'ok',
				usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }
			});

			const { generateTextService } = await import(
				'../../scripts/modules/ai-services-unified.js'
			);

			const result = await generateTextService({
				role: 'main',
				prompt: 'Hello Codex',
				projectRoot: fakeProjectRoot
			});

			expect(result.mainResult).toBe('ok');
			expect(mockCodexProvider.generateText).toHaveBeenCalledWith(
				expect.objectContaining({
					modelId: 'gpt-5-codex',
					apiKey: null,
					maxTokens: 128000
				})
			);
		});

		test('should pass apiKey to codex-cli when provided', async () => {
			// Arrange codex-cli as main provider
			mockGetMainProvider.mockReturnValue('codex-cli');
			mockGetMainModelId.mockReturnValue('gpt-5-codex');
			mockGetParametersForRole.mockReturnValue({
				maxTokens: 128000,
				temperature: 1
			});
			mockGetResponseLanguage.mockReturnValue('English');
			// Provide API key via env resolver
			mockResolveEnvVariable.mockReturnValue('sk-test');
			// Mock codex generateText response
			mockCodexProvider.generateText.mockResolvedValueOnce({
				text: 'ok-with-key',
				usage: { inputTokens: 1, outputTokens: 1, totalTokens: 2 }
			});

			const { generateTextService } = await import(
				'../../scripts/modules/ai-services-unified.js'
			);

			const result = await generateTextService({
				role: 'main',
				prompt: 'Hello Codex',
				projectRoot: fakeProjectRoot
			});

			expect(result.mainResult).toBe('ok-with-key');
			expect(mockCodexProvider.generateText).toHaveBeenCalledWith(
				expect.objectContaining({
					modelId: 'gpt-5-codex',
					apiKey: 'sk-test'
				})
			);
		});

		// --- Claude Code specific test ---
		test('should pass temperature to claude-code provider (provider handles filtering)', async () => {
			mockGetMainProvider.mockReturnValue('claude-code');
			mockGetMainModelId.mockReturnValue('sonnet');
			mockGetParametersForRole.mockReturnValue({
				maxTokens: 64000,
				temperature: 0.7
			});
			mockGetResponseLanguage.mockReturnValue('English');
			mockResolveEnvVariable.mockReturnValue(null);

			mockClaudeProvider.generateText.mockResolvedValueOnce({
				text: 'ok-claude',
				usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }
			});

			const { generateTextService } = await import(
				'../../scripts/modules/ai-services-unified.js'
			);

			const result = await generateTextService({
				role: 'main',
				prompt: 'Hello Claude',
				projectRoot: fakeProjectRoot
			});

			expect(result.mainResult).toBe('ok-claude');
			// The provider (BaseAIProvider) is responsible for filtering it based on supportsTemperature
			const callArgs = mockClaudeProvider.generateText.mock.calls[0][0];
			expect(callArgs).toHaveProperty('temperature', 0.7);
			expect(callArgs.maxTokens).toBe(64000);
		});
	});
});

```

--------------------------------------------------------------------------------
/tests/unit/scripts/modules/task-manager/complexity-report-tag-isolation.test.js:
--------------------------------------------------------------------------------

```javascript
/**
 * Tests for complexity report tag isolation functionality
 * Verifies that different tags maintain separate complexity reports
 */

import { jest } from '@jest/globals';
import fs from 'fs';
import path from 'path';

// Mock fs module - consolidated single registration
const mockExistsSync = jest.fn();
const mockReadFileSync = jest.fn();
const mockWriteFileSync = jest.fn();
const mockUnlinkSync = jest.fn();
const mockMkdirSync = jest.fn();
const mockReaddirSync = jest.fn(() => []);
const mockStatSync = jest.fn(() => ({ isDirectory: () => false }));

jest.unstable_mockModule('fs', () => ({
	default: {
		existsSync: mockExistsSync,
		readFileSync: mockReadFileSync,
		writeFileSync: mockWriteFileSync,
		unlinkSync: mockUnlinkSync,
		mkdirSync: mockMkdirSync,
		readdirSync: mockReaddirSync,
		statSync: mockStatSync
	},
	existsSync: mockExistsSync,
	readFileSync: mockReadFileSync,
	writeFileSync: mockWriteFileSync,
	unlinkSync: mockUnlinkSync,
	mkdirSync: mockMkdirSync,
	readdirSync: mockReaddirSync,
	statSync: mockStatSync
}));

// Mock the dependencies
jest.unstable_mockModule('../../../../../src/utils/path-utils.js', () => ({
	resolveComplexityReportOutputPath: jest.fn(),
	findComplexityReportPath: jest.fn(),
	findConfigPath: jest.fn(),
	findPRDPath: jest.fn(() => '/mock/project/root/.taskmaster/docs/PRD.md'),
	findTasksPath: jest.fn(
		() => '/mock/project/root/.taskmaster/tasks/tasks.json'
	),
	findProjectRoot: jest.fn(() => '/mock/project/root'),
	normalizeProjectRoot: jest.fn((root) => root)
}));

jest.unstable_mockModule('../../../../../scripts/modules/utils.js', () => ({
	readJSON: jest.fn(),
	writeJSON: jest.fn(),
	log: jest.fn(),
	isSilentMode: jest.fn(() => false),
	enableSilentMode: jest.fn(),
	disableSilentMode: jest.fn(),
	flattenTasksWithSubtasks: jest.fn((tasks) => tasks),
	getTagAwareFilePath: jest.fn((basePath, tag, projectRoot) => {
		if (tag && tag !== 'master') {
			const dir = path.dirname(basePath);
			const ext = path.extname(basePath);
			const name = path.basename(basePath, ext);
			return path.join(projectRoot || '.', dir, `${name}_${tag}${ext}`);
		}
		return path.join(projectRoot || '.', basePath);
	}),
	findTaskById: jest.fn((tasks, taskId) => {
		if (!tasks || !Array.isArray(tasks)) {
			return { task: null, originalSubtaskCount: null, originalSubtasks: null };
		}
		const id = parseInt(taskId, 10);
		const task = tasks.find((t) => t.id === id);
		return task
			? { task, originalSubtaskCount: null, originalSubtasks: null }
			: { task: null, originalSubtaskCount: null, originalSubtasks: null };
	}),
	taskExists: jest.fn((tasks, taskId) => {
		if (!tasks || !Array.isArray(tasks)) return false;
		const id = parseInt(taskId, 10);
		return tasks.some((t) => t.id === id);
	}),
	formatTaskId: jest.fn((id) => `Task ${id}`),
	findCycles: jest.fn(() => []),
	truncate: jest.fn((text) => text),
	addComplexityToTask: jest.fn((task, complexity) => ({ ...task, complexity })),
	aggregateTelemetry: jest.fn((telemetryArray) => telemetryArray[0] || {}),
	ensureTagMetadata: jest.fn((tagObj) => tagObj),
	getCurrentTag: jest.fn(() => 'master'),
	resolveTag: jest.fn(() => 'master'),
	markMigrationForNotice: jest.fn(),
	performCompleteTagMigration: jest.fn(),
	setTasksForTag: jest.fn(),
	getTasksForTag: jest.fn((data, tag) => data[tag]?.tasks || []),
	findProjectRoot: jest.fn(() => '/mock/project/root'),
	readComplexityReport: jest.fn(),
	findTaskInComplexityReport: jest.fn(),
	resolveEnvVariable: jest.fn((varName) => `mock_${varName}`),
	isEmpty: jest.fn(() => false),
	normalizeProjectRoot: jest.fn((root) => root),
	slugifyTagForFilePath: jest.fn((tagName) => {
		if (!tagName || typeof tagName !== 'string') {
			return 'unknown-tag';
		}
		return tagName.replace(/[^a-zA-Z0-9_-]/g, '-').toLowerCase();
	}),
	createTagAwareFilePath: jest.fn((basePath, tag, projectRoot) => {
		if (tag && tag !== 'master') {
			const dir = path.dirname(basePath);
			const ext = path.extname(basePath);
			const name = path.basename(basePath, ext);
			// Use the slugified tag
			const slugifiedTag = tag.replace(/[^a-zA-Z0-9_-]/g, '-').toLowerCase();
			return path.join(
				projectRoot || '.',
				dir,
				`${name}_${slugifiedTag}${ext}`
			);
		}
		return path.join(projectRoot || '.', basePath);
	}),
	traverseDependencies: jest.fn((sourceTasks, allTasks, options = {}) => []),
	CONFIG: {
		defaultSubtasks: 3
	}
}));

jest.unstable_mockModule(
	'../../../../../scripts/modules/ai-services-unified.js',
	() => ({
		generateTextService: jest.fn().mockImplementation((params) => {
			const commandName = params?.commandName || 'default';

			if (commandName === 'analyze-complexity') {
				// Check if this is for a specific tag test by looking at the prompt
				const isFeatureTag =
					params?.prompt?.includes('feature') || params?.role === 'feature';
				const isMasterTag =
					params?.prompt?.includes('master') || params?.role === 'master';

				let taskTitle = 'Test Task';
				if (isFeatureTag) {
					taskTitle = 'Feature Task 1';
				} else if (isMasterTag) {
					taskTitle = 'Master Task 1';
				}

				return Promise.resolve({
					mainResult: JSON.stringify([
						{
							taskId: 1,
							taskTitle: taskTitle,
							complexityScore: 7,
							recommendedSubtasks: 4,
							expansionPrompt: 'Break down this task',
							reasoning: 'This task is moderately complex'
						},
						{
							taskId: 2,
							taskTitle: 'Task 2',
							complexityScore: 5,
							recommendedSubtasks: 3,
							expansionPrompt: 'Break down this task with a focus on task 2.',
							reasoning:
								'Automatically added due to missing analysis in AI response.'
						}
					]),
					telemetryData: {
						timestamp: new Date().toISOString(),
						commandName: 'analyze-complexity',
						modelUsed: 'claude-3-5-sonnet',
						providerName: 'anthropic',
						inputTokens: 1000,
						outputTokens: 500,
						totalTokens: 1500,
						totalCost: 0.012414,
						currency: 'USD'
					}
				});
			} else {
				// Default for expand-task and others
				return Promise.resolve({
					mainResult: JSON.stringify({
						subtasks: [
							{
								id: 1,
								title: 'Subtask 1',
								description: 'First subtask',
								dependencies: [],
								details: 'Implementation details',
								status: 'pending',
								testStrategy: 'Test strategy'
							}
						]
					}),
					telemetryData: {
						timestamp: new Date().toISOString(),
						commandName: commandName || 'expand-task',
						modelUsed: 'claude-3-5-sonnet',
						providerName: 'anthropic',
						inputTokens: 1000,
						outputTokens: 500,
						totalTokens: 1500,
						totalCost: 0.012414,
						currency: 'USD'
					}
				});
			}
		}),
		streamTextService: jest.fn().mockResolvedValue({
			mainResult: async function* () {
				yield '{"tasks":[';
				yield '{"id":1,"title":"Test Task","priority":"high"}';
				yield ']}';
			},
			telemetryData: {
				timestamp: new Date().toISOString(),
				commandName: 'analyze-complexity',
				modelUsed: 'claude-3-5-sonnet',
				providerName: 'anthropic',
				inputTokens: 1000,
				outputTokens: 500,
				totalTokens: 1500,
				totalCost: 0.012414,
				currency: 'USD'
			}
		}),
		generateObjectService: jest.fn().mockImplementation((params) => {
			const commandName = params?.commandName || 'default';

			if (commandName === 'analyze-complexity') {
				// Check if this is for a specific tag test by looking at the prompt
				const isFeatureTag =
					params?.prompt?.includes('feature') || params?.role === 'feature';
				const isMasterTag =
					params?.prompt?.includes('master') || params?.role === 'master';

				let taskTitle = 'Test Task';
				if (isFeatureTag) {
					taskTitle = 'Feature Task 1';
				} else if (isMasterTag) {
					taskTitle = 'Master Task 1';
				}

				return Promise.resolve({
					mainResult: {
						complexityAnalysis: [
							{
								taskId: 1,
								taskTitle: taskTitle,
								complexityScore: 7,
								recommendedSubtasks: 4,
								expansionPrompt: 'Break down this task',
								reasoning: 'This task is moderately complex'
							},
							{
								taskId: 2,
								taskTitle: 'Task 2',
								complexityScore: 5,
								recommendedSubtasks: 3,
								expansionPrompt: 'Break down this task with a focus on task 2.',
								reasoning:
									'Automatically added due to missing analysis in AI response.'
							}
						]
					},
					telemetryData: {
						timestamp: new Date().toISOString(),
						commandName: 'analyze-complexity',
						modelUsed: 'claude-3-5-sonnet',
						providerName: 'anthropic',
						inputTokens: 1000,
						outputTokens: 500,
						totalTokens: 1500,
						totalCost: 0.012414,
						currency: 'USD'
					}
				});
			}

			// Default response for expand-task and others
			return Promise.resolve({
				mainResult: {
					subtasks: [
						{
							id: 1,
							title: 'Subtask 1',
							description: 'First subtask',
							dependencies: [],
							details: 'Implementation details',
							status: 'pending',
							testStrategy: 'Test strategy'
						}
					]
				},
				telemetryData: {
					timestamp: new Date().toISOString(),
					commandName: 'expand-task',
					modelUsed: 'claude-3-5-sonnet',
					providerName: 'anthropic',
					inputTokens: 1000,
					outputTokens: 500,
					totalTokens: 1500,
					totalCost: 0.012414,
					currency: 'USD'
				}
			});
		})
	})
);

jest.unstable_mockModule(
	'../../../../../scripts/modules/config-manager.js',
	() => ({
		// Core config access
		getConfig: jest.fn(() => ({
			models: { main: { provider: 'anthropic', modelId: 'claude-3-5-sonnet' } },
			global: { projectName: 'Test Project' }
		})),
		writeConfig: jest.fn(() => true),
		ConfigurationError: class extends Error {},
		isConfigFilePresent: jest.fn(() => true),

		// Validation
		validateProvider: jest.fn(() => true),
		validateProviderModelCombination: jest.fn(() => true),
		VALIDATED_PROVIDERS: ['anthropic', 'openai', 'perplexity'],
		CUSTOM_PROVIDERS: { OLLAMA: 'ollama', BEDROCK: 'bedrock' },
		ALL_PROVIDERS: ['anthropic', 'openai', 'perplexity', 'ollama', 'bedrock'],
		MODEL_MAP: {
			anthropic: [
				{
					id: 'claude-3-5-sonnet',
					cost_per_1m_tokens: { input: 3, output: 15 }
				}
			],
			openai: [{ id: 'gpt-4', cost_per_1m_tokens: { input: 30, output: 60 } }]
		},
		getAvailableModels: jest.fn(() => [
			{
				id: 'claude-3-5-sonnet',
				name: 'Claude 3.5 Sonnet',
				provider: 'anthropic'
			},
			{ id: 'gpt-4', name: 'GPT-4', provider: 'openai' }
		]),

		// Role-specific getters
		getMainProvider: jest.fn(() => 'anthropic'),
		getMainModelId: jest.fn(() => 'claude-3-5-sonnet'),
		getMainMaxTokens: jest.fn(() => 4000),
		getMainTemperature: jest.fn(() => 0.7),
		getResearchProvider: jest.fn(() => 'perplexity'),
		getResearchModelId: jest.fn(() => 'sonar-pro'),
		getResearchMaxTokens: jest.fn(() => 8700),
		getResearchTemperature: jest.fn(() => 0.1),
		getFallbackProvider: jest.fn(() => 'anthropic'),
		getFallbackModelId: jest.fn(() => 'claude-3-5-sonnet'),
		getFallbackMaxTokens: jest.fn(() => 4000),
		getFallbackTemperature: jest.fn(() => 0.7),
		getBaseUrlForRole: jest.fn(() => undefined),

		// Global setting getters
		getLogLevel: jest.fn(() => 'info'),
		getDebugFlag: jest.fn(() => false),
		getDefaultNumTasks: jest.fn(() => 10),
		getDefaultSubtasks: jest.fn(() => 5),
		getDefaultPriority: jest.fn(() => 'medium'),
		getProjectName: jest.fn(() => 'Test Project'),
		getOllamaBaseURL: jest.fn(() => 'http://localhost:11434/api'),
		getAzureBaseURL: jest.fn(() => undefined),
		getBedrockBaseURL: jest.fn(() => undefined),
		getParametersForRole: jest.fn(() => ({
			maxTokens: 4000,
			temperature: 0.7
		})),
		getUserId: jest.fn(() => '1234567890'),

		// API Key Checkers
		isApiKeySet: jest.fn(() => true),
		getMcpApiKeyStatus: jest.fn(() => true),

		// Additional functions
		getAllProviders: jest.fn(() => ['anthropic', 'openai', 'perplexity']),
		getVertexProjectId: jest.fn(() => undefined),
		getVertexLocation: jest.fn(() => undefined),
		hasCodebaseAnalysis: jest.fn(() => false)
	})
);

jest.unstable_mockModule(
	'../../../../../scripts/modules/prompt-manager.js',
	() => ({
		getPromptManager: jest.fn().mockReturnValue({
			loadPrompt: jest.fn().mockReturnValue({
				systemPrompt: 'Mocked system prompt',
				userPrompt: 'Mocked user prompt'
			})
		})
	})
);

jest.unstable_mockModule(
	'../../../../../scripts/modules/utils/contextGatherer.js',
	() => {
		class MockContextGatherer {
			constructor(projectRoot, tag) {
				this.projectRoot = projectRoot;
				this.tag = tag;
				this.allTasks = [];
			}

			async gather(options = {}) {
				return {
					context: 'Mock context gathered',
					analysisData: null,
					contextSections: 1,
					finalTaskIds: options.tasks || []
				};
			}
		}

		return {
			default: MockContextGatherer,
			ContextGatherer: MockContextGatherer,
			createContextGatherer: jest.fn(
				(projectRoot, tag) => new MockContextGatherer(projectRoot, tag)
			)
		};
	}
);

jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({
	startLoadingIndicator: jest.fn(() => ({ stop: jest.fn() })),
	stopLoadingIndicator: jest.fn(),
	displayAiUsageSummary: jest.fn(),
	displayBanner: jest.fn(),
	getStatusWithColor: jest.fn((status) => status),
	succeedLoadingIndicator: jest.fn(),
	failLoadingIndicator: jest.fn(),
	warnLoadingIndicator: jest.fn(),
	infoLoadingIndicator: jest.fn(),
	displayContextAnalysis: jest.fn(),
	createProgressBar: jest.fn(() => ({
		start: jest.fn(),
		stop: jest.fn(),
		update: jest.fn()
	})),
	displayTable: jest.fn(),
	displayBox: jest.fn(),
	displaySuccess: jest.fn(),
	displayError: jest.fn(),
	displayWarning: jest.fn(),
	displayInfo: jest.fn(),
	displayTaskDetails: jest.fn(),
	displayTaskList: jest.fn(),
	displayComplexityReport: jest.fn(),
	displayNextTask: jest.fn(),
	displayDependencyStatus: jest.fn(),
	displayMigrationNotice: jest.fn(),
	formatDependenciesWithStatus: jest.fn((deps) => deps),
	formatTaskId: jest.fn((id) => `Task ${id}`),
	formatPriority: jest.fn((priority) => priority),
	formatDuration: jest.fn((duration) => duration),
	formatDate: jest.fn((date) => date),
	formatComplexityScore: jest.fn((score) => score),
	formatTelemetryData: jest.fn((data) => data),
	formatContextSummary: jest.fn((context) => context),
	formatTagName: jest.fn((tag) => tag),
	formatFilePath: jest.fn((path) => path),
	getComplexityWithColor: jest.fn((complexity) => complexity),
	getPriorityWithColor: jest.fn((priority) => priority),
	getTagWithColor: jest.fn((tag) => tag),
	getDependencyWithColor: jest.fn((dep) => dep),
	getTelemetryWithColor: jest.fn((data) => data),
	getContextWithColor: jest.fn((context) => context)
}));

// fs module already mocked at top of file with shared spy references

// Mock @tm/bridge module
jest.unstable_mockModule('@tm/bridge', () => ({
	tryExpandViaRemote: jest.fn().mockResolvedValue(null)
}));

// Mock bridge-utils module
jest.unstable_mockModule(
	'../../../../../scripts/modules/bridge-utils.js',
	() => ({
		createBridgeLogger: jest.fn(() => ({
			logger: {
				info: jest.fn(),
				warn: jest.fn(),
				error: jest.fn(),
				debug: jest.fn()
			},
			report: jest.fn(),
			isMCP: false
		}))
	})
);

// Import the mocked modules
const { resolveComplexityReportOutputPath, findComplexityReportPath } =
	await import('../../../../../src/utils/path-utils.js');

const { readJSON, writeJSON, getTagAwareFilePath } = await import(
	'../../../../../scripts/modules/utils.js'
);

const { generateTextService, generateObjectService, streamTextService } =
	await import('../../../../../scripts/modules/ai-services-unified.js');

// Import the modules under test
const { default: analyzeTaskComplexity } = await import(
	'../../../../../scripts/modules/task-manager/analyze-task-complexity.js'
);

const { default: expandTask } = await import(
	'../../../../../scripts/modules/task-manager/expand-task.js'
);

describe('Complexity Report Tag Isolation', () => {
	const projectRoot = '/mock/project/root';
	const sampleTasks = {
		tasks: [
			{
				id: 1,
				title: 'Task 1',
				description: 'First task',
				status: 'pending'
			},
			{
				id: 2,
				title: 'Task 2',
				description: 'Second task',
				status: 'pending'
			}
		]
	};

	const sampleComplexityReport = {
		meta: {
			generatedAt: new Date().toISOString(),
			tasksAnalyzed: 2,
			totalTasks: 2,
			analysisCount: 2,
			thresholdScore: 5,
			projectName: 'Test Project',
			usedResearch: false
		},
		complexityAnalysis: [
			{
				taskId: 1,
				taskTitle: 'Task 1',
				complexityScore: 7,
				recommendedSubtasks: 4,
				expansionPrompt: 'Break down this task',
				reasoning: 'This task is moderately complex'
			},
			{
				taskId: 2,
				taskTitle: 'Task 2',
				complexityScore: 5,
				recommendedSubtasks: 3,
				expansionPrompt: 'Break down this task',
				reasoning: 'This task is moderately complex'
			}
		]
	};

	beforeEach(() => {
		jest.clearAllMocks();

		// Default mock implementations
		readJSON.mockReturnValue(sampleTasks);
		mockExistsSync.mockReturnValue(false);
		mockMkdirSync.mockImplementation(() => {});

		// Mock resolveComplexityReportOutputPath to return tag-aware paths
		resolveComplexityReportOutputPath.mockImplementation(
			(explicitPath, args) => {
				const tag = args?.tag;
				if (explicitPath) {
					return explicitPath;
				}

				let filename = 'task-complexity-report.json';
				if (tag && tag !== 'master') {
					// Use slugified tag for cross-platform compatibility
					const slugifiedTag = tag
						.replace(/[^a-zA-Z0-9_-]/g, '-')
						.toLowerCase();
					filename = `task-complexity-report_${slugifiedTag}.json`;
				}

				return path.join(projectRoot, '.taskmaster/reports', filename);
			}
		);

		// Mock findComplexityReportPath to return tag-aware paths
		findComplexityReportPath.mockImplementation((explicitPath, args) => {
			const tag = args?.tag;
			if (explicitPath) {
				return explicitPath;
			}

			let filename = 'task-complexity-report.json';
			if (tag && tag !== 'master') {
				filename = `task-complexity-report_${tag}.json`;
			}

			return path.join(projectRoot, '.taskmaster/reports', filename);
		});
	});

	describe('Path Resolution Tag Isolation', () => {
		test('should resolve master tag to default filename', () => {
			const result = resolveComplexityReportOutputPath(null, {
				tag: 'master',
				projectRoot
			});
			expect(result).toBe(
				path.join(
					projectRoot,
					'.taskmaster/reports',
					'task-complexity-report.json'
				)
			);
		});

		test('should resolve non-master tag to tag-specific filename', () => {
			const result = resolveComplexityReportOutputPath(null, {
				tag: 'feature-auth',
				projectRoot
			});
			expect(result).toBe(
				path.join(
					projectRoot,
					'.taskmaster/reports',
					'task-complexity-report_feature-auth.json'
				)
			);
		});

		test('should resolve undefined tag to default filename', () => {
			const result = resolveComplexityReportOutputPath(null, { projectRoot });
			expect(result).toBe(
				path.join(
					projectRoot,
					'.taskmaster/reports',
					'task-complexity-report.json'
				)
			);
		});

		test('should respect explicit path over tag-aware resolution', () => {
			const explicitPath = '/custom/path/report.json';
			const result = resolveComplexityReportOutputPath(explicitPath, {
				tag: 'feature-auth',
				projectRoot
			});
			expect(result).toBe(explicitPath);
		});
	});

	describe('Analysis Generation Tag Isolation', () => {
		test('should generate master tag report to default location', async () => {
			const options = {
				file: 'tasks/tasks.json',
				threshold: '5',
				projectRoot,
				tag: 'master'
			};

			await analyzeTaskComplexity(options, {
				projectRoot,
				mcpLog: {
					info: jest.fn(),
					warn: jest.fn(),
					error: jest.fn(),
					debug: jest.fn(),
					success: jest.fn()
				}
			});

			expect(resolveComplexityReportOutputPath).toHaveBeenCalledWith(
				undefined,
				expect.objectContaining({
					tag: 'master',
					projectRoot
				}),
				expect.any(Function)
			);

			expect(mockWriteFileSync).toHaveBeenCalledWith(
				path.join(
					projectRoot,
					'.taskmaster/reports',
					'task-complexity-report.json'
				),
				expect.any(String),
				'utf8'
			);
		});

		test('should generate feature tag report to tag-specific location', async () => {
			const options = {
				file: 'tasks/tasks.json',
				threshold: '5',
				projectRoot,
				tag: 'feature-auth'
			};

			await analyzeTaskComplexity(options, {
				projectRoot,
				mcpLog: {
					info: jest.fn(),
					warn: jest.fn(),
					error: jest.fn(),
					debug: jest.fn(),
					success: jest.fn()
				}
			});

			expect(resolveComplexityReportOutputPath).toHaveBeenCalledWith(
				undefined,
				expect.objectContaining({
					tag: 'feature-auth',
					projectRoot
				}),
				expect.any(Function)
			);

			expect(mockWriteFileSync).toHaveBeenCalledWith(
				path.join(
					projectRoot,
					'.taskmaster/reports',
					'task-complexity-report_feature-auth.json'
				),
				expect.any(String),
				'utf8'
			);
		});

		test('should not overwrite master report when analyzing feature tag', async () => {
			// First, analyze master tag
			const masterOptions = {
				file: 'tasks/tasks.json',
				threshold: '5',
				projectRoot,
				tag: 'master'
			};

			await analyzeTaskComplexity(masterOptions, {
				projectRoot,
				mcpLog: {
					info: jest.fn(),
					warn: jest.fn(),
					error: jest.fn(),
					debug: jest.fn(),
					success: jest.fn()
				}
			});

			// Clear mocks to verify separate calls
			jest.clearAllMocks();
			readJSON.mockReturnValue(sampleTasks);

			// Then, analyze feature tag
			const featureOptions = {
				file: 'tasks/tasks.json',
				threshold: '5',
				projectRoot,
				tag: 'feature-auth'
			};

			await analyzeTaskComplexity(featureOptions, {
				projectRoot,
				mcpLog: {
					info: jest.fn(),
					warn: jest.fn(),
					error: jest.fn(),
					debug: jest.fn(),
					success: jest.fn()
				}
			});

			// Verify that the feature tag analysis wrote to its own file
			expect(mockWriteFileSync).toHaveBeenCalledWith(
				path.join(
					projectRoot,
					'.taskmaster/reports',
					'task-complexity-report_feature-auth.json'
				),
				expect.any(String),
				'utf8'
			);

			// Verify that it did NOT write to the master file
			expect(mockWriteFileSync).not.toHaveBeenCalledWith(
				path.join(
					projectRoot,
					'.taskmaster/reports',
					'task-complexity-report.json'
				),
				expect.any(String),
				'utf8'
			);
		});
	});

	describe('Report Reading Tag Isolation', () => {
		test('should read master tag report from default location', async () => {
			// Mock existing master report
			mockExistsSync.mockImplementation((filepath) => {
				return filepath.endsWith('task-complexity-report.json');
			});
			mockReadFileSync.mockReturnValue(JSON.stringify(sampleComplexityReport));

			const options = {
				file: 'tasks/tasks.json',
				threshold: '5',
				projectRoot,
				tag: 'master'
			};

			await analyzeTaskComplexity(options, {
				projectRoot,
				mcpLog: {
					info: jest.fn(),
					warn: jest.fn(),
					error: jest.fn(),
					debug: jest.fn(),
					success: jest.fn()
				}
			});

			expect(mockExistsSync).toHaveBeenCalledWith(
				path.join(
					projectRoot,
					'.taskmaster/reports',
					'task-complexity-report.json'
				)
			);
		});

		test('should read feature tag report from tag-specific location', async () => {
			// Mock existing feature tag report
			mockExistsSync.mockImplementation((filepath) => {
				return filepath.endsWith('task-complexity-report_feature-auth.json');
			});
			mockReadFileSync.mockReturnValue(JSON.stringify(sampleComplexityReport));

			const options = {
				file: 'tasks/tasks.json',
				threshold: '5',
				projectRoot,
				tag: 'feature-auth'
			};

			await analyzeTaskComplexity(options, {
				projectRoot,
				mcpLog: {
					info: jest.fn(),
					warn: jest.fn(),
					error: jest.fn(),
					debug: jest.fn(),
					success: jest.fn()
				}
			});

			expect(mockExistsSync).toHaveBeenCalledWith(
				path.join(
					projectRoot,
					'.taskmaster/reports',
					'task-complexity-report_feature-auth.json'
				)
			);
		});

		test('should not read master report when working with feature tag', async () => {
			// Mock that feature tag report exists but master doesn't
			mockExistsSync.mockImplementation((filepath) => {
				return filepath.endsWith('task-complexity-report_feature-auth.json');
			});
			mockReadFileSync.mockReturnValue(JSON.stringify(sampleComplexityReport));

			const options = {
				file: 'tasks/tasks.json',
				threshold: '5',
				projectRoot,
				tag: 'feature-auth'
			};

			await analyzeTaskComplexity(options, {
				projectRoot,
				mcpLog: {
					info: jest.fn(),
					warn: jest.fn(),
					error: jest.fn(),
					debug: jest.fn(),
					success: jest.fn()
				}
			});

			// Should check for feature tag report
			expect(mockExistsSync).toHaveBeenCalledWith(
				path.join(
					projectRoot,
					'.taskmaster/reports',
					'task-complexity-report_feature-auth.json'
				)
			);

			// Should NOT check for master report
			expect(mockExistsSync).not.toHaveBeenCalledWith(
				path.join(
					projectRoot,
					'.taskmaster/reports',
					'task-complexity-report.json'
				)
			);
		});
	});

	describe('Expand Task Tag Isolation', () => {
		test('should use tag-specific complexity report for expansion', async () => {
			// Mock existing feature tag report
			mockExistsSync.mockImplementation((filepath) => {
				return filepath.endsWith('task-complexity-report_feature-auth.json');
			});
			mockReadFileSync.mockReturnValue(JSON.stringify(sampleComplexityReport));

			const tasksPath = path.join(projectRoot, 'tasks/tasks.json');
			const taskId = 1;
			const numSubtasks = 3;

			await expandTask(
				tasksPath,
				taskId,
				numSubtasks,
				false, // useResearch
				'', // additionalContext
				{
					projectRoot,
					tag: 'feature-auth',
					complexityReportPath: path.join(
						projectRoot,
						'.taskmaster/reports',
						'task-complexity-report_feature-auth.json'
					),
					mcpLog: {
						info: jest.fn(),
						warn: jest.fn(),
						error: jest.fn(),
						debug: jest.fn(),
						success: jest.fn()
					}
				},
				false // force
			);

			// Should read from feature tag report
			expect(readJSON).toHaveBeenCalledWith(
				path.join(
					projectRoot,
					'.taskmaster/reports',
					'task-complexity-report_feature-auth.json'
				)
			);
		});

		test('should use master complexity report for master tag expansion', async () => {
			// Mock existing master report
			mockExistsSync.mockImplementation((filepath) => {
				return filepath.endsWith('task-complexity-report.json');
			});
			mockReadFileSync.mockReturnValue(JSON.stringify(sampleComplexityReport));

			const tasksPath = path.join(projectRoot, 'tasks/tasks.json');
			const taskId = 1;
			const numSubtasks = 3;

			await expandTask(
				tasksPath,
				taskId,
				numSubtasks,
				false, // useResearch
				'', // additionalContext
				{
					projectRoot,
					tag: 'master',
					complexityReportPath: path.join(
						projectRoot,
						'.taskmaster/reports',
						'task-complexity-report.json'
					),
					mcpLog: {
						info: jest.fn(),
						warn: jest.fn(),
						error: jest.fn(),
						debug: jest.fn(),
						success: jest.fn()
					}
				},
				false // force
			);

			// Should read from master report
			expect(readJSON).toHaveBeenCalledWith(
				path.join(
					projectRoot,
					'.taskmaster/reports',
					'task-complexity-report.json'
				)
			);
		});
	});

	describe('Cross-Tag Contamination Prevention', () => {
		test('should maintain separate reports for different tags', async () => {
			// Create different complexity reports for different tags
			const masterReport = {
				...sampleComplexityReport,
				complexityAnalysis: [
					{
						taskId: 1,
						taskTitle: 'Master Task 1',
						complexityScore: 8,
						recommendedSubtasks: 5,
						expansionPrompt: 'Master expansion',
						reasoning: 'Master task reasoning'
					}
				]
			};

			const featureReport = {
				...sampleComplexityReport,
				complexityAnalysis: [
					{
						taskId: 1,
						taskTitle: 'Feature Task 1',
						complexityScore: 6,
						recommendedSubtasks: 3,
						expansionPrompt: 'Feature expansion',
						reasoning: 'Feature task reasoning'
					}
				]
			};

			// Mock file system to return different reports for different paths
			mockExistsSync.mockImplementation((filepath) => {
				return filepath.includes('task-complexity-report');
			});

			mockReadFileSync.mockImplementation((filepath) => {
				if (filepath.includes('task-complexity-report_feature-auth.json')) {
					return JSON.stringify(featureReport);
				} else if (filepath.includes('task-complexity-report.json')) {
					return JSON.stringify(masterReport);
				}
				return '{}';
			});

			// Analyze master tag
			const masterOptions = {
				file: 'tasks/tasks.json',
				threshold: '5',
				projectRoot,
				tag: 'master'
			};

			await analyzeTaskComplexity(masterOptions, {
				projectRoot,
				mcpLog: {
					info: jest.fn(),
					warn: jest.fn(),
					error: jest.fn(),
					debug: jest.fn(),
					success: jest.fn()
				}
			});

			// Verify that master report was written to master location
			expect(mockWriteFileSync).toHaveBeenCalledWith(
				path.join(
					projectRoot,
					'.taskmaster/reports',
					'task-complexity-report.json'
				),
				expect.stringContaining('"taskTitle": "Test Task"'),
				'utf8'
			);

			// Clear mocks
			jest.clearAllMocks();
			readJSON.mockReturnValue(sampleTasks);

			// Analyze feature tag
			const featureOptions = {
				file: 'tasks/tasks.json',
				threshold: '5',
				projectRoot,
				tag: 'feature-auth'
			};

			await analyzeTaskComplexity(featureOptions, {
				projectRoot,
				mcpLog: {
					info: jest.fn(),
					warn: jest.fn(),
					error: jest.fn(),
					debug: jest.fn(),
					success: jest.fn()
				}
			});

			// Verify that feature report was written to feature location
			expect(mockWriteFileSync).toHaveBeenCalledWith(
				path.join(
					projectRoot,
					'.taskmaster/reports',
					'task-complexity-report_feature-auth.json'
				),
				expect.stringContaining('"taskTitle": "Test Task"'),
				'utf8'
			);
		});
	});

	describe('Edge Cases', () => {
		test('should handle empty tag gracefully', async () => {
			const options = {
				file: 'tasks/tasks.json',
				threshold: '5',
				projectRoot,
				tag: ''
			};

			await analyzeTaskComplexity(options, {
				projectRoot,
				mcpLog: {
					info: jest.fn(),
					warn: jest.fn(),
					error: jest.fn(),
					debug: jest.fn(),
					success: jest.fn()
				}
			});

			expect(resolveComplexityReportOutputPath).toHaveBeenCalledWith(
				undefined,
				expect.objectContaining({
					tag: '',
					projectRoot
				}),
				expect.any(Function)
			);
		});

		test('should handle null tag gracefully', async () => {
			const options = {
				file: 'tasks/tasks.json',
				threshold: '5',
				projectRoot,
				tag: null
			};

			await analyzeTaskComplexity(options, {
				projectRoot,
				mcpLog: {
					info: jest.fn(),
					warn: jest.fn(),
					error: jest.fn(),
					debug: jest.fn(),
					success: jest.fn()
				}
			});

			expect(resolveComplexityReportOutputPath).toHaveBeenCalledWith(
				undefined,
				expect.objectContaining({
					tag: null,
					projectRoot
				}),
				expect.any(Function)
			);
		});

		test('should handle special characters in tag names', async () => {
			const options = {
				file: 'tasks/tasks.json',
				threshold: '5',
				projectRoot,
				tag: 'feature/user-auth-v2'
			};

			await analyzeTaskComplexity(options, {
				projectRoot,
				mcpLog: {
					info: jest.fn(),
					warn: jest.fn(),
					error: jest.fn(),
					debug: jest.fn(),
					success: jest.fn()
				}
			});

			expect(resolveComplexityReportOutputPath).toHaveBeenCalledWith(
				undefined,
				expect.objectContaining({
					tag: 'feature/user-auth-v2',
					projectRoot
				}),
				expect.any(Function)
			);

			expect(mockWriteFileSync).toHaveBeenCalledWith(
				path.join(
					projectRoot,
					'.taskmaster/reports',
					'task-complexity-report_feature-user-auth-v2.json'
				),
				expect.any(String),
				'utf8'
			);
		});
	});
});

```

--------------------------------------------------------------------------------
/.taskmaster/docs/autonomous-tdd-git-workflow.md:
--------------------------------------------------------------------------------

```markdown
## Summary

- Put the existing git and test workflows on rails: a repeatable, automated process that can run autonomously, with guardrails and a compact TUI for visibility.

- Flow: for a selected task, create a branch named with the tag + task id → generate tests for the first subtask (red) using the Surgical Test Generator → implement code (green) → verify tests → commit → repeat per subtask → final verify → push → open PR against the default branch.

- Build on existing rules: .cursor/rules/git_workflow.mdc, .cursor/rules/test_workflow.mdc, .claude/agents/surgical-test-generator.md, and existing CLI/core services.

## Goals

- Deterministic, resumable automation to execute the TDD loop per subtask with minimal human intervention.

- Strong guardrails: never commit to the default branch; only commit when tests pass; enforce status transitions; persist logs/state for debuggability.

- Visibility: a compact terminal UI (like lazygit) to pick tag, view tasks, and start work; right-side pane opens an executor terminal (via tmux) for agent coding.

- Extensible: framework-agnostic test generation via the Surgical Test Generator; detect and use the repo’s test command for execution with coverage thresholds.

## Non‑Goals (initial)

- Full multi-language runner parity beyond detection and executing the project’s test command.

- Complex GUI; start with CLI/TUI + tmux pane. IDE/extension can hook into the same state later.

- Rich executor selection UX (codex/gemini/claude) — we’ll prompt per run; defaults can come later.

## Success Criteria

- One command can autonomously complete a task's subtasks via TDD and open a PR when done.

- All commits made on a branch that includes the tag and task id (see Branch Naming); no commits to the default branch directly.

- Every subtask iteration: failing tests added first (red), then code added to pass them (green), commit only after green.

- End-to-end logs + artifacts stored in .taskmaster/reports/runs/<timestamp-or-id>/.

## Success Metrics (Phase 1)

- **Adoption**: 80% of tasks in a pilot repo completed via `tm autopilot`
- **Safety**: 0 commits to default branch; 100% of commits have green tests
- **Efficiency**: Average time from task start to PR < 30min for simple subtasks
- **Reliability**: < 5% of runs require manual intervention (timeout/conflicts)

## User Stories

- As a developer, I can run tm autopilot <taskId> and watch a structured, safe workflow execute.

- As a reviewer, I can inspect commits per subtask, and a PR summarizing the work when the task completes.

- As an operator, I can see current step, active subtask, tests status, and logs in a compact CLI view and read a final run report.

## Example Workflow Traces

### Happy Path: Complete a 3-subtask feature

```bash
# Developer starts
$ tm autopilot 42
→ Checks preflight: ✓ clean tree, ✓ npm test detected
→ Creates branch: analytics/task-42-user-metrics
→ Subtask 42.1: "Add metrics schema"
  RED: generates test_metrics_schema.test.js → 3 failures
  GREEN: implements schema.js → all pass
  COMMIT: "feat(metrics): add metrics schema (task 42.1)"
→ Subtask 42.2: "Add collection endpoint"
  RED: generates test_metrics_endpoint.test.js → 5 failures
  GREEN: implements api/metrics.js → all pass
  COMMIT: "feat(metrics): add collection endpoint (task 42.2)"
→ Subtask 42.3: "Add dashboard widget"
  RED: generates test_metrics_widget.test.js → 4 failures
  GREEN: implements components/MetricsWidget.jsx → all pass
  COMMIT: "feat(metrics): add dashboard widget (task 42.3)"
→ Final: all 3 subtasks complete
  ✓ Run full test suite → all pass
  ✓ Coverage check → 85% (meets 80% threshold)
  PUSH: confirms with user → pushed to origin
  PR: opens #123 "Task #42 [analytics]: User metrics tracking"

✓ Task 42 complete. PR: https://github.com/org/repo/pull/123
  Run report: .taskmaster/reports/runs/2025-01-15-142033/
```

### Error Recovery: Failing tests timeout

```bash
$ tm autopilot 42
→ Subtask 42.2 GREEN phase: attempt 1 fails (2 tests still red)
→ Subtask 42.2 GREEN phase: attempt 2 fails (1 test still red)
→ Subtask 42.2 GREEN phase: attempt 3 fails (1 test still red)

⚠️  Paused: Could not achieve green state after 3 attempts
📋 State saved to: .taskmaster/reports/runs/2025-01-15-142033/
    Last error: "POST /api/metrics returns 500 instead of 201"

Next steps:
  - Review diff: git diff HEAD
  - Inspect logs: cat .taskmaster/reports/runs/2025-01-15-142033/log.jsonl
  - Check test output: cat .taskmaster/reports/runs/2025-01-15-142033/test-results/subtask-42.2-green-attempt3.json
  - Resume after manual fix: tm autopilot --resume

# Developer manually fixes the issue, then:
$ tm autopilot --resume
→ Resuming subtask 42.2 GREEN phase
  GREEN: all tests pass
  COMMIT: "feat(metrics): add collection endpoint (task 42.2)"
→ Continuing to subtask 42.3...
```

### Dry Run: Preview before execution

```bash
$ tm autopilot 42 --dry-run
Autopilot Plan for Task #42 [analytics]: User metrics tracking
─────────────────────────────────────────────────────────────
Preflight:
  ✓ Working tree is clean
  ✓ Test command detected: npm test
  ✓ Tools available: git, gh, node, npm
  ✓ Current branch: main (will create new branch)

Branch & Tag:
  → Create branch: analytics/task-42-user-metrics
  → Set active tag: analytics

Subtasks (3 pending):
  1. 42.1: Add metrics schema
     - RED: generate tests in src/__tests__/schema.test.js
     - GREEN: implement src/schema.js
     - COMMIT: "feat(metrics): add metrics schema (task 42.1)"

  2. 42.2: Add collection endpoint [depends on 42.1]
     - RED: generate tests in src/api/__tests__/metrics.test.js
     - GREEN: implement src/api/metrics.js
     - COMMIT: "feat(metrics): add collection endpoint (task 42.2)"

  3. 42.3: Add dashboard widget [depends on 42.2]
     - RED: generate tests in src/components/__tests__/MetricsWidget.test.jsx
     - GREEN: implement src/components/MetricsWidget.jsx
     - COMMIT: "feat(metrics): add dashboard widget (task 42.3)"

Finalization:
  → Run full test suite with coverage
  → Push branch to origin (will confirm)
  → Create PR targeting main

Run without --dry-run to execute.
```

## High‑Level Workflow

1) Pre‑flight

   - Verify clean working tree or confirm staging/commit policy (configurable).

   - Detect repo type and the project’s test command (e.g., npm test, pnpm test, pytest, go test).

   - Validate tools: git, gh (optional for PR), node/npm, and (if used) claude CLI.

   - Load TaskMaster state and selected task; if no subtasks exist, automatically run “expand” before working.

2) Branch & Tag Setup

   - Checkout default branch and update (optional), then create a branch using Branch Naming (below).

   - Map branch ↔ tag via existing tag management; explicitly set active tag to the branch’s tag.

3) Subtask Loop (for each pending/in-progress subtask in dependency order)

   - Select next eligible subtask using tm-core TaskService getNextTask() and subtask eligibility logic.

   - Red: generate or update failing tests for the subtask

     - Use the Surgical Test Generator system prompt (.claude/agents/surgical-test-generator.md) to produce high-signal tests following project conventions.

     - Run tests to confirm red; record results. If not red (already passing), skip to next subtask or escalate.

   - Green: implement code to pass tests

     - Use executor to implement changes (initial: claude CLI prompt with focused context).

     - Re-run tests until green or timeout/backoff policy triggers.

   - Commit: when green

     - Commit tests + code with conventional commit message. Optionally update subtask status to done.

     - Persist run step metadata/logs.

4) Finalization

   - Run full test suite and coverage (if configured); optionally lint/format.

   - Commit any final adjustments.

   - Push branch (ask user to confirm); create PR (via gh pr create) targeting the default branch. Title format: Task #<id> [<tag>]: <title>.

5) Post‑Run

   - Update task status if desired (e.g., review).

   - Persist run report (JSON + markdown summary) to .taskmaster/reports/runs/<run-id>/.
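
The loop above is effectively a small linear state machine. A minimal sketch of the phase sequence, with illustrative phase names (this is not a finalized tm-core API):

```typescript
// Hypothetical phase model for the autopilot lifecycle; the names are
// illustrative and do not reflect a shipped tm-core API.
type Phase =
  | 'PREFLIGHT'
  | 'BRANCH_SETUP'
  | 'SUBTASK_RED'
  | 'SUBTASK_GREEN'
  | 'SUBTASK_COMMIT'
  | 'FINALIZE'
  | 'POST_RUN';

// Linear transitions, except the subtask loop, which repeats until no
// pending subtasks remain.
function nextPhase(phase: Phase, pendingSubtasks: number): Phase {
  switch (phase) {
    case 'PREFLIGHT':
      return 'BRANCH_SETUP';
    case 'BRANCH_SETUP':
      return 'SUBTASK_RED';
    case 'SUBTASK_RED':
      return 'SUBTASK_GREEN';
    case 'SUBTASK_GREEN':
      return 'SUBTASK_COMMIT';
    case 'SUBTASK_COMMIT':
      return pendingSubtasks > 0 ? 'SUBTASK_RED' : 'FINALIZE';
    case 'FINALIZE':
    case 'POST_RUN':
      return 'POST_RUN';
  }
}
```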

## Guardrails

- Never commit to the default branch (see the sketch after this list).

- Commit only if all tests (targeted and suite) pass; allow override flags.

- Enforce 80% coverage thresholds (lines/branches/functions/statements) by default; configurable.

- Timebox/model ops and retries; if not green within N attempts, pause with actionable state for resume.

- Always log actions, commands, and outcomes; include dry-run mode.

- Ask before branch creation, pushing, and opening a PR unless --no-confirm is set.
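
As one example of how the first guardrail might be enforced, a minimal sketch; the function name and the injection style are assumptions:

```typescript
// Hypothetical guardrail: refuse to commit while on the default branch.
// Both parameters are assumptions about how the Git adapter exposes state.
async function assertNotOnDefaultBranch(
  getCurrentBranch: () => Promise<string>,
  defaultBranch: string
): Promise<void> {
  const branch = await getCurrentBranch();
  if (branch === defaultBranch) {
    throw new Error(
      `Refusing to commit on default branch "${defaultBranch}"; ` +
        'autopilot must operate on a task branch.'
    );
  }
}
```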

## Integration Points (Current Repo)

- CLI: apps/cli provides command structure and UI components.

  - New command: tm autopilot (alias: task-master autopilot).

  - Reuse UI components under apps/cli/src/ui/components/ for headers/task details/next-task.

- Core services: packages/tm-core

  - TaskService for selection, status, tags.

  - TaskExecutionService for prompt formatting and executor prep.

  - Executors: claude executor and ExecutorFactory to run external tools.

  - Proposed new: WorkflowOrchestrator to drive the autonomous loop and emit progress events.

- Tag/Git utilities: scripts/modules/utils/git-utils.js and scripts/modules/task-manager/tag-management.js for branch→tag mapping and explicit tag switching.

- Rules: .cursor/rules/git_workflow.mdc and .cursor/rules/test_workflow.mdc to steer behavior and ensure consistency.

- Test generation prompt: .claude/agents/surgical-test-generator.md.

## Proposed Components

- Orchestrator (tm-core): WorkflowOrchestrator (new)

  - State machine driving phases: Preflight → Branch/Tag → SubtaskIter (Red/Green/Commit) → Finalize → PR.

  - Exposes an evented API (progress events) that the CLI can render.

  - Stores run state artifacts.

- Test Runner Adapter

  - Detects and runs tests via the project’s test command (e.g., npm test), with targeted runs where feasible.

  - API: runTargeted(files/pattern), runAll(), report summary (failures, duration, coverage); enforce the 80% threshold by default (see the interface sketch after this list).

- Git/PR Adapter

  - Encapsulates git ops: branch create/checkout, add/commit, push.

  - Optional gh integration to open PR; fallback to instructions if gh unavailable.

  - Confirmation gates for branch creation and pushes.

- Prompt/Exec Adapter

  - Uses existing executor service to call the selected coding assistant (initially claude) with tight prompts: task/subtask context, surgical tests first, then minimal code to green.

- Run State + Reporting

  - JSONL log of steps, timestamps, commands, test results.

  - Markdown summary for PR description and post-run artifact.
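
A rough sketch of the Test Runner and Git/PR adapter surfaces described above; the interface names and shapes are assumptions for illustration, not an existing tm-core API:

```typescript
// Hypothetical adapter interfaces; shapes are assumptions for illustration.
interface TestSummary {
  passed: number;
  failed: number;
  durationMs: number;
  coverage?: {
    lines: number;
    branches: number;
    functions: number;
    statements: number;
  };
}

interface TestRunnerAdapter {
  // Fast inner loop: run only tests matching a file or glob pattern.
  runTargeted(pattern: string): Promise<TestSummary>;
  // Full suite with coverage, used before finalization.
  runAll(): Promise<TestSummary>;
  // True when every coverage metric meets its configured threshold.
  meetsThresholds(
    summary: TestSummary,
    thresholds: Record<string, number>
  ): boolean;
}

interface GitAdapter {
  createBranch(name: string): Promise<void>;
  commit(message: string, paths: string[]): Promise<string>; // resolves to SHA
  push(branch: string): Promise<void>;
  // Returns the PR URL, or null when `gh` is unavailable (print manual steps).
  openPullRequest(title: string, bodyPath: string): Promise<string | null>;
}
```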

## CLI UX (MVP)

- Command: tm autopilot [taskId]

  - Flags: --dry-run, --no-push, --no-pr, --no-confirm, --force, --max-attempts <n>, --runner <auto|custom>, --commit-scope <scope>

  - Output: compact header (project, tag, branch), current phase, subtask line, last test summary, next actions.

- Resume: If interrupted, tm autopilot --resume picks up from last checkpoint in run state.

### TUI with tmux (Linear Execution)

- Left pane: Tag selector, task list (status/priority), start/expand shortcuts; "Start" triggers the next task or a selected task.

- Right pane: Executor terminal (tmux split) that runs the coding agent (claude-code/codex). Autopilot can hand over to the right pane during green (a spawn sketch follows this list).

- MCP integration: use MCP tools for task queries/updates and for shell/test invocations where available.
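
A minimal sketch of handing the right pane to the executor via tmux (standard `tmux split-window -h`; the `--executor-mode` flag is described in the implementation notes below):

```typescript
import { spawnSync } from 'node:child_process';

// Open the executor in a right-hand tmux pane; assumes we are already
// running inside a tmux session. The command string is illustrative.
spawnSync('tmux', ['split-window', '-h', 'tm autopilot --executor-mode'], {
  stdio: 'inherit'
});
```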

## TUI Layout (tmux-based)

### Pane Structure

```
┌─────────────────────────────────────┬──────────────────────────────────┐
│ Task Navigator (left)               │ Executor Terminal (right)        │
│                                     │                                  │
│ Project: my-app                     │ $ tm autopilot --executor-mode   │
│ Branch: analytics/task-42           │ > Running subtask 42.2 GREEN...  │
│ Tag: analytics                      │ > Implementing endpoint...       │
│                                     │ > Tests: 3 passed, 0 failed      │
│ Tasks:                              │ > Ready to commit                │
│ → 42 [in-progress] User metrics     │                                  │
│   → 42.1 [done] Schema              │ [Live output from Claude Code]   │
│   → 42.2 [active] Endpoint ◀        │                                  │
│   → 42.3 [pending] Dashboard        │                                  │
│                                     │                                  │
│ [s] start  [p] pause  [q] quit      │                                  │
└─────────────────────────────────────┴──────────────────────────────────┘
```

### Implementation Notes

- **Left pane**: `apps/cli/src/ui/tui/navigator.ts` (new, uses `blessed` or `ink`)
- **Right pane**: spawned via `tmux split-window -h` running `tm autopilot --executor-mode`
- **Communication**: shared state file `.taskmaster/state/current-run.json` + file watching or event stream (see the sketch below)
- **Keybindings**:
  - `s` - Start selected task
  - `p` - Pause/resume current run
  - `q` - Quit (with confirmation if run active)
  - `↑/↓` - Navigate task list
  - `Enter` - Expand/collapse subtasks
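
A minimal sketch of the left pane reacting to state-file changes with Node's `fs.watch`; the `RunState` shape is an assumption:

```typescript
import { watch, readFileSync } from 'node:fs';

// Hypothetical run-state shape; the real fields are still to be defined.
interface RunState {
  phase: string;
  subtask?: string;
  lastTestSummary?: { passed: number; failed: number };
}

const STATE_FILE = '.taskmaster/state/current-run.json';

function render(state: RunState): void {
  // Placeholder for the navigator's re-render hook.
  console.log(`[${state.phase}] ${state.subtask ?? ''}`);
}

// Re-render whenever the executor pane rewrites the state file.
watch(STATE_FILE, (eventType) => {
  if (eventType !== 'change') return;
  try {
    render(JSON.parse(readFileSync(STATE_FILE, 'utf8')) as RunState);
  } catch {
    // Partial writes happen; skip and wait for the next change event.
  }
});
```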

## Prompt Composition (Detailed)

### System Prompt Assembly

Prompts are composed in three layers:

1. **Base rules** (loaded in order from `.cursor/rules/` and `.claude/agents/`):
   - `git_workflow.mdc` → git commit conventions, branch policy, PR guidelines
   - `test_workflow.mdc` → TDD loop requirements, coverage thresholds, test structure
   - `surgical-test-generator.md` → test generation methodology, project-specific test patterns

2. **Task context injection**:
   ```
   You are implementing:
   Task #42 [analytics]: User metrics tracking
   Subtask 42.2: Add collection endpoint

   Description:
   Implement POST /api/metrics endpoint to collect user metrics events

   Acceptance criteria:
   - POST /api/metrics accepts { userId, eventType, timestamp }
   - Validates input schema (reject missing/invalid fields)
   - Persists to database
   - Returns 201 on success with created record
   - Returns 400 on validation errors

   Dependencies:
   - Subtask 42.1 (metrics schema) is complete

   Current phase: RED (generate failing tests)
   Test command: npm test
   Test file convention: src/**/*.test.js (vitest framework detected)
   Branch: analytics/task-42-user-metrics
   Project language: JavaScript (Node.js)
   ```

3. **Phase-specific instructions**:
   - **RED phase**: "Generate minimal failing tests for this subtask. Do NOT implement any production code. Only create test files. Confirm tests fail with clear error messages indicating missing implementation."
   - **GREEN phase**: "Implement minimal code to pass the failing tests. Follow existing project patterns in `src/`. Only modify files necessary for this subtask. Keep changes focused and reviewable."

### Example Full Prompt (RED Phase)

```markdown
<SYSTEM PROMPT>
[Contents of .cursor/rules/git_workflow.mdc]
[Contents of .cursor/rules/test_workflow.mdc]
[Contents of .claude/agents/surgical-test-generator.md]

<TASK CONTEXT>
You are implementing:
Task #42.2: Add collection endpoint

Description:
Implement POST /api/metrics endpoint to collect user metrics events

Acceptance criteria:
- POST /api/metrics accepts { userId, eventType, timestamp }
- Validates input schema (reject missing/invalid fields)
- Persists to database using MetricsSchema from subtask 42.1
- Returns 201 on success with created record
- Returns 400 on validation errors with details

Dependencies: Subtask 42.1 (metrics schema) is complete

<INSTRUCTION>
Generate failing tests for this subtask. Follow project conventions:
- Test file: src/api/__tests__/metrics.test.js
- Framework: vitest (detected from package.json)
- Test cases to cover:
  * POST /api/metrics with valid payload → should return 201 (will fail: endpoint not implemented)
  * POST /api/metrics with missing userId → should return 400 (will fail: validation not implemented)
  * POST /api/metrics with invalid timestamp → should return 400 (will fail: validation not implemented)
  * POST /api/metrics with invalid payload → should include error details in the 400 response (will fail: validation not implemented)
  * POST /api/metrics should persist to database → should save record (will fail: persistence not implemented)

Do NOT implement the endpoint code yet. Only create test file(s).
Confirm tests fail with messages like "Cannot POST /api/metrics" or "endpoint not defined".

Output format:
1. File path to create: src/api/__tests__/metrics.test.js
2. Complete test code
3. Command to run: npm test src/api/__tests__/metrics.test.js
```

### Example Full Prompt (GREEN Phase)

````markdown
<SYSTEM PROMPT>
[Contents of .cursor/rules/git_workflow.mdc]
[Contents of .cursor/rules/test_workflow.mdc]

<TASK CONTEXT>
Task #42.2: Add collection endpoint
[same context as RED phase]

<CURRENT STATE>
Tests created in RED phase:
- src/api/__tests__/metrics.test.js
- 5 tests written, all failing as expected

Test output:
```
FAIL src/api/__tests__/metrics.test.js
  POST /api/metrics
    ✗ should return 201 with valid payload (endpoint not found)
    ✗ should return 400 with missing userId (endpoint not found)
    ✗ should return 400 with invalid timestamp (endpoint not found)
    ✗ should include error details in the 400 response (endpoint not found)
    ✗ should persist to database (endpoint not found)
```

<INSTRUCTION>
Implement minimal code to make all tests pass.

Guidelines:
- Create/modify file: src/api/metrics.js
- Use existing patterns from src/api/ (e.g., src/api/users.js for reference)
- Import MetricsSchema from subtask 42.1 (src/models/schema.js)
- Implement validation, persistence, and response handling
- Follow project error handling conventions
- Keep implementation focused on this subtask only

After implementation:
1. Run tests: npm test src/api/__tests__/metrics.test.js
2. Confirm all 5 tests pass
3. Report results

Output format:
1. File(s) created/modified
2. Implementation code
3. Test command and results
````

### Prompt Loading Configuration

See `.taskmaster/config.json` → `prompts` section for paths and load order.
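
A minimal sketch of the three-layer assembly using those config fields (standard Node `fs`/`path`; the function itself is illustrative):

```typescript
import { readFileSync } from 'node:fs';
import { join } from 'node:path';

interface PromptConfig {
  rulesPath: string;
  testGeneratorPath: string;
  loadOrder: string[];
}

// Layer 1: base rules in configured order plus the test generator prompt;
// layers 2 and 3 (task context, phase instruction) are rendered elsewhere.
function assemblePrompt(
  cfg: PromptConfig,
  taskContext: string,
  phaseInstruction: string
): string {
  const rules = cfg.loadOrder.map((file) =>
    readFileSync(join(cfg.rulesPath, file), 'utf8')
  );
  const testGenerator = readFileSync(cfg.testGeneratorPath, 'utf8');
  return [...rules, testGenerator, taskContext, phaseInstruction].join('\n\n');
}
```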

## Configuration Schema

### .taskmaster/config.json

```json
{
  "autopilot": {
    "enabled": true,
    "requireCleanWorkingTree": true,
    "commitTemplate": "{type}({scope}): {msg}",
    "defaultCommitType": "feat",
    "maxGreenAttempts": 3,
    "testTimeout": 300000
  },
  "test": {
    "runner": "auto",
    "coverageThresholds": {
      "lines": 80,
      "branches": 80,
      "functions": 80,
      "statements": 80
    },
    "targetedRunPattern": "**/*.test.js"
  },
  "git": {
    "branchPattern": "{tag}/task-{id}-{slug}",
    "pr": {
      "enabled": true,
      "base": "default",
      "bodyTemplate": ".taskmaster/templates/pr-body.md"
    }
  },
  "prompts": {
    "rulesPath": ".cursor/rules",
    "testGeneratorPath": ".claude/agents/surgical-test-generator.md",
    "loadOrder": ["git_workflow.mdc", "test_workflow.mdc"]
  }
}
```

### Configuration Fields

#### autopilot
- `enabled` (boolean): Enable/disable autopilot functionality
- `requireCleanWorkingTree` (boolean): Require clean git state before starting
- `commitTemplate` (string): Template for commit messages (tokens: `{type}`, `{scope}`, `{msg}`)
- `defaultCommitType` (string): Default commit type (feat, fix, chore, etc.)
- `maxGreenAttempts` (number): Maximum retry attempts to achieve green tests (default: 3)
- `testTimeout` (number): Timeout in milliseconds per test run (default: 300000 = 5min)

#### test
- `runner` (string): Test runner detection mode (`"auto"` or explicit command like `"npm test"`)
- `coverageThresholds` (object): Minimum coverage percentages required
  - `lines`, `branches`, `functions`, `statements` (number): Threshold percentages (0-100)
- `targetedRunPattern` (string): Glob pattern for targeted subtask test runs

#### git
- `branchPattern` (string): Branch naming pattern (tokens: `{tag}`, `{id}`, `{slug}`)
- `pr.enabled` (boolean): Enable automatic PR creation
- `pr.base` (string): Target branch for PRs (`"default"` uses repo default, or specify like `"main"`)
- `pr.bodyTemplate` (string): Path to PR body template file (optional)

#### prompts
- `rulesPath` (string): Directory containing rule files (e.g., `.cursor/rules`)
- `testGeneratorPath` (string): Path to test generator prompt file
- `loadOrder` (array): Order to load rule files from `rulesPath`
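
For reference, a hypothetical TypeScript mirror of this schema; field names follow the JSON above, but the type itself is illustrative, not shipped code:

```typescript
// Hypothetical config type mirroring .taskmaster/config.json (illustrative).
interface AutopilotConfig {
  autopilot: {
    enabled: boolean;
    requireCleanWorkingTree: boolean;
    commitTemplate: string; // tokens: {type}, {scope}, {msg}
    defaultCommitType: string; // e.g. feat, fix, chore
    maxGreenAttempts: number;
    testTimeout: number; // milliseconds
  };
  test: {
    runner: 'auto' | string;
    coverageThresholds: {
      lines: number;
      branches: number;
      functions: number;
      statements: number;
    };
    targetedRunPattern: string;
  };
  git: {
    branchPattern: string; // tokens: {tag}, {id}, {slug}
    pr: { enabled: boolean; base: 'default' | string; bodyTemplate?: string };
  };
  prompts: {
    rulesPath: string;
    testGeneratorPath: string;
    loadOrder: string[];
  };
}
```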

### Environment Variables

```bash
# Required for executor
ANTHROPIC_API_KEY=sk-ant-...          # Claude API key

# Optional: for PR creation
GITHUB_TOKEN=ghp_...                  # GitHub personal access token

# Optional: for other executors (future)
OPENAI_API_KEY=sk-...
GOOGLE_API_KEY=...
```

## Run Artifacts & Observability

### Per-Run Artifact Structure

Each autopilot run creates a timestamped directory with complete traceability:

```
.taskmaster/reports/runs/2025-01-15-142033/
├── manifest.json          # run metadata (task id, start/end time, status)
├── log.jsonl              # timestamped event stream
├── commits.txt            # list of commit SHAs made during run
├── test-results/
│   ├── subtask-42.1-red.json
│   ├── subtask-42.1-green.json
│   ├── subtask-42.2-red.json
│   ├── subtask-42.2-green-attempt1.json
│   ├── subtask-42.2-green-attempt2.json
│   ├── subtask-42.2-green-attempt3.json
│   └── final-suite.json
└── pr.md                  # generated PR body
```

### manifest.json Format

```json
{
  "runId": "2025-01-15-142033",
  "taskId": "42",
  "tag": "analytics",
  "branch": "analytics/task-42-user-metrics",
  "startTime": "2025-01-15T14:20:33Z",
  "endTime": "2025-01-15T14:45:12Z",
  "status": "completed",
  "subtasksCompleted": ["42.1", "42.2", "42.3"],
  "subtasksFailed": [],
  "totalCommits": 3,
  "prUrl": "https://github.com/org/repo/pull/123",
  "finalCoverage": {
    "lines": 85.3,
    "branches": 82.1,
    "functions": 88.9,
    "statements": 85.0
  }
}
```

### log.jsonl Format

Event stream in JSON Lines format for easy parsing and debugging:

```jsonl
{"ts":"2025-01-15T14:20:33Z","phase":"preflight","status":"ok","details":{"testCmd":"npm test","gitClean":true}}
{"ts":"2025-01-15T14:20:45Z","phase":"branch","status":"ok","branch":"analytics/task-42-user-metrics"}
{"ts":"2025-01-15T14:21:00Z","phase":"red","subtask":"42.1","status":"ok","tests":{"failed":3,"passed":0}}
{"ts":"2025-01-15T14:22:15Z","phase":"green","subtask":"42.1","status":"ok","tests":{"passed":3,"failed":0},"attempts":2}
{"ts":"2025-01-15T14:22:20Z","phase":"commit","subtask":"42.1","status":"ok","sha":"a1b2c3d","message":"feat(metrics): add metrics schema (task 42.1)"}
{"ts":"2025-01-15T14:23:00Z","phase":"red","subtask":"42.2","status":"ok","tests":{"failed":5,"passed":0}}
{"ts":"2025-01-15T14:25:30Z","phase":"green","subtask":"42.2","status":"error","tests":{"passed":3,"failed":2},"attempts":3,"error":"Max attempts reached"}
{"ts":"2025-01-15T14:25:35Z","phase":"pause","reason":"max_attempts","nextAction":"manual_review"}
```

### Test Results Format

Each test run stores detailed results:

```json
{
  "subtask": "42.2",
  "phase": "green",
  "attempt": 3,
  "timestamp": "2025-01-15T14:25:30Z",
  "command": "npm test src/api/__tests__/metrics.test.js",
  "exitCode": 1,
  "duration": 2340,
  "summary": {
    "total": 5,
    "passed": 3,
    "failed": 2,
    "skipped": 0
  },
  "failures": [
    {
      "test": "POST /api/metrics should return 201 with valid payload",
      "error": "Expected status 201, got 500",
      "stack": "..."
    }
  ],
  "coverage": {
    "lines": 78.5,
    "branches": 75.0,
    "functions": 80.0,
    "statements": 78.5
  }
}
```
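
The coverage block is what the commit gate inspects. A minimal sketch of that check (field names per the sample; the function is illustrative):

```typescript
interface Coverage {
  lines: number;
  branches: number;
  functions: number;
  statements: number;
}

// Commit gate: every metric must meet its configured threshold.
function meetsCoverage(actual: Coverage, thresholds: Coverage): boolean {
  return (Object.keys(thresholds) as (keyof Coverage)[]).every(
    (key) => actual[key] >= thresholds[key]
  );
}
```

With the sample values above (lines at 78.5 against an 80 threshold), the gate fails and the orchestrator retries or pauses.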

## Execution Model

### Orchestration vs Direct Execution

The autopilot system uses an **orchestration model** rather than direct code execution:

**Orchestrator Role** (tm-core WorkflowOrchestrator):
- Maintains state machine tracking current phase (RED/GREEN/COMMIT) per subtask
- Validates preconditions (tests pass, git state clean, etc.)
- Returns "work units" describing what needs to be done next
- Records completion and advances to next phase
- Persists state for resumability

**Executor Role** (Claude Code/AI session via MCP):
- Queries orchestrator for next work unit
- Executes the work (generates tests, writes code, runs tests, makes commits)
- Reports results back to orchestrator
- Handles file operations and tool invocations

**Why This Approach?**
- Leverages existing AI capabilities (Claude Code) rather than duplicating them
- MCP protocol provides clean separation between state management and execution
- Allows human oversight and intervention at each phase
- Simpler to implement: orchestrator is pure state logic, no code generation needed
- Enables multiple executor types (Claude Code, other AI tools, human developers)

**Example Flow**:
```typescript
// Claude Code (via MCP) queries orchestrator
const workUnit = await orchestrator.getNextWorkUnit('42');
// => {
//      phase: 'RED',
//      subtask: '42.1',
//      action: 'Generate failing tests for metrics schema',
//      context: { title, description, dependencies, testFile: 'src/__tests__/schema.test.js' }
//    }

// Claude Code executes the work (writes test file, runs tests)
// Then reports back
await orchestrator.completeWorkUnit('42', '42.1', 'RED', {
  success: true,
  testsCreated: ['src/__tests__/schema.test.js'],
  testsFailed: 3
});

// Query again for next phase
const nextWorkUnit = await orchestrator.getNextWorkUnit('42');
// => { phase: 'GREEN', subtask: '42.1', action: 'Implement code to pass tests', ... }
```

## Design Decisions

### Why commit per subtask instead of per task?

**Decision**: Commit after each subtask's green state, not after the entire task.

**Rationale**:
- Atomic commits make code review easier (reviewers can see logical progression)
- Easier to revert a single subtask if it causes issues downstream
- Matches the TDD loop's natural checkpoint and cognitive boundary
- Provides resumability points if the run is interrupted

**Trade-off**: More commits per task (can use squash-merge in PRs if desired)

### Why not support parallel subtask execution?

**Decision**: Sequential subtask execution in Phase 1; parallel execution deferred to Phase 3.

**Rationale**:
- Subtasks often have implicit dependencies (e.g., schema before endpoint, endpoint before UI)
- Simpler orchestrator state machine (less complexity = faster to ship)
- Parallel execution requires explicit dependency DAG and conflict resolution
- Can be added in Phase 3 once core workflow is proven stable

**Trade-off**: Slower for truly independent subtasks (mitigated by keeping subtasks small and focused)

### Why require 80% coverage by default?

**Decision**: Enforce 80% coverage threshold (lines/branches/functions/statements) before allowing commits.

**Rationale**:
- Industry standard baseline for production code quality
- Forces test generation to be comprehensive, not superficial
- Configurable per project via `.taskmaster/config.json` if too strict
- Prevents "green tests" that only test happy paths

**Trade-off**: May require more test generation iterations; can be lowered per project

### Why use tmux instead of a rich GUI?

**Decision**: MVP uses tmux split panes for TUI, not Electron/web-based GUI.

**Rationale**:
- Tmux is universally available on dev machines; no installation burden
- Terminal-first workflows match developer mental model (no context switching)
- Simpler to implement and maintain; can add GUI later via extensions
- State stored in files allows IDE/extension integration without coupling

**Trade-off**: Less visual polish than GUI; requires tmux familiarity

### Why not support multiple executors (codex/gemini/claude) in Phase 1?

**Decision**: Start with Claude executor only; add others in Phase 2+.

**Rationale**:
- Reduces scope and complexity for initial delivery
- Claude Code already integrated with existing executor service
- Executor abstraction already exists; adding more is straightforward later
- Different executors may need different prompt strategies (requires experimentation)

**Trade-off**: Users locked to Claude initially; can work around with manual executor selection

## Risks and Mitigations

- Model hallucination/large diffs: restrict prompt scope; enforce minimal changes; show diff previews (optional) before commit.

- Flaky tests: allow retries, isolate targeted runs for speed, then full suite before commit.

- Environment variability: detect runners/tools; provide fallbacks and actionable errors.

- PR creation fails: still push and print manual commands; persist PR body to reuse.

## Open Questions

1) Slugging rules for branch names: any length limits or normalization beyond sanitizing the {slug} token?

2) PR body standard sections beyond run report (e.g., checklist, coverage table)?

3) Default executor prompt fine-tuning once codex/gemini integration is available.

4) Where to store persistent TUI state (pane layout, last selection) in .taskmaster/state.json?

## Branch Naming

- Include both the tag and the task id in the branch name to make lineage explicit.

- Default pattern: <tag>/task-<id>[-slug] (e.g., master/task-12, analytics/task-4-user-auth).

- Configurable via .taskmaster/config.json: git.branchPattern supports tokens {tag}, {id}, {slug}.
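
A minimal sketch of token expansion for git.branchPattern; the slug normalization shown here (lowercase, hyphenate, 40-character cap) is an assumption, since slugging rules are still an open question above:

```typescript
// Illustrative token expansion; not a shipped implementation.
function expandBranchPattern(
  pattern: string,
  tag: string,
  id: string,
  title: string
): string {
  const slug = title
    .toLowerCase()
    .replace(/[^a-z0-9]+/g, '-') // runs of non-alphanumerics → single hyphen
    .replace(/^-+|-+$/g, '') // trim leading/trailing hyphens
    .slice(0, 40); // assumed length cap
  return pattern
    .replace('{tag}', tag)
    .replace('{id}', id)
    .replace('{slug}', slug);
}

// expandBranchPattern('{tag}/task-{id}-{slug}', 'analytics', '42', 'User metrics tracking')
// => 'analytics/task-42-user-metrics-tracking'
```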

## PR Base Branch

- Use the repository’s default branch (detected via git) unless overridden.

- Title format: Task #<id> [<tag>]: <title>.

## RPG Mapping (Repository Planning Graph)

Functional nodes (capabilities):

- Autopilot Orchestration → drives TDD loop and lifecycle

- Test Generation (Surgical) → produces failing tests from subtask context

- Test Execution + Coverage → runs suite, enforces thresholds

- Git/Branch/PR Management → safe operations and PR creation

- TUI/Terminal Integration → interactive control and visibility via tmux

- MCP Integration → structured task/status/context operations

Structural nodes (code organization):

- packages/tm-core:

  - services/workflow-orchestrator.ts (new)

  - services/test-runner-adapter.ts (new)

  - services/git-adapter.ts (new)

  - existing: task-service.ts, task-execution-service.ts, executors/*

- apps/cli:

  - src/commands/autopilot.command.ts (new)

  - src/ui/tui/ (new tmux/TUI helpers)

- scripts/modules:

  - reuse utils/git-utils.js, task-manager/tag-management.js

- .claude/agents/:

  - surgical-test-generator.md

Edges (data/control flow):

- Autopilot → Test Generation → Test Execution → Git Commit → loop

- Autopilot → Git Adapter (branch, tag, PR)

- Autopilot → TUI (event stream) → tmux pane control

- Autopilot → MCP tools for task/status updates

- Test Execution → Coverage gate → Autopilot decision

Topological traversal (implementation order):

1) Git/Test adapters (foundations)

2) Orchestrator skeleton + events

3) CLI autopilot command and dry-run

4) Surgical test-gen integration and execution gate

5) PR creation, run reports, resumability

## Phased Roadmap

- Phase 0: Spike

  - Implement CLI skeleton tm autopilot with dry-run showing planned steps from a real task + subtasks.

  - Detect test runner (package.json) and git state; render a preflight report.

- Phase 1: Core Rails (State Machine & Orchestration)

  - Implement WorkflowOrchestrator in tm-core as a **state machine** that tracks TDD phases per subtask.

  - Orchestrator **guides** the current AI session (Claude Code/MCP client) rather than executing code itself.

  - Add Git/Test adapters for status checks and validation (not direct execution).

  - WorkflowOrchestrator API:
    - `getNextWorkUnit(taskId)` → returns next phase to execute (RED/GREEN/COMMIT) with context
    - `completeWorkUnit(taskId, subtaskId, phase, result)` → records completion and advances state
    - `getRunState(taskId)` → returns current progress and resumability data

  - MCP integration: expose work unit endpoints so Claude Code can query "what to do next" and report back.

  - Branch/tag mapping via existing tag-management APIs.

  - Run report persisted under .taskmaster/reports/runs/ with state checkpoints for resumability.

- Phase 2: PR + Resumability

  - Add gh PR creation with well-formed body using the run report.

  - Introduce resumable checkpoints and --resume flag.

  - Add coverage enforcement and optional lint/format step.

- Phase 3: Extensibility + Guardrails

  - Add support for basic pytest/go test adapters.

  - Add safeguards: diff preview mode, manual confirm gates, aggressive minimal-change prompts.

  - Optional: small TUI panel and extension panel leveraging the same run state file.

## References (Repo)

- Test Workflow: .cursor/rules/test_workflow.mdc

- Git Workflow: .cursor/rules/git_workflow.mdc

- CLI: apps/cli/src/commands/start.command.ts, apps/cli/src/ui/components/*.ts

- Core Services: packages/tm-core/src/services/task-service.ts, task-execution-service.ts

- Executors: packages/tm-core/src/executors/*

- Git Utilities: scripts/modules/utils/git-utils.js

- Tag Management: scripts/modules/task-manager/tag-management.js

- Surgical Test Generator: .claude/agents/surgical-test-generator.md


```

--------------------------------------------------------------------------------
/tests/unit/dependency-manager.test.js:
--------------------------------------------------------------------------------

```javascript
/**
 * Dependency Manager module tests
 */

import { jest } from '@jest/globals';
import {
	sampleTasks,
	crossLevelDependencyTasks
} from '../fixtures/sample-tasks.js';

// Create mock functions that we can control in tests
const mockTaskExists = jest.fn();
const mockFormatTaskId = jest.fn();
const mockFindCycles = jest.fn();
const mockLog = jest.fn();
const mockReadJSON = jest.fn();
const mockWriteJSON = jest.fn();

// Mock the utils module using the same pattern as move-task-cross-tag.test.js
jest.mock('../../scripts/modules/utils.js', () => ({
	log: mockLog,
	readJSON: mockReadJSON,
	writeJSON: mockWriteJSON,
	taskExists: mockTaskExists,
	formatTaskId: mockFormatTaskId,
	findCycles: mockFindCycles,
	traverseDependencies: jest.fn(() => []),
	isSilentMode: jest.fn(() => true),
	findProjectRoot: jest.fn(() => '/test'),
	resolveEnvVariable: jest.fn(() => undefined),
	isEmpty: jest.fn((v) =>
		v == null
			? true
			: Array.isArray(v)
				? v.length === 0
				: typeof v === 'object'
					? Object.keys(v).length === 0
					: false
	),
	// Common extras
	enableSilentMode: jest.fn(),
	disableSilentMode: jest.fn(),
	getTaskManager: jest.fn(async () => ({})),
	getTagAwareFilePath: jest.fn((basePath, _tag, projectRoot = '.') => basePath),
	readComplexityReport: jest.fn(() => null)
}));

jest.mock('path');
jest.mock('chalk', () => ({
	green: jest.fn((text) => `<green>${text}</green>`),
	yellow: jest.fn((text) => `<yellow>${text}</yellow>`),
	red: jest.fn((text) => `<red>${text}</red>`),
	cyan: jest.fn((text) => `<cyan>${text}</cyan>`),
	bold: jest.fn((text) => `<bold>${text}</bold>`)
}));

jest.mock('boxen', () => jest.fn((text) => `[boxed: ${text}]`));

// Now import SUT after mocks are in place
import {
	validateTaskDependencies,
	isCircularDependency,
	removeDuplicateDependencies,
	cleanupSubtaskDependencies,
	ensureAtLeastOneIndependentSubtask,
	validateAndFixDependencies,
	canMoveWithDependencies
} from '../../scripts/modules/dependency-manager.js';

jest.mock('../../scripts/modules/ui.js', () => ({
	displayBanner: jest.fn()
}));

jest.mock('../../scripts/modules/task-manager.js', () => ({
	generateTaskFiles: jest.fn()
}));

// Use a temporary path for test assertions; writeJSON is mocked, so nothing is actually written to disk
const TEST_TASKS_PATH = '/tmp/jest-test-tasks.json';

describe('Dependency Manager Module', () => {
	beforeEach(() => {
		jest.clearAllMocks();

		// Set default implementations
		mockTaskExists.mockImplementation((tasks, id) => {
			if (Array.isArray(tasks)) {
				if (typeof id === 'string' && id.includes('.')) {
					const [taskId, subtaskId] = id.split('.').map(Number);
					const task = tasks.find((t) => t.id === taskId);
					return (
						task &&
						task.subtasks &&
						task.subtasks.some((st) => st.id === subtaskId)
					);
				}
				return tasks.some(
					(task) => task.id === (typeof id === 'string' ? parseInt(id, 10) : id)
				);
			}
			return false;
		});

		mockFormatTaskId.mockImplementation((id) => {
			if (typeof id === 'string' && id.includes('.')) {
				return id;
			}
			return parseInt(id, 10);
		});

		mockFindCycles.mockImplementation((tasks) => {
			// Simplified cycle detection for testing
			const dependencyMap = new Map();

			// Build dependency map
			tasks.forEach((task) => {
				if (task.dependencies) {
					dependencyMap.set(task.id, task.dependencies);
				}
			});

			const visited = new Set();
			const recursionStack = new Set();

			function dfs(taskId) {
				visited.add(taskId);
				recursionStack.add(taskId);

				const dependencies = dependencyMap.get(taskId) || [];
				for (const depId of dependencies) {
					if (!visited.has(depId)) {
						if (dfs(depId)) return true;
					} else if (recursionStack.has(depId)) {
						return true;
					}
				}

				recursionStack.delete(taskId);
				return false;
			}

			// Check for cycles starting from each unvisited node
			for (const taskId of dependencyMap.keys()) {
				if (!visited.has(taskId)) {
					if (dfs(taskId)) return true;
				}
			}

			return false;
		});
	});

	describe('isCircularDependency function', () => {
		test('should detect a direct circular dependency', () => {
			const tasks = [
				{ id: 1, dependencies: [2] },
				{ id: 2, dependencies: [1] }
			];

			const result = isCircularDependency(tasks, 1);
			expect(result).toBe(true);
		});

		test('should detect an indirect circular dependency', () => {
			const tasks = [
				{ id: 1, dependencies: [2] },
				{ id: 2, dependencies: [3] },
				{ id: 3, dependencies: [1] }
			];

			const result = isCircularDependency(tasks, 1);
			expect(result).toBe(true);
		});

		test('should return false for non-circular dependencies', () => {
			const tasks = [
				{ id: 1, dependencies: [2] },
				{ id: 2, dependencies: [3] },
				{ id: 3, dependencies: [] }
			];

			const result = isCircularDependency(tasks, 1);
			expect(result).toBe(false);
		});

		test('should handle a task with no dependencies', () => {
			const tasks = [
				{ id: 1, dependencies: [] },
				{ id: 2, dependencies: [1] }
			];

			const result = isCircularDependency(tasks, 1);
			expect(result).toBe(false);
		});

		test('should handle a task depending on itself', () => {
			const tasks = [{ id: 1, dependencies: [1] }];

			const result = isCircularDependency(tasks, 1);
			expect(result).toBe(true);
		});

		test('should handle subtask dependencies correctly', () => {
			const tasks = [
				{
					id: 1,
					dependencies: [],
					subtasks: [
						{ id: 1, dependencies: ['1.2'] },
						{ id: 2, dependencies: ['1.3'] },
						{ id: 3, dependencies: ['1.1'] }
					]
				}
			];

			// This creates a circular dependency: 1.1 -> 1.2 -> 1.3 -> 1.1
			const result = isCircularDependency(tasks, '1.1', ['1.3', '1.2']);
			expect(result).toBe(true);
		});

		test('should allow non-circular subtask dependencies within same parent', () => {
			const tasks = [
				{
					id: 1,
					dependencies: [],
					subtasks: [
						{ id: 1, dependencies: [] },
						{ id: 2, dependencies: ['1.1'] },
						{ id: 3, dependencies: ['1.2'] }
					]
				}
			];

			// This is a valid dependency chain: 1.3 -> 1.2 -> 1.1
			const result = isCircularDependency(tasks, '1.1', []);
			expect(result).toBe(false);
		});

		test('should properly handle dependencies between subtasks of the same parent', () => {
			const tasks = [
				{
					id: 1,
					dependencies: [],
					subtasks: [
						{ id: 1, dependencies: [] },
						{ id: 2, dependencies: ['1.1'] },
						{ id: 3, dependencies: [] }
					]
				}
			];

			// Check if adding a dependency from subtask 1.3 to 1.2 creates a circular dependency
			// This should be false as 1.3 -> 1.2 -> 1.1 is a valid chain
			mockTaskExists.mockImplementation(() => true);
			const result = isCircularDependency(tasks, '1.3', ['1.2']);
			expect(result).toBe(false);
		});

		test('should correctly detect circular dependencies in subtasks of the same parent', () => {
			const tasks = [
				{
					id: 1,
					dependencies: [],
					subtasks: [
						{ id: 1, dependencies: ['1.3'] },
						{ id: 2, dependencies: ['1.1'] },
						{ id: 3, dependencies: ['1.2'] }
					]
				}
			];

			// This creates a circular dependency: 1.1 -> 1.3 -> 1.2 -> 1.1
			mockTaskExists.mockImplementation(() => true);
			const result = isCircularDependency(tasks, '1.2', ['1.1']);
			expect(result).toBe(true);
		});
	});

	describe('validateTaskDependencies function', () => {
		test('should detect missing dependencies', () => {
			const tasks = [
				{ id: 1, dependencies: [99] }, // 99 doesn't exist
				{ id: 2, dependencies: [1] }
			];

			const result = validateTaskDependencies(tasks);

			expect(result.valid).toBe(false);
			expect(result.issues.length).toBeGreaterThan(0);
			expect(result.issues[0].type).toBe('missing');
			expect(result.issues[0].taskId).toBe(1);
			expect(result.issues[0].dependencyId).toBe(99);
		});

		test('should detect circular dependencies', () => {
			const tasks = [
				{ id: 1, dependencies: [2] },
				{ id: 2, dependencies: [1] }
			];

			const result = validateTaskDependencies(tasks);

			expect(result.valid).toBe(false);
			expect(result.issues.some((issue) => issue.type === 'circular')).toBe(
				true
			);
		});

		test('should detect self-dependencies', () => {
			const tasks = [{ id: 1, dependencies: [1] }];

			const result = validateTaskDependencies(tasks);

			expect(result.valid).toBe(false);
			expect(
				result.issues.some(
					(issue) => issue.type === 'self' && issue.taskId === 1
				)
			).toBe(true);
		});

		test('should return valid for correct dependencies', () => {
			const tasks = [
				{ id: 1, dependencies: [] },
				{ id: 2, dependencies: [1] },
				{ id: 3, dependencies: [1, 2] }
			];

			const result = validateTaskDependencies(tasks);

			expect(result.valid).toBe(true);
			expect(result.issues.length).toBe(0);
		});

		test('should handle tasks with no dependencies property', () => {
			const tasks = [
				{ id: 1 }, // Missing dependencies property
				{ id: 2, dependencies: [1] }
			];

			const result = validateTaskDependencies(tasks);

			// Should be valid since a missing dependencies property is interpreted as an empty array
			expect(result.valid).toBe(true);
		});

		test('should handle subtask dependencies correctly', () => {
			const tasks = [
				{
					id: 1,
					dependencies: [],
					subtasks: [
						{ id: 1, dependencies: [] },
						{ id: 2, dependencies: ['1.1'] }, // Valid - depends on another subtask
						{ id: 3, dependencies: ['1.2'] } // Valid - depends on another subtask
					]
				},
				{
					id: 2,
					dependencies: ['1.3'], // Valid - depends on a subtask from task 1
					subtasks: []
				}
			];

			// Set up mock to handle subtask validation
			mockTaskExists.mockImplementation((tasks, id) => {
				if (typeof id === 'string' && id.includes('.')) {
					const [taskId, subtaskId] = id.split('.').map(Number);
					const task = tasks.find((t) => t.id === taskId);
					return (
						task &&
						task.subtasks &&
						task.subtasks.some((st) => st.id === subtaskId)
					);
				}
				return tasks.some((task) => task.id === parseInt(id, 10));
			});

			const result = validateTaskDependencies(tasks);

			expect(result.valid).toBe(true);
			expect(result.issues.length).toBe(0);
		});

		test('should detect missing subtask dependencies', () => {
			const tasks = [
				{
					id: 1,
					dependencies: [],
					subtasks: [
						{ id: 1, dependencies: ['1.4'] }, // Invalid - subtask 4 doesn't exist
						{ id: 2, dependencies: ['2.1'] } // Invalid - task 2 has no subtasks
					]
				},
				{
					id: 2,
					dependencies: [],
					subtasks: []
				}
			];

			// Mock taskExists to correctly identify missing subtasks
			mockTaskExists.mockImplementation((taskArray, depId) => {
				if (typeof depId === 'string' && depId === '1.4') {
					return false; // Subtask 1.4 doesn't exist
				}
				if (typeof depId === 'string' && depId === '2.1') {
					return false; // Subtask 2.1 doesn't exist
				}
				return true; // All other dependencies exist
			});

			const result = validateTaskDependencies(tasks);

			expect(result.valid).toBe(false);
			expect(result.issues.length).toBeGreaterThan(0);
			// Should detect missing subtask dependencies
			expect(
				result.issues.some(
					(issue) =>
						issue.type === 'missing' &&
						String(issue.taskId) === '1.1' &&
						String(issue.dependencyId) === '1.4'
				)
			).toBe(true);
		});

		test('should detect circular dependencies between subtasks', () => {
			const tasks = [
				{
					id: 1,
					dependencies: [],
					subtasks: [
						{ id: 1, dependencies: ['1.2'] },
						{ id: 2, dependencies: ['1.1'] } // Creates a circular dependency with 1.1
					]
				}
			];

			// Mock isCircularDependency for subtasks
			mockFindCycles.mockReturnValue(true);

			const result = validateTaskDependencies(tasks);

			expect(result.valid).toBe(false);
			expect(result.issues.some((issue) => issue.type === 'circular')).toBe(
				true
			);
		});

		test('should properly validate dependencies between subtasks of the same parent', () => {
			const tasks = [
				{
					id: 23,
					dependencies: [],
					subtasks: [
						{ id: 8, dependencies: ['23.13'] },
						{ id: 10, dependencies: ['23.8'] },
						{ id: 13, dependencies: [] }
					]
				}
			];

			// Mock taskExists to validate the subtask dependencies
			mockTaskExists.mockImplementation((taskArray, id) => {
				if (typeof id === 'string') {
					if (id === '23.8' || id === '23.10' || id === '23.13') {
						return true;
					}
				}
				return false;
			});

			const result = validateTaskDependencies(tasks);

			expect(result.valid).toBe(true);
			expect(result.issues.length).toBe(0);
		});
	});

	describe('removeDuplicateDependencies function', () => {
		test('should remove duplicate dependencies from tasks', () => {
			const tasksData = {
				tasks: [
					{ id: 1, dependencies: [2, 2, 3, 3, 3] },
					{ id: 2, dependencies: [3] },
					{ id: 3, dependencies: [] }
				]
			};

			const result = removeDuplicateDependencies(tasksData);

			expect(result.tasks[0].dependencies).toEqual([2, 3]);
			expect(result.tasks[1].dependencies).toEqual([3]);
			expect(result.tasks[2].dependencies).toEqual([]);
		});

		test('should handle empty dependencies array', () => {
			const tasksData = {
				tasks: [
					{ id: 1, dependencies: [] },
					{ id: 2, dependencies: [1] }
				]
			};

			const result = removeDuplicateDependencies(tasksData);

			expect(result.tasks[0].dependencies).toEqual([]);
			expect(result.tasks[1].dependencies).toEqual([1]);
		});

		test('should handle tasks with no dependencies property', () => {
			const tasksData = {
				tasks: [
					{ id: 1 }, // No dependencies property
					{ id: 2, dependencies: [1] }
				]
			};

			const result = removeDuplicateDependencies(tasksData);

			expect(result.tasks[0]).not.toHaveProperty('dependencies');
			expect(result.tasks[1].dependencies).toEqual([1]);
		});
	});

	describe('cleanupSubtaskDependencies function', () => {
		test('should remove dependencies to non-existent subtasks', () => {
			const tasksData = {
				tasks: [
					{
						id: 1,
						dependencies: [],
						subtasks: [
							{ id: 1, dependencies: [] },
							{ id: 2, dependencies: [3] } // Dependency 3 doesn't exist
						]
					},
					{
						id: 2,
						dependencies: ['1.2'], // Valid subtask dependency
						subtasks: [
							{ id: 1, dependencies: ['1.1'] } // Valid subtask dependency
						]
					}
				]
			};

			const result = cleanupSubtaskDependencies(tasksData);

			// Should remove the invalid dependency to subtask 3
			expect(result.tasks[0].subtasks[1].dependencies).toEqual([]);
			// Should keep valid dependencies
			expect(result.tasks[1].dependencies).toEqual(['1.2']);
			expect(result.tasks[1].subtasks[0].dependencies).toEqual(['1.1']);
		});

		test('should handle tasks without subtasks', () => {
			const tasksData = {
				tasks: [
					{ id: 1, dependencies: [] },
					{ id: 2, dependencies: [1] }
				]
			};

			const result = cleanupSubtaskDependencies(tasksData);

			// Should return the original data unchanged
			expect(result).toEqual(tasksData);
		});
	});

	describe('ensureAtLeastOneIndependentSubtask function', () => {
		test('should clear dependencies of first subtask if none are independent', () => {
			const tasksData = {
				tasks: [
					{
						id: 1,
						subtasks: [
							{ id: 1, dependencies: [2] },
							{ id: 2, dependencies: [1] }
						]
					}
				]
			};

			const result = ensureAtLeastOneIndependentSubtask(tasksData);

			expect(result).toBe(true);
			expect(tasksData.tasks[0].subtasks[0].dependencies).toEqual([]);
			expect(tasksData.tasks[0].subtasks[1].dependencies).toEqual([1]);
		});

		test('should not modify tasks if at least one subtask is independent', () => {
			const tasksData = {
				tasks: [
					{
						id: 1,
						subtasks: [
							{ id: 1, dependencies: [] },
							{ id: 2, dependencies: [1] }
						]
					}
				]
			};

			const result = ensureAtLeastOneIndependentSubtask(tasksData);

			expect(result).toBe(false);
			expect(tasksData.tasks[0].subtasks[0].dependencies).toEqual([]);
			expect(tasksData.tasks[0].subtasks[1].dependencies).toEqual([1]);
		});

		test('should handle tasks without subtasks', () => {
			const tasksData = {
				tasks: [{ id: 1 }, { id: 2, dependencies: [1] }]
			};

			const result = ensureAtLeastOneIndependentSubtask(tasksData);

			expect(result).toBe(false);
			expect(tasksData).toEqual({
				tasks: [{ id: 1 }, { id: 2, dependencies: [1] }]
			});
		});

		test('should handle empty subtasks array', () => {
			const tasksData = {
				tasks: [{ id: 1, subtasks: [] }]
			};

			const result = ensureAtLeastOneIndependentSubtask(tasksData);

			expect(result).toBe(false);
			expect(tasksData).toEqual({
				tasks: [{ id: 1, subtasks: [] }]
			});
		});
	});

	describe('validateAndFixDependencies function', () => {
		test('should fix multiple dependency issues and return true if changes made', () => {
			const tasksData = {
				tasks: [
					{
						id: 1,
						dependencies: [1, 1, 99], // Self-dependency and duplicate and invalid dependency
						subtasks: [
							{ id: 1, dependencies: [2, 2] }, // Duplicate dependencies
							{ id: 2, dependencies: [1] }
						]
					},
					{
						id: 2,
						dependencies: [1],
						subtasks: [
							{ id: 1, dependencies: [99] } // Invalid dependency
						]
					}
				]
			};

			// Mock taskExists for validating dependencies
			mockTaskExists.mockImplementation((tasks, id) => {
				// Convert id to string for comparison
				const idStr = String(id);

				// Handle subtask references (e.g., "1.2")
				if (idStr.includes('.')) {
					const [parentId, subtaskId] = idStr.split('.').map(Number);
					const task = tasks.find((t) => t.id === parentId);
					return (
						task &&
						task.subtasks &&
						task.subtasks.some((st) => st.id === subtaskId)
					);
				}

				// Handle regular task references
				const taskId = parseInt(idStr, 10);
				return taskId === 1 || taskId === 2; // Only tasks 1 and 2 exist
			});

			// Make a copy for verification that original is modified
			const originalData = JSON.parse(JSON.stringify(tasksData));

			const result = validateAndFixDependencies(tasksData);

			expect(result).toBe(true);
			// Check that data has been modified
			expect(tasksData).not.toEqual(originalData);

			// Check specific changes
			// 1. Self-dependency removed
			expect(tasksData.tasks[0].dependencies).not.toContain(1);
			// 2. Invalid dependency removed
			expect(tasksData.tasks[0].dependencies).not.toContain(99);
			// 3. Duplicate dependencies removed (no repeated entries remain)
			const subtaskDeps = tasksData.tasks[0].subtasks[0].dependencies;
			expect(new Set(subtaskDeps).size).toBe(subtaskDeps.length);
			// 4. Invalid subtask dependency removed
			expect(tasksData.tasks[1].subtasks[0].dependencies).toEqual([]);

			// IMPORTANT: Verify no calls to writeJSON with actual tasks.json
			expect(mockWriteJSON).not.toHaveBeenCalledWith(
				'tasks/tasks.json',
				expect.anything(),
				expect.anything(),
				expect.anything()
			);
		});

		test('should return false if no changes needed', () => {
			const tasksData = {
				tasks: [
					{
						id: 1,
						dependencies: [],
						subtasks: [
							{ id: 1, dependencies: [] }, // Already has an independent subtask
							{ id: 2, dependencies: ['1.1'] }
						]
					},
					{
						id: 2,
						dependencies: [1]
					}
				]
			};

			// Mock taskExists to validate all dependencies as valid
			mockTaskExists.mockImplementation((tasks, id) => {
				// Convert id to string for comparison
				const idStr = String(id);

				// Handle subtask references
				if (idStr.includes('.')) {
					const [parentId, subtaskId] = idStr.split('.').map(Number);
					const task = tasks.find((t) => t.id === parentId);
					return (
						task &&
						task.subtasks &&
						task.subtasks.some((st) => st.id === subtaskId)
					);
				}

				// Handle regular task references
				const taskId = parseInt(idStr, 10);
				return taskId === 1 || taskId === 2;
			});

			const originalData = JSON.parse(JSON.stringify(tasksData));
			const result = validateAndFixDependencies(tasksData);

			expect(result).toBe(false);
			// Verify data is unchanged
			expect(tasksData).toEqual(originalData);

			// IMPORTANT: Verify no calls to writeJSON with actual tasks.json
			expect(mockWriteJSON).not.toHaveBeenCalledWith(
				'tasks/tasks.json',
				expect.anything(),
				expect.anything(),
				expect.anything()
			);
		});

		test('should handle invalid input', () => {
			expect(validateAndFixDependencies(null)).toBe(false);
			expect(validateAndFixDependencies({})).toBe(false);
			expect(validateAndFixDependencies({ tasks: null })).toBe(false);
			expect(validateAndFixDependencies({ tasks: 'not an array' })).toBe(false);

			// IMPORTANT: Verify no calls to writeJSON with actual tasks.json
			expect(mockWriteJSON).not.toHaveBeenCalledWith(
				'tasks/tasks.json',
				expect.anything(),
				expect.anything(),
				expect.anything()
			);
		});

		test('should save changes when tasksPath is provided', () => {
			const tasksData = {
				tasks: [
					{
						id: 1,
						dependencies: [1, 1], // Self-dependency and duplicate
						subtasks: [
							{ id: 1, dependencies: [99] } // Invalid dependency
						]
					}
				]
			};

			// Mock taskExists for this specific test
			mockTaskExists.mockImplementation((tasks, id) => {
				// Convert id to string for comparison
				const idStr = String(id);

				// Handle subtask references
				if (idStr.includes('.')) {
					const [parentId, subtaskId] = idStr.split('.').map(Number);
					const task = tasks.find((t) => t.id === parentId);
					return (
						task &&
						task.subtasks &&
						task.subtasks.some((st) => st.id === subtaskId)
					);
				}

				// Handle regular task references
				const taskId = parseInt(idStr, 10);
				return taskId === 1; // Only task 1 exists
			});

			// Copy the original data to verify changes
			const originalData = JSON.parse(JSON.stringify(tasksData));

			// Call the function with our test path instead of the actual tasks.json
			const result = validateAndFixDependencies(tasksData, TEST_TASKS_PATH);

			// First verify that the result is true (changes were made)
			expect(result).toBe(true);

			// Verify the data was modified
			expect(tasksData).not.toEqual(originalData);

			// IMPORTANT: Verify no calls to writeJSON with actual tasks.json
			expect(mockWriteJSON).not.toHaveBeenCalledWith(
				'tasks/tasks.json',
				expect.anything(),
				expect.anything(),
				expect.anything()
			);
		});
	});

	describe('canMoveWithDependencies', () => {
		it('should return canMove: false when conflicts exist', () => {
			const allTasks = [
				{
					id: 1,
					tag: 'source',
					dependencies: [2],
					title: 'Task 1'
				},
				{
					id: 2,
					tag: 'other',
					dependencies: [],
					title: 'Task 2'
				}
			];

			const result = canMoveWithDependencies('1', 'source', 'target', allTasks);

			expect(result.canMove).toBe(false);
			expect(result.conflicts).toBeDefined();
			expect(result.conflicts.length).toBeGreaterThan(0);
			expect(result.dependentTaskIds).toBeDefined();
		});

		it('should return canMove: true when no conflicts exist', () => {
			const allTasks = [
				{
					id: 1,
					tag: 'source',
					dependencies: [],
					title: 'Task 1'
				},
				{
					id: 2,
					tag: 'target',
					dependencies: [],
					title: 'Task 2'
				}
			];

			const result = canMoveWithDependencies('1', 'source', 'target', allTasks);

			expect(result.canMove).toBe(true);
			expect(result.conflicts).toBeDefined();
			expect(result.conflicts.length).toBe(0);
			expect(result.dependentTaskIds).toBeDefined();
			expect(result.dependentTaskIds.length).toBe(0);
		});

		it('should handle subtask lookup correctly', () => {
			const allTasks = [
				{
					id: 1,
					tag: 'source',
					dependencies: [],
					title: 'Parent Task',
					subtasks: [
						{
							id: 1,
							dependencies: [2],
							title: 'Subtask 1'
						}
					]
				},
				{
					id: 2,
					tag: 'other',
					dependencies: [],
					title: 'Task 2'
				}
			];

			const result = canMoveWithDependencies(
				'1.1',
				'source',
				'target',
				allTasks
			);

			expect(result.canMove).toBe(false);
			expect(result.conflicts).toBeDefined();
			expect(result.conflicts.length).toBeGreaterThan(0);
		});

		it('should return error when task not found', () => {
			const allTasks = [
				{
					id: 1,
					tag: 'source',
					dependencies: [],
					title: 'Task 1'
				}
			];

			const result = canMoveWithDependencies(
				'999',
				'source',
				'target',
				allTasks
			);

			expect(result.canMove).toBe(false);
			expect(result.error).toBe('Task not found');
			expect(result.dependentTaskIds).toEqual([]);
			expect(result.conflicts).toEqual([]);
		});
	});

	describe('Cross-level dependency tests (Issue #542)', () => {
		let originalExit;

		beforeEach(async () => {
			// Ensure a fresh module instance so ESM mocks apply to dynamic imports
			jest.resetModules();
			originalExit = process.exit;
			process.exit = jest.fn();

			// For ESM dynamic imports, use the same pattern
			await jest.unstable_mockModule('../../scripts/modules/utils.js', () => ({
				log: mockLog,
				readJSON: mockReadJSON,
				writeJSON: mockWriteJSON,
				taskExists: mockTaskExists,
				formatTaskId: mockFormatTaskId,
				findCycles: mockFindCycles,
				traverseDependencies: jest.fn(() => []),
				isSilentMode: jest.fn(() => true),
				findProjectRoot: jest.fn(() => '/test'),
				resolveEnvVariable: jest.fn(() => undefined),
				isEmpty: jest.fn((v) =>
					v == null
						? true
						: Array.isArray(v)
							? v.length === 0
							: typeof v === 'object'
								? Object.keys(v).length === 0
								: false
				),
				enableSilentMode: jest.fn(),
				disableSilentMode: jest.fn(),
				getTaskManager: jest.fn(async () => ({})),
				getTagAwareFilePath: jest.fn(
					(basePath, _tag, projectRoot = '.') => basePath
				),
				readComplexityReport: jest.fn(() => null)
			}));

			// Also mock transitive imports to keep dependency surface minimal
			await jest.unstable_mockModule('../../scripts/modules/ui.js', () => ({
				displayBanner: jest.fn()
			}));
			await jest.unstable_mockModule(
				'../../scripts/modules/task-manager/generate-task-files.js',
				() => ({ default: jest.fn() })
			);
			// Set up test data that matches the issue report
			// Clone fixture data before each test to prevent mutation issues
			mockReadJSON.mockImplementation(() =>
				structuredClone(crossLevelDependencyTasks)
			);

			// Configure mockTaskExists to properly validate cross-level dependencies
			mockTaskExists.mockImplementation((tasks, taskId) => {
				if (typeof taskId === 'string' && taskId.includes('.')) {
					const [parentId, subtaskId] = taskId.split('.').map(Number);
					const task = tasks.find((t) => t.id === parentId);
					return (
						task &&
						task.subtasks &&
						task.subtasks.some((st) => st.id === subtaskId)
					);
				}

				const numericId =
					typeof taskId === 'string' ? parseInt(taskId, 10) : taskId;
				return tasks.some((task) => task.id === numericId);
			});

			mockFormatTaskId.mockImplementation((id) => {
				if (typeof id === 'string' && id.includes('.')) return id; // keep dot notation
				return parseInt(id, 10); // normalize top-level task IDs to number
			});
		});

		afterEach(() => {
			process.exit = originalExit;
		});

		test('should allow subtask to depend on top-level task', async () => {
			const { addDependency } = await import(
				'../../scripts/modules/dependency-manager.js'
			);

			// Test the specific scenario from Issue #542: subtask 2.2 depending on task 11
			await addDependency(TEST_TASKS_PATH, '2.2', 11, { projectRoot: '/test' });

			// Verify we wrote to the test path (and not the real tasks.json)
			expect(mockWriteJSON).toHaveBeenCalledWith(
				TEST_TASKS_PATH,
				expect.anything(),
				'/test',
				undefined
			);
			expect(mockWriteJSON).not.toHaveBeenCalledWith(
				'tasks/tasks.json',
				expect.anything(),
				expect.anything(),
				expect.anything()
			);
			// Get the specific write call for TEST_TASKS_PATH
			const writeCall = mockWriteJSON.mock.calls.find(
				([p]) => p === TEST_TASKS_PATH
			);
			expect(writeCall).toBeDefined();
			const savedData = writeCall[1];
			const parent2 = savedData.tasks.find((t) => t.id === 2);
			const subtask22 = parent2.subtasks.find((st) => st.id === 2);

			// Verify the dependency was actually added to subtask 2.2
			expect(subtask22.dependencies).toContain(11);
			// Also verify a success log was emitted
			const successCall = mockLog.mock.calls.find(
				([level]) => level === 'success'
			);
			expect(successCall).toBeDefined();
			expect(successCall[1]).toContain('2.2');
			expect(successCall[1]).toContain('11');
		});

		test('should allow top-level task to depend on subtask', async () => {
			const { addDependency } = await import(
				'../../scripts/modules/dependency-manager.js'
			);

			// Test reverse scenario: task 11 depending on subtask 2.1
			await addDependency(TEST_TASKS_PATH, 11, '2.1', { projectRoot: '/test' });

			// Stronger assertions for writeJSON call and locating the correct task
			expect(mockWriteJSON).toHaveBeenCalledWith(
				TEST_TASKS_PATH,
				expect.anything(),
				'/test',
				undefined
			);
			expect(mockWriteJSON).not.toHaveBeenCalledWith(
				'tasks/tasks.json',
				expect.anything(),
				expect.anything(),
				expect.anything()
			);
			const writeCall = mockWriteJSON.mock.calls.find(
				([p]) => p === TEST_TASKS_PATH
			);
			expect(writeCall).toBeDefined();
			const savedData = writeCall[1];
			const task11 = savedData.tasks.find((t) => t.id === 11);

			// Verify the dependency was actually added to task 11
			expect(task11.dependencies).toContain('2.1');
			// Verify a success log was emitted mentioning both task 11 and subtask 2.1
			const successCall = mockLog.mock.calls.find(
				([level]) => level === 'success'
			);
			expect(successCall).toBeDefined();
			expect(successCall[1]).toContain('11');
			expect(successCall[1]).toContain('2.1');
		});

		test('should properly validate cross-level dependencies exist', async () => {
			// Test that validation correctly identifies when a cross-level dependency target doesn't exist
			mockTaskExists.mockImplementation((tasks, taskId) => {
				// Simulate task 99 not existing
				if (taskId === '99' || taskId === 99) {
					return false;
				}

				if (typeof taskId === 'string' && taskId.includes('.')) {
					const [parentId, subtaskId] = taskId.split('.').map(Number);
					const task = tasks.find((t) => t.id === parentId);
					return Boolean(
						task?.subtasks?.some((st) => st.id === subtaskId)
					);
				}

				const numericId =
					typeof taskId === 'string' ? parseInt(taskId, 10) : taskId;
				return tasks.some((task) => task.id === numericId);
			});

			const { addDependency } = await import(
				'../../scripts/modules/dependency-manager.js'
			);

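			// Make the mocked process.exit throw so execution in the code under
			// test actually stops at the exit call; otherwise it would continue
			// past the validation failure and the rejection below could never fire.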
			const exitError = new Error('process.exit invoked');
			process.exit.mockImplementation(() => {
				throw exitError;
			});

			await expect(
				addDependency(TEST_TASKS_PATH, '2.2', 99, { projectRoot: '/test' })
			).rejects.toBe(exitError);

			expect(process.exit).toHaveBeenCalledWith(1);
			expect(mockWriteJSON).not.toHaveBeenCalled();
			// Verify that an error was reported to the user
			expect(mockLog).toHaveBeenCalled();
		});

		test('should remove top-level task dependency from a subtask', async () => {
			const { addDependency, removeDependency } = await import(
				'../../scripts/modules/dependency-manager.js'
			);

			// Start from the cloned fixture data and add task 11 as a dependency of subtask 2.2
			await addDependency(TEST_TASKS_PATH, '2.2', 11, { projectRoot: '/test' });

			// Get the saved data from the add operation
			const addWriteCall = mockWriteJSON.mock.calls.find(
				([p]) => p === TEST_TASKS_PATH
			);
			expect(addWriteCall).toBeDefined();
			const dataWithDep = addWriteCall[1];

			// Verify the dependency was added
			const subtask22AfterAdd = dataWithDep.tasks
				.find((t) => t.id === 2)
				.subtasks.find((st) => st.id === 2);
			expect(subtask22AfterAdd.dependencies).toContain(11);

			// Clear mocks and re-setup mockReadJSON with the modified data
			jest.clearAllMocks();
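			// structuredClone hands each readJSON call its own deep copy, so the
			// remove operation below cannot mutate the snapshot captured above.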
			mockReadJSON.mockImplementation(() => structuredClone(dataWithDep));

			await removeDependency(TEST_TASKS_PATH, '2.2', 11, {
				projectRoot: '/test'
			});

			const writeCall = mockWriteJSON.mock.calls.find(
				([p]) => p === TEST_TASKS_PATH
			);
			expect(writeCall).toBeDefined();
			const saved = writeCall[1];
			const subtask22 = saved.tasks
				.find((t) => t.id === 2)
				.subtasks.find((st) => st.id === 2);
			expect(subtask22.dependencies).not.toContain(11);
			// Verify success log was emitted
			const successCall = mockLog.mock.calls.find(
				([level]) => level === 'success'
			);
			expect(successCall).toBeDefined();
			expect(successCall[1]).toContain('2.2');
			expect(successCall[1]).toContain('11');
		});

		test('should remove subtask dependency from a top-level task', async () => {
			const { addDependency, removeDependency } = await import(
				'../../scripts/modules/dependency-manager.js'
			);

			// Add subtask dependency to task 11
			await addDependency(TEST_TASKS_PATH, 11, '2.1', { projectRoot: '/test' });

			// Get the saved data from the add operation
			const addWriteCall = mockWriteJSON.mock.calls.find(
				([p]) => p === TEST_TASKS_PATH
			);
			expect(addWriteCall).toBeDefined();
			const dataWithDep = addWriteCall[1];

			// Verify the dependency was added
			const task11AfterAdd = dataWithDep.tasks.find((t) => t.id === 11);
			expect(task11AfterAdd.dependencies).toContain('2.1');

			// Clear mocks and re-setup mockReadJSON with the modified data
			jest.clearAllMocks();
			mockReadJSON.mockImplementation(() => structuredClone(dataWithDep));

			await removeDependency(TEST_TASKS_PATH, 11, '2.1', {
				projectRoot: '/test'
			});

			const writeCall = mockWriteJSON.mock.calls.find(
				([p]) => p === TEST_TASKS_PATH
			);
			expect(writeCall).toBeDefined();
			const saved = writeCall[1];
			const task11 = saved.tasks.find((t) => t.id === 11);
			expect(task11.dependencies).not.toContain('2.1');
			// Verify success log was emitted
			const successCall = mockLog.mock.calls.find(
				([level]) => level === 'success'
			);
			expect(successCall).toBeDefined();
			expect(successCall[1]).toContain('11');
			expect(successCall[1]).toContain('2.1');
		});
	});
});

```