tokens: 48679/50000 13/975 files (page 22/50)
This is page 22 of 50. Use http://codebase.md/eyaltoledano/claude-task-master?lines=false&page={x} to view the full context.

# Directory Structure

```
├── .changeset
│   ├── config.json
│   └── README.md
├── .claude
│   ├── commands
│   │   └── dedupe.md
│   └── TM_COMMANDS_GUIDE.md
├── .claude-plugin
│   └── marketplace.json
├── .coderabbit.yaml
├── .cursor
│   ├── mcp.json
│   └── rules
│       ├── ai_providers.mdc
│       ├── ai_services.mdc
│       ├── architecture.mdc
│       ├── changeset.mdc
│       ├── commands.mdc
│       ├── context_gathering.mdc
│       ├── cursor_rules.mdc
│       ├── dependencies.mdc
│       ├── dev_workflow.mdc
│       ├── git_workflow.mdc
│       ├── glossary.mdc
│       ├── mcp.mdc
│       ├── new_features.mdc
│       ├── self_improve.mdc
│       ├── tags.mdc
│       ├── taskmaster.mdc
│       ├── tasks.mdc
│       ├── telemetry.mdc
│       ├── test_workflow.mdc
│       ├── tests.mdc
│       ├── ui.mdc
│       └── utilities.mdc
├── .cursorignore
├── .env.example
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── enhancements---feature-requests.md
│   │   └── feedback.md
│   ├── PULL_REQUEST_TEMPLATE
│   │   ├── bugfix.md
│   │   ├── config.yml
│   │   ├── feature.md
│   │   └── integration.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── scripts
│   │   ├── auto-close-duplicates.mjs
│   │   ├── backfill-duplicate-comments.mjs
│   │   ├── check-pre-release-mode.mjs
│   │   ├── parse-metrics.mjs
│   │   ├── release.mjs
│   │   ├── tag-extension.mjs
│   │   ├── utils.mjs
│   │   └── validate-changesets.mjs
│   └── workflows
│       ├── auto-close-duplicates.yml
│       ├── backfill-duplicate-comments.yml
│       ├── ci.yml
│       ├── claude-dedupe-issues.yml
│       ├── claude-docs-trigger.yml
│       ├── claude-docs-updater.yml
│       ├── claude-issue-triage.yml
│       ├── claude.yml
│       ├── extension-ci.yml
│       ├── extension-release.yml
│       ├── log-issue-events.yml
│       ├── pre-release.yml
│       ├── release-check.yml
│       ├── release.yml
│       ├── update-models-md.yml
│       └── weekly-metrics-discord.yml
├── .gitignore
├── .kiro
│   ├── hooks
│   │   ├── tm-code-change-task-tracker.kiro.hook
│   │   ├── tm-complexity-analyzer.kiro.hook
│   │   ├── tm-daily-standup-assistant.kiro.hook
│   │   ├── tm-git-commit-task-linker.kiro.hook
│   │   ├── tm-pr-readiness-checker.kiro.hook
│   │   ├── tm-task-dependency-auto-progression.kiro.hook
│   │   └── tm-test-success-task-completer.kiro.hook
│   ├── settings
│   │   └── mcp.json
│   └── steering
│       ├── dev_workflow.md
│       ├── kiro_rules.md
│       ├── self_improve.md
│       ├── taskmaster_hooks_workflow.md
│       └── taskmaster.md
├── .manypkg.json
├── .mcp.json
├── .npmignore
├── .nvmrc
├── .taskmaster
│   ├── CLAUDE.md
│   ├── config.json
│   ├── docs
│   │   ├── autonomous-tdd-git-workflow.md
│   │   ├── MIGRATION-ROADMAP.md
│   │   ├── prd-tm-start.txt
│   │   ├── prd.txt
│   │   ├── README.md
│   │   ├── research
│   │   │   ├── 2025-06-14_how-can-i-improve-the-scope-up-and-scope-down-comm.md
│   │   │   ├── 2025-06-14_should-i-be-using-any-specific-libraries-for-this.md
│   │   │   ├── 2025-06-14_test-save-functionality.md
│   │   │   ├── 2025-06-14_test-the-fix-for-duplicate-saves-final-test.md
│   │   │   └── 2025-08-01_do-we-need-to-add-new-commands-or-can-we-just-weap.md
│   │   ├── task-template-importing-prd.txt
│   │   ├── tdd-workflow-phase-0-spike.md
│   │   ├── tdd-workflow-phase-1-core-rails.md
│   │   ├── tdd-workflow-phase-1-orchestrator.md
│   │   ├── tdd-workflow-phase-2-pr-resumability.md
│   │   ├── tdd-workflow-phase-3-extensibility-guardrails.md
│   │   ├── test-prd.txt
│   │   └── tm-core-phase-1.txt
│   ├── reports
│   │   ├── task-complexity-report_autonomous-tdd-git-workflow.json
│   │   ├── task-complexity-report_cc-kiro-hooks.json
│   │   ├── task-complexity-report_tdd-phase-1-core-rails.json
│   │   ├── task-complexity-report_tdd-workflow-phase-0.json
│   │   ├── task-complexity-report_test-prd-tag.json
│   │   ├── task-complexity-report_tm-core-phase-1.json
│   │   ├── task-complexity-report.json
│   │   └── tm-core-complexity.json
│   ├── state.json
│   ├── tasks
│   │   ├── task_001_tm-start.txt
│   │   ├── task_002_tm-start.txt
│   │   ├── task_003_tm-start.txt
│   │   ├── task_004_tm-start.txt
│   │   ├── task_007_tm-start.txt
│   │   └── tasks.json
│   └── templates
│       ├── example_prd_rpg.md
│       └── example_prd.md
├── .vscode
│   ├── extensions.json
│   └── settings.json
├── apps
│   ├── cli
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── src
│   │   │   ├── command-registry.ts
│   │   │   ├── commands
│   │   │   │   ├── auth.command.ts
│   │   │   │   ├── autopilot
│   │   │   │   │   ├── abort.command.ts
│   │   │   │   │   ├── commit.command.ts
│   │   │   │   │   ├── complete.command.ts
│   │   │   │   │   ├── index.ts
│   │   │   │   │   ├── next.command.ts
│   │   │   │   │   ├── resume.command.ts
│   │   │   │   │   ├── shared.ts
│   │   │   │   │   ├── start.command.ts
│   │   │   │   │   └── status.command.ts
│   │   │   │   ├── briefs.command.ts
│   │   │   │   ├── context.command.ts
│   │   │   │   ├── export.command.ts
│   │   │   │   ├── list.command.ts
│   │   │   │   ├── models
│   │   │   │   │   ├── custom-providers.ts
│   │   │   │   │   ├── fetchers.ts
│   │   │   │   │   ├── index.ts
│   │   │   │   │   ├── prompts.ts
│   │   │   │   │   ├── setup.ts
│   │   │   │   │   └── types.ts
│   │   │   │   ├── next.command.ts
│   │   │   │   ├── set-status.command.ts
│   │   │   │   ├── show.command.ts
│   │   │   │   ├── start.command.ts
│   │   │   │   └── tags.command.ts
│   │   │   ├── index.ts
│   │   │   ├── lib
│   │   │   │   └── model-management.ts
│   │   │   ├── types
│   │   │   │   └── tag-management.d.ts
│   │   │   ├── ui
│   │   │   │   ├── components
│   │   │   │   │   ├── cardBox.component.ts
│   │   │   │   │   ├── dashboard.component.ts
│   │   │   │   │   ├── header.component.ts
│   │   │   │   │   ├── index.ts
│   │   │   │   │   ├── next-task.component.ts
│   │   │   │   │   ├── suggested-steps.component.ts
│   │   │   │   │   └── task-detail.component.ts
│   │   │   │   ├── display
│   │   │   │   │   ├── messages.ts
│   │   │   │   │   └── tables.ts
│   │   │   │   ├── formatters
│   │   │   │   │   ├── complexity-formatters.ts
│   │   │   │   │   ├── dependency-formatters.ts
│   │   │   │   │   ├── priority-formatters.ts
│   │   │   │   │   ├── status-formatters.spec.ts
│   │   │   │   │   └── status-formatters.ts
│   │   │   │   ├── index.ts
│   │   │   │   └── layout
│   │   │   │       ├── helpers.spec.ts
│   │   │   │       └── helpers.ts
│   │   │   └── utils
│   │   │       ├── auth-helpers.ts
│   │   │       ├── auto-update.ts
│   │   │       ├── brief-selection.ts
│   │   │       ├── display-helpers.ts
│   │   │       ├── error-handler.ts
│   │   │       ├── index.ts
│   │   │       ├── project-root.ts
│   │   │       ├── task-status.ts
│   │   │       ├── ui.spec.ts
│   │   │       └── ui.ts
│   │   ├── tests
│   │   │   ├── integration
│   │   │   │   └── commands
│   │   │   │       └── autopilot
│   │   │   │           └── workflow.test.ts
│   │   │   └── unit
│   │   │       ├── commands
│   │   │       │   ├── autopilot
│   │   │       │   │   └── shared.test.ts
│   │   │       │   ├── list.command.spec.ts
│   │   │       │   └── show.command.spec.ts
│   │   │       └── ui
│   │   │           └── dashboard.component.spec.ts
│   │   ├── tsconfig.json
│   │   └── vitest.config.ts
│   ├── docs
│   │   ├── archive
│   │   │   ├── ai-client-utils-example.mdx
│   │   │   ├── ai-development-workflow.mdx
│   │   │   ├── command-reference.mdx
│   │   │   ├── configuration.mdx
│   │   │   ├── cursor-setup.mdx
│   │   │   ├── examples.mdx
│   │   │   └── Installation.mdx
│   │   ├── best-practices
│   │   │   ├── advanced-tasks.mdx
│   │   │   ├── configuration-advanced.mdx
│   │   │   └── index.mdx
│   │   ├── capabilities
│   │   │   ├── cli-root-commands.mdx
│   │   │   ├── index.mdx
│   │   │   ├── mcp.mdx
│   │   │   ├── rpg-method.mdx
│   │   │   └── task-structure.mdx
│   │   ├── CHANGELOG.md
│   │   ├── command-reference.mdx
│   │   ├── configuration.mdx
│   │   ├── docs.json
│   │   ├── favicon.svg
│   │   ├── getting-started
│   │   │   ├── api-keys.mdx
│   │   │   ├── contribute.mdx
│   │   │   ├── faq.mdx
│   │   │   └── quick-start
│   │   │       ├── configuration-quick.mdx
│   │   │       ├── execute-quick.mdx
│   │   │       ├── installation.mdx
│   │   │       ├── moving-forward.mdx
│   │   │       ├── prd-quick.mdx
│   │   │       ├── quick-start.mdx
│   │   │       ├── requirements.mdx
│   │   │       ├── rules-quick.mdx
│   │   │       └── tasks-quick.mdx
│   │   ├── introduction.mdx
│   │   ├── licensing.md
│   │   ├── logo
│   │   │   ├── dark.svg
│   │   │   ├── light.svg
│   │   │   └── task-master-logo.png
│   │   ├── package.json
│   │   ├── README.md
│   │   ├── style.css
│   │   ├── tdd-workflow
│   │   │   ├── ai-agent-integration.mdx
│   │   │   └── quickstart.mdx
│   │   ├── vercel.json
│   │   └── whats-new.mdx
│   ├── extension
│   │   ├── .vscodeignore
│   │   ├── assets
│   │   │   ├── banner.png
│   │   │   ├── icon-dark.svg
│   │   │   ├── icon-light.svg
│   │   │   ├── icon.png
│   │   │   ├── screenshots
│   │   │   │   ├── kanban-board.png
│   │   │   │   └── task-details.png
│   │   │   └── sidebar-icon.svg
│   │   ├── CHANGELOG.md
│   │   ├── components.json
│   │   ├── docs
│   │   │   ├── extension-CI-setup.md
│   │   │   └── extension-development-guide.md
│   │   ├── esbuild.js
│   │   ├── LICENSE
│   │   ├── package.json
│   │   ├── package.mjs
│   │   ├── package.publish.json
│   │   ├── README.md
│   │   ├── src
│   │   │   ├── components
│   │   │   │   ├── ConfigView.tsx
│   │   │   │   ├── constants.ts
│   │   │   │   ├── TaskDetails
│   │   │   │   │   ├── AIActionsSection.tsx
│   │   │   │   │   ├── DetailsSection.tsx
│   │   │   │   │   ├── PriorityBadge.tsx
│   │   │   │   │   ├── SubtasksSection.tsx
│   │   │   │   │   ├── TaskMetadataSidebar.tsx
│   │   │   │   │   └── useTaskDetails.ts
│   │   │   │   ├── TaskDetailsView.tsx
│   │   │   │   ├── TaskMasterLogo.tsx
│   │   │   │   └── ui
│   │   │   │       ├── badge.tsx
│   │   │   │       ├── breadcrumb.tsx
│   │   │   │       ├── button.tsx
│   │   │   │       ├── card.tsx
│   │   │   │       ├── collapsible.tsx
│   │   │   │       ├── CollapsibleSection.tsx
│   │   │   │       ├── dropdown-menu.tsx
│   │   │   │       ├── label.tsx
│   │   │   │       ├── scroll-area.tsx
│   │   │   │       ├── separator.tsx
│   │   │   │       ├── shadcn-io
│   │   │   │       │   └── kanban
│   │   │   │       │       └── index.tsx
│   │   │   │       └── textarea.tsx
│   │   │   ├── extension.ts
│   │   │   ├── index.ts
│   │   │   ├── lib
│   │   │   │   └── utils.ts
│   │   │   ├── services
│   │   │   │   ├── config-service.ts
│   │   │   │   ├── error-handler.ts
│   │   │   │   ├── notification-preferences.ts
│   │   │   │   ├── polling-service.ts
│   │   │   │   ├── polling-strategies.ts
│   │   │   │   ├── sidebar-webview-manager.ts
│   │   │   │   ├── task-repository.ts
│   │   │   │   ├── terminal-manager.ts
│   │   │   │   └── webview-manager.ts
│   │   │   ├── test
│   │   │   │   └── extension.test.ts
│   │   │   ├── utils
│   │   │   │   ├── configManager.ts
│   │   │   │   ├── connectionManager.ts
│   │   │   │   ├── errorHandler.ts
│   │   │   │   ├── event-emitter.ts
│   │   │   │   ├── logger.ts
│   │   │   │   ├── mcpClient.ts
│   │   │   │   ├── notificationPreferences.ts
│   │   │   │   └── task-master-api
│   │   │   │       ├── cache
│   │   │   │       │   └── cache-manager.ts
│   │   │   │       ├── index.ts
│   │   │   │       ├── mcp-client.ts
│   │   │   │       ├── transformers
│   │   │   │       │   └── task-transformer.ts
│   │   │   │       └── types
│   │   │   │           └── index.ts
│   │   │   └── webview
│   │   │       ├── App.tsx
│   │   │       ├── components
│   │   │       │   ├── AppContent.tsx
│   │   │       │   ├── EmptyState.tsx
│   │   │       │   ├── ErrorBoundary.tsx
│   │   │       │   ├── PollingStatus.tsx
│   │   │       │   ├── PriorityBadge.tsx
│   │   │       │   ├── SidebarView.tsx
│   │   │       │   ├── TagDropdown.tsx
│   │   │       │   ├── TaskCard.tsx
│   │   │       │   ├── TaskEditModal.tsx
│   │   │       │   ├── TaskMasterKanban.tsx
│   │   │       │   ├── ToastContainer.tsx
│   │   │       │   └── ToastNotification.tsx
│   │   │       ├── constants
│   │   │       │   └── index.ts
│   │   │       ├── contexts
│   │   │       │   └── VSCodeContext.tsx
│   │   │       ├── hooks
│   │   │       │   ├── useTaskQueries.ts
│   │   │       │   ├── useVSCodeMessages.ts
│   │   │       │   └── useWebviewHeight.ts
│   │   │       ├── index.css
│   │   │       ├── index.tsx
│   │   │       ├── providers
│   │   │       │   └── QueryProvider.tsx
│   │   │       ├── reducers
│   │   │       │   └── appReducer.ts
│   │   │       ├── sidebar.tsx
│   │   │       ├── types
│   │   │       │   └── index.ts
│   │   │       └── utils
│   │   │           ├── logger.ts
│   │   │           └── toast.ts
│   │   └── tsconfig.json
│   └── mcp
│       ├── CHANGELOG.md
│       ├── package.json
│       ├── src
│       │   ├── index.ts
│       │   ├── shared
│       │   │   ├── types.ts
│       │   │   └── utils.ts
│       │   └── tools
│       │       ├── autopilot
│       │       │   ├── abort.tool.ts
│       │       │   ├── commit.tool.ts
│       │       │   ├── complete.tool.ts
│       │       │   ├── finalize.tool.ts
│       │       │   ├── index.ts
│       │       │   ├── next.tool.ts
│       │       │   ├── resume.tool.ts
│       │       │   ├── start.tool.ts
│       │       │   └── status.tool.ts
│       │       ├── README-ZOD-V3.md
│       │       └── tasks
│       │           ├── get-task.tool.ts
│       │           ├── get-tasks.tool.ts
│       │           └── index.ts
│       ├── tsconfig.json
│       └── vitest.config.ts
├── assets
│   ├── .windsurfrules
│   ├── AGENTS.md
│   ├── claude
│   │   └── TM_COMMANDS_GUIDE.md
│   ├── config.json
│   ├── env.example
│   ├── example_prd_rpg.txt
│   ├── example_prd.txt
│   ├── GEMINI.md
│   ├── gitignore
│   ├── kiro-hooks
│   │   ├── tm-code-change-task-tracker.kiro.hook
│   │   ├── tm-complexity-analyzer.kiro.hook
│   │   ├── tm-daily-standup-assistant.kiro.hook
│   │   ├── tm-git-commit-task-linker.kiro.hook
│   │   ├── tm-pr-readiness-checker.kiro.hook
│   │   ├── tm-task-dependency-auto-progression.kiro.hook
│   │   └── tm-test-success-task-completer.kiro.hook
│   ├── roocode
│   │   ├── .roo
│   │   │   ├── rules-architect
│   │   │   │   └── architect-rules
│   │   │   ├── rules-ask
│   │   │   │   └── ask-rules
│   │   │   ├── rules-code
│   │   │   │   └── code-rules
│   │   │   ├── rules-debug
│   │   │   │   └── debug-rules
│   │   │   ├── rules-orchestrator
│   │   │   │   └── orchestrator-rules
│   │   │   └── rules-test
│   │   │       └── test-rules
│   │   └── .roomodes
│   ├── rules
│   │   ├── cursor_rules.mdc
│   │   ├── dev_workflow.mdc
│   │   ├── self_improve.mdc
│   │   ├── taskmaster_hooks_workflow.mdc
│   │   └── taskmaster.mdc
│   └── scripts_README.md
├── bin
│   └── task-master.js
├── biome.json
├── CHANGELOG.md
├── CLAUDE_CODE_PLUGIN.md
├── CLAUDE.md
├── context
│   ├── chats
│   │   ├── add-task-dependencies-1.md
│   │   └── max-min-tokens.txt.md
│   ├── fastmcp-core.txt
│   ├── fastmcp-docs.txt
│   ├── MCP_INTEGRATION.md
│   ├── mcp-js-sdk-docs.txt
│   ├── mcp-protocol-repo.txt
│   ├── mcp-protocol-schema-03262025.json
│   └── mcp-protocol-spec.txt
├── CONTRIBUTING.md
├── docs
│   ├── claude-code-integration.md
│   ├── CLI-COMMANDER-PATTERN.md
│   ├── command-reference.md
│   ├── configuration.md
│   ├── contributor-docs
│   │   ├── testing-roo-integration.md
│   │   └── worktree-setup.md
│   ├── cross-tag-task-movement.md
│   ├── examples
│   │   ├── claude-code-usage.md
│   │   └── codex-cli-usage.md
│   ├── examples.md
│   ├── licensing.md
│   ├── mcp-provider-guide.md
│   ├── mcp-provider.md
│   ├── migration-guide.md
│   ├── models.md
│   ├── providers
│   │   ├── codex-cli.md
│   │   └── gemini-cli.md
│   ├── README.md
│   ├── scripts
│   │   └── models-json-to-markdown.js
│   ├── task-structure.md
│   └── tutorial.md
├── images
│   ├── hamster-hiring.png
│   └── logo.png
├── index.js
├── jest.config.js
├── jest.resolver.cjs
├── LICENSE
├── llms-install.md
├── mcp-server
│   ├── server.js
│   └── src
│       ├── core
│       │   ├── __tests__
│       │   │   └── context-manager.test.js
│       │   ├── context-manager.js
│       │   ├── direct-functions
│       │   │   ├── add-dependency.js
│       │   │   ├── add-subtask.js
│       │   │   ├── add-tag.js
│       │   │   ├── add-task.js
│       │   │   ├── analyze-task-complexity.js
│       │   │   ├── cache-stats.js
│       │   │   ├── clear-subtasks.js
│       │   │   ├── complexity-report.js
│       │   │   ├── copy-tag.js
│       │   │   ├── create-tag-from-branch.js
│       │   │   ├── delete-tag.js
│       │   │   ├── expand-all-tasks.js
│       │   │   ├── expand-task.js
│       │   │   ├── fix-dependencies.js
│       │   │   ├── generate-task-files.js
│       │   │   ├── initialize-project.js
│       │   │   ├── list-tags.js
│       │   │   ├── models.js
│       │   │   ├── move-task-cross-tag.js
│       │   │   ├── move-task.js
│       │   │   ├── next-task.js
│       │   │   ├── parse-prd.js
│       │   │   ├── remove-dependency.js
│       │   │   ├── remove-subtask.js
│       │   │   ├── remove-task.js
│       │   │   ├── rename-tag.js
│       │   │   ├── research.js
│       │   │   ├── response-language.js
│       │   │   ├── rules.js
│       │   │   ├── scope-down.js
│       │   │   ├── scope-up.js
│       │   │   ├── set-task-status.js
│       │   │   ├── update-subtask-by-id.js
│       │   │   ├── update-task-by-id.js
│       │   │   ├── update-tasks.js
│       │   │   ├── use-tag.js
│       │   │   └── validate-dependencies.js
│       │   ├── task-master-core.js
│       │   └── utils
│       │       ├── env-utils.js
│       │       └── path-utils.js
│       ├── custom-sdk
│       │   ├── errors.js
│       │   ├── index.js
│       │   ├── json-extractor.js
│       │   ├── language-model.js
│       │   ├── message-converter.js
│       │   └── schema-converter.js
│       ├── index.js
│       ├── logger.js
│       ├── providers
│       │   └── mcp-provider.js
│       └── tools
│           ├── add-dependency.js
│           ├── add-subtask.js
│           ├── add-tag.js
│           ├── add-task.js
│           ├── analyze.js
│           ├── clear-subtasks.js
│           ├── complexity-report.js
│           ├── copy-tag.js
│           ├── delete-tag.js
│           ├── expand-all.js
│           ├── expand-task.js
│           ├── fix-dependencies.js
│           ├── generate.js
│           ├── get-operation-status.js
│           ├── index.js
│           ├── initialize-project.js
│           ├── list-tags.js
│           ├── models.js
│           ├── move-task.js
│           ├── next-task.js
│           ├── parse-prd.js
│           ├── README-ZOD-V3.md
│           ├── remove-dependency.js
│           ├── remove-subtask.js
│           ├── remove-task.js
│           ├── rename-tag.js
│           ├── research.js
│           ├── response-language.js
│           ├── rules.js
│           ├── scope-down.js
│           ├── scope-up.js
│           ├── set-task-status.js
│           ├── tool-registry.js
│           ├── update-subtask.js
│           ├── update-task.js
│           ├── update.js
│           ├── use-tag.js
│           ├── utils.js
│           └── validate-dependencies.js
├── mcp-test.js
├── output.json
├── package-lock.json
├── package.json
├── packages
│   ├── ai-sdk-provider-grok-cli
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── README.md
│   │   ├── src
│   │   │   ├── errors.test.ts
│   │   │   ├── errors.ts
│   │   │   ├── grok-cli-language-model.ts
│   │   │   ├── grok-cli-provider.test.ts
│   │   │   ├── grok-cli-provider.ts
│   │   │   ├── index.ts
│   │   │   ├── json-extractor.test.ts
│   │   │   ├── json-extractor.ts
│   │   │   ├── message-converter.test.ts
│   │   │   ├── message-converter.ts
│   │   │   └── types.ts
│   │   └── tsconfig.json
│   ├── build-config
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── src
│   │   │   └── tsdown.base.ts
│   │   └── tsconfig.json
│   ├── claude-code-plugin
│   │   ├── .claude-plugin
│   │   │   └── plugin.json
│   │   ├── .gitignore
│   │   ├── agents
│   │   │   ├── task-checker.md
│   │   │   ├── task-executor.md
│   │   │   └── task-orchestrator.md
│   │   ├── CHANGELOG.md
│   │   ├── commands
│   │   │   ├── add-dependency.md
│   │   │   ├── add-subtask.md
│   │   │   ├── add-task.md
│   │   │   ├── analyze-complexity.md
│   │   │   ├── analyze-project.md
│   │   │   ├── auto-implement-tasks.md
│   │   │   ├── command-pipeline.md
│   │   │   ├── complexity-report.md
│   │   │   ├── convert-task-to-subtask.md
│   │   │   ├── expand-all-tasks.md
│   │   │   ├── expand-task.md
│   │   │   ├── fix-dependencies.md
│   │   │   ├── generate-tasks.md
│   │   │   ├── help.md
│   │   │   ├── init-project-quick.md
│   │   │   ├── init-project.md
│   │   │   ├── install-taskmaster.md
│   │   │   ├── learn.md
│   │   │   ├── list-tasks-by-status.md
│   │   │   ├── list-tasks-with-subtasks.md
│   │   │   ├── list-tasks.md
│   │   │   ├── next-task.md
│   │   │   ├── parse-prd-with-research.md
│   │   │   ├── parse-prd.md
│   │   │   ├── project-status.md
│   │   │   ├── quick-install-taskmaster.md
│   │   │   ├── remove-all-subtasks.md
│   │   │   ├── remove-dependency.md
│   │   │   ├── remove-subtask.md
│   │   │   ├── remove-subtasks.md
│   │   │   ├── remove-task.md
│   │   │   ├── setup-models.md
│   │   │   ├── show-task.md
│   │   │   ├── smart-workflow.md
│   │   │   ├── sync-readme.md
│   │   │   ├── tm-main.md
│   │   │   ├── to-cancelled.md
│   │   │   ├── to-deferred.md
│   │   │   ├── to-done.md
│   │   │   ├── to-in-progress.md
│   │   │   ├── to-pending.md
│   │   │   ├── to-review.md
│   │   │   ├── update-single-task.md
│   │   │   ├── update-task.md
│   │   │   ├── update-tasks-from-id.md
│   │   │   ├── validate-dependencies.md
│   │   │   └── view-models.md
│   │   ├── mcp.json
│   │   └── package.json
│   ├── tm-bridge
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── README.md
│   │   ├── src
│   │   │   ├── add-tag-bridge.ts
│   │   │   ├── bridge-types.ts
│   │   │   ├── bridge-utils.ts
│   │   │   ├── expand-bridge.ts
│   │   │   ├── index.ts
│   │   │   ├── tags-bridge.ts
│   │   │   ├── update-bridge.ts
│   │   │   └── use-tag-bridge.ts
│   │   └── tsconfig.json
│   └── tm-core
│       ├── .gitignore
│       ├── CHANGELOG.md
│       ├── docs
│       │   └── listTasks-architecture.md
│       ├── package.json
│       ├── POC-STATUS.md
│       ├── README.md
│       ├── src
│       │   ├── common
│       │   │   ├── constants
│       │   │   │   ├── index.ts
│       │   │   │   ├── paths.ts
│       │   │   │   └── providers.ts
│       │   │   ├── errors
│       │   │   │   ├── index.ts
│       │   │   │   └── task-master-error.ts
│       │   │   ├── interfaces
│       │   │   │   ├── configuration.interface.ts
│       │   │   │   ├── index.ts
│       │   │   │   └── storage.interface.ts
│       │   │   ├── logger
│       │   │   │   ├── factory.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── logger.spec.ts
│       │   │   │   └── logger.ts
│       │   │   ├── mappers
│       │   │   │   ├── TaskMapper.test.ts
│       │   │   │   └── TaskMapper.ts
│       │   │   ├── types
│       │   │   │   ├── database.types.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── legacy.ts
│       │   │   │   └── repository-types.ts
│       │   │   └── utils
│       │   │       ├── git-utils.ts
│       │   │       ├── id-generator.ts
│       │   │       ├── index.ts
│       │   │       ├── path-helpers.ts
│       │   │       ├── path-normalizer.spec.ts
│       │   │       ├── path-normalizer.ts
│       │   │       ├── project-root-finder.spec.ts
│       │   │       ├── project-root-finder.ts
│       │   │       ├── run-id-generator.spec.ts
│       │   │       └── run-id-generator.ts
│       │   ├── index.ts
│       │   ├── modules
│       │   │   ├── ai
│       │   │   │   ├── index.ts
│       │   │   │   ├── interfaces
│       │   │   │   │   └── ai-provider.interface.ts
│       │   │   │   └── providers
│       │   │   │       ├── base-provider.ts
│       │   │   │       └── index.ts
│       │   │   ├── auth
│       │   │   │   ├── auth-domain.spec.ts
│       │   │   │   ├── auth-domain.ts
│       │   │   │   ├── config.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── managers
│       │   │   │   │   ├── auth-manager.spec.ts
│       │   │   │   │   └── auth-manager.ts
│       │   │   │   ├── services
│       │   │   │   │   ├── context-store.ts
│       │   │   │   │   ├── oauth-service.ts
│       │   │   │   │   ├── organization.service.ts
│       │   │   │   │   ├── supabase-session-storage.spec.ts
│       │   │   │   │   └── supabase-session-storage.ts
│       │   │   │   └── types.ts
│       │   │   ├── briefs
│       │   │   │   ├── briefs-domain.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── services
│       │   │   │   │   └── brief-service.ts
│       │   │   │   ├── types.ts
│       │   │   │   └── utils
│       │   │   │       └── url-parser.ts
│       │   │   ├── commands
│       │   │   │   └── index.ts
│       │   │   ├── config
│       │   │   │   ├── config-domain.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── managers
│       │   │   │   │   ├── config-manager.spec.ts
│       │   │   │   │   └── config-manager.ts
│       │   │   │   └── services
│       │   │   │       ├── config-loader.service.spec.ts
│       │   │   │       ├── config-loader.service.ts
│       │   │   │       ├── config-merger.service.spec.ts
│       │   │   │       ├── config-merger.service.ts
│       │   │   │       ├── config-persistence.service.spec.ts
│       │   │   │       ├── config-persistence.service.ts
│       │   │   │       ├── environment-config-provider.service.spec.ts
│       │   │   │       ├── environment-config-provider.service.ts
│       │   │   │       ├── index.ts
│       │   │   │       ├── runtime-state-manager.service.spec.ts
│       │   │   │       └── runtime-state-manager.service.ts
│       │   │   ├── dependencies
│       │   │   │   └── index.ts
│       │   │   ├── execution
│       │   │   │   ├── executors
│       │   │   │   │   ├── base-executor.ts
│       │   │   │   │   ├── claude-executor.ts
│       │   │   │   │   └── executor-factory.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── services
│       │   │   │   │   └── executor-service.ts
│       │   │   │   └── types.ts
│       │   │   ├── git
│       │   │   │   ├── adapters
│       │   │   │   │   ├── git-adapter.test.ts
│       │   │   │   │   └── git-adapter.ts
│       │   │   │   ├── git-domain.ts
│       │   │   │   ├── index.ts
│       │   │   │   └── services
│       │   │   │       ├── branch-name-generator.spec.ts
│       │   │   │       ├── branch-name-generator.ts
│       │   │   │       ├── commit-message-generator.test.ts
│       │   │   │       ├── commit-message-generator.ts
│       │   │   │       ├── scope-detector.test.ts
│       │   │   │       ├── scope-detector.ts
│       │   │   │       ├── template-engine.test.ts
│       │   │   │       └── template-engine.ts
│       │   │   ├── integration
│       │   │   │   ├── clients
│       │   │   │   │   ├── index.ts
│       │   │   │   │   └── supabase-client.ts
│       │   │   │   ├── integration-domain.ts
│       │   │   │   └── services
│       │   │   │       ├── export.service.ts
│       │   │   │       ├── task-expansion.service.ts
│       │   │   │       └── task-retrieval.service.ts
│       │   │   ├── reports
│       │   │   │   ├── index.ts
│       │   │   │   ├── managers
│       │   │   │   │   └── complexity-report-manager.ts
│       │   │   │   └── types.ts
│       │   │   ├── storage
│       │   │   │   ├── adapters
│       │   │   │   │   ├── activity-logger.ts
│       │   │   │   │   ├── api-storage.ts
│       │   │   │   │   └── file-storage
│       │   │   │   │       ├── file-operations.ts
│       │   │   │   │       ├── file-storage.ts
│       │   │   │   │       ├── format-handler.ts
│       │   │   │   │       ├── index.ts
│       │   │   │   │       └── path-resolver.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── services
│       │   │   │   │   └── storage-factory.ts
│       │   │   │   └── utils
│       │   │   │       └── api-client.ts
│       │   │   ├── tasks
│       │   │   │   ├── entities
│       │   │   │   │   └── task.entity.ts
│       │   │   │   ├── parser
│       │   │   │   │   └── index.ts
│       │   │   │   ├── repositories
│       │   │   │   │   ├── supabase
│       │   │   │   │   │   ├── dependency-fetcher.ts
│       │   │   │   │   │   ├── index.ts
│       │   │   │   │   │   └── supabase-repository.ts
│       │   │   │   │   └── task-repository.interface.ts
│       │   │   │   ├── services
│       │   │   │   │   ├── preflight-checker.service.ts
│       │   │   │   │   ├── tag.service.ts
│       │   │   │   │   ├── task-execution-service.ts
│       │   │   │   │   ├── task-loader.service.ts
│       │   │   │   │   └── task-service.ts
│       │   │   │   └── tasks-domain.ts
│       │   │   ├── ui
│       │   │   │   └── index.ts
│       │   │   └── workflow
│       │   │       ├── managers
│       │   │       │   ├── workflow-state-manager.spec.ts
│       │   │       │   └── workflow-state-manager.ts
│       │   │       ├── orchestrators
│       │   │       │   ├── workflow-orchestrator.test.ts
│       │   │       │   └── workflow-orchestrator.ts
│       │   │       ├── services
│       │   │       │   ├── test-result-validator.test.ts
│       │   │       │   ├── test-result-validator.ts
│       │   │       │   ├── test-result-validator.types.ts
│       │   │       │   ├── workflow-activity-logger.ts
│       │   │       │   └── workflow.service.ts
│       │   │       ├── types.ts
│       │   │       └── workflow-domain.ts
│       │   ├── subpath-exports.test.ts
│       │   ├── tm-core.ts
│       │   └── utils
│       │       └── time.utils.ts
│       ├── tests
│       │   ├── auth
│       │   │   └── auth-refresh.test.ts
│       │   ├── integration
│       │   │   ├── auth-token-refresh.test.ts
│       │   │   ├── list-tasks.test.ts
│       │   │   └── storage
│       │   │       └── activity-logger.test.ts
│       │   ├── mocks
│       │   │   └── mock-provider.ts
│       │   ├── setup.ts
│       │   └── unit
│       │       ├── base-provider.test.ts
│       │       ├── executor.test.ts
│       │       └── smoke.test.ts
│       ├── tsconfig.json
│       └── vitest.config.ts
├── README-task-master.md
├── README.md
├── scripts
│   ├── create-worktree.sh
│   ├── dev.js
│   ├── init.js
│   ├── list-worktrees.sh
│   ├── modules
│   │   ├── ai-services-unified.js
│   │   ├── bridge-utils.js
│   │   ├── commands.js
│   │   ├── config-manager.js
│   │   ├── dependency-manager.js
│   │   ├── index.js
│   │   ├── prompt-manager.js
│   │   ├── supported-models.json
│   │   ├── sync-readme.js
│   │   ├── task-manager
│   │   │   ├── add-subtask.js
│   │   │   ├── add-task.js
│   │   │   ├── analyze-task-complexity.js
│   │   │   ├── clear-subtasks.js
│   │   │   ├── expand-all-tasks.js
│   │   │   ├── expand-task.js
│   │   │   ├── find-next-task.js
│   │   │   ├── generate-task-files.js
│   │   │   ├── is-task-dependent.js
│   │   │   ├── list-tasks.js
│   │   │   ├── migrate.js
│   │   │   ├── models.js
│   │   │   ├── move-task.js
│   │   │   ├── parse-prd
│   │   │   │   ├── index.js
│   │   │   │   ├── parse-prd-config.js
│   │   │   │   ├── parse-prd-helpers.js
│   │   │   │   ├── parse-prd-non-streaming.js
│   │   │   │   ├── parse-prd-streaming.js
│   │   │   │   └── parse-prd.js
│   │   │   ├── remove-subtask.js
│   │   │   ├── remove-task.js
│   │   │   ├── research.js
│   │   │   ├── response-language.js
│   │   │   ├── scope-adjustment.js
│   │   │   ├── set-task-status.js
│   │   │   ├── tag-management.js
│   │   │   ├── task-exists.js
│   │   │   ├── update-single-task-status.js
│   │   │   ├── update-subtask-by-id.js
│   │   │   ├── update-task-by-id.js
│   │   │   └── update-tasks.js
│   │   ├── task-manager.js
│   │   ├── ui.js
│   │   ├── update-config-tokens.js
│   │   ├── utils
│   │   │   ├── contextGatherer.js
│   │   │   ├── fuzzyTaskSearch.js
│   │   │   └── git-utils.js
│   │   └── utils.js
│   ├── task-complexity-report.json
│   ├── test-claude-errors.js
│   └── test-claude.js
├── sonar-project.properties
├── src
│   ├── ai-providers
│   │   ├── anthropic.js
│   │   ├── azure.js
│   │   ├── base-provider.js
│   │   ├── bedrock.js
│   │   ├── claude-code.js
│   │   ├── codex-cli.js
│   │   ├── gemini-cli.js
│   │   ├── google-vertex.js
│   │   ├── google.js
│   │   ├── grok-cli.js
│   │   ├── groq.js
│   │   ├── index.js
│   │   ├── lmstudio.js
│   │   ├── ollama.js
│   │   ├── openai-compatible.js
│   │   ├── openai.js
│   │   ├── openrouter.js
│   │   ├── perplexity.js
│   │   ├── xai.js
│   │   ├── zai-coding.js
│   │   └── zai.js
│   ├── constants
│   │   ├── commands.js
│   │   ├── paths.js
│   │   ├── profiles.js
│   │   ├── rules-actions.js
│   │   ├── task-priority.js
│   │   └── task-status.js
│   ├── profiles
│   │   ├── amp.js
│   │   ├── base-profile.js
│   │   ├── claude.js
│   │   ├── cline.js
│   │   ├── codex.js
│   │   ├── cursor.js
│   │   ├── gemini.js
│   │   ├── index.js
│   │   ├── kilo.js
│   │   ├── kiro.js
│   │   ├── opencode.js
│   │   ├── roo.js
│   │   ├── trae.js
│   │   ├── vscode.js
│   │   ├── windsurf.js
│   │   └── zed.js
│   ├── progress
│   │   ├── base-progress-tracker.js
│   │   ├── cli-progress-factory.js
│   │   ├── parse-prd-tracker.js
│   │   ├── progress-tracker-builder.js
│   │   └── tracker-ui.js
│   ├── prompts
│   │   ├── add-task.json
│   │   ├── analyze-complexity.json
│   │   ├── expand-task.json
│   │   ├── parse-prd.json
│   │   ├── README.md
│   │   ├── research.json
│   │   ├── schemas
│   │   │   ├── parameter.schema.json
│   │   │   ├── prompt-template.schema.json
│   │   │   ├── README.md
│   │   │   └── variant.schema.json
│   │   ├── update-subtask.json
│   │   ├── update-task.json
│   │   └── update-tasks.json
│   ├── provider-registry
│   │   └── index.js
│   ├── schemas
│   │   ├── add-task.js
│   │   ├── analyze-complexity.js
│   │   ├── base-schemas.js
│   │   ├── expand-task.js
│   │   ├── parse-prd.js
│   │   ├── registry.js
│   │   ├── update-subtask.js
│   │   ├── update-task.js
│   │   └── update-tasks.js
│   ├── task-master.js
│   ├── ui
│   │   ├── confirm.js
│   │   ├── indicators.js
│   │   └── parse-prd.js
│   └── utils
│       ├── asset-resolver.js
│       ├── create-mcp-config.js
│       ├── format.js
│       ├── getVersion.js
│       ├── logger-utils.js
│       ├── manage-gitignore.js
│       ├── path-utils.js
│       ├── profiles.js
│       ├── rule-transformer.js
│       ├── stream-parser.js
│       └── timeout-manager.js
├── test-clean-tags.js
├── test-config-manager.js
├── test-prd.txt
├── test-tag-functions.js
├── test-version-check-full.js
├── test-version-check.js
├── tests
│   ├── e2e
│   │   ├── e2e_helpers.sh
│   │   ├── parse_llm_output.cjs
│   │   ├── run_e2e.sh
│   │   ├── run_fallback_verification.sh
│   │   └── test_llm_analysis.sh
│   ├── fixtures
│   │   ├── .taskmasterconfig
│   │   ├── sample-claude-response.js
│   │   ├── sample-prd.txt
│   │   └── sample-tasks.js
│   ├── helpers
│   │   └── tool-counts.js
│   ├── integration
│   │   ├── claude-code-error-handling.test.js
│   │   ├── claude-code-optional.test.js
│   │   ├── cli
│   │   │   ├── commands.test.js
│   │   │   ├── complex-cross-tag-scenarios.test.js
│   │   │   └── move-cross-tag.test.js
│   │   ├── manage-gitignore.test.js
│   │   ├── mcp-server
│   │   │   └── direct-functions.test.js
│   │   ├── move-task-cross-tag.integration.test.js
│   │   ├── move-task-simple.integration.test.js
│   │   ├── profiles
│   │   │   ├── amp-init-functionality.test.js
│   │   │   ├── claude-init-functionality.test.js
│   │   │   ├── cline-init-functionality.test.js
│   │   │   ├── codex-init-functionality.test.js
│   │   │   ├── cursor-init-functionality.test.js
│   │   │   ├── gemini-init-functionality.test.js
│   │   │   ├── opencode-init-functionality.test.js
│   │   │   ├── roo-files-inclusion.test.js
│   │   │   ├── roo-init-functionality.test.js
│   │   │   ├── rules-files-inclusion.test.js
│   │   │   ├── trae-init-functionality.test.js
│   │   │   ├── vscode-init-functionality.test.js
│   │   │   └── windsurf-init-functionality.test.js
│   │   └── providers
│   │       └── temperature-support.test.js
│   ├── manual
│   │   ├── progress
│   │   │   ├── parse-prd-analysis.js
│   │   │   ├── test-parse-prd.js
│   │   │   └── TESTING_GUIDE.md
│   │   └── prompts
│   │       ├── prompt-test.js
│   │       └── README.md
│   ├── README.md
│   ├── setup.js
│   └── unit
│       ├── ai-providers
│       │   ├── base-provider.test.js
│       │   ├── claude-code.test.js
│       │   ├── codex-cli.test.js
│       │   ├── gemini-cli.test.js
│       │   ├── lmstudio.test.js
│       │   ├── mcp-components.test.js
│       │   ├── openai-compatible.test.js
│       │   ├── openai.test.js
│       │   ├── provider-registry.test.js
│       │   ├── zai-coding.test.js
│       │   ├── zai-provider.test.js
│       │   ├── zai-schema-introspection.test.js
│       │   └── zai.test.js
│       ├── ai-services-unified.test.js
│       ├── commands.test.js
│       ├── config-manager.test.js
│       ├── config-manager.test.mjs
│       ├── dependency-manager.test.js
│       ├── init.test.js
│       ├── initialize-project.test.js
│       ├── kebab-case-validation.test.js
│       ├── manage-gitignore.test.js
│       ├── mcp
│       │   └── tools
│       │       ├── __mocks__
│       │       │   └── move-task.js
│       │       ├── add-task.test.js
│       │       ├── analyze-complexity.test.js
│       │       ├── expand-all.test.js
│       │       ├── get-tasks.test.js
│       │       ├── initialize-project.test.js
│       │       ├── move-task-cross-tag-options.test.js
│       │       ├── move-task-cross-tag.test.js
│       │       ├── remove-task.test.js
│       │       └── tool-registration.test.js
│       ├── mcp-providers
│       │   ├── mcp-components.test.js
│       │   └── mcp-provider.test.js
│       ├── parse-prd.test.js
│       ├── profiles
│       │   ├── amp-integration.test.js
│       │   ├── claude-integration.test.js
│       │   ├── cline-integration.test.js
│       │   ├── codex-integration.test.js
│       │   ├── cursor-integration.test.js
│       │   ├── gemini-integration.test.js
│       │   ├── kilo-integration.test.js
│       │   ├── kiro-integration.test.js
│       │   ├── mcp-config-validation.test.js
│       │   ├── opencode-integration.test.js
│       │   ├── profile-safety-check.test.js
│       │   ├── roo-integration.test.js
│       │   ├── rule-transformer-cline.test.js
│       │   ├── rule-transformer-cursor.test.js
│       │   ├── rule-transformer-gemini.test.js
│       │   ├── rule-transformer-kilo.test.js
│       │   ├── rule-transformer-kiro.test.js
│       │   ├── rule-transformer-opencode.test.js
│       │   ├── rule-transformer-roo.test.js
│       │   ├── rule-transformer-trae.test.js
│       │   ├── rule-transformer-vscode.test.js
│       │   ├── rule-transformer-windsurf.test.js
│       │   ├── rule-transformer-zed.test.js
│       │   ├── rule-transformer.test.js
│       │   ├── selective-profile-removal.test.js
│       │   ├── subdirectory-support.test.js
│       │   ├── trae-integration.test.js
│       │   ├── vscode-integration.test.js
│       │   ├── windsurf-integration.test.js
│       │   └── zed-integration.test.js
│       ├── progress
│       │   └── base-progress-tracker.test.js
│       ├── prompt-manager.test.js
│       ├── prompts
│       │   ├── expand-task-prompt.test.js
│       │   └── prompt-migration.test.js
│       ├── scripts
│       │   └── modules
│       │       ├── commands
│       │       │   ├── move-cross-tag.test.js
│       │       │   └── README.md
│       │       ├── dependency-manager
│       │       │   ├── circular-dependencies.test.js
│       │       │   ├── cross-tag-dependencies.test.js
│       │       │   └── fix-dependencies-command.test.js
│       │       ├── task-manager
│       │       │   ├── add-subtask.test.js
│       │       │   ├── add-task.test.js
│       │       │   ├── analyze-task-complexity.test.js
│       │       │   ├── clear-subtasks.test.js
│       │       │   ├── complexity-report-tag-isolation.test.js
│       │       │   ├── expand-all-tasks.test.js
│       │       │   ├── expand-task.test.js
│       │       │   ├── find-next-task.test.js
│       │       │   ├── generate-task-files.test.js
│       │       │   ├── list-tasks.test.js
│       │       │   ├── models-baseurl.test.js
│       │       │   ├── move-task-cross-tag.test.js
│       │       │   ├── move-task.test.js
│       │       │   ├── parse-prd-schema.test.js
│       │       │   ├── parse-prd.test.js
│       │       │   ├── remove-subtask.test.js
│       │       │   ├── remove-task.test.js
│       │       │   ├── research.test.js
│       │       │   ├── scope-adjustment.test.js
│       │       │   ├── set-task-status.test.js
│       │       │   ├── setup.js
│       │       │   ├── update-single-task-status.test.js
│       │       │   ├── update-subtask-by-id.test.js
│       │       │   ├── update-task-by-id.test.js
│       │       │   └── update-tasks.test.js
│       │       ├── ui
│       │       │   └── cross-tag-error-display.test.js
│       │       └── utils-tag-aware-paths.test.js
│       ├── task-finder.test.js
│       ├── task-manager
│       │   ├── clear-subtasks.test.js
│       │   ├── move-task.test.js
│       │   ├── tag-boundary.test.js
│       │   └── tag-management.test.js
│       ├── task-master.test.js
│       ├── ui
│       │   └── indicators.test.js
│       ├── ui.test.js
│       ├── utils-strip-ansi.test.js
│       └── utils.test.js
├── tsconfig.json
├── tsdown.config.ts
├── turbo.json
└── update-task-migration-plan.md
```

# Files

--------------------------------------------------------------------------------
/scripts/modules/task-manager/parse-prd/parse-prd-helpers.js:
--------------------------------------------------------------------------------

```javascript
/**
 * Helper functions for PRD parsing
 */

import fs from 'fs';
import path from 'path';
import boxen from 'boxen';
import chalk from 'chalk';
import { ensureTagMetadata, findTaskById } from '../../utils.js';
import { displayParsePrdSummary } from '../../../../src/ui/parse-prd.js';
import { TimeoutManager } from '../../../../src/utils/timeout-manager.js';
import { displayAiUsageSummary } from '../../ui.js';
import { getPromptManager } from '../../prompt-manager.js';
import { getDefaultPriority } from '../../config-manager.js';

/**
 * Estimate token count from text
 * @param {string} text - Text to estimate tokens for
 * @returns {number} Estimated token count
 */
export function estimateTokens(text) {
	// Common approximation: ~4 characters per token for English
	return Math.ceil(text.length / 4);
}

/**
 * Read and validate PRD content
 * @param {string} prdPath - Path to PRD file
 * @returns {string} PRD content
 * @throws {Error} If file is empty or cannot be read
 */
export function readPrdContent(prdPath) {
	const prdContent = fs.readFileSync(prdPath, 'utf8');
	if (!prdContent) {
		throw new Error(`Input file ${prdPath} is empty or could not be read.`);
	}
	return prdContent;
}

/**
 * Load existing tasks from file
 * @param {string} tasksPath - Path to tasks file
 * @param {string} targetTag - Target tag to load from
 * @returns {{tasks: Array, nextId: number}} Existing tasks and next ID
 */
export function loadExistingTasks(tasksPath, targetTag) {
	let existingTasks = [];
	let nextId = 1;

	if (!fs.existsSync(tasksPath)) {
		return { existingTasks, nextId };
	}

	try {
		const existingFileContent = fs.readFileSync(tasksPath, 'utf8');
		const allData = JSON.parse(existingFileContent);

		if (allData[targetTag]?.tasks && Array.isArray(allData[targetTag].tasks)) {
			existingTasks = allData[targetTag].tasks;
			if (existingTasks.length > 0) {
				nextId = Math.max(...existingTasks.map((t) => t.id || 0)) + 1;
			}
		}
	} catch (error) {
		// If we can't read the file or parse it, assume no existing tasks
		return { existingTasks: [], nextId: 1 };
	}

	return { existingTasks, nextId };
}

/**
 * Validate overwrite/append operations
 * @param {Object} params
 * @returns {void}
 * @throws {Error} If validation fails
 */
export function validateFileOperations({
	existingTasks,
	targetTag,
	append,
	force,
	isMCP,
	logger
}) {
	const hasExistingTasks = existingTasks.length > 0;

	if (!hasExistingTasks) {
		logger.report(
			`Tag '${targetTag}' is empty or doesn't exist. Creating/updating tag with new tasks.`,
			'info'
		);
		return;
	}

	if (append) {
		logger.report(
			`Append mode enabled. Found ${existingTasks.length} existing tasks in tag '${targetTag}'.`,
			'info'
		);
		return;
	}

	if (!force) {
		const errorMessage = `Tag '${targetTag}' already contains ${existingTasks.length} tasks. Use --force to overwrite or --append to add to existing tasks.`;
		logger.report(errorMessage, 'error');

		if (isMCP) {
			throw new Error(errorMessage);
		} else {
			console.error(chalk.red(errorMessage));
			process.exit(1);
		}
	}

	logger.report(
		`Force flag enabled. Overwriting existing tasks in tag '${targetTag}'.`,
		'debug'
	);
}

/**
 * Process and transform tasks with ID remapping
 * @param {Array} rawTasks - Raw tasks from AI
 * @param {number} startId - Starting ID for new tasks
 * @param {Array} existingTasks - Existing tasks for dependency validation
 * @param {string} defaultPriority - Default priority for tasks
 * @returns {Array} Processed tasks with remapped IDs
 */
export function processTasks(
	rawTasks,
	startId,
	existingTasks,
	defaultPriority
) {
	let currentId = startId;
	const taskMap = new Map();

	// First pass: assign new IDs and create mapping
	const processedTasks = rawTasks.map((task) => {
		const newId = currentId++;
		taskMap.set(task.id, newId);

		return {
			...task,
			id: newId,
			status: task.status || 'pending',
			priority: task.priority || defaultPriority,
			dependencies: Array.isArray(task.dependencies) ? task.dependencies : [],
			subtasks: task.subtasks || [],
			// Ensure all required fields have values
			title: task.title || '',
			description: task.description || '',
			details: task.details || '',
			testStrategy: task.testStrategy || ''
		};
	});

	// Second pass: remap dependencies
	processedTasks.forEach((task) => {
		task.dependencies = task.dependencies
			.map((depId) => taskMap.get(depId))
			.filter(
				(newDepId) =>
					newDepId != null &&
					newDepId < task.id &&
					(findTaskById(existingTasks, newDepId) ||
						processedTasks.some((t) => t.id === newDepId))
			);
	});

	return processedTasks;
}

/**
 * Save tasks to file with tag support
 * @param {string} tasksPath - Path to save tasks
 * @param {Array} tasks - Tasks to save
 * @param {string} targetTag - Target tag
 * @param {Object} logger - Logger instance
 */
export function saveTasksToFile(tasksPath, tasks, targetTag, logger) {
	// Create directory if it doesn't exist
	const tasksDir = path.dirname(tasksPath);
	if (!fs.existsSync(tasksDir)) {
		fs.mkdirSync(tasksDir, { recursive: true });
	}

	// Read existing file to preserve other tags
	let outputData = {};
	if (fs.existsSync(tasksPath)) {
		try {
			const existingFileContent = fs.readFileSync(tasksPath, 'utf8');
			outputData = JSON.parse(existingFileContent);
		} catch (error) {
			outputData = {};
		}
	}

	// Update only the target tag
	outputData[targetTag] = {
		tasks: tasks,
		metadata: {
			created:
				outputData[targetTag]?.metadata?.created || new Date().toISOString(),
			updated: new Date().toISOString(),
			description: `Tasks for ${targetTag} context`
		}
	};

	// Ensure proper metadata
	ensureTagMetadata(outputData[targetTag], {
		description: `Tasks for ${targetTag} context`
	});

	// Write back to file
	fs.writeFileSync(tasksPath, JSON.stringify(outputData, null, 2));

	logger.report(
		`Successfully saved ${tasks.length} tasks to ${tasksPath}`,
		'debug'
	);
}

/**
 * Build prompts for AI service
 * @param {Object} config - Configuration object
 * @param {string} prdContent - PRD content
 * @param {number} nextId - Next task ID
 * @returns {Promise<{systemPrompt: string, userPrompt: string}>}
 */
export async function buildPrompts(config, prdContent, nextId) {
	const promptManager = getPromptManager();
	const defaultTaskPriority =
		getDefaultPriority(config.projectRoot) || 'medium';

	return promptManager.loadPrompt('parse-prd', {
		research: config.research,
		numTasks: config.numTasks,
		nextId,
		prdContent,
		prdPath: config.prdPath,
		defaultTaskPriority,
		hasCodebaseAnalysis: config.hasCodebaseAnalysis(),
		projectRoot: config.projectRoot || ''
	});
}

/**
 * Handle progress reporting for both CLI and MCP
 * @param {Object} params
 */
export async function reportTaskProgress({
	task,
	currentCount,
	totalTasks,
	estimatedTokens,
	progressTracker,
	reportProgress,
	priorityMap,
	defaultPriority,
	estimatedInputTokens
}) {
	const priority = task.priority || defaultPriority;
	const priorityIndicator = priorityMap[priority] || priorityMap.medium;

	// CLI progress tracker
	if (progressTracker) {
		progressTracker.addTaskLine(currentCount, task.title, priority);
		if (estimatedTokens) {
			progressTracker.updateTokens(estimatedInputTokens, estimatedTokens);
		}
	}

	// MCP progress reporting
	if (reportProgress) {
		try {
			const outputTokens = estimatedTokens
				? Math.floor(estimatedTokens / totalTasks)
				: 0;

			await reportProgress({
				progress: currentCount,
				total: totalTasks,
				message: `${priorityIndicator} Task ${currentCount}/${totalTasks} - ${task.title} | ~Output: ${outputTokens} tokens`
			});
		} catch (error) {
			// Ignore progress reporting errors
		}
	}
}

/**
 * Display completion summary for CLI
 * @param {Object} params
 */
export async function displayCliSummary({
	processedTasks,
	nextId,
	summary,
	prdPath,
	tasksPath,
	usedFallback,
	aiServiceResponse
}) {
	// Generate task file names
	const taskFilesGenerated = (() => {
		if (!Array.isArray(processedTasks) || processedTasks.length === 0) {
			return `task_${String(nextId).padStart(3, '0')}.txt`;
		}
		const firstNewTaskId = processedTasks[0].id;
		const lastNewTaskId = processedTasks[processedTasks.length - 1].id;
		if (processedTasks.length === 1) {
			return `task_${String(firstNewTaskId).padStart(3, '0')}.txt`;
		}
		return `task_${String(firstNewTaskId).padStart(3, '0')}.txt -> task_${String(lastNewTaskId).padStart(3, '0')}.txt`;
	})();

	displayParsePrdSummary({
		totalTasks: processedTasks.length,
		taskPriorities: summary.taskPriorities,
		prdFilePath: prdPath,
		outputPath: tasksPath,
		elapsedTime: summary.elapsedTime,
		usedFallback,
		taskFilesGenerated,
		actionVerb: summary.actionVerb
	});

	// Display telemetry
	if (aiServiceResponse?.telemetryData) {
		// For streaming, wait briefly to allow usage data to be captured
		if (aiServiceResponse.mainResult?.usage) {
			// Give the usage promise a short time to resolve
			await TimeoutManager.withSoftTimeout(
				aiServiceResponse.mainResult.usage,
				1000,
				undefined
			);
		}
		displayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');
	}
}

/**
 * Display non-streaming CLI output
 * @param {Object} params
 */
export function displayNonStreamingCliOutput({
	processedTasks,
	research,
	finalTasks,
	tasksPath,
	aiServiceResponse
}) {
	console.log(
		boxen(
			chalk.green(
				`Successfully generated ${processedTasks.length} new tasks${research ? ' with research-backed analysis' : ''}. Total tasks in ${tasksPath}: ${finalTasks.length}`
			),
			{ padding: 1, borderColor: 'green', borderStyle: 'round' }
		)
	);

	console.log(
		boxen(
			chalk.white.bold('Next Steps:') +
				'\n\n' +
				`${chalk.cyan('1.')} Run ${chalk.yellow('task-master list')} to view all tasks\n` +
				`${chalk.cyan('2.')} Run ${chalk.yellow('task-master expand --id=<id>')} to break down a task into subtasks`,
			{
				padding: 1,
				borderColor: 'cyan',
				borderStyle: 'round',
				margin: { top: 1 }
			}
		)
	);

	if (aiServiceResponse?.telemetryData) {
		displayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');
	}
}

```
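
The helpers above are building blocks rather than a complete pipeline. As a rough illustration of how they compose, here is a minimal sketch of a caller, assuming a `config` object that carries the fields `buildPrompts` reads (`prdPath`, `projectRoot`, `research`, `numTasks`, a `hasCodebaseAnalysis()` method) plus `tasksPath`, `targetTag`, `append`, and `force`, and a `logger` exposing `report(message, level)`. `generateTasksFromPrd` is a hypothetical stand-in for the AI call; the production flow appears to be implemented by `parse-prd-streaming.js` and `parse-prd-non-streaming.js` alongside this file.

```javascript
// Minimal sketch of chaining the parse-prd helpers (not the real orchestrator).
import {
	readPrdContent,
	loadExistingTasks,
	validateFileOperations,
	buildPrompts,
	processTasks,
	saveTasksToFile
} from './parse-prd-helpers.js';

// Placeholder for the AI generation step (an assumption for this sketch);
// the real code routes this through the unified AI service layer.
async function generateTasksFromPrd(systemPrompt, userPrompt) {
	return []; // would return the model's raw task array
}

async function parsePrdSketch(config, logger) {
	const prdContent = readPrdContent(config.prdPath);
	const { existingTasks, nextId } = loadExistingTasks(
		config.tasksPath,
		config.targetTag
	);

	// Refuses to overwrite a non-empty tag unless --force or --append is set
	validateFileOperations({
		existingTasks,
		targetTag: config.targetTag,
		append: config.append,
		force: config.force,
		isMCP: false,
		logger
	});

	const { systemPrompt, userPrompt } = await buildPrompts(
		config,
		prdContent,
		nextId
	);
	const rawTasks = await generateTasksFromPrd(systemPrompt, userPrompt);

	// Remap IDs/dependencies starting at nextId; 'medium' stands in for the
	// configured default priority here.
	const newTasks = processTasks(rawTasks, nextId, existingTasks, 'medium');
	const finalTasks = config.append
		? [...existingTasks, ...newTasks]
		: newTasks;

	saveTasksToFile(config.tasksPath, finalTasks, config.targetTag, logger);
	return finalTasks;
}
```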

--------------------------------------------------------------------------------
/tests/unit/mcp/tools/initialize-project.test.js:
--------------------------------------------------------------------------------

```javascript
/**
 * Tests for the initialize-project MCP tool
 *
 * Note: This test does NOT test the actual implementation. It tests that:
 * 1. The tool is registered correctly with the correct parameters
 * 2. Command construction works correctly with various arguments
 * 3. Error handling works as expected
 * 4. Response formatting is correct
 *
 * We do NOT import the real implementation - everything is mocked
 */

import { jest } from '@jest/globals';

// Mock child_process.execSync
const mockExecSync = jest.fn();
jest.mock('child_process', () => ({
	execSync: mockExecSync
}));

// Mock the utility functions
const mockCreateContentResponse = jest.fn((content) => ({
	content
}));

const mockCreateErrorResponse = jest.fn((message, details) => ({
	error: { message, details }
}));

jest.mock('../../../../mcp-server/src/tools/utils.js', () => ({
	createContentResponse: mockCreateContentResponse,
	createErrorResponse: mockCreateErrorResponse
}));

// Mock the z object from zod
const mockZod = {
	object: jest.fn(() => mockZod),
	string: jest.fn(() => mockZod),
	boolean: jest.fn(() => mockZod),
	optional: jest.fn(() => mockZod),
	default: jest.fn(() => mockZod),
	describe: jest.fn(() => mockZod),
	_def: {
		shape: () => ({
			projectName: {},
			projectDescription: {},
			projectVersion: {},
			authorName: {},
			skipInstall: {},
			addAliases: {},
			yes: {}
		})
	}
};

jest.mock('zod', () => ({
	z: mockZod
}));

// Create our own simplified version of the registerInitializeProjectTool function
const registerInitializeProjectTool = (server) => {
	server.addTool({
		name: 'initialize_project',
		description:
			"Initializes a new Task Master project structure in the current working directory by running 'task-master init'.",
		parameters: mockZod,
		execute: async (args, { log }) => {
			try {
				log.info(
					`Executing initialize_project with args: ${JSON.stringify(args)}`
				);

				// Construct the command arguments
				let command = 'npx task-master init';
				const cliArgs = [];
				if (args.projectName) {
					cliArgs.push(`--name "${args.projectName.replace(/"/g, '\\"')}"`);
				}
				if (args.projectDescription) {
					cliArgs.push(
						`--description "${args.projectDescription.replace(/"/g, '\\"')}"`
					);
				}
				if (args.projectVersion) {
					cliArgs.push(
						`--version "${args.projectVersion.replace(/"/g, '\\"')}"`
					);
				}
				if (args.authorName) {
					cliArgs.push(`--author "${args.authorName.replace(/"/g, '\\"')}"`);
				}
				if (args.skipInstall) cliArgs.push('--skip-install');
				if (args.addAliases) cliArgs.push('--aliases');
				if (args.yes) cliArgs.push('--yes');

				command += ' ' + cliArgs.join(' ');

				log.info(`Constructed command: ${command}`);

				// Execute the command
				const output = mockExecSync(command, {
					encoding: 'utf8',
					stdio: 'pipe',
					timeout: 300000
				});

				log.info(`Initialization output:\n${output}`);

				// Return success response
				return mockCreateContentResponse({
					message: 'Project initialized successfully.',
					next_step:
					'Now that the project is initialized, the next step is to create the tasks by parsing a PRD. This will create the tasks folder and the initial task files. The parse-prd tool will require a PRD file',
					output: output
				});
			} catch (error) {
				// Catch errors
				const errorMessage = `Project initialization failed: ${error.message}`;
				const errorDetails =
					error.stderr?.toString() || error.stdout?.toString() || error.message;
				log.error(`${errorMessage}\nDetails: ${errorDetails}`);

				// Return error response
				return mockCreateErrorResponse(errorMessage, { details: errorDetails });
			}
		}
	});
};

describe('Initialize Project MCP Tool', () => {
	// Mock server and logger
	let mockServer;
	let executeFunction;

	const mockLogger = {
		debug: jest.fn(),
		info: jest.fn(),
		warn: jest.fn(),
		error: jest.fn()
	};

	beforeEach(() => {
		// Clear all mocks before each test
		jest.clearAllMocks();

		// Create mock server
		mockServer = {
			addTool: jest.fn((config) => {
				executeFunction = config.execute;
			})
		};

		// Default mock behavior
		mockExecSync.mockReturnValue('Project initialized successfully.');

		// Register the tool to capture the tool definition
		registerInitializeProjectTool(mockServer);
	});

	test('registers the tool with correct name and parameters', () => {
		// Check that addTool was called
		expect(mockServer.addTool).toHaveBeenCalledTimes(1);

		// Extract the tool definition from the mock call
		const toolDefinition = mockServer.addTool.mock.calls[0][0];

		// Verify tool properties
		expect(toolDefinition.name).toBe('initialize_project');
		expect(toolDefinition.description).toContain(
			'Initializes a new Task Master project'
		);
		expect(toolDefinition).toHaveProperty('parameters');
		expect(toolDefinition).toHaveProperty('execute');
	});

	test('constructs command with proper arguments', async () => {
		// Create arguments with all parameters
		const args = {
			projectName: 'Test Project',
			projectDescription: 'A project for testing',
			projectVersion: '1.0.0',
			authorName: 'Test Author',
			skipInstall: true,
			addAliases: true,
			yes: true
		};

		// Execute the tool
		await executeFunction(args, { log: mockLogger });

		// Verify execSync was called with the expected command
		expect(mockExecSync).toHaveBeenCalledTimes(1);

		const command = mockExecSync.mock.calls[0][0];

		// Check that the command includes npx task-master init
		expect(command).toContain('npx task-master init');

		// Verify each argument is correctly formatted in the command
		expect(command).toContain('--name "Test Project"');
		expect(command).toContain('--description "A project for testing"');
		expect(command).toContain('--version "1.0.0"');
		expect(command).toContain('--author "Test Author"');
		expect(command).toContain('--skip-install');
		expect(command).toContain('--aliases');
		expect(command).toContain('--yes');
	});

	test('properly escapes special characters in arguments', async () => {
		// Create arguments with special characters
		const args = {
			projectName: 'Test "Quoted" Project',
			projectDescription: 'A "special" project for testing'
		};

		// Execute the tool
		await executeFunction(args, { log: mockLogger });

		// Get the command that was executed
		const command = mockExecSync.mock.calls[0][0];

		// Verify quotes were properly escaped
		expect(command).toContain('--name "Test \\"Quoted\\" Project"');
		expect(command).toContain(
			'--description "A \\"special\\" project for testing"'
		);
	});

	test('returns success response when command succeeds', async () => {
		// Set up the mock to return specific output
		const outputMessage = 'Project initialized successfully.';
		mockExecSync.mockReturnValueOnce(outputMessage);

		// Execute the tool
		const result = await executeFunction({}, { log: mockLogger });

		// Verify createContentResponse was called with the right arguments
		expect(mockCreateContentResponse).toHaveBeenCalledWith(
			expect.objectContaining({
				message: 'Project initialized successfully.',
				next_step: expect.any(String),
				output: outputMessage
			})
		);

		// Verify the returned result has the expected structure
		expect(result).toHaveProperty('content');
		expect(result.content).toHaveProperty('message');
		expect(result.content).toHaveProperty('next_step');
		expect(result.content).toHaveProperty('output');
		expect(result.content.output).toBe(outputMessage);
	});

	test('returns error response when command fails', async () => {
		// Create an error to be thrown
		const error = new Error('Command failed');
		error.stdout = 'Some standard output';
		error.stderr = 'Some error output';

		// Make the mock throw the error
		mockExecSync.mockImplementationOnce(() => {
			throw error;
		});

		// Execute the tool
		const result = await executeFunction({}, { log: mockLogger });

		// Verify createErrorResponse was called with the right arguments
		expect(mockCreateErrorResponse).toHaveBeenCalledWith(
			'Project initialization failed: Command failed',
			expect.objectContaining({
				details: 'Some error output'
			})
		);

		// Verify the returned result has the expected structure
		expect(result).toHaveProperty('error');
		expect(result.error).toHaveProperty('message');
		expect(result.error.message).toContain('Project initialization failed');
	});

	test('logs information about the execution', async () => {
		// Execute the tool
		await executeFunction({}, { log: mockLogger });

		// Verify that logging occurred
		expect(mockLogger.info).toHaveBeenCalledWith(
			expect.stringContaining('Executing initialize_project')
		);
		expect(mockLogger.info).toHaveBeenCalledWith(
			expect.stringContaining('Constructed command')
		);
		expect(mockLogger.info).toHaveBeenCalledWith(
			expect.stringContaining('Initialization output')
		);
	});

	test('uses fallback to stdout if stderr is not available in error', async () => {
		// Create an error with only stdout
		const error = new Error('Command failed');
		error.stdout = 'Some standard output with error details';
		// No stderr property

		// Make the mock throw the error
		mockExecSync.mockImplementationOnce(() => {
			throw error;
		});

		// Execute the tool
		await executeFunction({}, { log: mockLogger });

		// Verify createErrorResponse was called with stdout as details
		expect(mockCreateErrorResponse).toHaveBeenCalledWith(
			expect.any(String),
			expect.objectContaining({
				details: 'Some standard output with error details'
			})
		);
	});

	test('logs error details when command fails', async () => {
		// Create an error
		const error = new Error('Command failed');
		error.stderr = 'Some detailed error message';

		// Make the mock throw the error
		mockExecSync.mockImplementationOnce(() => {
			throw error;
		});

		// Execute the tool
		await executeFunction({}, { log: mockLogger });

		// Verify error logging
		expect(mockLogger.error).toHaveBeenCalledWith(
			expect.stringContaining('Project initialization failed')
		);
		expect(mockLogger.error).toHaveBeenCalledWith(
			expect.stringContaining('Some detailed error message')
		);
	});
});

```

--------------------------------------------------------------------------------
/tests/unit/profiles/rule-transformer-kiro.test.js:
--------------------------------------------------------------------------------

```javascript
import { jest } from '@jest/globals';

// Mock fs module before importing anything that uses it
jest.mock('fs', () => ({
	readFileSync: jest.fn(),
	writeFileSync: jest.fn(),
	existsSync: jest.fn(),
	mkdirSync: jest.fn(),
	readdirSync: jest.fn(),
	copyFileSync: jest.fn()
}));

// Mock the log function
jest.mock('../../../scripts/modules/utils.js', () => ({
	log: jest.fn(),
	isSilentMode: jest.fn().mockReturnValue(false)
}));

// Import modules after mocking
import fs from 'fs';
import { convertRuleToProfileRule } from '../../../src/utils/rule-transformer.js';
import { kiroProfile } from '../../../src/profiles/kiro.js';

describe('Kiro Rule Transformer', () => {
	// Set up spies on the mocked modules
	const mockReadFileSync = jest.spyOn(fs, 'readFileSync');
	const mockWriteFileSync = jest.spyOn(fs, 'writeFileSync');
	const mockExistsSync = jest.spyOn(fs, 'existsSync');
	const mockMkdirSync = jest.spyOn(fs, 'mkdirSync');
	const mockConsoleError = jest
		.spyOn(console, 'error')
		.mockImplementation(() => {});
	jest.spyOn(console, 'log').mockImplementation(() => {});

	beforeEach(() => {
		jest.clearAllMocks();
		// Setup default mocks
		mockReadFileSync.mockReturnValue('');
		mockWriteFileSync.mockImplementation(() => {});
		mockExistsSync.mockReturnValue(true);
		mockMkdirSync.mockImplementation(() => {});
	});

	afterAll(() => {
		jest.restoreAllMocks();
	});

	it('should correctly convert basic terms', () => {
		const testContent = `---
description: Test Cursor rule for basic terms
globs: **/*
alwaysApply: true
---

This is a Cursor rule that references cursor.so and uses the word Cursor multiple times.
Also has references to .mdc files.`;

		// Mock file read to return our test content
		mockReadFileSync.mockReturnValue(testContent);

		// Mock file system operations
		mockExistsSync.mockReturnValue(true);

		// Call the function
		const result = convertRuleToProfileRule(
			'test-source.mdc',
			'test-target.md',
			kiroProfile
		);

		// Verify the result
		expect(result).toBe(true);
		expect(mockWriteFileSync).toHaveBeenCalledTimes(1);

		// Get the transformed content
		const transformedContent = mockWriteFileSync.mock.calls[0][1];

		// Verify Cursor -> Kiro transformations
		expect(transformedContent).toContain('kiro.dev');
		expect(transformedContent).toContain('Kiro');
		expect(transformedContent).not.toContain('cursor.so');
		expect(transformedContent).not.toContain('Cursor');
		expect(transformedContent).toContain('.md');
		expect(transformedContent).not.toContain('.mdc');
	});

	it('should handle URL transformations', () => {
		const testContent = `Visit https://cursor.so/docs for more information.
Also check out cursor.so and www.cursor.so for updates.`;

		mockReadFileSync.mockReturnValue(testContent);
		mockExistsSync.mockReturnValue(true);

		const result = convertRuleToProfileRule(
			'test-source.mdc',
			'test-target.md',
			kiroProfile
		);

		expect(result).toBe(true);
		const transformedContent = mockWriteFileSync.mock.calls[0][1];

		// Verify URL transformations
		expect(transformedContent).toContain('https://kiro.dev');
		expect(transformedContent).toContain('kiro.dev');
		expect(transformedContent).not.toContain('cursor.so');
	});

	it('should handle file extension transformations', () => {
		const testContent = `This rule references file.mdc and another.mdc file.
Use the .mdc extension for all rule files.`;

		mockReadFileSync.mockReturnValue(testContent);
		mockExistsSync.mockReturnValue(true);

		const result = convertRuleToProfileRule(
			'test-source.mdc',
			'test-target.md',
			kiroProfile
		);

		expect(result).toBe(true);
		const transformedContent = mockWriteFileSync.mock.calls[0][1];

		// Verify file extension transformations
		expect(transformedContent).toContain('file.md');
		expect(transformedContent).toContain('another.md');
		expect(transformedContent).toContain('.md extension');
		expect(transformedContent).not.toContain('.mdc');
	});

	it('should handle case variations', () => {
		const testContent = `CURSOR, Cursor, cursor should all be transformed.`;

		mockReadFileSync.mockReturnValue(testContent);
		mockExistsSync.mockReturnValue(true);

		const result = convertRuleToProfileRule(
			'test-source.mdc',
			'test-target.md',
			kiroProfile
		);

		expect(result).toBe(true);
		const transformedContent = mockWriteFileSync.mock.calls[0][1];

		// Verify case transformations
		// Due to regex order, the case-insensitive rule runs first:
		// CURSOR -> Kiro (because it starts with 'C'), Cursor -> Kiro, cursor -> kiro
		expect(transformedContent).toContain('Kiro');
		expect(transformedContent).toContain('kiro');
		expect(transformedContent).not.toContain('CURSOR');
		expect(transformedContent).not.toContain('Cursor');
		expect(transformedContent).not.toContain('cursor');
	});

	it('should create target directory if it does not exist', () => {
		const testContent = 'Test content';
		mockReadFileSync.mockReturnValue(testContent);
		mockExistsSync.mockReturnValue(false);

		const result = convertRuleToProfileRule(
			'test-source.mdc',
			'nested/path/test-target.md',
			kiroProfile
		);

		expect(result).toBe(true);
		expect(mockMkdirSync).toHaveBeenCalledWith('nested/path', {
			recursive: true
		});
	});

	it('should handle file system errors gracefully', () => {
		mockReadFileSync.mockImplementation(() => {
			throw new Error('File not found');
		});

		const result = convertRuleToProfileRule(
			'test-source.mdc',
			'test-target.md',
			kiroProfile
		);

		expect(result).toBe(false);
		expect(mockConsoleError).toHaveBeenCalledWith(
			'Error converting rule file: File not found'
		);
	});

	it('should handle write errors gracefully', () => {
		mockReadFileSync.mockReturnValue('Test content');
		mockWriteFileSync.mockImplementation(() => {
			throw new Error('Write permission denied');
		});

		const result = convertRuleToProfileRule(
			'test-source.mdc',
			'test-target.md',
			kiroProfile
		);

		expect(result).toBe(false);
		expect(mockConsoleError).toHaveBeenCalledWith(
			'Error converting rule file: Write permission denied'
		);
	});

	it('should verify profile configuration', () => {
		expect(kiroProfile.profileName).toBe('kiro');
		expect(kiroProfile.displayName).toBe('Kiro');
		expect(kiroProfile.profileDir).toBe('.kiro');
		expect(kiroProfile.mcpConfig).toBe(true);
		expect(kiroProfile.mcpConfigName).toBe('settings/mcp.json');
		expect(kiroProfile.mcpConfigPath).toBe('.kiro/settings/mcp.json');
		expect(kiroProfile.includeDefaultRules).toBe(true);
		expect(kiroProfile.fileMap).toEqual({
			'rules/cursor_rules.mdc': 'kiro_rules.md',
			'rules/dev_workflow.mdc': 'dev_workflow.md',
			'rules/self_improve.mdc': 'self_improve.md',
			'rules/taskmaster.mdc': 'taskmaster.md',
			'rules/taskmaster_hooks_workflow.mdc': 'taskmaster_hooks_workflow.md'
		});
	});

	describe('onPostConvert lifecycle hook', () => {
		const mockReaddirSync = jest.spyOn(fs, 'readdirSync');
		const mockCopyFileSync = jest.spyOn(fs, 'copyFileSync');

		beforeEach(() => {
			jest.clearAllMocks();
			// Setup default mock implementation that doesn't throw
			mockCopyFileSync.mockImplementation(() => {});
		});

		it('should copy hook files when kiro-hooks directory exists', () => {
			const projectRoot = '/test/project';
			const assetsDir = '/test/assets';
			const hookFiles = [
				'tm-test-hook1.kiro.hook',
				'tm-test-hook2.kiro.hook',
				'not-a-hook.txt'
			];

			// Mock directory existence
			mockExistsSync.mockImplementation((path) => {
				if (path === '/test/assets/kiro-hooks') return true;
				if (path === '/test/project/.kiro/hooks') return false;
				return true;
			});

			// Mock reading hook files
			mockReaddirSync.mockReturnValue(hookFiles);

			// Call the lifecycle hook
			kiroProfile.onPostConvertRulesProfile(projectRoot, assetsDir);

			// Verify hooks directory was created
			expect(mockMkdirSync).toHaveBeenCalledWith('/test/project/.kiro/hooks', {
				recursive: true
			});

			// Verify only .kiro.hook files were copied
			expect(mockCopyFileSync).toHaveBeenCalledTimes(2);
			expect(mockCopyFileSync).toHaveBeenCalledWith(
				'/test/assets/kiro-hooks/tm-test-hook1.kiro.hook',
				'/test/project/.kiro/hooks/tm-test-hook1.kiro.hook'
			);
			expect(mockCopyFileSync).toHaveBeenCalledWith(
				'/test/assets/kiro-hooks/tm-test-hook2.kiro.hook',
				'/test/project/.kiro/hooks/tm-test-hook2.kiro.hook'
			);
		});

		it('should handle case when hooks directory already exists', () => {
			const projectRoot = '/test/project';
			const assetsDir = '/test/assets';
			const hookFiles = ['tm-test-hook.kiro.hook'];

			// Mock all directories exist
			mockExistsSync.mockReturnValue(true);
			mockReaddirSync.mockReturnValue(hookFiles);

			// Call the lifecycle hook
			kiroProfile.onPostConvertRulesProfile(projectRoot, assetsDir);

			// Verify hooks directory was NOT created (already exists)
			expect(mockMkdirSync).not.toHaveBeenCalled();

			// Verify hook was copied
			expect(mockCopyFileSync).toHaveBeenCalledWith(
				'/test/assets/kiro-hooks/tm-test-hook.kiro.hook',
				'/test/project/.kiro/hooks/tm-test-hook.kiro.hook'
			);
		});

		it('should handle case when kiro-hooks source directory does not exist', () => {
			const projectRoot = '/test/project';
			const assetsDir = '/test/assets';

			// Mock source directory doesn't exist
			mockExistsSync.mockImplementation((path) => {
				if (path === '/test/assets/kiro-hooks') return false;
				return true;
			});

			// Call the lifecycle hook
			kiroProfile.onPostConvertRulesProfile(projectRoot, assetsDir);

			// Verify no files were copied
			expect(mockReaddirSync).not.toHaveBeenCalled();
			expect(mockCopyFileSync).not.toHaveBeenCalled();
		});

		it('should handle case when no hook files exist in source directory', () => {
			const projectRoot = '/test/project';
			const assetsDir = '/test/assets';

			// Mock directory exists but has no hook files
			mockExistsSync.mockReturnValue(true);
			mockReaddirSync.mockReturnValue(['readme.txt', 'config.json']);

			// Call the lifecycle hook
			kiroProfile.onPostConvertRulesProfile(projectRoot, assetsDir);

			// Verify no files were copied
			expect(mockCopyFileSync).not.toHaveBeenCalled();
		});
	});
});

```
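
The profile's `fileMap` and `convertRuleToProfileRule` fit together roughly as sketched below. This is not the repository's actual conversion driver: the `assetsDir` argument and the assumption that targets land under `kiroProfile.rulesDir` are illustrative, and the import paths simply mirror the test above.

```typescript
import path from 'path';
import { convertRuleToProfileRule } from '../../../src/utils/rule-transformer.js';
import { kiroProfile } from '../../../src/profiles/kiro.js';

// Convert every rule listed in the profile's fileMap, e.g.
// 'rules/dev_workflow.mdc' -> '<rulesDir>/dev_workflow.md'.
export function convertKiroRules(assetsDir: string, projectRoot: string): void {
	for (const [source, target] of Object.entries(kiroProfile.fileMap)) {
		const ok = convertRuleToProfileRule(
			path.join(assetsDir, source),
			path.join(projectRoot, kiroProfile.rulesDir, String(target)),
			kiroProfile
		);
		if (!ok) {
			// convertRuleToProfileRule returns false on read/write errors
			console.warn(`Failed to convert ${source}`);
		}
	}
}
```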

--------------------------------------------------------------------------------
/tests/unit/profiles/rule-transformer.test.js:
--------------------------------------------------------------------------------

```javascript
import {
	isValidProfile,
	getRulesProfile
} from '../../../src/utils/rule-transformer.js';
import { RULE_PROFILES } from '../../../src/constants/profiles.js';
import path from 'path';

describe('Rule Transformer - General', () => {
	describe('Profile Configuration Validation', () => {
		it('should use RULE_PROFILES as the single source of truth', () => {
			// Ensure RULE_PROFILES is properly defined and contains expected profiles
			expect(Array.isArray(RULE_PROFILES)).toBe(true);
			expect(RULE_PROFILES.length).toBeGreaterThan(0);

			// Verify expected profiles are present
			const expectedProfiles = [
				'claude',
				'cline',
				'codex',
				'cursor',
				'gemini',
				'kiro',
				'opencode',
				'roo',
				'trae',
				'vscode',
				'windsurf',
				'zed'
			];
			expectedProfiles.forEach((profile) => {
				expect(RULE_PROFILES).toContain(profile);
			});
		});

		it('should validate profiles correctly with isValidProfile', () => {
			// Test valid profiles
			RULE_PROFILES.forEach((profile) => {
				expect(isValidProfile(profile)).toBe(true);
			});

			// Test invalid profiles
			expect(isValidProfile('invalid')).toBe(false);
			expect(isValidProfile('')).toBe(false);
			expect(isValidProfile(null)).toBe(false);
			expect(isValidProfile(undefined)).toBe(false);
		});

		it('should return correct rule profile with getRulesProfile', () => {
			// Test valid profiles
			RULE_PROFILES.forEach((profile) => {
				const profileConfig = getRulesProfile(profile);
				expect(profileConfig).toBeDefined();
				expect(profileConfig.profileName.toLowerCase()).toBe(profile);
			});

			// Test invalid profile - should return null
			expect(getRulesProfile('invalid')).toBeNull();
		});
	});

	describe('Profile Structure', () => {
		it('should have all required properties for each profile', () => {
			RULE_PROFILES.forEach((profile) => {
				const profileConfig = getRulesProfile(profile);

				// Check required properties
				expect(profileConfig).toHaveProperty('profileName');
				expect(profileConfig).toHaveProperty('conversionConfig');
				expect(profileConfig).toHaveProperty('fileMap');
				expect(profileConfig).toHaveProperty('rulesDir');
				expect(profileConfig).toHaveProperty('profileDir');

				// All profiles should have conversionConfig and fileMap objects
				expect(typeof profileConfig.conversionConfig).toBe('object');
				expect(typeof profileConfig.fileMap).toBe('object');

				// Check that conversionConfig has required structure for profiles with rules
				const hasRules = Object.keys(profileConfig.fileMap).length > 0;
				if (hasRules) {
					expect(profileConfig.conversionConfig).toHaveProperty('profileTerms');
					expect(profileConfig.conversionConfig).toHaveProperty('toolNames');
					expect(profileConfig.conversionConfig).toHaveProperty('toolContexts');
					expect(profileConfig.conversionConfig).toHaveProperty('toolGroups');
					expect(profileConfig.conversionConfig).toHaveProperty('docUrls');
					expect(profileConfig.conversionConfig).toHaveProperty(
						'fileReferences'
					);

					// Verify arrays are actually arrays
					expect(
						Array.isArray(profileConfig.conversionConfig.profileTerms)
					).toBe(true);
					expect(typeof profileConfig.conversionConfig.toolNames).toBe(
						'object'
					);
					expect(
						Array.isArray(profileConfig.conversionConfig.toolContexts)
					).toBe(true);
					expect(Array.isArray(profileConfig.conversionConfig.toolGroups)).toBe(
						true
					);
					expect(Array.isArray(profileConfig.conversionConfig.docUrls)).toBe(
						true
					);
				}
			});
		});

		it('should have valid fileMap with required files for each profile', () => {
			const expectedRuleFiles = [
				'cursor_rules.mdc',
				'dev_workflow.mdc',
				'self_improve.mdc',
				'taskmaster.mdc'
			];

			RULE_PROFILES.forEach((profile) => {
				const profileConfig = getRulesProfile(profile);

				// Check that fileMap exists and is an object
				expect(profileConfig.fileMap).toBeDefined();
				expect(typeof profileConfig.fileMap).toBe('object');
				expect(profileConfig.fileMap).not.toBeNull();

				const fileMapKeys = Object.keys(profileConfig.fileMap);

				// All profiles should have some fileMap entries now
				expect(fileMapKeys.length).toBeGreaterThan(0);

				// Check if this profile has rule files or asset files
				const hasRuleFiles = expectedRuleFiles.some((file) =>
					fileMapKeys.includes(file)
				);
				const hasAssetFiles = fileMapKeys.some(
					(file) => !expectedRuleFiles.includes(file)
				);

				if (hasRuleFiles) {
					// Profiles with rule files should have all expected rule files
					expectedRuleFiles.forEach((expectedFile) => {
						expect(fileMapKeys).toContain(expectedFile);
						expect(typeof profileConfig.fileMap[expectedFile]).toBe('string');
						expect(profileConfig.fileMap[expectedFile].length).toBeGreaterThan(
							0
						);
					});
				}

				if (hasAssetFiles) {
					// Profiles with asset files (like Claude/Codex) should have valid asset mappings
					fileMapKeys.forEach((key) => {
						expect(typeof profileConfig.fileMap[key]).toBe('string');
						expect(profileConfig.fileMap[key].length).toBeGreaterThan(0);
					});
				}
			});
		});
	});

	describe('MCP Configuration Properties', () => {
		it('should have all required MCP properties for each profile', () => {
			RULE_PROFILES.forEach((profile) => {
				const profileConfig = getRulesProfile(profile);

				// Check MCP-related properties exist
				expect(profileConfig).toHaveProperty('mcpConfig');
				expect(profileConfig).toHaveProperty('mcpConfigName');
				expect(profileConfig).toHaveProperty('mcpConfigPath');

				// Check types based on MCP configuration
				expect(typeof profileConfig.mcpConfig).toBe('boolean');

				if (profileConfig.mcpConfig !== false) {
					// Check that mcpConfigPath is properly constructed
					const expectedPath = path.join(
						profileConfig.profileDir,
						profileConfig.mcpConfigName
					);
					expect(profileConfig.mcpConfigPath).toBe(expectedPath);
				}
			});
		});

		it('should have correct MCP configuration for each profile', () => {
			const expectedConfigs = {
				amp: {
					mcpConfig: true,
					mcpConfigName: 'settings.json',
					expectedPath: '.vscode/settings.json'
				},
				claude: {
					mcpConfig: true,
					mcpConfigName: '.mcp.json',
					expectedPath: '.mcp.json'
				},
				cline: {
					mcpConfig: false,
					mcpConfigName: null,
					expectedPath: null
				},
				codex: {
					mcpConfig: false,
					mcpConfigName: null,
					expectedPath: null
				},
				cursor: {
					mcpConfig: true,
					mcpConfigName: 'mcp.json',
					expectedPath: '.cursor/mcp.json'
				},
				gemini: {
					mcpConfig: true,
					mcpConfigName: 'settings.json',
					expectedPath: '.gemini/settings.json'
				},
				kiro: {
					mcpConfig: true,
					mcpConfigName: 'settings/mcp.json',
					expectedPath: '.kiro/settings/mcp.json'
				},
				opencode: {
					mcpConfig: true,
					mcpConfigName: 'opencode.json',
					expectedPath: 'opencode.json'
				},
				roo: {
					mcpConfig: true,
					mcpConfigName: 'mcp.json',
					expectedPath: '.roo/mcp.json'
				},
				kilo: {
					mcpConfig: true,
					mcpConfigName: 'mcp.json',
					expectedPath: '.kilo/mcp.json'
				},
				trae: {
					mcpConfig: false,
					mcpConfigName: null,
					expectedPath: null
				},
				vscode: {
					mcpConfig: true,
					mcpConfigName: 'mcp.json',
					expectedPath: '.vscode/mcp.json'
				},
				windsurf: {
					mcpConfig: true,
					mcpConfigName: 'mcp.json',
					expectedPath: '.windsurf/mcp.json'
				},
				zed: {
					mcpConfig: true,
					mcpConfigName: 'settings.json',
					expectedPath: '.zed/settings.json'
				}
			};

			RULE_PROFILES.forEach((profile) => {
				const profileConfig = getRulesProfile(profile);
				const expected = expectedConfigs[profile];

				expect(profileConfig.mcpConfig).toBe(expected.mcpConfig);
				expect(profileConfig.mcpConfigName).toBe(expected.mcpConfigName);
				expect(profileConfig.mcpConfigPath).toBe(expected.expectedPath);
			});
		});

		it('should have consistent profileDir and mcpConfigPath relationship', () => {
			RULE_PROFILES.forEach((profile) => {
				const profileConfig = getRulesProfile(profile);
				if (profileConfig.mcpConfig !== false) {
					// Profiles with MCP configuration should have valid paths
					// Handle root directory profiles differently
					if (profileConfig.profileDir === '.') {
						if (profile === 'claude') {
							// Claude explicitly uses '.mcp.json'
							expect(profileConfig.mcpConfigPath).toBe('.mcp.json');
						} else {
							// Other root profiles normalize to just the filename
							expect(profileConfig.mcpConfigPath).toBe(
								profileConfig.mcpConfigName
							);
						}
					} else {
						// Non-root profiles should have profileDir/configName pattern
						expect(profileConfig.mcpConfigPath).toMatch(
							new RegExp(
								`^${profileConfig.profileDir.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')}/`
							)
						);
					}
				}
			});
		});

		it('should have unique profile directories', () => {
			const profileDirs = RULE_PROFILES.map((profile) => {
				const profileConfig = getRulesProfile(profile);
				return profileConfig.profileDir;
			});

			// Note: Claude and Codex both use "." (root directory) so we expect some duplication
			const uniqueProfileDirs = [...new Set(profileDirs)];
			// We should have fewer unique directories than total profiles due to simple profiles using root
			expect(uniqueProfileDirs.length).toBeLessThanOrEqual(profileDirs.length);
			expect(uniqueProfileDirs.length).toBeGreaterThan(0);
		});

		it('should have unique MCP config paths', () => {
			const mcpConfigPaths = RULE_PROFILES.map((profile) => {
				const profileConfig = getRulesProfile(profile);
				return profileConfig.mcpConfigPath;
			});

			// Note: profiles without MCP support (cline, codex, trae) share a null mcpConfigPath, so we expect some duplication
			const uniqueMcpConfigPaths = [...new Set(mcpConfigPaths)];
			// We should have fewer unique paths than total profiles due to simple profiles having null
			expect(uniqueMcpConfigPaths.length).toBeLessThanOrEqual(
				mcpConfigPaths.length
			);
			expect(uniqueMcpConfigPaths.length).toBeGreaterThan(0);
		});
	});
});

```
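
As a small usage sketch of the helpers exercised above, a caller might guard a user-supplied profile name like this; the error message wording is an assumption for illustration.

```typescript
import {
	isValidProfile,
	getRulesProfile
} from '../../../src/utils/rule-transformer.js';
import { RULE_PROFILES } from '../../../src/constants/profiles.js';

// Validate a user-supplied profile name before doing any conversion work.
export function resolveProfileOrThrow(name: string) {
	if (!isValidProfile(name)) {
		throw new Error(
			`Unknown profile "${name}". Valid profiles: ${RULE_PROFILES.join(', ')}`
		);
	}
	// getRulesProfile() returns null only for invalid names, which we ruled out.
	return getRulesProfile(name)!;
}
```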

--------------------------------------------------------------------------------
/packages/tm-core/src/modules/ai/providers/base-provider.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * @fileoverview Abstract base provider with Template Method pattern for AI providers
 * Provides common functionality, error handling, and retry logic
 */

import {
	ERROR_CODES,
	TaskMasterError
} from '../../../common/errors/task-master-error.js';
import type {
	AIModel,
	AIOptions,
	AIResponse,
	IAIProvider,
	ProviderInfo,
	ProviderUsageStats
} from '../interfaces/ai-provider.interface.js';

// Constants for retry logic
const DEFAULT_MAX_RETRIES = 3;
const BASE_RETRY_DELAY_MS = 1000;
const MAX_RETRY_DELAY_MS = 32000;
const BACKOFF_MULTIPLIER = 2;
const JITTER_FACTOR = 0.1;

// Constants for validation
const MIN_PROMPT_LENGTH = 1;
const MAX_PROMPT_LENGTH = 100000;
const MIN_TEMPERATURE = 0;
const MAX_TEMPERATURE = 2;
const MIN_MAX_TOKENS = 1;
const MAX_MAX_TOKENS = 131072;

/**
 * Configuration for BaseProvider
 */
export interface BaseProviderConfig {
	apiKey: string;
	model?: string;
}

/**
 * Internal completion result structure
 */
export interface CompletionResult {
	content: string;
	inputTokens?: number;
	outputTokens?: number;
	finishReason?: string;
	model?: string;
}

/**
 * Validation result for input validation
 */
interface ValidationResult {
	valid: boolean;
	error?: string;
}

/**
 * Prepared request after preprocessing
 */
interface PreparedRequest {
	prompt: string;
	options: AIOptions;
	metadata: Record<string, any>;
}

/**
 * Abstract base provider implementing Template Method pattern
 * Provides common error handling, retry logic, and validation
 */
export abstract class BaseProvider implements IAIProvider {
	protected readonly apiKey: string;
	protected model: string;

	constructor(config: BaseProviderConfig) {
		if (!config.apiKey) {
			throw new TaskMasterError(
				'API key is required',
				ERROR_CODES.AUTHENTICATION_ERROR
			);
		}
		this.apiKey = config.apiKey;
		this.model = config.model || this.getDefaultModel();
	}

	/**
	 * Template method for generating completions
	 * Handles validation, retries, and error handling
	 */
	async generateCompletion(
		prompt: string,
		options?: AIOptions
	): Promise<AIResponse> {
		// Validate input
		const validation = this.validateInput(prompt, options);
		if (!validation.valid) {
			throw new TaskMasterError(
				validation.error || 'Invalid input',
				ERROR_CODES.VALIDATION_ERROR
			);
		}

		// Prepare request
		const prepared = this.prepareRequest(prompt, options);

		// Execute with retry logic
		let lastError: Error | undefined;
		const maxRetries = this.getMaxRetries();

		for (let attempt = 1; attempt <= maxRetries; attempt++) {
			try {
				const startTime = Date.now();
				const result = await this.generateCompletionInternal(
					prepared.prompt,
					prepared.options
				);

				const duration = Date.now() - startTime;
				return this.handleResponse(result, duration, prepared);
			} catch (error) {
				lastError = error as Error;

				if (!this.shouldRetry(error, attempt)) {
					break;
				}

				const delay = this.calculateBackoffDelay(attempt);
				await this.sleep(delay);
			}
		}

		// All retries failed
		this.handleError(lastError || new Error('Unknown error'));
	}

	/**
	 * Validate input prompt and options
	 */
	protected validateInput(
		prompt: string,
		options?: AIOptions
	): ValidationResult {
		// Validate prompt
		if (!prompt || typeof prompt !== 'string') {
			return { valid: false, error: 'Prompt must be a non-empty string' };
		}

		const trimmedPrompt = prompt.trim();
		if (trimmedPrompt.length < MIN_PROMPT_LENGTH) {
			return { valid: false, error: 'Prompt cannot be empty' };
		}

		if (trimmedPrompt.length > MAX_PROMPT_LENGTH) {
			return {
				valid: false,
				error: `Prompt exceeds maximum length of ${MAX_PROMPT_LENGTH} characters`
			};
		}

		// Validate options if provided
		if (options) {
			const optionValidation = this.validateOptions(options);
			if (!optionValidation.valid) {
				return optionValidation;
			}
		}

		return { valid: true };
	}

	/**
	 * Validate completion options
	 */
	protected validateOptions(options: AIOptions): ValidationResult {
		if (options.temperature !== undefined) {
			if (
				options.temperature < MIN_TEMPERATURE ||
				options.temperature > MAX_TEMPERATURE
			) {
				return {
					valid: false,
					error: `Temperature must be between ${MIN_TEMPERATURE} and ${MAX_TEMPERATURE}`
				};
			}
		}

		if (options.maxTokens !== undefined) {
			if (
				options.maxTokens < MIN_MAX_TOKENS ||
				options.maxTokens > MAX_MAX_TOKENS
			) {
				return {
					valid: false,
					error: `Max tokens must be between ${MIN_MAX_TOKENS} and ${MAX_MAX_TOKENS}`
				};
			}
		}

		if (options.topP !== undefined) {
			if (options.topP < 0 || options.topP > 1) {
				return { valid: false, error: 'Top-p must be between 0 and 1' };
			}
		}

		return { valid: true };
	}

	/**
	 * Prepare request for processing
	 */
	protected prepareRequest(
		prompt: string,
		options?: AIOptions
	): PreparedRequest {
		const defaultOptions = this.getDefaultOptions();
		const mergedOptions = { ...defaultOptions, ...options };

		return {
			prompt: prompt.trim(),
			options: mergedOptions,
			metadata: {
				provider: this.getName(),
				model: this.model,
				timestamp: new Date().toISOString()
			}
		};
	}

	/**
	 * Process and format the response
	 */
	protected handleResponse(
		result: CompletionResult,
		duration: number,
		request: PreparedRequest
	): AIResponse {
		const inputTokens =
			result.inputTokens || this.calculateTokens(request.prompt);
		const outputTokens =
			result.outputTokens || this.calculateTokens(result.content);

		return {
			content: result.content,
			inputTokens,
			outputTokens,
			totalTokens: inputTokens + outputTokens,
			model: result.model || this.model,
			provider: this.getName(),
			timestamp: request.metadata.timestamp,
			duration,
			finishReason: result.finishReason
		};
	}

	/**
	 * Handle errors with proper wrapping
	 */
	protected handleError(error: unknown): never {
		if (error instanceof TaskMasterError) {
			throw error;
		}

		const errorMessage = error instanceof Error ? error.message : String(error);
		const errorCode = this.getErrorCode(error);

		throw new TaskMasterError(
			`${this.getName()} provider error: ${errorMessage}`,
			errorCode,
			{
				operation: 'generateCompletion',
				resource: this.getName(),
				details:
					error instanceof Error
						? {
								name: error.name,
								stack: error.stack,
								model: this.model
							}
						: { error: String(error), model: this.model }
			},
			error instanceof Error ? error : undefined
		);
	}

	/**
	 * Determine if request should be retried
	 */
	protected shouldRetry(error: unknown, attempt: number): boolean {
		if (attempt >= this.getMaxRetries()) {
			return false;
		}

		return this.isRetryableError(error);
	}

	/**
	 * Check if error is retryable
	 */
	protected isRetryableError(error: unknown): boolean {
		if (this.isRateLimitError(error)) return true;
		if (this.isTimeoutError(error)) return true;
		if (this.isNetworkError(error)) return true;

		return false;
	}

	/**
	 * Check if error is a rate limit error
	 */
	protected isRateLimitError(error: unknown): boolean {
		if (error instanceof Error) {
			const message = error.message.toLowerCase();
			return (
				message.includes('rate limit') ||
				message.includes('too many requests') ||
				message.includes('429')
			);
		}
		return false;
	}

	/**
	 * Check if error is a timeout error
	 */
	protected isTimeoutError(error: unknown): boolean {
		if (error instanceof Error) {
			const message = error.message.toLowerCase();
			return (
				message.includes('timeout') ||
				message.includes('timed out') ||
				message.includes('econnreset')
			);
		}
		return false;
	}

	/**
	 * Check if error is a network error
	 */
	protected isNetworkError(error: unknown): boolean {
		if (error instanceof Error) {
			const message = error.message.toLowerCase();
			return (
				message.includes('network') ||
				message.includes('enotfound') ||
				message.includes('econnrefused')
			);
		}
		return false;
	}

	/**
	 * Calculate exponential backoff delay with jitter
	 */
	protected calculateBackoffDelay(attempt: number): number {
		const exponentialDelay =
			BASE_RETRY_DELAY_MS * BACKOFF_MULTIPLIER ** (attempt - 1);
		const clampedDelay = Math.min(exponentialDelay, MAX_RETRY_DELAY_MS);
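		// With the defaults above: attempt 1 -> ~1s, attempt 2 -> ~2s,
		// attempt 3 -> ~4s, clamped at 32s before jitter is applied.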

		// Add jitter to prevent thundering herd
		const jitter = clampedDelay * JITTER_FACTOR * (Math.random() - 0.5) * 2;

		return Math.round(clampedDelay + jitter);
	}

	/**
	 * Get error code from error
	 */
	protected getErrorCode(error: unknown): string {
		if (this.isRateLimitError(error)) return ERROR_CODES.API_ERROR;
		if (this.isTimeoutError(error)) return ERROR_CODES.NETWORK_ERROR;
		if (this.isNetworkError(error)) return ERROR_CODES.NETWORK_ERROR;

		if (error instanceof Error && error.message.includes('401')) {
			return ERROR_CODES.AUTHENTICATION_ERROR;
		}

		return ERROR_CODES.PROVIDER_ERROR;
	}

	/**
	 * Sleep utility for delays
	 */
	protected sleep(ms: number): Promise<void> {
		return new Promise((resolve) => setTimeout(resolve, ms));
	}

	/**
	 * Get default options for completions
	 */
	protected getDefaultOptions(): AIOptions {
		return {
			temperature: 0.7,
			maxTokens: 2000,
			topP: 1.0
		};
	}

	/**
	 * Get maximum retry attempts
	 */
	protected getMaxRetries(): number {
		return DEFAULT_MAX_RETRIES;
	}

	// Public interface methods
	getModel(): string {
		return this.model;
	}

	setModel(model: string): void {
		this.model = model;
	}

	// Abstract methods that must be implemented by concrete providers
	protected abstract generateCompletionInternal(
		prompt: string,
		options?: AIOptions
	): Promise<CompletionResult>;

	abstract calculateTokens(text: string, model?: string): number;
	abstract getName(): string;
	abstract getDefaultModel(): string;

	// IAIProvider methods that must be implemented
	abstract generateStreamingCompletion(
		prompt: string,
		options?: AIOptions
	): AsyncIterator<Partial<AIResponse>>;
	abstract isAvailable(): Promise<boolean>;
	abstract getProviderInfo(): ProviderInfo;
	abstract getAvailableModels(): AIModel[];
	abstract validateCredentials(): Promise<boolean>;
	abstract getUsageStats(): Promise<ProviderUsageStats | null>;
	abstract initialize(): Promise<void>;
	abstract close(): Promise<void>;
}

```
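
To make the Template Method contract above concrete, here is a minimal sketch of a provider subclass. Everything specific to it is hypothetical: the endpoint URL, the `{ text }` response shape, the 4-characters-per-token estimate, and the stubbed interface members are assumptions for illustration rather than code from this repository, and the import paths assume the file sits next to `base-provider.ts`.

```typescript
import { BaseProvider, type CompletionResult } from './base-provider.js';
import type {
	AIModel,
	AIOptions,
	AIResponse,
	ProviderInfo,
	ProviderUsageStats
} from '../interfaces/ai-provider.interface.js';

/** Hypothetical provider used only to illustrate the abstract hooks. */
export class ExampleProvider extends BaseProvider {
	// Core hook: generateCompletion() wraps this with validation, retries,
	// and error handling, so this method only needs the raw API call.
	protected async generateCompletionInternal(
		prompt: string,
		options?: AIOptions
	): Promise<CompletionResult> {
		// Hypothetical endpoint; a real provider would call its SDK here.
		const res = await fetch('https://api.example.com/v1/complete', {
			method: 'POST',
			headers: {
				Authorization: `Bearer ${this.apiKey}`,
				'Content-Type': 'application/json'
			},
			body: JSON.stringify({ model: this.model, prompt, ...options })
		});
		if (!res.ok) throw new Error(`HTTP ${res.status}`); // feeds the retry logic
		const data = (await res.json()) as { text: string };
		return { content: data.text, model: this.model };
	}

	// Crude estimate; real providers would use a proper tokenizer.
	calculateTokens(text: string): number {
		return Math.ceil(text.length / 4);
	}

	getName(): string {
		return 'example';
	}

	getDefaultModel(): string {
		return 'example-model-1';
	}

	// Remaining IAIProvider members stubbed so the sketch stays short.
	generateStreamingCompletion(): AsyncIterator<Partial<AIResponse>> {
		throw new Error('Not implemented in this sketch');
	}
	async isAvailable(): Promise<boolean> {
		return true;
	}
	getProviderInfo(): ProviderInfo {
		throw new Error('Not implemented in this sketch');
	}
	getAvailableModels(): AIModel[] {
		return [];
	}
	async validateCredentials(): Promise<boolean> {
		return true;
	}
	async getUsageStats(): Promise<ProviderUsageStats | null> {
		return null;
	}
	async initialize(): Promise<void> {}
	async close(): Promise<void> {}
}

// new ExampleProvider({ apiKey: process.env.EXAMPLE_API_KEY! })
// 	.generateCompletion('Summarize task 4')
// 	.then((r) => console.log(r.content, r.totalTokens));
```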

--------------------------------------------------------------------------------
/scripts/modules/task-manager/update-tasks.js:
--------------------------------------------------------------------------------

```javascript
import path from 'path';
import chalk from 'chalk';
import boxen from 'boxen';
import Table from 'cli-table3';

import {
	log as consoleLog,
	readJSON,
	writeJSON,
	truncate,
	isSilentMode
} from '../utils.js';

import {
	getStatusWithColor,
	startLoadingIndicator,
	stopLoadingIndicator,
	displayAiUsageSummary
} from '../ui.js';

import { getDebugFlag, hasCodebaseAnalysis } from '../config-manager.js';
import { getPromptManager } from '../prompt-manager.js';
import generateTaskFiles from './generate-task-files.js';
import { generateObjectService } from '../ai-services-unified.js';
import { COMMAND_SCHEMAS } from '../../../src/schemas/registry.js';
import { getModelConfiguration } from './models.js';
import { ContextGatherer } from '../utils/contextGatherer.js';
import { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';
import { flattenTasksWithSubtasks, findProjectRoot } from '../utils.js';

/**
 * Update tasks based on new context using the unified AI service.
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {number} fromId - Task ID to start updating from
 * @param {string} prompt - Prompt with new context
 * @param {boolean} [useResearch=false] - Whether to use the research AI role.
 * @param {Object} context - Context object containing session and mcpLog.
 * @param {Object} [context.session] - Session object from MCP server.
 * @param {Object} [context.mcpLog] - MCP logger object.
 * @param {string} [context.tag] - Tag for the task
 * @param {string} [outputFormat='text'] - Output format ('text' or 'json').
 */
async function updateTasks(
	tasksPath,
	fromId,
	prompt,
	useResearch = false,
	context = {},
	outputFormat = 'text' // Default to text for CLI
) {
	const { session, mcpLog, projectRoot: providedProjectRoot, tag } = context;
	// Use mcpLog if available, otherwise use the imported consoleLog function
	const logFn = mcpLog || consoleLog;
	// Flag to easily check which logger type we have
	const isMCP = !!mcpLog;

	if (isMCP)
		logFn.info(`updateTasks called with context: session=${!!session}`);
	else logFn('info', `updateTasks called`); // CLI log

	try {
		if (isMCP) logFn.info(`Updating tasks from ID ${fromId}`);
		else
			logFn(
				'info',
				`Updating tasks from ID ${fromId} with prompt: "${prompt}"`
			);

		// Determine project root
		const projectRoot = providedProjectRoot || findProjectRoot();
		if (!projectRoot) {
			throw new Error('Could not determine project root directory');
		}

		// --- Task Loading/Filtering (Updated to pass projectRoot and tag) ---
		const data = readJSON(tasksPath, projectRoot, tag);
		if (!data || !data.tasks)
			throw new Error(`No valid tasks found in ${tasksPath}`);
		const tasksToUpdate = data.tasks.filter(
			(task) => task.id >= fromId && task.status !== 'done'
		);
		if (tasksToUpdate.length === 0) {
			if (isMCP)
				logFn.info(`No tasks to update (ID >= ${fromId} and not 'done').`);
			else
				logFn('info', `No tasks to update (ID >= ${fromId} and not 'done').`);
			if (outputFormat === 'text') console.log(/* yellow message */);
			return; // Nothing to do
		}
		// --- End Task Loading/Filtering ---

		// --- Context Gathering ---
		let gatheredContext = '';
		try {
			const contextGatherer = new ContextGatherer(projectRoot, tag);
			const allTasksFlat = flattenTasksWithSubtasks(data.tasks);
			const fuzzySearch = new FuzzyTaskSearch(allTasksFlat, 'update');
			const searchResults = fuzzySearch.findRelevantTasks(prompt, {
				maxResults: 5,
				includeSelf: true
			});
			const relevantTaskIds = fuzzySearch.getTaskIds(searchResults);

			const tasksToUpdateIds = tasksToUpdate.map((t) => t.id.toString());
			const finalTaskIds = [
				...new Set([...tasksToUpdateIds, ...relevantTaskIds])
			];

			if (finalTaskIds.length > 0) {
				const contextResult = await contextGatherer.gather({
					tasks: finalTaskIds,
					format: 'research'
				});
				gatheredContext = contextResult.context || '';
			}
		} catch (contextError) {
			logFn(
				'warn',
				`Could not gather additional context: ${contextError.message}`
			);
		}
		// --- End Context Gathering ---

		// --- Display Tasks to Update (CLI Only - Unchanged) ---
		if (outputFormat === 'text') {
			// Show the tasks that will be updated
			const table = new Table({
				head: [
					chalk.cyan.bold('ID'),
					chalk.cyan.bold('Title'),
					chalk.cyan.bold('Status')
				],
				colWidths: [5, 70, 20]
			});

			tasksToUpdate.forEach((task) => {
				table.push([
					task.id,
					truncate(task.title, 57),
					getStatusWithColor(task.status)
				]);
			});

			console.log(
				boxen(chalk.white.bold(`Updating ${tasksToUpdate.length} tasks`), {
					padding: 1,
					borderColor: 'blue',
					borderStyle: 'round',
					margin: { top: 1, bottom: 0 }
				})
			);

			console.log(table.toString());

			// Display a message about how completed subtasks are handled
			console.log(
				boxen(
					chalk.cyan.bold('How Completed Subtasks Are Handled:') +
						'\n\n' +
						chalk.white(
							'• Subtasks marked as "done" or "completed" will be preserved\n'
						) +
						chalk.white(
							'• New subtasks will build upon what has already been completed\n'
						) +
						chalk.white(
							'• If completed work needs revision, a new subtask will be created instead of modifying done items\n'
						) +
						chalk.white(
							'• This approach maintains a clear record of completed work and new requirements'
						),
					{
						padding: 1,
						borderColor: 'blue',
						borderStyle: 'round',
						margin: { top: 1, bottom: 1 }
					}
				)
			);
		}
		// --- End Display Tasks ---

		// --- Build Prompts (Using PromptManager) ---
		// Load prompts using PromptManager
		const promptManager = getPromptManager();
		const { systemPrompt, userPrompt } = await promptManager.loadPrompt(
			'update-tasks',
			{
				tasks: tasksToUpdate,
				updatePrompt: prompt,
				useResearch,
				projectContext: gatheredContext,
				hasCodebaseAnalysis: hasCodebaseAnalysis(
					useResearch,
					projectRoot,
					session
				),
				projectRoot: projectRoot
			}
		);
		// --- End Build Prompts ---

		// --- AI Call ---
		let loadingIndicator = null;
		let aiServiceResponse = null;

		if (!isMCP && outputFormat === 'text') {
			loadingIndicator = startLoadingIndicator('Updating tasks with AI...\n');
		}

		try {
			// Determine role based on research flag
			const serviceRole = useResearch ? 'research' : 'main';

			// Call the unified AI service with generateObject
			aiServiceResponse = await generateObjectService({
				role: serviceRole,
				session: session,
				projectRoot: projectRoot,
				systemPrompt: systemPrompt,
				prompt: userPrompt,
				schema: COMMAND_SCHEMAS['update-tasks'],
				objectName: 'tasks',
				commandName: 'update-tasks',
				outputType: isMCP ? 'mcp' : 'cli'
			});

			if (loadingIndicator)
				stopLoadingIndicator(loadingIndicator, 'AI update complete.');

			// With generateObject, we get structured data directly
			const parsedUpdatedTasks = aiServiceResponse.mainResult.tasks;

			// --- Update Tasks Data (Updated writeJSON call) ---
			if (!Array.isArray(parsedUpdatedTasks)) {
				// Should be caught by parser, but extra check
				throw new Error(
					'Parsed AI response for updated tasks was not an array.'
				);
			}
			if (isMCP)
				logFn.info(
					`Received ${parsedUpdatedTasks.length} updated tasks from AI.`
				);
			else
				logFn(
					'info',
					`Received ${parsedUpdatedTasks.length} updated tasks from AI.`
				);
			// Create a map for efficient lookup
			const updatedTasksMap = new Map(
				parsedUpdatedTasks.map((task) => [task.id, task])
			);

			let actualUpdateCount = 0;
			data.tasks.forEach((task, index) => {
				if (updatedTasksMap.has(task.id)) {
					// Only update if the task was part of the set sent to AI
					const updatedTask = updatedTasksMap.get(task.id);
					// Merge the updated task with the existing one to preserve fields like subtasks
					data.tasks[index] = {
						...task, // Keep all existing fields
						...updatedTask, // Override with updated fields
						// Ensure subtasks field is preserved if not provided by AI
						subtasks:
							updatedTask.subtasks !== undefined
								? updatedTask.subtasks
								: task.subtasks
					};
					actualUpdateCount++;
				}
			});
			if (isMCP)
				logFn.info(
					`Applied updates to ${actualUpdateCount} tasks in the dataset.`
				);
			else
				logFn(
					'info',
					`Applied updates to ${actualUpdateCount} tasks in the dataset.`
				);

			// Fix: Pass projectRoot and currentTag to writeJSON
			writeJSON(tasksPath, data, projectRoot, tag);
			if (isMCP)
				logFn.info(
					`Successfully updated ${actualUpdateCount} tasks in ${tasksPath}`
				);
			else
				logFn(
					'success',
					`Successfully updated ${actualUpdateCount} tasks in ${tasksPath}`
				);
			// await generateTaskFiles(tasksPath, path.dirname(tasksPath));

			if (outputFormat === 'text' && aiServiceResponse.telemetryData) {
				displayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');
			}

			return {
				success: true,
				updatedTasks: parsedUpdatedTasks,
				telemetryData: aiServiceResponse.telemetryData,
				tagInfo: aiServiceResponse.tagInfo
			};
		} catch (error) {
			if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
			if (isMCP) logFn.error(`Error during AI service call: ${error.message}`);
			else logFn('error', `Error during AI service call: ${error.message}`);
			if (error.message.includes('API key')) {
				if (isMCP)
					logFn.error(
						'Please ensure API keys are configured correctly in .env or mcp.json.'
					);
				else
					logFn(
						'error',
						'Please ensure API keys are configured correctly in .env or mcp.json.'
					);
			}
			throw error;
		} finally {
			if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
		}
	} catch (error) {
		// --- General Error Handling (Unchanged) ---
		if (isMCP) logFn.error(`Error updating tasks: ${error.message}`);
		else logFn('error', `Error updating tasks: ${error.message}`);
		if (outputFormat === 'text') {
			console.error(chalk.red(`Error: ${error.message}`));
			if (getDebugFlag(session)) {
				console.error(error);
			}
			process.exit(1);
		} else {
			throw error; // Re-throw for MCP/programmatic callers
		}
		// --- End General Error Handling ---
	}
}

export default updateTasks;

```
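
A hedged sketch of how this module might be driven from an MCP-style caller, following the JSDoc above; the tasks path, tag, project root, and logger object are illustrative assumptions rather than code from the repository.

```typescript
import updateTasks from './scripts/modules/task-manager/update-tasks.js';

// Minimal structured logger matching the mcpLog shape the function expects.
const mcpLog = {
	info: (msg: string) => console.log(msg),
	warn: (msg: string) => console.warn(msg),
	error: (msg: string) => console.error(msg)
};

// MCP-style call: JSON output, errors are re-thrown instead of process.exit(1).
const result = await updateTasks(
	'.taskmaster/tasks/tasks.json', // tasksPath
	4, // fromId: update task 4 and later tasks that are not 'done'
	'Switch the auth layer from sessions to JWT', // prompt with new context
	true, // useResearch: use the research AI role
	{ mcpLog, projectRoot: '/abs/path/to/project', tag: 'master' },
	'json'
);
console.log(result?.telemetryData);
```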

--------------------------------------------------------------------------------
/packages/tm-core/src/modules/tasks/tasks-domain.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * @fileoverview Tasks Domain Facade
 * Public API for task-related operations
 */

import type { ConfigManager } from '../config/managers/config-manager.js';
import type { AuthDomain } from '../auth/auth-domain.js';
import { BriefsDomain } from '../briefs/briefs-domain.js';
import { TaskService } from './services/task-service.js';
import { TaskExecutionService } from './services/task-execution-service.js';
import { TaskLoaderService } from './services/task-loader.service.js';
import { PreflightChecker } from './services/preflight-checker.service.js';
import { TagService } from './services/tag.service.js';
import type {
	CreateTagOptions,
	DeleteTagOptions,
	CopyTagOptions
} from './services/tag.service.js';

import type { Subtask, Task, TaskStatus } from '../../common/types/index.js';
import type {
	TaskListResult,
	GetTaskListOptions
} from './services/task-service.js';
import type {
	StartTaskOptions,
	StartTaskResult
} from './services/task-execution-service.js';
import type { PreflightResult } from './services/preflight-checker.service.js';
import type { TaskValidationResult } from './services/task-loader.service.js';
import type { ExpandTaskResult } from '../integration/services/task-expansion.service.js';

/**
 * Tasks Domain - Unified API for all task operations
 */
export class TasksDomain {
	private taskService: TaskService;
	private executionService: TaskExecutionService;
	private loaderService: TaskLoaderService;
	private preflightChecker: PreflightChecker;
	private briefsDomain: BriefsDomain;
	private tagService!: TagService;

	constructor(configManager: ConfigManager, _authDomain?: AuthDomain) {
		this.taskService = new TaskService(configManager);
		this.executionService = new TaskExecutionService(this.taskService);
		this.loaderService = new TaskLoaderService(this.taskService);
		this.preflightChecker = new PreflightChecker(configManager.getProjectRoot());
		this.briefsDomain = new BriefsDomain();
	}

	async initialize(): Promise<void> {
		await this.taskService.initialize();

		// TagService needs storage - get it from TaskService AFTER initialization
		this.tagService = new TagService(this.taskService.getStorage());
	}

	// ========== Task Retrieval ==========

	/**
	 * Get list of tasks with filtering
	 */
	async list(options?: GetTaskListOptions): Promise<TaskListResult> {
		return this.taskService.getTaskList(options);
	}

	/**
	 * Get a single task by ID
	 * Automatically handles all ID formats:
	 * - Simple task IDs (e.g., "1", "HAM-123")
	 * - Subtask IDs with dot notation (e.g., "1.2", "HAM-123.2")
	 *
	 * @returns Discriminated union indicating task/subtask with proper typing
	 */
	async get(
		taskId: string,
		tag?: string
	): Promise<
		| { task: Task; isSubtask: false }
		| { task: Subtask; isSubtask: true }
		| { task: null; isSubtask: boolean }
	> {
		// Parse ID - check for dot notation (subtask)
		const parts = taskId.split('.');
		const parentId = parts[0];
		const subtaskIdPart = parts[1];

		// Fetch the task
		const task = await this.taskService.getTask(parentId, tag);
		if (!task) {
			return { task: null, isSubtask: false };
		}

		// Handle subtask notation (1.2)
		if (subtaskIdPart && task.subtasks) {
			const subtask = task.subtasks.find(
				(st) => String(st.id) === subtaskIdPart
			);
			if (subtask) {
				// Return the actual subtask with properly typed result
				return { task: subtask, isSubtask: true };
			}
			// Subtask ID provided but not found
			return { task: null, isSubtask: true };
		}

		// It's a regular task
		return { task, isSubtask: false };
	}

	/**
	 * Get tasks by status
	 */
	async getByStatus(status: TaskStatus, tag?: string): Promise<Task[]> {
		return this.taskService.getTasksByStatus(status, tag);
	}

	/**
	 * Get task statistics
	 */
	async getStats(tag?: string) {
		return this.taskService.getTaskStats(tag);
	}

	/**
	 * Get next available task to work on
	 */
	async getNext(tag?: string): Promise<Task | null> {
		return this.taskService.getNextTask(tag);
	}

	// ========== Task Status Management ==========

	/**
	 * Update task with new data (direct structural update)
	 * @param taskId - Task ID (supports numeric, alphanumeric like TAS-49, and subtask IDs like 1.2)
	 * @param updates - Partial task object with fields to update
	 * @param tag - Optional tag context
	 */
	async update(
		taskId: string | number,
		updates: Partial<Task>,
		tag?: string
	): Promise<void> {
		return this.taskService.updateTask(taskId, updates, tag);
	}

	/**
	 * Update task using AI-powered prompt (natural language update)
	 * @param taskId - Task ID (supports numeric, alphanumeric like TAS-49, and subtask IDs like 1.2)
	 * @param prompt - Natural language prompt describing the update
	 * @param tag - Optional tag context
	 * @param options - Optional update options
	 * @param options.useResearch - Use research AI for file storage updates
	 * @param options.mode - Update mode for API storage: 'append', 'update', or 'rewrite'
	 */
	async updateWithPrompt(
		taskId: string | number,
		prompt: string,
		tag?: string,
		options?: { mode?: 'append' | 'update' | 'rewrite'; useResearch?: boolean }
	): Promise<void> {
		return this.taskService.updateTaskWithPrompt(taskId, prompt, tag, options);
	}

	/**
	 * Expand task into subtasks using AI
	 * @returns ExpandTaskResult when using API storage, void for file storage
	 */
	async expand(
		taskId: string | number,
		tag?: string,
		options?: {
			numSubtasks?: number;
			useResearch?: boolean;
			additionalContext?: string;
			force?: boolean;
		}
	): Promise<ExpandTaskResult | void> {
		return this.taskService.expandTaskWithPrompt(taskId, tag, options);
	}

	/**
	 * Update task status
	 */
	async updateStatus(taskId: string, status: TaskStatus, tag?: string) {
		return this.taskService.updateTaskStatus(taskId, status, tag);
	}

	/**
	 * Set active tag
	 */
	async setActiveTag(tag: string): Promise<void> {
		return this.taskService.setActiveTag(tag);
	}

	/**
	 * Resolve a brief by ID, name, or partial match without switching
	 * Returns the full brief object
	 *
	 * Supports:
	 * - Full UUID
	 * - Last 8 characters of UUID
	 * - Brief name (exact or partial match)
	 *
	 * Only works with API storage (briefs).
	 *
	 * @param briefIdOrName - Brief identifier
	 * @param orgId - Optional organization ID
	 * @returns The resolved brief object
	 */
	async resolveBrief(briefIdOrName: string, orgId?: string): Promise<any> {
		return this.briefsDomain.resolveBrief(briefIdOrName, orgId);
	}

	/**
	 * Switch to a different tag/brief context
	 * For file storage: updates active tag in state
	 * For API storage: looks up brief by name and updates auth context
	 */
	async switchTag(tagName: string): Promise<void> {
		const storageType = this.taskService.getStorageType();

		if (storageType === 'file') {
			await this.setActiveTag(tagName);
		} else {
			await this.briefsDomain.switchBrief(tagName);
		}
	}

	// ========== Task Execution ==========

	/**
	 * Start working on a task
	 */
	async start(
		taskId: string,
		options?: StartTaskOptions
	): Promise<StartTaskResult> {
		return this.executionService.startTask(taskId, options);
	}

	/**
	 * Check for in-progress conflicts
	 */
	async checkInProgressConflicts(taskId: string) {
		return this.executionService.checkInProgressConflicts(taskId);
	}

	/**
	 * Get next available task (from execution service)
	 */
	async getNextAvailable(): Promise<string | null> {
		return this.executionService.getNextAvailableTask();
	}

	/**
	 * Check if a task can be started
	 */
	async canStart(taskId: string, force?: boolean): Promise<boolean> {
		return this.executionService.canStartTask(taskId, force);
	}

	// ========== Task Loading & Validation ==========

	/**
	 * Load and validate a task for execution
	 */
	async loadAndValidate(taskId: string): Promise<TaskValidationResult> {
		return this.loaderService.loadAndValidateTask(taskId);
	}

	/**
	 * Get execution order for subtasks
	 */
	getExecutionOrder(task: Task) {
		return this.loaderService.getExecutionOrder(task);
	}

	// ========== Preflight Checks ==========

	/**
	 * Run all preflight checks
	 */
	async runPreflightChecks(): Promise<PreflightResult> {
		return this.preflightChecker.runAllChecks();
	}

	/**
	 * Detect test command
	 */
	async detectTestCommand() {
		return this.preflightChecker.detectTestCommand();
	}

	/**
	 * Check git working tree
	 */
	async checkGitWorkingTree() {
		return this.preflightChecker.checkGitWorkingTree();
	}

	/**
	 * Validate required tools
	 */
	async validateRequiredTools() {
		return this.preflightChecker.validateRequiredTools();
	}

	/**
	 * Detect default git branch
	 */
	async detectDefaultBranch() {
		return this.preflightChecker.detectDefaultBranch();
	}

	// ========== Tag Management ==========

	/**
	 * Create a new tag
	 * For file storage: creates tag locally with optional task copying
	 * For API storage: throws error (client should redirect to web UI)
	 */
	async createTag(name: string, options?: CreateTagOptions) {
		return this.tagService.createTag(name, options);
	}

	/**
	 * Delete an existing tag
	 * Cannot delete master tag
	 * For file storage: deletes tag locally
	 * For API storage: throws error (client should redirect to web UI)
	 */
	async deleteTag(name: string, options?: DeleteTagOptions) {
		return this.tagService.deleteTag(name, options);
	}

	/**
	 * Rename an existing tag
	 * Cannot rename master tag
	 * For file storage: renames tag locally
	 * For API storage: throws error (client should redirect to web UI)
	 */
	async renameTag(oldName: string, newName: string) {
		return this.tagService.renameTag(oldName, newName);
	}

	/**
	 * Copy an existing tag to create a new tag with the same tasks
	 * For file storage: copies tag locally
	 * For API storage: throws error (client should show alternative)
	 */
	async copyTag(source: string, target: string, options?: CopyTagOptions) {
		return this.tagService.copyTag(source, target, options);
	}

	/**
	 * Get all tags with detailed statistics including task counts
	 * For API storage, returns briefs with task counts
	 * For file storage, returns tags from tasks.json with counts
	 */
	async getTagsWithStats() {
		return this.tagService.getTagsWithStats();
	}

	// ========== Storage Information ==========

	/**
	 * Get the resolved storage type (actual type being used at runtime)
	 */
	getStorageType(): 'file' | 'api' {
		return this.taskService.getStorageType();
	}
}

```

--------------------------------------------------------------------------------
/apps/extension/src/components/TaskDetails/AIActionsSection.tsx:
--------------------------------------------------------------------------------

```typescript
import type React from 'react';
import { useState } from 'react';
import { Button } from '@/components/ui/button';
import { Label } from '@/components/ui/label';
import { Textarea } from '@/components/ui/textarea';
import { CollapsibleSection } from '@/components/ui/CollapsibleSection';
import {
	Wand2,
	Loader2,
	PlusCircle,
	TrendingUp,
	TrendingDown
} from 'lucide-react';
import {
	useUpdateTask,
	useUpdateSubtask,
	useScopeUpTask,
	useScopeDownTask
} from '../../webview/hooks/useTaskQueries';
import type { TaskMasterTask } from '../../webview/types';

interface AIActionsSectionProps {
	currentTask: TaskMasterTask;
	isSubtask: boolean;
	parentTask?: TaskMasterTask | null;
	sendMessage: (message: any) => Promise<any>;
	refreshComplexityAfterAI: () => void;
	onRegeneratingChange?: (isRegenerating: boolean) => void;
	onAppendingChange?: (isAppending: boolean) => void;
}

export const AIActionsSection: React.FC<AIActionsSectionProps> = ({
	currentTask,
	isSubtask,
	parentTask,
	sendMessage,
	refreshComplexityAfterAI,
	onRegeneratingChange,
	onAppendingChange
}) => {
	const [prompt, setPrompt] = useState('');
	const [scopePrompt, setScopePrompt] = useState('');
	const [scopeStrength, setScopeStrength] = useState<
		'light' | 'regular' | 'heavy'
	>('regular');
	const [lastAction, setLastAction] = useState<
		'regenerate' | 'append' | 'scope-up' | 'scope-down' | null
	>(null);
	const updateTask = useUpdateTask();
	const updateSubtask = useUpdateSubtask();
	const scopeUpTask = useScopeUpTask();
	const scopeDownTask = useScopeDownTask();

	const handleRegenerate = async () => {
		if (!currentTask || !prompt.trim()) {
			return;
		}

		setLastAction('regenerate');
		onRegeneratingChange?.(true);

		try {
			if (isSubtask && parentTask) {
				await updateSubtask.mutateAsync({
					taskId: `${parentTask.id}.${currentTask.id}`,
					prompt: prompt,
					options: { research: false }
				});
			} else {
				await updateTask.mutateAsync({
					taskId: currentTask.id,
					updates: { description: prompt },
					options: { append: false, research: false }
				});
			}

			setPrompt('');
			refreshComplexityAfterAI();
		} catch (error) {
			console.error('❌ AIActionsSection: Failed to regenerate task:', error);
		} finally {
			setLastAction(null);
			onRegeneratingChange?.(false);
		}
	};

	const handleAppend = async () => {
		if (!currentTask || !prompt.trim()) {
			return;
		}

		setLastAction('append');
		onAppendingChange?.(true);

		try {
			if (isSubtask && parentTask) {
				await updateSubtask.mutateAsync({
					taskId: `${parentTask.id}.${currentTask.id}`,
					prompt: prompt,
					options: { research: false }
				});
			} else {
				await updateTask.mutateAsync({
					taskId: currentTask.id,
					updates: { description: prompt },
					options: { append: true, research: false }
				});
			}

			setPrompt('');
			refreshComplexityAfterAI();
		} catch (error) {
			console.error('❌ AIActionsSection: Failed to append to task:', error);
		} finally {
			setLastAction(null);
			onAppendingChange?.(false);
		}
	};

	const handleScopeUp = async () => {
		if (!currentTask) {
			return;
		}

		setLastAction('scope-up');

		try {
			const taskId =
				isSubtask && parentTask
					? `${parentTask.id}.${currentTask.id}`
					: currentTask.id;

			await scopeUpTask.mutateAsync({
				taskId,
				strength: scopeStrength,
				prompt: scopePrompt.trim() || undefined,
				options: { research: false }
			});

			setScopePrompt('');
			refreshComplexityAfterAI();
		} catch (error) {
			console.error('❌ AIActionsSection: Failed to scope up task:', error);
		} finally {
			setLastAction(null);
		}
	};

	const handleScopeDown = async () => {
		if (!currentTask) {
			return;
		}

		setLastAction('scope-down');

		try {
			const taskId =
				isSubtask && parentTask
					? `${parentTask.id}.${currentTask.id}`
					: currentTask.id;

			await scopeDownTask.mutateAsync({
				taskId,
				strength: scopeStrength,
				prompt: scopePrompt.trim() || undefined,
				options: { research: false }
			});

			setScopePrompt('');
			refreshComplexityAfterAI();
		} catch (error) {
			console.error('❌ AIActionsSection: Failed to scope down task:', error);
		} finally {
			setLastAction(null);
		}
	};

	// Track loading states based on the last action
	const isLoading =
		updateTask.isPending ||
		updateSubtask.isPending ||
		scopeUpTask.isPending ||
		scopeDownTask.isPending;
	const isRegenerating = isLoading && lastAction === 'regenerate';
	const isAppending = isLoading && lastAction === 'append';
	const isScopingUp = isLoading && lastAction === 'scope-up';
	const isScopingDown = isLoading && lastAction === 'scope-down';

	return (
		<CollapsibleSection
			title="AI Actions"
			icon={Wand2}
			defaultExpanded={true}
			buttonClassName="text-vscode-foreground/80 hover:text-vscode-foreground"
		>
			<div className="space-y-6">
				{/* Standard AI Actions Section */}
				<div className="space-y-4">
					<div>
						<Label
							htmlFor="ai-prompt"
							className="block text-sm font-medium text-vscode-foreground/80 mb-2"
						>
							Enter your prompt
						</Label>
						<Textarea
							id="ai-prompt"
							placeholder={
								isSubtask
									? 'Describe implementation notes, progress updates, or findings to add to this subtask...'
									: 'Describe what you want to change or add to this task...'
							}
							value={prompt}
							onChange={(e) => setPrompt(e.target.value)}
							className="min-h-[100px] bg-vscode-input-background border-vscode-input-border text-vscode-input-foreground placeholder-vscode-input-foreground/50 focus:border-vscode-focusBorder focus:ring-vscode-focusBorder"
							disabled={isLoading}
						/>
					</div>

					<div className="flex gap-3">
						{!isSubtask && (
							<Button
								onClick={handleRegenerate}
								disabled={!prompt.trim() || isLoading}
								className="bg-primary text-primary-foreground hover:bg-primary/90"
							>
								{isRegenerating ? (
									<>
										<Loader2 className="w-4 h-4 mr-2 animate-spin" />
										Regenerating...
									</>
								) : (
									<>
										<Wand2 className="w-4 h-4 mr-2" />
										Regenerate Task
									</>
								)}
							</Button>
						)}

						<Button
							onClick={handleAppend}
							disabled={!prompt.trim() || isLoading}
							variant={isSubtask ? 'default' : 'outline'}
							className={
								isSubtask
									? 'bg-primary text-primary-foreground hover:bg-primary/90'
									: 'bg-secondary text-secondary-foreground hover:bg-secondary/90 border-widget-border'
							}
						>
							{isAppending ? (
								<>
									<Loader2 className="w-4 h-4 mr-2 animate-spin" />
									{isSubtask ? 'Updating...' : 'Appending...'}
								</>
							) : (
								<>
									<PlusCircle className="w-4 h-4 mr-2" />
									{isSubtask ? 'Add Notes to Subtask' : 'Append to Task'}
								</>
							)}
						</Button>
					</div>
				</div>

				{/* Scope Adjustment Section */}
				<div className="border-t border-vscode-widget-border pt-4 space-y-4">
					<div>
						<Label className="block text-sm font-medium text-vscode-foreground/80 mb-3">
							Task Complexity Adjustment
						</Label>

						{/* Strength Selection */}
						<div className="mb-3">
							<Label className="block text-xs text-vscode-foreground/60 mb-2">
								Adjustment Strength
							</Label>
							<div className="flex gap-2">
								{(['light', 'regular', 'heavy'] as const).map((strength) => (
									<Button
										key={strength}
										onClick={() => setScopeStrength(strength)}
										variant={scopeStrength === strength ? 'default' : 'outline'}
										size="sm"
										className={
											scopeStrength === strength
												? 'bg-accent text-accent-foreground border-accent'
												: 'border-widget-border text-vscode-foreground/80 hover:bg-vscode-list-hoverBackground'
										}
										disabled={isLoading}
									>
										{strength.charAt(0).toUpperCase() + strength.slice(1)}
									</Button>
								))}
							</div>
						</div>

						{/* Scope Prompt */}
						<Textarea
							placeholder="Optional: Specify how to adjust complexity (e.g., 'Focus on error handling', 'Remove unnecessary details', 'Add more implementation steps')"
							value={scopePrompt}
							onChange={(e) => setScopePrompt(e.target.value)}
							className="min-h-[80px] bg-vscode-input-background border-vscode-input-border text-vscode-input-foreground placeholder-vscode-input-foreground/50 focus:border-vscode-focusBorder focus:ring-vscode-focusBorder"
							disabled={isLoading}
						/>
					</div>

					<div className="flex gap-3">
						<Button
							onClick={handleScopeUp}
							disabled={isLoading}
							variant="outline"
							className="flex-1 border-green-600/50 text-green-400 hover:bg-green-600/10 hover:border-green-500"
						>
							{isScopingUp ? (
								<>
									<Loader2 className="w-4 h-4 mr-2 animate-spin" />
									Scoping Up...
								</>
							) : (
								<>
									<TrendingUp className="w-4 h-4 mr-2" />
									Scope Up
								</>
							)}
						</Button>

						<Button
							onClick={handleScopeDown}
							disabled={isLoading}
							variant="outline"
							className="flex-1 border-blue-600/50 text-blue-400 hover:bg-blue-600/10 hover:border-blue-500"
						>
							{isScopingDown ? (
								<>
									<Loader2 className="w-4 h-4 mr-2 animate-spin" />
									Scoping Down...
								</>
							) : (
								<>
									<TrendingDown className="w-4 h-4 mr-2" />
									Scope Down
								</>
							)}
						</Button>
					</div>
				</div>

				{/* Help Text */}
				<div className="text-xs text-vscode-foreground/60 space-y-1">
					{isSubtask ? (
						<p>
							<strong>Add Notes:</strong> Appends timestamped implementation
							notes, progress updates, or findings to this subtask's details
						</p>
					) : (
						<>
							<p>
								<strong>Regenerate:</strong> Completely rewrites the task
								description and subtasks based on your prompt
							</p>
							<p>
								<strong>Append:</strong> Adds new content to the existing task
								implementation details based on your prompt
							</p>
						</>
					)}
					<p>
						<strong>Scope Up:</strong> Increases task complexity with more
						details, requirements, or implementation steps
					</p>
					<p>
						<strong>Scope Down:</strong> Decreases task complexity by
						simplifying or removing unnecessary details
					</p>
				</div>
			</div>
		</CollapsibleSection>
	);
};

```

--------------------------------------------------------------------------------
/.taskmaster/docs/tdd-workflow-phase-2-pr-resumability.md:
--------------------------------------------------------------------------------

```markdown
# Phase 2: PR + Resumability - Autonomous TDD Workflow

## Objective
Add PR creation with GitHub CLI integration, resumable checkpoints for interrupted runs, and enhanced guardrails with coverage enforcement.

## Scope
- GitHub PR creation via `gh` CLI
- Well-formed PR body using run report
- Resumable checkpoints and `--resume` flag
- Coverage enforcement before finalization
- Optional lint/format step
- Enhanced error recovery

## Deliverables

### 1. PR Creation Integration

**PRAdapter** (`packages/tm-core/src/services/pr-adapter.ts`):
```typescript
class PRAdapter {
  async isGHAvailable(): Promise<boolean>
  async createPR(options: PROptions): Promise<PRResult>
  async getPRTemplate(runReport: RunReport): Promise<string>

  // Fallback for missing gh CLI
  async getManualPRInstructions(options: PROptions): Promise<string>
}

interface PROptions {
  branch: string
  base: string
  title: string
  body: string
  draft?: boolean
}

interface PRResult {
  url: string
  number: number
}
```

**PR Title Format:**
```
Task #<id> [<tag>]: <title>
```

Example: `Task #42 [analytics]: User metrics tracking`

**PR Body Template:**

Located at `.taskmaster/templates/pr-body.md`:

```markdown
## Summary

Implements Task #42 from TaskMaster autonomous workflow.

**Branch:** {branch}
**Tag:** {tag}
**Subtasks completed:** {subtaskCount}

{taskDescription}

## Subtasks

{subtasksList}

## Test Coverage

| Metric | Coverage |
|--------|----------|
| Lines | {lines}% |
| Branches | {branches}% |
| Functions | {functions}% |
| Statements | {statements}% |

**All subtasks passed with {totalTests} tests.**

## Commits

{commitsList}

## Run Report

Full execution report: `.taskmaster/reports/runs/{runId}/`

---

🤖 Generated with [Task Master](https://github.com/cline/task-master) autonomous TDD workflow
```

**Token replacement** (a substitution sketch follows the list):
- `{branch}` → branch name
- `{tag}` → active tag
- `{subtaskCount}` → number of completed subtasks
- `{taskDescription}` → task description from TaskMaster
- `{subtasksList}` → markdown list of subtask titles
- `{lines}`, `{branches}`, `{functions}`, `{statements}` → coverage percentages
- `{totalTests}` → total test count
- `{commitsList}` → markdown list of commit SHAs and messages
- `{runId}` → run ID timestamp
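
The mapping above can be applied with a straightforward token substitution over the template file. The sketch below is illustrative only; `renderPRBody` and the `TemplateTokens` shape are assumptions, not existing code.

```typescript
// Hypothetical helper: substitute {token} placeholders in the PR body template.
// The token names mirror the list above; the function itself is assumed.
import { readFile } from 'node:fs/promises';

type TemplateTokens = Record<string, string | number>;

async function renderPRBody(
  templatePath: string,
  tokens: TemplateTokens
): Promise<string> {
  const template = await readFile(templatePath, 'utf-8');
  // Replace each {token}; unknown tokens are left in place so template
  // problems show up visibly in the generated PR body.
  return template.replace(/\{(\w+)\}/g, (match, name) =>
    name in tokens ? String(tokens[name]) : match
  );
}
```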

### 2. GitHub CLI Integration

**Detection:**
```bash
which gh
```

If not found, show fallback instructions:
```bash
✓ Branch pushed: analytics/task-42-user-metrics
✗ gh CLI not found - cannot create PR automatically

To create PR manually:
  gh pr create \
    --base main \
    --head analytics/task-42-user-metrics \
    --title "Task #42 [analytics]: User metrics tracking" \
    --body-file .taskmaster/reports/runs/2025-01-15-142033/pr.md

Or visit:
  https://github.com/org/repo/compare/main...analytics/task-42-user-metrics
```

**Confirmation gate:**
```bash
Ready to create PR:
  Title: Task #42 [analytics]: User metrics tracking
  Base: main
  Head: analytics/task-42-user-metrics

Create PR? [Y/n]
```

Unless `--no-confirm` flag is set.
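
The detection-and-fallback path could be wired up roughly as follows, using Node's `child_process` and only the `gh pr create` flags already shown above; the helper names (`isGhAvailable`, `createOrExplainPR`) are hypothetical.

```typescript
// Hypothetical sketch of gh detection with a manual-instructions fallback.
import { execFile } from 'node:child_process';
import { promisify } from 'node:util';

const execFileAsync = promisify(execFile);

async function isGhAvailable(): Promise<boolean> {
  try {
    await execFileAsync('gh', ['--version']);
    return true;
  } catch {
    return false;
  }
}

async function createOrExplainPR(opts: {
  base: string;
  head: string;
  title: string;
  bodyFile: string;
}): Promise<string> {
  if (!(await isGhAvailable())) {
    // Fall back to the manual instructions shown above.
    return [
      'gh CLI not found - cannot create PR automatically.',
      'To create the PR manually:',
      `  gh pr create --base ${opts.base} --head ${opts.head} \\`,
      `    --title "${opts.title}" --body-file ${opts.bodyFile}`
    ].join('\n');
  }

  const { stdout } = await execFileAsync('gh', [
    'pr',
    'create',
    '--base',
    opts.base,
    '--head',
    opts.head,
    '--title',
    opts.title,
    '--body-file',
    opts.bodyFile
  ]);
  return stdout.trim(); // gh prints the PR URL on success
}
```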

### 3. Resumable Workflow

**State Checkpoint** (`state.json`):
```json
{
  "runId": "2025-01-15-142033",
  "taskId": "42",
  "phase": "subtask-loop",
  "currentSubtask": "42.2",
  "currentPhase": "green",
  "attempts": 2,
  "completedSubtasks": ["42.1"],
  "commits": ["a1b2c3d"],
  "branch": "analytics/task-42-user-metrics",
  "tag": "analytics",
  "canResume": true,
  "pausedAt": "2025-01-15T14:25:35Z",
  "pausedReason": "max_attempts_reached",
  "nextAction": "manual_review_required"
}
```

**Resume Command:**
```bash
$ tm autopilot --resume

Resuming run: 2025-01-15-142033
  Task: #42 [analytics] User metrics tracking
  Branch: analytics/task-42-user-metrics
  Last subtask: 42.2 (GREEN phase, attempt 2/3 failed)
  Paused: 5 minutes ago

Reason: Could not achieve green state after 3 attempts
Last error: POST /api/metrics returns 500 instead of 201

Resume from subtask 42.2 GREEN phase? [Y/n]
```

**Resume logic** (sketched after the list):
1. Load state from `.taskmaster/reports/runs/<runId>/state.json`
2. Verify branch still exists and is checked out
3. Verify no uncommitted changes (unless `--force`)
4. Continue from last checkpoint phase
5. Update state file as execution progresses
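
A rough sketch of steps 1–3, assuming the `state.json` shape shown above; the function and the fields it checks are illustrative, not the actual implementation.

```typescript
// Hypothetical resume-state loader covering load plus basic validation.
import { readFile } from 'node:fs/promises';
import { execSync } from 'node:child_process';

interface RunState {
  runId: string;
  taskId: string;
  branch: string;
  currentSubtask: string;
  currentPhase: string;
  canResume: boolean;
}

async function loadResumableState(
  runId: string,
  projectRoot: string
): Promise<RunState> {
  const statePath = `.taskmaster/reports/runs/${runId}/state.json`;
  const state: RunState = JSON.parse(await readFile(statePath, 'utf-8'));

  if (!state.canResume) {
    throw new Error(`Run ${runId} is not resumable`);
  }

  // Step 2: verify the recorded branch is still checked out.
  const currentBranch = execSync('git rev-parse --abbrev-ref HEAD', {
    cwd: projectRoot,
    encoding: 'utf8'
  }).trim();
  if (currentBranch !== state.branch) {
    throw new Error(
      `Expected branch ${state.branch}, but ${currentBranch} is checked out`
    );
  }

  // Step 3: verify a clean working tree (unless --force).
  const dirty = execSync('git status --porcelain', {
    cwd: projectRoot,
    encoding: 'utf8'
  }).trim();
  if (dirty.length > 0) {
    throw new Error('Uncommitted changes present; use --force to override');
  }

  return state;
}
```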

**Multiple interrupted runs:**
```bash
$ tm autopilot --resume

Found 2 resumable runs:
  1. 2025-01-15-142033 - Task #42 (paused 5 min ago at subtask 42.2 GREEN)
  2. 2025-01-14-103022 - Task #38 (paused 2 hours ago at subtask 38.3 RED)

Select run to resume [1-2]:
```

### 4. Coverage Enforcement

**Coverage Check Phase** (before finalization):
```typescript
async function enforceCoverage(runId: string): Promise<void> {
  const testResults = await testRunner.runAll()
  const coverage = await testRunner.getCoverage()

  const thresholds = config.test.coverageThresholds
  const failures = []

  if (coverage.lines < thresholds.lines) {
    failures.push(`Lines: ${coverage.lines}% < ${thresholds.lines}%`)
  }
  // ... check branches, functions, statements

  if (failures.length > 0) {
    throw new CoverageError(
      `Coverage thresholds not met:\n${failures.join('\n')}`
    )
  }

  // Store coverage in run report
  await storeRunArtifact(runId, 'coverage.json', coverage)
}
```

**Handling coverage failures:**
```bash
⚠️  Coverage check failed:
  Lines: 78.5% < 80%
  Branches: 75.0% < 80%

Options:
  1. Add more tests and resume
  2. Lower thresholds in .taskmaster/config.json
  3. Skip coverage check: tm autopilot --resume --skip-coverage

Run paused. Fix coverage and resume with:
  tm autopilot --resume
```

### 5. Optional Lint/Format Step

**Configuration:**
```json
{
  "autopilot": {
    "finalization": {
      "lint": {
        "enabled": true,
        "command": "npm run lint",
        "fix": true,
        "failOnError": false
      },
      "format": {
        "enabled": true,
        "command": "npm run format",
        "commitChanges": true
      }
    }
  }
}
```

**Execution:**
```bash
Finalization Steps:

  ✓ All tests passing (12 tests, 0 failures)
  ✓ Coverage thresholds met (85% lines, 82% branches)

  LINT Running linter... ⏳
  LINT ✓ No lint errors

  FORMAT Running formatter... ⏳
  FORMAT ✓ Formatted 3 files
  FORMAT ✓ Committed formatting changes: "chore: auto-format code"

  PUSH Pushing to origin... ⏳
  PUSH ✓ Pushed analytics/task-42-user-metrics

  PR Creating pull request... ⏳
  PR ✓ Created PR #123
      https://github.com/org/repo/pull/123
```
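
One way the lint/format configuration above might be consumed is sketched below; the command strings come from the config, while the helper name and commit handling are assumptions.

```typescript
// Hypothetical runner for an optional finalization step (lint or format),
// driven by the autopilot.finalization config shown above.
import { exec } from 'node:child_process';
import { promisify } from 'node:util';

const execAsync = promisify(exec);

interface StepConfig {
  enabled: boolean;
  command: string;
  failOnError?: boolean;
  commitChanges?: boolean;
}

async function runOptionalStep(
  step: StepConfig,
  projectRoot: string
): Promise<void> {
  if (!step.enabled) return;

  try {
    await execAsync(step.command, { cwd: projectRoot });
  } catch (error) {
    if (step.failOnError) throw error; // pause the run
    return; // tolerated failure: log and continue
  }

  if (step.commitChanges) {
    // Commit whatever the formatter changed, if anything.
    const { stdout } = await execAsync('git status --porcelain', {
      cwd: projectRoot
    });
    if (stdout.trim().length > 0) {
      await execAsync(
        'git add -A && git commit -m "chore: auto-format code"',
        { cwd: projectRoot }
      );
    }
  }
}
```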

### 6. Enhanced Error Recovery

**Pause Points:**
- Max GREEN attempts reached (current)
- Coverage check failed (new)
- Lint errors (if `failOnError: true`)
- Git push failed (new)
- PR creation failed (new)

**Each pause saves:**
- Full state checkpoint
- Last command output
- Suggested next actions
- Resume instructions

**Automatic recovery attempts** (see the retry sketch below):
- Git push: retry up to 3 times with backoff
- PR creation: fall back to manual instructions
- Lint: auto-fix if enabled, otherwise pause
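
The push retry could look roughly like this, using the `pushRetries` / `pushRetryDelay` settings from the configuration section below; the helper name and the linear backoff policy are assumptions.

```typescript
// Hypothetical retry-with-backoff wrapper around `git push`.
import { execFile } from 'node:child_process';
import { promisify } from 'node:util';

const execFileAsync = promisify(execFile);
const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms));

async function pushWithRetry(
  branch: string,
  projectRoot: string,
  retries = 3,
  retryDelayMs = 5000
): Promise<void> {
  for (let attempt = 1; attempt <= retries; attempt++) {
    try {
      await execFileAsync('git', ['push', '-u', 'origin', branch], {
        cwd: projectRoot
      });
      return;
    } catch (error) {
      if (attempt === retries) throw error;
      // Linear backoff between attempts; the real policy may differ.
      await sleep(retryDelayMs * attempt);
    }
  }
}
```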

### 7. Finalization Phase Enhancement

**Updated workflow:**
1. Run full test suite
2. Check coverage thresholds → pause if failed
3. Run lint (if enabled) → pause if failed and `failOnError: true`
4. Run format (if enabled) → auto-commit changes
5. Confirm push (unless `--no-confirm`)
6. Push branch → retry on failure
7. Generate PR body from template
8. Create PR via gh → fall back to manual instructions
9. Update task status to 'review' (configurable)
10. Save final run report

**Final output:**
```bash
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

✅ Task #42 [analytics]: User metrics tracking - COMPLETE

  Branch: analytics/task-42-user-metrics
  Subtasks completed: 3/3
  Commits: 3
  Total tests: 12 (12 passed, 0 failed)
  Coverage: 85% lines, 82% branches, 88% functions, 85% statements

  PR #123: https://github.com/org/repo/pull/123

  Run report: .taskmaster/reports/runs/2025-01-15-142033/

Next steps:
  - Review PR and request changes if needed
  - Merge when ready
  - Task status updated to 'review'

Completed in 24 minutes
```

## CLI Updates

**New flags:**
- `--resume` → Resume from last checkpoint
- `--skip-coverage` → Skip coverage checks
- `--skip-lint` → Skip lint step
- `--skip-format` → Skip format step
- `--skip-pr` → Push branch but don't create PR
- `--draft-pr` → Create draft PR instead of ready-for-review

## Configuration Updates

**Add to `.taskmaster/config.json`:**
```json
{
  "autopilot": {
    "finalization": {
      "lint": {
        "enabled": false,
        "command": "npm run lint",
        "fix": true,
        "failOnError": false
      },
      "format": {
        "enabled": false,
        "command": "npm run format",
        "commitChanges": true
      },
      "updateTaskStatus": "review"
    }
  },
  "git": {
    "pr": {
      "enabled": true,
      "base": "default",
      "bodyTemplate": ".taskmaster/templates/pr-body.md",
      "draft": false
    },
    "pushRetries": 3,
    "pushRetryDelay": 5000
  }
}
```

## Success Criteria
- Can create PR automatically with well-formed body
- Can resume interrupted runs from any checkpoint
- Coverage checks prevent low-quality code from being merged
- Clear error messages and recovery paths for all failure modes
- Run reports include full PR context for review

## Out of Scope (defer to Phase 3)
- Multiple test framework support (pytest, go test)
- Diff preview before commits
- TUI panel implementation
- Extension/IDE integration

## Testing Strategy
- Mock `gh` CLI for PR creation tests
- Test resume from each possible pause point
- Test coverage failure scenarios
- Test lint/format integration with mock commands
- End-to-end test with PR creation on test repo

## Dependencies
- Phase 1 completed (core workflow)
- GitHub CLI (`gh`) installed (optional, fallback provided)
- Test framework supports coverage output

## Estimated Effort
1-2 weeks

## Risks & Mitigations
- **Risk:** GitHub CLI auth issues
  - **Mitigation:** Clear auth setup docs, fallback to manual instructions

- **Risk:** PR body template doesn't match all project needs
  - **Mitigation:** Make template customizable via config path

- **Risk:** Resume state gets corrupted
  - **Mitigation:** Validate state on load, provide --force-reset option

- **Risk:** Coverage calculation differs between runs
  - **Mitigation:** Store coverage with each test run for comparison

## Validation
Test with:
- Successful PR creation end-to-end
- Resume from GREEN attempt failure
- Resume from coverage failure
- Resume from lint failure
- Missing `gh` CLI (fallback to manual)
- Lint/format integration enabled
- Multiple interrupted runs (selection UI)

```

--------------------------------------------------------------------------------
/scripts/modules/utils/git-utils.js:
--------------------------------------------------------------------------------

```javascript
/**
 * git-utils.js
 * Git integration utilities for Task Master
 * Uses raw git commands and gh CLI for operations
 * MCP-friendly: All functions require projectRoot parameter
 */

import { exec, execSync } from 'child_process';
import { promisify } from 'util';
import path from 'path';
import fs from 'fs';

const execAsync = promisify(exec);

/**
 * Check if the specified directory is inside a git repository
 * @param {string} projectRoot - Directory to check (required)
 * @returns {Promise<boolean>} True if inside a git repository
 */
async function isGitRepository(projectRoot) {
	if (!projectRoot) {
		throw new Error('projectRoot is required for isGitRepository');
	}

	try {
		await execAsync('git rev-parse --git-dir', { cwd: projectRoot });
		return true;
	} catch (error) {
		return false;
	}
}

/**
 * Get the current git branch name
 * @param {string} projectRoot - Directory to check (required)
 * @returns {Promise<string|null>} Current branch name or null if not in git repo
 */
async function getCurrentBranch(projectRoot) {
	if (!projectRoot) {
		throw new Error('projectRoot is required for getCurrentBranch');
	}

	try {
		const { stdout } = await execAsync('git rev-parse --abbrev-ref HEAD', {
			cwd: projectRoot
		});
		return stdout.trim();
	} catch (error) {
		return null;
	}
}

/**
 * Get list of all local git branches
 * @param {string} projectRoot - Directory to check (required)
 * @returns {Promise<string[]>} Array of branch names
 */
async function getLocalBranches(projectRoot) {
	if (!projectRoot) {
		throw new Error('projectRoot is required for getLocalBranches');
	}

	try {
		const { stdout } = await execAsync(
			'git branch --format="%(refname:short)"',
			{ cwd: projectRoot }
		);
		return stdout
			.trim()
			.split('\n')
			.filter((branch) => branch.length > 0)
			.map((branch) => branch.trim());
	} catch (error) {
		return [];
	}
}

/**
 * Get list of all remote branches
 * @param {string} projectRoot - Directory to check (required)
 * @returns {Promise<string[]>} Array of remote branch names (without remote prefix)
 */
async function getRemoteBranches(projectRoot) {
	if (!projectRoot) {
		throw new Error('projectRoot is required for getRemoteBranches');
	}

	try {
		const { stdout } = await execAsync(
			'git branch -r --format="%(refname:short)"',
			{ cwd: projectRoot }
		);
		return stdout
			.trim()
			.split('\n')
			.filter((branch) => branch.length > 0 && !branch.includes('HEAD'))
			.map((branch) => branch.replace(/^origin\//, '').trim());
	} catch (error) {
		return [];
	}
}

/**
 * Check if gh CLI is available and authenticated
 * @param {string} [projectRoot] - Directory context (optional for this check)
 * @returns {Promise<boolean>} True if gh CLI is available and authenticated
 */
async function isGhCliAvailable(projectRoot = null) {
	try {
		const options = projectRoot ? { cwd: projectRoot } : {};
		await execAsync('gh auth status', options);
		return true;
	} catch (error) {
		return false;
	}
}

/**
 * Get GitHub repository information using gh CLI
 * @param {string} projectRoot - Directory to check (required)
 * @returns {Promise<Object|null>} Repository info or null if not available
 */
async function getGitHubRepoInfo(projectRoot) {
	if (!projectRoot) {
		throw new Error('projectRoot is required for getGitHubRepoInfo');
	}

	try {
		const { stdout } = await execAsync(
			'gh repo view --json name,owner,defaultBranchRef',
			{ cwd: projectRoot }
		);
		return JSON.parse(stdout);
	} catch (error) {
		return null;
	}
}

/**
 * Sanitize branch name to be a valid tag name
 * @param {string} branchName - Git branch name
 * @returns {string} Sanitized tag name
 */
function sanitizeBranchNameForTag(branchName) {
	if (!branchName || typeof branchName !== 'string') {
		return 'unknown-branch';
	}

	// Replace invalid characters with hyphens and clean up
	return branchName
		.replace(/[^a-zA-Z0-9_-]/g, '-') // Replace invalid chars with hyphens
		.replace(/^-+|-+$/g, '') // Remove leading/trailing hyphens
		.replace(/-+/g, '-') // Collapse multiple hyphens
		.toLowerCase() // Convert to lowercase
		.substring(0, 50); // Limit length
}

/**
 * Check if a branch name would create a valid tag name
 * @param {string} branchName - Git branch name
 * @returns {boolean} True if branch name can be converted to valid tag
 */
function isValidBranchForTag(branchName) {
	if (!branchName || typeof branchName !== 'string') {
		return false;
	}

	// Check if it's a reserved branch name that shouldn't become tags
	const reservedBranches = ['main', 'master', 'develop', 'dev', 'HEAD'];
	if (reservedBranches.includes(branchName.toLowerCase())) {
		return false;
	}

	// Check if sanitized name would be meaningful
	const sanitized = sanitizeBranchNameForTag(branchName);
	return sanitized.length > 0 && sanitized !== 'unknown-branch';
}

/**
 * Get git repository root directory
 * @param {string} projectRoot - Directory to start search from (required)
 * @returns {Promise<string|null>} Git repository root path or null
 */
async function getGitRepositoryRoot(projectRoot) {
	if (!projectRoot) {
		throw new Error('projectRoot is required for getGitRepositoryRoot');
	}

	try {
		const { stdout } = await execAsync('git rev-parse --show-toplevel', {
			cwd: projectRoot
		});
		return stdout.trim();
	} catch (error) {
		return null;
	}
}

/**
 * Check if specified directory is the git repository root
 * @param {string} projectRoot - Directory to check (required)
 * @returns {Promise<boolean>} True if directory is git root
 */
async function isGitRepositoryRoot(projectRoot) {
	if (!projectRoot) {
		throw new Error('projectRoot is required for isGitRepositoryRoot');
	}

	try {
		const gitRoot = await getGitRepositoryRoot(projectRoot);
		return gitRoot && path.resolve(gitRoot) === path.resolve(projectRoot);
	} catch (error) {
		return false;
	}
}

/**
 * Get the default branch name for the repository
 * @param {string} projectRoot - Directory to check (required)
 * @returns {Promise<string|null>} Default branch name or null
 */
async function getDefaultBranch(projectRoot) {
	if (!projectRoot) {
		throw new Error('projectRoot is required for getDefaultBranch');
	}

	try {
		// Try to get from GitHub first (if gh CLI is available)
		if (await isGhCliAvailable(projectRoot)) {
			const repoInfo = await getGitHubRepoInfo(projectRoot);
			if (repoInfo && repoInfo.defaultBranchRef) {
				return repoInfo.defaultBranchRef.name;
			}
		}

		// Fallback to git remote info
		const { stdout } = await execAsync(
			'git symbolic-ref refs/remotes/origin/HEAD',
			{ cwd: projectRoot }
		);
		return stdout.replace('refs/remotes/origin/', '').trim();
	} catch (error) {
		// Final fallback - common default branch names
		const commonDefaults = ['main', 'master'];
		const branches = await getLocalBranches(projectRoot);

		for (const defaultName of commonDefaults) {
			if (branches.includes(defaultName)) {
				return defaultName;
			}
		}

		return null;
	}
}

/**
 * Check if we're currently on the default branch
 * @param {string} projectRoot - Directory to check (required)
 * @returns {Promise<boolean>} True if on default branch
 */
async function isOnDefaultBranch(projectRoot) {
	if (!projectRoot) {
		throw new Error('projectRoot is required for isOnDefaultBranch');
	}

	try {
		const currentBranch = await getCurrentBranch(projectRoot);
		const defaultBranch = await getDefaultBranch(projectRoot);
		return Boolean(
			currentBranch && defaultBranch && currentBranch === defaultBranch
		);
	} catch (error) {
		return false;
	}
}

/**
 * Check and automatically switch tags based on git branch if enabled
 * This runs automatically during task operations, similar to migration
 * @param {string} projectRoot - Project root directory (required)
 * @param {string} tasksPath - Path to tasks.json file
 * @returns {Promise<void>}
 */
async function checkAndAutoSwitchGitTag(projectRoot, tasksPath) {
	if (!projectRoot) {
		throw new Error('projectRoot is required for checkAndAutoSwitchGitTag');
	}

	// DISABLED: Automatic git workflow is too rigid and opinionated
	// Users should explicitly use git-tag commands if they want integration
	return;
}

/**
 * Synchronous version of git tag checking and switching
 * This runs during readJSON to ensure git integration happens BEFORE tag resolution
 * @param {string} projectRoot - Project root directory (required)
 * @param {string} tasksPath - Path to tasks.json file
 * @returns {void}
 */
function checkAndAutoSwitchGitTagSync(projectRoot, tasksPath) {
	if (!projectRoot) {
		return; // Can't proceed without project root
	}

	// DISABLED: Automatic git workflow is too rigid and opinionated
	// Users should explicitly use git-tag commands if they want integration
	return;
}

/**
 * Synchronous check if directory is in a git repository
 * @param {string} projectRoot - Directory to check (required)
 * @returns {boolean} True if inside a git repository
 */
function isGitRepositorySync(projectRoot) {
	if (!projectRoot) {
		return false;
	}

	try {
		execSync('git rev-parse --git-dir', {
			cwd: projectRoot,
			stdio: 'ignore' // Suppress output
		});
		return true;
	} catch (error) {
		return false;
	}
}

/**
 * Synchronous get current git branch name
 * @param {string} projectRoot - Directory to check (required)
 * @returns {string|null} Current branch name or null if not in git repo
 */
function getCurrentBranchSync(projectRoot) {
	if (!projectRoot) {
		return null;
	}

	try {
		const stdout = execSync('git rev-parse --abbrev-ref HEAD', {
			cwd: projectRoot,
			encoding: 'utf8'
		});
		return stdout.trim();
	} catch (error) {
		return null;
	}
}

/**
 * Check if the current working directory is inside a Git work-tree.
 * Uses `git rev-parse --is-inside-work-tree` which is more specific than --git-dir
 * for detecting work-trees (excludes bare repos and .git directories).
 * This is ideal for preventing accidental git init in existing work-trees.
 * @returns {boolean} True if inside a Git work-tree, false otherwise.
 */
function insideGitWorkTree() {
	try {
		execSync('git rev-parse --is-inside-work-tree', {
			stdio: 'ignore',
			cwd: process.cwd()
		});
		return true;
	} catch {
		return false;
	}
}

// Export all functions
export {
	isGitRepository,
	getCurrentBranch,
	getLocalBranches,
	getRemoteBranches,
	isGhCliAvailable,
	getGitHubRepoInfo,
	sanitizeBranchNameForTag,
	isValidBranchForTag,
	getGitRepositoryRoot,
	isGitRepositoryRoot,
	getDefaultBranch,
	isOnDefaultBranch,
	checkAndAutoSwitchGitTag,
	checkAndAutoSwitchGitTagSync,
	isGitRepositorySync,
	getCurrentBranchSync,
	insideGitWorkTree
};

```

--------------------------------------------------------------------------------
/tests/unit/mcp/tools/get-tasks.test.js:
--------------------------------------------------------------------------------

```javascript
/**
 * Tests for the get-tasks MCP tool
 *
 * This test verifies the MCP tool properly handles comma-separated status filtering
 * and passes arguments correctly to the underlying direct function.
 */

import { jest } from '@jest/globals';
import {
	sampleTasks,
	emptySampleTasks
} from '../../../fixtures/sample-tasks.js';

// Mock EVERYTHING
const mockListTasksDirect = jest.fn();
jest.mock('../../../../mcp-server/src/core/task-master-core.js', () => ({
	listTasksDirect: mockListTasksDirect
}));

const mockHandleApiResult = jest.fn((result) => result);
const mockWithNormalizedProjectRoot = jest.fn((executeFn) => executeFn);
const mockCreateErrorResponse = jest.fn((msg) => ({
	success: false,
	error: { code: 'ERROR', message: msg }
}));

const mockResolveTasksPath = jest.fn(() => '/mock/project/tasks.json');
const mockResolveComplexityReportPath = jest.fn(
	() => '/mock/project/complexity-report.json'
);

jest.mock('../../../../mcp-server/src/tools/utils.js', () => ({
	withNormalizedProjectRoot: mockWithNormalizedProjectRoot,
	handleApiResult: mockHandleApiResult,
	createErrorResponse: mockCreateErrorResponse,
	createContentResponse: jest.fn((content) => ({
		success: true,
		data: content
	}))
}));

jest.mock('../../../../mcp-server/src/core/utils/path-utils.js', () => ({
	resolveTasksPath: mockResolveTasksPath,
	resolveComplexityReportPath: mockResolveComplexityReportPath
}));

// Mock the z object from zod
const mockZod = {
	object: jest.fn(() => mockZod),
	string: jest.fn(() => mockZod),
	boolean: jest.fn(() => mockZod),
	optional: jest.fn(() => mockZod),
	describe: jest.fn(() => mockZod),
	_def: {
		shape: () => ({
			status: {},
			withSubtasks: {},
			file: {},
			complexityReport: {},
			projectRoot: {}
		})
	}
};

jest.mock('zod', () => ({
	z: mockZod
}));

// DO NOT import the real module - create a fake implementation
const registerListTasksTool = (server) => {
	const toolConfig = {
		name: 'get_tasks',
		description:
			'Get all tasks from Task Master, optionally filtering by status and including subtasks.',
		parameters: mockZod,

		execute: (args, context) => {
			const { log, session } = context;

			try {
				log.info &&
					log.info(`Getting tasks with filters: ${JSON.stringify(args)}`);

				// Resolve paths using mock functions
				let tasksJsonPath;
				try {
					tasksJsonPath = mockResolveTasksPath(args, log);
				} catch (error) {
					log.error && log.error(`Error finding tasks.json: ${error.message}`);
					return mockCreateErrorResponse(
						`Failed to find tasks.json: ${error.message}`
					);
				}

				let complexityReportPath;
				try {
					complexityReportPath = mockResolveComplexityReportPath(args, session);
				} catch (error) {
					log.error &&
						log.error(`Error finding complexity report: ${error.message}`);
					complexityReportPath = null;
				}

				const result = mockListTasksDirect(
					{
						tasksJsonPath: tasksJsonPath,
						status: args.status,
						withSubtasks: args.withSubtasks,
						reportPath: complexityReportPath
					},
					log
				);

				log.info &&
					log.info(
						`Retrieved ${result.success ? result.data?.tasks?.length || 0 : 0} tasks`
					);
				return mockHandleApiResult(result, log, 'Error getting tasks');
			} catch (error) {
				log.error && log.error(`Error getting tasks: ${error.message}`);
				return mockCreateErrorResponse(error.message);
			}
		}
	};

	server.addTool(toolConfig);
};

describe('MCP Tool: get-tasks', () => {
	let mockServer;
	let executeFunction;

	const mockLogger = {
		debug: jest.fn(),
		info: jest.fn(),
		warn: jest.fn(),
		error: jest.fn()
	};

	// Sample response data with different statuses for testing
	const tasksResponse = {
		success: true,
		data: {
			tasks: [
				{ id: 1, title: 'Task 1', status: 'done' },
				{ id: 2, title: 'Task 2', status: 'pending' },
				{ id: 3, title: 'Task 3', status: 'in-progress' },
				{ id: 4, title: 'Task 4', status: 'blocked' },
				{ id: 5, title: 'Task 5', status: 'deferred' },
				{ id: 6, title: 'Task 6', status: 'review' }
			],
			filter: 'all',
			stats: {
				total: 6,
				completed: 1,
				inProgress: 1,
				pending: 1,
				blocked: 1,
				deferred: 1,
				review: 1
			}
		}
	};

	beforeEach(() => {
		jest.clearAllMocks();

		mockServer = {
			addTool: jest.fn((config) => {
				executeFunction = config.execute;
			})
		};

		// Setup default successful response
		mockListTasksDirect.mockReturnValue(tasksResponse);

		// Register the tool
		registerListTasksTool(mockServer);
	});

	test('should register the tool correctly', () => {
		expect(mockServer.addTool).toHaveBeenCalledWith(
			expect.objectContaining({
				name: 'get_tasks',
				description: expect.stringContaining('Get all tasks from Task Master'),
				parameters: expect.any(Object),
				execute: expect.any(Function)
			})
		);
	});

	test('should handle single status filter', () => {
		const mockContext = {
			log: mockLogger,
			session: { workingDirectory: '/mock/dir' }
		};

		const args = {
			status: 'pending',
			withSubtasks: false,
			projectRoot: '/mock/project'
		};

		executeFunction(args, mockContext);

		expect(mockListTasksDirect).toHaveBeenCalledWith(
			expect.objectContaining({
				status: 'pending'
			}),
			mockLogger
		);
	});

	test('should handle comma-separated status filter', () => {
		const mockContext = {
			log: mockLogger,
			session: { workingDirectory: '/mock/dir' }
		};

		const args = {
			status: 'done,pending,in-progress',
			withSubtasks: false,
			projectRoot: '/mock/project'
		};

		executeFunction(args, mockContext);

		expect(mockListTasksDirect).toHaveBeenCalledWith(
			expect.objectContaining({
				status: 'done,pending,in-progress'
			}),
			mockLogger
		);
	});

	test('should handle comma-separated status with spaces', () => {
		const mockContext = {
			log: mockLogger,
			session: { workingDirectory: '/mock/dir' }
		};

		const args = {
			status: 'blocked, deferred , review',
			withSubtasks: true,
			projectRoot: '/mock/project'
		};

		executeFunction(args, mockContext);

		expect(mockListTasksDirect).toHaveBeenCalledWith(
			expect.objectContaining({
				status: 'blocked, deferred , review',
				withSubtasks: true
			}),
			mockLogger
		);
	});

	test('should handle withSubtasks parameter correctly', () => {
		const mockContext = {
			log: mockLogger,
			session: { workingDirectory: '/mock/dir' }
		};

		// Test with withSubtasks=true
		executeFunction(
			{
				status: 'pending',
				withSubtasks: true,
				projectRoot: '/mock/project'
			},
			mockContext
		);

		expect(mockListTasksDirect).toHaveBeenCalledWith(
			expect.objectContaining({
				withSubtasks: true
			}),
			mockLogger
		);

		jest.clearAllMocks();

		// Test with withSubtasks=false
		executeFunction(
			{
				status: 'pending',
				withSubtasks: false,
				projectRoot: '/mock/project'
			},
			mockContext
		);

		expect(mockListTasksDirect).toHaveBeenCalledWith(
			expect.objectContaining({
				withSubtasks: false
			}),
			mockLogger
		);
	});

	test('should handle path resolution errors gracefully', () => {
		mockResolveTasksPath.mockImplementationOnce(() => {
			throw new Error('Tasks file not found');
		});

		const mockContext = {
			log: mockLogger,
			session: { workingDirectory: '/mock/dir' }
		};

		const args = {
			status: 'pending',
			projectRoot: '/mock/project'
		};

		const result = executeFunction(args, mockContext);

		expect(mockLogger.error).toHaveBeenCalledWith(
			'Error finding tasks.json: Tasks file not found'
		);
		expect(mockCreateErrorResponse).toHaveBeenCalledWith(
			'Failed to find tasks.json: Tasks file not found'
		);
	});

	test('should handle complexity report path resolution errors gracefully', () => {
		mockResolveComplexityReportPath.mockImplementationOnce(() => {
			throw new Error('Complexity report not found');
		});

		const mockContext = {
			log: mockLogger,
			session: { workingDirectory: '/mock/dir' }
		};

		const args = {
			status: 'pending',
			projectRoot: '/mock/project'
		};

		executeFunction(args, mockContext);

		// Should not fail the operation but set complexityReportPath to null
		expect(mockListTasksDirect).toHaveBeenCalledWith(
			expect.objectContaining({
				reportPath: null
			}),
			mockLogger
		);
	});

	test('should handle listTasksDirect errors', () => {
		const errorResponse = {
			success: false,
			error: {
				code: 'LIST_TASKS_ERROR',
				message: 'Failed to list tasks'
			}
		};

		mockListTasksDirect.mockReturnValueOnce(errorResponse);

		const mockContext = {
			log: mockLogger,
			session: { workingDirectory: '/mock/dir' }
		};

		const args = {
			status: 'pending',
			projectRoot: '/mock/project'
		};

		executeFunction(args, mockContext);

		expect(mockHandleApiResult).toHaveBeenCalledWith(
			errorResponse,
			mockLogger,
			'Error getting tasks'
		);
	});

	test('should handle unexpected errors', () => {
		const testError = new Error('Unexpected error');
		mockListTasksDirect.mockImplementationOnce(() => {
			throw testError;
		});

		const mockContext = {
			log: mockLogger,
			session: { workingDirectory: '/mock/dir' }
		};

		const args = {
			status: 'pending',
			projectRoot: '/mock/project'
		};

		executeFunction(args, mockContext);

		expect(mockLogger.error).toHaveBeenCalledWith(
			'Error getting tasks: Unexpected error'
		);
		expect(mockCreateErrorResponse).toHaveBeenCalledWith('Unexpected error');
	});

	test('should pass all parameters correctly', () => {
		const mockContext = {
			log: mockLogger,
			session: { workingDirectory: '/mock/dir' }
		};

		const args = {
			status: 'done,pending',
			withSubtasks: true,
			file: 'custom-tasks.json',
			complexityReport: 'custom-report.json',
			projectRoot: '/mock/project'
		};

		executeFunction(args, mockContext);

		// Verify path resolution functions were called with correct arguments
		expect(mockResolveTasksPath).toHaveBeenCalledWith(args, mockLogger);
		expect(mockResolveComplexityReportPath).toHaveBeenCalledWith(
			args,
			mockContext.session
		);

		// Verify listTasksDirect was called with correct parameters
		expect(mockListTasksDirect).toHaveBeenCalledWith(
			{
				tasksJsonPath: '/mock/project/tasks.json',
				status: 'done,pending',
				withSubtasks: true,
				reportPath: '/mock/project/complexity-report.json'
			},
			mockLogger
		);
	});

	test('should log task count after successful retrieval', () => {
		const mockContext = {
			log: mockLogger,
			session: { workingDirectory: '/mock/dir' }
		};

		const args = {
			status: 'pending',
			projectRoot: '/mock/project'
		};

		executeFunction(args, mockContext);

		expect(mockLogger.info).toHaveBeenCalledWith(
			`Retrieved ${tasksResponse.data.tasks.length} tasks`
		);
	});
});

```

--------------------------------------------------------------------------------
/packages/tm-core/src/modules/ai/interfaces/ai-provider.interface.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * @fileoverview AI Provider interface definitions for the tm-core package
 * This file defines the contract for all AI provider implementations
 */

/**
 * Options for AI completion requests
 */
export interface AIOptions {
	/** Temperature for response randomness (0.0 to 1.0) */
	temperature?: number;
	/** Maximum number of tokens to generate */
	maxTokens?: number;
	/** Whether to use streaming responses */
	stream?: boolean;
	/** Top-p sampling parameter (0.0 to 1.0) */
	topP?: number;
	/** Frequency penalty to reduce repetition (-2.0 to 2.0) */
	frequencyPenalty?: number;
	/** Presence penalty to encourage new topics (-2.0 to 2.0) */
	presencePenalty?: number;
	/** Stop sequences to halt generation */
	stop?: string | string[];
	/** Custom system prompt override */
	systemPrompt?: string;
	/** Request timeout in milliseconds */
	timeout?: number;
	/** Number of retry attempts on failure */
	retries?: number;
}

/**
 * Response from AI completion request
 */
export interface AIResponse {
	/** Generated text content */
	content: string;
	/** Token count for the request */
	inputTokens: number;
	/** Token count for the response */
	outputTokens: number;
	/** Total tokens used */
	totalTokens: number;
	/** Cost in USD (if available) */
	cost?: number;
	/** Model used for generation */
	model: string;
	/** Provider name */
	provider: string;
	/** Response timestamp */
	timestamp: string;
	/** Request duration in milliseconds */
	duration: number;
	/** Whether the response was cached */
	cached?: boolean;
	/** Finish reason (completed, length, stop, etc.) */
	finishReason?: string;
}

/**
 * AI model information
 */
export interface AIModel {
	/** Model identifier */
	id: string;
	/** Human-readable model name */
	name: string;
	/** Model description */
	description?: string;
	/** Maximum context length in tokens */
	contextLength: number;
	/** Input cost per 1K tokens in USD */
	inputCostPer1K?: number;
	/** Output cost per 1K tokens in USD */
	outputCostPer1K?: number;
	/** Whether the model supports function calling */
	supportsFunctions?: boolean;
	/** Whether the model supports vision/image inputs */
	supportsVision?: boolean;
	/** Whether the model supports streaming */
	supportsStreaming?: boolean;
}

/**
 * Provider capabilities and metadata
 */
export interface ProviderInfo {
	/** Provider name */
	name: string;
	/** Provider display name */
	displayName: string;
	/** Provider description */
	description?: string;
	/** Base API URL */
	baseUrl?: string;
	/** Available models */
	models: AIModel[];
	/** Default model ID */
	defaultModel: string;
	/** Whether the provider requires an API key */
	requiresApiKey: boolean;
	/** Supported features */
	features: {
		streaming?: boolean;
		functions?: boolean;
		vision?: boolean;
		embeddings?: boolean;
	};
}

/**
 * Interface for AI provider implementations
 * All AI providers must implement this interface
 */
export interface IAIProvider {
	/**
	 * Generate a text completion from a prompt
	 * @param prompt - Input prompt text
	 * @param options - Optional generation parameters
	 * @returns Promise that resolves to AI response
	 */
	generateCompletion(prompt: string, options?: AIOptions): Promise<AIResponse>;

	/**
	 * Generate a streaming completion (if supported)
	 * @param prompt - Input prompt text
	 * @param options - Optional generation parameters
	 * @returns AsyncIterator of response chunks
	 */
	generateStreamingCompletion(
		prompt: string,
		options?: AIOptions
	): AsyncIterator<Partial<AIResponse>>;

	/**
	 * Calculate token count for given text
	 * @param text - Text to count tokens for
	 * @param model - Optional model to use for counting
	 * @returns Number of tokens
	 */
	calculateTokens(text: string, model?: string): number;

	/**
	 * Get the provider name
	 * @returns Provider name string
	 */
	getName(): string;

	/**
	 * Get current model being used
	 * @returns Current model ID
	 */
	getModel(): string;

	/**
	 * Set the model to use for requests
	 * @param model - Model ID to use
	 */
	setModel(model: string): void;

	/**
	 * Get the default model for this provider
	 * @returns Default model ID
	 */
	getDefaultModel(): string;

	/**
	 * Check if the provider is available and configured
	 * @returns Promise that resolves to availability status
	 */
	isAvailable(): Promise<boolean>;

	/**
	 * Get provider information and capabilities
	 * @returns Provider information object
	 */
	getProviderInfo(): ProviderInfo;

	/**
	 * Get available models for this provider
	 * @returns Array of available models
	 */
	getAvailableModels(): AIModel[];

	/**
	 * Validate API key or credentials
	 * @returns Promise that resolves to validation status
	 */
	validateCredentials(): Promise<boolean>;

	/**
	 * Get usage statistics if available
	 * @returns Promise that resolves to usage stats or null
	 */
	getUsageStats(): Promise<ProviderUsageStats | null>;

	/**
	 * Initialize the provider (set up connections, validate config, etc.)
	 * @returns Promise that resolves when initialization is complete
	 */
	initialize(): Promise<void>;

	/**
	 * Clean up and close provider connections
	 * @returns Promise that resolves when cleanup is complete
	 */
	close(): Promise<void>;
}

/**
 * Usage statistics for a provider
 */
export interface ProviderUsageStats {
	/** Total requests made */
	totalRequests: number;
	/** Total tokens consumed */
	totalTokens: number;
	/** Total cost in USD */
	totalCost: number;
	/** Requests today */
	requestsToday: number;
	/** Tokens used today */
	tokensToday: number;
	/** Cost today */
	costToday: number;
	/** Average response time in milliseconds */
	averageResponseTime: number;
	/** Success rate (0.0 to 1.0) */
	successRate: number;
	/** Last request timestamp */
	lastRequestAt?: string;
	/** Rate limit information if available */
	rateLimits?: {
		requestsPerMinute: number;
		tokensPerMinute: number;
		requestsRemaining: number;
		tokensRemaining: number;
		resetTime: string;
	};
}

/**
 * Configuration for AI provider instances
 */
export interface AIProviderConfig {
	/** API key for the provider */
	apiKey: string;
	/** Base URL override */
	baseUrl?: string;
	/** Default model to use */
	model?: string;
	/** Default generation options */
	defaultOptions?: AIOptions;
	/** Request timeout in milliseconds */
	timeout?: number;
	/** Maximum retry attempts */
	maxRetries?: number;
	/** Custom headers to include in requests */
	headers?: Record<string, string>;
	/** Enable request/response logging */
	enableLogging?: boolean;
	/** Enable usage tracking */
	enableUsageTracking?: boolean;
}

/**
 * Abstract base class for AI provider implementations
 * Provides common functionality and enforces the interface
 */
export abstract class BaseAIProvider implements IAIProvider {
	protected config: AIProviderConfig;
	protected currentModel: string;
	protected usageStats: ProviderUsageStats | null = null;

	constructor(config: AIProviderConfig) {
		this.config = config;
		this.currentModel = config.model || this.getDefaultModel();

		if (config.enableUsageTracking) {
			this.initializeUsageTracking();
		}
	}

	// Abstract methods that must be implemented by concrete classes
	abstract generateCompletion(
		prompt: string,
		options?: AIOptions
	): Promise<AIResponse>;
	abstract generateStreamingCompletion(
		prompt: string,
		options?: AIOptions
	): AsyncIterator<Partial<AIResponse>>;
	abstract calculateTokens(text: string, model?: string): number;
	abstract getName(): string;
	abstract getDefaultModel(): string;
	abstract isAvailable(): Promise<boolean>;
	abstract getProviderInfo(): ProviderInfo;
	abstract validateCredentials(): Promise<boolean>;
	abstract initialize(): Promise<void>;
	abstract close(): Promise<void>;

	// Implemented methods with common functionality
	getModel(): string {
		return this.currentModel;
	}

	setModel(model: string): void {
		const availableModels = this.getAvailableModels();
		const modelExists = availableModels.some((m) => m.id === model);

		if (!modelExists) {
			throw new Error(
				`Model "${model}" is not available for provider "${this.getName()}"`
			);
		}

		this.currentModel = model;
	}

	getAvailableModels(): AIModel[] {
		return this.getProviderInfo().models;
	}

	async getUsageStats(): Promise<ProviderUsageStats | null> {
		return this.usageStats;
	}

	/**
	 * Initialize usage tracking
	 */
	protected initializeUsageTracking(): void {
		this.usageStats = {
			totalRequests: 0,
			totalTokens: 0,
			totalCost: 0,
			requestsToday: 0,
			tokensToday: 0,
			costToday: 0,
			averageResponseTime: 0,
			successRate: 1.0
		};
	}

	/**
	 * Update usage statistics after a request
	 * @param response - AI response to record
	 * @param duration - Request duration in milliseconds
	 * @param success - Whether the request was successful
	 */
	protected updateUsageStats(
		response: AIResponse,
		duration: number,
		success: boolean
	): void {
		if (!this.usageStats) return;

		this.usageStats.totalRequests++;
		this.usageStats.totalTokens += response.totalTokens;

		if (response.cost) {
			this.usageStats.totalCost += response.cost;
		}

		// Update daily stats (simplified - would need proper date tracking)
		this.usageStats.requestsToday++;
		this.usageStats.tokensToday += response.totalTokens;

		if (response.cost) {
			this.usageStats.costToday += response.cost;
		}

		// Update average response time
		const totalTime =
			this.usageStats.averageResponseTime * (this.usageStats.totalRequests - 1);
		this.usageStats.averageResponseTime =
			(totalTime + duration) / this.usageStats.totalRequests;

		// Update success rate
		const successCount = Math.floor(
			this.usageStats.successRate * (this.usageStats.totalRequests - 1)
		);
		const newSuccessCount = successCount + (success ? 1 : 0);
		this.usageStats.successRate =
			newSuccessCount / this.usageStats.totalRequests;

		this.usageStats.lastRequestAt = new Date().toISOString();
	}

	/**
	 * Merge user options with default options
	 * @param userOptions - User-provided options
	 * @returns Merged options object
	 */
	protected mergeOptions(userOptions?: AIOptions): AIOptions {
		return {
			temperature: 0.7,
			maxTokens: 2000,
			stream: false,
			topP: 1.0,
			frequencyPenalty: 0.0,
			presencePenalty: 0.0,
			timeout: 30000,
			retries: 3,
			...this.config.defaultOptions,
			...userOptions
		};
	}

	/**
	 * Validate prompt input
	 * @param prompt - Prompt to validate
	 * @throws Error if prompt is invalid
	 */
	protected validatePrompt(prompt: string): void {
		if (!prompt || typeof prompt !== 'string') {
			throw new Error('Prompt must be a non-empty string');
		}

		if (prompt.trim().length === 0) {
			throw new Error('Prompt cannot be empty or only whitespace');
		}
	}
}

```

--------------------------------------------------------------------------------
/packages/tm-core/src/modules/workflow/services/test-result-validator.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, expect, it } from 'vitest';
import { TestResultValidator } from './test-result-validator.js';
import type {
	TestPhase,
	TestResult,
	ValidationResult
} from './test-result-validator.types.js';

describe('TestResultValidator - Input Validation', () => {
	const validator = new TestResultValidator();

	describe('Schema Validation', () => {
		it('should validate a valid test result', () => {
			const validResult: TestResult = {
				total: 10,
				passed: 5,
				failed: 5,
				skipped: 0,
				phase: 'RED'
			};

			const result = validator.validate(validResult);
			expect(result.valid).toBe(true);
			expect(result.errors).toEqual([]);
		});

		it('should reject negative test counts', () => {
			const invalidResult = {
				total: -1,
				passed: 0,
				failed: 0,
				skipped: 0,
				phase: 'RED'
			};

			const result = validator.validate(invalidResult as TestResult);
			expect(result.valid).toBe(false);
			expect(result.errors.length).toBeGreaterThan(0);
		});

		it('should reject when totals do not match', () => {
			const invalidResult: TestResult = {
				total: 10,
				passed: 3,
				failed: 3,
				skipped: 3, // 3 + 3 + 3 = 9, not 10
				phase: 'RED'
			};

			const result = validator.validate(invalidResult);
			expect(result.valid).toBe(false);
			expect(result.errors).toContain(
				'Total tests must equal passed + failed + skipped'
			);
		});

		it('should reject missing required fields', () => {
			const invalidResult = {
				total: 10,
				passed: 5
				// missing failed, skipped, phase
			};

			const result = validator.validate(invalidResult as TestResult);
			expect(result.valid).toBe(false);
			expect(result.errors.length).toBeGreaterThan(0);
		});

		it('should accept optional coverage data', () => {
			const resultWithCoverage: TestResult = {
				total: 10,
				passed: 10,
				failed: 0,
				skipped: 0,
				phase: 'GREEN',
				coverage: {
					line: 85,
					branch: 75,
					function: 90,
					statement: 85
				}
			};

			const result = validator.validate(resultWithCoverage);
			expect(result.valid).toBe(true);
		});

		it('should reject invalid coverage percentages', () => {
			const invalidResult: TestResult = {
				total: 10,
				passed: 10,
				failed: 0,
				skipped: 0,
				phase: 'GREEN',
				coverage: {
					line: 150, // Invalid: > 100
					branch: -10, // Invalid: < 0
					function: 90,
					statement: 85
				}
			};

			const result = validator.validate(invalidResult);
			expect(result.valid).toBe(false);
			expect(result.errors.length).toBeGreaterThan(0);
		});

		it('should reject invalid phase values', () => {
			const invalidResult = {
				total: 10,
				passed: 5,
				failed: 5,
				skipped: 0,
				phase: 'INVALID_PHASE'
			};

			const result = validator.validate(invalidResult as TestResult);
			expect(result.valid).toBe(false);
			expect(result.errors.length).toBeGreaterThan(0);
		});
	});
});

describe('TestResultValidator - RED Phase Validation', () => {
	const validator = new TestResultValidator();

	it('should pass validation when RED phase has failures', () => {
		const redResult: TestResult = {
			total: 10,
			passed: 5,
			failed: 5,
			skipped: 0,
			phase: 'RED'
		};

		const result = validator.validateRedPhase(redResult);
		expect(result.valid).toBe(true);
		expect(result.errors).toEqual([]);
	});

	it('should fail validation when RED phase has zero failures', () => {
		const redResult: TestResult = {
			total: 10,
			passed: 10,
			failed: 0,
			skipped: 0,
			phase: 'RED'
		};

		const result = validator.validateRedPhase(redResult);
		expect(result.valid).toBe(false);
		expect(result.errors).toContain(
			'RED phase must have at least one failing test'
		);
		expect(result.suggestions).toContain(
			'Write failing tests first to follow TDD workflow'
		);
	});

	it('should fail validation when RED phase has empty test suite', () => {
		const emptyResult: TestResult = {
			total: 0,
			passed: 0,
			failed: 0,
			skipped: 0,
			phase: 'RED'
		};

		const result = validator.validateRedPhase(emptyResult);
		expect(result.valid).toBe(false);
		expect(result.errors).toContain('Cannot validate empty test suite');
		expect(result.suggestions).toContain(
			'Add at least one test to begin TDD cycle'
		);
	});

	it('should propagate base validation errors', () => {
		const invalidResult: TestResult = {
			total: 10,
			passed: 3,
			failed: 3,
			skipped: 3, // Total mismatch
			phase: 'RED'
		};

		const result = validator.validateRedPhase(invalidResult);
		expect(result.valid).toBe(false);
		expect(result.errors).toContain(
			'Total tests must equal passed + failed + skipped'
		);
	});
});

describe('TestResultValidator - GREEN Phase Validation', () => {
	const validator = new TestResultValidator();

	it('should pass validation when GREEN phase has all tests passing', () => {
		const greenResult: TestResult = {
			total: 10,
			passed: 10,
			failed: 0,
			skipped: 0,
			phase: 'GREEN'
		};

		const result = validator.validateGreenPhase(greenResult);
		expect(result.valid).toBe(true);
		expect(result.errors).toEqual([]);
	});

	it('should fail validation when GREEN phase has failures', () => {
		const greenResult: TestResult = {
			total: 10,
			passed: 5,
			failed: 5,
			skipped: 0,
			phase: 'GREEN'
		};

		const result = validator.validateGreenPhase(greenResult);
		expect(result.valid).toBe(false);
		expect(result.errors).toContain('GREEN phase must have zero failures');
		expect(result.suggestions).toContain(
			'Fix implementation to make all tests pass'
		);
	});

	it('should fail validation when GREEN phase has no passing tests', () => {
		const greenResult: TestResult = {
			total: 5,
			passed: 0,
			failed: 0,
			skipped: 5,
			phase: 'GREEN'
		};

		const result = validator.validateGreenPhase(greenResult);
		expect(result.valid).toBe(false);
		expect(result.errors).toContain(
			'GREEN phase must have at least one passing test'
		);
	});

	it('should warn when test count decreases', () => {
		const greenResult: TestResult = {
			total: 5,
			passed: 5,
			failed: 0,
			skipped: 0,
			phase: 'GREEN'
		};

		const result = validator.validateGreenPhase(greenResult, 10);
		expect(result.valid).toBe(true);
		expect(result.warnings).toContain('Test count decreased from 10 to 5');
		expect(result.suggestions).toContain(
			'Verify that no tests were accidentally removed'
		);
	});

	it('should not warn when test count increases', () => {
		const greenResult: TestResult = {
			total: 15,
			passed: 15,
			failed: 0,
			skipped: 0,
			phase: 'GREEN'
		};

		const result = validator.validateGreenPhase(greenResult, 10);
		expect(result.valid).toBe(true);
		expect(result.warnings || []).toEqual([]);
	});

	it('should propagate base validation errors', () => {
		const invalidResult: TestResult = {
			total: 10,
			passed: 3,
			failed: 3,
			skipped: 3, // Total mismatch
			phase: 'GREEN'
		};

		const result = validator.validateGreenPhase(invalidResult);
		expect(result.valid).toBe(false);
		expect(result.errors).toContain(
			'Total tests must equal passed + failed + skipped'
		);
	});
});

describe('TestResultValidator - Coverage Threshold Validation', () => {
	const validator = new TestResultValidator();

	it('should pass validation when coverage meets thresholds', () => {
		const result: TestResult = {
			total: 10,
			passed: 10,
			failed: 0,
			skipped: 0,
			phase: 'GREEN',
			coverage: {
				line: 85,
				branch: 80,
				function: 90,
				statement: 85
			}
		};

		const thresholds = {
			line: 80,
			branch: 75,
			function: 85,
			statement: 80
		};

		const validationResult = validator.validateCoverage(result, thresholds);
		expect(validationResult.valid).toBe(true);
		expect(validationResult.errors).toEqual([]);
	});

	it('should fail validation when line coverage is below threshold', () => {
		const result: TestResult = {
			total: 10,
			passed: 10,
			failed: 0,
			skipped: 0,
			phase: 'GREEN',
			coverage: {
				line: 70,
				branch: 80,
				function: 90,
				statement: 85
			}
		};

		const thresholds = {
			line: 80
		};

		const validationResult = validator.validateCoverage(result, thresholds);
		expect(validationResult.valid).toBe(false);
		expect(validationResult.errors[0]).toContain('line coverage (70% < 80%)');
		expect(validationResult.suggestions).toContain(
			'Add more tests to improve code coverage'
		);
	});

	it('should fail validation when multiple coverage types are below threshold', () => {
		const result: TestResult = {
			total: 10,
			passed: 10,
			failed: 0,
			skipped: 0,
			phase: 'GREEN',
			coverage: {
				line: 70,
				branch: 60,
				function: 75,
				statement: 65
			}
		};

		const thresholds = {
			line: 80,
			branch: 75,
			function: 85,
			statement: 80
		};

		const validationResult = validator.validateCoverage(result, thresholds);
		expect(validationResult.valid).toBe(false);
		expect(validationResult.errors[0]).toContain('line coverage (70% < 80%)');
		expect(validationResult.errors[0]).toContain('branch coverage (60% < 75%)');
		expect(validationResult.errors[0]).toContain(
			'function coverage (75% < 85%)'
		);
		expect(validationResult.errors[0]).toContain(
			'statement coverage (65% < 80%)'
		);
	});

	it('should skip validation when no coverage data is provided', () => {
		const result: TestResult = {
			total: 10,
			passed: 10,
			failed: 0,
			skipped: 0,
			phase: 'GREEN'
		};

		const thresholds = {
			line: 80,
			branch: 75
		};

		const validationResult = validator.validateCoverage(result, thresholds);
		expect(validationResult.valid).toBe(true);
		expect(validationResult.errors).toEqual([]);
	});

	it('should only validate specified threshold types', () => {
		const result: TestResult = {
			total: 10,
			passed: 10,
			failed: 0,
			skipped: 0,
			phase: 'GREEN',
			coverage: {
				line: 70,
				branch: 60,
				function: 90,
				statement: 85
			}
		};

		const thresholds = {
			line: 80
			// Only checking line coverage
		};

		const validationResult = validator.validateCoverage(result, thresholds);
		expect(validationResult.valid).toBe(false);
		expect(validationResult.errors[0]).toContain('line coverage');
		expect(validationResult.errors[0]).not.toContain('branch coverage');
	});

	it('should propagate base validation errors', () => {
		const invalidResult: TestResult = {
			total: 10,
			passed: 3,
			failed: 3,
			skipped: 3, // Total mismatch
			phase: 'GREEN',
			coverage: {
				line: 90,
				branch: 90,
				function: 90,
				statement: 90
			}
		};

		const thresholds = {
			line: 80
		};

		const validationResult = validator.validateCoverage(
			invalidResult,
			thresholds
		);
		expect(validationResult.valid).toBe(false);
		expect(validationResult.errors).toContain(
			'Total tests must equal passed + failed + skipped'
		);
	});
});

```
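
The `TestResultValidator` implementation itself is not reproduced on this page, but its contract can be read off the assertions above. The following is a minimal sketch of that contract, assuming the `TestResult`/`ValidationResult` shapes implied by the tests; the error strings are copied from the assertions, while the names, the exact phase list, and all other details are illustrative rather than the shipped code.

```typescript
// Minimal sketch inferred from the tests above; not the shipped implementation.
const PHASES = ['RED', 'GREEN'] as const; // the real TestPhase union may include more values

interface TestResultShape {
	total: number;
	passed: number;
	failed: number;
	skipped: number;
	phase: (typeof PHASES)[number];
	coverage?: Record<'line' | 'branch' | 'function' | 'statement', number>;
}

interface ValidationResultShape {
	valid: boolean;
	errors: string[];
	warnings?: string[];
	suggestions?: string[];
}

class TestResultValidatorSketch {
	/** Structural checks shared by every phase-specific validation. */
	validate(result: TestResultShape): ValidationResultShape {
		const errors: string[] = [];
		const counts = [result.total, result.passed, result.failed, result.skipped];
		if (counts.some((n) => typeof n !== 'number' || n < 0)) {
			errors.push('Test counts must be non-negative numbers');
		}
		if (result.total !== result.passed + result.failed + result.skipped) {
			errors.push('Total tests must equal passed + failed + skipped');
		}
		if (!PHASES.includes(result.phase)) {
			errors.push(`Unknown test phase: ${result.phase}`);
		}
		for (const [kind, pct] of Object.entries(result.coverage ?? {})) {
			if (pct < 0 || pct > 100) {
				errors.push(`${kind} coverage must be between 0 and 100`);
			}
		}
		return { valid: errors.length === 0, errors };
	}

	/** RED phase: the suite must exist and contain at least one failing test. */
	validateRedPhase(result: TestResultShape): ValidationResultShape {
		const base = this.validate(result);
		const errors = [...base.errors];
		const suggestions: string[] = [];
		if (result.total === 0) {
			errors.push('Cannot validate empty test suite');
			suggestions.push('Add at least one test to begin TDD cycle');
		} else if (result.failed === 0) {
			errors.push('RED phase must have at least one failing test');
			suggestions.push('Write failing tests first to follow TDD workflow');
		}
		return { valid: errors.length === 0, errors, suggestions };
	}

	/** GREEN phase: everything passes; a shrinking suite only warns. */
	validateGreenPhase(
		result: TestResultShape,
		previousTotal?: number
	): ValidationResultShape {
		const base = this.validate(result);
		const errors = [...base.errors];
		const warnings: string[] = [];
		const suggestions: string[] = [];
		if (result.failed > 0) {
			errors.push('GREEN phase must have zero failures');
			suggestions.push('Fix implementation to make all tests pass');
		}
		if (result.passed === 0) {
			errors.push('GREEN phase must have at least one passing test');
		}
		if (previousTotal !== undefined && result.total < previousTotal) {
			warnings.push(
				`Test count decreased from ${previousTotal} to ${result.total}`
			);
			suggestions.push('Verify that no tests were accidentally removed');
		}
		return { valid: errors.length === 0, errors, warnings, suggestions };
	}
}
```

The real `validateCoverage` (exercised in the last describe block) additionally checks only the coverage types present in the supplied thresholds and folds every miss into a single message of the form `line coverage (70% < 80%)`, alongside a suggestion to add more tests.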