This is page 33 of 50. Use http://codebase.md/eyaltoledano/claude-task-master?lines=false&page={x} to view the full context.

# Directory Structure

```
├── .changeset
│   ├── config.json
│   └── README.md
├── .claude
│   ├── commands
│   │   └── dedupe.md
│   └── TM_COMMANDS_GUIDE.md
├── .claude-plugin
│   └── marketplace.json
├── .coderabbit.yaml
├── .cursor
│   ├── mcp.json
│   └── rules
│       ├── ai_providers.mdc
│       ├── ai_services.mdc
│       ├── architecture.mdc
│       ├── changeset.mdc
│       ├── commands.mdc
│       ├── context_gathering.mdc
│       ├── cursor_rules.mdc
│       ├── dependencies.mdc
│       ├── dev_workflow.mdc
│       ├── git_workflow.mdc
│       ├── glossary.mdc
│       ├── mcp.mdc
│       ├── new_features.mdc
│       ├── self_improve.mdc
│       ├── tags.mdc
│       ├── taskmaster.mdc
│       ├── tasks.mdc
│       ├── telemetry.mdc
│       ├── test_workflow.mdc
│       ├── tests.mdc
│       ├── ui.mdc
│       └── utilities.mdc
├── .cursorignore
├── .env.example
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── enhancements---feature-requests.md
│   │   └── feedback.md
│   ├── PULL_REQUEST_TEMPLATE
│   │   ├── bugfix.md
│   │   ├── config.yml
│   │   ├── feature.md
│   │   └── integration.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── scripts
│   │   ├── auto-close-duplicates.mjs
│   │   ├── backfill-duplicate-comments.mjs
│   │   ├── check-pre-release-mode.mjs
│   │   ├── parse-metrics.mjs
│   │   ├── release.mjs
│   │   ├── tag-extension.mjs
│   │   ├── utils.mjs
│   │   └── validate-changesets.mjs
│   └── workflows
│       ├── auto-close-duplicates.yml
│       ├── backfill-duplicate-comments.yml
│       ├── ci.yml
│       ├── claude-dedupe-issues.yml
│       ├── claude-docs-trigger.yml
│       ├── claude-docs-updater.yml
│       ├── claude-issue-triage.yml
│       ├── claude.yml
│       ├── extension-ci.yml
│       ├── extension-release.yml
│       ├── log-issue-events.yml
│       ├── pre-release.yml
│       ├── release-check.yml
│       ├── release.yml
│       ├── update-models-md.yml
│       └── weekly-metrics-discord.yml
├── .gitignore
├── .kiro
│   ├── hooks
│   │   ├── tm-code-change-task-tracker.kiro.hook
│   │   ├── tm-complexity-analyzer.kiro.hook
│   │   ├── tm-daily-standup-assistant.kiro.hook
│   │   ├── tm-git-commit-task-linker.kiro.hook
│   │   ├── tm-pr-readiness-checker.kiro.hook
│   │   ├── tm-task-dependency-auto-progression.kiro.hook
│   │   └── tm-test-success-task-completer.kiro.hook
│   ├── settings
│   │   └── mcp.json
│   └── steering
│       ├── dev_workflow.md
│       ├── kiro_rules.md
│       ├── self_improve.md
│       ├── taskmaster_hooks_workflow.md
│       └── taskmaster.md
├── .manypkg.json
├── .mcp.json
├── .npmignore
├── .nvmrc
├── .taskmaster
│   ├── CLAUDE.md
│   ├── config.json
│   ├── docs
│   │   ├── autonomous-tdd-git-workflow.md
│   │   ├── MIGRATION-ROADMAP.md
│   │   ├── prd-tm-start.txt
│   │   ├── prd.txt
│   │   ├── README.md
│   │   ├── research
│   │   │   ├── 2025-06-14_how-can-i-improve-the-scope-up-and-scope-down-comm.md
│   │   │   ├── 2025-06-14_should-i-be-using-any-specific-libraries-for-this.md
│   │   │   ├── 2025-06-14_test-save-functionality.md
│   │   │   ├── 2025-06-14_test-the-fix-for-duplicate-saves-final-test.md
│   │   │   └── 2025-08-01_do-we-need-to-add-new-commands-or-can-we-just-weap.md
│   │   ├── task-template-importing-prd.txt
│   │   ├── tdd-workflow-phase-0-spike.md
│   │   ├── tdd-workflow-phase-1-core-rails.md
│   │   ├── tdd-workflow-phase-1-orchestrator.md
│   │   ├── tdd-workflow-phase-2-pr-resumability.md
│   │   ├── tdd-workflow-phase-3-extensibility-guardrails.md
│   │   ├── test-prd.txt
│   │   └── tm-core-phase-1.txt
│   ├── reports
│   │   ├── task-complexity-report_autonomous-tdd-git-workflow.json
│   │   ├── task-complexity-report_cc-kiro-hooks.json
│   │   ├── task-complexity-report_tdd-phase-1-core-rails.json
│   │   ├── task-complexity-report_tdd-workflow-phase-0.json
│   │   ├── task-complexity-report_test-prd-tag.json
│   │   ├── task-complexity-report_tm-core-phase-1.json
│   │   ├── task-complexity-report.json
│   │   └── tm-core-complexity.json
│   ├── state.json
│   ├── tasks
│   │   ├── task_001_tm-start.txt
│   │   ├── task_002_tm-start.txt
│   │   ├── task_003_tm-start.txt
│   │   ├── task_004_tm-start.txt
│   │   ├── task_007_tm-start.txt
│   │   └── tasks.json
│   └── templates
│       ├── example_prd_rpg.md
│       └── example_prd.md
├── .vscode
│   ├── extensions.json
│   └── settings.json
├── apps
│   ├── cli
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── src
│   │   │   ├── command-registry.ts
│   │   │   ├── commands
│   │   │   │   ├── auth.command.ts
│   │   │   │   ├── autopilot
│   │   │   │   │   ├── abort.command.ts
│   │   │   │   │   ├── commit.command.ts
│   │   │   │   │   ├── complete.command.ts
│   │   │   │   │   ├── index.ts
│   │   │   │   │   ├── next.command.ts
│   │   │   │   │   ├── resume.command.ts
│   │   │   │   │   ├── shared.ts
│   │   │   │   │   ├── start.command.ts
│   │   │   │   │   └── status.command.ts
│   │   │   │   ├── briefs.command.ts
│   │   │   │   ├── context.command.ts
│   │   │   │   ├── export.command.ts
│   │   │   │   ├── list.command.ts
│   │   │   │   ├── models
│   │   │   │   │   ├── custom-providers.ts
│   │   │   │   │   ├── fetchers.ts
│   │   │   │   │   ├── index.ts
│   │   │   │   │   ├── prompts.ts
│   │   │   │   │   ├── setup.ts
│   │   │   │   │   └── types.ts
│   │   │   │   ├── next.command.ts
│   │   │   │   ├── set-status.command.ts
│   │   │   │   ├── show.command.ts
│   │   │   │   ├── start.command.ts
│   │   │   │   └── tags.command.ts
│   │   │   ├── index.ts
│   │   │   ├── lib
│   │   │   │   └── model-management.ts
│   │   │   ├── types
│   │   │   │   └── tag-management.d.ts
│   │   │   ├── ui
│   │   │   │   ├── components
│   │   │   │   │   ├── cardBox.component.ts
│   │   │   │   │   ├── dashboard.component.ts
│   │   │   │   │   ├── header.component.ts
│   │   │   │   │   ├── index.ts
│   │   │   │   │   ├── next-task.component.ts
│   │   │   │   │   ├── suggested-steps.component.ts
│   │   │   │   │   └── task-detail.component.ts
│   │   │   │   ├── display
│   │   │   │   │   ├── messages.ts
│   │   │   │   │   └── tables.ts
│   │   │   │   ├── formatters
│   │   │   │   │   ├── complexity-formatters.ts
│   │   │   │   │   ├── dependency-formatters.ts
│   │   │   │   │   ├── priority-formatters.ts
│   │   │   │   │   ├── status-formatters.spec.ts
│   │   │   │   │   └── status-formatters.ts
│   │   │   │   ├── index.ts
│   │   │   │   └── layout
│   │   │   │       ├── helpers.spec.ts
│   │   │   │       └── helpers.ts
│   │   │   └── utils
│   │   │       ├── auth-helpers.ts
│   │   │       ├── auto-update.ts
│   │   │       ├── brief-selection.ts
│   │   │       ├── display-helpers.ts
│   │   │       ├── error-handler.ts
│   │   │       ├── index.ts
│   │   │       ├── project-root.ts
│   │   │       ├── task-status.ts
│   │   │       ├── ui.spec.ts
│   │   │       └── ui.ts
│   │   ├── tests
│   │   │   ├── integration
│   │   │   │   └── commands
│   │   │   │       └── autopilot
│   │   │   │           └── workflow.test.ts
│   │   │   └── unit
│   │   │       ├── commands
│   │   │       │   ├── autopilot
│   │   │       │   │   └── shared.test.ts
│   │   │       │   ├── list.command.spec.ts
│   │   │       │   └── show.command.spec.ts
│   │   │       └── ui
│   │   │           └── dashboard.component.spec.ts
│   │   ├── tsconfig.json
│   │   └── vitest.config.ts
│   ├── docs
│   │   ├── archive
│   │   │   ├── ai-client-utils-example.mdx
│   │   │   ├── ai-development-workflow.mdx
│   │   │   ├── command-reference.mdx
│   │   │   ├── configuration.mdx
│   │   │   ├── cursor-setup.mdx
│   │   │   ├── examples.mdx
│   │   │   └── Installation.mdx
│   │   ├── best-practices
│   │   │   ├── advanced-tasks.mdx
│   │   │   ├── configuration-advanced.mdx
│   │   │   └── index.mdx
│   │   ├── capabilities
│   │   │   ├── cli-root-commands.mdx
│   │   │   ├── index.mdx
│   │   │   ├── mcp.mdx
│   │   │   ├── rpg-method.mdx
│   │   │   └── task-structure.mdx
│   │   ├── CHANGELOG.md
│   │   ├── command-reference.mdx
│   │   ├── configuration.mdx
│   │   ├── docs.json
│   │   ├── favicon.svg
│   │   ├── getting-started
│   │   │   ├── api-keys.mdx
│   │   │   ├── contribute.mdx
│   │   │   ├── faq.mdx
│   │   │   └── quick-start
│   │   │       ├── configuration-quick.mdx
│   │   │       ├── execute-quick.mdx
│   │   │       ├── installation.mdx
│   │   │       ├── moving-forward.mdx
│   │   │       ├── prd-quick.mdx
│   │   │       ├── quick-start.mdx
│   │   │       ├── requirements.mdx
│   │   │       ├── rules-quick.mdx
│   │   │       └── tasks-quick.mdx
│   │   ├── introduction.mdx
│   │   ├── licensing.md
│   │   ├── logo
│   │   │   ├── dark.svg
│   │   │   ├── light.svg
│   │   │   └── task-master-logo.png
│   │   ├── package.json
│   │   ├── README.md
│   │   ├── style.css
│   │   ├── tdd-workflow
│   │   │   ├── ai-agent-integration.mdx
│   │   │   └── quickstart.mdx
│   │   ├── vercel.json
│   │   └── whats-new.mdx
│   ├── extension
│   │   ├── .vscodeignore
│   │   ├── assets
│   │   │   ├── banner.png
│   │   │   ├── icon-dark.svg
│   │   │   ├── icon-light.svg
│   │   │   ├── icon.png
│   │   │   ├── screenshots
│   │   │   │   ├── kanban-board.png
│   │   │   │   └── task-details.png
│   │   │   └── sidebar-icon.svg
│   │   ├── CHANGELOG.md
│   │   ├── components.json
│   │   ├── docs
│   │   │   ├── extension-CI-setup.md
│   │   │   └── extension-development-guide.md
│   │   ├── esbuild.js
│   │   ├── LICENSE
│   │   ├── package.json
│   │   ├── package.mjs
│   │   ├── package.publish.json
│   │   ├── README.md
│   │   ├── src
│   │   │   ├── components
│   │   │   │   ├── ConfigView.tsx
│   │   │   │   ├── constants.ts
│   │   │   │   ├── TaskDetails
│   │   │   │   │   ├── AIActionsSection.tsx
│   │   │   │   │   ├── DetailsSection.tsx
│   │   │   │   │   ├── PriorityBadge.tsx
│   │   │   │   │   ├── SubtasksSection.tsx
│   │   │   │   │   ├── TaskMetadataSidebar.tsx
│   │   │   │   │   └── useTaskDetails.ts
│   │   │   │   ├── TaskDetailsView.tsx
│   │   │   │   ├── TaskMasterLogo.tsx
│   │   │   │   └── ui
│   │   │   │       ├── badge.tsx
│   │   │   │       ├── breadcrumb.tsx
│   │   │   │       ├── button.tsx
│   │   │   │       ├── card.tsx
│   │   │   │       ├── collapsible.tsx
│   │   │   │       ├── CollapsibleSection.tsx
│   │   │   │       ├── dropdown-menu.tsx
│   │   │   │       ├── label.tsx
│   │   │   │       ├── scroll-area.tsx
│   │   │   │       ├── separator.tsx
│   │   │   │       ├── shadcn-io
│   │   │   │       │   └── kanban
│   │   │   │       │       └── index.tsx
│   │   │   │       └── textarea.tsx
│   │   │   ├── extension.ts
│   │   │   ├── index.ts
│   │   │   ├── lib
│   │   │   │   └── utils.ts
│   │   │   ├── services
│   │   │   │   ├── config-service.ts
│   │   │   │   ├── error-handler.ts
│   │   │   │   ├── notification-preferences.ts
│   │   │   │   ├── polling-service.ts
│   │   │   │   ├── polling-strategies.ts
│   │   │   │   ├── sidebar-webview-manager.ts
│   │   │   │   ├── task-repository.ts
│   │   │   │   ├── terminal-manager.ts
│   │   │   │   └── webview-manager.ts
│   │   │   ├── test
│   │   │   │   └── extension.test.ts
│   │   │   ├── utils
│   │   │   │   ├── configManager.ts
│   │   │   │   ├── connectionManager.ts
│   │   │   │   ├── errorHandler.ts
│   │   │   │   ├── event-emitter.ts
│   │   │   │   ├── logger.ts
│   │   │   │   ├── mcpClient.ts
│   │   │   │   ├── notificationPreferences.ts
│   │   │   │   └── task-master-api
│   │   │   │       ├── cache
│   │   │   │       │   └── cache-manager.ts
│   │   │   │       ├── index.ts
│   │   │   │       ├── mcp-client.ts
│   │   │   │       ├── transformers
│   │   │   │       │   └── task-transformer.ts
│   │   │   │       └── types
│   │   │   │           └── index.ts
│   │   │   └── webview
│   │   │       ├── App.tsx
│   │   │       ├── components
│   │   │       │   ├── AppContent.tsx
│   │   │       │   ├── EmptyState.tsx
│   │   │       │   ├── ErrorBoundary.tsx
│   │   │       │   ├── PollingStatus.tsx
│   │   │       │   ├── PriorityBadge.tsx
│   │   │       │   ├── SidebarView.tsx
│   │   │       │   ├── TagDropdown.tsx
│   │   │       │   ├── TaskCard.tsx
│   │   │       │   ├── TaskEditModal.tsx
│   │   │       │   ├── TaskMasterKanban.tsx
│   │   │       │   ├── ToastContainer.tsx
│   │   │       │   └── ToastNotification.tsx
│   │   │       ├── constants
│   │   │       │   └── index.ts
│   │   │       ├── contexts
│   │   │       │   └── VSCodeContext.tsx
│   │   │       ├── hooks
│   │   │       │   ├── useTaskQueries.ts
│   │   │       │   ├── useVSCodeMessages.ts
│   │   │       │   └── useWebviewHeight.ts
│   │   │       ├── index.css
│   │   │       ├── index.tsx
│   │   │       ├── providers
│   │   │       │   └── QueryProvider.tsx
│   │   │       ├── reducers
│   │   │       │   └── appReducer.ts
│   │   │       ├── sidebar.tsx
│   │   │       ├── types
│   │   │       │   └── index.ts
│   │   │       └── utils
│   │   │           ├── logger.ts
│   │   │           └── toast.ts
│   │   └── tsconfig.json
│   └── mcp
│       ├── CHANGELOG.md
│       ├── package.json
│       ├── src
│       │   ├── index.ts
│       │   ├── shared
│       │   │   ├── types.ts
│       │   │   └── utils.ts
│       │   └── tools
│       │       ├── autopilot
│       │       │   ├── abort.tool.ts
│       │       │   ├── commit.tool.ts
│       │       │   ├── complete.tool.ts
│       │       │   ├── finalize.tool.ts
│       │       │   ├── index.ts
│       │       │   ├── next.tool.ts
│       │       │   ├── resume.tool.ts
│       │       │   ├── start.tool.ts
│       │       │   └── status.tool.ts
│       │       ├── README-ZOD-V3.md
│       │       └── tasks
│       │           ├── get-task.tool.ts
│       │           ├── get-tasks.tool.ts
│       │           └── index.ts
│       ├── tsconfig.json
│       └── vitest.config.ts
├── assets
│   ├── .windsurfrules
│   ├── AGENTS.md
│   ├── claude
│   │   └── TM_COMMANDS_GUIDE.md
│   ├── config.json
│   ├── env.example
│   ├── example_prd_rpg.txt
│   ├── example_prd.txt
│   ├── GEMINI.md
│   ├── gitignore
│   ├── kiro-hooks
│   │   ├── tm-code-change-task-tracker.kiro.hook
│   │   ├── tm-complexity-analyzer.kiro.hook
│   │   ├── tm-daily-standup-assistant.kiro.hook
│   │   ├── tm-git-commit-task-linker.kiro.hook
│   │   ├── tm-pr-readiness-checker.kiro.hook
│   │   ├── tm-task-dependency-auto-progression.kiro.hook
│   │   └── tm-test-success-task-completer.kiro.hook
│   ├── roocode
│   │   ├── .roo
│   │   │   ├── rules-architect
│   │   │   │   └── architect-rules
│   │   │   ├── rules-ask
│   │   │   │   └── ask-rules
│   │   │   ├── rules-code
│   │   │   │   └── code-rules
│   │   │   ├── rules-debug
│   │   │   │   └── debug-rules
│   │   │   ├── rules-orchestrator
│   │   │   │   └── orchestrator-rules
│   │   │   └── rules-test
│   │   │       └── test-rules
│   │   └── .roomodes
│   ├── rules
│   │   ├── cursor_rules.mdc
│   │   ├── dev_workflow.mdc
│   │   ├── self_improve.mdc
│   │   ├── taskmaster_hooks_workflow.mdc
│   │   └── taskmaster.mdc
│   └── scripts_README.md
├── bin
│   └── task-master.js
├── biome.json
├── CHANGELOG.md
├── CLAUDE_CODE_PLUGIN.md
├── CLAUDE.md
├── context
│   ├── chats
│   │   ├── add-task-dependencies-1.md
│   │   └── max-min-tokens.txt.md
│   ├── fastmcp-core.txt
│   ├── fastmcp-docs.txt
│   ├── MCP_INTEGRATION.md
│   ├── mcp-js-sdk-docs.txt
│   ├── mcp-protocol-repo.txt
│   ├── mcp-protocol-schema-03262025.json
│   └── mcp-protocol-spec.txt
├── CONTRIBUTING.md
├── docs
│   ├── claude-code-integration.md
│   ├── CLI-COMMANDER-PATTERN.md
│   ├── command-reference.md
│   ├── configuration.md
│   ├── contributor-docs
│   │   ├── testing-roo-integration.md
│   │   └── worktree-setup.md
│   ├── cross-tag-task-movement.md
│   ├── examples
│   │   ├── claude-code-usage.md
│   │   └── codex-cli-usage.md
│   ├── examples.md
│   ├── licensing.md
│   ├── mcp-provider-guide.md
│   ├── mcp-provider.md
│   ├── migration-guide.md
│   ├── models.md
│   ├── providers
│   │   ├── codex-cli.md
│   │   └── gemini-cli.md
│   ├── README.md
│   ├── scripts
│   │   └── models-json-to-markdown.js
│   ├── task-structure.md
│   └── tutorial.md
├── images
│   ├── hamster-hiring.png
│   └── logo.png
├── index.js
├── jest.config.js
├── jest.resolver.cjs
├── LICENSE
├── llms-install.md
├── mcp-server
│   ├── server.js
│   └── src
│       ├── core
│       │   ├── __tests__
│       │   │   └── context-manager.test.js
│       │   ├── context-manager.js
│       │   ├── direct-functions
│       │   │   ├── add-dependency.js
│       │   │   ├── add-subtask.js
│       │   │   ├── add-tag.js
│       │   │   ├── add-task.js
│       │   │   ├── analyze-task-complexity.js
│       │   │   ├── cache-stats.js
│       │   │   ├── clear-subtasks.js
│       │   │   ├── complexity-report.js
│       │   │   ├── copy-tag.js
│       │   │   ├── create-tag-from-branch.js
│       │   │   ├── delete-tag.js
│       │   │   ├── expand-all-tasks.js
│       │   │   ├── expand-task.js
│       │   │   ├── fix-dependencies.js
│       │   │   ├── generate-task-files.js
│       │   │   ├── initialize-project.js
│       │   │   ├── list-tags.js
│       │   │   ├── models.js
│       │   │   ├── move-task-cross-tag.js
│       │   │   ├── move-task.js
│       │   │   ├── next-task.js
│       │   │   ├── parse-prd.js
│       │   │   ├── remove-dependency.js
│       │   │   ├── remove-subtask.js
│       │   │   ├── remove-task.js
│       │   │   ├── rename-tag.js
│       │   │   ├── research.js
│       │   │   ├── response-language.js
│       │   │   ├── rules.js
│       │   │   ├── scope-down.js
│       │   │   ├── scope-up.js
│       │   │   ├── set-task-status.js
│       │   │   ├── update-subtask-by-id.js
│       │   │   ├── update-task-by-id.js
│       │   │   ├── update-tasks.js
│       │   │   ├── use-tag.js
│       │   │   └── validate-dependencies.js
│       │   ├── task-master-core.js
│       │   └── utils
│       │       ├── env-utils.js
│       │       └── path-utils.js
│       ├── custom-sdk
│       │   ├── errors.js
│       │   ├── index.js
│       │   ├── json-extractor.js
│       │   ├── language-model.js
│       │   ├── message-converter.js
│       │   └── schema-converter.js
│       ├── index.js
│       ├── logger.js
│       ├── providers
│       │   └── mcp-provider.js
│       └── tools
│           ├── add-dependency.js
│           ├── add-subtask.js
│           ├── add-tag.js
│           ├── add-task.js
│           ├── analyze.js
│           ├── clear-subtasks.js
│           ├── complexity-report.js
│           ├── copy-tag.js
│           ├── delete-tag.js
│           ├── expand-all.js
│           ├── expand-task.js
│           ├── fix-dependencies.js
│           ├── generate.js
│           ├── get-operation-status.js
│           ├── index.js
│           ├── initialize-project.js
│           ├── list-tags.js
│           ├── models.js
│           ├── move-task.js
│           ├── next-task.js
│           ├── parse-prd.js
│           ├── README-ZOD-V3.md
│           ├── remove-dependency.js
│           ├── remove-subtask.js
│           ├── remove-task.js
│           ├── rename-tag.js
│           ├── research.js
│           ├── response-language.js
│           ├── rules.js
│           ├── scope-down.js
│           ├── scope-up.js
│           ├── set-task-status.js
│           ├── tool-registry.js
│           ├── update-subtask.js
│           ├── update-task.js
│           ├── update.js
│           ├── use-tag.js
│           ├── utils.js
│           └── validate-dependencies.js
├── mcp-test.js
├── output.json
├── package-lock.json
├── package.json
├── packages
│   ├── ai-sdk-provider-grok-cli
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── README.md
│   │   ├── src
│   │   │   ├── errors.test.ts
│   │   │   ├── errors.ts
│   │   │   ├── grok-cli-language-model.ts
│   │   │   ├── grok-cli-provider.test.ts
│   │   │   ├── grok-cli-provider.ts
│   │   │   ├── index.ts
│   │   │   ├── json-extractor.test.ts
│   │   │   ├── json-extractor.ts
│   │   │   ├── message-converter.test.ts
│   │   │   ├── message-converter.ts
│   │   │   └── types.ts
│   │   └── tsconfig.json
│   ├── build-config
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── src
│   │   │   └── tsdown.base.ts
│   │   └── tsconfig.json
│   ├── claude-code-plugin
│   │   ├── .claude-plugin
│   │   │   └── plugin.json
│   │   ├── .gitignore
│   │   ├── agents
│   │   │   ├── task-checker.md
│   │   │   ├── task-executor.md
│   │   │   └── task-orchestrator.md
│   │   ├── CHANGELOG.md
│   │   ├── commands
│   │   │   ├── add-dependency.md
│   │   │   ├── add-subtask.md
│   │   │   ├── add-task.md
│   │   │   ├── analyze-complexity.md
│   │   │   ├── analyze-project.md
│   │   │   ├── auto-implement-tasks.md
│   │   │   ├── command-pipeline.md
│   │   │   ├── complexity-report.md
│   │   │   ├── convert-task-to-subtask.md
│   │   │   ├── expand-all-tasks.md
│   │   │   ├── expand-task.md
│   │   │   ├── fix-dependencies.md
│   │   │   ├── generate-tasks.md
│   │   │   ├── help.md
│   │   │   ├── init-project-quick.md
│   │   │   ├── init-project.md
│   │   │   ├── install-taskmaster.md
│   │   │   ├── learn.md
│   │   │   ├── list-tasks-by-status.md
│   │   │   ├── list-tasks-with-subtasks.md
│   │   │   ├── list-tasks.md
│   │   │   ├── next-task.md
│   │   │   ├── parse-prd-with-research.md
│   │   │   ├── parse-prd.md
│   │   │   ├── project-status.md
│   │   │   ├── quick-install-taskmaster.md
│   │   │   ├── remove-all-subtasks.md
│   │   │   ├── remove-dependency.md
│   │   │   ├── remove-subtask.md
│   │   │   ├── remove-subtasks.md
│   │   │   ├── remove-task.md
│   │   │   ├── setup-models.md
│   │   │   ├── show-task.md
│   │   │   ├── smart-workflow.md
│   │   │   ├── sync-readme.md
│   │   │   ├── tm-main.md
│   │   │   ├── to-cancelled.md
│   │   │   ├── to-deferred.md
│   │   │   ├── to-done.md
│   │   │   ├── to-in-progress.md
│   │   │   ├── to-pending.md
│   │   │   ├── to-review.md
│   │   │   ├── update-single-task.md
│   │   │   ├── update-task.md
│   │   │   ├── update-tasks-from-id.md
│   │   │   ├── validate-dependencies.md
│   │   │   └── view-models.md
│   │   ├── mcp.json
│   │   └── package.json
│   ├── tm-bridge
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── README.md
│   │   ├── src
│   │   │   ├── add-tag-bridge.ts
│   │   │   ├── bridge-types.ts
│   │   │   ├── bridge-utils.ts
│   │   │   ├── expand-bridge.ts
│   │   │   ├── index.ts
│   │   │   ├── tags-bridge.ts
│   │   │   ├── update-bridge.ts
│   │   │   └── use-tag-bridge.ts
│   │   └── tsconfig.json
│   └── tm-core
│       ├── .gitignore
│       ├── CHANGELOG.md
│       ├── docs
│       │   └── listTasks-architecture.md
│       ├── package.json
│       ├── POC-STATUS.md
│       ├── README.md
│       ├── src
│       │   ├── common
│       │   │   ├── constants
│       │   │   │   ├── index.ts
│       │   │   │   ├── paths.ts
│       │   │   │   └── providers.ts
│       │   │   ├── errors
│       │   │   │   ├── index.ts
│       │   │   │   └── task-master-error.ts
│       │   │   ├── interfaces
│       │   │   │   ├── configuration.interface.ts
│       │   │   │   ├── index.ts
│       │   │   │   └── storage.interface.ts
│       │   │   ├── logger
│       │   │   │   ├── factory.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── logger.spec.ts
│       │   │   │   └── logger.ts
│       │   │   ├── mappers
│       │   │   │   ├── TaskMapper.test.ts
│       │   │   │   └── TaskMapper.ts
│       │   │   ├── types
│       │   │   │   ├── database.types.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── legacy.ts
│       │   │   │   └── repository-types.ts
│       │   │   └── utils
│       │   │       ├── git-utils.ts
│       │   │       ├── id-generator.ts
│       │   │       ├── index.ts
│       │   │       ├── path-helpers.ts
│       │   │       ├── path-normalizer.spec.ts
│       │   │       ├── path-normalizer.ts
│       │   │       ├── project-root-finder.spec.ts
│       │   │       ├── project-root-finder.ts
│       │   │       ├── run-id-generator.spec.ts
│       │   │       └── run-id-generator.ts
│       │   ├── index.ts
│       │   ├── modules
│       │   │   ├── ai
│       │   │   │   ├── index.ts
│       │   │   │   ├── interfaces
│       │   │   │   │   └── ai-provider.interface.ts
│       │   │   │   └── providers
│       │   │   │       ├── base-provider.ts
│       │   │   │       └── index.ts
│       │   │   ├── auth
│       │   │   │   ├── auth-domain.spec.ts
│       │   │   │   ├── auth-domain.ts
│       │   │   │   ├── config.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── managers
│       │   │   │   │   ├── auth-manager.spec.ts
│       │   │   │   │   └── auth-manager.ts
│       │   │   │   ├── services
│       │   │   │   │   ├── context-store.ts
│       │   │   │   │   ├── oauth-service.ts
│       │   │   │   │   ├── organization.service.ts
│       │   │   │   │   ├── supabase-session-storage.spec.ts
│       │   │   │   │   └── supabase-session-storage.ts
│       │   │   │   └── types.ts
│       │   │   ├── briefs
│       │   │   │   ├── briefs-domain.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── services
│       │   │   │   │   └── brief-service.ts
│       │   │   │   ├── types.ts
│       │   │   │   └── utils
│       │   │   │       └── url-parser.ts
│       │   │   ├── commands
│       │   │   │   └── index.ts
│       │   │   ├── config
│       │   │   │   ├── config-domain.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── managers
│       │   │   │   │   ├── config-manager.spec.ts
│       │   │   │   │   └── config-manager.ts
│       │   │   │   └── services
│       │   │   │       ├── config-loader.service.spec.ts
│       │   │   │       ├── config-loader.service.ts
│       │   │   │       ├── config-merger.service.spec.ts
│       │   │   │       ├── config-merger.service.ts
│       │   │   │       ├── config-persistence.service.spec.ts
│       │   │   │       ├── config-persistence.service.ts
│       │   │   │       ├── environment-config-provider.service.spec.ts
│       │   │   │       ├── environment-config-provider.service.ts
│       │   │   │       ├── index.ts
│       │   │   │       ├── runtime-state-manager.service.spec.ts
│       │   │   │       └── runtime-state-manager.service.ts
│       │   │   ├── dependencies
│       │   │   │   └── index.ts
│       │   │   ├── execution
│       │   │   │   ├── executors
│       │   │   │   │   ├── base-executor.ts
│       │   │   │   │   ├── claude-executor.ts
│       │   │   │   │   └── executor-factory.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── services
│       │   │   │   │   └── executor-service.ts
│       │   │   │   └── types.ts
│       │   │   ├── git
│       │   │   │   ├── adapters
│       │   │   │   │   ├── git-adapter.test.ts
│       │   │   │   │   └── git-adapter.ts
│       │   │   │   ├── git-domain.ts
│       │   │   │   ├── index.ts
│       │   │   │   └── services
│       │   │   │       ├── branch-name-generator.spec.ts
│       │   │   │       ├── branch-name-generator.ts
│       │   │   │       ├── commit-message-generator.test.ts
│       │   │   │       ├── commit-message-generator.ts
│       │   │   │       ├── scope-detector.test.ts
│       │   │   │       ├── scope-detector.ts
│       │   │   │       ├── template-engine.test.ts
│       │   │   │       └── template-engine.ts
│       │   │   ├── integration
│       │   │   │   ├── clients
│       │   │   │   │   ├── index.ts
│       │   │   │   │   └── supabase-client.ts
│       │   │   │   ├── integration-domain.ts
│       │   │   │   └── services
│       │   │   │       ├── export.service.ts
│       │   │   │       ├── task-expansion.service.ts
│       │   │   │       └── task-retrieval.service.ts
│       │   │   ├── reports
│       │   │   │   ├── index.ts
│       │   │   │   ├── managers
│       │   │   │   │   └── complexity-report-manager.ts
│       │   │   │   └── types.ts
│       │   │   ├── storage
│       │   │   │   ├── adapters
│       │   │   │   │   ├── activity-logger.ts
│       │   │   │   │   ├── api-storage.ts
│       │   │   │   │   └── file-storage
│       │   │   │   │       ├── file-operations.ts
│       │   │   │   │       ├── file-storage.ts
│       │   │   │   │       ├── format-handler.ts
│       │   │   │   │       ├── index.ts
│       │   │   │   │       └── path-resolver.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── services
│       │   │   │   │   └── storage-factory.ts
│       │   │   │   └── utils
│       │   │   │       └── api-client.ts
│       │   │   ├── tasks
│       │   │   │   ├── entities
│       │   │   │   │   └── task.entity.ts
│       │   │   │   ├── parser
│       │   │   │   │   └── index.ts
│       │   │   │   ├── repositories
│       │   │   │   │   ├── supabase
│       │   │   │   │   │   ├── dependency-fetcher.ts
│       │   │   │   │   │   ├── index.ts
│       │   │   │   │   │   └── supabase-repository.ts
│       │   │   │   │   └── task-repository.interface.ts
│       │   │   │   ├── services
│       │   │   │   │   ├── preflight-checker.service.ts
│       │   │   │   │   ├── tag.service.ts
│       │   │   │   │   ├── task-execution-service.ts
│       │   │   │   │   ├── task-loader.service.ts
│       │   │   │   │   └── task-service.ts
│       │   │   │   └── tasks-domain.ts
│       │   │   ├── ui
│       │   │   │   └── index.ts
│       │   │   └── workflow
│       │   │       ├── managers
│       │   │       │   ├── workflow-state-manager.spec.ts
│       │   │       │   └── workflow-state-manager.ts
│       │   │       ├── orchestrators
│       │   │       │   ├── workflow-orchestrator.test.ts
│       │   │       │   └── workflow-orchestrator.ts
│       │   │       ├── services
│       │   │       │   ├── test-result-validator.test.ts
│       │   │       │   ├── test-result-validator.ts
│       │   │       │   ├── test-result-validator.types.ts
│       │   │       │   ├── workflow-activity-logger.ts
│       │   │       │   └── workflow.service.ts
│       │   │       ├── types.ts
│       │   │       └── workflow-domain.ts
│       │   ├── subpath-exports.test.ts
│       │   ├── tm-core.ts
│       │   └── utils
│       │       └── time.utils.ts
│       ├── tests
│       │   ├── auth
│       │   │   └── auth-refresh.test.ts
│       │   ├── integration
│       │   │   ├── auth-token-refresh.test.ts
│       │   │   ├── list-tasks.test.ts
│       │   │   └── storage
│       │   │       └── activity-logger.test.ts
│       │   ├── mocks
│       │   │   └── mock-provider.ts
│       │   ├── setup.ts
│       │   └── unit
│       │       ├── base-provider.test.ts
│       │       ├── executor.test.ts
│       │       └── smoke.test.ts
│       ├── tsconfig.json
│       └── vitest.config.ts
├── README-task-master.md
├── README.md
├── scripts
│   ├── create-worktree.sh
│   ├── dev.js
│   ├── init.js
│   ├── list-worktrees.sh
│   ├── modules
│   │   ├── ai-services-unified.js
│   │   ├── bridge-utils.js
│   │   ├── commands.js
│   │   ├── config-manager.js
│   │   ├── dependency-manager.js
│   │   ├── index.js
│   │   ├── prompt-manager.js
│   │   ├── supported-models.json
│   │   ├── sync-readme.js
│   │   ├── task-manager
│   │   │   ├── add-subtask.js
│   │   │   ├── add-task.js
│   │   │   ├── analyze-task-complexity.js
│   │   │   ├── clear-subtasks.js
│   │   │   ├── expand-all-tasks.js
│   │   │   ├── expand-task.js
│   │   │   ├── find-next-task.js
│   │   │   ├── generate-task-files.js
│   │   │   ├── is-task-dependent.js
│   │   │   ├── list-tasks.js
│   │   │   ├── migrate.js
│   │   │   ├── models.js
│   │   │   ├── move-task.js
│   │   │   ├── parse-prd
│   │   │   │   ├── index.js
│   │   │   │   ├── parse-prd-config.js
│   │   │   │   ├── parse-prd-helpers.js
│   │   │   │   ├── parse-prd-non-streaming.js
│   │   │   │   ├── parse-prd-streaming.js
│   │   │   │   └── parse-prd.js
│   │   │   ├── remove-subtask.js
│   │   │   ├── remove-task.js
│   │   │   ├── research.js
│   │   │   ├── response-language.js
│   │   │   ├── scope-adjustment.js
│   │   │   ├── set-task-status.js
│   │   │   ├── tag-management.js
│   │   │   ├── task-exists.js
│   │   │   ├── update-single-task-status.js
│   │   │   ├── update-subtask-by-id.js
│   │   │   ├── update-task-by-id.js
│   │   │   └── update-tasks.js
│   │   ├── task-manager.js
│   │   ├── ui.js
│   │   ├── update-config-tokens.js
│   │   ├── utils
│   │   │   ├── contextGatherer.js
│   │   │   ├── fuzzyTaskSearch.js
│   │   │   └── git-utils.js
│   │   └── utils.js
│   ├── task-complexity-report.json
│   ├── test-claude-errors.js
│   └── test-claude.js
├── sonar-project.properties
├── src
│   ├── ai-providers
│   │   ├── anthropic.js
│   │   ├── azure.js
│   │   ├── base-provider.js
│   │   ├── bedrock.js
│   │   ├── claude-code.js
│   │   ├── codex-cli.js
│   │   ├── gemini-cli.js
│   │   ├── google-vertex.js
│   │   ├── google.js
│   │   ├── grok-cli.js
│   │   ├── groq.js
│   │   ├── index.js
│   │   ├── lmstudio.js
│   │   ├── ollama.js
│   │   ├── openai-compatible.js
│   │   ├── openai.js
│   │   ├── openrouter.js
│   │   ├── perplexity.js
│   │   ├── xai.js
│   │   ├── zai-coding.js
│   │   └── zai.js
│   ├── constants
│   │   ├── commands.js
│   │   ├── paths.js
│   │   ├── profiles.js
│   │   ├── rules-actions.js
│   │   ├── task-priority.js
│   │   └── task-status.js
│   ├── profiles
│   │   ├── amp.js
│   │   ├── base-profile.js
│   │   ├── claude.js
│   │   ├── cline.js
│   │   ├── codex.js
│   │   ├── cursor.js
│   │   ├── gemini.js
│   │   ├── index.js
│   │   ├── kilo.js
│   │   ├── kiro.js
│   │   ├── opencode.js
│   │   ├── roo.js
│   │   ├── trae.js
│   │   ├── vscode.js
│   │   ├── windsurf.js
│   │   └── zed.js
│   ├── progress
│   │   ├── base-progress-tracker.js
│   │   ├── cli-progress-factory.js
│   │   ├── parse-prd-tracker.js
│   │   ├── progress-tracker-builder.js
│   │   └── tracker-ui.js
│   ├── prompts
│   │   ├── add-task.json
│   │   ├── analyze-complexity.json
│   │   ├── expand-task.json
│   │   ├── parse-prd.json
│   │   ├── README.md
│   │   ├── research.json
│   │   ├── schemas
│   │   │   ├── parameter.schema.json
│   │   │   ├── prompt-template.schema.json
│   │   │   ├── README.md
│   │   │   └── variant.schema.json
│   │   ├── update-subtask.json
│   │   ├── update-task.json
│   │   └── update-tasks.json
│   ├── provider-registry
│   │   └── index.js
│   ├── schemas
│   │   ├── add-task.js
│   │   ├── analyze-complexity.js
│   │   ├── base-schemas.js
│   │   ├── expand-task.js
│   │   ├── parse-prd.js
│   │   ├── registry.js
│   │   ├── update-subtask.js
│   │   ├── update-task.js
│   │   └── update-tasks.js
│   ├── task-master.js
│   ├── ui
│   │   ├── confirm.js
│   │   ├── indicators.js
│   │   └── parse-prd.js
│   └── utils
│       ├── asset-resolver.js
│       ├── create-mcp-config.js
│       ├── format.js
│       ├── getVersion.js
│       ├── logger-utils.js
│       ├── manage-gitignore.js
│       ├── path-utils.js
│       ├── profiles.js
│       ├── rule-transformer.js
│       ├── stream-parser.js
│       └── timeout-manager.js
├── test-clean-tags.js
├── test-config-manager.js
├── test-prd.txt
├── test-tag-functions.js
├── test-version-check-full.js
├── test-version-check.js
├── tests
│   ├── e2e
│   │   ├── e2e_helpers.sh
│   │   ├── parse_llm_output.cjs
│   │   ├── run_e2e.sh
│   │   ├── run_fallback_verification.sh
│   │   └── test_llm_analysis.sh
│   ├── fixtures
│   │   ├── .taskmasterconfig
│   │   ├── sample-claude-response.js
│   │   ├── sample-prd.txt
│   │   └── sample-tasks.js
│   ├── helpers
│   │   └── tool-counts.js
│   ├── integration
│   │   ├── claude-code-error-handling.test.js
│   │   ├── claude-code-optional.test.js
│   │   ├── cli
│   │   │   ├── commands.test.js
│   │   │   ├── complex-cross-tag-scenarios.test.js
│   │   │   └── move-cross-tag.test.js
│   │   ├── manage-gitignore.test.js
│   │   ├── mcp-server
│   │   │   └── direct-functions.test.js
│   │   ├── move-task-cross-tag.integration.test.js
│   │   ├── move-task-simple.integration.test.js
│   │   ├── profiles
│   │   │   ├── amp-init-functionality.test.js
│   │   │   ├── claude-init-functionality.test.js
│   │   │   ├── cline-init-functionality.test.js
│   │   │   ├── codex-init-functionality.test.js
│   │   │   ├── cursor-init-functionality.test.js
│   │   │   ├── gemini-init-functionality.test.js
│   │   │   ├── opencode-init-functionality.test.js
│   │   │   ├── roo-files-inclusion.test.js
│   │   │   ├── roo-init-functionality.test.js
│   │   │   ├── rules-files-inclusion.test.js
│   │   │   ├── trae-init-functionality.test.js
│   │   │   ├── vscode-init-functionality.test.js
│   │   │   └── windsurf-init-functionality.test.js
│   │   └── providers
│   │       └── temperature-support.test.js
│   ├── manual
│   │   ├── progress
│   │   │   ├── parse-prd-analysis.js
│   │   │   ├── test-parse-prd.js
│   │   │   └── TESTING_GUIDE.md
│   │   └── prompts
│   │       ├── prompt-test.js
│   │       └── README.md
│   ├── README.md
│   ├── setup.js
│   └── unit
│       ├── ai-providers
│       │   ├── base-provider.test.js
│       │   ├── claude-code.test.js
│       │   ├── codex-cli.test.js
│       │   ├── gemini-cli.test.js
│       │   ├── lmstudio.test.js
│       │   ├── mcp-components.test.js
│       │   ├── openai-compatible.test.js
│       │   ├── openai.test.js
│       │   ├── provider-registry.test.js
│       │   ├── zai-coding.test.js
│       │   ├── zai-provider.test.js
│       │   ├── zai-schema-introspection.test.js
│       │   └── zai.test.js
│       ├── ai-services-unified.test.js
│       ├── commands.test.js
│       ├── config-manager.test.js
│       ├── config-manager.test.mjs
│       ├── dependency-manager.test.js
│       ├── init.test.js
│       ├── initialize-project.test.js
│       ├── kebab-case-validation.test.js
│       ├── manage-gitignore.test.js
│       ├── mcp
│       │   └── tools
│       │       ├── __mocks__
│       │       │   └── move-task.js
│       │       ├── add-task.test.js
│       │       ├── analyze-complexity.test.js
│       │       ├── expand-all.test.js
│       │       ├── get-tasks.test.js
│       │       ├── initialize-project.test.js
│       │       ├── move-task-cross-tag-options.test.js
│       │       ├── move-task-cross-tag.test.js
│       │       ├── remove-task.test.js
│       │       └── tool-registration.test.js
│       ├── mcp-providers
│       │   ├── mcp-components.test.js
│       │   └── mcp-provider.test.js
│       ├── parse-prd.test.js
│       ├── profiles
│       │   ├── amp-integration.test.js
│       │   ├── claude-integration.test.js
│       │   ├── cline-integration.test.js
│       │   ├── codex-integration.test.js
│       │   ├── cursor-integration.test.js
│       │   ├── gemini-integration.test.js
│       │   ├── kilo-integration.test.js
│       │   ├── kiro-integration.test.js
│       │   ├── mcp-config-validation.test.js
│       │   ├── opencode-integration.test.js
│       │   ├── profile-safety-check.test.js
│       │   ├── roo-integration.test.js
│       │   ├── rule-transformer-cline.test.js
│       │   ├── rule-transformer-cursor.test.js
│       │   ├── rule-transformer-gemini.test.js
│       │   ├── rule-transformer-kilo.test.js
│       │   ├── rule-transformer-kiro.test.js
│       │   ├── rule-transformer-opencode.test.js
│       │   ├── rule-transformer-roo.test.js
│       │   ├── rule-transformer-trae.test.js
│       │   ├── rule-transformer-vscode.test.js
│       │   ├── rule-transformer-windsurf.test.js
│       │   ├── rule-transformer-zed.test.js
│       │   ├── rule-transformer.test.js
│       │   ├── selective-profile-removal.test.js
│       │   ├── subdirectory-support.test.js
│       │   ├── trae-integration.test.js
│       │   ├── vscode-integration.test.js
│       │   ├── windsurf-integration.test.js
│       │   └── zed-integration.test.js
│       ├── progress
│       │   └── base-progress-tracker.test.js
│       ├── prompt-manager.test.js
│       ├── prompts
│       │   ├── expand-task-prompt.test.js
│       │   └── prompt-migration.test.js
│       ├── scripts
│       │   └── modules
│       │       ├── commands
│       │       │   ├── move-cross-tag.test.js
│       │       │   └── README.md
│       │       ├── dependency-manager
│       │       │   ├── circular-dependencies.test.js
│       │       │   ├── cross-tag-dependencies.test.js
│       │       │   └── fix-dependencies-command.test.js
│       │       ├── task-manager
│       │       │   ├── add-subtask.test.js
│       │       │   ├── add-task.test.js
│       │       │   ├── analyze-task-complexity.test.js
│       │       │   ├── clear-subtasks.test.js
│       │       │   ├── complexity-report-tag-isolation.test.js
│       │       │   ├── expand-all-tasks.test.js
│       │       │   ├── expand-task.test.js
│       │       │   ├── find-next-task.test.js
│       │       │   ├── generate-task-files.test.js
│       │       │   ├── list-tasks.test.js
│       │       │   ├── models-baseurl.test.js
│       │       │   ├── move-task-cross-tag.test.js
│       │       │   ├── move-task.test.js
│       │       │   ├── parse-prd-schema.test.js
│       │       │   ├── parse-prd.test.js
│       │       │   ├── remove-subtask.test.js
│       │       │   ├── remove-task.test.js
│       │       │   ├── research.test.js
│       │       │   ├── scope-adjustment.test.js
│       │       │   ├── set-task-status.test.js
│       │       │   ├── setup.js
│       │       │   ├── update-single-task-status.test.js
│       │       │   ├── update-subtask-by-id.test.js
│       │       │   ├── update-task-by-id.test.js
│       │       │   └── update-tasks.test.js
│       │       ├── ui
│       │       │   └── cross-tag-error-display.test.js
│       │       └── utils-tag-aware-paths.test.js
│       ├── task-finder.test.js
│       ├── task-manager
│       │   ├── clear-subtasks.test.js
│       │   ├── move-task.test.js
│       │   ├── tag-boundary.test.js
│       │   └── tag-management.test.js
│       ├── task-master.test.js
│       ├── ui
│       │   └── indicators.test.js
│       ├── ui.test.js
│       ├── utils-strip-ansi.test.js
│       └── utils.test.js
├── tsconfig.json
├── tsdown.config.ts
├── turbo.json
└── update-task-migration-plan.md
```

# Files

--------------------------------------------------------------------------------
/packages/tm-core/src/modules/git/adapters/git-adapter.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * GitAdapter - Safe git operations wrapper with validation and safety checks.
 * Handles all git operations (branching, committing, pushing) with built-in safety gates.
 *
 * @module git-adapter
 */

import path from 'path';
import fs from 'fs-extra';
import { type SimpleGit, type StatusResult, simpleGit } from 'simple-git';
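
// Illustrative usage of the safety gates this adapter provides (a sketch,
// not part of the module itself; the path and branch name are placeholders):
//
//   const git = new GitAdapter('/abs/path/to/project');
//   await git.ensureGitRepository();      // fail fast outside a repository
//   await git.ensureCleanWorkingTree();   // refuse to run with a dirty tree
//   await git.createAndCheckoutBranch('feature/task-2-4');
//   await git.stageFiles(['.']);
//   await git.createCommit('Add feature', {
//     metadata: { taskId: '2.4' },
//     enforceNonDefaultBranch: true
//   });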

/**
 * GitAdapter class for safe git operations
 */
export class GitAdapter {
	public projectPath: string;
	public git: SimpleGit;

	/**
	 * Creates a new GitAdapter instance.
	 *
	 * @param {string} projectPath - Absolute path to the project directory
	 * @throws {Error} If projectPath is invalid or not absolute
	 *
	 * @example
	 * const git = new GitAdapter('/path/to/project');
	 * await git.ensureGitRepository();
	 */
	constructor(projectPath: string) {
		// Validate project path
		if (!projectPath) {
			throw new Error('Project path is required');
		}

		if (!path.isAbsolute(projectPath)) {
			throw new Error('Project path must be an absolute path');
		}

		// Normalize path
		this.projectPath = path.normalize(projectPath);

		// Initialize simple-git
		this.git = simpleGit(this.projectPath);
	}

	/**
	 * Checks if the current directory is a git repository.
	 * Looks for .git directory or file (worktree/submodule).
	 *
	 * @returns {Promise<boolean>} True if in a git repository
	 *
	 * @example
	 * const isRepo = await git.isGitRepository();
	 * if (!isRepo) {
	 *   console.log('Not a git repository');
	 * }
	 */
	async isGitRepository(): Promise<boolean> {
		try {
			// Check if .git exists (directory or file for submodules/worktrees)
			const gitPath = path.join(this.projectPath, '.git');

			if (await fs.pathExists(gitPath)) {
				return true;
			}

			// Try to find git root from subdirectory
			try {
				await this.git.revparse(['--git-dir']);
				return true;
			} catch {
				return false;
			}
		} catch {
			return false;
		}
	}

	/**
	 * Validates that git is installed and accessible.
	 * Checks git binary availability and version.
	 *
	 * @returns {Promise<void>}
	 * @throws {Error} If git is not installed or not accessible
	 *
	 * @example
	 * await git.validateGitInstallation();
	 * console.log('Git is installed');
	 */
	async validateGitInstallation(): Promise<void> {
		try {
			await this.git.version();
		} catch (error) {
			const errorMessage =
				error instanceof Error ? error.message : String(error);
			throw new Error(
				`Git is not installed or not accessible: ${errorMessage}`
			);
		}
	}

	/**
	 * Gets the git version information.
	 *
	 * @returns {Promise<{major: number, minor: number, patch: number, agent: string}>}
	 *
	 * @example
	 * const version = await git.getGitVersion();
	 * console.log(`Git version: ${version.major}.${version.minor}.${version.patch}`);
	 */
	async getGitVersion(): Promise<{
		major: number;
		minor: number;
		patch: number;
		agent: string;
	}> {
		const versionResult = await this.git.version();
		return {
			major: versionResult.major,
			minor: versionResult.minor,
			patch:
				typeof versionResult.patch === 'string'
					? parseInt(versionResult.patch, 10)
					: versionResult.patch || 0,
			agent: versionResult.agent
		};
	}

	/**
	 * Gets the repository root path.
	 * Works even when called from a subdirectory.
	 *
	 * @returns {Promise<string>} Absolute path to repository root
	 * @throws {Error} If not in a git repository
	 *
	 * @example
	 * const root = await git.getRepositoryRoot();
	 * console.log(`Repository root: ${root}`);
	 */
	async getRepositoryRoot(): Promise<string> {
		try {
			const result = await this.git.revparse(['--show-toplevel']);
			return path.normalize(result.trim());
		} catch {
			throw new Error(`not a git repository: ${this.projectPath}`);
		}
	}

	/**
	 * Validates the repository state.
	 * Checks for corruption and basic integrity.
	 *
	 * @returns {Promise<void>}
	 * @throws {Error} If repository is corrupted or invalid
	 *
	 * @example
	 * await git.validateRepository();
	 * console.log('Repository is valid');
	 */
	async validateRepository(): Promise<void> {
		// Check if it's a git repository
		const isRepo = await this.isGitRepository();
		if (!isRepo) {
			throw new Error(`not a git repository: ${this.projectPath}`);
		}

		// Try to get repository status to verify it's not corrupted
		try {
			await this.git.status();
		} catch (error) {
			const errorMessage =
				error instanceof Error ? error.message : String(error);
			throw new Error(`Repository validation failed: ${errorMessage}`);
		}
	}

	/**
	 * Ensures we're in a valid git repository before performing operations.
	 * Convenience method that throws descriptive errors.
	 *
	 * @returns {Promise<void>}
	 * @throws {Error} If not in a valid git repository
	 *
	 * @example
	 * await git.ensureGitRepository();
	 * // Safe to perform git operations after this
	 */
	async ensureGitRepository(): Promise<void> {
		const isRepo = await this.isGitRepository();
		if (!isRepo) {
			throw new Error(
				`not a git repository: ${this.projectPath}\n` +
					`Please run this command from within a git repository, or initialize one with 'git init'.`
			);
		}
	}

	/**
	 * Checks if the working tree is clean (no uncommitted changes).
	 * A clean working tree has no staged, unstaged, or untracked files.
	 *
	 * @returns {Promise<boolean>} True if working tree is clean
	 *
	 * @example
	 * const isClean = await git.isWorkingTreeClean();
	 * if (!isClean) {
	 *   console.log('Working tree has uncommitted changes');
	 * }
	 */
	async isWorkingTreeClean(): Promise<boolean> {
		const status = await this.git.status();
		return status.isClean();
	}

	/**
	 * Gets the detailed status of the working tree.
	 * Returns raw status from simple-git with all file changes.
	 *
	 * @returns {Promise<StatusResult>} Detailed status object
	 *
	 * @example
	 * const status = await git.getStatus();
	 * console.log('Modified files:', status.modified);
	 * console.log('Staged files:', status.staged);
	 */
	async getStatus(): Promise<StatusResult> {
		return await this.git.status();
	}

	/**
	 * Checks if there are any uncommitted changes in the working tree.
	 * Includes staged, unstaged, and untracked files.
	 *
	 * @returns {Promise<boolean>} True if there are uncommitted changes
	 *
	 * @example
	 * const hasChanges = await git.hasUncommittedChanges();
	 * if (hasChanges) {
	 *   console.log('Please commit your changes before proceeding');
	 * }
	 */
	async hasUncommittedChanges(): Promise<boolean> {
		const status = await this.git.status();
		return !status.isClean();
	}

	/**
	 * Checks if there are any staged changes ready to commit.
	 *
	 * @returns {Promise<boolean>} True if there are staged changes
	 *
	 * @example
	 * const hasStaged = await git.hasStagedChanges();
	 * if (hasStaged) {
	 *   console.log('Ready to commit');
	 * }
	 */
	async hasStagedChanges(): Promise<boolean> {
		const status = await this.git.status();
		return status.staged.length > 0;
	}

	/**
	 * Checks if there are any untracked files in the working tree.
	 *
	 * @returns {Promise<boolean>} True if there are untracked files
	 *
	 * @example
	 * const hasUntracked = await git.hasUntrackedFiles();
	 * if (hasUntracked) {
	 *   console.log('You have untracked files');
	 * }
	 */
	async hasUntrackedFiles(): Promise<boolean> {
		const status = await this.git.status();
		return status.not_added.length > 0;
	}

	/**
	 * Gets a summary of the working tree status with counts.
	 *
	 * @returns {Promise<{isClean: boolean, staged: number, modified: number, deleted: number, untracked: number, totalChanges: number}>}
	 *
	 * @example
	 * const summary = await git.getStatusSummary();
	 * console.log(`${summary.totalChanges} total changes`);
	 */
	async getStatusSummary(): Promise<{
		isClean: boolean;
		staged: number;
		modified: number;
		deleted: number;
		untracked: number;
		totalChanges: number;
	}> {
		const status = await this.git.status();
		const staged = status.staged.length;
		const modified = status.modified.length;
		const deleted = status.deleted.length;
		const untracked = status.not_added.length;
		const totalChanges = staged + modified + deleted + untracked;

		return {
			isClean: status.isClean(),
			staged,
			modified,
			deleted,
			untracked,
			totalChanges
		};
	}

	/**
	 * Ensures the working tree is clean before performing operations.
	 * Throws an error with details if there are uncommitted changes.
	 *
	 * @returns {Promise<void>}
	 * @throws {Error} If working tree is not clean
	 *
	 * @example
	 * await git.ensureCleanWorkingTree();
	 * // Safe to perform git operations that require clean state
	 */
	async ensureCleanWorkingTree(): Promise<void> {
		const status = await this.git.status();
		if (!status.isClean()) {
			const summary = await this.getStatusSummary();
			throw new Error(
				`working tree is not clean: ${this.projectPath}\n` +
					`Staged: ${summary.staged}, Modified: ${summary.modified}, ` +
					`Deleted: ${summary.deleted}, Untracked: ${summary.untracked}\n` +
					`Please commit or stash your changes before proceeding.`
			);
		}
	}

	/**
	 * Gets the name of the current branch.
	 *
	 * @returns {Promise<string>} Current branch name
	 * @throws {Error} If unable to determine current branch
	 *
	 * @example
	 * const branch = await git.getCurrentBranch();
	 * console.log(`Currently on: ${branch}`);
	 */
	async getCurrentBranch(): Promise<string> {
		const status = await this.git.status();
		return status.current || 'HEAD';
	}

	/**
	 * Lists all local branches in the repository.
	 *
	 * @returns {Promise<string[]>} Array of branch names
	 *
	 * @example
	 * const branches = await git.listBranches();
	 * console.log('Available branches:', branches);
	 */
	async listBranches(): Promise<string[]> {
		const branchSummary = await this.git.branchLocal();
		return Object.keys(branchSummary.branches);
	}

	/**
	 * Checks if a branch exists in the repository.
	 *
	 * @param {string} branchName - Name of branch to check
	 * @returns {Promise<boolean>} True if branch exists
	 *
	 * @example
	 * const exists = await git.branchExists('feature-branch');
	 * if (!exists) {
	 *   console.log('Branch does not exist');
	 * }
	 */
	async branchExists(branchName: string): Promise<boolean> {
		const branches = await this.listBranches();
		return branches.includes(branchName);
	}

	/**
	 * Creates a new branch, optionally checking it out.
	 *
	 * @param {string} branchName - Name for the new branch
	 * @param {Object} options - Branch creation options
	 * @param {boolean} options.checkout - Whether to checkout after creation
	 * @returns {Promise<void>}
	 * @throws {Error} If branch already exists or working tree is dirty (when checkout=true)
	 *
	 * @example
	 * await git.createBranch('feature-branch');
	 * await git.createBranch('feature-branch', { checkout: true });
	 */
	async createBranch(
		branchName: string,
		options: { checkout?: boolean } = {}
	): Promise<void> {
		// Check if branch already exists
		const exists = await this.branchExists(branchName);
		if (exists) {
			throw new Error(`branch already exists: ${branchName}`);
		}

		// If checkout is requested, ensure working tree is clean
		if (options.checkout) {
			await this.ensureCleanWorkingTree();
		}

		// Create the branch
		await this.git.branch([branchName]);

		// Checkout if requested
		if (options.checkout) {
			await this.git.checkout(branchName);
		}
	}

	/**
	 * Checks out an existing branch.
	 *
	 * @param {string} branchName - Name of branch to checkout
	 * @param {Object} options - Checkout options
	 * @param {boolean} options.force - Force checkout even with uncommitted changes
	 * @returns {Promise<void>}
	 * @throws {Error} If branch doesn't exist or working tree is dirty (unless force=true)
	 *
	 * @example
	 * await git.checkoutBranch('feature-branch');
	 * await git.checkoutBranch('feature-branch', { force: true });
	 */
	async checkoutBranch(
		branchName: string,
		options: { force?: boolean } = {}
	): Promise<void> {
		// Check if branch exists
		const exists = await this.branchExists(branchName);
		if (!exists) {
			throw new Error(`branch does not exist: ${branchName}`);
		}

		// Ensure clean working tree unless force is specified
		if (!options.force) {
			await this.ensureCleanWorkingTree();
		}

		// Checkout the branch
		const checkoutOptions = options.force ? ['-f', branchName] : [branchName];
		await this.git.checkout(checkoutOptions);
	}

	/**
	 * Creates a new branch and checks it out.
	 * Convenience method combining createBranch and checkoutBranch.
	 *
	 * @param {string} branchName - Name for the new branch
	 * @returns {Promise<void>}
	 * @throws {Error} If branch already exists or working tree is dirty
	 *
	 * @example
	 * await git.createAndCheckoutBranch('new-feature');
	 */
	async createAndCheckoutBranch(branchName: string): Promise<void> {
		// Ensure working tree is clean
		await this.ensureCleanWorkingTree();

		// Check if branch already exists
		const exists = await this.branchExists(branchName);
		if (exists) {
			throw new Error(`branch already exists: ${branchName}`);
		}

		// Create and checkout the branch
		await this.git.checkoutLocalBranch(branchName);
	}

	/**
	 * Deletes a branch.
	 *
	 * @param {string} branchName - Name of branch to delete
	 * @param {Object} options - Delete options
	 * @param {boolean} options.force - Force delete even if unmerged
	 * @returns {Promise<void>}
	 * @throws {Error} If branch doesn't exist or is currently checked out
	 *
	 * @example
	 * await git.deleteBranch('old-feature');
	 * await git.deleteBranch('unmerged-feature', { force: true });
	 */
	async deleteBranch(
		branchName: string,
		options: { force?: boolean } = {}
	): Promise<void> {
		// Check if branch exists
		const exists = await this.branchExists(branchName);
		if (!exists) {
			throw new Error(`branch does not exist: ${branchName}`);
		}

		// Check if trying to delete current branch
		const current = await this.getCurrentBranch();
		if (current === branchName) {
			throw new Error(`cannot delete current branch: ${branchName}`);
		}

		// Delete the branch
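		// ('-D' force-deletes even if unmerged; '-d' refuses to delete an unmerged branch)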
		const deleteOptions = options.force
			? ['-D', branchName]
			: ['-d', branchName];
		await this.git.branch(deleteOptions);
	}

	/**
	 * Stages files for commit.
	 *
	 * @param {string[]} files - Array of file paths to stage
	 * @returns {Promise<void>}
	 *
	 * @example
	 * await git.stageFiles(['file1.txt', 'file2.txt']);
	 * await git.stageFiles(['.']); // Stage all changes
	 */
	async stageFiles(files: string[]): Promise<void> {
		await this.git.add(files);
	}

	/**
	 * Unstages files that were previously staged.
	 *
	 * @param {string[]} files - Array of file paths to unstage
	 * @returns {Promise<void>}
	 *
	 * @example
	 * await git.unstageFiles(['file1.txt']);
	 */
	async unstageFiles(files: string[]): Promise<void> {
		await this.git.reset(['HEAD', '--', ...files]);
	}

	/**
	 * Creates a commit with optional metadata embedding.
	 *
	 * @param {string} message - Commit message
	 * @param {Object} options - Commit options
	 * @param {Object} options.metadata - Metadata to embed in commit message
	 * @param {boolean} options.allowEmpty - Allow empty commits
	 * @param {boolean} options.enforceNonDefaultBranch - Prevent commits on default branch
	 * @param {boolean} options.force - Force commit even on default branch
	 * @returns {Promise<void>}
	 * @throws {Error} If no staged changes (unless allowEmpty), or on default branch (unless force)
	 *
	 * @example
	 * await git.createCommit('Add feature');
	 * await git.createCommit('Add feature', {
	 *   metadata: { taskId: '2.4', phase: 'implementation' }
	 * });
	 * await git.createCommit('Add feature', {
	 *   enforceNonDefaultBranch: true
	 * });
	 */
	async createCommit(
		message: string,
		options: {
			metadata?: Record<string, string>;
			allowEmpty?: boolean;
			enforceNonDefaultBranch?: boolean;
			force?: boolean;
		} = {}
	): Promise<void> {
		// Check if on default branch and enforcement is requested
		if (options.enforceNonDefaultBranch && !options.force) {
			const currentBranch = await this.getCurrentBranch();
			const defaultBranches = ['main', 'master', 'develop'];
			if (defaultBranches.includes(currentBranch)) {
				throw new Error(
					`cannot commit to default branch: ${currentBranch}\n` +
						`Please create a feature branch or use force option.`
				);
			}
		}

		// Check for staged changes unless allowEmpty
		if (!options.allowEmpty) {
			const hasStaged = await this.hasStagedChanges();
			if (!hasStaged) {
				throw new Error('no staged changes to commit');
			}
		}

		// Build commit arguments
		const commitArgs: string[] = ['commit'];

		// Add message
		commitArgs.push('-m', message);

		// Add metadata as separate commit message lines
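		// Each '-m' becomes its own paragraph in the commit message, so the
		// metadata ends up as trailer-style lines, e.g.:
		//   Add feature
		//
		//   [taskId:2.4]
		//   [phase:implementation]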
		if (options.metadata) {
			commitArgs.push('-m', ''); // Empty line separator
			for (const [key, value] of Object.entries(options.metadata)) {
				commitArgs.push('-m', `[${key}:${value}]`);
			}
		}

		// Add flags
		commitArgs.push('--no-gpg-sign');
		if (options.allowEmpty) {
			commitArgs.push('--allow-empty');
		}

		await this.git.raw(commitArgs);
	}

	/**
	 * Gets the commit log history.
	 *
	 * @param {Object} options - Log options
	 * @param {number} options.maxCount - Maximum number of commits to return
	 * @returns {Promise<Array>} Array of commit objects
	 *
	 * @example
	 * const log = await git.getCommitLog();
	 * const recentLog = await git.getCommitLog({ maxCount: 10 });
	 */
	async getCommitLog(options: { maxCount?: number } = {}): Promise<any[]> {
		const logOptions: any = {
			format: {
				hash: '%H',
				date: '%ai',
				message: '%B', // Full commit message including body
				author_name: '%an',
				author_email: '%ae'
			}
		};
		if (options.maxCount) {
			logOptions.maxCount = options.maxCount;
		}

		const log = await this.git.log(logOptions);
		return [...log.all];
	}

	/**
	 * Gets the last commit.
	 *
	 * @returns {Promise<any>} Last commit object
	 *
	 * @example
	 * const lastCommit = await git.getLastCommit();
	 * console.log(lastCommit.message);
	 */
	async getLastCommit(): Promise<any> {
		const log = await this.git.log({
			maxCount: 1,
			format: {
				hash: '%H',
				date: '%ai',
				message: '%B', // Full commit message including body
				author_name: '%an',
				author_email: '%ae'
			}
		});
		return log.latest;
	}

	/**
	 * Detects the default branch for the repository.
	 * Returns the current branch if it is main/master/develop; otherwise returns
	 * the first of those default branches that exists locally, falling back to 'main'.
	 *
	 * @returns {Promise<string>} Default branch name
	 *
	 * @example
	 * const defaultBranch = await git.getDefaultBranch();
	 * console.log(`Default branch: ${defaultBranch}`);
	 */
	async getDefaultBranch(): Promise<string> {
		const currentBranch = await this.getCurrentBranch();
		const defaultBranches = ['main', 'master', 'develop'];

		if (defaultBranches.includes(currentBranch)) {
			return currentBranch;
		}

		// If not on a default branch, check which default branches exist
		const branches = await this.listBranches();
		for (const defaultBranch of defaultBranches) {
			if (branches.includes(defaultBranch)) {
				return defaultBranch;
			}
		}

		// Fallback to main
		return 'main';
	}

	/**
	 * Checks if a given branch name is considered a default branch.
	 * Default branches are: main, master, develop.
	 *
	 * @param {string} branchName - Branch name to check
	 * @returns {Promise<boolean>} True if branch is a default branch
	 *
	 * @example
	 * const isDefault = await git.isDefaultBranch('main');
	 * if (isDefault) {
	 *   console.log('This is a default branch');
	 * }
	 */
	async isDefaultBranch(branchName: string): Promise<boolean> {
		const defaultBranches = ['main', 'master', 'develop'];
		return defaultBranches.includes(branchName);
	}

	/**
	 * Checks if currently on a default branch.
	 *
	 * @returns {Promise<boolean>} True if on a default branch
	 *
	 * @example
	 * const onDefault = await git.isOnDefaultBranch();
	 * if (onDefault) {
	 *   console.log('Warning: You are on a default branch');
	 * }
	 */
	async isOnDefaultBranch(): Promise<boolean> {
		const currentBranch = await this.getCurrentBranch();
		return await this.isDefaultBranch(currentBranch);
	}

	/**
	 * Ensures the current branch is not a default branch.
	 * Throws an error if on a default branch.
	 *
	 * @returns {Promise<void>}
	 * @throws {Error} If currently on a default branch
	 *
	 * @example
	 * await git.ensureNotOnDefaultBranch();
	 * // Safe to perform operations that shouldn't happen on default branches
	 */
	async ensureNotOnDefaultBranch(): Promise<void> {
		const onDefault = await this.isOnDefaultBranch();
		if (onDefault) {
			const currentBranch = await this.getCurrentBranch();
			throw new Error(
				`currently on default branch: ${currentBranch}\n` +
					`Please create a feature branch before proceeding.`
			);
		}
	}

	/**
	 * Checks if the repository has any remotes configured.
	 *
	 * @returns {Promise<boolean>} True if remotes exist
	 *
	 * @example
	 * const hasRemote = await git.hasRemote();
	 * if (!hasRemote) {
	 *   console.log('No remotes configured');
	 * }
	 */
	async hasRemote(): Promise<boolean> {
		const remotes = await this.git.getRemotes();
		return remotes.length > 0;
	}

	/**
	 * Gets all configured remotes.
	 *
	 * @returns {Promise<Array>} Array of remote objects
	 *
	 * @example
	 * const remotes = await git.getRemotes();
	 * console.log('Remotes:', remotes);
	 */
	async getRemotes(): Promise<any[]> {
		return await this.git.getRemotes(true);
	}
}

```

--------------------------------------------------------------------------------
/tests/unit/profiles/selective-profile-removal.test.js:
--------------------------------------------------------------------------------

```javascript
import fs from 'fs';
import path from 'path';
import os from 'os';
import { jest } from '@jest/globals';
import {
	removeProfileRules,
	getRulesProfile
} from '../../../src/utils/rule-transformer.js';
import { removeTaskMasterMCPConfiguration } from '../../../src/utils/create-mcp-config.js';

// Mock logger
const mockLog = {
	info: jest.fn(),
	error: jest.fn(),
	debug: jest.fn(),
	warn: jest.fn()
};

// Mock the logger import
jest.mock('../../../scripts/modules/utils.js', () => ({
	log: (level, message) => mockLog[level]?.(message)
}));

describe('Selective Rules Removal', () => {
	let tempDir;
	let mockExistsSync;
	let mockRmSync;
	let mockReaddirSync;
	let mockReadFileSync;
	let mockWriteFileSync;
	let mockMkdirSync;
	let mockStatSync;
	let originalConsoleLog;

	beforeEach(() => {
		jest.clearAllMocks();

		// Mock console.log to prevent JSON parsing issues in Jest
		originalConsoleLog = console.log;
		console.log = jest.fn();

		// Create temp directory for testing
		tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'task-master-test-'));

		// Set up spies on fs methods
		mockExistsSync = jest.spyOn(fs, 'existsSync');
		mockRmSync = jest.spyOn(fs, 'rmSync').mockImplementation(() => {});
		mockReaddirSync = jest.spyOn(fs, 'readdirSync');
		mockReadFileSync = jest.spyOn(fs, 'readFileSync');
		mockWriteFileSync = jest
			.spyOn(fs, 'writeFileSync')
			.mockImplementation(() => {});
		mockMkdirSync = jest.spyOn(fs, 'mkdirSync').mockImplementation(() => {});
		mockStatSync = jest.spyOn(fs, 'statSync').mockImplementation((filePath) => {
			// Mock stat objects for files and directories
			if (filePath.includes('taskmaster') && !filePath.endsWith('.mdc')) {
				// This is the taskmaster directory
				return { isDirectory: () => true, isFile: () => false };
			} else {
				// This is a file
				return { isDirectory: () => false, isFile: () => true };
			}
		});
	});

	afterEach(() => {
		// Restore console.log
		console.log = originalConsoleLog;

		// Clean up temp directory
		try {
			fs.rmSync(tempDir, { recursive: true, force: true });
		} catch (error) {
			// Ignore cleanup errors
		}

		// Restore all mocked functions
		jest.restoreAllMocks();
	});

	describe('removeProfileRules - Selective File Removal', () => {
		it('should only remove Task Master files, preserving existing rules', () => {
			const projectRoot = '/test/project';
			const cursorProfile = getRulesProfile('cursor');

			// Mock profile directory exists
			mockExistsSync.mockImplementation((filePath) => {
				if (filePath.includes('.cursor')) return true;
				if (filePath.includes('.cursor/rules')) return true;
				if (filePath.includes('mcp.json')) return true;
				return false;
			});

			// Mock MCP config file
			const mockMcpConfig = {
				mcpServers: {
					'task-master-ai': {
						command: 'npx',
						args: ['task-master-ai']
					}
				}
			};
			mockReadFileSync.mockReturnValue(JSON.stringify(mockMcpConfig));

			// Mock sequential calls to readdirSync to simulate the removal process
			mockReaddirSync
				// First call - get initial directory contents (rules directory)
				.mockReturnValueOnce([
					'cursor_rules.mdc', // Task Master file
					'taskmaster', // Task Master subdirectory
					'self_improve.mdc', // Task Master file
					'custom_rule.mdc', // Existing file (not Task Master)
					'my_company_rules.mdc' // Existing file (not Task Master)
				])
				// Second call - get taskmaster subdirectory contents
				.mockReturnValueOnce([
					'dev_workflow.mdc', // Task Master file in subdirectory
					'taskmaster.mdc' // Task Master file in subdirectory
				])
				// Third call - check remaining files after removal
				.mockReturnValueOnce([
					'custom_rule.mdc', // Remaining existing file
					'my_company_rules.mdc' // Remaining existing file
				])
				// Fourth call - check profile directory contents (after file removal)
				.mockReturnValueOnce([
					'custom_rule.mdc', // Remaining existing file
					'my_company_rules.mdc' // Remaining existing file
				])
				// Fifth call - check profile directory contents
				.mockReturnValueOnce(['rules', 'mcp.json']);

			const result = removeProfileRules(projectRoot, cursorProfile);

			// The function should succeed in removing files even if the final directory check fails
			expect(result.filesRemoved).toEqual([
				'cursor_rules.mdc',
				'taskmaster/dev_workflow.mdc',
				'self_improve.mdc',
				'taskmaster/taskmaster.mdc'
			]);
			expect(result.notice).toContain('Preserved 2 existing rule files');

			// The function may fail due to directory reading issues in the test environment,
			// but the core functionality (file removal) should work
			if (result.success) {
				expect(result.success).toBe(true);
			} else {
				// If it fails, it should be due to directory reading, not file removal
				expect(result.error).toContain('ENOENT');
				expect(result.filesRemoved.length).toBeGreaterThan(0);
			}

			// Verify only Task Master files were removed
			expect(mockRmSync).toHaveBeenCalledWith(
				path.join(projectRoot, '.cursor/rules/cursor_rules.mdc'),
				{ force: true }
			);
			expect(mockRmSync).toHaveBeenCalledWith(
				path.join(projectRoot, '.cursor/rules/taskmaster/dev_workflow.mdc'),
				{ force: true }
			);
			expect(mockRmSync).toHaveBeenCalledWith(
				path.join(projectRoot, '.cursor/rules/self_improve.mdc'),
				{ force: true }
			);
			expect(mockRmSync).toHaveBeenCalledWith(
				path.join(projectRoot, '.cursor/rules/taskmaster/taskmaster.mdc'),
				{ force: true }
			);

			// Verify rules directory was NOT removed (still has other files)
			expect(mockRmSync).not.toHaveBeenCalledWith(
				path.join(projectRoot, '.cursor/rules'),
				{ recursive: true, force: true }
			);

			// Verify profile directory was NOT removed
			expect(mockRmSync).not.toHaveBeenCalledWith(
				path.join(projectRoot, '.cursor'),
				{ recursive: true, force: true }
			);
		});

		it('should remove empty rules directory if only Task Master files existed', () => {
			const projectRoot = '/test/project';
			const cursorProfile = getRulesProfile('cursor');

			// Mock profile directory exists
			mockExistsSync.mockImplementation((filePath) => {
				if (filePath.includes('.cursor')) return true;
				if (filePath.includes('.cursor/rules')) return true;
				if (filePath.includes('mcp.json')) return true;
				return false;
			});

			// Mock MCP config file
			const mockMcpConfig = {
				mcpServers: {
					'task-master-ai': {
						command: 'npx',
						args: ['task-master-ai']
					}
				}
			};
			mockReadFileSync.mockReturnValue(JSON.stringify(mockMcpConfig));

			// Mock sequential calls to readdirSync to simulate the removal process
			mockReaddirSync
				// First call - get initial directory contents (rules directory)
				.mockReturnValueOnce([
					'cursor_rules.mdc',
					'taskmaster', // subdirectory
					'self_improve.mdc'
				])
				// Second call - get taskmaster subdirectory contents
				.mockReturnValueOnce(['dev_workflow.mdc', 'taskmaster.mdc'])
				// Third call - check remaining files after removal (should be empty)
				.mockReturnValueOnce([]) // Empty after removal
				// Fourth call - check profile directory contents
				.mockReturnValueOnce(['mcp.json']);

			const result = removeProfileRules(projectRoot, cursorProfile);

			// The function should succeed in removing files even if the final directory check fails
			expect(result.filesRemoved).toEqual([
				'cursor_rules.mdc',
				'taskmaster/dev_workflow.mdc',
				'self_improve.mdc',
				'taskmaster/taskmaster.mdc'
			]);

			// The function may fail due to directory reading issues in the test environment,
			// but the core functionality (file removal) should work
			if (result.success) {
				expect(result.success).toBe(true);
				// Verify rules directory was removed when empty
				expect(mockRmSync).toHaveBeenCalledWith(
					path.join(projectRoot, '.cursor/rules'),
					{ recursive: true, force: true }
				);
			} else {
				// If it fails, it should be due to directory reading, not file removal
				expect(result.error).toContain('ENOENT');
				expect(result.filesRemoved.length).toBeGreaterThan(0);
				// Verify individual files were removed even if directory removal failed
				expect(mockRmSync).toHaveBeenCalledWith(
					path.join(projectRoot, '.cursor/rules/cursor_rules.mdc'),
					{ force: true }
				);
				expect(mockRmSync).toHaveBeenCalledWith(
					path.join(projectRoot, '.cursor/rules/taskmaster/dev_workflow.mdc'),
					{ force: true }
				);
			}
		});

		it('should remove entire profile directory if completely empty and all rules were Task Master rules and MCP config deleted', () => {
			const projectRoot = '/test/project';
			const cursorProfile = getRulesProfile('cursor');

			// Mock profile directory exists
			mockExistsSync.mockImplementation((filePath) => {
				if (filePath.includes('.cursor')) return true;
				if (filePath.includes('.cursor/rules')) return true;
				if (filePath.includes('mcp.json')) return true;
				return false;
			});

			// Mock sequence: rules dir has only Task Master files, then empty, then profile dir empty
			mockReaddirSync
				.mockReturnValueOnce(['cursor_rules.mdc']) // Only Task Master files
				.mockReturnValueOnce([]) // rules dir empty after removal
				.mockReturnValueOnce([]); // profile dir empty after all cleanup

			// Mock MCP config with only Task Master (will be completely deleted)
			const mockMcpConfig = {
				mcpServers: {
					'task-master-ai': {
						command: 'npx',
						args: ['task-master-ai']
					}
				}
			};
			mockReadFileSync.mockReturnValue(JSON.stringify(mockMcpConfig));

			const result = removeProfileRules(projectRoot, cursorProfile);

			expect(result.success).toBe(true);
			expect(result.profileDirRemoved).toBe(true);
			expect(result.mcpResult.deleted).toBe(true);

			// Verify profile directory was removed when completely empty and conditions met
			expect(mockRmSync).toHaveBeenCalledWith(
				path.join(projectRoot, '.cursor'),
				{ recursive: true, force: true }
			);
		});

		it('should NOT remove profile directory if existing rules were preserved, even if MCP config deleted', () => {
			const projectRoot = '/test/project';
			const cursorProfile = getRulesProfile('cursor');

			// Mock profile directory exists
			mockExistsSync.mockImplementation((filePath) => {
				if (filePath.includes('.cursor')) return true;
				if (filePath.includes('.cursor/rules')) return true;
				if (filePath.includes('mcp.json')) return true;
				return false;
			});

			// Mock sequence: mixed rules, some remaining after removal, profile dir not empty
			mockReaddirSync
				.mockReturnValueOnce(['cursor_rules.mdc', 'my_custom_rule.mdc']) // Mixed files
				.mockReturnValueOnce(['my_custom_rule.mdc']) // Custom rule remains
				.mockReturnValueOnce(['rules', 'mcp.json']); // Profile dir has remaining content

			// Mock MCP config with only Task Master (will be completely deleted)
			const mockMcpConfig = {
				mcpServers: {
					'task-master-ai': {
						command: 'npx',
						args: ['task-master-ai']
					}
				}
			};
			mockReadFileSync.mockReturnValue(JSON.stringify(mockMcpConfig));

			const result = removeProfileRules(projectRoot, cursorProfile);

			expect(result.success).toBe(true);
			expect(result.profileDirRemoved).toBe(false);
			expect(result.mcpResult.deleted).toBe(true);

			// Verify profile directory was NOT removed (existing rules preserved)
			expect(mockRmSync).not.toHaveBeenCalledWith(
				path.join(projectRoot, '.cursor'),
				{ recursive: true, force: true }
			);
		});

		it('should NOT remove profile directory if MCP config has other servers, even if all rules were Task Master rules', () => {
			const projectRoot = '/test/project';
			const cursorProfile = getRulesProfile('cursor');

			// Mock profile directory exists
			mockExistsSync.mockImplementation((filePath) => {
				if (filePath.includes('.cursor')) return true;
				if (filePath.includes('.cursor/rules')) return true;
				if (filePath.includes('mcp.json')) return true;
				return false;
			});

			// Mock sequence: only Task Master rules, rules dir removed, but profile dir not empty due to MCP
			mockReaddirSync
				.mockReturnValueOnce(['cursor_rules.mdc']) // Only Task Master files
				.mockReturnValueOnce(['my_custom_rule.mdc']) // rules dir has other files remaining
				.mockReturnValueOnce(['rules', 'mcp.json']); // Profile dir has rules and MCP config remaining

			// Mock MCP config with multiple servers (Task Master will be removed, others preserved)
			const mockMcpConfig = {
				mcpServers: {
					'task-master-ai': {
						command: 'npx',
						args: ['task-master-ai']
					},
					'other-server': {
						command: 'node',
						args: ['other-server.js']
					}
				}
			};
			mockReadFileSync.mockReturnValue(JSON.stringify(mockMcpConfig));

			const result = removeProfileRules(projectRoot, cursorProfile);

			expect(result.success).toBe(true);
			expect(result.profileDirRemoved).toBe(false);
			expect(result.mcpResult.deleted).toBe(false);
			expect(result.mcpResult.hasOtherServers).toBe(true);

			// Verify profile directory was NOT removed (MCP config preserved)
			expect(mockRmSync).not.toHaveBeenCalledWith(
				path.join(projectRoot, '.cursor'),
				{ recursive: true, force: true }
			);
		});

		it('should NOT remove profile directory if other files/folders exist, even if all other conditions are met', () => {
			const projectRoot = '/test/project';
			const cursorProfile = getRulesProfile('cursor');

			// Mock profile directory exists
			mockExistsSync.mockImplementation((filePath) => {
				if (filePath.includes('.cursor')) return true;
				if (filePath.includes('.cursor/rules')) return true;
				if (filePath.includes('mcp.json')) return true;
				return false;
			});

			// Mock sequence: only Task Master rules, rules dir removed, but profile dir has other files/folders
			mockReaddirSync
				.mockReturnValueOnce(['cursor_rules.mdc']) // Only Task Master files (initial check)
				.mockReturnValueOnce(['cursor_rules.mdc']) // Task Master files list for filtering
				.mockReturnValueOnce([]) // Rules dir empty after removal (not used since no remaining files)
				.mockReturnValueOnce(['workflows', 'custom-config.json']); // Profile dir has other files/folders

			// Mock MCP config with only Task Master (will be completely deleted)
			const mockMcpConfig = {
				mcpServers: {
					'task-master-ai': {
						command: 'npx',
						args: ['task-master-ai']
					}
				}
			};
			mockReadFileSync.mockReturnValue(JSON.stringify(mockMcpConfig));

			const result = removeProfileRules(projectRoot, cursorProfile);

			expect(result.success).toBe(true);
			expect(result.profileDirRemoved).toBe(false);
			expect(result.mcpResult.deleted).toBe(true);
			expect(result.notice).toContain('existing files/folders in .cursor');

			// Verify profile directory was NOT removed (other files/folders exist)
			expect(mockRmSync).not.toHaveBeenCalledWith(
				path.join(projectRoot, '.cursor'),
				{ recursive: true, force: true }
			);
		});
	});

	describe('removeTaskMasterMCPConfiguration - Selective MCP Removal', () => {
		it('should only remove Task Master from MCP config, preserving other servers', () => {
			const projectRoot = '/test/project';
			const mcpConfigPath = '.cursor/mcp.json';

			// Mock MCP config with multiple servers
			const mockMcpConfig = {
				mcpServers: {
					'task-master-ai': {
						command: 'npx',
						args: ['task-master-ai']
					},
					'other-server': {
						command: 'node',
						args: ['other-server.js']
					},
					'another-server': {
						command: 'python',
						args: ['server.py']
					}
				}
			};

			mockExistsSync.mockReturnValue(true);
			mockReadFileSync.mockReturnValue(JSON.stringify(mockMcpConfig));

			const result = removeTaskMasterMCPConfiguration(
				projectRoot,
				mcpConfigPath
			);

			expect(result.success).toBe(true);
			expect(result.removed).toBe(true);
			expect(result.deleted).toBe(false);
			expect(result.hasOtherServers).toBe(true);

			// Verify the file was written back with other servers preserved
			expect(mockWriteFileSync).toHaveBeenCalledWith(
				path.join(projectRoot, mcpConfigPath),
				expect.stringContaining('other-server')
			);
			expect(mockWriteFileSync).toHaveBeenCalledWith(
				path.join(projectRoot, mcpConfigPath),
				expect.stringContaining('another-server')
			);
			expect(mockWriteFileSync).toHaveBeenCalledWith(
				path.join(projectRoot, mcpConfigPath),
				expect.not.stringContaining('task-master-ai')
			);
		});

		it('should delete entire MCP config if Task Master is the only server', () => {
			const projectRoot = '/test/project';
			const mcpConfigPath = '.cursor/mcp.json';

			// Mock MCP config with only Task Master
			const mockMcpConfig = {
				mcpServers: {
					'task-master-ai': {
						command: 'npx',
						args: ['task-master-ai']
					}
				}
			};

			mockExistsSync.mockReturnValue(true);
			mockReadFileSync.mockReturnValue(JSON.stringify(mockMcpConfig));

			const result = removeTaskMasterMCPConfiguration(
				projectRoot,
				mcpConfigPath
			);

			expect(result.success).toBe(true);
			expect(result.removed).toBe(true);
			expect(result.deleted).toBe(true);
			expect(result.hasOtherServers).toBe(false);

			// Verify the entire file was deleted
			expect(mockRmSync).toHaveBeenCalledWith(
				path.join(projectRoot, mcpConfigPath),
				{ force: true }
			);
			expect(mockWriteFileSync).not.toHaveBeenCalled();
		});

		it('should handle MCP config with Task Master in server args', () => {
			const projectRoot = '/test/project';
			const mcpConfigPath = '.cursor/mcp.json';

			// Mock MCP config with Task Master referenced in args
			const mockMcpConfig = {
				mcpServers: {
					'taskmaster-wrapper': {
						command: 'npx',
						args: ['-y', 'task-master-ai']
					},
					'other-server': {
						command: 'node',
						args: ['other-server.js']
					}
				}
			};

			mockExistsSync.mockReturnValue(true);
			mockReadFileSync.mockReturnValue(JSON.stringify(mockMcpConfig));

			const result = removeTaskMasterMCPConfiguration(
				projectRoot,
				mcpConfigPath
			);

			expect(result.success).toBe(true);
			expect(result.removed).toBe(true);
			expect(result.hasOtherServers).toBe(true);

			// Verify only the server with task-master-ai in args was removed
			expect(mockWriteFileSync).toHaveBeenCalledWith(
				path.join(projectRoot, mcpConfigPath),
				expect.stringContaining('other-server')
			);
			expect(mockWriteFileSync).toHaveBeenCalledWith(
				path.join(projectRoot, mcpConfigPath),
				expect.not.stringContaining('taskmaster-wrapper')
			);
		});

		it('should handle non-existent MCP config gracefully', () => {
			const projectRoot = '/test/project';
			const mcpConfigPath = '.cursor/mcp.json';

			mockExistsSync.mockReturnValue(false);

			const result = removeTaskMasterMCPConfiguration(
				projectRoot,
				mcpConfigPath
			);

			expect(result.success).toBe(true);
			expect(result.removed).toBe(false);
			expect(result.deleted).toBe(false);
			expect(result.hasOtherServers).toBe(false);

			// No file operations should have been attempted
			expect(mockReadFileSync).not.toHaveBeenCalled();
			expect(mockWriteFileSync).not.toHaveBeenCalled();
			expect(mockRmSync).not.toHaveBeenCalled();
		});
	});

	describe('Integration - Full Profile Removal with Preservation', () => {
		it('should handle complete removal scenario with notices', () => {
			const projectRoot = '/test/project';
			const cursorProfile = getRulesProfile('cursor');

			// Mock mixed scenario: some Task Master files, some existing files, other MCP servers
			mockExistsSync.mockImplementation((filePath) => {
				// Only .cursor directories exist
				if (filePath === path.join(projectRoot, '.cursor')) return true;
				if (filePath === path.join(projectRoot, '.cursor/rules')) return true;
				if (filePath === path.join(projectRoot, '.cursor/mcp.json'))
					return true;
				// Only cursor_rules.mdc exists, not the other taskmaster files
				if (
					filePath === path.join(projectRoot, '.cursor/rules/cursor_rules.mdc')
				)
					return true;
				if (
					filePath ===
					path.join(projectRoot, '.cursor/rules/taskmaster/dev_workflow.mdc')
				)
					return false;
				if (
					filePath === path.join(projectRoot, '.cursor/rules/self_improve.mdc')
				)
					return false;
				if (
					filePath ===
					path.join(projectRoot, '.cursor/rules/taskmaster/taskmaster.mdc')
				)
					return false;
				return false;
			});

			// Mock sequential calls to readdirSync
			mockReaddirSync
				// First call - get initial directory contents
				.mockReturnValueOnce(['cursor_rules.mdc', 'my_custom_rule.mdc'])
				// Second call - check remaining files after removal
				.mockReturnValueOnce(['my_custom_rule.mdc'])
				// Third call - check profile directory contents
				.mockReturnValueOnce(['rules', 'mcp.json']);

			// Mock MCP config with multiple servers
			const mockMcpConfig = {
				mcpServers: {
					'task-master-ai': { command: 'npx', args: ['task-master-ai'] },
					'other-server': { command: 'node', args: ['other.js'] }
				}
			};
			mockReadFileSync.mockReturnValue(JSON.stringify(mockMcpConfig));

			const result = removeProfileRules(projectRoot, cursorProfile);

			expect(result.success).toBe(true);
			expect(result.filesRemoved).toEqual(['cursor_rules.mdc']);
			expect(result.notice).toContain('Preserved 1 existing rule files');
			expect(result.notice).toContain(
				'preserved other MCP server configurations'
			);
			expect(result.mcpResult.hasOtherServers).toBe(true);
			expect(result.profileDirRemoved).toBe(false);
		});
	});
});

```

--------------------------------------------------------------------------------
/context/chats/max-min-tokens.txt.md:
--------------------------------------------------------------------------------

```markdown
We want to refine how Task Master handles AI model token limits to be more precise, by:

1.  Distinguishing between `maxInputTokens` and `maxOutputTokens` in the configuration.
2.  Dynamically adjusting the `maxOutputTokens` for an API call based on the actual prompt length to stay within the model's total context window (or respecting separate input/output limits if the API and model support that).
3.  Ensuring that `ai-services-unified.js` uses these more granular limits.

This is a good improvement for both cost control and preventing errors.

Here's a plan:

**Phase 1: Configuration and Core Logic Updates**

1.  **Update `.taskmasterconfig` Structure:**
    - I'll modify the `models` section in your `.taskmasterconfig`. For each role (`main`, `research`, `fallback`), `maxTokens` will be replaced with `maxInputTokens` and `maxOutputTokens`.
      - We'll need to decide on reasonable default values for these new fields. We can look at the current `maxTokens` and the model's known limits to make an initial guess (a config sketch follows this list).
2.  **Update `MODEL_MAP` in `ai-services-unified.js`:**
    - This array already stores cost data. We need to ensure it also stores the _absolute_ maximum input and output tokens for each model listed (e.g., `model_max_input_tokens`, `model_max_output_tokens`). If these fields are not present, they will need to be added. The values in `.taskmasterconfig` will then represent user-defined operational limits, which should ideally be validated against these absolute maximums.
3.  **Update `config-manager.js`:**
    - Getter functions like `getParametersForRole` will be updated to fetch `maxInputTokens` and `maxOutputTokens` instead of the singular `maxTokens`.
    - New getters might be needed if we want to access the model's absolute limits directly from `MODEL_MAP` via `config-manager.js`.
4.  **Update `ai-services-unified.js` (`_unifiedServiceRunner`):**
    - **Token Counting:** This is a crucial step. Before an API call, we need to estimate the token count of the combined `systemPrompt` and `userPrompt`.
      - The Vercel AI SDK or the individual provider SDKs might offer utilities for this. For example, some SDKs expose a `tokenizer` or a way to count tokens for a given string.
      - If a direct utility isn't available through the Vercel SDK for the specific provider, we might need to use a library like `tiktoken` for OpenAI/Anthropic models or investigate provider-specific tokenization. This could be complex as tokenization varies between models.
      - For now, let's assume we can get a reasonable estimate.
    - **Dynamic Output Token Calculation & Validation:**
      - Retrieve `configured_max_input_tokens` and `configured_max_output_tokens` from `config-manager.js` for the current role.
      - Retrieve `model_absolute_max_input_tokens` and `model_absolute_max_output_tokens` from `MODEL_MAP`.
      - Let `prompt_tokens = countTokens(systemPrompt + userPrompt)`.
      - **Input Validation:** If `prompt_tokens > configured_max_input_tokens` OR `prompt_tokens > model_absolute_max_input_tokens`, throw an error. (We might also consider auto-truncation strategies in the future, but an error is safer to start).
      - **Output Calculation:** The `max_tokens` parameter sent to the API (which typically refers to the _max number of tokens to generate_) should be `configured_max_output_tokens`. We also need to ensure this doesn't violate the model's absolute output limit.
      - Some models have a _total_ context window (input + output). If this is the case for a model, the `max_tokens` parameter for the API call might need to be `min(configured_max_output_tokens, model_absolute_total_tokens - prompt_tokens)`. However, many modern APIs handle separate input and output limits, simplifying this. We'll aim to pass `configured_max_output_tokens` as the API's `max_tokens` parameter, assuming the input validation has passed.
    - **API Call Parameter:** Update the `callParams` to use the correctly determined `max_tokens` for generation (which will be our `configured_max_output_tokens`).
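
To make the first item concrete, here is a sketch of how a role entry in `.taskmasterconfig` might look after the change (the `maxInputTokens`/`maxOutputTokens` names follow this plan; the surrounding keys and the numbers are placeholders based on an assumed config shape, not recommendations):

```json
{
	"models": {
		"main": {
			"provider": "anthropic",
			"modelId": "claude-3-7-sonnet-20250219",
			"maxInputTokens": 120000,
			"maxOutputTokens": 8192,
			"temperature": 0.2
		}
	}
}
```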

**Phase 2: Error Handling and Refinements**

1.  **Enhanced Error Handling:** Provide clear error messages if prompt tokens exceed input limits, or if API calls fail due to token issues despite our calculations.
2.  **Validation:** Add validation (perhaps in `config-manager.js` or when `task-master models --setup` is run) to ensure `configured_max_input_tokens` and `configured_max_output_tokens` in `.taskmasterconfig` do not exceed the `model_absolute_max_input_tokens` and `model_absolute_max_output_tokens` from `MODEL_MAP`.
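
That validation could be a small helper that compares the configured limits against the absolute limits from `MODEL_MAP` (a sketch; the function name and error wording are illustrative):

```javascript
// Sketch: compare user-configured limits against absolute model limits.
// `modelCapabilities` would come from MODEL_MAP (e.g. via a getModelCapabilities getter).
function validateRoleTokenLimits(role, roleParams, modelCapabilities) {
	const problems = [];
	if (roleParams.maxInputTokens > modelCapabilities.maxInputTokens) {
		problems.push(
			`${role}.maxInputTokens (${roleParams.maxInputTokens}) exceeds model max (${modelCapabilities.maxInputTokens})`
		);
	}
	if (roleParams.maxOutputTokens > modelCapabilities.maxOutputTokens) {
		problems.push(
			`${role}.maxOutputTokens (${roleParams.maxOutputTokens}) exceeds model max (${modelCapabilities.maxOutputTokens})`
		);
	}
	return problems; // Empty array means the configured limits are valid.
}
```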

Let's start by inspecting `scripts/modules/config-manager.js` to see how parameters and `MODEL_MAP` are currently handled. This will inform the changes needed for `.taskmasterconfig` and `ai-services-unified.js`.

I will now read `scripts/modules/config-manager.js`.

Okay, I've reviewed `scripts/modules/config-manager.js`. Here's what I've gathered and how it impacts the plan:

- **`MODEL_MAP` Source:** `MODEL_MAP` is loaded from `supported-models.json`. This is excellent, as it means we can update the model capabilities (including new token limits) in a structured JSON file.
- **Configuration Loading:** `_loadAndValidateConfig` handles loading `.taskmasterconfig` and merging it with `DEFAULTS`.
- **Getters:** There are specific getters for `maxTokens` for each role (e.g., `getMainMaxTokens`). We will need to update these.
- **`getParametersForRole`:** This function already exists and is a good candidate for returning the new `maxInputTokens` and `maxOutputTokens`.

Here's the refined plan:

**Phase 1: Configuration and Data Structures**

1.  **Update `supported-models.json`:**

    - For each model entry in `supported-models.json`, we need to add `maxInputTokens` and `maxOutputTokens` fields. These should reflect the _absolute maximums_ for that specific model.
    - Example entry change:

      ```json
      // Before
      {
          "id": "claude-3-7-sonnet-20250219",
          "name": "Claude 3.7 Sonnet (Preview)",
          "context_window": 200000, // This might be total context
          "cost_per_1m_tokens": { "input": 3, "output": 15, "currency": "USD" }
      }

      // After (assuming 200k is total, and we make a reasonable split or find official numbers)
      {
          "id": "claude-3-7-sonnet-20250219",
          "name": "Claude 3.7 Sonnet (Preview)",
          "maxInputTokens": 190000, // Example
          "maxOutputTokens": 8192,  // Example, often smaller for generation
          "cost_per_1m_tokens": { "input": 3, "output": 15, "currency": "USD" }
      }
      ```

    - I will need to find the official input/output token limits for the models currently in your `MODEL_MAP`. If you have this information handy, it would speed things up. Otherwise, I can make educated guesses or search for them.

2.  **Update `.taskmasterconfig` (Defaults and User File):**
    - In `scripts/modules/config-manager.js`, modify the `DEFAULTS` object. For each role (`main`, `research`, `fallback`), replace `maxTokens` with:
      - `maxInputTokens`: A sensible default (e.g., a large portion of the model's capability, but user-configurable).
      - `maxOutputTokens`: A sensible default for generation (e.g., 4096 or 8192).
    - You will then need to manually update your existing `.taskmasterconfig` file to reflect this new structure. I can provide the snippet for you to paste.
3.  **Update `config-manager.js`:**
    - Modify `getParametersForRole(role, explicitRoot = null)`:
      - It currently fetches `maxTokens` and `temperature`.
      - Update it to fetch `maxInputTokens`, `maxOutputTokens`, and `temperature` from the loaded config for the given role.
    - Remove the role-specific `getMaxTokens` functions (e.g., `getMainMaxTokens`, `getResearchMaxTokens`, `getFallbackMaxTokens`). The `getParametersForRole` will be the central way to get these.
    - (Optional, for later validation) Consider adding a new function `getModelCapabilities(providerName, modelId)` that reads from `MODEL_MAP` to return the absolute `maxInputTokens` and `maxOutputTokens` for a given model. This would be useful for validating the user's settings in `.taskmasterconfig`.
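
A sketch of the updated getter, assuming a `getConfig(explicitRoot)` accessor over the merged configuration and the `DEFAULTS` object described above (adapt to the actual loader built on `_loadAndValidateConfig`):

```javascript
// Sketch of the reworked getter; getConfig() stands in for the real
// merged-config accessor in config-manager.js.
function getParametersForRole(role, explicitRoot = null) {
	const config = getConfig(explicitRoot);
	const roleConfig = config.models?.[role] ?? {};
	const roleDefaults = DEFAULTS.models[role];
	return {
		maxInputTokens: roleConfig.maxInputTokens ?? roleDefaults.maxInputTokens,
		maxOutputTokens: roleConfig.maxOutputTokens ?? roleDefaults.maxOutputTokens,
		temperature: roleConfig.temperature ?? roleDefaults.temperature
	};
}
```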

**Phase 2: Core Logic in `ai-services-unified.js`**

1.  **Token Counting (`_unifiedServiceRunner`):**

    - This is the most complex part. We need a reliable way to count tokens for the prompts.
      - **Strategy 1 (Ideal):** Leverage Vercel AI SDK. The SDK might provide a way to get a tokenizer for the active model or a utility function. We'll need to investigate its capabilities.
      - **Strategy 2 (Fallback):** Use a library like `tiktoken` for models compatible with OpenAI's tokenization (many are, including some Anthropic models). For other models, we might need provider-specific tokenizers or make estimations (less ideal).
      - **Initial Approach:** Let's try to find a Vercel AI SDK utility first. If not, we'll start with `tiktoken` as a common case and acknowledge that other models might need specific handling later.
    - The function `_unifiedServiceRunner` will call this token counting utility:

      ```javascript
      // Placeholder for token counting
      function countTokens(text, modelId /* or providerName */) {
      	// ... implementation using SDK or library ...
      	// This is a simplification; actual tokenization is model-specific.
      	// For now, a rough estimate might be (text.length / 4) as a placeholder
      	// if a proper tokenizer isn't immediately available.
      	if (!text) return 0;
      	return Math.ceil(text.length / 3.5); // Very rough general estimate
      }

      const promptTokens = countTokens(systemPrompt) + countTokens(prompt);
      ```

2.  **Dynamic Output Token Calculation & Validation (`_unifiedServiceRunner`):**

    - Fetch parameters using the updated `getParametersForRole` from `config-manager.js`:
      ```javascript
      const roleParams = getParametersForRole(
      	currentRole,
      	effectiveProjectRoot
      );
      // roleParams will now contain { maxInputTokens, maxOutputTokens, temperature }
      ```
    - Fetch model's absolute limits from `MODEL_MAP` (we'll need a helper in `config-manager.js` or do it directly in `ai-services-unified.js` for now):
      ```javascript
      // Simplified; ideally use a robust getter from config-manager
      const modelInfo = MODEL_MAP[providerName?.toLowerCase()]?.find(
      	(m) => m.id === modelId
      );
      const modelAbsoluteMaxInput = modelInfo?.maxInputTokens || Infinity;
      const modelAbsoluteMaxOutput =
      	modelInfo?.maxOutputTokens || roleParams.maxOutputTokens; // Fallback to configured if not in map
      ```
    - **Input Validation:**
      ```javascript
      if (promptTokens > roleParams.maxInputTokens) {
      	throw new Error(
      		`Prompt (${promptTokens} tokens) exceeds configured max input tokens (${roleParams.maxInputTokens}) for role '${currentRole}'.`
      	);
      }
      if (promptTokens > modelAbsoluteMaxInput) {
      	throw new Error(
      		`Prompt (${promptTokens} tokens) exceeds model's absolute max input tokens (${modelAbsoluteMaxInput}) for ${modelId}.`
      	);
      }
      ```
    - **API Call Parameter (`max_tokens` for generation):**
      The `max_tokens` parameter passed to the provider's API call should be `roleParams.maxOutputTokens`. We must also ensure this doesn't exceed the model's absolute output capability.

      ```javascript
      const apiMaxOutputTokens = Math.min(
      	roleParams.maxOutputTokens,
      	modelAbsoluteMaxOutput
      );

      const callParams = {
      	apiKey,
      	modelId,
      	// Use apiMaxOutputTokens for the actual API call
      	maxTokens: apiMaxOutputTokens, // This usually means max tokens to GENERATE
      	temperature: roleParams.temperature,
      	messages,
      	baseUrl,
      	...(serviceType === 'generateObject' && { schema, objectName }),
      	...restApiParams
      };
      ```

3.  **Update `logAiUsage`:**
    - The cost calculation in `logAiUsage` seems correct as it already uses `inputCost` and `outputCost` per 1M tokens. This part should remain compatible.

**Next Steps:**

1.  **Modify `supported-models.json`:** I need the content of this file to propose the new `maxInputTokens` and `maxOutputTokens` fields. Please provide it.
2.  **Decide on Tokenizer Strategy:** What are your thoughts on the token counting? Are you aware of any utilities within Vercel AI SDK for this, or should we plan to integrate `tiktoken` initially?

Once I have the content of `supported-models.json`, I can draft the changes for it and then proceed to `config-manager.js` and `.taskmasterconfig`.

Okay, I have the contents of `supported-models.json`.

It seems many models have a `max_tokens` field, which I assume is the _total context window_. Some don't have it, and for others (like OpenAI `gpt-4o`), the listed `max_tokens` (16384) is known to be its _output_ token limit when using the Chat Completions API, while the context window is much larger (128k). This highlights the complexity: `max_tokens` in `supported-models.json` is used inconsistently.

**Revised Plan for `supported-models.json` and Token Definitions:**

To bring clarity, we'll introduce two new fields and ensure their meaning is consistent:

- `contextWindowTokens`: The total number of tokens the model can process (input + output). This would replace the current ambiguous `max_tokens`.
- `maxOutputTokens`: The maximum number of tokens the model can _generate_ in a single response. This is often smaller than the total context window, especially for larger models.

If a model _only_ specifies a total context window, we'll have to make a reasonable assumption for `maxOutputTokens` (e.g., 4096 or 8192, or a fraction of the total context window). If it only specifies an output token limit (like some OpenAI models in certain API modes), we'll need to find its total context window.
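
In code, the effective generation cap under these two fields could be derived as follows (a sketch; the function name is illustrative):

```javascript
// Cap generation by the configured limit, the model's documented output
// limit, and whatever room the prompt leaves in a total context window.
function effectiveMaxOutput(model, configuredMaxOutput, promptTokens) {
	const byWindow =
		model.contextWindowTokens != null
			? model.contextWindowTokens - promptTokens
			: Infinity;
	const byModel = model.maxOutputTokens ?? Infinity;
	return Math.max(0, Math.min(configuredMaxOutput, byModel, byWindow));
}
```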

**Updated `supported-models.json` Structure (Example):**

```json
// For a model like Anthropic Claude 3.7 Sonnet (Preview)
{
    "id": "claude-3-7-sonnet-20250219",
    "swe_score": 0.623,
    "cost_per_1m_tokens": { "input": 3.0, "output": 15.0 },
    "allowed_roles": ["main", "fallback"],
    "contextWindowTokens": 200000, // From Anthropic's documentation
    "maxOutputTokens": 8192     // Anthropic default, user can override in .taskmasterconfig
}

// For a model like OpenAI GPT-4o
{
    "id": "gpt-4o",
    "swe_score": 0.332,
    "cost_per_1m_tokens": { "input": 2.5, "output": 10.0 },
    "allowed_roles": ["main", "fallback"],
    "contextWindowTokens": 128000, // Official context window
    "maxOutputTokens": 16384    // Max output for chat completions is 16k for gpt-4o (used to be 4k/8k for older gpt-4)
                                // but the model card mentions 128k total. The API call parameter for `max_tokens` sets this output cap.
}
```

I will proceed to generate the updated `supported-models.json` content. This will be a significant change. I will make my best effort to find the correct `contextWindowTokens` and `maxOutputTokens` for each model. If official numbers are ambiguous or not readily available, I'll use sensible defaults and add a comment.

**Regarding Tokenizer Strategy:**

- **Vercel AI SDK:** The `ai` package (Vercel AI SDK) itself is a lightweight wrapper. Tokenization is usually handled by the underlying provider-specific SDKs (e.g., `@anthropic-ai/sdk`, `openai`). The Vercel SDK doesn't provide a universal tokenizer.
- **Provider SDKs:**
  - `@anthropic-ai/sdk`: Does not expose a public tokenizer directly. Anthropic recommends counting based on their guidelines (roughly 3.5 chars/token for English).
  - `openai` (for Node.js): The `gpt-3-encoder` or `tiktoken` libraries are commonly used. `tiktoken` is more current.
- **`tiktoken`:** This is a good general-purpose library for OpenAI models and those that use compatible tokenization (many models fine-tuned from Llama, etc., might be close). It supports various encodings.
- **Recommendation:**
  1.  Start by integrating `tiktoken`. We can select the encoding based on the provider or even specific model if needed (e.g., `cl100k_base` for gpt-4, gpt-3.5-turbo, DALL-E models).
  2.  For providers where `tiktoken` is not accurate (e.g., Google's Gemini, Perplexity if they have proprietary tokenizers), we might have to rely on character-based estimations (e.g., `length / N`) as a rougher guide or investigate if their specific SDKs offer token counting.
  3.  The Vercel AI SDK's `experimental_streamObject` and other functions _might_ internally handle some aspects of token counting for request preparation, but we need explicit counting for our logic _before_ the call.

For now, let's plan on a `countTokens` utility that uses `tiktoken` for OpenAI and Anthropic (Anthropic's own guidance is character-based, but `cl100k_base` is often a decent proxy for rough estimates, though not perfect), with a character-based fallback for other providers.
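
As a starting point, that utility might look like the following (a sketch assuming the `tiktoken` npm package; treating `cl100k_base` as a proxy for Anthropic is the approximation discussed above):

```javascript
import { get_encoding } from 'tiktoken';

// Sketch: exact counts for cl100k_base-compatible models, estimate otherwise.
function countTokens(text, providerName) {
	if (!text) return 0;
	const provider = providerName?.toLowerCase();
	if (provider === 'openai' || provider === 'anthropic') {
		const enc = get_encoding('cl100k_base');
		try {
			return enc.encode(text).length;
		} finally {
			enc.free(); // tiktoken encoders are WASM-backed and must be freed
		}
	}
	// Character-based fallback (~3.5 chars per token for English text)
	return Math.ceil(text.length / 3.5);
}
```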

I will now generate the proposed changes for `scripts/modules/supported-models.json`. This will be a large diff.

After this, we'll move to:

1.  Updating `.taskmasterconfig` defaults in `config-manager.js` and providing you the snippet for your user file.
2.  Updating `config-manager.js` getters.
3.  Implementing the logic in `ai-services-unified.js`.

```

--------------------------------------------------------------------------------
/tests/unit/scripts/modules/task-manager/list-tasks.test.js:
--------------------------------------------------------------------------------

```javascript
/**
 * Tests for the list-tasks.js module
 */
import { jest } from '@jest/globals';

// Mock the dependencies before importing the module under test
jest.unstable_mockModule('../../../../../scripts/modules/utils.js', () => ({
	readJSON: jest.fn(),
	writeJSON: jest.fn(),
	log: jest.fn(),
	CONFIG: {
		model: 'mock-claude-model',
		maxTokens: 4000,
		temperature: 0.7,
		debug: false
	},
	sanitizePrompt: jest.fn((prompt) => prompt),
	truncate: jest.fn((text) => text),
	isSilentMode: jest.fn(() => false),
	findTaskById: jest.fn((tasks, id) =>
		tasks.find((t) => t.id === parseInt(id))
	),
	addComplexityToTask: jest.fn(),
	readComplexityReport: jest.fn(() => null),
	getTagAwareFilePath: jest.fn((tag, path) => '/mock/tagged/report.json'),
	stripAnsiCodes: jest.fn((text) =>
		text ? text.replace(/\x1b\[[0-9;]*m/g, '') : text
	)
}));

jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({
	formatDependenciesWithStatus: jest.fn(),
	displayBanner: jest.fn(),
	displayTaskList: jest.fn(),
	startLoadingIndicator: jest.fn(() => ({ stop: jest.fn() })),
	stopLoadingIndicator: jest.fn(),
	createProgressBar: jest.fn(() => ' MOCK_PROGRESS_BAR '),
	getStatusWithColor: jest.fn((status) => status),
	getComplexityWithColor: jest.fn((score) => `Score: ${score}`)
}));

jest.unstable_mockModule(
	'../../../../../scripts/modules/dependency-manager.js',
	() => ({
		validateAndFixDependencies: jest.fn(),
		validateTaskDependencies: jest.fn()
	})
);

// Mock @tm/core to control task data in tests
const mockTasksList = jest.fn();
jest.unstable_mockModule('@tm/core', () => ({
	createTmCore: jest.fn(async () => ({
		tasks: {
			list: mockTasksList
		}
	}))
}));

// Import the mocked modules
const {
	readJSON,
	log,
	readComplexityReport,
	addComplexityToTask,
	stripAnsiCodes
} = await import('../../../../../scripts/modules/utils.js');
const { displayTaskList } = await import(
	'../../../../../scripts/modules/ui.js'
);
const { validateAndFixDependencies } = await import(
	'../../../../../scripts/modules/dependency-manager.js'
);

// Import the module under test
const { default: listTasks } = await import(
	'../../../../../scripts/modules/task-manager/list-tasks.js'
);

// Sample data for tests
const sampleTasks = {
	meta: { projectName: 'Test Project' },
	tasks: [
		{
			id: 1,
			title: 'Setup Project',
			description: 'Initialize project structure',
			status: 'done',
			dependencies: [],
			priority: 'high'
		},
		{
			id: 2,
			title: 'Implement Core Features',
			description: 'Build main functionality',
			status: 'pending',
			dependencies: [1],
			priority: 'high'
		},
		{
			id: 3,
			title: 'Create UI Components',
			description: 'Build user interface',
			status: 'in-progress',
			dependencies: [1, 2],
			priority: 'medium',
			subtasks: [
				{
					id: 1,
					title: 'Create Header Component',
					description: 'Build header component',
					status: 'done',
					dependencies: []
				},
				{
					id: 2,
					title: 'Create Footer Component',
					description: 'Build footer component',
					status: 'pending',
					dependencies: [1]
				}
			]
		},
		{
			id: 4,
			title: 'Testing',
			description: 'Write and run tests',
			status: 'cancelled',
			dependencies: [2, 3],
			priority: 'low'
		},
		{
			id: 5,
			title: 'Code Review',
			description: 'Review code for quality and standards',
			status: 'review',
			dependencies: [3],
			priority: 'medium'
		}
	]
};

describe('listTasks', () => {
	beforeEach(() => {
		jest.clearAllMocks();

		// Mock console methods to suppress output
		jest.spyOn(console, 'log').mockImplementation(() => {});
		jest.spyOn(console, 'error').mockImplementation(() => {});

		// Mock process.exit to prevent actual exit
		jest.spyOn(process, 'exit').mockImplementation((code) => {
			throw new Error(`process.exit: ${code}`);
		});

		// Set up default mock return values
		const defaultSampleTasks = JSON.parse(JSON.stringify(sampleTasks));
		readJSON.mockReturnValue(defaultSampleTasks);
		mockTasksList.mockResolvedValue({
			tasks: defaultSampleTasks.tasks,
			storageType: 'file'
		});
		readComplexityReport.mockReturnValue(null);
		validateAndFixDependencies.mockImplementation(() => {});
		displayTaskList.mockImplementation(() => {});
		addComplexityToTask.mockImplementation(() => {});
	});

	afterEach(() => {
		// Restore console methods
		jest.restoreAllMocks();
	});

	test('should list all tasks when no status filter is provided', async () => {
		// Arrange
		const tasksPath = 'tasks/tasks.json';

		// Act
		const result = await listTasks(tasksPath, null, null, false, 'json', {
			tag: 'master'
		});

		// Assert
		expect(result).toEqual(
			expect.objectContaining({
				tasks: expect.arrayContaining([
					expect.objectContaining({ id: 1 }),
					expect.objectContaining({ id: 2 }),
					expect.objectContaining({ id: 3 }),
					expect.objectContaining({ id: 4 }),
					expect.objectContaining({ id: 5 })
				])
			})
		);
	});

	test('should filter tasks by status when status filter is provided', async () => {
		// Arrange
		const tasksPath = 'tasks/tasks.json';
		const statusFilter = 'pending';

		// Act
		const result = await listTasks(
			tasksPath,
			statusFilter,
			null,
			false,
			'json',
			{
				tag: 'master'
			}
		);

		// Assert
		// Verify only pending tasks are returned
		expect(result.tasks).toHaveLength(1);
		expect(result.tasks[0].status).toBe('pending');
		expect(result.tasks[0].id).toBe(2);
	});

	test('should filter tasks by done status', async () => {
		// Arrange
		const tasksPath = 'tasks/tasks.json';
		const statusFilter = 'done';

		// Act
		const result = await listTasks(
			tasksPath,
			statusFilter,
			null,
			false,
			'json',
			{
				tag: 'master'
			}
		);

		// Assert
		// Verify only done tasks are returned
		expect(result.tasks).toHaveLength(1);
		expect(result.tasks[0].status).toBe('done');
	});

	test('should filter tasks by review status', async () => {
		// Arrange
		const tasksPath = 'tasks/tasks.json';
		const statusFilter = 'review';

		// Act
		const result = await listTasks(
			tasksPath,
			statusFilter,
			null,
			false,
			'json',
			{
				tag: 'master'
			}
		);

		// Assert
		// Verify only review tasks are returned
		expect(result.tasks).toHaveLength(1);
		expect(result.tasks[0].status).toBe('review');
		expect(result.tasks[0].id).toBe(5);
	});

	test('should include subtasks when withSubtasks option is true', async () => {
		// Arrange
		const tasksPath = 'tasks/tasks.json';

		// Act
		const result = await listTasks(tasksPath, null, null, true, 'json', {
			tag: 'master'
		});

		// Assert
		// Verify that the task with subtasks is included
		const taskWithSubtasks = result.tasks.find((task) => task.id === 3);
		expect(taskWithSubtasks).toBeDefined();
		expect(taskWithSubtasks.subtasks).toBeDefined();
		expect(taskWithSubtasks.subtasks).toHaveLength(2);
	});

	test('should not include subtasks when withSubtasks option is false', async () => {
		// Arrange
		const tasksPath = 'tasks/tasks.json';

		// Act
		const result = await listTasks(tasksPath, null, null, false, 'json', {
			tag: 'master'
		});

		// Assert
		// For JSON output, subtasks should still be included in the data structure
		// The withSubtasks flag affects display, not the data structure
		expect(result).toEqual(
			expect.objectContaining({
				tasks: expect.any(Array)
			})
		);
	});

	test('should return empty array when no tasks match the status filter', async () => {
		// Arrange
		const tasksPath = 'tasks/tasks.json';
		const statusFilter = 'blocked'; // Status that doesn't exist in sample data

		// Act
		const result = await listTasks(
			tasksPath,
			statusFilter,
			null,
			false,
			'json',
			{
				tag: 'master'
			}
		);

		// Assert
		// Verify empty array is returned
		expect(result.tasks).toHaveLength(0);
	});

	test('should handle file read errors', async () => {
		// Arrange
		const tasksPath = 'tasks/tasks.json';
		// Mock tm-core to throw an error, and readJSON to also throw
		mockTasksList.mockReset();
		mockTasksList.mockImplementation(() => {
			return Promise.reject(new Error('File not found'));
		});
		readJSON.mockReset();
		readJSON.mockImplementation(() => {
			throw new Error('File not found');
		});

		// Act & Assert
		// When outputFormat is 'json', listTasks throws a structured error object
		await expect(
			listTasks(tasksPath, null, null, false, 'json', { tag: 'master' })
		).rejects.toEqual(
			expect.objectContaining({
				code: 'TASK_LIST_ERROR',
				message: 'File not found'
			})
		);
	});

	test('should run without error when listing tasks (validateAndFixDependencies is not invoked)', async () => {
		// Arrange
		const tasksPath = 'tasks/tasks.json';

		// Act
		await listTasks(tasksPath, null, null, false, 'json', { tag: 'master' });

		// Assert
		// Note: validateAndFixDependencies is not called by listTasks function
		// This test just verifies the function runs without error
	});

	test('should pass correct options to displayTaskList', async () => {
		// Arrange
		const tasksPath = 'tasks/tasks.json';

		// Act
		const result = await listTasks(tasksPath, 'pending', null, true, 'json', {
			tag: 'master'
		});

		// Assert
		// For JSON output, we don't call displayTaskList, so just verify the result structure
		expect(result).toEqual(
			expect.objectContaining({
				tasks: expect.any(Array),
				filter: 'pending',
				stats: expect.any(Object)
			})
		);
	});

	test('should filter tasks by in-progress status', async () => {
		// Arrange
		const tasksPath = 'tasks/tasks.json';
		const statusFilter = 'in-progress';

		// Act
		const result = await listTasks(
			tasksPath,
			statusFilter,
			null,
			false,
			'json',
			{
				tag: 'master'
			}
		);

		// Assert
		expect(result.tasks).toHaveLength(1);
		expect(result.tasks[0].status).toBe('in-progress');
		expect(result.tasks[0].id).toBe(3);
	});

	test('should filter tasks by cancelled status', async () => {
		// Arrange
		const tasksPath = 'tasks/tasks.json';
		const statusFilter = 'cancelled';

		// Act
		const result = await listTasks(
			tasksPath,
			statusFilter,
			null,
			false,
			'json',
			{
				tag: 'master'
			}
		);

		// Assert
		expect(result.tasks).toHaveLength(1);
		expect(result.tasks[0].status).toBe('cancelled');
		expect(result.tasks[0].id).toBe(4);
	});

	test('should return the original tasks data structure', async () => {
		// Arrange
		const tasksPath = 'tasks/tasks.json';

		// Act
		const result = await listTasks(tasksPath, null, null, false, 'json', {
			tag: 'master'
		});

		// Assert
		expect(result).toEqual(
			expect.objectContaining({
				tasks: expect.any(Array),
				filter: 'all',
				stats: expect.objectContaining({
					total: 5,
					completed: expect.any(Number),
					inProgress: expect.any(Number),
					pending: expect.any(Number)
				})
			})
		);
		expect(result.tasks).toHaveLength(5);
	});

	// Tests for comma-separated status filtering
	describe('Comma-separated status filtering', () => {
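		// A minimal sketch (an illustration/assumption, not the actual list-tasks.js
		// implementation) of how a comma-separated filter can be normalized: split on
		// commas, trim whitespace, drop empty entries, and compare case-insensitively.
		const parseStatusFilterSketch = (filter) =>
			filter
				.split(',')
				.map((s) => s.trim().toLowerCase())
				.filter((s) => s.length > 0);
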
		test('should filter tasks by multiple statuses separated by commas', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const statusFilter = 'done,pending';

			// Act
			const result = await listTasks(
				tasksPath,
				statusFilter,
				null,
				false,
				'json',
				{
					tag: 'master'
				}
			);

			// Assert
			// Should return tasks with 'done' or 'pending' status
			expect(result.tasks).toHaveLength(2);
			expect(result.tasks.map((t) => t.status)).toEqual(
				expect.arrayContaining(['done', 'pending'])
			);
		});

		test('should filter tasks by three or more statuses', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const statusFilter = 'done,pending,in-progress';

			// Act
			const result = await listTasks(
				tasksPath,
				statusFilter,
				null,
				false,
				'json',
				{
					tag: 'master'
				}
			);

			// Assert
			// Should return tasks with 'done', 'pending', or 'in-progress' status
			expect(result.tasks).toHaveLength(3);
			const statusValues = result.tasks.map((task) => task.status);
			expect(statusValues).toEqual(
				expect.arrayContaining(['done', 'pending', 'in-progress'])
			);

			// Verify all matching tasks are included
			const taskIds = result.tasks.map((task) => task.id);
			expect(taskIds).toContain(1); // done
			expect(taskIds).toContain(2); // pending
			expect(taskIds).toContain(3); // in-progress
			expect(taskIds).not.toContain(4); // cancelled - should not be included
		});

		test('should handle spaces around commas in status filter', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const statusFilter = 'done, pending , in-progress';

			// Act
			const result = await listTasks(
				tasksPath,
				statusFilter,
				null,
				false,
				'json',
				{
					tag: 'master'
				}
			);

			// Assert
			// Should trim spaces and work correctly
			expect(result.tasks).toHaveLength(3);
			const statusValues = result.tasks.map((task) => task.status);
			expect(statusValues).toEqual(
				expect.arrayContaining(['done', 'pending', 'in-progress'])
			);
		});

		test('should handle empty status values in comma-separated list', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const statusFilter = 'done,,pending,';

			// Act
			const result = await listTasks(
				tasksPath,
				statusFilter,
				null,
				false,
				'json',
				{
					tag: 'master'
				}
			);

			// Assert
			// Should ignore empty values and work with valid ones
			expect(result.tasks).toHaveLength(2);
			const statusValues = result.tasks.map((task) => task.status);
			expect(statusValues).toEqual(expect.arrayContaining(['done', 'pending']));
		});

		test('should handle case-insensitive matching for comma-separated statuses', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const statusFilter = 'DONE,Pending,IN-PROGRESS';

			// Act
			const result = await listTasks(
				tasksPath,
				statusFilter,
				null,
				false,
				'json',
				{
					tag: 'master'
				}
			);

			// Assert
			// Should match case-insensitively
			expect(result.tasks).toHaveLength(3);
			const statusValues = result.tasks.map((task) => task.status);
			expect(statusValues).toEqual(
				expect.arrayContaining(['done', 'pending', 'in-progress'])
			);
		});

		test('should return empty array when no tasks match comma-separated statuses', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const statusFilter = 'blocked,deferred';

			// Act
			const result = await listTasks(
				tasksPath,
				statusFilter,
				null,
				false,
				'json',
				{
					tag: 'master'
				}
			);

			// Assert
			// Should return empty array as no tasks have these statuses
			expect(result.tasks).toHaveLength(0);
		});

		test('should work with single status when using comma syntax', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const statusFilter = 'pending,';

			// Act
			const result = await listTasks(
				tasksPath,
				statusFilter,
				null,
				false,
				'json',
				{
					tag: 'master'
				}
			);

			// Assert
			// Should work the same as single status filter
			expect(result.tasks).toHaveLength(1);
			expect(result.tasks[0].status).toBe('pending');
		});

		test('should set correct filter value in response for comma-separated statuses', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const statusFilter = 'done,pending';

			// Act
			const result = await listTasks(
				tasksPath,
				statusFilter,
				null,
				false,
				'json',
				{
					tag: 'master'
				}
			);

			// Assert
			// Should return the original filter string
			expect(result.filter).toBe('done,pending');
		});

		test('should handle all statuses filter with comma syntax', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const statusFilter = 'all';

			// Act
			const result = await listTasks(
				tasksPath,
				statusFilter,
				null,
				false,
				'json',
				{
					tag: 'master'
				}
			);

			// Assert
			// Should return all tasks when filter is 'all'
			expect(result.tasks).toHaveLength(5);
			expect(result.filter).toBe('all');
		});

		test('should handle mixed existing and non-existing statuses', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const statusFilter = 'done,nonexistent,pending';

			// Act
			const result = await listTasks(
				tasksPath,
				statusFilter,
				null,
				false,
				'json',
				{
					tag: 'master'
				}
			);

			// Assert
			// Should return only tasks with existing statuses
			expect(result.tasks).toHaveLength(2);
			const statusValues = result.tasks.map((task) => task.status);
			expect(statusValues).toEqual(expect.arrayContaining(['done', 'pending']));
		});

		test('should filter by review status in comma-separated list', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const statusFilter = 'review,cancelled';

			// Act
			const result = await listTasks(
				tasksPath,
				statusFilter,
				null,
				false,
				'json',
				{
					tag: 'master'
				}
			);

			// Assert
			// Should return tasks with 'review' or 'cancelled' status
			expect(result.tasks).toHaveLength(2);
			const statusValues = result.tasks.map((task) => task.status);
			expect(statusValues).toEqual(
				expect.arrayContaining(['review', 'cancelled'])
			);

			// Verify specific tasks
			const taskIds = result.tasks.map((task) => task.id);
			expect(taskIds).toContain(4); // cancelled task
			expect(taskIds).toContain(5); // review task
		});
	});

	describe('Compact output format', () => {
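		// Compact line shape asserted below (inferred from the expectations, not
		// from the implementation): `<id> <status> <title> (<priority>)`, plus
		// ` → dep1,dep2` when dependencies exist, with long dependency lists
		// truncated as `(+N more)`.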
		test('should output compact format when outputFormat is compact', async () => {
			const consoleSpy = jest.spyOn(console, 'log').mockImplementation();
			const tasksPath = 'tasks/tasks.json';

			await listTasks(tasksPath, null, null, false, 'compact', {
				tag: 'master'
			});

			expect(consoleSpy).toHaveBeenCalled();
			const output = consoleSpy.mock.calls.map((call) => call[0]).join('\n');
			// Strip ANSI color codes for testing
			const cleanOutput = stripAnsiCodes(output);

			// Should contain compact format elements: ID status title (priority) [→ dependencies]
			expect(cleanOutput).toContain('1 done Setup Project (high)');
			expect(cleanOutput).toContain(
				'2 pending Implement Core Features (high) → 1'
			);

			consoleSpy.mockRestore();
		});

		test('should format single task compactly', async () => {
			const consoleSpy = jest.spyOn(console, 'log').mockImplementation();
			const tasksPath = 'tasks/tasks.json';

			await listTasks(tasksPath, null, null, false, 'compact', {
				tag: 'master'
			});

			expect(consoleSpy).toHaveBeenCalled();
			const output = consoleSpy.mock.calls.map((call) => call[0]).join('\n');

			// Should be compact (no verbose headers)
			expect(output).not.toContain('Project Dashboard');
			expect(output).not.toContain('Progress:');

			consoleSpy.mockRestore();
		});

		test('should handle compact format with subtasks', async () => {
			const consoleSpy = jest.spyOn(console, 'log').mockImplementation();
			const tasksPath = 'tasks/tasks.json';

			await listTasks(
				tasksPath,
				null,
				null,
				true, // withSubtasks = true
				'compact',
				{ tag: 'master' }
			);

			expect(consoleSpy).toHaveBeenCalled();
			const output = consoleSpy.mock.calls.map((call) => call[0]).join('\n');
			// Strip ANSI color codes for testing
			const cleanOutput = stripAnsiCodes(output);

			// Should handle both tasks and subtasks
			expect(cleanOutput).toContain('1 done Setup Project (high)');
			expect(cleanOutput).toContain('3.1 done Create Header Component');

			consoleSpy.mockRestore();
		});

		test('should handle empty task list in compact format', async () => {
			// Mock tm-core to return empty task list
			mockTasksList.mockResolvedValue({
				tasks: [],
				storageType: 'file'
			});
			readJSON.mockReturnValue({ tasks: [] });
			const consoleSpy = jest.spyOn(console, 'log').mockImplementation();
			const tasksPath = 'tasks/tasks.json';

			await listTasks(tasksPath, null, null, false, 'compact', {
				tag: 'master'
			});

			expect(consoleSpy).toHaveBeenCalledWith('No tasks found');

			consoleSpy.mockRestore();
		});

		test('should format dependencies correctly with shared helper', async () => {
			// Create mock tasks with various dependency scenarios
			const tasksWithDeps = {
				tasks: [
					{
						id: 1,
						title: 'Task with no dependencies',
						status: 'pending',
						priority: 'medium',
						dependencies: []
					},
					{
						id: 2,
						title: 'Task with few dependencies',
						status: 'pending',
						priority: 'high',
						dependencies: [1, 3]
					},
					{
						id: 3,
						title: 'Task with many dependencies',
						status: 'pending',
						priority: 'low',
						dependencies: [1, 2, 4, 5, 6, 7, 8, 9]
					}
				]
			};

			// Mock tm-core to return test data
			mockTasksList.mockResolvedValue({
				tasks: tasksWithDeps.tasks,
				storageType: 'file'
			});
			readJSON.mockReturnValue(tasksWithDeps);
			const consoleSpy = jest.spyOn(console, 'log').mockImplementation();
			const tasksPath = 'tasks/tasks.json';

			await listTasks(tasksPath, null, null, false, 'compact', {
				tag: 'master'
			});

			expect(consoleSpy).toHaveBeenCalled();
			const output = consoleSpy.mock.calls.map((call) => call[0]).join('\n');
			// Strip ANSI color codes for testing
			const cleanOutput = stripAnsiCodes(output);

			// Should format tasks correctly with compact output including priority
			expect(cleanOutput).toContain(
				'1 pending Task with no dependencies (medium)'
			);
			expect(cleanOutput).toContain('Task with few dependencies');
			expect(cleanOutput).toContain('Task with many dependencies');
			// Should show dependencies with arrow when they exist
			expect(cleanOutput).toMatch(/2.*→.*1,3/);
			// Should truncate many dependencies with "+X more" format
			expect(cleanOutput).toMatch(/3.*→.*1,2,4,5,6.*\(\+\d+ more\)/);

			consoleSpy.mockRestore();
		});
	});
});

```

--------------------------------------------------------------------------------
/tests/integration/mcp-server/direct-functions.test.js:
--------------------------------------------------------------------------------

```javascript
/**
 * Integration test for direct function imports in MCP server
 */

import { jest } from '@jest/globals';
import path, { dirname } from 'path';
import { fileURLToPath } from 'url';

// Get the current module's directory
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);

// Test file paths
const testProjectRoot = path.join(__dirname, '../../fixtures');
const testTasksPath = path.join(testProjectRoot, 'test-tasks.json');

// Create explicit mock functions
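// Note: jest.mock() factory calls are hoisted above imports; Jest only allows a
// factory to reference out-of-scope variables whose names begin with `mock`,
// which is why every function below follows that naming convention.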
const mockExistsSync = jest.fn().mockReturnValue(true);
const mockWriteFileSync = jest.fn();
const mockReadFileSync = jest.fn();
const mockUnlinkSync = jest.fn();
const mockMkdirSync = jest.fn();

const mockFindTasksJsonPath = jest.fn().mockReturnValue(testTasksPath);
const mockReadJSON = jest.fn();
const mockWriteJSON = jest.fn();
const mockEnableSilentMode = jest.fn();
const mockDisableSilentMode = jest.fn();
const mockReadComplexityReport = jest.fn().mockReturnValue(null);

const mockGetAnthropicClient = jest.fn().mockReturnValue({});
const mockGetConfiguredAnthropicClient = jest.fn().mockReturnValue({});
const mockHandleAnthropicStream = jest.fn().mockResolvedValue(
	JSON.stringify([
		{
			id: 1,
			title: 'Mock Subtask 1',
			description: 'First mock subtask',
			dependencies: [],
			details: 'Implementation details for mock subtask 1'
		},
		{
			id: 2,
			title: 'Mock Subtask 2',
			description: 'Second mock subtask',
			dependencies: [1],
			details: 'Implementation details for mock subtask 2'
		}
	])
);
const mockParseSubtasksFromText = jest.fn().mockReturnValue([
	{
		id: 1,
		title: 'Mock Subtask 1',
		description: 'First mock subtask',
		status: 'pending',
		dependencies: []
	},
	{
		id: 2,
		title: 'Mock Subtask 2',
		description: 'Second mock subtask',
		status: 'pending',
		dependencies: [1]
	}
]);

// Create a mock for expandTask that returns predefined responses instead of making real calls
const mockExpandTask = jest
	.fn()
	.mockImplementation(
		(taskId, numSubtasks, useResearch, additionalContext, options) => {
			const task = {
				...(sampleTasks.tasks.find((t) => t.id === taskId) || {}),
				subtasks: useResearch
					? [
							{
								id: 1,
								title: 'Research-Backed Subtask 1',
								description: 'First research-backed subtask',
								status: 'pending',
								dependencies: []
							},
							{
								id: 2,
								title: 'Research-Backed Subtask 2',
								description: 'Second research-backed subtask',
								status: 'pending',
								dependencies: [1]
							}
						]
					: [
							{
								id: 1,
								title: 'Mock Subtask 1',
								description: 'First mock subtask',
								status: 'pending',
								dependencies: []
							},
							{
								id: 2,
								title: 'Mock Subtask 2',
								description: 'Second mock subtask',
								status: 'pending',
								dependencies: [1]
							}
						]
			};

			return Promise.resolve(task);
		}
	);

const mockGenerateTaskFiles = jest.fn().mockResolvedValue(true);
const mockFindTaskById = jest.fn();
const mockTaskExists = jest.fn().mockReturnValue(true);

// Mock fs module to avoid file system operations
jest.mock('fs', () => ({
	existsSync: mockExistsSync,
	writeFileSync: mockWriteFileSync,
	readFileSync: mockReadFileSync,
	unlinkSync: mockUnlinkSync,
	mkdirSync: mockMkdirSync
}));

// Mock utils functions to avoid actual file operations
jest.mock('../../../scripts/modules/utils.js', () => ({
	readJSON: mockReadJSON,
	writeJSON: mockWriteJSON,
	enableSilentMode: mockEnableSilentMode,
	disableSilentMode: mockDisableSilentMode,
	readComplexityReport: mockReadComplexityReport,
	CONFIG: {
		model: 'claude-3-7-sonnet-20250219',
		maxTokens: 8192,
		temperature: 0.2,
		defaultSubtasks: 5
	}
}));

// Mock path-utils with findTasksJsonPath
jest.mock('../../../mcp-server/src/core/utils/path-utils.js', () => ({
	findTasksJsonPath: mockFindTasksJsonPath
}));

// Mock the AI module to prevent any real API calls
jest.mock('../../../scripts/modules/ai-services-unified.js', () => ({
	// Only generateTextService is mocked here; add mocks for generateObjectService
	// or streamTextService if future tests exercise them.
	generateTextService: jest.fn().mockResolvedValue('Mock AI Response')
}));

// Mock task-manager.js to avoid real operations
jest.mock('../../../scripts/modules/task-manager.js', () => ({
	expandTask: mockExpandTask,
	generateTaskFiles: mockGenerateTaskFiles,
	findTaskById: mockFindTaskById,
	taskExists: mockTaskExists
}));

// Import dependencies after mocks are set up
import { sampleTasks } from '../../fixtures/sample-tasks.js';

// Mock logger
const mockLogger = {
	info: jest.fn(),
	error: jest.fn(),
	debug: jest.fn(),
	warn: jest.fn()
};

// Mock session
const mockSession = {
	env: {
		ANTHROPIC_API_KEY: 'mock-api-key',
		MODEL: 'claude-3-sonnet-20240229',
		MAX_TOKENS: 4000,
		TEMPERATURE: '0.2'
	}
};

describe('MCP Server Direct Functions', () => {
	// Set up before each test
	beforeEach(() => {
		jest.clearAllMocks();

		// Default mockReadJSON implementation
		mockReadJSON.mockReturnValue(JSON.parse(JSON.stringify(sampleTasks)));

		// Default mockFindTaskById implementation
		mockFindTaskById.mockImplementation((tasks, taskId) => {
			const id = parseInt(taskId, 10);
			return tasks.find((t) => t.id === id);
		});

		// Default mockTaskExists implementation
		mockTaskExists.mockImplementation((tasks, taskId) => {
			const id = parseInt(taskId, 10);
			return tasks.some((t) => t.id === id);
		});

		// Default findTasksJsonPath implementation
		mockFindTasksJsonPath.mockImplementation((args) => {
			// Mock returning null for non-existent files
			if (args.file === 'non-existent-file.json') {
				return null;
			}
			return testTasksPath;
		});
	});

	describe('listTasksDirect', () => {
		// Sample complexity report for testing
		const mockComplexityReport = {
			meta: {
				generatedAt: '2025-03-24T20:01:35.986Z',
				tasksAnalyzed: 3,
				thresholdScore: 5,
				projectName: 'Test Project',
				usedResearch: false
			},
			complexityAnalysis: [
				{
					taskId: 1,
					taskTitle: 'Initialize Project',
					complexityScore: 3,
					recommendedSubtasks: 2
				},
				{
					taskId: 2,
					taskTitle: 'Create Core Functionality',
					complexityScore: 8,
					recommendedSubtasks: 5
				},
				{
					taskId: 3,
					taskTitle: 'Implement UI Components',
					complexityScore: 6,
					recommendedSubtasks: 4
				}
			]
		};

		// Test wrapper function that doesn't rely on the actual implementation
		async function testListTasks(args, mockLogger) {
			// File not found case
			if (args.file === 'non-existent-file.json') {
				mockLogger.error('Tasks file not found');
				return {
					success: false,
					error: {
						code: 'FILE_NOT_FOUND_ERROR',
						message: 'Tasks file not found'
					}
				};
			}

			// Check for complexity report
			const complexityReport = mockReadComplexityReport();
			let tasksData = [...sampleTasks.tasks];

			// Add complexity scores if report exists
			if (complexityReport && complexityReport.complexityAnalysis) {
				tasksData = tasksData.map((task) => {
					const analysis = complexityReport.complexityAnalysis.find(
						(a) => a.taskId === task.id
					);
					if (analysis) {
						return { ...task, complexityScore: analysis.complexityScore };
					}
					return task;
				});
			}

			// Success case
			if (!args.status && !args.withSubtasks) {
				return {
					success: true,
					data: {
						tasks: tasksData,
						stats: {
							total: tasksData.length,
							completed: tasksData.filter((t) => t.status === 'done').length,
							inProgress: tasksData.filter((t) => t.status === 'in-progress')
								.length,
							pending: tasksData.filter((t) => t.status === 'pending').length
						}
					}
				};
			}

			// Status filter case
			if (args.status) {
				const filteredTasks = tasksData.filter((t) => t.status === args.status);
				return {
					success: true,
					data: {
						tasks: filteredTasks,
						filter: args.status,
						stats: {
							total: tasksData.length,
							filtered: filteredTasks.length
						}
					}
				};
			}

			// Include subtasks case
			if (args.withSubtasks) {
				return {
					success: true,
					data: {
						tasks: tasksData,
						includeSubtasks: true,
						stats: {
							total: tasksData.length
						}
					}
				};
			}

			// Default case
			return {
				success: true,
				data: { tasks: [] }
			};
		}

		test('should return all tasks when no filter is provided', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath
			};

			// Act
			const result = await testListTasks(args, mockLogger);

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.tasks.length).toBe(sampleTasks.tasks.length);
			expect(result.data.stats.total).toBe(sampleTasks.tasks.length);
		});

		test('should filter tasks by status', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				status: 'pending'
			};

			// Act
			const result = await testListTasks(args, mockLogger);

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.filter).toBe('pending');
			// Should only include pending tasks
			result.data.tasks.forEach((task) => {
				expect(task.status).toBe('pending');
			});
		});

		test('should include subtasks when requested', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				withSubtasks: true
			};

			// Act
			const result = await testListTasks(args, mockLogger);

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.includeSubtasks).toBe(true);

			// Verify subtasks are included for tasks that have them
			const tasksWithSubtasks = result.data.tasks.filter(
				(t) => t.subtasks && t.subtasks.length > 0
			);
			expect(tasksWithSubtasks.length).toBeGreaterThan(0);
		});

		test('should handle file not found errors', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: 'non-existent-file.json'
			};

			// Act
			const result = await testListTasks(args, mockLogger);

			// Assert
			expect(result.success).toBe(false);
			expect(result.error.code).toBe('FILE_NOT_FOUND_ERROR');
			expect(mockLogger.error).toHaveBeenCalled();
		});

		test('should include complexity scores when complexity report exists', async () => {
			// Arrange
			mockReadComplexityReport.mockReturnValueOnce(mockComplexityReport);
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				withSubtasks: true
			};

			// Act
			const result = await testListTasks(args, mockLogger);
			// Assert
			expect(result.success).toBe(true);

			// Check that tasks have complexity scores from the report
			mockComplexityReport.complexityAnalysis.forEach((analysis) => {
				const task = result.data.tasks.find((t) => t.id === analysis.taskId);
				if (task) {
					expect(task.complexityScore).toBe(analysis.complexityScore);
				}
			});
		});
	});

	describe('expandTaskDirect', () => {
		// Test wrapper function that returns appropriate results based on the test case
		async function testExpandTask(args, mockLogger, options = {}) {
			// Missing task ID case
			if (!args.id) {
				mockLogger.error('Task ID is required');
				return {
					success: false,
					error: {
						code: 'INPUT_VALIDATION_ERROR',
						message: 'Task ID is required'
					}
				};
			}

			// Non-existent task ID case
			if (args.id === '999') {
				mockLogger.error(`Task with ID ${args.id} not found`);
				return {
					success: false,
					error: {
						code: 'TASK_NOT_FOUND',
						message: `Task with ID ${args.id} not found`
					}
				};
			}

			// Completed task case
			if (args.id === '1') {
				mockLogger.error(
					`Task ${args.id} is already marked as done and cannot be expanded`
				);
				return {
					success: false,
					error: {
						code: 'TASK_COMPLETED',
						message: `Task ${args.id} is already marked as done and cannot be expanded`
					}
				};
			}

			// For successful cases, record that functions were called but don't make real calls
			mockEnableSilentMode();

			// This is just a mock call that won't make real API requests
			// We're using mockExpandTask which is already a mock function
			const expandedTask = await mockExpandTask(
				parseInt(args.id, 10),
				args.num,
				args.research || false,
				args.prompt || '',
				{ mcpLog: mockLogger, session: options.session }
			);

			mockDisableSilentMode();

			return {
				success: true,
				data: {
					task: expandedTask,
					subtasksAdded: expandedTask.subtasks.length,
					hasExistingSubtasks: false
				}
			};
		}

		test('should expand a task with subtasks', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				id: '3', // ID 3 exists in sampleTasks with status 'pending'
				num: 2
			};

			// Act
			const result = await testExpandTask(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.task).toBeDefined();
			expect(result.data.task.subtasks).toBeDefined();
			expect(result.data.task.subtasks.length).toBe(2);
			expect(mockExpandTask).toHaveBeenCalledWith(
				3, // Task ID as number
				2, // num parameter
				false, // useResearch
				'', // prompt
				expect.objectContaining({
					mcpLog: mockLogger,
					session: mockSession
				})
			);
			expect(mockEnableSilentMode).toHaveBeenCalled();
			expect(mockDisableSilentMode).toHaveBeenCalled();
		});

		test('should handle missing task ID', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath
				// id is intentionally missing
			};

			// Act
			const result = await testExpandTask(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(false);
			expect(result.error.code).toBe('INPUT_VALIDATION_ERROR');
			expect(mockLogger.error).toHaveBeenCalled();
			// Make sure no real expand calls were made
			expect(mockExpandTask).not.toHaveBeenCalled();
		});

		test('should handle non-existent task ID', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				id: '999' // Non-existent task ID
			};

			// Act
			const result = await testExpandTask(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(false);
			expect(result.error.code).toBe('TASK_NOT_FOUND');
			expect(mockLogger.error).toHaveBeenCalled();
			// Make sure no real expand calls were made
			expect(mockExpandTask).not.toHaveBeenCalled();
		});

		test('should handle completed tasks', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				id: '1' // Task with 'done' status in sampleTasks
			};

			// Act
			const result = await testExpandTask(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(false);
			expect(result.error.code).toBe('TASK_COMPLETED');
			expect(mockLogger.error).toHaveBeenCalled();
			// Make sure no real expand calls were made
			expect(mockExpandTask).not.toHaveBeenCalled();
		});

		test('should use AI client when research flag is set', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				id: '3',
				research: true
			};

			// Act
			const result = await testExpandTask(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(true);
			expect(mockExpandTask).toHaveBeenCalledWith(
				3, // Task ID as number
				undefined, // args.num is undefined
				true, // useResearch should be true
				'', // prompt
				expect.objectContaining({
					mcpLog: mockLogger,
					session: mockSession
				})
			);
			// Verify the result includes research-backed subtasks
			expect(result.data.task.subtasks[0].title).toContain('Research-Backed');
		});
	});

	describe('expandAllTasksDirect', () => {
		// Test wrapper function that returns appropriate results based on the test case
		async function testExpandAllTasks(args, mockLogger, options = {}) {
			// For successful cases, record that functions were called but don't make real calls
			mockEnableSilentMode();

			// Mock expandAllTasks with a structured result object matching the shape
			// returned by the actual implementation
			const mockExpandAll = jest.fn().mockImplementation(async () => {
				return {
					success: true,
					expandedCount: 2,
					failedCount: 0,
					skippedCount: 1,
					tasksToExpand: 3,
					telemetryData: {
						timestamp: new Date().toISOString(),
						commandName: 'expand-all-tasks',
						totalCost: 0.05,
						totalTokens: 1000,
						inputTokens: 600,
						outputTokens: 400
					}
				};
			});

			// Call mock expandAllTasks with the correct signature
			const result = await mockExpandAll(
				args.file, // tasksPath
				args.num, // numSubtasks
				args.research || false, // useResearch
				args.prompt || '', // additionalContext
				args.force || false, // force
				{
					mcpLog: mockLogger,
					session: options.session,
					projectRoot: args.projectRoot
				}
			);

			mockDisableSilentMode();

			return {
				success: true,
				data: {
					message: `Expand all operation completed. Expanded: ${result.expandedCount}, Failed: ${result.failedCount}, Skipped: ${result.skippedCount}`,
					details: {
						expandedCount: result.expandedCount,
						failedCount: result.failedCount,
						skippedCount: result.skippedCount,
						tasksToExpand: result.tasksToExpand
					},
					telemetryData: result.telemetryData
				}
			};
		}

		test('should expand all pending tasks with subtasks', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				num: 3
			};

			// Act
			const result = await testExpandAllTasks(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.message).toMatch(/Expand all operation completed/);
			expect(result.data.details.expandedCount).toBe(2);
			expect(result.data.details.failedCount).toBe(0);
			expect(result.data.details.skippedCount).toBe(1);
			expect(result.data.details.tasksToExpand).toBe(3);
			expect(result.data.telemetryData).toBeDefined();
			expect(result.data.telemetryData.commandName).toBe('expand-all-tasks');
			expect(mockEnableSilentMode).toHaveBeenCalled();
			expect(mockDisableSilentMode).toHaveBeenCalled();
		});

		test('should handle research flag', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				research: true,
				num: 2
			};

			// Act
			const result = await testExpandAllTasks(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.details.expandedCount).toBe(2);
			expect(result.data.telemetryData).toBeDefined();
			expect(mockEnableSilentMode).toHaveBeenCalled();
			expect(mockDisableSilentMode).toHaveBeenCalled();
		});

		test('should handle force flag', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				force: true
			};

			// Act
			const result = await testExpandAllTasks(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.details.expandedCount).toBe(2);
			expect(result.data.telemetryData).toBeDefined();
			expect(mockEnableSilentMode).toHaveBeenCalled();
			expect(mockDisableSilentMode).toHaveBeenCalled();
		});

		test('should handle additional context/prompt', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				prompt: 'Additional context for subtasks'
			};

			// Act
			const result = await testExpandAllTasks(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.details.expandedCount).toBe(2);
			expect(result.data.telemetryData).toBeDefined();
			expect(mockEnableSilentMode).toHaveBeenCalled();
			expect(mockDisableSilentMode).toHaveBeenCalled();
		});

		test('should handle case with no eligible tasks', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				num: 3
			};

			// Act - Mock the scenario where no tasks are eligible for expansion
			async function testNoEligibleTasks(args, mockLogger, options = {}) {
				mockEnableSilentMode();

				const mockExpandAll = jest.fn().mockImplementation(async () => {
					return {
						success: true,
						expandedCount: 0,
						failedCount: 0,
						skippedCount: 0,
						tasksToExpand: 0,
						telemetryData: null,
						message: 'No tasks eligible for expansion.'
					};
				});

				const result = await mockExpandAll(
					args.file,
					args.num,
					false,
					'',
					false,
					{
						mcpLog: mockLogger,
						session: options.session,
						projectRoot: args.projectRoot
					},
					'json'
				);

				mockDisableSilentMode();

				return {
					success: true,
					data: {
						message: result.message,
						details: {
							expandedCount: result.expandedCount,
							failedCount: result.failedCount,
							skippedCount: result.skippedCount,
							tasksToExpand: result.tasksToExpand
						},
						telemetryData: result.telemetryData
					}
				};
			}

			const result = await testNoEligibleTasks(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.message).toBe('No tasks eligible for expansion.');
			expect(result.data.details.expandedCount).toBe(0);
			expect(result.data.details.tasksToExpand).toBe(0);
			expect(result.data.telemetryData).toBeNull();
		});
	});
});

```

--------------------------------------------------------------------------------
/scripts/modules/task-manager/add-task.js:
--------------------------------------------------------------------------------

```javascript
import path from 'path';
import chalk from 'chalk';
import boxen from 'boxen';
import Table from 'cli-table3';
import Fuse from 'fuse.js'; // Import Fuse.js for advanced fuzzy search

import {
	displayBanner,
	getStatusWithColor,
	startLoadingIndicator,
	stopLoadingIndicator,
	succeedLoadingIndicator,
	failLoadingIndicator,
	displayAiUsageSummary,
	displayContextAnalysis
} from '../ui.js';
import {
	readJSON,
	writeJSON,
	log as consoleLog,
	truncate,
	ensureTagMetadata,
	performCompleteTagMigration,
	markMigrationForNotice
} from '../utils.js';
import { generateObjectService } from '../ai-services-unified.js';
import { getDefaultPriority, hasCodebaseAnalysis } from '../config-manager.js';
import { getPromptManager } from '../prompt-manager.js';
import ContextGatherer from '../utils/contextGatherer.js';
import generateTaskFiles from './generate-task-files.js';
import { COMMAND_SCHEMAS } from '../../../src/schemas/registry.js';
import {
	TASK_PRIORITY_OPTIONS,
	DEFAULT_TASK_PRIORITY,
	isValidTaskPriority,
	normalizeTaskPriority
} from '../../../src/constants/task-priority.js';

/**
 * Get all tasks from all tags
 * @param {Object} rawData - The raw tagged data object
 * @returns {Array} A flat array of all task objects
 */
function getAllTasks(rawData) {
	let allTasks = [];
	for (const tagName in rawData) {
		if (
			Object.prototype.hasOwnProperty.call(rawData, tagName) &&
			rawData[tagName] &&
			Array.isArray(rawData[tagName].tasks)
		) {
			allTasks = allTasks.concat(rawData[tagName].tasks);
		}
	}
	return allTasks;
}
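
// Example (illustrative data): given
//   { master: { tasks: [{ id: 1 }] }, 'feature-x': { tasks: [{ id: 1 }, { id: 2 }] } }
// getAllTasks returns [{ id: 1 }, { id: 1 }, { id: 2 }]. IDs are only unique per
// tag, and addTask validates dependencies against this flat, cross-tag list.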

/**
 * Add a new task using AI
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {string} prompt - Description of the task to add (required for AI-driven creation)
 * @param {Array} dependencies - Task dependencies
 * @param {string} priority - Task priority
 * @param {Object} context - Context object containing session and potentially projectRoot
 * @param {Object} [context.session] - Session object from MCP server (optional)
 * @param {Object} [context.mcpLog] - MCP logger object (optional)
 * @param {string} [context.projectRoot] - Project root path (for MCP/env fallback)
 * @param {string} [context.commandName] - The name of the command being executed (for telemetry)
 * @param {string} [context.outputType] - The output type ('cli' or 'mcp', for telemetry)
 * @param {string} [context.tag] - Tag for the task (optional)
 * @param {string} outputFormat - Output format (text or json)
 * @param {Object} manualTaskData - Manual task data (optional, for direct task creation without AI)
 * @param {boolean} useResearch - Whether to use the research model (passed to unified service)
 * @returns {Promise<object>} An object containing newTaskId and telemetryData
 */
async function addTask(
	tasksPath,
	prompt,
	dependencies = [],
	priority = null,
	context = {},
	outputFormat = 'text', // Default to text for CLI
	manualTaskData = null,
	useResearch = false
) {
	const { session, mcpLog, projectRoot, commandName, outputType, tag } =
		context;
	const isMCP = !!mcpLog;

	// Create a consistent logFn object regardless of context
	const logFn = isMCP
		? mcpLog // Use MCP logger if provided
		: {
				// Create a wrapper around consoleLog for CLI
				info: (...args) => consoleLog('info', ...args),
				warn: (...args) => consoleLog('warn', ...args),
				error: (...args) => consoleLog('error', ...args),
				debug: (...args) => consoleLog('debug', ...args),
				success: (...args) => consoleLog('success', ...args)
			};

	// Validate priority - only accept high, medium, or low
	let effectivePriority =
		priority || getDefaultPriority(projectRoot) || DEFAULT_TASK_PRIORITY;

	// If priority is provided, validate and normalize it
	if (priority) {
		const normalizedPriority = normalizeTaskPriority(priority);
		if (normalizedPriority) {
			effectivePriority = normalizedPriority;
		} else {
			if (outputFormat === 'text') {
				consoleLog(
					'warn',
					`Invalid priority "${priority}". Using default priority "${DEFAULT_TASK_PRIORITY}".`
				);
			}
			effectivePriority = DEFAULT_TASK_PRIORITY;
		}
	}

	logFn.info(
		`Adding new task with prompt: "${prompt}", Priority: ${effectivePriority}, Dependencies: ${dependencies.join(', ') || 'None'}, Research: ${useResearch}, ProjectRoot: ${projectRoot}`
	);
	if (tag) {
		logFn.info(`Using tag context: ${tag}`);
	}

	let loadingIndicator = null;
	let aiServiceResponse = null; // To store the full response from AI service

	// Create custom reporter that checks for MCP log
	const report = (message, level = 'info') => {
		if (mcpLog) {
			mcpLog[level](message);
		} else if (outputFormat === 'text') {
			consoleLog(level, message);
		}
	};

	/**
	 * Recursively builds a dependency graph for a given task
	 * @param {Array} tasks - All tasks from tasks.json
	 * @param {number} taskId - ID of the task to analyze
	 * @param {Set} visited - Set of already visited task IDs
	 * @param {Map} depthMap - Map of task ID to its depth in the graph
	 * @param {number} depth - Current depth in the recursion
	 * @return {Object} Dependency graph data
	 */
	function buildDependencyGraph(
		tasks,
		taskId,
		visited = new Set(),
		depthMap = new Map(),
		depth = 0
	) {
		// Skip if we've already visited this task
		if (visited.has(taskId)) {
			return null;
		}

		// Find the task; skip if it doesn't exist
		const task = tasks.find((t) => t.id === taskId);
		if (!task) {
			return null;
		}

		// Mark as visited
		visited.add(taskId);

		// Update depth if this is a deeper path to this task
		if (!depthMap.has(taskId) || depth < depthMap.get(taskId)) {
			depthMap.set(taskId, depth);
		}

		// Process dependencies
		const dependencyData = [];
		if (task.dependencies && task.dependencies.length > 0) {
			for (const depId of task.dependencies) {
				const depData = buildDependencyGraph(
					tasks,
					depId,
					visited,
					depthMap,
					depth + 1
				);
				if (depData) {
					dependencyData.push(depData);
				}
			}
		}

		return {
			id: task.id,
			title: task.title,
			description: task.description,
			status: task.status,
			dependencies: dependencyData
		};
	}
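
	// Illustrative example (assumed data): with task 3 depending on [2] and task 2
	// depending on [1], buildDependencyGraph(tasks, 3) returns a nested structure
	//   { id: 3, ..., dependencies: [{ id: 2, ..., dependencies: [{ id: 1, ..., dependencies: [] }] }] }
	// while the shared depthMap keeps the shallowest depth at which each task was
	// reached (3 → 0, 2 → 1, 1 → 2). The visited set also guards against cycles.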

	try {
		// Read the existing tasks; we need the raw tagged data, so any resolved
		// view returned by readJSON is unwrapped via _rawTaggedData below
		let rawData = readJSON(tasksPath, projectRoot, tag);

		// Handle the case where readJSON returns resolved data with _rawTaggedData
		if (rawData && rawData._rawTaggedData) {
			// Use the raw tagged data and discard the resolved view
			rawData = rawData._rawTaggedData;
		}

		// If file doesn't exist or is invalid, create a new structure in memory
		if (!rawData) {
			report(
				'tasks.json not found or invalid. Initializing new structure.',
				'info'
			);
			rawData = {
				master: {
					tasks: [],
					metadata: {
						created: new Date().toISOString(),
						description: 'Default tasks context'
					}
				}
			};
			// Do not write the file here; it will be written later with the new task.
		}

		// Handle legacy format migration using utilities
		if (rawData && Array.isArray(rawData.tasks) && !rawData._rawTaggedData) {
			report('Legacy format detected. Migrating to tagged format...', 'info');

			// This is legacy format - migrate it to tagged format
			rawData = {
				master: {
					tasks: rawData.tasks,
					metadata: rawData.metadata || {
						created: new Date().toISOString(),
						updated: new Date().toISOString(),
						description: 'Tasks for master context'
					}
				}
			};
			// Ensure proper metadata using utility
			ensureTagMetadata(rawData.master, {
				description: 'Tasks for master context'
			});
			// Do not write the file here; it will be written later with the new task.

			// Perform complete migration (config.json, state.json)
			performCompleteTagMigration(tasksPath);
			markMigrationForNotice(tasksPath);

			report('Successfully migrated to tagged format.', 'success');
		}
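
		// For reference, the migration above rewrites the legacy shape
		//   { tasks: [...], metadata: {...} }
		// into the tagged shape
		//   { master: { tasks: [...], metadata: {...} } }
		// so each tag owns its own task list and metadata.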

		// Use the tag provided in context (resolving the active tag or falling
		// back to 'master' is handled upstream by the caller)
		const targetTag = tag;

		// Ensure the target tag exists
		if (!rawData[targetTag]) {
			report(
				`Tag "${targetTag}" does not exist. Please create it first using the 'add-tag' command.`,
				'error'
			);
			throw new Error(`Tag "${targetTag}" not found.`);
		}

		// Ensure the target tag has a tasks array and metadata object
		if (!rawData[targetTag].tasks) {
			rawData[targetTag].tasks = [];
		}
		if (!rawData[targetTag].metadata) {
			rawData[targetTag].metadata = {
				created: new Date().toISOString(),
				updated: new Date().toISOString(),
				description: ''
			};
		}

		// Get a flat list of ALL tasks across ALL tags to validate dependencies
		const allTasks = getAllTasks(rawData);

		// Find the highest task ID *within the target tag* to determine the next ID
		const tasksInTargetTag = rawData[targetTag].tasks;
		const highestId =
			tasksInTargetTag.length > 0
				? Math.max(...tasksInTargetTag.map((t) => t.id))
				: 0;
		const newTaskId = highestId + 1;
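		// Example: if the target tag holds tasks with IDs [1, 2, 5], highestId is 5
		// and the new task gets ID 6. IDs are computed per tag, so other tags may
		// independently reuse the same numbers.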

		// Only show UI box for CLI mode
		if (outputFormat === 'text') {
			console.log(
				boxen(chalk.white.bold(`Creating New Task #${newTaskId}`), {
					padding: 1,
					borderColor: 'blue',
					borderStyle: 'round',
					margin: { top: 1, bottom: 1 }
				})
			);
		}

		// Validate dependencies before proceeding
		const invalidDeps = dependencies.filter((depId) => {
			// Ensure depId is parsed as a number for comparison
			const numDepId = parseInt(depId, 10);
			return Number.isNaN(numDepId) || !allTasks.some((t) => t.id === numDepId);
		});

		if (invalidDeps.length > 0) {
			report(
				`The following dependencies do not exist or are invalid: ${invalidDeps.join(', ')}`,
				'warn'
			);
			report('Removing invalid dependencies...', 'info');
			dependencies = dependencies.filter(
				(depId) => !invalidDeps.includes(depId)
			);
		}
		// Ensure dependencies are numbers
		const numericDependencies = dependencies.map((dep) => parseInt(dep, 10));

		// Build dependency graphs for explicitly specified dependencies
		const dependencyGraphs = [];
		const allRelatedTaskIds = new Set();
		const depthMap = new Map();

		// First pass: build a complete dependency graph for each specified dependency
		for (const depId of numericDependencies) {
			const graph = buildDependencyGraph(allTasks, depId, new Set(), depthMap);
			if (graph) {
				dependencyGraphs.push(graph);
			}
		}

		// Second pass: collect all related task IDs for flat analysis
		for (const taskId of depthMap.keys()) {
			allRelatedTaskIds.add(taskId);
		}

		let taskData;

		// Check if manual task data is provided
		if (manualTaskData) {
			report('Using manually provided task data', 'info');
			taskData = manualTaskData;
			report('DEBUG: Taking MANUAL task data path.', 'debug');

			// Basic validation for manual data
			if (
				!taskData.title ||
				typeof taskData.title !== 'string' ||
				!taskData.description ||
				typeof taskData.description !== 'string'
			) {
				throw new Error(
					'Manual task data must include at least a title and description.'
				);
			}
		} else {
			report('DEBUG: Taking AI task generation path.', 'debug');
			// --- Refactored AI Interaction ---
			report(`Generating task data with AI with prompt:\n${prompt}`, 'info');

			// --- Use the new ContextGatherer ---
			const contextGatherer = new ContextGatherer(projectRoot, tag);
			const gatherResult = await contextGatherer.gather({
				semanticQuery: prompt,
				dependencyTasks: numericDependencies,
				format: 'research'
			});

			const gatheredContext = gatherResult.context;
			const analysisData = gatherResult.analysisData;

			// Display context analysis if not in silent mode
			if (outputFormat === 'text' && analysisData) {
				displayContextAnalysis(analysisData, prompt, gatheredContext.length);
			}

			// Add any manually provided details to the prompt for context
			let contextFromArgs = '';
			if (manualTaskData?.title)
				contextFromArgs += `\n- Suggested Title: "${manualTaskData.title}"`;
			if (manualTaskData?.description)
				contextFromArgs += `\n- Suggested Description: "${manualTaskData.description}"`;
			if (manualTaskData?.details)
				contextFromArgs += `\n- Additional Details Context: "${manualTaskData.details}"`;
			if (manualTaskData?.testStrategy)
				contextFromArgs += `\n- Additional Test Strategy Context: "${manualTaskData.testStrategy}"`;

			// Load prompts using PromptManager
			const promptManager = getPromptManager();
			const { systemPrompt, userPrompt } = await promptManager.loadPrompt(
				'add-task',
				{
					prompt,
					newTaskId,
					existingTasks: allTasks,
					gatheredContext,
					contextFromArgs,
					useResearch,
					priority: effectivePriority,
					dependencies: numericDependencies,
					hasCodebaseAnalysis: hasCodebaseAnalysis(
						useResearch,
						projectRoot,
						session
					),
					projectRoot: projectRoot
				}
			);

			// Start the loading indicator - only for text mode
			if (outputFormat === 'text') {
				loadingIndicator = startLoadingIndicator(
					`Generating new task with ${useResearch ? 'Research' : 'Main'} AI... \n`
				);
			}

			try {
				const serviceRole = useResearch ? 'research' : 'main';
				report('DEBUG: Calling generateObjectService...', 'debug');

				aiServiceResponse = await generateObjectService({
					// Capture the full response
					role: serviceRole,
					session: session,
					projectRoot: projectRoot,
					schema: COMMAND_SCHEMAS['add-task'],
					objectName: 'newTaskData',
					systemPrompt: systemPrompt,
					prompt: userPrompt,
					commandName: commandName || 'add-task', // Use passed commandName or default
					outputType: outputType || (isMCP ? 'mcp' : 'cli') // Use passed outputType or derive
				});
				report('DEBUG: generateObjectService returned successfully.', 'debug');

				if (!aiServiceResponse || !aiServiceResponse.mainResult) {
					throw new Error(
						'AI service did not return the expected object structure.'
					);
				}

				// Prefer mainResult if it looks like a valid task object, otherwise try mainResult.object
				if (
					aiServiceResponse.mainResult.title &&
					aiServiceResponse.mainResult.description
				) {
					taskData = aiServiceResponse.mainResult;
				} else if (
					aiServiceResponse.mainResult.object &&
					aiServiceResponse.mainResult.object.title &&
					aiServiceResponse.mainResult.object.description
				) {
					taskData = aiServiceResponse.mainResult.object;
				} else {
					throw new Error('AI service did not return a valid task object.');
				}

				report('Successfully generated task data from AI.', 'success');

				// Success! Show checkmark
				if (loadingIndicator) {
					succeedLoadingIndicator(
						loadingIndicator,
						'Task generated successfully'
					);
					loadingIndicator = null; // Clear it
				}
			} catch (error) {
				// Failure! Show X
				if (loadingIndicator) {
					failLoadingIndicator(loadingIndicator, 'AI generation failed');
					loadingIndicator = null;
				}
				report(
					`DEBUG: generateObjectService caught error: ${error.message}`,
					'debug'
				);
				report(`Error generating task with AI: ${error.message}`, 'error');
				throw error; // Re-throw error after logging
			} finally {
				report('DEBUG: generateObjectService finally block reached.', 'debug');
				// Clean up if somehow still running
				if (loadingIndicator) {
					stopLoadingIndicator(loadingIndicator);
				}
			}
			// --- End Refactored AI Interaction ---
		}

		// Create the new task object
		const newTask = {
			id: newTaskId,
			title: taskData.title,
			description: taskData.description,
			details: taskData.details || '',
			testStrategy: taskData.testStrategy || '',
			status: 'pending',
			dependencies: taskData.dependencies?.length
				? taskData.dependencies
				: numericDependencies, // Use AI-suggested dependencies if available, fallback to manually specified
			priority: effectivePriority,
			subtasks: [] // Initialize with empty subtasks array
		};

		// Additional check: validate all dependencies in the AI response
		if (taskData.dependencies?.length) {
			const allValidDeps = taskData.dependencies.every((depId) => {
				const numDepId = parseInt(depId, 10);
				return (
					!Number.isNaN(numDepId) && allTasks.some((t) => t.id === numDepId)
				);
			});

			if (!allValidDeps) {
				report(
					'AI suggested invalid dependencies. Filtering them out...',
					'warn'
				);
				newTask.dependencies = taskData.dependencies.filter((depId) => {
					const numDepId = parseInt(depId, 10);
					return (
						!Number.isNaN(numDepId) && allTasks.some((t) => t.id === numDepId)
					);
				});
			}
		}

		// Add the task to the tasks array OF THE CORRECT TAG
		rawData[targetTag].tasks.push(newTask);
		// Update the tag's metadata
		ensureTagMetadata(rawData[targetTag], {
			description: `Tasks for ${targetTag} context`
		});

		report('DEBUG: Writing tasks.json...', 'debug');
		// Write the updated raw data back to the file
		// The writeJSON function will automatically filter out _rawTaggedData
		writeJSON(tasksPath, rawData, projectRoot, targetTag);
		report('DEBUG: tasks.json written.', 'debug');

		// Show success message - only for text output (CLI)
		if (outputFormat === 'text') {
			const table = new Table({
				head: [
					chalk.cyan.bold('ID'),
					chalk.cyan.bold('Title'),
					chalk.cyan.bold('Description')
				],
				colWidths: [5, 30, 50] // Adjust widths as needed
			});

			table.push([
				newTask.id,
				truncate(newTask.title, 27),
				truncate(newTask.description, 47)
			]);

			console.log(chalk.green('✓ New task created successfully:'));
			console.log(table.toString());

			// Helper to get priority color
			const getPriorityColor = (p) => {
				switch (p?.toLowerCase()) {
					case 'high':
						return 'red';
					case 'low':
						return 'gray';
					default:
						return 'yellow';
				}
			};

			// Check if AI added new dependencies that weren't explicitly provided
			const aiAddedDeps = newTask.dependencies.filter(
				(dep) => !numericDependencies.includes(dep)
			);

			// Check if AI removed any dependencies that were explicitly provided
			const aiRemovedDeps = numericDependencies.filter(
				(dep) => !newTask.dependencies.includes(dep)
			);

			// Get task titles for dependencies to display
			const depTitles = {};
			newTask.dependencies.forEach((dep) => {
				const depTask = allTasks.find((t) => t.id === dep);
				if (depTask) {
					depTitles[dep] = truncate(depTask.title, 30);
				}
			});

			// Prepare dependency display string
			let dependencyDisplay = '';
			if (newTask.dependencies.length > 0) {
				dependencyDisplay = chalk.white('Dependencies:') + '\n';
				newTask.dependencies.forEach((dep) => {
					const isAiAdded = aiAddedDeps.includes(dep);
					const depType = isAiAdded ? chalk.yellow(' (AI suggested)') : '';
					dependencyDisplay +=
						chalk.white(
							`  - ${dep}: ${depTitles[dep] || 'Unknown task'}${depType}`
						) + '\n';
				});
			} else {
				dependencyDisplay = chalk.white('Dependencies: None') + '\n';
			}

			// Add info about removed dependencies if any
			if (aiRemovedDeps.length > 0) {
				dependencyDisplay +=
					chalk.gray('\nUser-specified dependencies that were not used:') +
					'\n';
				aiRemovedDeps.forEach((dep) => {
					const depTask = allTasks.find((t) => t.id === dep);
					const title = depTask ? truncate(depTask.title, 30) : 'Unknown task';
					dependencyDisplay += chalk.gray(`  - ${dep}: ${title}`) + '\n';
				});
			}

			// Add dependency analysis summary
			let dependencyAnalysis = '';
			if (aiAddedDeps.length > 0 || aiRemovedDeps.length > 0) {
				dependencyAnalysis =
					'\n' + chalk.white.bold('Dependency Analysis:') + '\n';
				if (aiAddedDeps.length > 0) {
					dependencyAnalysis +=
						chalk.green(
							`AI identified ${aiAddedDeps.length} additional dependencies`
						) + '\n';
				}
				if (aiRemovedDeps.length > 0) {
					dependencyAnalysis +=
						chalk.yellow(
							`AI excluded ${aiRemovedDeps.length} user-provided dependencies`
						) + '\n';
				}
			}

			// Show success message box
			console.log(
				boxen(
					chalk.white.bold(`Task ${newTaskId} Created Successfully`) +
						'\n\n' +
						chalk.white(`Title: ${newTask.title}`) +
						'\n' +
						chalk.white(`Status: ${getStatusWithColor(newTask.status)}`) +
						'\n' +
						chalk.white(
							`Priority: ${chalk[getPriorityColor(newTask.priority)](newTask.priority)}`
						) +
						'\n\n' +
						dependencyDisplay +
						dependencyAnalysis +
						'\n' +
						chalk.white.bold('Next Steps:') +
						'\n' +
						chalk.cyan(
							`1. Run ${chalk.yellow(`task-master show ${newTaskId}`)} to see complete task details`
						) +
						'\n' +
						chalk.cyan(
							`2. Run ${chalk.yellow(`task-master set-status --id=${newTaskId} --status=in-progress`)} to start working on it`
						) +
						'\n' +
						chalk.cyan(
							`3. Run ${chalk.yellow(`task-master expand --id=${newTaskId}`)} to break it down into subtasks`
						),
					{ padding: 1, borderColor: 'green', borderStyle: 'round' }
				)
			);

			// Display AI Usage Summary if telemetryData is available
			if (
				aiServiceResponse &&
				aiServiceResponse.telemetryData &&
				(outputType === 'cli' || outputType === 'text')
			) {
				displayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');
			}
		}

		report(
			`DEBUG: Returning new task ID: ${newTaskId} and telemetry.`,
			'debug'
		);
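		// telemetryData and tagInfo fall back to null when no AI call was made
		// (aiServiceResponse is never set on the manual-data path)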
		return {
			newTaskId,
			telemetryData: aiServiceResponse ? aiServiceResponse.telemetryData : null,
			tagInfo: aiServiceResponse ? aiServiceResponse.tagInfo : null
		};
	} catch (error) {
		// Stop any loading indicator on error
		if (loadingIndicator) {
			stopLoadingIndicator(loadingIndicator);
		}

		report(`Error adding task: ${error.message}`, 'error');
		if (outputFormat === 'text') {
			console.error(chalk.red(`Error: ${error.message}`));
		}
		// In MCP mode, we let the direct function handler catch and format
		throw error;
	}
}

export default addTask;

```
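For orientation, here is a minimal sketch of how this module might be invoked from a CLI command handler. The argument list shown (`tasksPath`, prompt, dependency IDs, priority, a context object, `outputFormat`) is an assumption inferred from how those values are used in the body above, not the verified signature; only the `{ newTaskId, telemetryData, tagInfo }` return shape and the re-thrown error are confirmed by the code shown.

```javascript
// Hypothetical usage sketch — parameter order and the context shape are
// assumptions; check the actual addTask signature earlier in this file.
import addTask from './add-task.js';

async function runAddTask() {
	try {
		const { newTaskId, telemetryData, tagInfo } = await addTask(
			'.taskmaster/tasks/tasks.json', // tasksPath (assumed location)
			'Add retry logic to the HTTP client', // AI prompt
			[3, 7], // manually specified dependency IDs
			'high', // priority
			{ projectRoot: process.cwd() }, // context object (assumed shape)
			'text' // outputFormat: 'text' renders the CLI table/boxen output
		);

		console.log(`Created task ${newTaskId}`);
		// telemetryData and tagInfo are null when no AI call was made
		if (telemetryData) {
			console.log('AI telemetry:', telemetryData, tagInfo);
		}
	} catch (error) {
		// addTask logs and re-throws, so the caller only handles the exit code
		process.exitCode = 1;
	}
}

runAddTask();
```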