# claude-task-master
tokens: 47014/50000 8/975 files (page 30/50)
This is page 30 of 50. Use http://codebase.md/eyaltoledano/claude-task-master?lines=false&page={x} to view the full context.

# Directory Structure

```
├── .changeset
│   ├── config.json
│   └── README.md
├── .claude
│   ├── commands
│   │   └── dedupe.md
│   └── TM_COMMANDS_GUIDE.md
├── .claude-plugin
│   └── marketplace.json
├── .coderabbit.yaml
├── .cursor
│   ├── mcp.json
│   └── rules
│       ├── ai_providers.mdc
│       ├── ai_services.mdc
│       ├── architecture.mdc
│       ├── changeset.mdc
│       ├── commands.mdc
│       ├── context_gathering.mdc
│       ├── cursor_rules.mdc
│       ├── dependencies.mdc
│       ├── dev_workflow.mdc
│       ├── git_workflow.mdc
│       ├── glossary.mdc
│       ├── mcp.mdc
│       ├── new_features.mdc
│       ├── self_improve.mdc
│       ├── tags.mdc
│       ├── taskmaster.mdc
│       ├── tasks.mdc
│       ├── telemetry.mdc
│       ├── test_workflow.mdc
│       ├── tests.mdc
│       ├── ui.mdc
│       └── utilities.mdc
├── .cursorignore
├── .env.example
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── enhancements---feature-requests.md
│   │   └── feedback.md
│   ├── PULL_REQUEST_TEMPLATE
│   │   ├── bugfix.md
│   │   ├── config.yml
│   │   ├── feature.md
│   │   └── integration.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── scripts
│   │   ├── auto-close-duplicates.mjs
│   │   ├── backfill-duplicate-comments.mjs
│   │   ├── check-pre-release-mode.mjs
│   │   ├── parse-metrics.mjs
│   │   ├── release.mjs
│   │   ├── tag-extension.mjs
│   │   ├── utils.mjs
│   │   └── validate-changesets.mjs
│   └── workflows
│       ├── auto-close-duplicates.yml
│       ├── backfill-duplicate-comments.yml
│       ├── ci.yml
│       ├── claude-dedupe-issues.yml
│       ├── claude-docs-trigger.yml
│       ├── claude-docs-updater.yml
│       ├── claude-issue-triage.yml
│       ├── claude.yml
│       ├── extension-ci.yml
│       ├── extension-release.yml
│       ├── log-issue-events.yml
│       ├── pre-release.yml
│       ├── release-check.yml
│       ├── release.yml
│       ├── update-models-md.yml
│       └── weekly-metrics-discord.yml
├── .gitignore
├── .kiro
│   ├── hooks
│   │   ├── tm-code-change-task-tracker.kiro.hook
│   │   ├── tm-complexity-analyzer.kiro.hook
│   │   ├── tm-daily-standup-assistant.kiro.hook
│   │   ├── tm-git-commit-task-linker.kiro.hook
│   │   ├── tm-pr-readiness-checker.kiro.hook
│   │   ├── tm-task-dependency-auto-progression.kiro.hook
│   │   └── tm-test-success-task-completer.kiro.hook
│   ├── settings
│   │   └── mcp.json
│   └── steering
│       ├── dev_workflow.md
│       ├── kiro_rules.md
│       ├── self_improve.md
│       ├── taskmaster_hooks_workflow.md
│       └── taskmaster.md
├── .manypkg.json
├── .mcp.json
├── .npmignore
├── .nvmrc
├── .taskmaster
│   ├── CLAUDE.md
│   ├── config.json
│   ├── docs
│   │   ├── autonomous-tdd-git-workflow.md
│   │   ├── MIGRATION-ROADMAP.md
│   │   ├── prd-tm-start.txt
│   │   ├── prd.txt
│   │   ├── README.md
│   │   ├── research
│   │   │   ├── 2025-06-14_how-can-i-improve-the-scope-up-and-scope-down-comm.md
│   │   │   ├── 2025-06-14_should-i-be-using-any-specific-libraries-for-this.md
│   │   │   ├── 2025-06-14_test-save-functionality.md
│   │   │   ├── 2025-06-14_test-the-fix-for-duplicate-saves-final-test.md
│   │   │   └── 2025-08-01_do-we-need-to-add-new-commands-or-can-we-just-weap.md
│   │   ├── task-template-importing-prd.txt
│   │   ├── tdd-workflow-phase-0-spike.md
│   │   ├── tdd-workflow-phase-1-core-rails.md
│   │   ├── tdd-workflow-phase-1-orchestrator.md
│   │   ├── tdd-workflow-phase-2-pr-resumability.md
│   │   ├── tdd-workflow-phase-3-extensibility-guardrails.md
│   │   ├── test-prd.txt
│   │   └── tm-core-phase-1.txt
│   ├── reports
│   │   ├── task-complexity-report_autonomous-tdd-git-workflow.json
│   │   ├── task-complexity-report_cc-kiro-hooks.json
│   │   ├── task-complexity-report_tdd-phase-1-core-rails.json
│   │   ├── task-complexity-report_tdd-workflow-phase-0.json
│   │   ├── task-complexity-report_test-prd-tag.json
│   │   ├── task-complexity-report_tm-core-phase-1.json
│   │   ├── task-complexity-report.json
│   │   └── tm-core-complexity.json
│   ├── state.json
│   ├── tasks
│   │   ├── task_001_tm-start.txt
│   │   ├── task_002_tm-start.txt
│   │   ├── task_003_tm-start.txt
│   │   ├── task_004_tm-start.txt
│   │   ├── task_007_tm-start.txt
│   │   └── tasks.json
│   └── templates
│       ├── example_prd_rpg.md
│       └── example_prd.md
├── .vscode
│   ├── extensions.json
│   └── settings.json
├── apps
│   ├── cli
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── src
│   │   │   ├── command-registry.ts
│   │   │   ├── commands
│   │   │   │   ├── auth.command.ts
│   │   │   │   ├── autopilot
│   │   │   │   │   ├── abort.command.ts
│   │   │   │   │   ├── commit.command.ts
│   │   │   │   │   ├── complete.command.ts
│   │   │   │   │   ├── index.ts
│   │   │   │   │   ├── next.command.ts
│   │   │   │   │   ├── resume.command.ts
│   │   │   │   │   ├── shared.ts
│   │   │   │   │   ├── start.command.ts
│   │   │   │   │   └── status.command.ts
│   │   │   │   ├── briefs.command.ts
│   │   │   │   ├── context.command.ts
│   │   │   │   ├── export.command.ts
│   │   │   │   ├── list.command.ts
│   │   │   │   ├── models
│   │   │   │   │   ├── custom-providers.ts
│   │   │   │   │   ├── fetchers.ts
│   │   │   │   │   ├── index.ts
│   │   │   │   │   ├── prompts.ts
│   │   │   │   │   ├── setup.ts
│   │   │   │   │   └── types.ts
│   │   │   │   ├── next.command.ts
│   │   │   │   ├── set-status.command.ts
│   │   │   │   ├── show.command.ts
│   │   │   │   ├── start.command.ts
│   │   │   │   └── tags.command.ts
│   │   │   ├── index.ts
│   │   │   ├── lib
│   │   │   │   └── model-management.ts
│   │   │   ├── types
│   │   │   │   └── tag-management.d.ts
│   │   │   ├── ui
│   │   │   │   ├── components
│   │   │   │   │   ├── cardBox.component.ts
│   │   │   │   │   ├── dashboard.component.ts
│   │   │   │   │   ├── header.component.ts
│   │   │   │   │   ├── index.ts
│   │   │   │   │   ├── next-task.component.ts
│   │   │   │   │   ├── suggested-steps.component.ts
│   │   │   │   │   └── task-detail.component.ts
│   │   │   │   ├── display
│   │   │   │   │   ├── messages.ts
│   │   │   │   │   └── tables.ts
│   │   │   │   ├── formatters
│   │   │   │   │   ├── complexity-formatters.ts
│   │   │   │   │   ├── dependency-formatters.ts
│   │   │   │   │   ├── priority-formatters.ts
│   │   │   │   │   ├── status-formatters.spec.ts
│   │   │   │   │   └── status-formatters.ts
│   │   │   │   ├── index.ts
│   │   │   │   └── layout
│   │   │   │       ├── helpers.spec.ts
│   │   │   │       └── helpers.ts
│   │   │   └── utils
│   │   │       ├── auth-helpers.ts
│   │   │       ├── auto-update.ts
│   │   │       ├── brief-selection.ts
│   │   │       ├── display-helpers.ts
│   │   │       ├── error-handler.ts
│   │   │       ├── index.ts
│   │   │       ├── project-root.ts
│   │   │       ├── task-status.ts
│   │   │       ├── ui.spec.ts
│   │   │       └── ui.ts
│   │   ├── tests
│   │   │   ├── integration
│   │   │   │   └── commands
│   │   │   │       └── autopilot
│   │   │   │           └── workflow.test.ts
│   │   │   └── unit
│   │   │       ├── commands
│   │   │       │   ├── autopilot
│   │   │       │   │   └── shared.test.ts
│   │   │       │   ├── list.command.spec.ts
│   │   │       │   └── show.command.spec.ts
│   │   │       └── ui
│   │   │           └── dashboard.component.spec.ts
│   │   ├── tsconfig.json
│   │   └── vitest.config.ts
│   ├── docs
│   │   ├── archive
│   │   │   ├── ai-client-utils-example.mdx
│   │   │   ├── ai-development-workflow.mdx
│   │   │   ├── command-reference.mdx
│   │   │   ├── configuration.mdx
│   │   │   ├── cursor-setup.mdx
│   │   │   ├── examples.mdx
│   │   │   └── Installation.mdx
│   │   ├── best-practices
│   │   │   ├── advanced-tasks.mdx
│   │   │   ├── configuration-advanced.mdx
│   │   │   └── index.mdx
│   │   ├── capabilities
│   │   │   ├── cli-root-commands.mdx
│   │   │   ├── index.mdx
│   │   │   ├── mcp.mdx
│   │   │   ├── rpg-method.mdx
│   │   │   └── task-structure.mdx
│   │   ├── CHANGELOG.md
│   │   ├── command-reference.mdx
│   │   ├── configuration.mdx
│   │   ├── docs.json
│   │   ├── favicon.svg
│   │   ├── getting-started
│   │   │   ├── api-keys.mdx
│   │   │   ├── contribute.mdx
│   │   │   ├── faq.mdx
│   │   │   └── quick-start
│   │   │       ├── configuration-quick.mdx
│   │   │       ├── execute-quick.mdx
│   │   │       ├── installation.mdx
│   │   │       ├── moving-forward.mdx
│   │   │       ├── prd-quick.mdx
│   │   │       ├── quick-start.mdx
│   │   │       ├── requirements.mdx
│   │   │       ├── rules-quick.mdx
│   │   │       └── tasks-quick.mdx
│   │   ├── introduction.mdx
│   │   ├── licensing.md
│   │   ├── logo
│   │   │   ├── dark.svg
│   │   │   ├── light.svg
│   │   │   └── task-master-logo.png
│   │   ├── package.json
│   │   ├── README.md
│   │   ├── style.css
│   │   ├── tdd-workflow
│   │   │   ├── ai-agent-integration.mdx
│   │   │   └── quickstart.mdx
│   │   ├── vercel.json
│   │   └── whats-new.mdx
│   ├── extension
│   │   ├── .vscodeignore
│   │   ├── assets
│   │   │   ├── banner.png
│   │   │   ├── icon-dark.svg
│   │   │   ├── icon-light.svg
│   │   │   ├── icon.png
│   │   │   ├── screenshots
│   │   │   │   ├── kanban-board.png
│   │   │   │   └── task-details.png
│   │   │   └── sidebar-icon.svg
│   │   ├── CHANGELOG.md
│   │   ├── components.json
│   │   ├── docs
│   │   │   ├── extension-CI-setup.md
│   │   │   └── extension-development-guide.md
│   │   ├── esbuild.js
│   │   ├── LICENSE
│   │   ├── package.json
│   │   ├── package.mjs
│   │   ├── package.publish.json
│   │   ├── README.md
│   │   ├── src
│   │   │   ├── components
│   │   │   │   ├── ConfigView.tsx
│   │   │   │   ├── constants.ts
│   │   │   │   ├── TaskDetails
│   │   │   │   │   ├── AIActionsSection.tsx
│   │   │   │   │   ├── DetailsSection.tsx
│   │   │   │   │   ├── PriorityBadge.tsx
│   │   │   │   │   ├── SubtasksSection.tsx
│   │   │   │   │   ├── TaskMetadataSidebar.tsx
│   │   │   │   │   └── useTaskDetails.ts
│   │   │   │   ├── TaskDetailsView.tsx
│   │   │   │   ├── TaskMasterLogo.tsx
│   │   │   │   └── ui
│   │   │   │       ├── badge.tsx
│   │   │   │       ├── breadcrumb.tsx
│   │   │   │       ├── button.tsx
│   │   │   │       ├── card.tsx
│   │   │   │       ├── collapsible.tsx
│   │   │   │       ├── CollapsibleSection.tsx
│   │   │   │       ├── dropdown-menu.tsx
│   │   │   │       ├── label.tsx
│   │   │   │       ├── scroll-area.tsx
│   │   │   │       ├── separator.tsx
│   │   │   │       ├── shadcn-io
│   │   │   │       │   └── kanban
│   │   │   │       │       └── index.tsx
│   │   │   │       └── textarea.tsx
│   │   │   ├── extension.ts
│   │   │   ├── index.ts
│   │   │   ├── lib
│   │   │   │   └── utils.ts
│   │   │   ├── services
│   │   │   │   ├── config-service.ts
│   │   │   │   ├── error-handler.ts
│   │   │   │   ├── notification-preferences.ts
│   │   │   │   ├── polling-service.ts
│   │   │   │   ├── polling-strategies.ts
│   │   │   │   ├── sidebar-webview-manager.ts
│   │   │   │   ├── task-repository.ts
│   │   │   │   ├── terminal-manager.ts
│   │   │   │   └── webview-manager.ts
│   │   │   ├── test
│   │   │   │   └── extension.test.ts
│   │   │   ├── utils
│   │   │   │   ├── configManager.ts
│   │   │   │   ├── connectionManager.ts
│   │   │   │   ├── errorHandler.ts
│   │   │   │   ├── event-emitter.ts
│   │   │   │   ├── logger.ts
│   │   │   │   ├── mcpClient.ts
│   │   │   │   ├── notificationPreferences.ts
│   │   │   │   └── task-master-api
│   │   │   │       ├── cache
│   │   │   │       │   └── cache-manager.ts
│   │   │   │       ├── index.ts
│   │   │   │       ├── mcp-client.ts
│   │   │   │       ├── transformers
│   │   │   │       │   └── task-transformer.ts
│   │   │   │       └── types
│   │   │   │           └── index.ts
│   │   │   └── webview
│   │   │       ├── App.tsx
│   │   │       ├── components
│   │   │       │   ├── AppContent.tsx
│   │   │       │   ├── EmptyState.tsx
│   │   │       │   ├── ErrorBoundary.tsx
│   │   │       │   ├── PollingStatus.tsx
│   │   │       │   ├── PriorityBadge.tsx
│   │   │       │   ├── SidebarView.tsx
│   │   │       │   ├── TagDropdown.tsx
│   │   │       │   ├── TaskCard.tsx
│   │   │       │   ├── TaskEditModal.tsx
│   │   │       │   ├── TaskMasterKanban.tsx
│   │   │       │   ├── ToastContainer.tsx
│   │   │       │   └── ToastNotification.tsx
│   │   │       ├── constants
│   │   │       │   └── index.ts
│   │   │       ├── contexts
│   │   │       │   └── VSCodeContext.tsx
│   │   │       ├── hooks
│   │   │       │   ├── useTaskQueries.ts
│   │   │       │   ├── useVSCodeMessages.ts
│   │   │       │   └── useWebviewHeight.ts
│   │   │       ├── index.css
│   │   │       ├── index.tsx
│   │   │       ├── providers
│   │   │       │   └── QueryProvider.tsx
│   │   │       ├── reducers
│   │   │       │   └── appReducer.ts
│   │   │       ├── sidebar.tsx
│   │   │       ├── types
│   │   │       │   └── index.ts
│   │   │       └── utils
│   │   │           ├── logger.ts
│   │   │           └── toast.ts
│   │   └── tsconfig.json
│   └── mcp
│       ├── CHANGELOG.md
│       ├── package.json
│       ├── src
│       │   ├── index.ts
│       │   ├── shared
│       │   │   ├── types.ts
│       │   │   └── utils.ts
│       │   └── tools
│       │       ├── autopilot
│       │       │   ├── abort.tool.ts
│       │       │   ├── commit.tool.ts
│       │       │   ├── complete.tool.ts
│       │       │   ├── finalize.tool.ts
│       │       │   ├── index.ts
│       │       │   ├── next.tool.ts
│       │       │   ├── resume.tool.ts
│       │       │   ├── start.tool.ts
│       │       │   └── status.tool.ts
│       │       ├── README-ZOD-V3.md
│       │       └── tasks
│       │           ├── get-task.tool.ts
│       │           ├── get-tasks.tool.ts
│       │           └── index.ts
│       ├── tsconfig.json
│       └── vitest.config.ts
├── assets
│   ├── .windsurfrules
│   ├── AGENTS.md
│   ├── claude
│   │   └── TM_COMMANDS_GUIDE.md
│   ├── config.json
│   ├── env.example
│   ├── example_prd_rpg.txt
│   ├── example_prd.txt
│   ├── GEMINI.md
│   ├── gitignore
│   ├── kiro-hooks
│   │   ├── tm-code-change-task-tracker.kiro.hook
│   │   ├── tm-complexity-analyzer.kiro.hook
│   │   ├── tm-daily-standup-assistant.kiro.hook
│   │   ├── tm-git-commit-task-linker.kiro.hook
│   │   ├── tm-pr-readiness-checker.kiro.hook
│   │   ├── tm-task-dependency-auto-progression.kiro.hook
│   │   └── tm-test-success-task-completer.kiro.hook
│   ├── roocode
│   │   ├── .roo
│   │   │   ├── rules-architect
│   │   │   │   └── architect-rules
│   │   │   ├── rules-ask
│   │   │   │   └── ask-rules
│   │   │   ├── rules-code
│   │   │   │   └── code-rules
│   │   │   ├── rules-debug
│   │   │   │   └── debug-rules
│   │   │   ├── rules-orchestrator
│   │   │   │   └── orchestrator-rules
│   │   │   └── rules-test
│   │   │       └── test-rules
│   │   └── .roomodes
│   ├── rules
│   │   ├── cursor_rules.mdc
│   │   ├── dev_workflow.mdc
│   │   ├── self_improve.mdc
│   │   ├── taskmaster_hooks_workflow.mdc
│   │   └── taskmaster.mdc
│   └── scripts_README.md
├── bin
│   └── task-master.js
├── biome.json
├── CHANGELOG.md
├── CLAUDE_CODE_PLUGIN.md
├── CLAUDE.md
├── context
│   ├── chats
│   │   ├── add-task-dependencies-1.md
│   │   └── max-min-tokens.txt.md
│   ├── fastmcp-core.txt
│   ├── fastmcp-docs.txt
│   ├── MCP_INTEGRATION.md
│   ├── mcp-js-sdk-docs.txt
│   ├── mcp-protocol-repo.txt
│   ├── mcp-protocol-schema-03262025.json
│   └── mcp-protocol-spec.txt
├── CONTRIBUTING.md
├── docs
│   ├── claude-code-integration.md
│   ├── CLI-COMMANDER-PATTERN.md
│   ├── command-reference.md
│   ├── configuration.md
│   ├── contributor-docs
│   │   ├── testing-roo-integration.md
│   │   └── worktree-setup.md
│   ├── cross-tag-task-movement.md
│   ├── examples
│   │   ├── claude-code-usage.md
│   │   └── codex-cli-usage.md
│   ├── examples.md
│   ├── licensing.md
│   ├── mcp-provider-guide.md
│   ├── mcp-provider.md
│   ├── migration-guide.md
│   ├── models.md
│   ├── providers
│   │   ├── codex-cli.md
│   │   └── gemini-cli.md
│   ├── README.md
│   ├── scripts
│   │   └── models-json-to-markdown.js
│   ├── task-structure.md
│   └── tutorial.md
├── images
│   ├── hamster-hiring.png
│   └── logo.png
├── index.js
├── jest.config.js
├── jest.resolver.cjs
├── LICENSE
├── llms-install.md
├── mcp-server
│   ├── server.js
│   └── src
│       ├── core
│       │   ├── __tests__
│       │   │   └── context-manager.test.js
│       │   ├── context-manager.js
│       │   ├── direct-functions
│       │   │   ├── add-dependency.js
│       │   │   ├── add-subtask.js
│       │   │   ├── add-tag.js
│       │   │   ├── add-task.js
│       │   │   ├── analyze-task-complexity.js
│       │   │   ├── cache-stats.js
│       │   │   ├── clear-subtasks.js
│       │   │   ├── complexity-report.js
│       │   │   ├── copy-tag.js
│       │   │   ├── create-tag-from-branch.js
│       │   │   ├── delete-tag.js
│       │   │   ├── expand-all-tasks.js
│       │   │   ├── expand-task.js
│       │   │   ├── fix-dependencies.js
│       │   │   ├── generate-task-files.js
│       │   │   ├── initialize-project.js
│       │   │   ├── list-tags.js
│       │   │   ├── models.js
│       │   │   ├── move-task-cross-tag.js
│       │   │   ├── move-task.js
│       │   │   ├── next-task.js
│       │   │   ├── parse-prd.js
│       │   │   ├── remove-dependency.js
│       │   │   ├── remove-subtask.js
│       │   │   ├── remove-task.js
│       │   │   ├── rename-tag.js
│       │   │   ├── research.js
│       │   │   ├── response-language.js
│       │   │   ├── rules.js
│       │   │   ├── scope-down.js
│       │   │   ├── scope-up.js
│       │   │   ├── set-task-status.js
│       │   │   ├── update-subtask-by-id.js
│       │   │   ├── update-task-by-id.js
│       │   │   ├── update-tasks.js
│       │   │   ├── use-tag.js
│       │   │   └── validate-dependencies.js
│       │   ├── task-master-core.js
│       │   └── utils
│       │       ├── env-utils.js
│       │       └── path-utils.js
│       ├── custom-sdk
│       │   ├── errors.js
│       │   ├── index.js
│       │   ├── json-extractor.js
│       │   ├── language-model.js
│       │   ├── message-converter.js
│       │   └── schema-converter.js
│       ├── index.js
│       ├── logger.js
│       ├── providers
│       │   └── mcp-provider.js
│       └── tools
│           ├── add-dependency.js
│           ├── add-subtask.js
│           ├── add-tag.js
│           ├── add-task.js
│           ├── analyze.js
│           ├── clear-subtasks.js
│           ├── complexity-report.js
│           ├── copy-tag.js
│           ├── delete-tag.js
│           ├── expand-all.js
│           ├── expand-task.js
│           ├── fix-dependencies.js
│           ├── generate.js
│           ├── get-operation-status.js
│           ├── index.js
│           ├── initialize-project.js
│           ├── list-tags.js
│           ├── models.js
│           ├── move-task.js
│           ├── next-task.js
│           ├── parse-prd.js
│           ├── README-ZOD-V3.md
│           ├── remove-dependency.js
│           ├── remove-subtask.js
│           ├── remove-task.js
│           ├── rename-tag.js
│           ├── research.js
│           ├── response-language.js
│           ├── rules.js
│           ├── scope-down.js
│           ├── scope-up.js
│           ├── set-task-status.js
│           ├── tool-registry.js
│           ├── update-subtask.js
│           ├── update-task.js
│           ├── update.js
│           ├── use-tag.js
│           ├── utils.js
│           └── validate-dependencies.js
├── mcp-test.js
├── output.json
├── package-lock.json
├── package.json
├── packages
│   ├── ai-sdk-provider-grok-cli
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── README.md
│   │   ├── src
│   │   │   ├── errors.test.ts
│   │   │   ├── errors.ts
│   │   │   ├── grok-cli-language-model.ts
│   │   │   ├── grok-cli-provider.test.ts
│   │   │   ├── grok-cli-provider.ts
│   │   │   ├── index.ts
│   │   │   ├── json-extractor.test.ts
│   │   │   ├── json-extractor.ts
│   │   │   ├── message-converter.test.ts
│   │   │   ├── message-converter.ts
│   │   │   └── types.ts
│   │   └── tsconfig.json
│   ├── build-config
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── src
│   │   │   └── tsdown.base.ts
│   │   └── tsconfig.json
│   ├── claude-code-plugin
│   │   ├── .claude-plugin
│   │   │   └── plugin.json
│   │   ├── .gitignore
│   │   ├── agents
│   │   │   ├── task-checker.md
│   │   │   ├── task-executor.md
│   │   │   └── task-orchestrator.md
│   │   ├── CHANGELOG.md
│   │   ├── commands
│   │   │   ├── add-dependency.md
│   │   │   ├── add-subtask.md
│   │   │   ├── add-task.md
│   │   │   ├── analyze-complexity.md
│   │   │   ├── analyze-project.md
│   │   │   ├── auto-implement-tasks.md
│   │   │   ├── command-pipeline.md
│   │   │   ├── complexity-report.md
│   │   │   ├── convert-task-to-subtask.md
│   │   │   ├── expand-all-tasks.md
│   │   │   ├── expand-task.md
│   │   │   ├── fix-dependencies.md
│   │   │   ├── generate-tasks.md
│   │   │   ├── help.md
│   │   │   ├── init-project-quick.md
│   │   │   ├── init-project.md
│   │   │   ├── install-taskmaster.md
│   │   │   ├── learn.md
│   │   │   ├── list-tasks-by-status.md
│   │   │   ├── list-tasks-with-subtasks.md
│   │   │   ├── list-tasks.md
│   │   │   ├── next-task.md
│   │   │   ├── parse-prd-with-research.md
│   │   │   ├── parse-prd.md
│   │   │   ├── project-status.md
│   │   │   ├── quick-install-taskmaster.md
│   │   │   ├── remove-all-subtasks.md
│   │   │   ├── remove-dependency.md
│   │   │   ├── remove-subtask.md
│   │   │   ├── remove-subtasks.md
│   │   │   ├── remove-task.md
│   │   │   ├── setup-models.md
│   │   │   ├── show-task.md
│   │   │   ├── smart-workflow.md
│   │   │   ├── sync-readme.md
│   │   │   ├── tm-main.md
│   │   │   ├── to-cancelled.md
│   │   │   ├── to-deferred.md
│   │   │   ├── to-done.md
│   │   │   ├── to-in-progress.md
│   │   │   ├── to-pending.md
│   │   │   ├── to-review.md
│   │   │   ├── update-single-task.md
│   │   │   ├── update-task.md
│   │   │   ├── update-tasks-from-id.md
│   │   │   ├── validate-dependencies.md
│   │   │   └── view-models.md
│   │   ├── mcp.json
│   │   └── package.json
│   ├── tm-bridge
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── README.md
│   │   ├── src
│   │   │   ├── add-tag-bridge.ts
│   │   │   ├── bridge-types.ts
│   │   │   ├── bridge-utils.ts
│   │   │   ├── expand-bridge.ts
│   │   │   ├── index.ts
│   │   │   ├── tags-bridge.ts
│   │   │   ├── update-bridge.ts
│   │   │   └── use-tag-bridge.ts
│   │   └── tsconfig.json
│   └── tm-core
│       ├── .gitignore
│       ├── CHANGELOG.md
│       ├── docs
│       │   └── listTasks-architecture.md
│       ├── package.json
│       ├── POC-STATUS.md
│       ├── README.md
│       ├── src
│       │   ├── common
│       │   │   ├── constants
│       │   │   │   ├── index.ts
│       │   │   │   ├── paths.ts
│       │   │   │   └── providers.ts
│       │   │   ├── errors
│       │   │   │   ├── index.ts
│       │   │   │   └── task-master-error.ts
│       │   │   ├── interfaces
│       │   │   │   ├── configuration.interface.ts
│       │   │   │   ├── index.ts
│       │   │   │   └── storage.interface.ts
│       │   │   ├── logger
│       │   │   │   ├── factory.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── logger.spec.ts
│       │   │   │   └── logger.ts
│       │   │   ├── mappers
│       │   │   │   ├── TaskMapper.test.ts
│       │   │   │   └── TaskMapper.ts
│       │   │   ├── types
│       │   │   │   ├── database.types.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── legacy.ts
│       │   │   │   └── repository-types.ts
│       │   │   └── utils
│       │   │       ├── git-utils.ts
│       │   │       ├── id-generator.ts
│       │   │       ├── index.ts
│       │   │       ├── path-helpers.ts
│       │   │       ├── path-normalizer.spec.ts
│       │   │       ├── path-normalizer.ts
│       │   │       ├── project-root-finder.spec.ts
│       │   │       ├── project-root-finder.ts
│       │   │       ├── run-id-generator.spec.ts
│       │   │       └── run-id-generator.ts
│       │   ├── index.ts
│       │   ├── modules
│       │   │   ├── ai
│       │   │   │   ├── index.ts
│       │   │   │   ├── interfaces
│       │   │   │   │   └── ai-provider.interface.ts
│       │   │   │   └── providers
│       │   │   │       ├── base-provider.ts
│       │   │   │       └── index.ts
│       │   │   ├── auth
│       │   │   │   ├── auth-domain.spec.ts
│       │   │   │   ├── auth-domain.ts
│       │   │   │   ├── config.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── managers
│       │   │   │   │   ├── auth-manager.spec.ts
│       │   │   │   │   └── auth-manager.ts
│       │   │   │   ├── services
│       │   │   │   │   ├── context-store.ts
│       │   │   │   │   ├── oauth-service.ts
│       │   │   │   │   ├── organization.service.ts
│       │   │   │   │   ├── supabase-session-storage.spec.ts
│       │   │   │   │   └── supabase-session-storage.ts
│       │   │   │   └── types.ts
│       │   │   ├── briefs
│       │   │   │   ├── briefs-domain.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── services
│       │   │   │   │   └── brief-service.ts
│       │   │   │   ├── types.ts
│       │   │   │   └── utils
│       │   │   │       └── url-parser.ts
│       │   │   ├── commands
│       │   │   │   └── index.ts
│       │   │   ├── config
│       │   │   │   ├── config-domain.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── managers
│       │   │   │   │   ├── config-manager.spec.ts
│       │   │   │   │   └── config-manager.ts
│       │   │   │   └── services
│       │   │   │       ├── config-loader.service.spec.ts
│       │   │   │       ├── config-loader.service.ts
│       │   │   │       ├── config-merger.service.spec.ts
│       │   │   │       ├── config-merger.service.ts
│       │   │   │       ├── config-persistence.service.spec.ts
│       │   │   │       ├── config-persistence.service.ts
│       │   │   │       ├── environment-config-provider.service.spec.ts
│       │   │   │       ├── environment-config-provider.service.ts
│       │   │   │       ├── index.ts
│       │   │   │       ├── runtime-state-manager.service.spec.ts
│       │   │   │       └── runtime-state-manager.service.ts
│       │   │   ├── dependencies
│       │   │   │   └── index.ts
│       │   │   ├── execution
│       │   │   │   ├── executors
│       │   │   │   │   ├── base-executor.ts
│       │   │   │   │   ├── claude-executor.ts
│       │   │   │   │   └── executor-factory.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── services
│       │   │   │   │   └── executor-service.ts
│       │   │   │   └── types.ts
│       │   │   ├── git
│       │   │   │   ├── adapters
│       │   │   │   │   ├── git-adapter.test.ts
│       │   │   │   │   └── git-adapter.ts
│       │   │   │   ├── git-domain.ts
│       │   │   │   ├── index.ts
│       │   │   │   └── services
│       │   │   │       ├── branch-name-generator.spec.ts
│       │   │   │       ├── branch-name-generator.ts
│       │   │   │       ├── commit-message-generator.test.ts
│       │   │   │       ├── commit-message-generator.ts
│       │   │   │       ├── scope-detector.test.ts
│       │   │   │       ├── scope-detector.ts
│       │   │   │       ├── template-engine.test.ts
│       │   │   │       └── template-engine.ts
│       │   │   ├── integration
│       │   │   │   ├── clients
│       │   │   │   │   ├── index.ts
│       │   │   │   │   └── supabase-client.ts
│       │   │   │   ├── integration-domain.ts
│       │   │   │   └── services
│       │   │   │       ├── export.service.ts
│       │   │   │       ├── task-expansion.service.ts
│       │   │   │       └── task-retrieval.service.ts
│       │   │   ├── reports
│       │   │   │   ├── index.ts
│       │   │   │   ├── managers
│       │   │   │   │   └── complexity-report-manager.ts
│       │   │   │   └── types.ts
│       │   │   ├── storage
│       │   │   │   ├── adapters
│       │   │   │   │   ├── activity-logger.ts
│       │   │   │   │   ├── api-storage.ts
│       │   │   │   │   └── file-storage
│       │   │   │   │       ├── file-operations.ts
│       │   │   │   │       ├── file-storage.ts
│       │   │   │   │       ├── format-handler.ts
│       │   │   │   │       ├── index.ts
│       │   │   │   │       └── path-resolver.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── services
│       │   │   │   │   └── storage-factory.ts
│       │   │   │   └── utils
│       │   │   │       └── api-client.ts
│       │   │   ├── tasks
│       │   │   │   ├── entities
│       │   │   │   │   └── task.entity.ts
│       │   │   │   ├── parser
│       │   │   │   │   └── index.ts
│       │   │   │   ├── repositories
│       │   │   │   │   ├── supabase
│       │   │   │   │   │   ├── dependency-fetcher.ts
│       │   │   │   │   │   ├── index.ts
│       │   │   │   │   │   └── supabase-repository.ts
│       │   │   │   │   └── task-repository.interface.ts
│       │   │   │   ├── services
│       │   │   │   │   ├── preflight-checker.service.ts
│       │   │   │   │   ├── tag.service.ts
│       │   │   │   │   ├── task-execution-service.ts
│       │   │   │   │   ├── task-loader.service.ts
│       │   │   │   │   └── task-service.ts
│       │   │   │   └── tasks-domain.ts
│       │   │   ├── ui
│       │   │   │   └── index.ts
│       │   │   └── workflow
│       │   │       ├── managers
│       │   │       │   ├── workflow-state-manager.spec.ts
│       │   │       │   └── workflow-state-manager.ts
│       │   │       ├── orchestrators
│       │   │       │   ├── workflow-orchestrator.test.ts
│       │   │       │   └── workflow-orchestrator.ts
│       │   │       ├── services
│       │   │       │   ├── test-result-validator.test.ts
│       │   │       │   ├── test-result-validator.ts
│       │   │       │   ├── test-result-validator.types.ts
│       │   │       │   ├── workflow-activity-logger.ts
│       │   │       │   └── workflow.service.ts
│       │   │       ├── types.ts
│       │   │       └── workflow-domain.ts
│       │   ├── subpath-exports.test.ts
│       │   ├── tm-core.ts
│       │   └── utils
│       │       └── time.utils.ts
│       ├── tests
│       │   ├── auth
│       │   │   └── auth-refresh.test.ts
│       │   ├── integration
│       │   │   ├── auth-token-refresh.test.ts
│       │   │   ├── list-tasks.test.ts
│       │   │   └── storage
│       │   │       └── activity-logger.test.ts
│       │   ├── mocks
│       │   │   └── mock-provider.ts
│       │   ├── setup.ts
│       │   └── unit
│       │       ├── base-provider.test.ts
│       │       ├── executor.test.ts
│       │       └── smoke.test.ts
│       ├── tsconfig.json
│       └── vitest.config.ts
├── README-task-master.md
├── README.md
├── scripts
│   ├── create-worktree.sh
│   ├── dev.js
│   ├── init.js
│   ├── list-worktrees.sh
│   ├── modules
│   │   ├── ai-services-unified.js
│   │   ├── bridge-utils.js
│   │   ├── commands.js
│   │   ├── config-manager.js
│   │   ├── dependency-manager.js
│   │   ├── index.js
│   │   ├── prompt-manager.js
│   │   ├── supported-models.json
│   │   ├── sync-readme.js
│   │   ├── task-manager
│   │   │   ├── add-subtask.js
│   │   │   ├── add-task.js
│   │   │   ├── analyze-task-complexity.js
│   │   │   ├── clear-subtasks.js
│   │   │   ├── expand-all-tasks.js
│   │   │   ├── expand-task.js
│   │   │   ├── find-next-task.js
│   │   │   ├── generate-task-files.js
│   │   │   ├── is-task-dependent.js
│   │   │   ├── list-tasks.js
│   │   │   ├── migrate.js
│   │   │   ├── models.js
│   │   │   ├── move-task.js
│   │   │   ├── parse-prd
│   │   │   │   ├── index.js
│   │   │   │   ├── parse-prd-config.js
│   │   │   │   ├── parse-prd-helpers.js
│   │   │   │   ├── parse-prd-non-streaming.js
│   │   │   │   ├── parse-prd-streaming.js
│   │   │   │   └── parse-prd.js
│   │   │   ├── remove-subtask.js
│   │   │   ├── remove-task.js
│   │   │   ├── research.js
│   │   │   ├── response-language.js
│   │   │   ├── scope-adjustment.js
│   │   │   ├── set-task-status.js
│   │   │   ├── tag-management.js
│   │   │   ├── task-exists.js
│   │   │   ├── update-single-task-status.js
│   │   │   ├── update-subtask-by-id.js
│   │   │   ├── update-task-by-id.js
│   │   │   └── update-tasks.js
│   │   ├── task-manager.js
│   │   ├── ui.js
│   │   ├── update-config-tokens.js
│   │   ├── utils
│   │   │   ├── contextGatherer.js
│   │   │   ├── fuzzyTaskSearch.js
│   │   │   └── git-utils.js
│   │   └── utils.js
│   ├── task-complexity-report.json
│   ├── test-claude-errors.js
│   └── test-claude.js
├── sonar-project.properties
├── src
│   ├── ai-providers
│   │   ├── anthropic.js
│   │   ├── azure.js
│   │   ├── base-provider.js
│   │   ├── bedrock.js
│   │   ├── claude-code.js
│   │   ├── codex-cli.js
│   │   ├── gemini-cli.js
│   │   ├── google-vertex.js
│   │   ├── google.js
│   │   ├── grok-cli.js
│   │   ├── groq.js
│   │   ├── index.js
│   │   ├── lmstudio.js
│   │   ├── ollama.js
│   │   ├── openai-compatible.js
│   │   ├── openai.js
│   │   ├── openrouter.js
│   │   ├── perplexity.js
│   │   ├── xai.js
│   │   ├── zai-coding.js
│   │   └── zai.js
│   ├── constants
│   │   ├── commands.js
│   │   ├── paths.js
│   │   ├── profiles.js
│   │   ├── rules-actions.js
│   │   ├── task-priority.js
│   │   └── task-status.js
│   ├── profiles
│   │   ├── amp.js
│   │   ├── base-profile.js
│   │   ├── claude.js
│   │   ├── cline.js
│   │   ├── codex.js
│   │   ├── cursor.js
│   │   ├── gemini.js
│   │   ├── index.js
│   │   ├── kilo.js
│   │   ├── kiro.js
│   │   ├── opencode.js
│   │   ├── roo.js
│   │   ├── trae.js
│   │   ├── vscode.js
│   │   ├── windsurf.js
│   │   └── zed.js
│   ├── progress
│   │   ├── base-progress-tracker.js
│   │   ├── cli-progress-factory.js
│   │   ├── parse-prd-tracker.js
│   │   ├── progress-tracker-builder.js
│   │   └── tracker-ui.js
│   ├── prompts
│   │   ├── add-task.json
│   │   ├── analyze-complexity.json
│   │   ├── expand-task.json
│   │   ├── parse-prd.json
│   │   ├── README.md
│   │   ├── research.json
│   │   ├── schemas
│   │   │   ├── parameter.schema.json
│   │   │   ├── prompt-template.schema.json
│   │   │   ├── README.md
│   │   │   └── variant.schema.json
│   │   ├── update-subtask.json
│   │   ├── update-task.json
│   │   └── update-tasks.json
│   ├── provider-registry
│   │   └── index.js
│   ├── schemas
│   │   ├── add-task.js
│   │   ├── analyze-complexity.js
│   │   ├── base-schemas.js
│   │   ├── expand-task.js
│   │   ├── parse-prd.js
│   │   ├── registry.js
│   │   ├── update-subtask.js
│   │   ├── update-task.js
│   │   └── update-tasks.js
│   ├── task-master.js
│   ├── ui
│   │   ├── confirm.js
│   │   ├── indicators.js
│   │   └── parse-prd.js
│   └── utils
│       ├── asset-resolver.js
│       ├── create-mcp-config.js
│       ├── format.js
│       ├── getVersion.js
│       ├── logger-utils.js
│       ├── manage-gitignore.js
│       ├── path-utils.js
│       ├── profiles.js
│       ├── rule-transformer.js
│       ├── stream-parser.js
│       └── timeout-manager.js
├── test-clean-tags.js
├── test-config-manager.js
├── test-prd.txt
├── test-tag-functions.js
├── test-version-check-full.js
├── test-version-check.js
├── tests
│   ├── e2e
│   │   ├── e2e_helpers.sh
│   │   ├── parse_llm_output.cjs
│   │   ├── run_e2e.sh
│   │   ├── run_fallback_verification.sh
│   │   └── test_llm_analysis.sh
│   ├── fixtures
│   │   ├── .taskmasterconfig
│   │   ├── sample-claude-response.js
│   │   ├── sample-prd.txt
│   │   └── sample-tasks.js
│   ├── helpers
│   │   └── tool-counts.js
│   ├── integration
│   │   ├── claude-code-error-handling.test.js
│   │   ├── claude-code-optional.test.js
│   │   ├── cli
│   │   │   ├── commands.test.js
│   │   │   ├── complex-cross-tag-scenarios.test.js
│   │   │   └── move-cross-tag.test.js
│   │   ├── manage-gitignore.test.js
│   │   ├── mcp-server
│   │   │   └── direct-functions.test.js
│   │   ├── move-task-cross-tag.integration.test.js
│   │   ├── move-task-simple.integration.test.js
│   │   ├── profiles
│   │   │   ├── amp-init-functionality.test.js
│   │   │   ├── claude-init-functionality.test.js
│   │   │   ├── cline-init-functionality.test.js
│   │   │   ├── codex-init-functionality.test.js
│   │   │   ├── cursor-init-functionality.test.js
│   │   │   ├── gemini-init-functionality.test.js
│   │   │   ├── opencode-init-functionality.test.js
│   │   │   ├── roo-files-inclusion.test.js
│   │   │   ├── roo-init-functionality.test.js
│   │   │   ├── rules-files-inclusion.test.js
│   │   │   ├── trae-init-functionality.test.js
│   │   │   ├── vscode-init-functionality.test.js
│   │   │   └── windsurf-init-functionality.test.js
│   │   └── providers
│   │       └── temperature-support.test.js
│   ├── manual
│   │   ├── progress
│   │   │   ├── parse-prd-analysis.js
│   │   │   ├── test-parse-prd.js
│   │   │   └── TESTING_GUIDE.md
│   │   └── prompts
│   │       ├── prompt-test.js
│   │       └── README.md
│   ├── README.md
│   ├── setup.js
│   └── unit
│       ├── ai-providers
│       │   ├── base-provider.test.js
│       │   ├── claude-code.test.js
│       │   ├── codex-cli.test.js
│       │   ├── gemini-cli.test.js
│       │   ├── lmstudio.test.js
│       │   ├── mcp-components.test.js
│       │   ├── openai-compatible.test.js
│       │   ├── openai.test.js
│       │   ├── provider-registry.test.js
│       │   ├── zai-coding.test.js
│       │   ├── zai-provider.test.js
│       │   ├── zai-schema-introspection.test.js
│       │   └── zai.test.js
│       ├── ai-services-unified.test.js
│       ├── commands.test.js
│       ├── config-manager.test.js
│       ├── config-manager.test.mjs
│       ├── dependency-manager.test.js
│       ├── init.test.js
│       ├── initialize-project.test.js
│       ├── kebab-case-validation.test.js
│       ├── manage-gitignore.test.js
│       ├── mcp
│       │   └── tools
│       │       ├── __mocks__
│       │       │   └── move-task.js
│       │       ├── add-task.test.js
│       │       ├── analyze-complexity.test.js
│       │       ├── expand-all.test.js
│       │       ├── get-tasks.test.js
│       │       ├── initialize-project.test.js
│       │       ├── move-task-cross-tag-options.test.js
│       │       ├── move-task-cross-tag.test.js
│       │       ├── remove-task.test.js
│       │       └── tool-registration.test.js
│       ├── mcp-providers
│       │   ├── mcp-components.test.js
│       │   └── mcp-provider.test.js
│       ├── parse-prd.test.js
│       ├── profiles
│       │   ├── amp-integration.test.js
│       │   ├── claude-integration.test.js
│       │   ├── cline-integration.test.js
│       │   ├── codex-integration.test.js
│       │   ├── cursor-integration.test.js
│       │   ├── gemini-integration.test.js
│       │   ├── kilo-integration.test.js
│       │   ├── kiro-integration.test.js
│       │   ├── mcp-config-validation.test.js
│       │   ├── opencode-integration.test.js
│       │   ├── profile-safety-check.test.js
│       │   ├── roo-integration.test.js
│       │   ├── rule-transformer-cline.test.js
│       │   ├── rule-transformer-cursor.test.js
│       │   ├── rule-transformer-gemini.test.js
│       │   ├── rule-transformer-kilo.test.js
│       │   ├── rule-transformer-kiro.test.js
│       │   ├── rule-transformer-opencode.test.js
│       │   ├── rule-transformer-roo.test.js
│       │   ├── rule-transformer-trae.test.js
│       │   ├── rule-transformer-vscode.test.js
│       │   ├── rule-transformer-windsurf.test.js
│       │   ├── rule-transformer-zed.test.js
│       │   ├── rule-transformer.test.js
│       │   ├── selective-profile-removal.test.js
│       │   ├── subdirectory-support.test.js
│       │   ├── trae-integration.test.js
│       │   ├── vscode-integration.test.js
│       │   ├── windsurf-integration.test.js
│       │   └── zed-integration.test.js
│       ├── progress
│       │   └── base-progress-tracker.test.js
│       ├── prompt-manager.test.js
│       ├── prompts
│       │   ├── expand-task-prompt.test.js
│       │   └── prompt-migration.test.js
│       ├── scripts
│       │   └── modules
│       │       ├── commands
│       │       │   ├── move-cross-tag.test.js
│       │       │   └── README.md
│       │       ├── dependency-manager
│       │       │   ├── circular-dependencies.test.js
│       │       │   ├── cross-tag-dependencies.test.js
│       │       │   └── fix-dependencies-command.test.js
│       │       ├── task-manager
│       │       │   ├── add-subtask.test.js
│       │       │   ├── add-task.test.js
│       │       │   ├── analyze-task-complexity.test.js
│       │       │   ├── clear-subtasks.test.js
│       │       │   ├── complexity-report-tag-isolation.test.js
│       │       │   ├── expand-all-tasks.test.js
│       │       │   ├── expand-task.test.js
│       │       │   ├── find-next-task.test.js
│       │       │   ├── generate-task-files.test.js
│       │       │   ├── list-tasks.test.js
│       │       │   ├── models-baseurl.test.js
│       │       │   ├── move-task-cross-tag.test.js
│       │       │   ├── move-task.test.js
│       │       │   ├── parse-prd-schema.test.js
│       │       │   ├── parse-prd.test.js
│       │       │   ├── remove-subtask.test.js
│       │       │   ├── remove-task.test.js
│       │       │   ├── research.test.js
│       │       │   ├── scope-adjustment.test.js
│       │       │   ├── set-task-status.test.js
│       │       │   ├── setup.js
│       │       │   ├── update-single-task-status.test.js
│       │       │   ├── update-subtask-by-id.test.js
│       │       │   ├── update-task-by-id.test.js
│       │       │   └── update-tasks.test.js
│       │       ├── ui
│       │       │   └── cross-tag-error-display.test.js
│       │       └── utils-tag-aware-paths.test.js
│       ├── task-finder.test.js
│       ├── task-manager
│       │   ├── clear-subtasks.test.js
│       │   ├── move-task.test.js
│       │   ├── tag-boundary.test.js
│       │   └── tag-management.test.js
│       ├── task-master.test.js
│       ├── ui
│       │   └── indicators.test.js
│       ├── ui.test.js
│       ├── utils-strip-ansi.test.js
│       └── utils.test.js
├── tsconfig.json
├── tsdown.config.ts
├── turbo.json
└── update-task-migration-plan.md
```

# Files

--------------------------------------------------------------------------------
/apps/cli/src/commands/context.command.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * @fileoverview Context command for managing org/brief selection
 * Provides a clean interface for workspace context management
 */

import { Command } from 'commander';
import chalk from 'chalk';
import inquirer from 'inquirer';
import ora from 'ora';
import {
	AuthManager,
	createTmCore,
	type UserContext,
	type TmCore
} from '@tm/core';
import * as ui from '../utils/ui.js';
import { checkAuthentication } from '../utils/auth-helpers.js';
import { getBriefStatusWithColor } from '../ui/formatters/status-formatters.js';
import {
	selectBriefInteractive,
	selectBriefFromInput
} from '../utils/brief-selection.js';

/**
 * Result type from context command
 */
export interface ContextResult {
	success: boolean;
	action: 'show' | 'select-org' | 'select-brief' | 'clear' | 'set';
	context?: UserContext;
	message?: string;
}

/**
 * ContextCommand extending Commander's Command class
 * Manages user's workspace context (org/brief selection)
 */
export class ContextCommand extends Command {
	private authManager: AuthManager;
	private tmCore?: TmCore;
	private lastResult?: ContextResult;

	constructor(name?: string) {
		super(name || 'context');

		// Initialize auth manager
		this.authManager = AuthManager.getInstance();

		// Configure the command
		this.description(
			'Manage workspace context (organization and brief selection)'
		);

		// Add subcommands
		this.addOrgCommand();
		this.addBriefCommand();
		this.addClearCommand();
		this.addSetCommand();

		// Accept optional positional argument for brief ID or Hamster URL
		this.argument('[briefOrUrl]', 'Brief ID or Hamster brief URL');

		// Default action: if an argument is provided, resolve and set context; else show
		this.action(async (briefOrUrl?: string) => {
			if (briefOrUrl && briefOrUrl.trim().length > 0) {
				await this.executeSetFromBriefInput(briefOrUrl.trim());
				return;
			}
			await this.executeShow();
		});
	}

	/**
	 * Add org selection subcommand
	 */
	private addOrgCommand(): void {
		this.command('org')
			.description('Select an organization')
			.argument('[orgId]', 'Organization ID or slug to select directly')
			.action(async (orgId?: string) => {
				await this.executeSelectOrg(orgId);
			});
	}

	/**
	 * Add brief selection subcommand
	 */
	private addBriefCommand(): void {
		this.command('brief')
			.description('Select a brief within the current organization')
			.argument('[briefIdOrUrl]', 'Brief ID or Hamster URL to select directly')
			.action(async (briefIdOrUrl?: string) => {
				await this.executeSelectBrief(briefIdOrUrl);
			});
	}

	/**
	 * Add clear subcommand
	 */
	private addClearCommand(): void {
		this.command('clear')
			.description('Clear all context selections')
			.action(async () => {
				await this.executeClear();
			});
	}

	/**
	 * Add set subcommand for direct context setting
	 */
	private addSetCommand(): void {
		this.command('set')
			.description('Set context directly')
			.option('--org <id>', 'Organization ID')
			.option('--org-name <name>', 'Organization name')
			.option('--brief <id>', 'Brief ID')
			.option('--brief-name <name>', 'Brief name')
			.action(async (options) => {
				await this.executeSet(options);
			});
	}

	/**
	 * Execute show current context
	 */
	private async executeShow(): Promise<void> {
		try {
			const result = await this.displayContext();
			this.setLastResult(result);
		} catch (error: any) {
			ui.displayError(`Failed to show context: ${(error as Error).message}`);
			process.exit(1);
		}
	}

	/**
	 * Display current context
	 */
	private async displayContext(): Promise<ContextResult> {
		// Check authentication first
		const isAuthenticated = await checkAuthentication(this.authManager, {
			message:
				'The "context" command requires you to be logged in to your Hamster account.'
		});

		if (!isAuthenticated) {
			return {
				success: false,
				action: 'show',
				message: 'Not authenticated'
			};
		}

		const context = this.authManager.getContext();

		console.log(chalk.cyan('\n🌍 Workspace Context\n'));

		if (context && (context.orgId || context.briefId)) {
			if (context.orgName || context.orgId) {
				console.log(chalk.green('✓ Organization'));
				if (context.orgName) {
					console.log(chalk.white(`  ${context.orgName}`));
				}
				if (context.orgId) {
					console.log(chalk.gray(`  ID: ${context.orgId}`));
				}
			}

			if (context.briefName || context.briefId) {
				console.log(chalk.green('\n✓ Brief'));
				if (context.briefName && context.briefId) {
					const shortId = context.briefId.slice(-8);
					console.log(
						chalk.white(`  ${context.briefName} `) + chalk.gray(`(${shortId})`)
					);
				} else if (context.briefName) {
					console.log(chalk.white(`  ${context.briefName}`));
				} else if (context.briefId) {
					console.log(chalk.gray(`  ID: ${context.briefId}`));
				}

				// Show brief status if available
				if (context.briefStatus) {
					const statusDisplay = getBriefStatusWithColor(context.briefStatus);
					console.log(chalk.gray(`  Status: `) + statusDisplay);
				}

				// Show brief updated date if available
				if (context.briefUpdatedAt) {
					const updatedDate = new Date(
						context.briefUpdatedAt
					).toLocaleDateString('en-US', {
						month: 'short',
						day: 'numeric',
						year: 'numeric',
						hour: '2-digit',
						minute: '2-digit'
					});
					console.log(chalk.gray(`  Updated: ${updatedDate}`));
				}
			}

			if (context.updatedAt) {
				console.log(
					chalk.gray(
						`\n  Last updated: ${new Date(context.updatedAt).toLocaleString()}`
					)
				);
			}

			return {
				success: true,
				action: 'show',
				context,
				message: 'Context loaded'
			};
		} else {
			console.log(chalk.yellow('✗ No context selected'));
			console.log(
				chalk.gray('\n  Run "tm context org" to select an organization')
			);
			console.log(chalk.gray('  Run "tm context brief" to select a brief'));

			return {
				success: true,
				action: 'show',
				message: 'No context selected'
			};
		}
	}

	/**
	 * Execute org selection
	 */
	private async executeSelectOrg(orgId?: string): Promise<void> {
		try {
			// Check authentication
			if (!(await checkAuthentication(this.authManager))) {
				process.exit(1);
			}

			const result = await this.selectOrganization(orgId);
			this.setLastResult(result);

			if (!result.success) {
				process.exit(1);
			}
		} catch (error: any) {
			ui.displayError(
				`Failed to select organization: ${(error as Error).message}`
			);
			process.exit(1);
		}
	}

	/**
	 * Select an organization interactively or by ID/slug/name
	 */
	private async selectOrganization(orgId?: string): Promise<ContextResult> {
		const spinner = ora('Fetching organizations...').start();

		try {
			// Fetch organizations from API
			const organizations = await this.authManager.getOrganizations();
			spinner.stop();

			if (organizations.length === 0) {
				ui.displayWarning('No organizations available');
				return {
					success: false,
					action: 'select-org',
					message: 'No organizations available'
				};
			}

			let selectedOrg;

			// If orgId provided, find matching org by ID, slug or name
			const trimmedOrgId = orgId?.trim();
			if (trimmedOrgId) {
				const normalizedInput = trimmedOrgId.toLowerCase();
				selectedOrg = organizations.find(
					(org) =>
						org.id === trimmedOrgId ||
						org.slug?.toLowerCase() === normalizedInput ||
						org.name.toLowerCase() === normalizedInput
				);

				if (!selectedOrg) {
					const totalCount = organizations.length;
					const displayLimit = 5;
					const orgList = organizations
						.slice(0, displayLimit)
						.map((o) => o.name)
						.join(', ');

					let errorMessage = `Organization not found: ${trimmedOrgId}\n`;
					if (totalCount <= displayLimit) {
						errorMessage += `Available organizations: ${orgList}`;
					} else {
						errorMessage += `Available organizations (showing ${displayLimit} of ${totalCount}): ${orgList}`;
						errorMessage += `\nRun "tm context org" to see all organizations and select interactively`;
					}

					ui.displayError(errorMessage);
					return {
						success: false,
						action: 'select-org',
						message: `Organization not found: ${trimmedOrgId}`
					};
				}
			} else {
				// Interactive selection
				const response = await inquirer.prompt([
					{
						type: 'list',
						name: 'selectedOrg',
						message: 'Select an organization:',
						choices: organizations.map((org) => ({
							name: org.name,
							value: org
						}))
					}
				]);
				selectedOrg = response.selectedOrg;
			}

			// Update context
			await this.authManager.updateContext({
				orgId: selectedOrg.id,
				orgName: selectedOrg.name,
				orgSlug: selectedOrg.slug,
				// Clear brief when changing org
				briefId: undefined,
				briefName: undefined
			});

			ui.displaySuccess(`Selected organization: ${selectedOrg.name}`);

			return {
				success: true,
				action: 'select-org',
				context: this.authManager.getContext() || undefined,
				message: `Selected organization: ${selectedOrg.name}`
			};
		} catch (error) {
			spinner.fail('Failed to fetch organizations');
			throw error;
		}
	}

	/**
	 * Execute brief selection
	 */
	private async executeSelectBrief(briefIdOrUrl?: string): Promise<void> {
		try {
			// Check authentication
			if (!(await checkAuthentication(this.authManager))) {
				process.exit(1);
			}

			// If briefIdOrUrl provided, use direct selection
			if (briefIdOrUrl && briefIdOrUrl.trim().length > 0) {
				await this.selectBriefDirectly(briefIdOrUrl.trim(), 'select-brief');
				return;
			}

			// Interactive selection
			const context = this.authManager.getContext();
			if (!context?.orgId) {
				ui.displayError(
					'No organization selected. Run "tm context org" first.'
				);
				process.exit(1);
			}

			// Use shared utility for interactive selection
			const result = await selectBriefInteractive(
				this.authManager,
				context.orgId
			);

			this.setLastResult({
				success: result.success,
				action: 'select-brief',
				context: this.authManager.getContext() || undefined,
				message: result.message
			});

			if (!result.success) {
				process.exit(1);
			}
		} catch (error: any) {
			ui.displayError(`Failed to select brief: ${(error as Error).message}`);
			process.exit(1);
		}
	}

	/**
	 * Execute clear context
	 */
	private async executeClear(): Promise<void> {
		try {
			// Check authentication
			if (!(await checkAuthentication(this.authManager))) {
				process.exit(1);
			}

			const result = await this.clearContext();
			this.setLastResult(result);

			if (!result.success) {
				process.exit(1);
			}
		} catch (error: any) {
			ui.displayError(`Failed to clear context: ${(error as Error).message}`);
			process.exit(1);
		}
	}

	/**
	 * Clear all context selections
	 */
	private async clearContext(): Promise<ContextResult> {
		try {
			await this.authManager.clearContext();
			ui.displaySuccess('Context cleared');

			return {
				success: true,
				action: 'clear',
				message: 'Context cleared'
			};
		} catch (error) {
			ui.displayError(`Failed to clear context: ${(error as Error).message}`);

			return {
				success: false,
				action: 'clear',
				message: `Failed to clear context: ${(error as Error).message}`
			};
		}
	}

	/**
	 * Execute set context with options
	 */
	private async executeSet(options: any): Promise<void> {
		try {
			// Check authentication
			if (!(await checkAuthentication(this.authManager))) {
				process.exit(1);
			}

			const result = await this.setContext(options);
			this.setLastResult(result);

			if (!result.success) {
				process.exit(1);
			}
		} catch (error: any) {
			ui.displayError(`Failed to set context: ${(error as Error).message}`);
			process.exit(1);
		}
	}

	/**
	 * Initialize TmCore if not already initialized
	 */
	private async initTmCore(): Promise<void> {
		if (!this.tmCore) {
			this.tmCore = await createTmCore({
				projectPath: process.cwd()
			});
		}
	}

	/**
	 * Helper method to select brief directly from input (URL or ID)
	 * Used by both executeSelectBrief and executeSetFromBriefInput
	 */
	private async selectBriefDirectly(
		input: string,
		action: 'select-brief' | 'set'
	): Promise<void> {
		await this.initTmCore();

		const result = await selectBriefFromInput(
			this.authManager,
			input,
			this.tmCore
		);

		this.setLastResult({
			success: result.success,
			action,
			context: this.authManager.getContext() || undefined,
			message: result.message
		});

		if (!result.success) {
			process.exit(1);
		}
	}

	/**
	 * Execute setting context from a brief ID or Hamster URL
	 * All parsing logic is in tm-core
	 */
	private async executeSetFromBriefInput(input: string): Promise<void> {
		try {
			// Check authentication
			if (!(await checkAuthentication(this.authManager))) {
				process.exit(1);
			}

			await this.selectBriefDirectly(input, 'set');
		} catch (error: any) {
			ui.displayError(
				`Failed to set context from brief: ${(error as Error).message}`
			);
			process.exit(1);
		}
	}

	/**
	 * Set context directly from options
	 */
	private async setContext(options: any): Promise<ContextResult> {
		try {
			const context: Partial<UserContext> = {};

			if (options.org) {
				context.orgId = options.org;
			}
			if (options.orgName) {
				context.orgName = options.orgName;
			}
			if (options.brief) {
				context.briefId = options.brief;
			}
			if (options.briefName) {
				context.briefName = options.briefName;
			}

			if (Object.keys(context).length === 0) {
				ui.displayWarning('No context options provided');
				return {
					success: false,
					action: 'set',
					message: 'No context options provided'
				};
			}

			await this.authManager.updateContext(context);
			ui.displaySuccess('Context updated');

			// Display what was set
			if (context.orgName || context.orgId) {
				console.log(
					chalk.gray(`  Organization: ${context.orgName || context.orgId}`)
				);
			}
			if (context.briefName || context.briefId) {
				console.log(
					chalk.gray(`  Brief: ${context.briefName || context.briefId}`)
				);
			}

			return {
				success: true,
				action: 'set',
				context: this.authManager.getContext() || undefined,
				message: 'Context updated'
			};
		} catch (error) {
			ui.displayError(`Failed to set context: ${(error as Error).message}`);

			return {
				success: false,
				action: 'set',
				message: `Failed to set context: ${(error as Error).message}`
			};
		}
	}

	/**
	 * Set the last result for programmatic access
	 */
	private setLastResult(result: ContextResult): void {
		this.lastResult = result;
	}

	/**
	 * Get the last result (for programmatic usage)
	 */
	getLastResult(): ContextResult | undefined {
		return this.lastResult;
	}

	/**
	 * Get current context (for programmatic usage)
	 */
	getContext(): UserContext | null {
		return this.authManager.getContext();
	}

	/**
	 * Interactive context setup (for post-auth flow)
	 * Prompts user to select org and brief
	 */
	async setupContextInteractive(): Promise<{
		success: boolean;
		orgSelected: boolean;
		briefSelected: boolean;
	}> {
		try {
			// Ask if user wants to set up workspace context
			const { setupContext } = await inquirer.prompt([
				{
					type: 'confirm',
					name: 'setupContext',
					message: 'Would you like to set up your workspace context now?',
					default: true
				}
			]);

			if (!setupContext) {
				return { success: true, orgSelected: false, briefSelected: false };
			}

			// Select organization
			const orgResult = await this.selectOrganization();
			if (!orgResult.success || !orgResult.context?.orgId) {
				return { success: false, orgSelected: false, briefSelected: false };
			}

			// Select brief using shared utility
			const briefResult = await selectBriefInteractive(
				this.authManager,
				orgResult.context.orgId
			);
			return {
				success: true,
				orgSelected: true,
				briefSelected: briefResult.success
			};
		} catch (error) {
			console.error(
				chalk.yellow(
					'\nContext setup skipped due to error. You can set it up later with "tm context"'
				)
			);
			return { success: false, orgSelected: false, briefSelected: false };
		}
	}

	/**
	 * Clean up resources
	 */
	async cleanup(): Promise<void> {
		// No resources to clean up for context command
	}

	/**
	 * Register this command on an existing program
	 */
	static register(program: Command, name?: string): ContextCommand {
		const contextCommand = new ContextCommand(name);
		program.addCommand(contextCommand);
		return contextCommand;
	}
}

```

--------------------------------------------------------------------------------
/.taskmaster/docs/prd.txt:
--------------------------------------------------------------------------------

```

# Claude Task Master - Product Requirements Document

<PRD>
# Technical Architecture  

## System Components
1. **Task Management Core**
   - Tasks.json file structure (single source of truth)
   - Task model with dependencies, priorities, and metadata
   - Task state management system
   - Task file generation subsystem

2. **AI Integration Layer**
   - Anthropic Claude API integration
   - Perplexity API integration (optional)
   - Prompt engineering components
   - Response parsing and processing

3. **Command Line Interface**
   - Command parsing and execution
   - Interactive user input handling
   - Display and formatting utilities
   - Status reporting and feedback system

4. **Cursor AI Integration**
   - Cursor rules documentation
   - Agent interaction patterns
   - Workflow guideline specifications

## Data Models

### Task Model
```json
{
  "id": 1,
  "title": "Task Title",
  "description": "Brief task description",
  "status": "pending|done|deferred",
  "dependencies": [0],
  "priority": "high|medium|low",
  "details": "Detailed implementation instructions",
  "testStrategy": "Verification approach details",
  "subtasks": [
    {
      "id": 1,
      "title": "Subtask Title",
      "description": "Subtask description",
      "status": "pending|done|deferred",
      "dependencies": [],
      "acceptanceCriteria": "Verification criteria"
    }
  ]
}
```

### Tasks Collection Model
```json
{
  "meta": {
    "projectName": "Project Name",
    "version": "1.0.0",
    "prdSource": "path/to/prd.txt",
    "createdAt": "ISO-8601 timestamp",
    "updatedAt": "ISO-8601 timestamp"
  },
  "tasks": [
    // Array of Task objects
  ]
}
```

### Task File Format
```
# Task ID: <id>
# Title: <title>
# Status: <status>
# Dependencies: <comma-separated list of dependency IDs>
# Priority: <priority>
# Description: <brief description>
# Details:
<detailed implementation notes>

# Test Strategy:
<verification approach>

# Subtasks:
1. <subtask title> - <subtask description>
```

## APIs and Integrations
1. **Anthropic Claude API**
   - Authentication via API key
   - Prompt construction and streaming
   - Response parsing and extraction
   - Error handling and retries

2. **Perplexity API (via OpenAI client)**
   - Authentication via API key
   - Research-oriented prompt construction
   - Enhanced contextual response handling
   - Fallback mechanisms to Claude

3. **File System API**
   - Reading/writing tasks.json
   - Managing individual task files
   - Command execution logging
   - Debug logging system
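
For illustration only, a minimal sketch of the Claude call with exponential-backoff retries, written against the official `@anthropic-ai/sdk` client and the environment-variable defaults specified later in this document. The helper below is a sketch, not the system's actual service layer:

```javascript
// Illustrative sketch; the real integration lives in the AI integration layer.
import Anthropic from '@anthropic-ai/sdk';

const client = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });

async function callClaudeWithRetry(prompt, { retries = 3 } = {}) {
	for (let attempt = 0; attempt <= retries; attempt++) {
		try {
			const message = await client.messages.create({
				model: process.env.MODEL || 'claude-3-7-sonnet-20250219',
				max_tokens: Number(process.env.MAX_TOKENS || 4000),
				temperature: Number(process.env.TEMPERATURE || 0.7),
				messages: [{ role: 'user', content: prompt }]
			});
			return message.content; // response parsing/extraction happens downstream
		} catch (error) {
			if (attempt === retries) throw error;
			// Exponential backoff: 1s, 2s, 4s, ...
			await new Promise((resolve) => setTimeout(resolve, 2 ** attempt * 1000));
		}
	}
}
```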

## Infrastructure Requirements
1. **Node.js Runtime**
   - Version 14.0.0 or higher
   - ES Module support
   - File system access rights
   - Command execution capabilities

2. **Configuration Management**
   - Environment variable handling
   - .env file support
   - Configuration validation
   - Sensible defaults with overrides

3. **Development Environment**
   - Git repository
   - NPM package management
   - Cursor editor integration
   - Command-line terminal access

# Development Roadmap  

## Phase 1: Core Task Management System
1. **Task Data Structure**
   - Design and implement the tasks.json structure
   - Create task model validation
   - Implement basic task operations (create, read, update)
   - Develop file system interactions

2. **Command Line Interface Foundation**
   - Implement command parsing with Commander.js
   - Create help documentation
   - Implement colorized console output
   - Add logging system with configurable levels

3. **Basic Task Operations**
   - Implement task listing functionality
   - Create task status update capability
   - Add dependency tracking
   - Implement priority management

4. **Task File Generation**
   - Create task file templates
   - Implement generation from tasks.json
   - Add bi-directional synchronization
   - Implement proper file naming and organization

## Phase 2: AI Integration
1. **Claude API Integration**
   - Implement API authentication
   - Create prompt templates for PRD parsing
   - Design response handlers
   - Add error management and retries

2. **PRD Parsing System**
   - Implement PRD file reading
   - Create PRD to task conversion logic
   - Add intelligent dependency inference
   - Implement priority assignment logic

3. **Task Expansion With Claude**
   - Create subtask generation prompts
   - Implement subtask creation workflow
   - Add context-aware expansion capabilities
   - Implement parent-child relationship management

4. **Implementation Drift Handling**
   - Add capability to update future tasks
   - Implement task rewriting based on new context
   - Create dependency chain updates
   - Preserve completed work while updating future tasks

## Phase 3: Advanced Features
1. **Perplexity Integration**
   - Implement Perplexity API authentication
   - Create research-oriented prompts
   - Add fallback to Claude when unavailable
   - Implement response quality comparison logic

2. **Research-Backed Subtask Generation**
   - Create specialized research prompts
   - Implement context enrichment
   - Add domain-specific knowledge incorporation
   - Create more detailed subtask generation

3. **Batch Operations**
   - Implement multi-task status updates
   - Add bulk subtask generation
   - Create task filtering and querying
   - Implement advanced dependency management

4. **Project Initialization**
   - Create project templating system
   - Implement interactive setup
   - Add environment configuration
   - Create documentation generation

## Phase 4: Cursor AI Integration
1. **Cursor Rules Implementation**
   - Create dev_workflow.mdc documentation
   - Implement cursor_rules.mdc
   - Add self_improve.mdc
   - Design rule integration documentation

2. **Agent Workflow Guidelines**
   - Document task discovery workflow
   - Create task selection guidelines
   - Implement implementation guidance
   - Add verification procedures

3. **Agent Command Integration**
   - Document command syntax for agents
   - Create example interactions
   - Implement agent response patterns
   - Add context management for agents

4. **User Documentation**
   - Create detailed README
   - Add scripts documentation
   - Implement example workflows
   - Create troubleshooting guides

# Logical Dependency Chain

## Foundation Layer
1. **Task Data Structure**
   - Must be implemented first as all other functionality depends on this
   - Defines the core data model for the entire system
   - Establishes the single source of truth concept

2. **Command Line Interface**
   - Built on top of the task data structure
   - Provides the primary user interaction mechanism
   - Required for all subsequent operations to be accessible

3. **Basic Task Operations**
   - Depends on both task data structure and CLI
   - Provides the fundamental operations for task management
   - Enables the minimal viable workflow

## Functional Layer
4. **Task File Generation**
   - Depends on task data structure and basic operations
   - Creates the individual task files for reference
   - Enables the file-based workflow complementing tasks.json

5. **Claude API Integration**
   - Independent of most previous components but needs the task data structure
   - Provides the AI capabilities that enhance the system
   - Gateway to advanced task generation features

6. **PRD Parsing System**
   - Depends on Claude API integration and task data structure
   - Enables the initial task generation workflow
   - Creates the starting point for new projects

## Enhancement Layer
7. **Task Expansion With Claude**
   - Depends on Claude API integration and basic task operations
   - Enhances existing tasks with more detailed subtasks
   - Improves the implementation guidance

8. **Implementation Drift Handling**
   - Depends on Claude API integration and task operations
   - Addresses a key challenge in AI-driven development
   - Maintains the relevance of task planning as implementation evolves

9. **Perplexity Integration**
   - Can be developed in parallel with other features after Claude integration
   - Enhances the quality of generated content
   - Provides research-backed improvements

## Advanced Layer
10. **Research-Backed Subtask Generation**
    - Depends on Perplexity integration and task expansion
    - Provides higher quality, more contextual subtasks
    - Enhances the value of the task breakdown

11. **Batch Operations**
    - Depends on basic task operations
    - Improves efficiency for managing multiple tasks
    - Quality-of-life enhancement for larger projects

12. **Project Initialization**
    - Depends on most previous components being stable
    - Provides a smooth onboarding experience
    - Creates a complete project setup in one step

## Integration Layer
13. **Cursor Rules Implementation**
    - Can be developed in parallel after basic functionality
    - Provides the guidance for Cursor AI agent
    - Enhances the AI-driven workflow

14. **Agent Workflow Guidelines**
    - Depends on Cursor rules implementation
    - Structures how the agent interacts with the system
    - Ensures consistent agent behavior

15. **Agent Command Integration**
    - Depends on agent workflow guidelines
    - Provides specific command patterns for the agent
    - Optimizes the agent-user interaction

16. **User Documentation**
    - Should be developed alongside all features
    - Must be completed before release
    - Ensures users can effectively use the system

# Risks and Mitigations  

## Technical Challenges

### API Reliability
**Risk**: Anthropic or Perplexity API could have downtime, rate limiting, or breaking changes.
**Mitigation**: 
- Implement robust error handling with exponential backoff
- Add fallback mechanisms (Claude fallback for Perplexity)
- Cache important responses to reduce API dependency
- Support offline mode for critical functions

### Model Output Variability
**Risk**: AI models may produce inconsistent or unexpected outputs.
**Mitigation**:
- Design robust prompt templates with strict output formatting requirements
- Implement response validation and error detection
- Add self-correction mechanisms and retries with improved prompts
- Allow manual editing of generated content

### Node.js Version Compatibility
**Risk**: Differences in Node.js versions could cause unexpected behavior.
**Mitigation**:
- Clearly document minimum Node.js version requirements
- Use transpilers if needed for compatibility
- Test across multiple Node.js versions
- Handle version-specific features gracefully

## MVP Definition

### Feature Prioritization
**Risk**: Including too many features in the MVP could delay release and adoption.
**Mitigation**:
- Define MVP as core task management + basic Claude integration
- Ensure each phase delivers a complete, usable product
- Implement feature flags for easy enabling/disabling of features
- Get early user feedback to validate feature importance

### Scope Creep
**Risk**: The project could expand beyond its original intent, becoming too complex.
**Mitigation**:
- Maintain a strict definition of what the tool is and isn't
- Focus on task management for AI-driven development
- Evaluate new features against core value proposition
- Implement extensibility rather than building every feature

### User Expectations
**Risk**: Users might expect a full project management solution rather than a task tracking system.
**Mitigation**:
- Clearly communicate the tool's purpose and limitations
- Provide integration points with existing project management tools
- Focus on the unique value of AI-driven development
- Document specific use cases and example workflows

## Resource Constraints

### Development Capacity
**Risk**: Limited development resources could delay implementation.
**Mitigation**:
- Phase implementation to deliver value incrementally
- Focus on core functionality first
- Leverage open source libraries where possible
- Design for extensibility to allow community contributions

### AI Cost Management
**Risk**: Excessive API usage could lead to high costs.
**Mitigation**:
- Implement token usage tracking and reporting
- Add configurable limits to prevent unexpected costs
- Cache responses where appropriate
- Optimize prompts for token efficiency
- Support local LLM options in the future

### Documentation Overhead
**Risk**: Complexity of the system requires extensive documentation that is time-consuming to maintain.
**Mitigation**:
- Use AI to help generate and maintain documentation
- Create self-documenting commands and features
- Implement progressive documentation (basic to advanced)
- Build help directly into the CLI

# Appendix  

## AI Prompt Engineering Specifications

### PRD Parsing Prompt Structure
```
You are assisting with transforming a Product Requirements Document (PRD) into a structured set of development tasks.

Given the following PRD, create a comprehensive list of development tasks that would be needed to implement the described product.

For each task:
1. Assign a short, descriptive title
2. Write a concise description
3. Identify dependencies (which tasks must be completed before this one)
4. Assign a priority (high, medium, low)
5. Include detailed implementation notes
6. Describe a test strategy to verify completion

Structure the tasks in a logical order of implementation.

PRD:
{prd_content}
```

### Task Expansion Prompt Structure
```
You are helping to break down a development task into more manageable subtasks.

Main task:
Title: {task_title}
Description: {task_description}
Details: {task_details}

Please create {num_subtasks} specific subtasks that together would accomplish this main task.

For each subtask, provide:
1. A clear, actionable title
2. A concise description
3. Any dependencies on other subtasks
4. Specific acceptance criteria to verify completion

Additional context:
{additional_context}
```

### Research-Backed Expansion Prompt Structure
```
You are a technical researcher and developer helping to break down a software development task into detailed, well-researched subtasks.

Main task:
Title: {task_title}
Description: {task_description}
Details: {task_details}

Research the latest best practices, technologies, and implementation patterns for this type of task. Then create {num_subtasks} specific, actionable subtasks that together would accomplish the main task.

For each subtask:
1. Provide a clear, specific title
2. Write a detailed description including technical approach
3. Identify dependencies on other subtasks
4. Include specific acceptance criteria
5. Reference any relevant libraries, tools, or resources that should be used

Consider security, performance, maintainability, and user experience in your recommendations.
```

## Task File System Specification

### Directory Structure
```
/
├── .cursor/
│   └── rules/
│       ├── dev_workflow.mdc
│       ├── cursor_rules.mdc
│       └── self_improve.mdc
├── scripts/
│   ├── dev.js
│   └── README.md
├── tasks/
│   ├── task_001.txt
│   ├── task_002.txt
│   └── ...
├── .env
├── .env.example
├── .gitignore
├── package.json
├── README.md
└── tasks.json
```

### Task ID Specification
- Main tasks: Sequential integers (1, 2, 3, ...)
- Subtasks: Parent ID + dot + sequential integer (1.1, 1.2, 2.1, ...)
- ID references: Used in dependencies, command parameters
- ID ordering: Implies suggested implementation order
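
For illustration only, a small helper (not part of the specified system) that splits an ID reference into its parent and subtask components:

```javascript
// Illustrative helper; not part of the codebase.
function parseTaskId(id) {
	const [parent, subtask] = String(id).split('.');
	return {
		parentId: Number(parent), // "3.1" -> 3, "3" -> 3
		subtaskId: subtask !== undefined ? Number(subtask) : null // "3.1" -> 1, "3" -> null
	};
}
```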

## Command-Line Interface Specification

### Global Options
- `--help`: Display help information
- `--version`: Display version information
- `--file=<file>`: Specify an alternative tasks.json file
- `--quiet`: Reduce output verbosity
- `--debug`: Increase output verbosity
- `--json`: Output in JSON format (for programmatic use)

### Command Structure
- `node scripts/dev.js <command> [options]`
- All commands operate on tasks.json by default
- Commands follow consistent parameter naming
- Common parameter styles: `--id=<id>`, `--status=<status>`, `--prompt="<text>"`
- Boolean flags: `--all`, `--force`, `--with-subtasks`

## API Integration Specifications

### Anthropic API Configuration
- Authentication: ANTHROPIC_API_KEY environment variable
- Model selection: MODEL environment variable
- Default model: claude-3-7-sonnet-20250219
- Maximum tokens: MAX_TOKENS environment variable (default: 4000)
- Temperature: TEMPERATURE environment variable (default: 0.7)

### Perplexity API Configuration
- Authentication: PERPLEXITY_API_KEY environment variable
- Model selection: PERPLEXITY_MODEL environment variable
- Default model: sonar-medium-online
- Connection: Via OpenAI client
- Fallback: Use Claude if Perplexity unavailable
</PRD>

```

--------------------------------------------------------------------------------
/assets/scripts_README.md:
--------------------------------------------------------------------------------

```markdown
# Meta-Development Script

This folder contains a **meta-development script** (`dev.js`) and related utilities that manage tasks for an AI-driven or traditional software development workflow. The script revolves around a `tasks.json` file, which holds an up-to-date list of development tasks.

## Overview

In an AI-driven development process—particularly with tools like [Cursor](https://www.cursor.so/)—it's beneficial to have a **single source of truth** for tasks. This script allows you to:

1. **Parse** a PRD or requirements document (`.txt`) to initialize a set of tasks (`tasks.json`).
2. **List** all existing tasks (IDs, statuses, titles).
3. **Update** tasks to accommodate new prompts or architecture changes (useful if you discover "implementation drift").
4. **Generate** individual task files (e.g., `task_001.txt`) for easy reference or to feed into an AI coding workflow.
5. **Set task status**—mark tasks as `done`, `pending`, or `deferred` based on progress.
6. **Expand** tasks with subtasks—break down complex tasks into smaller, more manageable subtasks.
7. **Research-backed subtask generation**—use Perplexity AI to generate more informed and contextually relevant subtasks.
8. **Clear subtasks**—remove subtasks from specified tasks to allow regeneration or restructuring.
9. **Show task details**—display detailed information about a specific task and its subtasks.

## Configuration (Updated)

Task Master configuration is now managed through two primary methods:

1.  **`.taskmaster/config.json` File (Project Root - Primary)**

    - Stores AI model selections (`main`, `research`, `fallback`), model parameters (`maxTokens`, `temperature`), `logLevel`, `defaultSubtasks`, `defaultPriority`, `projectName`, etc.
    - Managed using the `task-master models --setup` command or the `models` MCP tool.
    - This is the main configuration file for most settings.

2.  **Environment Variables (`.env` File - API Keys Only)**
    - Used **only** for sensitive **API Keys** (e.g., `ANTHROPIC_API_KEY`, `PERPLEXITY_API_KEY`).
    - Create a `.env` file in your project root for CLI usage.
    - See `assets/env.example` for required key names.

**Important:** Settings like `MODEL`, `MAX_TOKENS`, `TEMPERATURE`, `TASKMASTER_LOG_LEVEL`, etc., are **no longer set via `.env`**. Use `task-master models --setup` instead.
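
As a rough sketch of how this split plays out in code (illustrative only; the `models.main` field name is an assumption, not the exact schema), model settings are read from `.taskmaster/config.json` while secrets come from the environment:

```javascript
// Illustrative only; not Task Master's actual configuration loader.
import 'dotenv/config'; // loads API keys from .env into process.env (CLI usage)
import fs from 'node:fs';

const config = JSON.parse(fs.readFileSync('.taskmaster/config.json', 'utf8'));

const mainModel = config.models?.main; // model selections live in config.json (field name assumed)
const anthropicKey = process.env.ANTHROPIC_API_KEY; // API keys live only in .env
```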

## How It Works

1. **`tasks.json`**:

   - A JSON file at the project root containing an array of tasks (each with `id`, `title`, `description`, `status`, etc.).
   - The `meta` field can store additional info like the project's name, version, or reference to the PRD.
   - Tasks can have `subtasks` for more detailed implementation steps.
   - Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending) to easily track progress.

2. **CLI Commands**
   You can run the commands via:

   ```bash
   # If installed globally
   task-master [command] [options]

   # If using locally within the project
   node scripts/dev.js [command] [options]
   ```

   Available commands:

   - `init`: Initialize a new project
   - `parse-prd`: Generate tasks from a PRD document
   - `list`: Display all tasks with their status
   - `update`: Update tasks based on new information
   - `generate`: Create individual task files
   - `set-status`: Change a task's status
   - `expand`: Add subtasks to a task or all tasks
   - `clear-subtasks`: Remove subtasks from specified tasks
   - `next`: Determine the next task to work on based on dependencies
   - `show`: Display detailed information about a specific task
   - `analyze-complexity`: Analyze task complexity and generate recommendations
   - `complexity-report`: Display the complexity analysis in a readable format
   - `add-dependency`: Add a dependency between tasks
   - `remove-dependency`: Remove a dependency from a task
   - `validate-dependencies`: Check for invalid dependencies
   - `fix-dependencies`: Fix invalid dependencies automatically
   - `add-task`: Add a new task using AI

   Run `task-master --help` or `node scripts/dev.js --help` to see detailed usage information.

## Listing Tasks

The `list` command allows you to view all tasks and their status:

```bash
# List all tasks
task-master list

# List tasks with a specific status
task-master list --status=pending

# List tasks and include their subtasks
task-master list --with-subtasks

# List tasks with a specific status and include their subtasks
task-master list --status=pending --with-subtasks
```

## Updating Tasks

The `update` command allows you to update tasks based on new information or implementation changes:

```bash
# Update tasks starting from ID 4 with a new prompt
task-master update --from=4 --prompt="Refactor tasks from ID 4 onward to use Express instead of Fastify"

# Update all tasks (default from=1)
task-master update --prompt="Add authentication to all relevant tasks"

# Specify a different tasks file
task-master update --file=custom-tasks.json --from=5 --prompt="Change database from MongoDB to PostgreSQL"
```

Notes:

- The `--prompt` parameter is required and should explain the changes or new context
- Only tasks that aren't marked as 'done' will be updated
- Tasks with ID >= the specified --from value will be updated

## Setting Task Status

The `set-status` command allows you to change a task's status:

```bash
# Mark a task as done
task-master set-status --id=3 --status=done

# Mark a task as pending
task-master set-status --id=4 --status=pending

# Mark a specific subtask as done
task-master set-status --id=3.1 --status=done

# Mark multiple tasks at once
task-master set-status --id=1,2,3 --status=done
```

Notes:

- When marking a parent task as "done", all of its subtasks will automatically be marked as "done" as well
- Common status values are 'done', 'pending', and 'deferred', but any string is accepted
- You can specify multiple task IDs by separating them with commas
- Subtask IDs are specified using the format `parentId.subtaskId` (e.g., `3.1`)
- Dependencies are updated to show completion status (✅ for completed, ⏱️ for pending) throughout the system

## Expanding Tasks

The `expand` command allows you to break down tasks into subtasks for more detailed implementation:

```bash
# Expand a specific task with 3 subtasks (default)
task-master expand --id=3

# Expand a specific task with 5 subtasks
task-master expand --id=3 --num=5

# Expand a task with additional context
task-master expand --id=3 --prompt="Focus on security aspects"

# Expand all pending tasks that don't have subtasks
task-master expand --all

# Force regeneration of subtasks for all pending tasks
task-master expand --all --force

# Use Perplexity AI for research-backed subtask generation
task-master expand --id=3 --research

# Use Perplexity AI for research-backed generation on all pending tasks
task-master expand --all --research
```

## Clearing Subtasks

The `clear-subtasks` command allows you to remove subtasks from specified tasks:

```bash
# Clear subtasks from a specific task
task-master clear-subtasks --id=3

# Clear subtasks from multiple tasks
task-master clear-subtasks --id=1,2,3

# Clear subtasks from all tasks
task-master clear-subtasks --all
```

Notes:

- After clearing subtasks, task files are automatically regenerated
- This is useful when you want to regenerate subtasks with a different approach
- Can be combined with the `expand` command to immediately generate new subtasks
- Works with both parent tasks and individual subtasks

## AI Integration (Updated)

- The script now uses a unified AI service layer (`ai-services-unified.js`).
- Model selection (e.g., Claude vs. Perplexity for `--research`) is determined by the configuration in `.taskmaster/config.json` based on the requested `role` (`main` or `research`).
- API keys are automatically resolved from your `.env` file (for CLI) or MCP session environment.
- To use the research capabilities (e.g., `expand --research`), ensure you have:
  1.  Configured a model for the `research` role using `task-master models --setup` (Perplexity models are recommended).
  2.  Added the corresponding API key (e.g., `PERPLEXITY_API_KEY`) to your `.env` file.

## Logging

The script supports different logging levels controlled by the `TASKMASTER_LOG_LEVEL` environment variable:

- `debug`: Detailed information, typically useful for troubleshooting
- `info`: Confirmation that things are working as expected (default)
- `warn`: Warning messages that don't prevent execution
- `error`: Error messages that might prevent execution

When `DEBUG=true` is set, debug logs are also written to a `dev-debug.log` file in the project root.

## Managing Task Dependencies

The `add-dependency` and `remove-dependency` commands allow you to manage task dependencies:

```bash
# Add a dependency to a task
task-master add-dependency --id=<id> --depends-on=<id>

# Remove a dependency from a task
task-master remove-dependency --id=<id> --depends-on=<id>
```

These commands:

1. **Allow precise dependency management**:

   - Add dependencies between tasks with automatic validation
   - Remove dependencies when they're no longer needed
   - Update task files automatically after changes

2. **Include validation checks**:

   - Prevent circular dependencies (a task depending on itself)
   - Prevent duplicate dependencies
   - Verify that both tasks exist before adding/removing dependencies
   - Check if dependencies exist before attempting to remove them

3. **Provide clear feedback**:

   - Success messages confirm when dependencies are added/removed
   - Error messages explain why operations failed (if applicable)

4. **Automatically update task files**:
   - Regenerates task files to reflect dependency changes
   - Ensures tasks and their files stay synchronized

## Dependency Validation and Fixing

The script provides two specialized commands to ensure task dependencies remain valid and properly maintained:

### Validating Dependencies

The `validate-dependencies` command allows you to check for invalid dependencies without making changes:

```bash
# Check for invalid dependencies in tasks.json
task-master validate-dependencies

# Specify a different tasks file
task-master validate-dependencies --file=custom-tasks.json
```

This command:

- Scans all tasks and subtasks for non-existent dependencies
- Identifies potential self-dependencies (tasks referencing themselves)
- Reports all found issues without modifying files
- Provides a comprehensive summary of dependency state
- Gives detailed statistics on task dependencies

Use this command to audit your task structure before applying fixes.

### Fixing Dependencies

The `fix-dependencies` command proactively finds and fixes all invalid dependencies:

```bash
# Find and fix all invalid dependencies
task-master fix-dependencies

# Specify a different tasks file
task-master fix-dependencies --file=custom-tasks.json
```

This command:

1. **Validates all dependencies** across tasks and subtasks
2. **Automatically removes**:
   - References to non-existent tasks and subtasks
   - Self-dependencies (tasks depending on themselves)
3. **Fixes issues in both**:
   - The tasks.json data structure
   - Individual task files during regeneration
4. **Provides a detailed report**:
   - Types of issues fixed (non-existent vs. self-dependencies)
   - Number of tasks affected (tasks vs. subtasks)
   - Where fixes were applied (tasks.json vs. task files)
   - List of all individual fixes made

This is especially useful when tasks have been deleted or IDs have changed, potentially breaking dependency chains.
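
As a rough illustration of the checks involved (a sketch only, not the script's actual validator, and limited to top-level tasks; subtasks are handled analogously):

```javascript
// Illustrative only; not the script's actual validator.
function findInvalidDependencies(tasks) {
	const knownIds = new Set(tasks.map((task) => task.id));
	const issues = [];

	for (const task of tasks) {
		for (const dep of task.dependencies || []) {
			if (dep === task.id) {
				issues.push({ taskId: task.id, dep, reason: 'self-dependency' });
			} else if (!knownIds.has(dep)) {
				issues.push({ taskId: task.id, dep, reason: 'non-existent task' });
			}
		}
	}

	return issues;
}
```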

## Analyzing Task Complexity

The `analyze-complexity` command allows you to automatically assess task complexity and generate expansion recommendations:

```bash
# Analyze all tasks and generate expansion recommendations
task-master analyze-complexity

# Specify a custom output file
task-master analyze-complexity --output=custom-report.json

# Override the model used for analysis
task-master analyze-complexity --model=claude-3-opus-20240229

# Set a custom complexity threshold (1-10)
task-master analyze-complexity --threshold=6

# Use Perplexity AI for research-backed complexity analysis
task-master analyze-complexity --research
```

Notes:

- The command uses Claude to analyze each task's complexity (or Perplexity with --research flag)
- Tasks are scored on a scale of 1-10
- Each task receives a recommended number of subtasks, based on the configured default subtask count (`defaultSubtasks` in `.taskmaster/config.json`)
- The default output path is `scripts/task-complexity-report.json`
- Each task in the analysis includes a ready-to-use `expansionCommand` that can be copied directly to the terminal or executed programmatically
- Tasks with complexity scores below the threshold (default: 5) may not need expansion
- The research flag provides more contextual and informed complexity assessments
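
For example, a short Node script can read the report and run the ready-made `expansionCommand` for every task at or above the threshold. This is an illustrative sketch that relies only on the report fields shown in the next section:

```javascript
// Illustrative sketch: run the report's ready-made expansion commands.
import fs from 'node:fs';
import { execSync } from 'node:child_process';

const report = JSON.parse(
	fs.readFileSync('scripts/task-complexity-report.json', 'utf8')
);

for (const entry of report.complexityAnalysis) {
	if (entry.complexityScore >= report.meta.thresholdScore) {
		execSync(entry.expansionCommand, { stdio: 'inherit' });
	}
}
```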

### Integration with Expand Command

The `expand` command automatically checks for and uses complexity analysis if available:

```bash
# Expand a task, using complexity report recommendations if available
task-master expand --id=8

# Expand all tasks, prioritizing by complexity score if a report exists
task-master expand --all

# Override recommendations with explicit values
task-master expand --id=8 --num=5 --prompt="Custom prompt"
```

When a complexity report exists:

- The `expand` command will use the recommended subtask count from the report (unless overridden)
- It will use the tailored expansion prompt from the report (unless a custom prompt is provided)
- When using `--all`, tasks are sorted by complexity score (highest first)
- The `--research` flag is preserved from the complexity analysis to expansion

The output report structure is:

```json
{
  "meta": {
    "generatedAt": "2023-06-15T12:34:56.789Z",
    "tasksAnalyzed": 20,
    "thresholdScore": 5,
    "projectName": "Your Project Name",
    "usedResearch": true
  },
  "complexityAnalysis": [
    {
      "taskId": 8,
      "taskTitle": "Develop Implementation Drift Handling",
      "complexityScore": 9.5,
      "recommendedSubtasks": 6,
      "expansionPrompt": "Create subtasks that handle detecting...",
      "reasoning": "This task requires sophisticated logic...",
      "expansionCommand": "task-master expand --id=8 --num=6 --prompt=\"Create subtasks...\" --research"
    }
    // More tasks sorted by complexity score (highest first)
  ]
}
```

## Finding the Next Task

The `next` command helps you determine which task to work on next based on dependencies and status:

```bash
# Show the next task to work on
task-master next

# Specify a different tasks file
task-master next --file=custom-tasks.json
```

This command:

1. Identifies all **eligible tasks** - pending or in-progress tasks whose dependencies are all satisfied (marked as done)
2. **Prioritizes** these eligible tasks by:
   - Priority level (high > medium > low)
   - Number of dependencies (fewer dependencies first)
   - Task ID (lower ID first)
3. **Displays** comprehensive information about the selected task:
   - Basic task details (ID, title, priority, dependencies)
   - Detailed description and implementation details
   - Subtasks if they exist
4. Provides **contextual suggested actions**:
   - Command to mark the task as in-progress
   - Command to mark the task as done when completed
   - Commands for working with subtasks (update status or expand)

This feature ensures you're always working on the most appropriate task based on your project's current state and dependency structure.
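
The selection boils down to a filter plus a three-level sort. A simplified sketch (not the script's actual code) of the logic described above:

```javascript
// Simplified sketch of the selection logic; not the script's actual code.
function findNextTask(tasks) {
	const done = new Set(
		tasks.filter((task) => task.status === 'done').map((task) => task.id)
	);
	const priorityRank = { high: 0, medium: 1, low: 2 };

	return tasks
		.filter(
			(task) =>
				(task.status === 'pending' || task.status === 'in-progress') &&
				(task.dependencies || []).every((dep) => done.has(dep))
		)
		.sort(
			(a, b) =>
				(priorityRank[a.priority] ?? 1) - (priorityRank[b.priority] ?? 1) || // high > medium > low
				(a.dependencies?.length || 0) - (b.dependencies?.length || 0) || // fewer dependencies first
				a.id - b.id // lower ID first
		)[0];
}
```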

## Showing Task Details

The `show` command allows you to view detailed information about a specific task:

```bash
# Show details for a specific task
task-master show 1

# Alternative syntax with --id option
task-master show --id=1

# Show details for a subtask
task-master show --id=1.2

# Specify a different tasks file
task-master show 3 --file=custom-tasks.json
```

This command:

1. **Displays comprehensive information** about the specified task:
   - Basic task details (ID, title, priority, dependencies, status)
   - Full description and implementation details
   - Test strategy information
   - Subtasks if they exist
2. **Handles both regular tasks and subtasks**:
   - For regular tasks, shows all subtasks and their status
   - For subtasks, shows the parent task relationship
3. **Provides contextual suggested actions**:
   - Commands to update the task status
   - Commands for working with subtasks
   - For subtasks, provides a link to view the parent task

This command is particularly useful when you need to examine a specific task in detail before implementing it or when you want to check the status and details of a particular task.

```

--------------------------------------------------------------------------------
/scripts/modules/task-manager/parse-prd/parse-prd-streaming.js:
--------------------------------------------------------------------------------

```javascript
/**
 * Streaming handler for PRD parsing
 */

import { createParsePrdTracker } from '../../../../src/progress/parse-prd-tracker.js';
import { displayParsePrdStart } from '../../../../src/ui/parse-prd.js';
import { getPriorityIndicators } from '../../../../src/ui/indicators.js';
import { TimeoutManager } from '../../../../src/utils/timeout-manager.js';
import {
	streamObjectService,
	generateObjectService
} from '../../ai-services-unified.js';
import {
	getMainModelId,
	getParametersForRole,
	getResearchModelId,
	getDefaultPriority
} from '../../config-manager.js';
import { LoggingConfig, prdResponseSchema } from './parse-prd-config.js';
import { estimateTokens, reportTaskProgress } from './parse-prd-helpers.js';
// Error types used by extractStreamFromResult below; the import path is assumed.
import {
	StreamingError,
	STREAMING_ERROR_CODES
} from '../../../../src/utils/stream-parser.js';

/**
 * Extract a readable stream from various stream result formats
 * @param {any} streamResult - The stream result object from AI service
 * @returns {AsyncIterable|ReadableStream} The extracted stream
 * @throws {StreamingError} If no valid stream can be extracted
 */
function extractStreamFromResult(streamResult) {
	if (!streamResult) {
		throw new StreamingError(
			'Stream result is null or undefined',
			STREAMING_ERROR_CODES.NOT_ASYNC_ITERABLE
		);
	}

	// Try extraction strategies in priority order
	const stream = tryExtractStream(streamResult);

	if (!stream) {
		throw new StreamingError(
			'Stream object is not async iterable or readable',
			STREAMING_ERROR_CODES.NOT_ASYNC_ITERABLE
		);
	}

	return stream;
}

/**
 * Try to extract stream using various strategies
 */
function tryExtractStream(streamResult) {
	const streamExtractors = [
		{ key: 'partialObjectStream', extractor: (obj) => obj.partialObjectStream },
		{ key: 'textStream', extractor: (obj) => extractCallable(obj.textStream) },
		{ key: 'stream', extractor: (obj) => extractCallable(obj.stream) },
		{ key: 'baseStream', extractor: (obj) => obj.baseStream }
	];

	for (const { key, extractor } of streamExtractors) {
		const stream = extractor(streamResult);
		if (stream && isStreamable(stream)) {
			return stream;
		}
	}

	// Check if already streamable
	return isStreamable(streamResult) ? streamResult : null;
}

/**
 * Extract a property that might be a function or direct value
 */
function extractCallable(property) {
	if (!property) return null;
	return typeof property === 'function' ? property() : property;
}

/**
 * Check if object is streamable (async iterable or readable stream)
 */
function isStreamable(obj) {
	return (
		obj &&
		(typeof obj[Symbol.asyncIterator] === 'function' ||
			(obj.getReader && typeof obj.getReader === 'function'))
	);
}

/**
 * Handle streaming AI service call and parsing
 * @param {Object} config - Configuration object
 * @param {Object} prompts - System and user prompts
 * @param {number} numTasks - Number of tasks to generate
 * @returns {Promise<Object>} Parsed tasks and telemetry
 */
export async function handleStreamingService(config, prompts, numTasks) {
	const context = createStreamingContext(config, prompts, numTasks);

	await initializeProgress(config, numTasks, context.estimatedInputTokens);

	const aiServiceResponse = await callAIServiceWithTimeout(
		config,
		prompts,
		config.streamingTimeout
	);

	const { progressTracker, priorityMap } = await setupProgressTracking(
		config,
		numTasks
	);

	const streamingResult = await processStreamResponse(
		aiServiceResponse.mainResult,
		config,
		prompts,
		numTasks,
		progressTracker,
		priorityMap,
		context.defaultPriority,
		context.estimatedInputTokens,
		context.logger
	);

	validateStreamingResult(streamingResult);

	// If we have usage data from streaming, log telemetry now
	if (streamingResult.usage && config.projectRoot) {
		const { logAiUsage } = await import('../../ai-services-unified.js');
		const { getUserId } = await import('../../config-manager.js');
		const userId = getUserId(config.projectRoot);

		if (userId && aiServiceResponse.providerName && aiServiceResponse.modelId) {
			try {
				const telemetryData = await logAiUsage({
					userId,
					commandName: 'parse-prd',
					providerName: aiServiceResponse.providerName,
					modelId: aiServiceResponse.modelId,
					inputTokens: streamingResult.usage.promptTokens || 0,
					outputTokens: streamingResult.usage.completionTokens || 0,
					outputType: config.isMCP ? 'mcp' : 'cli'
				});

				// Add telemetry to the response
				if (telemetryData) {
					aiServiceResponse.telemetryData = telemetryData;
				}
			} catch (telemetryError) {
				context.logger.report(
					`Failed to log telemetry: ${telemetryError.message}`,
					'debug'
				);
			}
		}
	}

	return prepareFinalResult(
		streamingResult,
		aiServiceResponse,
		context.estimatedInputTokens,
		progressTracker
	);
}

/**
 * Create streaming context with common values
 */
function createStreamingContext(config, prompts, numTasks) {
	const { systemPrompt, userPrompt } = prompts;
	return {
		logger: new LoggingConfig(config.mcpLog, config.reportProgress),
		estimatedInputTokens: estimateTokens(systemPrompt + userPrompt),
		defaultPriority: getDefaultPriority(config.projectRoot) || 'medium'
	};
}

/**
 * Validate streaming result has tasks
 */
function validateStreamingResult(streamingResult) {
	if (streamingResult.parsedTasks.length === 0) {
		throw new Error('No tasks were generated from the PRD');
	}
}

/**
 * Initialize progress reporting
 */
async function initializeProgress(config, numTasks, estimatedInputTokens) {
	if (config.reportProgress) {
		await config.reportProgress({
			progress: 0,
			total: numTasks,
			message: `Starting PRD analysis (Input: ${estimatedInputTokens} tokens)${config.research ? ' with research' : ''}...`
		});
	}
}

/**
 * Call AI service with timeout
 */
async function callAIServiceWithTimeout(config, prompts, timeout) {
	const { systemPrompt, userPrompt } = prompts;

	return await TimeoutManager.withTimeout(
		streamObjectService({
			role: config.research ? 'research' : 'main',
			session: config.session,
			projectRoot: config.projectRoot,
			schema: prdResponseSchema,
			systemPrompt,
			prompt: userPrompt,
			commandName: 'parse-prd',
			outputType: config.isMCP ? 'mcp' : 'cli'
		}),
		timeout,
		'Streaming operation'
	);
}

/**
 * Setup progress tracking for CLI output
 */
async function setupProgressTracking(config, numTasks) {
	const priorityMap = getPriorityIndicators(config.isMCP);
	let progressTracker = null;

	if (config.outputFormat === 'text' && !config.isMCP) {
		progressTracker = createParsePrdTracker({
			numUnits: numTasks,
			unitName: 'task',
			append: config.append
		});

		const modelId = config.research ? getResearchModelId() : getMainModelId();
		const parameters = getParametersForRole(
			config.research ? 'research' : 'main'
		);

		displayParsePrdStart({
			prdFilePath: config.prdPath,
			outputPath: config.tasksPath,
			numTasks,
			append: config.append,
			research: config.research,
			force: config.force,
			existingTasks: [],
			nextId: 1,
			model: modelId || 'Default',
			temperature: parameters?.temperature || 0.7
		});

		progressTracker.start();
	}

	return { progressTracker, priorityMap };
}

/**
 * Process stream response based on stream type
 */
async function processStreamResponse(
	streamResult,
	config,
	prompts,
	numTasks,
	progressTracker,
	priorityMap,
	defaultPriority,
	estimatedInputTokens,
	logger
) {
	const { systemPrompt, userPrompt } = prompts;
	const context = {
		config: {
			...config,
			schema: prdResponseSchema // Add the schema for generateObject fallback
		},
		numTasks,
		progressTracker,
		priorityMap,
		defaultPriority,
		estimatedInputTokens,
		prompt: userPrompt,
		systemPrompt: systemPrompt
	};

	try {
		const streamingState = {
			lastPartialObject: null,
			taskCount: 0,
			estimatedOutputTokens: 0,
			usage: null
		};

		await processPartialStream(
			streamResult.partialObjectStream,
			streamingState,
			context
		);

		// Wait for usage data if available
		if (streamResult.usage) {
			try {
				streamingState.usage = await streamResult.usage;
			} catch (usageError) {
				logger.report(
					`Failed to get usage data: ${usageError.message}`,
					'debug'
				);
			}
		}

		return finalizeStreamingResults(streamingState, context);
	} catch (error) {
		logger.report(
			`StreamObject processing failed: ${error.message}. Falling back to generateObject.`,
			'debug'
		);
		return await processWithGenerateObject(context, logger);
	}
}

/**
 * Process the partial object stream
 */
async function processPartialStream(partialStream, state, context) {
	for await (const partialObject of partialStream) {
		state.lastPartialObject = partialObject;

		if (partialObject) {
			state.estimatedOutputTokens = estimateTokens(
				JSON.stringify(partialObject)
			);
		}

		await processStreamingTasks(partialObject, state, context);
	}
}

/**
 * Process tasks from a streaming partial object
 */
async function processStreamingTasks(partialObject, state, context) {
	if (!partialObject?.tasks || !Array.isArray(partialObject.tasks)) {
		return;
	}

	const newTaskCount = partialObject.tasks.length;

	if (newTaskCount > state.taskCount) {
		await processNewTasks(
			partialObject.tasks,
			state.taskCount,
			newTaskCount,
			state.estimatedOutputTokens,
			context
		);
		state.taskCount = newTaskCount;
	} else if (context.progressTracker && state.estimatedOutputTokens > 0) {
		context.progressTracker.updateTokens(
			context.estimatedInputTokens,
			state.estimatedOutputTokens,
			true
		);
	}
}

/**
 * Process newly appeared tasks in the stream
 */
async function processNewTasks(
	tasks,
	startIndex,
	endIndex,
	estimatedOutputTokens,
	context
) {
	for (let i = startIndex; i < endIndex; i++) {
		const task = tasks[i] || {};

		if (task.title) {
			await reportTaskProgress({
				task,
				currentCount: i + 1,
				totalTasks: context.numTasks,
				estimatedTokens: estimatedOutputTokens,
				progressTracker: context.progressTracker,
				reportProgress: context.config.reportProgress,
				priorityMap: context.priorityMap,
				defaultPriority: context.defaultPriority,
				estimatedInputTokens: context.estimatedInputTokens
			});
		} else {
			await reportPlaceholderTask(i + 1, estimatedOutputTokens, context);
		}
	}
}

/**
 * Report a placeholder task while it's being generated
 */
async function reportPlaceholderTask(
	taskNumber,
	estimatedOutputTokens,
	context
) {
	const {
		progressTracker,
		config,
		numTasks,
		defaultPriority,
		estimatedInputTokens
	} = context;

	if (progressTracker) {
		progressTracker.addTaskLine(
			taskNumber,
			`Generating task ${taskNumber}...`,
			defaultPriority
		);
		progressTracker.updateTokens(
			estimatedInputTokens,
			estimatedOutputTokens,
			true
		);
	}

	if (config.reportProgress && !progressTracker) {
		await config.reportProgress({
			progress: taskNumber,
			total: numTasks,
			message: `Generating task ${taskNumber}/${numTasks}...`
		});
	}
}

/**
 * Finalize streaming results and update progress display
 */
async function finalizeStreamingResults(state, context) {
	const { lastPartialObject, estimatedOutputTokens, taskCount, usage } = state;

	if (!lastPartialObject?.tasks || !Array.isArray(lastPartialObject.tasks)) {
		throw new Error('No tasks generated from streamObject');
	}

	// Use actual token counts if available, otherwise use estimates
	const finalOutputTokens = usage?.completionTokens || estimatedOutputTokens;
	const finalInputTokens = usage?.promptTokens || context.estimatedInputTokens;

	if (context.progressTracker) {
		await updateFinalProgress(
			lastPartialObject.tasks,
			taskCount,
			usage ? finalOutputTokens : estimatedOutputTokens,
			context,
			usage ? finalInputTokens : null
		);
	}

	return {
		parsedTasks: lastPartialObject.tasks,
		estimatedOutputTokens: finalOutputTokens,
		actualInputTokens: finalInputTokens,
		usage,
		usedFallback: false
	};
}

/**
 * Update progress tracker with final task content
 */
async function updateFinalProgress(
	tasks,
	taskCount,
	outputTokens,
	context,
	actualInputTokens = null
) {
	const { progressTracker, defaultPriority, estimatedInputTokens } = context;

	if (taskCount > 0) {
		updateTaskLines(tasks, progressTracker, defaultPriority);
	} else {
		await reportAllTasks(tasks, outputTokens, context);
	}

	progressTracker.updateTokens(
		actualInputTokens || estimatedInputTokens,
		outputTokens,
		false
	);
	progressTracker.stop();
}

/**
 * Update task lines in progress tracker with final content
 */
function updateTaskLines(tasks, progressTracker, defaultPriority) {
	for (let i = 0; i < tasks.length; i++) {
		const task = tasks[i];
		if (task?.title) {
			progressTracker.addTaskLine(
				i + 1,
				task.title,
				task.priority || defaultPriority
			);
		}
	}
}

/**
 * Report all tasks that were not streamed incrementally
 */
async function reportAllTasks(tasks, estimatedOutputTokens, context) {
	for (let i = 0; i < tasks.length; i++) {
		const task = tasks[i];
		if (task?.title) {
			await reportTaskProgress({
				task,
				currentCount: i + 1,
				totalTasks: context.numTasks,
				estimatedTokens: estimatedOutputTokens,
				progressTracker: context.progressTracker,
				reportProgress: context.config.reportProgress,
				priorityMap: context.priorityMap,
				defaultPriority: context.defaultPriority,
				estimatedInputTokens: context.estimatedInputTokens
			});
		}
	}
}

/**
 * Process with generateObject as fallback when streaming fails
 */
async function processWithGenerateObject(context, logger) {
	logger.report('Using generateObject fallback for PRD parsing', 'info');

	// Show placeholder tasks while generating
	if (context.progressTracker) {
		for (let i = 0; i < context.numTasks; i++) {
			context.progressTracker.addTaskLine(
				i + 1,
				`Generating task ${i + 1}...`,
				context.defaultPriority
			);
			context.progressTracker.updateTokens(
				context.estimatedInputTokens,
				0,
				true
			);
		}
	}

	// Use generateObjectService instead of streaming
	const result = await generateObjectService({
		role: context.config.research ? 'research' : 'main',
		commandName: 'parse-prd',
		prompt: context.prompt,
		systemPrompt: context.systemPrompt,
		schema: context.config.schema,
		outputFormat: context.config.outputFormat || 'text',
		projectRoot: context.config.projectRoot,
		session: context.config.session
	});

	// Extract tasks from the result (handle both direct tasks and mainResult.tasks)
	const tasks = result?.mainResult || result;

	// Process the generated tasks
	if (tasks && Array.isArray(tasks.tasks)) {
		// Update progress tracker with final tasks
		if (context.progressTracker) {
			for (let i = 0; i < tasks.tasks.length; i++) {
				const task = tasks.tasks[i];
				if (task && task.title) {
					context.progressTracker.addTaskLine(
						i + 1,
						task.title,
						task.priority || context.defaultPriority
					);
				}
			}

			// Final token update - use actual telemetry if available
			const outputTokens =
				result.telemetryData?.outputTokens ||
				estimateTokens(JSON.stringify(tasks));
			const inputTokens =
				result.telemetryData?.inputTokens || context.estimatedInputTokens;

			context.progressTracker.updateTokens(inputTokens, outputTokens, false);
		}

		return {
			parsedTasks: tasks.tasks,
			estimatedOutputTokens:
				result.telemetryData?.outputTokens ||
				estimateTokens(JSON.stringify(tasks)),
			actualInputTokens: result.telemetryData?.inputTokens,
			telemetryData: result.telemetryData,
			usedFallback: true
		};
	}

	throw new Error('Failed to generate tasks using generateObject fallback');
}

/**
 * Prepare final result with cleanup
 */
function prepareFinalResult(
	streamingResult,
	aiServiceResponse,
	estimatedInputTokens,
	progressTracker
) {
	let summary = null;
	if (progressTracker) {
		summary = progressTracker.getSummary();
		progressTracker.cleanup();
	}

	// If we have actual usage data from streaming, update the AI service response
	if (streamingResult.usage && aiServiceResponse) {
		// Map the Vercel AI SDK usage format to our telemetry format
		const usage = streamingResult.usage;
		if (!aiServiceResponse.usage) {
			aiServiceResponse.usage = {
				promptTokens: usage.promptTokens || 0,
				completionTokens: usage.completionTokens || 0,
				totalTokens: usage.totalTokens || 0
			};
		}

		// The telemetry should have been logged in the unified service runner
		// but if not, the usage is now available for telemetry calculation
	}

	return {
		parsedTasks: streamingResult.parsedTasks,
		aiServiceResponse,
		estimatedInputTokens:
			streamingResult.actualInputTokens || estimatedInputTokens,
		estimatedOutputTokens: streamingResult.estimatedOutputTokens,
		usedFallback: streamingResult.usedFallback,
		progressTracker,
		summary
	};
}

```

--------------------------------------------------------------------------------
/packages/tm-core/src/modules/workflow/orchestrators/workflow-orchestrator.ts:
--------------------------------------------------------------------------------

```typescript
import type { TestResultValidator } from '../services/test-result-validator.js';
import type {
	StateTransition,
	SubtaskInfo,
	TDDPhase,
	WorkflowContext,
	WorkflowError,
	WorkflowEvent,
	WorkflowEventData,
	WorkflowEventListener,
	WorkflowEventType,
	WorkflowPhase,
	WorkflowState
} from '../types.js';

/**
 * Lightweight state machine for TDD workflow orchestration
 */
export class WorkflowOrchestrator {
	private currentPhase: WorkflowPhase;
	private context: WorkflowContext;
	private readonly transitions: StateTransition[];
	private readonly eventListeners: Map<
		WorkflowEventType,
		Set<WorkflowEventListener>
	>;
	private persistCallback?: (state: WorkflowState) => void | Promise<void>;
	private autoPersistEnabled: boolean = false;
	private readonly phaseGuards: Map<
		WorkflowPhase,
		(context: WorkflowContext) => boolean
	>;
	private aborted: boolean = false;
	private testResultValidator?: TestResultValidator;
	private gitOperationHook?: (operation: string, data?: unknown) => void;
	private executeHook?: (command: string, context: WorkflowContext) => void;

	constructor(initialContext: WorkflowContext) {
		this.currentPhase = 'PREFLIGHT';
		this.context = { ...initialContext };
		this.transitions = this.defineTransitions();
		this.eventListeners = new Map();
		this.phaseGuards = new Map();
	}

	/**
	 * Define valid state transitions
	 */
	private defineTransitions(): StateTransition[] {
		return [
			{
				from: 'PREFLIGHT',
				to: 'BRANCH_SETUP',
				event: 'PREFLIGHT_COMPLETE'
			},
			{
				from: 'BRANCH_SETUP',
				to: 'SUBTASK_LOOP',
				event: 'BRANCH_CREATED'
			},
			{
				from: 'SUBTASK_LOOP',
				to: 'FINALIZE',
				event: 'ALL_SUBTASKS_COMPLETE'
			},
			{
				from: 'FINALIZE',
				to: 'COMPLETE',
				event: 'FINALIZE_COMPLETE'
			}
		];
	}

	/**
	 * Get current workflow phase
	 */
	getCurrentPhase(): WorkflowPhase {
		return this.currentPhase;
	}

	/**
	 * Get current TDD phase (only valid in SUBTASK_LOOP)
	 */
	getCurrentTDDPhase(): TDDPhase | undefined {
		if (this.currentPhase === 'SUBTASK_LOOP') {
			return this.context.currentTDDPhase || 'RED';
		}
		return undefined;
	}

	/**
	 * Get workflow context
	 */
	getContext(): WorkflowContext {
		return { ...this.context };
	}

	/**
	 * Transition to next state based on event
	 */
	async transition(event: WorkflowEvent): Promise<void> {
		// Check if workflow is aborted
		if (this.aborted && event.type !== 'ABORT') {
			throw new Error('Workflow has been aborted');
		}

		// Handle special events that work across all phases
		if (event.type === 'ERROR') {
			this.handleError(event.error);
			await this.triggerAutoPersist();
			return;
		}

		if (event.type === 'ABORT') {
			this.aborted = true;
			await this.triggerAutoPersist();
			return;
		}

		if (event.type === 'RETRY') {
			this.handleRetry();
			await this.triggerAutoPersist();
			return;
		}

		// Handle TDD phase transitions within SUBTASK_LOOP
		if (this.currentPhase === 'SUBTASK_LOOP') {
			await this.handleTDDPhaseTransition(event);
			await this.triggerAutoPersist();
			return;
		}

		// Handle main workflow phase transitions
		const validTransition = this.transitions.find(
			(t) => t.from === this.currentPhase && t.event === event.type
		);

		if (!validTransition) {
			throw new Error(
				`Invalid transition: ${event.type} from ${this.currentPhase}`
			);
		}

		// Execute transition
		this.executeTransition(validTransition, event);
		await this.triggerAutoPersist();
	}

	/**
	 * Handle TDD phase transitions (RED -> GREEN -> COMMIT)
	 */
	private async handleTDDPhaseTransition(event: WorkflowEvent): Promise<void> {
		const currentTDD = this.context.currentTDDPhase || 'RED';

		switch (event.type) {
			case 'RED_PHASE_COMPLETE':
				if (currentTDD !== 'RED') {
					throw new Error(
						'Invalid transition: RED_PHASE_COMPLETE from non-RED phase'
					);
				}

				// Validate test results are provided
				if (!event.testResults) {
					throw new Error('Test results required for RED phase transition');
				}

				// Store test results in context
				this.context.lastTestResults = event.testResults;

				// Special case: All tests passing in RED phase means feature already implemented
				if (event.testResults.failed === 0) {
					this.emit('tdd:red:completed');
					this.emit('tdd:feature-already-implemented', {
						subtaskId: this.getCurrentSubtaskId(),
						testResults: event.testResults
					});

					// Mark subtask as complete and move to next one
					const subtask =
						this.context.subtasks[this.context.currentSubtaskIndex];
					if (subtask) {
						subtask.status = 'completed';
					}

					this.emit('subtask:completed');
					this.context.currentSubtaskIndex++;

					// Emit progress update
					const progress = this.getProgress();
					this.emit('progress:updated', {
						completed: progress.completed,
						total: progress.total,
						percentage: progress.percentage
					});

					// Start next subtask or complete workflow
					if (this.context.currentSubtaskIndex < this.context.subtasks.length) {
						this.context.currentTDDPhase = 'RED';
						this.emit('tdd:red:started');
						this.emit('subtask:started');
					} else {
						// All subtasks complete, transition to FINALIZE
						await this.transition({ type: 'ALL_SUBTASKS_COMPLETE' });
					}
					break;
				}

				// Normal RED phase: has failing tests, proceed to GREEN
				this.emit('tdd:red:completed');
				this.context.currentTDDPhase = 'GREEN';
				this.emit('tdd:green:started');
				break;

			case 'GREEN_PHASE_COMPLETE':
				if (currentTDD !== 'GREEN') {
					throw new Error(
						'Invalid transition: GREEN_PHASE_COMPLETE from non-GREEN phase'
					);
				}

				// Validate test results are provided
				if (!event.testResults) {
					throw new Error('Test results required for GREEN phase transition');
				}

				// Validate GREEN phase has no failures
				if (event.testResults.failed !== 0) {
					throw new Error('GREEN phase must have zero failures');
				}

				// Store test results in context
				this.context.lastTestResults = event.testResults;

				this.emit('tdd:green:completed');
				this.context.currentTDDPhase = 'COMMIT';
				this.emit('tdd:commit:started');
				break;

			case 'COMMIT_COMPLETE': {
				if (currentTDD !== 'COMMIT') {
					throw new Error(
						'Invalid transition: COMMIT_COMPLETE from non-COMMIT phase'
					);
				}
				this.emit('tdd:commit:completed');
				// Mark current subtask as completed
				const currentSubtask =
					this.context.subtasks[this.context.currentSubtaskIndex];
				if (currentSubtask) {
					currentSubtask.status = 'completed';
				}
				break;
			}

			case 'SUBTASK_COMPLETE': {
				this.emit('subtask:completed');
				// Move to next subtask
				this.context.currentSubtaskIndex++;

				// Emit progress update
				const progress = this.getProgress();
				this.emit('progress:updated', {
					completed: progress.completed,
					total: progress.total,
					percentage: progress.percentage
				});

				if (this.context.currentSubtaskIndex < this.context.subtasks.length) {
					// Start next subtask with RED phase
					this.context.currentTDDPhase = 'RED';
					this.emit('tdd:red:started');
					this.emit('subtask:started');
				} else {
					// All subtasks complete, transition to FINALIZE
					await this.transition({ type: 'ALL_SUBTASKS_COMPLETE' });
				}
				break;
			}

			case 'ALL_SUBTASKS_COMPLETE':
				// Transition to FINALIZE phase
				this.emit('phase:exited');
				this.currentPhase = 'FINALIZE';
				this.context.currentTDDPhase = undefined;
				this.emit('phase:entered');
				// Note: Don't auto-transition to COMPLETE - requires explicit finalize call
				break;

			default:
				throw new Error(`Invalid transition: ${event.type} in SUBTASK_LOOP`);
		}
	}

	/**
	 * Execute a state transition
	 */
	private executeTransition(
		transition: StateTransition,
		event: WorkflowEvent
	): void {
		// Check guard condition if present
		if (transition.guard && !transition.guard(this.context)) {
			throw new Error(
				`Guard condition failed for transition to ${transition.to}`
			);
		}

		// Check phase-specific guard if present
		const phaseGuard = this.phaseGuards.get(transition.to);
		if (phaseGuard && !phaseGuard(this.context)) {
			throw new Error('Guard condition failed');
		}

		// Emit phase exit event
		this.emit('phase:exited');

		// Update context based on event
		this.updateContext(event);

		// Transition to new phase
		this.currentPhase = transition.to;

		// Emit phase entry event
		this.emit('phase:entered');

		// Initialize TDD phase if entering SUBTASK_LOOP
		if (this.currentPhase === 'SUBTASK_LOOP') {
			this.context.currentTDDPhase = 'RED';
			this.emit('tdd:red:started');
			this.emit('subtask:started');
		}
	}

	/**
	 * Update context based on event
	 */
	private updateContext(event: WorkflowEvent): void {
		switch (event.type) {
			case 'BRANCH_CREATED':
				this.context.branchName = event.branchName;
				this.emit('git:branch:created', { branchName: event.branchName });

				// Trigger git operation hook
				if (this.gitOperationHook) {
					this.gitOperationHook('branch:created', {
						branchName: event.branchName
					});
				}
				break;

			case 'ERROR':
				this.context.errors.push(event.error);
				this.emit('error:occurred', { error: event.error });
				break;

			// Add more context updates as needed
		}
	}

	/**
	 * Get current state for serialization
	 */
	getState(): WorkflowState {
		return {
			phase: this.currentPhase,
			context: { ...this.context }
		};
	}

	/**
	 * Restore state from checkpoint
	 */
	restoreState(state: WorkflowState): void {
		this.currentPhase = state.phase;
		this.context = { ...state.context };

		// Emit workflow:resumed event
		this.emit('workflow:resumed', {
			phase: this.currentPhase,
			progress: this.getProgress()
		});
	}

	/**
	 * Add event listener
	 */
	on(eventType: WorkflowEventType, listener: WorkflowEventListener): void {
		if (!this.eventListeners.has(eventType)) {
			this.eventListeners.set(eventType, new Set());
		}
		this.eventListeners.get(eventType)!.add(listener);
	}

	/**
	 * Remove event listener
	 */
	off(eventType: WorkflowEventType, listener: WorkflowEventListener): void {
		const listeners = this.eventListeners.get(eventType);
		if (listeners) {
			listeners.delete(listener);
		}
	}

	/**
	 * Emit workflow event
	 */
	private emit(
		eventType: WorkflowEventType,
		data?: Record<string, unknown>
	): void {
		const eventData: WorkflowEventData = {
			type: eventType,
			timestamp: new Date(),
			phase: this.currentPhase,
			tddPhase: this.context.currentTDDPhase,
			subtaskId: this.getCurrentSubtaskId(),
			data: {
				...data,
				adapters: {
					testValidator: !!this.testResultValidator,
					gitHook: !!this.gitOperationHook,
					executeHook: !!this.executeHook
				}
			}
		};

		const listeners = this.eventListeners.get(eventType);
		if (listeners) {
			listeners.forEach((listener) => listener(eventData));
		}
	}

	/**
	 * Get current subtask ID
	 */
	private getCurrentSubtaskId(): string | undefined {
		const currentSubtask =
			this.context.subtasks[this.context.currentSubtaskIndex];
		return currentSubtask?.id;
	}

	/**
	 * Register callback for state persistence
	 */
	onStatePersist(
		callback: (state: WorkflowState) => void | Promise<void>
	): void {
		this.persistCallback = callback;
	}

	/**
	 * Enable auto-persistence after each transition
	 */
	enableAutoPersist(
		callback: (state: WorkflowState) => void | Promise<void>
	): void {
		this.persistCallback = callback;
		this.autoPersistEnabled = true;
	}

	/**
	 * Disable auto-persistence
	 */
	disableAutoPersist(): void {
		this.autoPersistEnabled = false;
	}

	/**
	 * Manually persist current state
	 */
	async persistState(): Promise<void> {
		if (this.persistCallback) {
			await this.persistCallback(this.getState());
		}
		this.emit('state:persisted');
	}

	/**
	 * Trigger auto-persistence if enabled
	 */
	private async triggerAutoPersist(): Promise<void> {
		if (this.autoPersistEnabled && this.persistCallback) {
			await this.persistCallback(this.getState());
		}
	}

	/**
	 * Add a guard condition for a specific phase
	 */
	addGuard(
		phase: WorkflowPhase,
		guard: (context: WorkflowContext) => boolean
	): void {
		this.phaseGuards.set(phase, guard);
	}

	/**
	 * Remove a guard condition for a specific phase
	 */
	removeGuard(phase: WorkflowPhase): void {
		this.phaseGuards.delete(phase);
	}

	/**
	 * Get current subtask being worked on
	 */
	getCurrentSubtask(): SubtaskInfo | undefined {
		return this.context.subtasks[this.context.currentSubtaskIndex];
	}

	/**
	 * Get workflow progress information
	 */
	getProgress(): {
		completed: number;
		total: number;
		current: number;
		percentage: number;
	} {
		const completed = this.context.subtasks.filter(
			(st) => st.status === 'completed'
		).length;
		const total = this.context.subtasks.length;
		const current = this.context.currentSubtaskIndex + 1;
		const percentage = total > 0 ? Math.round((completed / total) * 100) : 0;

		return { completed, total, current, percentage };
	}

	/**
	 * Check if can proceed to next subtask or phase
	 */
	canProceed(): boolean {
		if (this.currentPhase !== 'SUBTASK_LOOP') {
			return false;
		}

		const currentSubtask = this.getCurrentSubtask();

		// Can proceed if current subtask is completed (after COMMIT phase)
		return currentSubtask?.status === 'completed';
	}

	/**
	 * Increment attempts for current subtask
	 */
	incrementAttempts(): void {
		const currentSubtask = this.getCurrentSubtask();
		if (currentSubtask) {
			currentSubtask.attempts++;
		}
	}

	/**
	 * Check if current subtask has exceeded max attempts
	 */
	hasExceededMaxAttempts(): boolean {
		const currentSubtask = this.getCurrentSubtask();
		if (!currentSubtask || !currentSubtask.maxAttempts) {
			return false;
		}

		return currentSubtask.attempts > currentSubtask.maxAttempts;
	}

	/**
	 * Handle error event
	 */
	private handleError(error: WorkflowError): void {
		this.context.errors.push(error);
		this.emit('error:occurred', { error });
	}

	/**
	 * Handle retry event
	 */
	private handleRetry(): void {
		if (this.currentPhase === 'SUBTASK_LOOP') {
			// Reset to RED phase to retry current subtask
			this.context.currentTDDPhase = 'RED';
			this.emit('tdd:red:started');
		}
	}

	/**
	 * Retry current subtask (resets to RED phase)
	 */
	retryCurrentSubtask(): void {
		if (this.currentPhase === 'SUBTASK_LOOP') {
			this.context.currentTDDPhase = 'RED';
			this.emit('tdd:red:started');
		}
	}

	/**
	 * Handle max attempts exceeded for current subtask
	 */
	handleMaxAttemptsExceeded(): void {
		const currentSubtask = this.getCurrentSubtask();
		if (currentSubtask) {
			currentSubtask.status = 'failed';
			this.emit('subtask:failed', {
				subtaskId: currentSubtask.id,
				attempts: currentSubtask.attempts,
				maxAttempts: currentSubtask.maxAttempts
			});
		}
	}

	/**
	 * Check if workflow has been aborted
	 */
	isAborted(): boolean {
		return this.aborted;
	}

	/**
	 * Validate if a state can be resumed from
	 */
	canResumeFromState(state: WorkflowState): boolean {
		// Validate phase is valid
		const validPhases: WorkflowPhase[] = [
			'PREFLIGHT',
			'BRANCH_SETUP',
			'SUBTASK_LOOP',
			'FINALIZE',
			'COMPLETE'
		];

		if (!validPhases.includes(state.phase)) {
			return false;
		}

		// Validate context structure
		if (!state.context || typeof state.context !== 'object') {
			return false;
		}

		// Validate required context fields
		if (!state.context.taskId || !Array.isArray(state.context.subtasks)) {
			return false;
		}

		if (typeof state.context.currentSubtaskIndex !== 'number') {
			return false;
		}

		if (!Array.isArray(state.context.errors)) {
			return false;
		}

		// All validations passed
		return true;
	}

	/**
	 * Set TestResultValidator adapter
	 */
	setTestResultValidator(validator: TestResultValidator): void {
		this.testResultValidator = validator;
		this.emit('adapter:configured', { adapterType: 'test-validator' });
	}

	/**
	 * Check if TestResultValidator is configured
	 */
	hasTestResultValidator(): boolean {
		return !!this.testResultValidator;
	}

	/**
	 * Remove TestResultValidator adapter
	 */
	removeTestResultValidator(): void {
		this.testResultValidator = undefined;
	}

	/**
	 * Register git operation hook
	 */
	onGitOperation(hook: (operation: string, data?: unknown) => void): void {
		this.gitOperationHook = hook;
	}

	/**
	 * Register execute command hook
	 */
	onExecute(hook: (command: string, context: WorkflowContext) => void): void {
		this.executeHook = hook;
	}

	/**
	 * Execute a command (triggers execute hook)
	 */
	executeCommand(command: string): void {
		if (this.executeHook) {
			this.executeHook(command, this.context);
		}
	}
}

```
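
A minimal driving sketch for the orchestrator above, not taken from the repository: the import path, the context literal, and the `testResults` shape are assumptions inferred from the fields the class reads (`taskId`, `subtasks`, `currentSubtaskIndex`, `errors`, `testResults.failed`); the real types live in `../types.js`.

```typescript
// Hypothetical import path; adjust to wherever WorkflowOrchestrator is exported from.
import { WorkflowOrchestrator } from './workflow-orchestrator.js';

// Context fields mirror what the orchestrator reads; the real WorkflowContext
// type (in ../types.js) may require more fields than shown here.
const context = {
	taskId: '42',
	subtasks: [{ id: '42.1', status: 'pending', attempts: 0, maxAttempts: 3 }],
	currentSubtaskIndex: 0,
	errors: []
};
const orchestrator = new WorkflowOrchestrator(context as any);

orchestrator.on('tdd:green:started', (event) => {
	console.log(`GREEN started for subtask ${event.subtaskId}`);
});

// Main phases: PREFLIGHT -> BRANCH_SETUP -> SUBTASK_LOOP.
await orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
await orchestrator.transition({ type: 'BRANCH_CREATED', branchName: 'task-42' });

// One RED -> GREEN -> COMMIT cycle; only the `failed` count is checked by the class,
// so the rest of the TestResult shape is omitted here.
await orchestrator.transition({
	type: 'RED_PHASE_COMPLETE',
	testResults: { failed: 3 } as any
});
await orchestrator.transition({
	type: 'GREEN_PHASE_COMPLETE',
	testResults: { failed: 0 } as any
});
await orchestrator.transition({ type: 'COMMIT_COMPLETE' });

// With the last subtask done, SUBTASK_COMPLETE advances to FINALIZE,
// and FINALIZE_COMPLETE moves the workflow to COMPLETE.
await orchestrator.transition({ type: 'SUBTASK_COMPLETE' });
await orchestrator.transition({ type: 'FINALIZE_COMPLETE' });
```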

--------------------------------------------------------------------------------
/tests/unit/scripts/modules/task-manager/research.test.js:
--------------------------------------------------------------------------------

```javascript
import { jest } from '@jest/globals';

jest.unstable_mockModule('../../../../../scripts/modules/utils.js', () => ({
	findProjectRoot: jest.fn(() => '/test/project/root'),
	log: jest.fn(),
	readJSON: jest.fn(),
	flattenTasksWithSubtasks: jest.fn(() => []),
	isEmpty: jest.fn(() => false)
}));

// Mock UI-affecting external libs with minimal no-op implementations
jest.unstable_mockModule('chalk', () => ({
	default: {
		white: Object.assign(
			jest.fn((text) => text),
			{
				bold: jest.fn((text) => text)
			}
		),
		cyan: Object.assign(
			jest.fn((text) => text),
			{
				bold: jest.fn((text) => text)
			}
		),
		green: Object.assign(
			jest.fn((text) => text),
			{
				bold: jest.fn((text) => text)
			}
		),
		yellow: jest.fn((text) => text),
		red: jest.fn((text) => text),
		gray: jest.fn((text) => text),
		blue: Object.assign(
			jest.fn((text) => text),
			{
				bold: jest.fn((text) => text)
			}
		),
		bold: jest.fn((text) => text)
	}
}));

jest.unstable_mockModule('boxen', () => ({ default: (text) => text }));

jest.unstable_mockModule('inquirer', () => ({
	default: { prompt: jest.fn() }
}));

jest.unstable_mockModule('cli-highlight', () => ({
	highlight: (code) => code
}));

jest.unstable_mockModule('cli-table3', () => ({
	default: jest.fn().mockImplementation(() => ({
		push: jest.fn(),
		toString: jest.fn(() => '')
	}))
}));

jest.unstable_mockModule(
	'../../../../../scripts/modules/utils/contextGatherer.js',
	() => ({
		ContextGatherer: jest.fn().mockImplementation(() => ({
			gather: jest.fn().mockResolvedValue({
				context: 'Gathered context',
				tokenBreakdown: { total: 500 }
			}),
			countTokens: jest.fn(() => 100)
		}))
	})
);

jest.unstable_mockModule(
	'../../../../../scripts/modules/utils/fuzzyTaskSearch.js',
	() => ({
		FuzzyTaskSearch: jest.fn().mockImplementation(() => ({
			findRelevantTasks: jest.fn(() => []),
			getTaskIds: jest.fn(() => [])
		}))
	})
);

jest.unstable_mockModule(
	'../../../../../scripts/modules/ai-services-unified.js',
	() => ({
		generateTextService: jest.fn().mockResolvedValue({
			mainResult:
				'Test research result with ```javascript\nconsole.log("test");\n```',
			telemetryData: {}
		})
	})
);

jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({
	displayAiUsageSummary: jest.fn(),
	startLoadingIndicator: jest.fn(() => ({ stop: jest.fn() })),
	stopLoadingIndicator: jest.fn()
}));

jest.unstable_mockModule(
	'../../../../../scripts/modules/prompt-manager.js',
	() => ({
		getPromptManager: jest.fn().mockReturnValue({
			loadPrompt: jest.fn().mockResolvedValue({
				systemPrompt: 'System prompt',
				userPrompt: 'User prompt'
			})
		})
	})
);
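
// The dynamic import() calls below must come after the jest.unstable_mockModule()
// registrations above; with ESM, mocks only apply to modules imported after they are registered.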

const { performResearch } = await import(
	'../../../../../scripts/modules/task-manager/research.js'
);

// Import mocked modules for testing
const utils = await import('../../../../../scripts/modules/utils.js');
const { ContextGatherer } = await import(
	'../../../../../scripts/modules/utils/contextGatherer.js'
);
const { FuzzyTaskSearch } = await import(
	'../../../../../scripts/modules/utils/fuzzyTaskSearch.js'
);
const { generateTextService } = await import(
	'../../../../../scripts/modules/ai-services-unified.js'
);

describe('performResearch project root validation', () => {
	it('throws error when project root cannot be determined', async () => {
		// Mock findProjectRoot to return null
		utils.findProjectRoot.mockReturnValueOnce(null);

		await expect(
			performResearch('Test query', {}, {}, 'json', false)
		).rejects.toThrow('Could not determine project root directory');
	});
});

describe('performResearch tag-aware functionality', () => {
	let mockContextGatherer;
	let mockFuzzySearch;
	let mockReadJSON;
	let mockFlattenTasks;

	beforeEach(() => {
		// Reset all mocks
		jest.clearAllMocks();

		// Set up default mocks
		utils.findProjectRoot.mockReturnValue('/test/project/root');
		utils.readJSON.mockResolvedValue({
			tasks: [
				{ id: 1, title: 'Task 1', description: 'Description 1' },
				{ id: 2, title: 'Task 2', description: 'Description 2' }
			]
		});
		utils.flattenTasksWithSubtasks.mockReturnValue([
			{ id: 1, title: 'Task 1', description: 'Description 1' },
			{ id: 2, title: 'Task 2', description: 'Description 2' }
		]);

		// Set up ContextGatherer mock
		mockContextGatherer = {
			gather: jest.fn().mockResolvedValue({
				context: 'Gathered context',
				tokenBreakdown: { total: 500 }
			}),
			countTokens: jest.fn(() => 100)
		};
		ContextGatherer.mockImplementation(() => mockContextGatherer);

		// Set up FuzzyTaskSearch mock
		mockFuzzySearch = {
			findRelevantTasks: jest.fn(() => [
				{ id: 1, title: 'Task 1', description: 'Description 1' }
			]),
			getTaskIds: jest.fn(() => ['1'])
		};
		FuzzyTaskSearch.mockImplementation(() => mockFuzzySearch);

		// Store references for easier access
		mockReadJSON = utils.readJSON;
		mockFlattenTasks = utils.flattenTasksWithSubtasks;
	});

	describe('tag parameter passing to ContextGatherer', () => {
		it('passes tag parameter to ContextGatherer constructor', async () => {
			const testTag = 'feature-branch';

			await performResearch('Test query', { tag: testTag }, {}, 'json', false);

			expect(ContextGatherer).toHaveBeenCalledWith(
				'/test/project/root',
				testTag
			);
		});

		it('passes undefined tag when no tag is provided', async () => {
			await performResearch('Test query', {}, {}, 'json', false);

			expect(ContextGatherer).toHaveBeenCalledWith(
				'/test/project/root',
				undefined
			);
		});

		it('passes empty string tag when empty string is provided', async () => {
			await performResearch('Test query', { tag: '' }, {}, 'json', false);

			expect(ContextGatherer).toHaveBeenCalledWith('/test/project/root', '');
		});

		it('passes null tag when null is provided', async () => {
			await performResearch('Test query', { tag: null }, {}, 'json', false);

			expect(ContextGatherer).toHaveBeenCalledWith('/test/project/root', null);
		});
	});

	describe('tag-aware readJSON calls', () => {
		it('calls readJSON with correct tag parameter for task discovery', async () => {
			const testTag = 'development';

			await performResearch('Test query', { tag: testTag }, {}, 'json', false);

			expect(mockReadJSON).toHaveBeenCalledWith(
				expect.stringContaining('tasks.json'),
				'/test/project/root',
				testTag
			);
		});

		it('calls readJSON with undefined tag when no tag provided', async () => {
			await performResearch('Test query', {}, {}, 'json', false);

			expect(mockReadJSON).toHaveBeenCalledWith(
				expect.stringContaining('tasks.json'),
				'/test/project/root',
				undefined
			);
		});

		it('calls readJSON with provided projectRoot and tag', async () => {
			const customProjectRoot = '/custom/project/root';
			const testTag = 'production';

			await performResearch(
				'Test query',
				{
					projectRoot: customProjectRoot,
					tag: testTag
				},
				{},
				'json',
				false
			);

			expect(mockReadJSON).toHaveBeenCalledWith(
				expect.stringContaining('tasks.json'),
				customProjectRoot,
				testTag
			);
		});
	});

	describe('context gathering behavior for different tags', () => {
		it('calls contextGatherer.gather with correct parameters', async () => {
			const options = {
				taskIds: ['1', '2'],
				filePaths: ['src/file.js'],
				customContext: 'Custom context',
				includeProjectTree: true,
				tag: 'feature-branch'
			};

			await performResearch('Test query', options, {}, 'json', false);

			expect(mockContextGatherer.gather).toHaveBeenCalledWith({
				tasks: expect.arrayContaining(['1', '2']),
				files: ['src/file.js'],
				customContext: 'Custom context',
				includeProjectTree: true,
				format: 'research',
				includeTokenCounts: true
			});
		});

		it('handles empty task discovery gracefully when readJSON fails', async () => {
			mockReadJSON.mockRejectedValueOnce(new Error('File not found'));

			const result = await performResearch(
				'Test query',
				{ tag: 'test-tag' },
				{},
				'json',
				false
			);

			// Should still succeed even if task discovery fails
			expect(result).toBeDefined();
			expect(mockContextGatherer.gather).toHaveBeenCalledWith({
				tasks: [],
				files: [],
				customContext: '',
				includeProjectTree: false,
				format: 'research',
				includeTokenCounts: true
			});
		});

		it('combines provided taskIds with auto-discovered tasks', async () => {
			const providedTaskIds = ['3', '4'];
			const autoDiscoveredIds = ['1', '2'];

			mockFuzzySearch.getTaskIds.mockReturnValue(autoDiscoveredIds);

			await performResearch(
				'Test query',
				{
					taskIds: providedTaskIds,
					tag: 'feature-branch'
				},
				{},
				'json',
				false
			);

			expect(mockContextGatherer.gather).toHaveBeenCalledWith({
				tasks: expect.arrayContaining([
					...providedTaskIds,
					...autoDiscoveredIds
				]),
				files: [],
				customContext: '',
				includeProjectTree: false,
				format: 'research',
				includeTokenCounts: true
			});
		});

		it('removes duplicate tasks when auto-discovered tasks overlap with provided tasks', async () => {
			const providedTaskIds = ['1', '2'];
			const autoDiscoveredIds = ['2', '3']; // '2' is duplicate

			mockFuzzySearch.getTaskIds.mockReturnValue(autoDiscoveredIds);

			await performResearch(
				'Test query',
				{
					taskIds: providedTaskIds,
					tag: 'feature-branch'
				},
				{},
				'json',
				false
			);

			expect(mockContextGatherer.gather).toHaveBeenCalledWith({
				tasks: ['1', '2', '3'], // Should include '3' but not duplicate '2'
				files: [],
				customContext: '',
				includeProjectTree: false,
				format: 'research',
				includeTokenCounts: true
			});
		});
	});

	describe('tag-aware fuzzy search', () => {
		it('initializes FuzzyTaskSearch with flattened tasks from correct tag', async () => {
			const testTag = 'development';
			const mockFlattenedTasks = [
				{ id: 1, title: 'Dev Task 1' },
				{ id: 2, title: 'Dev Task 2' }
			];

			mockFlattenTasks.mockReturnValue(mockFlattenedTasks);

			await performResearch('Test query', { tag: testTag }, {}, 'json', false);

			expect(mockFlattenTasks).toHaveBeenCalledWith(
				expect.arrayContaining([
					expect.objectContaining({ id: 1 }),
					expect.objectContaining({ id: 2 })
				])
			);
			expect(FuzzyTaskSearch).toHaveBeenCalledWith(
				mockFlattenedTasks,
				'research'
			);
		});

		it('calls fuzzy search with correct parameters', async () => {
			const testQuery = 'authentication implementation';

			await performResearch(
				testQuery,
				{ tag: 'feature-branch' },
				{},
				'json',
				false
			);

			expect(mockFuzzySearch.findRelevantTasks).toHaveBeenCalledWith(
				testQuery,
				{
					maxResults: 8,
					includeRecent: true,
					includeCategoryMatches: true
				}
			);
		});

		it('handles empty tasks data gracefully', async () => {
			mockReadJSON.mockResolvedValueOnce({ tasks: [] });

			await performResearch(
				'Test query',
				{ tag: 'empty-tag' },
				{},
				'json',
				false
			);

			// Should not call FuzzyTaskSearch when no tasks exist
			expect(FuzzyTaskSearch).not.toHaveBeenCalled();
			expect(mockContextGatherer.gather).toHaveBeenCalledWith({
				tasks: [],
				files: [],
				customContext: '',
				includeProjectTree: false,
				format: 'research',
				includeTokenCounts: true
			});
		});

		it('handles null tasks data gracefully', async () => {
			mockReadJSON.mockResolvedValueOnce(null);

			await performResearch(
				'Test query',
				{ tag: 'null-tag' },
				{},
				'json',
				false
			);

			// Should not call FuzzyTaskSearch when data is null
			expect(FuzzyTaskSearch).not.toHaveBeenCalled();
		});
	});

	describe('error handling for invalid tags', () => {
		it('continues execution when readJSON throws error for invalid tag', async () => {
			mockReadJSON.mockRejectedValueOnce(new Error('Tag not found'));

			const result = await performResearch(
				'Test query',
				{ tag: 'invalid-tag' },
				{},
				'json',
				false
			);

			// Should still succeed and return a result
			expect(result).toBeDefined();
			expect(mockContextGatherer.gather).toHaveBeenCalled();
		});

		it('logs debug message when task discovery fails', async () => {
			const mockLog = {
				debug: jest.fn(),
				info: jest.fn(),
				warn: jest.fn(),
				error: jest.fn(),
				success: jest.fn()
			};

			mockReadJSON.mockRejectedValueOnce(new Error('File not found'));

			await performResearch(
				'Test query',
				{ tag: 'error-tag' },
				{ mcpLog: mockLog },
				'json',
				false
			);

			expect(mockLog.debug).toHaveBeenCalledWith(
				expect.stringContaining('Could not auto-discover tasks')
			);
		});

		it('handles ContextGatherer constructor errors gracefully', async () => {
			ContextGatherer.mockImplementationOnce(() => {
				throw new Error('Invalid tag provided');
			});

			await expect(
				performResearch('Test query', { tag: 'invalid-tag' }, {}, 'json', false)
			).rejects.toThrow('Invalid tag provided');
		});

		it('handles ContextGatherer.gather errors gracefully', async () => {
			mockContextGatherer.gather.mockRejectedValueOnce(
				new Error('Gather failed')
			);

			await expect(
				performResearch(
					'Test query',
					{ tag: 'gather-error-tag' },
					{},
					'json',
					false
				)
			).rejects.toThrow('Gather failed');
		});
	});

	describe('MCP integration with tags', () => {
		it('uses MCP logger when mcpLog is provided in context', async () => {
			const mockMCPLog = {
				debug: jest.fn(),
				info: jest.fn(),
				warn: jest.fn(),
				error: jest.fn(),
				success: jest.fn()
			};

			mockReadJSON.mockRejectedValueOnce(new Error('Test error'));

			await performResearch(
				'Test query',
				{ tag: 'mcp-tag' },
				{ mcpLog: mockMCPLog },
				'json',
				false
			);

			expect(mockMCPLog.debug).toHaveBeenCalledWith(
				expect.stringContaining('Could not auto-discover tasks')
			);
		});

		it('passes session to generateTextService when provided', async () => {
			const mockSession = { userId: 'test-user', env: {} };

			await performResearch(
				'Test query',
				{ tag: 'session-tag' },
				{ session: mockSession },
				'json',
				false
			);

			expect(generateTextService).toHaveBeenCalledWith(
				expect.objectContaining({
					session: mockSession
				})
			);
		});
	});

	describe('output format handling with tags', () => {
		it('displays UI banner only in text format', async () => {
			const consoleSpy = jest.spyOn(console, 'log').mockImplementation();

			await performResearch('Test query', { tag: 'ui-tag' }, {}, 'text', false);

			expect(consoleSpy).toHaveBeenCalledWith(
				expect.stringContaining('🔍 AI Research Query')
			);

			consoleSpy.mockRestore();
		});

		it('does not display UI banner in json format', async () => {
			const consoleSpy = jest.spyOn(console, 'log').mockImplementation();

			await performResearch('Test query', { tag: 'ui-tag' }, {}, 'json', false);

			expect(consoleSpy).not.toHaveBeenCalledWith(
				expect.stringContaining('🔍 AI Research Query')
			);

			consoleSpy.mockRestore();
		});
	});

	describe('comprehensive tag integration test', () => {
		it('performs complete research flow with tag-aware functionality', async () => {
			const testOptions = {
				taskIds: ['1', '2'],
				filePaths: ['src/main.js'],
				customContext: 'Testing tag integration',
				includeProjectTree: true,
				detailLevel: 'high',
				tag: 'integration-test',
				projectRoot: '/custom/root'
			};

			const testContext = {
				session: { userId: 'test-user' },
				mcpLog: {
					debug: jest.fn(),
					info: jest.fn(),
					warn: jest.fn(),
					error: jest.fn(),
					success: jest.fn()
				},
				commandName: 'test-research',
				outputType: 'mcp'
			};

			// Mock successful task discovery
			mockFuzzySearch.getTaskIds.mockReturnValue(['3', '4']);

			const result = await performResearch(
				'Integration test query',
				testOptions,
				testContext,
				'json',
				false
			);

			// Verify ContextGatherer was initialized with correct tag
			expect(ContextGatherer).toHaveBeenCalledWith(
				'/custom/root',
				'integration-test'
			);

			// Verify readJSON was called with correct parameters
			expect(mockReadJSON).toHaveBeenCalledWith(
				expect.stringContaining('tasks.json'),
				'/custom/root',
				'integration-test'
			);

			// Verify context gathering was called with combined tasks
			expect(mockContextGatherer.gather).toHaveBeenCalledWith({
				tasks: ['1', '2', '3', '4'],
				files: ['src/main.js'],
				customContext: 'Testing tag integration',
				includeProjectTree: true,
				format: 'research',
				includeTokenCounts: true
			});

			// Verify AI service was called with session
			expect(generateTextService).toHaveBeenCalledWith(
				expect.objectContaining({
					session: testContext.session,
					role: 'research'
				})
			);

			expect(result).toBeDefined();
		});
	});
});

```

--------------------------------------------------------------------------------
/tests/unit/profiles/mcp-config-validation.test.js:
--------------------------------------------------------------------------------

```javascript
import { RULE_PROFILES } from '../../../src/constants/profiles.js';
import { getRulesProfile } from '../../../src/utils/rule-transformer.js';
import path from 'path';

describe('MCP Configuration Validation', () => {
	describe('Profile MCP Configuration Properties', () => {
		const expectedMcpConfigurations = {
			amp: {
				shouldHaveMcp: true,
				expectedDir: '.vscode',
				expectedConfigName: 'settings.json',
				expectedPath: '.vscode/settings.json'
			},
			claude: {
				shouldHaveMcp: true,
				expectedDir: '.',
				expectedConfigName: '.mcp.json',
				expectedPath: '.mcp.json'
			},
			cline: {
				shouldHaveMcp: false,
				expectedDir: '.clinerules',
				expectedConfigName: null,
				expectedPath: null
			},
			codex: {
				shouldHaveMcp: false,
				expectedDir: '.',
				expectedConfigName: null,
				expectedPath: null
			},
			cursor: {
				shouldHaveMcp: true,
				expectedDir: '.cursor',
				expectedConfigName: 'mcp.json',
				expectedPath: '.cursor/mcp.json'
			},
			gemini: {
				shouldHaveMcp: true,
				expectedDir: '.gemini',
				expectedConfigName: 'settings.json',
				expectedPath: '.gemini/settings.json'
			},
			kiro: {
				shouldHaveMcp: true,
				expectedDir: '.kiro',
				expectedConfigName: 'settings/mcp.json',
				expectedPath: '.kiro/settings/mcp.json'
			},
			opencode: {
				shouldHaveMcp: true,
				expectedDir: '.',
				expectedConfigName: 'opencode.json',
				expectedPath: 'opencode.json'
			},
			roo: {
				shouldHaveMcp: true,
				expectedDir: '.roo',
				expectedConfigName: 'mcp.json',
				expectedPath: '.roo/mcp.json'
			},
			trae: {
				shouldHaveMcp: false,
				expectedDir: '.trae',
				expectedConfigName: null,
				expectedPath: null
			},
			vscode: {
				shouldHaveMcp: true,
				expectedDir: '.vscode',
				expectedConfigName: 'mcp.json',
				expectedPath: '.vscode/mcp.json'
			},
			windsurf: {
				shouldHaveMcp: true,
				expectedDir: '.windsurf',
				expectedConfigName: 'mcp.json',
				expectedPath: '.windsurf/mcp.json'
			},
			zed: {
				shouldHaveMcp: true,
				expectedDir: '.zed',
				expectedConfigName: 'settings.json',
				expectedPath: '.zed/settings.json'
			}
		};

		Object.entries(expectedMcpConfigurations).forEach(
			([profileName, expected]) => {
				test(`should have correct MCP configuration for ${profileName} profile`, () => {
					const profile = getRulesProfile(profileName);
					expect(profile).toBeDefined();
					expect(profile.mcpConfig).toBe(expected.shouldHaveMcp);
					expect(profile.profileDir).toBe(expected.expectedDir);
					expect(profile.mcpConfigName).toBe(expected.expectedConfigName);
					expect(profile.mcpConfigPath).toBe(expected.expectedPath);
				});
			}
		);
	});

	describe('MCP Configuration Path Consistency', () => {
		test('should ensure all profiles have consistent mcpConfigPath construction', () => {
			RULE_PROFILES.forEach((profileName) => {
				const profile = getRulesProfile(profileName);
				if (profile.mcpConfig !== false) {
					// For root directory profiles, path.join('.', filename) normalizes to just 'filename'
					// except for Claude which uses '.mcp.json' explicitly
					let expectedPath;
					if (profile.profileDir === '.') {
						if (profileName === 'claude') {
							expectedPath = '.mcp.json'; // Claude explicitly uses '.mcp.json'
						} else {
							expectedPath = profile.mcpConfigName; // Other root profiles normalize to just the filename
						}
					} else {
						expectedPath = `${profile.profileDir}/${profile.mcpConfigName}`;
					}
					expect(profile.mcpConfigPath).toBe(expectedPath);
				}
			});
		});

		test('should ensure no two profiles have the same MCP config path', () => {
			const mcpPaths = new Set();
			RULE_PROFILES.forEach((profileName) => {
				const profile = getRulesProfile(profileName);
				if (profile.mcpConfig !== false) {
					expect(mcpPaths.has(profile.mcpConfigPath)).toBe(false);
					mcpPaths.add(profile.mcpConfigPath);
				}
			});
		});

		test('should ensure all MCP-enabled profiles use proper directory structure', () => {
			const rootProfiles = ['opencode', 'claude', 'codex']; // Profiles that use root directory for config
			const nestedConfigProfiles = ['kiro']; // Profiles that use nested directories for config

			RULE_PROFILES.forEach((profileName) => {
				const profile = getRulesProfile(profileName);
				if (profile.mcpConfig !== false) {
					if (rootProfiles.includes(profileName)) {
						// Root profiles have different patterns
						if (profileName === 'claude') {
							expect(profile.mcpConfigPath).toBe('.mcp.json');
						} else {
							// Other root profiles normalize to just the filename (no ./ prefix)
							expect(profile.mcpConfigPath).toMatch(/^[\w_.]+$/);
						}
					} else if (nestedConfigProfiles.includes(profileName)) {
						// Profiles with nested config directories
						expect(profile.mcpConfigPath).toMatch(
							/^\.[\w-]+\/[\w-]+\/[\w_.]+$/
						);
					} else {
						// Other profiles should have config files in their specific directories
						expect(profile.mcpConfigPath).toMatch(/^\.[\w-]+\/[\w_.]+$/);
					}
				}
			});
		});

		test('should ensure all profiles have required MCP properties', () => {
			RULE_PROFILES.forEach((profileName) => {
				const profile = getRulesProfile(profileName);
				expect(profile).toHaveProperty('mcpConfig');
				expect(profile).toHaveProperty('profileDir');
				expect(profile).toHaveProperty('mcpConfigName');
				expect(profile).toHaveProperty('mcpConfigPath');
			});
		});
	});

	describe('MCP Configuration File Names', () => {
		test('should use standard mcp.json for MCP-enabled profiles', () => {
			const standardMcpProfiles = ['cursor', 'roo', 'vscode', 'windsurf'];
			standardMcpProfiles.forEach((profileName) => {
				const profile = getRulesProfile(profileName);
				expect(profile.mcpConfigName).toBe('mcp.json');
			});
		});

		test('should use custom settings.json for Gemini profile', () => {
			const profile = getRulesProfile('gemini');
			expect(profile.mcpConfigName).toBe('settings.json');
		});

		test('should have null config name for non-MCP profiles', () => {
			// Only codex, cline, and trae profiles should have null config names
			const nonMcpProfiles = ['codex', 'cline', 'trae'];

			for (const profileName of nonMcpProfiles) {
				const profile = getRulesProfile(profileName);
				expect(profile.mcpConfigName).toBe(null);
			}
		});
	});

	describe('Profile Directory Structure', () => {
		test('should ensure each profile has a unique directory', () => {
			const profileDirs = new Set();
			// Profiles that use root directory (can share the same directory)
			const rootProfiles = ['claude', 'codex', 'gemini', 'opencode'];
			// Profiles that intentionally share the same directory
			const sharedDirectoryProfiles = ['amp', 'vscode']; // Both use .vscode

			RULE_PROFILES.forEach((profileName) => {
				const profile = getRulesProfile(profileName);

				// Root profiles can share the root directory for rules
				if (rootProfiles.includes(profileName) && profile.rulesDir === '.') {
					expect(profile.rulesDir).toBe('.');
				}

				// Profile directories should be unique (except for root profiles and shared directory profiles)
				if (
					!rootProfiles.includes(profileName) &&
					!sharedDirectoryProfiles.includes(profileName)
				) {
					if (profile.profileDir !== '.') {
						expect(profileDirs.has(profile.profileDir)).toBe(false);
						profileDirs.add(profile.profileDir);
					}
				} else if (sharedDirectoryProfiles.includes(profileName)) {
					// Shared directory profiles should use .vscode
					expect(profile.profileDir).toBe('.vscode');
				}
			});
		});

		test('should ensure profile directories follow expected naming convention', () => {
			// Profiles that use root directory for rules
			const rootRulesProfiles = ['claude', 'codex', 'gemini', 'opencode'];

			RULE_PROFILES.forEach((profileName) => {
				const profile = getRulesProfile(profileName);

				// Some profiles use root directory for rules
				if (
					rootRulesProfiles.includes(profileName) &&
					profile.rulesDir === '.'
				) {
					expect(profile.rulesDir).toBe('.');
				}

				// Profile directories (not rules directories) should follow the .name pattern
				// unless they are root profiles with profileDir = '.'
				if (profile.profileDir !== '.') {
					expect(profile.profileDir).toMatch(/^\.[\w-]+$/);
				}
			});
		});
	});

	describe('MCP Configuration Creation Logic', () => {
		test('should indicate which profiles require MCP configuration creation', () => {
			// Get all profiles that have MCP configuration enabled
			const mcpEnabledProfiles = RULE_PROFILES.filter((profileName) => {
				const profile = getRulesProfile(profileName);
				return profile.mcpConfig !== false;
			});

			// Verify expected MCP-enabled profiles
			expect(mcpEnabledProfiles).toContain('amp');
			expect(mcpEnabledProfiles).toContain('claude');
			expect(mcpEnabledProfiles).toContain('cursor');
			expect(mcpEnabledProfiles).toContain('gemini');
			expect(mcpEnabledProfiles).toContain('opencode');
			expect(mcpEnabledProfiles).toContain('vscode');
			expect(mcpEnabledProfiles).toContain('windsurf');
			expect(mcpEnabledProfiles).toContain('zed');
			expect(mcpEnabledProfiles).toContain('roo');
			expect(mcpEnabledProfiles).not.toContain('cline');
			expect(mcpEnabledProfiles).not.toContain('codex');
			expect(mcpEnabledProfiles).not.toContain('trae');
		});

		test('should provide all necessary information for MCP config creation', () => {
			RULE_PROFILES.forEach((profileName) => {
				const profile = getRulesProfile(profileName);
				if (profile.mcpConfig !== false) {
					expect(profile.mcpConfigPath).toBeDefined();
					expect(typeof profile.mcpConfigPath).toBe('string');
					expect(profile.mcpConfigPath.length).toBeGreaterThan(0);
				}
			});
		});
	});

	describe('MCP Configuration Path Usage Verification', () => {
		test('should verify that rule transformer functions use mcpConfigPath correctly', () => {
			RULE_PROFILES.forEach((profileName) => {
				const profile = getRulesProfile(profileName);
				if (profile.mcpConfig !== false) {
					// Verify the path is properly formatted for path.join usage
					expect(profile.mcpConfigPath.startsWith('/')).toBe(false);

					// Root directory profiles have different patterns
					if (profile.profileDir === '.') {
						if (profileName === 'claude') {
							expect(profile.mcpConfigPath).toBe('.mcp.json');
						} else {
							// Other root profiles (opencode) normalize to just the filename
							expect(profile.mcpConfigPath).toBe(profile.mcpConfigName);
						}
					} else {
						// Non-root profiles should contain a directory separator
						expect(profile.mcpConfigPath).toContain('/');
					}

					// Verify it matches the expected pattern based on how path.join works
					let expectedPath;
					if (profile.profileDir === '.') {
						if (profileName === 'claude') {
							expectedPath = '.mcp.json'; // Claude explicitly uses '.mcp.json'
						} else {
							expectedPath = profile.mcpConfigName; // path.join('.', 'filename') normalizes to 'filename'
						}
					} else {
						expectedPath = `${profile.profileDir}/${profile.mcpConfigName}`;
					}
					expect(profile.mcpConfigPath).toBe(expectedPath);
				}
			});
		});

		test('should verify that mcpConfigPath is properly constructed for path.join usage', () => {
			RULE_PROFILES.forEach((profileName) => {
				const profile = getRulesProfile(profileName);
				if (profile.mcpConfig !== false) {
					// Test that path.join works correctly with the mcpConfigPath
					const testProjectRoot = '/test/project';
					const fullPath = path.join(testProjectRoot, profile.mcpConfigPath);

					// Should result in a proper absolute path
					// Note: path.join normalizes paths, so './opencode.json' becomes 'opencode.json'
					const normalizedExpectedPath = path.join(
						testProjectRoot,
						profile.mcpConfigPath
					);
					expect(fullPath).toBe(normalizedExpectedPath);
					expect(fullPath).toContain(profile.mcpConfigName);
				}
			});
		});
	});

	describe('MCP Configuration Function Integration', () => {
		test('should verify that setupMCPConfiguration receives the correct mcpConfigPath parameter', () => {
			RULE_PROFILES.forEach((profileName) => {
				const profile = getRulesProfile(profileName);
				if (profile.mcpConfig !== false) {
					// Verify the path structure is correct for the new function signature
					if (profile.profileDir === '.') {
						// Root directory profiles have special handling
						if (profileName === 'claude') {
							expect(profile.mcpConfigPath).toBe('.mcp.json');
						} else {
							// Other root profiles normalize to just the filename
							expect(profile.mcpConfigPath).toBe(profile.mcpConfigName);
						}
					} else if (profileName === 'kiro') {
						// Kiro has a nested config structure
						const parts = profile.mcpConfigPath.split('/');
						expect(parts).toHaveLength(3); // Should be profileDir/settings/mcp.json
						expect(parts[0]).toBe(profile.profileDir);
						expect(parts[1]).toBe('settings');
						expect(parts[2]).toBe('mcp.json');
					} else {
						// Non-root profiles should have profileDir/configName structure
						const parts = profile.mcpConfigPath.split('/');
						expect(parts).toHaveLength(2); // Should be profileDir/configName
						expect(parts[0]).toBe(profile.profileDir);
						expect(parts[1]).toBe(profile.mcpConfigName);
					}
				}
			});
		});
	});

	describe('MCP configuration validation', () => {
		const mcpProfiles = [
			'amp',
			'claude',
			'cursor',
			'gemini',
			'kiro',
			'opencode',
			'roo',
			'windsurf',
			'vscode',
			'zed'
		];
		const nonMcpProfiles = ['codex', 'cline', 'trae'];
		const profilesWithLifecycle = ['claude'];
		const profilesWithoutLifecycle = ['codex'];

		test.each(mcpProfiles)(
			'should have valid MCP config for %s profile',
			(profileName) => {
				const profile = getRulesProfile(profileName);
				expect(profile).toBeDefined();
				expect(profile.mcpConfig).toBe(true);
				expect(profile.mcpConfigPath).toBeDefined();
				expect(typeof profile.mcpConfigPath).toBe('string');
			}
		);

		test.each(nonMcpProfiles)(
			'should not require MCP config for %s profile',
			(profileName) => {
				const profile = getRulesProfile(profileName);
				expect(profile).toBeDefined();
				expect(profile.mcpConfig).toBe(false);
			}
		);
	});

	describe('Profile structure validation', () => {
		const allProfiles = [
			'amp',
			'claude',
			'cline',
			'codex',
			'cursor',
			'gemini',
			'opencode',
			'roo',
			'trae',
			'vscode',
			'windsurf',
			'zed'
		];
		const profilesWithLifecycle = ['amp', 'claude'];
		const profilesWithPostConvertLifecycle = ['opencode'];
		const profilesWithoutLifecycle = ['codex'];

		test.each(allProfiles)(
			'should have file mappings for %s profile',
			(profileName) => {
				const profile = getRulesProfile(profileName);
				expect(profile).toBeDefined();
				expect(profile.fileMap).toBeDefined();
				expect(typeof profile.fileMap).toBe('object');
				expect(Object.keys(profile.fileMap).length).toBeGreaterThan(0);
			}
		);

		test.each(profilesWithLifecycle)(
			'should have file mappings and lifecycle functions for %s profile',
			(profileName) => {
				const profile = getRulesProfile(profileName);
				expect(profile).toBeDefined();
				// Claude profile has both fileMap and lifecycle functions
				expect(profile.fileMap).toBeDefined();
				expect(typeof profile.fileMap).toBe('object');
				expect(Object.keys(profile.fileMap).length).toBeGreaterThan(0);
				expect(typeof profile.onAddRulesProfile).toBe('function');
				expect(typeof profile.onRemoveRulesProfile).toBe('function');
				expect(typeof profile.onPostConvertRulesProfile).toBe('function');
			}
		);

		test.each(profilesWithPostConvertLifecycle)(
			'should have file mappings and post-convert lifecycle functions for %s profile',
			(profileName) => {
				const profile = getRulesProfile(profileName);
				expect(profile).toBeDefined();
				// OpenCode profile has fileMap and post-convert lifecycle functions
				expect(profile.fileMap).toBeDefined();
				expect(typeof profile.fileMap).toBe('object');
				expect(Object.keys(profile.fileMap).length).toBeGreaterThan(0);
				expect(profile.onAddRulesProfile).toBeUndefined(); // OpenCode doesn't have onAdd
				expect(typeof profile.onRemoveRulesProfile).toBe('function');
				expect(typeof profile.onPostConvertRulesProfile).toBe('function');
			}
		);

		test.each(profilesWithoutLifecycle)(
			'should have file mappings without lifecycle functions for %s profile',
			(profileName) => {
				const profile = getRulesProfile(profileName);
				expect(profile).toBeDefined();
				// Codex profile has fileMap but no lifecycle functions (simplified)
				expect(profile.fileMap).toBeDefined();
				expect(typeof profile.fileMap).toBe('object');
				expect(Object.keys(profile.fileMap).length).toBeGreaterThan(0);
				expect(profile.onAddRulesProfile).toBeUndefined();
				expect(profile.onRemoveRulesProfile).toBeUndefined();
				expect(profile.onPostConvertRulesProfile).toBeUndefined();
			}
		);
	});
});

```
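
The `path.join` normalization that several of the assertions above rely on can be checked directly; a small sketch follows (POSIX separators assumed, since `path.join` emits backslashes on Windows):

```typescript
import path from 'node:path';

// A leading '.' segment is dropped during normalization, which is why
// root-directory profiles end up with a bare filename as their mcpConfigPath.
console.log(path.join('.', 'opencode.json')); // 'opencode.json'
console.log(path.join('.cursor', 'mcp.json')); // '.cursor/mcp.json'
console.log(path.join('/test/project', '.mcp.json')); // '/test/project/.mcp.json'
```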

--------------------------------------------------------------------------------
/tests/unit/scripts/modules/task-manager/move-task-cross-tag.test.js:
--------------------------------------------------------------------------------

```javascript
import { jest } from '@jest/globals';

// --- Mocks ---
jest.unstable_mockModule('../../../../../scripts/modules/utils.js', () => ({
	readJSON: jest.fn(),
	writeJSON: jest.fn(),
	log: jest.fn(),
	setTasksForTag: jest.fn(),
	truncate: jest.fn((t) => t),
	isSilentMode: jest.fn(() => false),
	traverseDependencies: jest.fn((sourceTasks, allTasks, options = {}) => {
		// Mock realistic dependency behavior for testing
		const { direction = 'forward' } = options;

		if (direction === 'forward') {
			// For forward dependencies: return tasks that the source tasks depend on
			const result = [];
			sourceTasks.forEach((task) => {
				if (task.dependencies && Array.isArray(task.dependencies)) {
					result.push(...task.dependencies);
				}
			});
			return result;
		} else if (direction === 'reverse') {
			// For reverse dependencies: return tasks that depend on the source tasks
			const sourceIds = sourceTasks.map((t) => t.id);
			const normalizedSourceIds = sourceIds.map((id) => String(id));
			const result = [];
			allTasks.forEach((task) => {
				if (task.dependencies && Array.isArray(task.dependencies)) {
					const hasDependency = task.dependencies.some((depId) =>
						normalizedSourceIds.includes(String(depId))
					);
					if (hasDependency) {
						result.push(task.id);
					}
				}
			});
			return result;
		}
		return [];
	})
}));

jest.unstable_mockModule(
	'../../../../../scripts/modules/task-manager/generate-task-files.js',
	() => ({
		default: jest.fn().mockResolvedValue()
	})
);

jest.unstable_mockModule(
	'../../../../../scripts/modules/task-manager.js',
	() => ({
		isTaskDependentOn: jest.fn(() => false)
	})
);

jest.unstable_mockModule(
	'../../../../../scripts/modules/dependency-manager.js',
	() => ({
		validateCrossTagMove: jest.fn(),
		findCrossTagDependencies: jest.fn(),
		getDependentTaskIds: jest.fn(),
		validateSubtaskMove: jest.fn()
	})
);

const { readJSON, writeJSON, log } = await import(
	'../../../../../scripts/modules/utils.js'
);

const {
	validateCrossTagMove,
	findCrossTagDependencies,
	getDependentTaskIds,
	validateSubtaskMove
} = await import('../../../../../scripts/modules/dependency-manager.js');

const { moveTasksBetweenTags, getAllTasksWithTags } = await import(
	'../../../../../scripts/modules/task-manager/move-task.js'
);

describe('Cross-Tag Task Movement', () => {
	let mockRawData;
	let mockTasksPath;
	let mockContext;

	beforeEach(() => {
		jest.clearAllMocks();

		// Setup mock data
		mockRawData = {
			backlog: {
				tasks: [
					{ id: 1, title: 'Task 1', dependencies: [2] },
					{ id: 2, title: 'Task 2', dependencies: [] },
					{ id: 3, title: 'Task 3', dependencies: [1] }
				]
			},
			'in-progress': {
				tasks: [{ id: 4, title: 'Task 4', dependencies: [] }]
			},
			done: {
				tasks: [{ id: 5, title: 'Task 5', dependencies: [4] }]
			}
		};

		mockTasksPath = '/test/path/tasks.json';
		mockContext = { projectRoot: '/test/project' };

		// Mock readJSON to return our test data
		readJSON.mockImplementation((path, projectRoot, tag) => {
			return { ...mockRawData[tag], tag, _rawTaggedData: mockRawData };
		});

		writeJSON.mockResolvedValue();
		log.mockImplementation(() => {});
	});

	afterEach(() => {
		jest.clearAllMocks();
	});

	describe('getAllTasksWithTags', () => {
		it('should return all tasks with tag information', () => {
			const allTasks = getAllTasksWithTags(mockRawData);

			expect(allTasks).toHaveLength(5);
			expect(allTasks.find((t) => t.id === 1).tag).toBe('backlog');
			expect(allTasks.find((t) => t.id === 4).tag).toBe('in-progress');
			expect(allTasks.find((t) => t.id === 5).tag).toBe('done');
		});
	});

	describe('validateCrossTagMove', () => {
		it('should allow move when no dependencies exist', () => {
			const task = { id: 2, dependencies: [] };
			const allTasks = getAllTasksWithTags(mockRawData);

			validateCrossTagMove.mockReturnValue({ canMove: true, conflicts: [] });
			const result = validateCrossTagMove(
				task,
				'backlog',
				'in-progress',
				allTasks
			);

			expect(result.canMove).toBe(true);
			expect(result.conflicts).toHaveLength(0);
		});

		it('should block move when cross-tag dependencies exist', () => {
			const task = { id: 1, dependencies: [2] };
			const allTasks = getAllTasksWithTags(mockRawData);

			validateCrossTagMove.mockReturnValue({
				canMove: false,
				conflicts: [{ taskId: 1, dependencyId: 2, dependencyTag: 'backlog' }]
			});
			const result = validateCrossTagMove(
				task,
				'backlog',
				'in-progress',
				allTasks
			);

			expect(result.canMove).toBe(false);
			expect(result.conflicts).toHaveLength(1);
			expect(result.conflicts[0].dependencyId).toBe(2);
		});
	});

	describe('findCrossTagDependencies', () => {
		it('should find cross-tag dependencies for multiple tasks', () => {
			const sourceTasks = [
				{ id: 1, dependencies: [2] },
				{ id: 3, dependencies: [1] }
			];
			const allTasks = getAllTasksWithTags(mockRawData);

			findCrossTagDependencies.mockReturnValue([
				{ taskId: 1, dependencyId: 2, dependencyTag: 'backlog' },
				{ taskId: 3, dependencyId: 1, dependencyTag: 'backlog' }
			]);
			const conflicts = findCrossTagDependencies(
				sourceTasks,
				'backlog',
				'in-progress',
				allTasks
			);

			expect(conflicts).toHaveLength(2);
			expect(
				conflicts.some((c) => c.taskId === 1 && c.dependencyId === 2)
			).toBe(true);
			expect(
				conflicts.some((c) => c.taskId === 3 && c.dependencyId === 1)
			).toBe(true);
		});
	});

	describe('getDependentTaskIds', () => {
		it('should return dependent task IDs', () => {
			const sourceTasks = [{ id: 1, dependencies: [2] }];
			const crossTagDependencies = [
				{ taskId: 1, dependencyId: 2, dependencyTag: 'backlog' }
			];
			const allTasks = getAllTasksWithTags(mockRawData);

			getDependentTaskIds.mockReturnValue([2]);
			const dependentTaskIds = getDependentTaskIds(
				sourceTasks,
				crossTagDependencies,
				allTasks
			);

			expect(dependentTaskIds).toContain(2);
		});
	});

	// New test: ensure with-dependencies only traverses tasks from the source tag
	it('should scope dependency traversal to source tag when using --with-dependencies', async () => {
		findCrossTagDependencies.mockReturnValue([]);
		validateSubtaskMove.mockImplementation(() => {});

		const result = await moveTasksBetweenTags(
			mockTasksPath,
			[1], // backlog:1 depends on backlog:2
			'backlog',
			'in-progress',
			{ withDependencies: true },
			mockContext
		);

		// Write should include backlog:2 moved, and must NOT traverse or fetch dependencies from the target tag
		expect(writeJSON).toHaveBeenCalledWith(
			mockTasksPath,
			expect.objectContaining({
				'in-progress': expect.objectContaining({
					tasks: expect.arrayContaining([
						expect.objectContaining({ id: 1 }),
						expect.objectContaining({ id: 2 }) // the backlog:2 now moved
						// the pre-existing in-progress task (id 4) must remain, and we must not double-add or fetch deps from the target tag
					])
				})
			}),
			mockContext.projectRoot,
			null
		);
	});

	describe('moveTasksBetweenTags', () => {
		it('should move tasks without dependencies successfully', async () => {
			// Mock the dependency functions to return no conflicts
			findCrossTagDependencies.mockReturnValue([]);
			validateSubtaskMove.mockImplementation(() => {});

			const result = await moveTasksBetweenTags(
				mockTasksPath,
				[2],
				'backlog',
				'in-progress',
				{},
				mockContext
			);

			expect(result.message).toContain('Successfully moved 1 tasks');
			expect(writeJSON).toHaveBeenCalledWith(
				mockTasksPath,
				expect.any(Object),
				mockContext.projectRoot,
				null
			);
		});

		it('should throw error for cross-tag dependencies by default', async () => {
			const mockDependency = {
				taskId: 1,
				dependencyId: 2,
				dependencyTag: 'backlog'
			};
			findCrossTagDependencies.mockReturnValue([mockDependency]);
			validateSubtaskMove.mockImplementation(() => {});

			await expect(
				moveTasksBetweenTags(
					mockTasksPath,
					[1],
					'backlog',
					'in-progress',
					{},
					mockContext
				)
			).rejects.toThrow(
				'Cannot move tasks: 1 cross-tag dependency conflicts found'
			);

			expect(writeJSON).not.toHaveBeenCalled();
		});

		it('should move with dependencies when --with-dependencies is used', async () => {
			const mockDependency = {
				taskId: 1,
				dependencyId: 2,
				dependencyTag: 'backlog'
			};
			findCrossTagDependencies.mockReturnValue([mockDependency]);
			getDependentTaskIds.mockReturnValue([2]);
			validateSubtaskMove.mockImplementation(() => {});

			const result = await moveTasksBetweenTags(
				mockTasksPath,
				[1],
				'backlog',
				'in-progress',
				{ withDependencies: true },
				mockContext
			);

			expect(result.message).toContain('Successfully moved 2 tasks');
			expect(writeJSON).toHaveBeenCalledWith(
				mockTasksPath,
				expect.objectContaining({
					backlog: expect.objectContaining({
						tasks: expect.arrayContaining([
							expect.objectContaining({
								id: 3,
								title: 'Task 3',
								dependencies: [1]
							})
						])
					}),
					'in-progress': expect.objectContaining({
						tasks: expect.arrayContaining([
							expect.objectContaining({
								id: 4,
								title: 'Task 4',
								dependencies: []
							}),
							expect.objectContaining({
								id: 1,
								title: 'Task 1',
								dependencies: [2],
								metadata: expect.objectContaining({
									moveHistory: expect.arrayContaining([
										expect.objectContaining({
											fromTag: 'backlog',
											toTag: 'in-progress',
											timestamp: expect.any(String)
										})
									])
								})
							}),
							expect.objectContaining({
								id: 2,
								title: 'Task 2',
								dependencies: [],
								metadata: expect.objectContaining({
									moveHistory: expect.arrayContaining([
										expect.objectContaining({
											fromTag: 'backlog',
											toTag: 'in-progress',
											timestamp: expect.any(String)
										})
									])
								})
							})
						])
					}),
					done: expect.objectContaining({
						tasks: expect.arrayContaining([
							expect.objectContaining({
								id: 5,
								title: 'Task 5',
								dependencies: [4]
							})
						])
					})
				}),
				mockContext.projectRoot,
				null
			);
		});

		it('should break dependencies when --ignore-dependencies is used', async () => {
			const mockDependency = {
				taskId: 1,
				dependencyId: 2,
				dependencyTag: 'backlog'
			};
			findCrossTagDependencies.mockReturnValue([mockDependency]);
			validateSubtaskMove.mockImplementation(() => {});

			const result = await moveTasksBetweenTags(
				mockTasksPath,
				[2],
				'backlog',
				'in-progress',
				{ ignoreDependencies: true },
				mockContext
			);

			expect(result.message).toContain('Successfully moved 1 tasks');
			expect(writeJSON).toHaveBeenCalledWith(
				mockTasksPath,
				expect.objectContaining({
					backlog: expect.objectContaining({
						tasks: expect.arrayContaining([
							expect.objectContaining({
								id: 1,
								title: 'Task 1',
								dependencies: [2] // Dependencies not actually removed in current implementation
							}),
							expect.objectContaining({
								id: 3,
								title: 'Task 3',
								dependencies: [1]
							})
						])
					}),
					'in-progress': expect.objectContaining({
						tasks: expect.arrayContaining([
							expect.objectContaining({
								id: 4,
								title: 'Task 4',
								dependencies: []
							}),
							expect.objectContaining({
								id: 2,
								title: 'Task 2',
								dependencies: [],
								metadata: expect.objectContaining({
									moveHistory: expect.arrayContaining([
										expect.objectContaining({
											fromTag: 'backlog',
											toTag: 'in-progress',
											timestamp: expect.any(String)
										})
									])
								})
							})
						])
					}),
					done: expect.objectContaining({
						tasks: expect.arrayContaining([
							expect.objectContaining({
								id: 5,
								title: 'Task 5',
								dependencies: [4]
							})
						])
					})
				}),
				mockContext.projectRoot,
				null
			);
		});

		it('should create target tag if it does not exist', async () => {
			findCrossTagDependencies.mockReturnValue([]);
			validateSubtaskMove.mockImplementation(() => {});

			const result = await moveTasksBetweenTags(
				mockTasksPath,
				[2],
				'backlog',
				'new-tag',
				{},
				mockContext
			);

			expect(result.message).toContain('Successfully moved 1 tasks');
			expect(result.message).toContain('new-tag');
			expect(writeJSON).toHaveBeenCalledWith(
				mockTasksPath,
				expect.objectContaining({
					backlog: expect.objectContaining({
						tasks: expect.arrayContaining([
							expect.objectContaining({
								id: 1,
								title: 'Task 1',
								dependencies: [2]
							}),
							expect.objectContaining({
								id: 3,
								title: 'Task 3',
								dependencies: [1]
							})
						])
					}),
					'new-tag': expect.objectContaining({
						tasks: expect.arrayContaining([
							expect.objectContaining({
								id: 2,
								title: 'Task 2',
								dependencies: [],
								metadata: expect.objectContaining({
									moveHistory: expect.arrayContaining([
										expect.objectContaining({
											fromTag: 'backlog',
											toTag: 'new-tag',
											timestamp: expect.any(String)
										})
									])
								})
							})
						])
					}),
					'in-progress': expect.objectContaining({
						tasks: expect.arrayContaining([
							expect.objectContaining({
								id: 4,
								title: 'Task 4',
								dependencies: []
							})
						])
					}),
					done: expect.objectContaining({
						tasks: expect.arrayContaining([
							expect.objectContaining({
								id: 5,
								title: 'Task 5',
								dependencies: [4]
							})
						])
					})
				}),
				mockContext.projectRoot,
				null
			);
		});

		it('should throw error for subtask movement', async () => {
			const subtaskError = 'Cannot move subtask 1.2 directly between tags';
			validateSubtaskMove.mockImplementation(() => {
				throw new Error(subtaskError);
			});

			await expect(
				moveTasksBetweenTags(
					mockTasksPath,
					['1.2'],
					'backlog',
					'in-progress',
					{},
					mockContext
				)
			).rejects.toThrow(subtaskError);

			expect(writeJSON).not.toHaveBeenCalled();
		});

		it('should throw error for invalid task IDs', async () => {
			findCrossTagDependencies.mockReturnValue([]);
			validateSubtaskMove.mockImplementation(() => {});

			await expect(
				moveTasksBetweenTags(
					mockTasksPath,
					[999], // Non-existent task
					'backlog',
					'in-progress',
					{},
					mockContext
				)
			).rejects.toThrow('Task 999 not found in source tag "backlog"');

			expect(writeJSON).not.toHaveBeenCalled();
		});

		it('should throw error for invalid source tag', async () => {
			findCrossTagDependencies.mockReturnValue([]);
			validateSubtaskMove.mockImplementation(() => {});

			await expect(
				moveTasksBetweenTags(
					mockTasksPath,
					[1],
					'non-existent-tag',
					'in-progress',
					{},
					mockContext
				)
			).rejects.toThrow('Source tag "non-existent-tag" not found or invalid');

			expect(writeJSON).not.toHaveBeenCalled();
		});

		it('should handle string dependencies correctly during cross-tag move', async () => {
			// Setup mock data with string dependencies
			mockRawData = {
				backlog: {
					tasks: [
						{ id: 1, title: 'Task 1', dependencies: ['2'] }, // String dependency
						{ id: 2, title: 'Task 2', dependencies: [] },
						{ id: 3, title: 'Task 3', dependencies: ['1'] } // String dependency
					]
				},
				'in-progress': {
					tasks: [{ id: 4, title: 'Task 4', dependencies: [] }]
				}
			};

			// Mock readJSON to return our test data
			readJSON.mockImplementation((path, projectRoot, tag) => {
				return { ...mockRawData[tag], tag, _rawTaggedData: mockRawData };
			});
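			// This presumably mirrors the real readJSON contract used elsewhere in the
			// suite: the resolved tag's fields are spread at the top level, while
			// _rawTaggedData exposes the full multi-tag structure that cross-tag moves
			// operate on.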

			findCrossTagDependencies.mockReturnValue([]);
			validateSubtaskMove.mockImplementation(() => {});

			const result = await moveTasksBetweenTags(
				mockTasksPath,
				['1'], // String task ID
				'backlog',
				'in-progress',
				{},
				mockContext
			);

			expect(result.message).toContain('Successfully moved 1 tasks');
			expect(writeJSON).toHaveBeenCalledWith(
				mockTasksPath,
				expect.objectContaining({
					backlog: expect.objectContaining({
						tasks: expect.arrayContaining([
							expect.objectContaining({
								id: 2,
								title: 'Task 2',
								dependencies: []
							}),
							expect.objectContaining({
								id: 3,
								title: 'Task 3',
								dependencies: ['1'] // Should remain as string
							})
						])
					}),
					'in-progress': expect.objectContaining({
						tasks: expect.arrayContaining([
							expect.objectContaining({
								id: 1,
								title: 'Task 1',
								dependencies: ['2'], // Should remain as string
								metadata: expect.objectContaining({
									moveHistory: expect.arrayContaining([
										expect.objectContaining({
											fromTag: 'backlog',
											toTag: 'in-progress',
											timestamp: expect.any(String)
										})
									])
								})
							})
						])
					})
				}),
				mockContext.projectRoot,
				null
			);
		});
	});
});

```