#
tokens: 46396/50000 9/975 files (page 28/50)
lines: off (toggle) GitHub
raw markdown copy
This is page 28 of 50. Use http://codebase.md/eyaltoledano/claude-task-master?page={x} to view the full context.

# Directory Structure

```
├── .changeset
│   ├── config.json
│   └── README.md
├── .claude
│   ├── commands
│   │   └── dedupe.md
│   └── TM_COMMANDS_GUIDE.md
├── .claude-plugin
│   └── marketplace.json
├── .coderabbit.yaml
├── .cursor
│   ├── mcp.json
│   └── rules
│       ├── ai_providers.mdc
│       ├── ai_services.mdc
│       ├── architecture.mdc
│       ├── changeset.mdc
│       ├── commands.mdc
│       ├── context_gathering.mdc
│       ├── cursor_rules.mdc
│       ├── dependencies.mdc
│       ├── dev_workflow.mdc
│       ├── git_workflow.mdc
│       ├── glossary.mdc
│       ├── mcp.mdc
│       ├── new_features.mdc
│       ├── self_improve.mdc
│       ├── tags.mdc
│       ├── taskmaster.mdc
│       ├── tasks.mdc
│       ├── telemetry.mdc
│       ├── test_workflow.mdc
│       ├── tests.mdc
│       ├── ui.mdc
│       └── utilities.mdc
├── .cursorignore
├── .env.example
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── enhancements---feature-requests.md
│   │   └── feedback.md
│   ├── PULL_REQUEST_TEMPLATE
│   │   ├── bugfix.md
│   │   ├── config.yml
│   │   ├── feature.md
│   │   └── integration.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── scripts
│   │   ├── auto-close-duplicates.mjs
│   │   ├── backfill-duplicate-comments.mjs
│   │   ├── check-pre-release-mode.mjs
│   │   ├── parse-metrics.mjs
│   │   ├── release.mjs
│   │   ├── tag-extension.mjs
│   │   ├── utils.mjs
│   │   └── validate-changesets.mjs
│   └── workflows
│       ├── auto-close-duplicates.yml
│       ├── backfill-duplicate-comments.yml
│       ├── ci.yml
│       ├── claude-dedupe-issues.yml
│       ├── claude-docs-trigger.yml
│       ├── claude-docs-updater.yml
│       ├── claude-issue-triage.yml
│       ├── claude.yml
│       ├── extension-ci.yml
│       ├── extension-release.yml
│       ├── log-issue-events.yml
│       ├── pre-release.yml
│       ├── release-check.yml
│       ├── release.yml
│       ├── update-models-md.yml
│       └── weekly-metrics-discord.yml
├── .gitignore
├── .kiro
│   ├── hooks
│   │   ├── tm-code-change-task-tracker.kiro.hook
│   │   ├── tm-complexity-analyzer.kiro.hook
│   │   ├── tm-daily-standup-assistant.kiro.hook
│   │   ├── tm-git-commit-task-linker.kiro.hook
│   │   ├── tm-pr-readiness-checker.kiro.hook
│   │   ├── tm-task-dependency-auto-progression.kiro.hook
│   │   └── tm-test-success-task-completer.kiro.hook
│   ├── settings
│   │   └── mcp.json
│   └── steering
│       ├── dev_workflow.md
│       ├── kiro_rules.md
│       ├── self_improve.md
│       ├── taskmaster_hooks_workflow.md
│       └── taskmaster.md
├── .manypkg.json
├── .mcp.json
├── .npmignore
├── .nvmrc
├── .taskmaster
│   ├── CLAUDE.md
│   ├── config.json
│   ├── docs
│   │   ├── autonomous-tdd-git-workflow.md
│   │   ├── MIGRATION-ROADMAP.md
│   │   ├── prd-tm-start.txt
│   │   ├── prd.txt
│   │   ├── README.md
│   │   ├── research
│   │   │   ├── 2025-06-14_how-can-i-improve-the-scope-up-and-scope-down-comm.md
│   │   │   ├── 2025-06-14_should-i-be-using-any-specific-libraries-for-this.md
│   │   │   ├── 2025-06-14_test-save-functionality.md
│   │   │   ├── 2025-06-14_test-the-fix-for-duplicate-saves-final-test.md
│   │   │   └── 2025-08-01_do-we-need-to-add-new-commands-or-can-we-just-weap.md
│   │   ├── task-template-importing-prd.txt
│   │   ├── tdd-workflow-phase-0-spike.md
│   │   ├── tdd-workflow-phase-1-core-rails.md
│   │   ├── tdd-workflow-phase-1-orchestrator.md
│   │   ├── tdd-workflow-phase-2-pr-resumability.md
│   │   ├── tdd-workflow-phase-3-extensibility-guardrails.md
│   │   ├── test-prd.txt
│   │   └── tm-core-phase-1.txt
│   ├── reports
│   │   ├── task-complexity-report_autonomous-tdd-git-workflow.json
│   │   ├── task-complexity-report_cc-kiro-hooks.json
│   │   ├── task-complexity-report_tdd-phase-1-core-rails.json
│   │   ├── task-complexity-report_tdd-workflow-phase-0.json
│   │   ├── task-complexity-report_test-prd-tag.json
│   │   ├── task-complexity-report_tm-core-phase-1.json
│   │   ├── task-complexity-report.json
│   │   └── tm-core-complexity.json
│   ├── state.json
│   ├── tasks
│   │   ├── task_001_tm-start.txt
│   │   ├── task_002_tm-start.txt
│   │   ├── task_003_tm-start.txt
│   │   ├── task_004_tm-start.txt
│   │   ├── task_007_tm-start.txt
│   │   └── tasks.json
│   └── templates
│       ├── example_prd_rpg.md
│       └── example_prd.md
├── .vscode
│   ├── extensions.json
│   └── settings.json
├── apps
│   ├── cli
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── src
│   │   │   ├── command-registry.ts
│   │   │   ├── commands
│   │   │   │   ├── auth.command.ts
│   │   │   │   ├── autopilot
│   │   │   │   │   ├── abort.command.ts
│   │   │   │   │   ├── commit.command.ts
│   │   │   │   │   ├── complete.command.ts
│   │   │   │   │   ├── index.ts
│   │   │   │   │   ├── next.command.ts
│   │   │   │   │   ├── resume.command.ts
│   │   │   │   │   ├── shared.ts
│   │   │   │   │   ├── start.command.ts
│   │   │   │   │   └── status.command.ts
│   │   │   │   ├── briefs.command.ts
│   │   │   │   ├── context.command.ts
│   │   │   │   ├── export.command.ts
│   │   │   │   ├── list.command.ts
│   │   │   │   ├── models
│   │   │   │   │   ├── custom-providers.ts
│   │   │   │   │   ├── fetchers.ts
│   │   │   │   │   ├── index.ts
│   │   │   │   │   ├── prompts.ts
│   │   │   │   │   ├── setup.ts
│   │   │   │   │   └── types.ts
│   │   │   │   ├── next.command.ts
│   │   │   │   ├── set-status.command.ts
│   │   │   │   ├── show.command.ts
│   │   │   │   ├── start.command.ts
│   │   │   │   └── tags.command.ts
│   │   │   ├── index.ts
│   │   │   ├── lib
│   │   │   │   └── model-management.ts
│   │   │   ├── types
│   │   │   │   └── tag-management.d.ts
│   │   │   ├── ui
│   │   │   │   ├── components
│   │   │   │   │   ├── cardBox.component.ts
│   │   │   │   │   ├── dashboard.component.ts
│   │   │   │   │   ├── header.component.ts
│   │   │   │   │   ├── index.ts
│   │   │   │   │   ├── next-task.component.ts
│   │   │   │   │   ├── suggested-steps.component.ts
│   │   │   │   │   └── task-detail.component.ts
│   │   │   │   ├── display
│   │   │   │   │   ├── messages.ts
│   │   │   │   │   └── tables.ts
│   │   │   │   ├── formatters
│   │   │   │   │   ├── complexity-formatters.ts
│   │   │   │   │   ├── dependency-formatters.ts
│   │   │   │   │   ├── priority-formatters.ts
│   │   │   │   │   ├── status-formatters.spec.ts
│   │   │   │   │   └── status-formatters.ts
│   │   │   │   ├── index.ts
│   │   │   │   └── layout
│   │   │   │       ├── helpers.spec.ts
│   │   │   │       └── helpers.ts
│   │   │   └── utils
│   │   │       ├── auth-helpers.ts
│   │   │       ├── auto-update.ts
│   │   │       ├── brief-selection.ts
│   │   │       ├── display-helpers.ts
│   │   │       ├── error-handler.ts
│   │   │       ├── index.ts
│   │   │       ├── project-root.ts
│   │   │       ├── task-status.ts
│   │   │       ├── ui.spec.ts
│   │   │       └── ui.ts
│   │   ├── tests
│   │   │   ├── integration
│   │   │   │   └── commands
│   │   │   │       └── autopilot
│   │   │   │           └── workflow.test.ts
│   │   │   └── unit
│   │   │       ├── commands
│   │   │       │   ├── autopilot
│   │   │       │   │   └── shared.test.ts
│   │   │       │   ├── list.command.spec.ts
│   │   │       │   └── show.command.spec.ts
│   │   │       └── ui
│   │   │           └── dashboard.component.spec.ts
│   │   ├── tsconfig.json
│   │   └── vitest.config.ts
│   ├── docs
│   │   ├── archive
│   │   │   ├── ai-client-utils-example.mdx
│   │   │   ├── ai-development-workflow.mdx
│   │   │   ├── command-reference.mdx
│   │   │   ├── configuration.mdx
│   │   │   ├── cursor-setup.mdx
│   │   │   ├── examples.mdx
│   │   │   └── Installation.mdx
│   │   ├── best-practices
│   │   │   ├── advanced-tasks.mdx
│   │   │   ├── configuration-advanced.mdx
│   │   │   └── index.mdx
│   │   ├── capabilities
│   │   │   ├── cli-root-commands.mdx
│   │   │   ├── index.mdx
│   │   │   ├── mcp.mdx
│   │   │   ├── rpg-method.mdx
│   │   │   └── task-structure.mdx
│   │   ├── CHANGELOG.md
│   │   ├── command-reference.mdx
│   │   ├── configuration.mdx
│   │   ├── docs.json
│   │   ├── favicon.svg
│   │   ├── getting-started
│   │   │   ├── api-keys.mdx
│   │   │   ├── contribute.mdx
│   │   │   ├── faq.mdx
│   │   │   └── quick-start
│   │   │       ├── configuration-quick.mdx
│   │   │       ├── execute-quick.mdx
│   │   │       ├── installation.mdx
│   │   │       ├── moving-forward.mdx
│   │   │       ├── prd-quick.mdx
│   │   │       ├── quick-start.mdx
│   │   │       ├── requirements.mdx
│   │   │       ├── rules-quick.mdx
│   │   │       └── tasks-quick.mdx
│   │   ├── introduction.mdx
│   │   ├── licensing.md
│   │   ├── logo
│   │   │   ├── dark.svg
│   │   │   ├── light.svg
│   │   │   └── task-master-logo.png
│   │   ├── package.json
│   │   ├── README.md
│   │   ├── style.css
│   │   ├── tdd-workflow
│   │   │   ├── ai-agent-integration.mdx
│   │   │   └── quickstart.mdx
│   │   ├── vercel.json
│   │   └── whats-new.mdx
│   ├── extension
│   │   ├── .vscodeignore
│   │   ├── assets
│   │   │   ├── banner.png
│   │   │   ├── icon-dark.svg
│   │   │   ├── icon-light.svg
│   │   │   ├── icon.png
│   │   │   ├── screenshots
│   │   │   │   ├── kanban-board.png
│   │   │   │   └── task-details.png
│   │   │   └── sidebar-icon.svg
│   │   ├── CHANGELOG.md
│   │   ├── components.json
│   │   ├── docs
│   │   │   ├── extension-CI-setup.md
│   │   │   └── extension-development-guide.md
│   │   ├── esbuild.js
│   │   ├── LICENSE
│   │   ├── package.json
│   │   ├── package.mjs
│   │   ├── package.publish.json
│   │   ├── README.md
│   │   ├── src
│   │   │   ├── components
│   │   │   │   ├── ConfigView.tsx
│   │   │   │   ├── constants.ts
│   │   │   │   ├── TaskDetails
│   │   │   │   │   ├── AIActionsSection.tsx
│   │   │   │   │   ├── DetailsSection.tsx
│   │   │   │   │   ├── PriorityBadge.tsx
│   │   │   │   │   ├── SubtasksSection.tsx
│   │   │   │   │   ├── TaskMetadataSidebar.tsx
│   │   │   │   │   └── useTaskDetails.ts
│   │   │   │   ├── TaskDetailsView.tsx
│   │   │   │   ├── TaskMasterLogo.tsx
│   │   │   │   └── ui
│   │   │   │       ├── badge.tsx
│   │   │   │       ├── breadcrumb.tsx
│   │   │   │       ├── button.tsx
│   │   │   │       ├── card.tsx
│   │   │   │       ├── collapsible.tsx
│   │   │   │       ├── CollapsibleSection.tsx
│   │   │   │       ├── dropdown-menu.tsx
│   │   │   │       ├── label.tsx
│   │   │   │       ├── scroll-area.tsx
│   │   │   │       ├── separator.tsx
│   │   │   │       ├── shadcn-io
│   │   │   │       │   └── kanban
│   │   │   │       │       └── index.tsx
│   │   │   │       └── textarea.tsx
│   │   │   ├── extension.ts
│   │   │   ├── index.ts
│   │   │   ├── lib
│   │   │   │   └── utils.ts
│   │   │   ├── services
│   │   │   │   ├── config-service.ts
│   │   │   │   ├── error-handler.ts
│   │   │   │   ├── notification-preferences.ts
│   │   │   │   ├── polling-service.ts
│   │   │   │   ├── polling-strategies.ts
│   │   │   │   ├── sidebar-webview-manager.ts
│   │   │   │   ├── task-repository.ts
│   │   │   │   ├── terminal-manager.ts
│   │   │   │   └── webview-manager.ts
│   │   │   ├── test
│   │   │   │   └── extension.test.ts
│   │   │   ├── utils
│   │   │   │   ├── configManager.ts
│   │   │   │   ├── connectionManager.ts
│   │   │   │   ├── errorHandler.ts
│   │   │   │   ├── event-emitter.ts
│   │   │   │   ├── logger.ts
│   │   │   │   ├── mcpClient.ts
│   │   │   │   ├── notificationPreferences.ts
│   │   │   │   └── task-master-api
│   │   │   │       ├── cache
│   │   │   │       │   └── cache-manager.ts
│   │   │   │       ├── index.ts
│   │   │   │       ├── mcp-client.ts
│   │   │   │       ├── transformers
│   │   │   │       │   └── task-transformer.ts
│   │   │   │       └── types
│   │   │   │           └── index.ts
│   │   │   └── webview
│   │   │       ├── App.tsx
│   │   │       ├── components
│   │   │       │   ├── AppContent.tsx
│   │   │       │   ├── EmptyState.tsx
│   │   │       │   ├── ErrorBoundary.tsx
│   │   │       │   ├── PollingStatus.tsx
│   │   │       │   ├── PriorityBadge.tsx
│   │   │       │   ├── SidebarView.tsx
│   │   │       │   ├── TagDropdown.tsx
│   │   │       │   ├── TaskCard.tsx
│   │   │       │   ├── TaskEditModal.tsx
│   │   │       │   ├── TaskMasterKanban.tsx
│   │   │       │   ├── ToastContainer.tsx
│   │   │       │   └── ToastNotification.tsx
│   │   │       ├── constants
│   │   │       │   └── index.ts
│   │   │       ├── contexts
│   │   │       │   └── VSCodeContext.tsx
│   │   │       ├── hooks
│   │   │       │   ├── useTaskQueries.ts
│   │   │       │   ├── useVSCodeMessages.ts
│   │   │       │   └── useWebviewHeight.ts
│   │   │       ├── index.css
│   │   │       ├── index.tsx
│   │   │       ├── providers
│   │   │       │   └── QueryProvider.tsx
│   │   │       ├── reducers
│   │   │       │   └── appReducer.ts
│   │   │       ├── sidebar.tsx
│   │   │       ├── types
│   │   │       │   └── index.ts
│   │   │       └── utils
│   │   │           ├── logger.ts
│   │   │           └── toast.ts
│   │   └── tsconfig.json
│   └── mcp
│       ├── CHANGELOG.md
│       ├── package.json
│       ├── src
│       │   ├── index.ts
│       │   ├── shared
│       │   │   ├── types.ts
│       │   │   └── utils.ts
│       │   └── tools
│       │       ├── autopilot
│       │       │   ├── abort.tool.ts
│       │       │   ├── commit.tool.ts
│       │       │   ├── complete.tool.ts
│       │       │   ├── finalize.tool.ts
│       │       │   ├── index.ts
│       │       │   ├── next.tool.ts
│       │       │   ├── resume.tool.ts
│       │       │   ├── start.tool.ts
│       │       │   └── status.tool.ts
│       │       ├── README-ZOD-V3.md
│       │       └── tasks
│       │           ├── get-task.tool.ts
│       │           ├── get-tasks.tool.ts
│       │           └── index.ts
│       ├── tsconfig.json
│       └── vitest.config.ts
├── assets
│   ├── .windsurfrules
│   ├── AGENTS.md
│   ├── claude
│   │   └── TM_COMMANDS_GUIDE.md
│   ├── config.json
│   ├── env.example
│   ├── example_prd_rpg.txt
│   ├── example_prd.txt
│   ├── GEMINI.md
│   ├── gitignore
│   ├── kiro-hooks
│   │   ├── tm-code-change-task-tracker.kiro.hook
│   │   ├── tm-complexity-analyzer.kiro.hook
│   │   ├── tm-daily-standup-assistant.kiro.hook
│   │   ├── tm-git-commit-task-linker.kiro.hook
│   │   ├── tm-pr-readiness-checker.kiro.hook
│   │   ├── tm-task-dependency-auto-progression.kiro.hook
│   │   └── tm-test-success-task-completer.kiro.hook
│   ├── roocode
│   │   ├── .roo
│   │   │   ├── rules-architect
│   │   │   │   └── architect-rules
│   │   │   ├── rules-ask
│   │   │   │   └── ask-rules
│   │   │   ├── rules-code
│   │   │   │   └── code-rules
│   │   │   ├── rules-debug
│   │   │   │   └── debug-rules
│   │   │   ├── rules-orchestrator
│   │   │   │   └── orchestrator-rules
│   │   │   └── rules-test
│   │   │       └── test-rules
│   │   └── .roomodes
│   ├── rules
│   │   ├── cursor_rules.mdc
│   │   ├── dev_workflow.mdc
│   │   ├── self_improve.mdc
│   │   ├── taskmaster_hooks_workflow.mdc
│   │   └── taskmaster.mdc
│   └── scripts_README.md
├── bin
│   └── task-master.js
├── biome.json
├── CHANGELOG.md
├── CLAUDE_CODE_PLUGIN.md
├── CLAUDE.md
├── context
│   ├── chats
│   │   ├── add-task-dependencies-1.md
│   │   └── max-min-tokens.txt.md
│   ├── fastmcp-core.txt
│   ├── fastmcp-docs.txt
│   ├── MCP_INTEGRATION.md
│   ├── mcp-js-sdk-docs.txt
│   ├── mcp-protocol-repo.txt
│   ├── mcp-protocol-schema-03262025.json
│   └── mcp-protocol-spec.txt
├── CONTRIBUTING.md
├── docs
│   ├── claude-code-integration.md
│   ├── CLI-COMMANDER-PATTERN.md
│   ├── command-reference.md
│   ├── configuration.md
│   ├── contributor-docs
│   │   ├── testing-roo-integration.md
│   │   └── worktree-setup.md
│   ├── cross-tag-task-movement.md
│   ├── examples
│   │   ├── claude-code-usage.md
│   │   └── codex-cli-usage.md
│   ├── examples.md
│   ├── licensing.md
│   ├── mcp-provider-guide.md
│   ├── mcp-provider.md
│   ├── migration-guide.md
│   ├── models.md
│   ├── providers
│   │   ├── codex-cli.md
│   │   └── gemini-cli.md
│   ├── README.md
│   ├── scripts
│   │   └── models-json-to-markdown.js
│   ├── task-structure.md
│   └── tutorial.md
├── images
│   ├── hamster-hiring.png
│   └── logo.png
├── index.js
├── jest.config.js
├── jest.resolver.cjs
├── LICENSE
├── llms-install.md
├── mcp-server
│   ├── server.js
│   └── src
│       ├── core
│       │   ├── __tests__
│       │   │   └── context-manager.test.js
│       │   ├── context-manager.js
│       │   ├── direct-functions
│       │   │   ├── add-dependency.js
│       │   │   ├── add-subtask.js
│       │   │   ├── add-tag.js
│       │   │   ├── add-task.js
│       │   │   ├── analyze-task-complexity.js
│       │   │   ├── cache-stats.js
│       │   │   ├── clear-subtasks.js
│       │   │   ├── complexity-report.js
│       │   │   ├── copy-tag.js
│       │   │   ├── create-tag-from-branch.js
│       │   │   ├── delete-tag.js
│       │   │   ├── expand-all-tasks.js
│       │   │   ├── expand-task.js
│       │   │   ├── fix-dependencies.js
│       │   │   ├── generate-task-files.js
│       │   │   ├── initialize-project.js
│       │   │   ├── list-tags.js
│       │   │   ├── models.js
│       │   │   ├── move-task-cross-tag.js
│       │   │   ├── move-task.js
│       │   │   ├── next-task.js
│       │   │   ├── parse-prd.js
│       │   │   ├── remove-dependency.js
│       │   │   ├── remove-subtask.js
│       │   │   ├── remove-task.js
│       │   │   ├── rename-tag.js
│       │   │   ├── research.js
│       │   │   ├── response-language.js
│       │   │   ├── rules.js
│       │   │   ├── scope-down.js
│       │   │   ├── scope-up.js
│       │   │   ├── set-task-status.js
│       │   │   ├── update-subtask-by-id.js
│       │   │   ├── update-task-by-id.js
│       │   │   ├── update-tasks.js
│       │   │   ├── use-tag.js
│       │   │   └── validate-dependencies.js
│       │   ├── task-master-core.js
│       │   └── utils
│       │       ├── env-utils.js
│       │       └── path-utils.js
│       ├── custom-sdk
│       │   ├── errors.js
│       │   ├── index.js
│       │   ├── json-extractor.js
│       │   ├── language-model.js
│       │   ├── message-converter.js
│       │   └── schema-converter.js
│       ├── index.js
│       ├── logger.js
│       ├── providers
│       │   └── mcp-provider.js
│       └── tools
│           ├── add-dependency.js
│           ├── add-subtask.js
│           ├── add-tag.js
│           ├── add-task.js
│           ├── analyze.js
│           ├── clear-subtasks.js
│           ├── complexity-report.js
│           ├── copy-tag.js
│           ├── delete-tag.js
│           ├── expand-all.js
│           ├── expand-task.js
│           ├── fix-dependencies.js
│           ├── generate.js
│           ├── get-operation-status.js
│           ├── index.js
│           ├── initialize-project.js
│           ├── list-tags.js
│           ├── models.js
│           ├── move-task.js
│           ├── next-task.js
│           ├── parse-prd.js
│           ├── README-ZOD-V3.md
│           ├── remove-dependency.js
│           ├── remove-subtask.js
│           ├── remove-task.js
│           ├── rename-tag.js
│           ├── research.js
│           ├── response-language.js
│           ├── rules.js
│           ├── scope-down.js
│           ├── scope-up.js
│           ├── set-task-status.js
│           ├── tool-registry.js
│           ├── update-subtask.js
│           ├── update-task.js
│           ├── update.js
│           ├── use-tag.js
│           ├── utils.js
│           └── validate-dependencies.js
├── mcp-test.js
├── output.json
├── package-lock.json
├── package.json
├── packages
│   ├── ai-sdk-provider-grok-cli
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── README.md
│   │   ├── src
│   │   │   ├── errors.test.ts
│   │   │   ├── errors.ts
│   │   │   ├── grok-cli-language-model.ts
│   │   │   ├── grok-cli-provider.test.ts
│   │   │   ├── grok-cli-provider.ts
│   │   │   ├── index.ts
│   │   │   ├── json-extractor.test.ts
│   │   │   ├── json-extractor.ts
│   │   │   ├── message-converter.test.ts
│   │   │   ├── message-converter.ts
│   │   │   └── types.ts
│   │   └── tsconfig.json
│   ├── build-config
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── src
│   │   │   └── tsdown.base.ts
│   │   └── tsconfig.json
│   ├── claude-code-plugin
│   │   ├── .claude-plugin
│   │   │   └── plugin.json
│   │   ├── .gitignore
│   │   ├── agents
│   │   │   ├── task-checker.md
│   │   │   ├── task-executor.md
│   │   │   └── task-orchestrator.md
│   │   ├── CHANGELOG.md
│   │   ├── commands
│   │   │   ├── add-dependency.md
│   │   │   ├── add-subtask.md
│   │   │   ├── add-task.md
│   │   │   ├── analyze-complexity.md
│   │   │   ├── analyze-project.md
│   │   │   ├── auto-implement-tasks.md
│   │   │   ├── command-pipeline.md
│   │   │   ├── complexity-report.md
│   │   │   ├── convert-task-to-subtask.md
│   │   │   ├── expand-all-tasks.md
│   │   │   ├── expand-task.md
│   │   │   ├── fix-dependencies.md
│   │   │   ├── generate-tasks.md
│   │   │   ├── help.md
│   │   │   ├── init-project-quick.md
│   │   │   ├── init-project.md
│   │   │   ├── install-taskmaster.md
│   │   │   ├── learn.md
│   │   │   ├── list-tasks-by-status.md
│   │   │   ├── list-tasks-with-subtasks.md
│   │   │   ├── list-tasks.md
│   │   │   ├── next-task.md
│   │   │   ├── parse-prd-with-research.md
│   │   │   ├── parse-prd.md
│   │   │   ├── project-status.md
│   │   │   ├── quick-install-taskmaster.md
│   │   │   ├── remove-all-subtasks.md
│   │   │   ├── remove-dependency.md
│   │   │   ├── remove-subtask.md
│   │   │   ├── remove-subtasks.md
│   │   │   ├── remove-task.md
│   │   │   ├── setup-models.md
│   │   │   ├── show-task.md
│   │   │   ├── smart-workflow.md
│   │   │   ├── sync-readme.md
│   │   │   ├── tm-main.md
│   │   │   ├── to-cancelled.md
│   │   │   ├── to-deferred.md
│   │   │   ├── to-done.md
│   │   │   ├── to-in-progress.md
│   │   │   ├── to-pending.md
│   │   │   ├── to-review.md
│   │   │   ├── update-single-task.md
│   │   │   ├── update-task.md
│   │   │   ├── update-tasks-from-id.md
│   │   │   ├── validate-dependencies.md
│   │   │   └── view-models.md
│   │   ├── mcp.json
│   │   └── package.json
│   ├── tm-bridge
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── README.md
│   │   ├── src
│   │   │   ├── add-tag-bridge.ts
│   │   │   ├── bridge-types.ts
│   │   │   ├── bridge-utils.ts
│   │   │   ├── expand-bridge.ts
│   │   │   ├── index.ts
│   │   │   ├── tags-bridge.ts
│   │   │   ├── update-bridge.ts
│   │   │   └── use-tag-bridge.ts
│   │   └── tsconfig.json
│   └── tm-core
│       ├── .gitignore
│       ├── CHANGELOG.md
│       ├── docs
│       │   └── listTasks-architecture.md
│       ├── package.json
│       ├── POC-STATUS.md
│       ├── README.md
│       ├── src
│       │   ├── common
│       │   │   ├── constants
│       │   │   │   ├── index.ts
│       │   │   │   ├── paths.ts
│       │   │   │   └── providers.ts
│       │   │   ├── errors
│       │   │   │   ├── index.ts
│       │   │   │   └── task-master-error.ts
│       │   │   ├── interfaces
│       │   │   │   ├── configuration.interface.ts
│       │   │   │   ├── index.ts
│       │   │   │   └── storage.interface.ts
│       │   │   ├── logger
│       │   │   │   ├── factory.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── logger.spec.ts
│       │   │   │   └── logger.ts
│       │   │   ├── mappers
│       │   │   │   ├── TaskMapper.test.ts
│       │   │   │   └── TaskMapper.ts
│       │   │   ├── types
│       │   │   │   ├── database.types.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── legacy.ts
│       │   │   │   └── repository-types.ts
│       │   │   └── utils
│       │   │       ├── git-utils.ts
│       │   │       ├── id-generator.ts
│       │   │       ├── index.ts
│       │   │       ├── path-helpers.ts
│       │   │       ├── path-normalizer.spec.ts
│       │   │       ├── path-normalizer.ts
│       │   │       ├── project-root-finder.spec.ts
│       │   │       ├── project-root-finder.ts
│       │   │       ├── run-id-generator.spec.ts
│       │   │       └── run-id-generator.ts
│       │   ├── index.ts
│       │   ├── modules
│       │   │   ├── ai
│       │   │   │   ├── index.ts
│       │   │   │   ├── interfaces
│       │   │   │   │   └── ai-provider.interface.ts
│       │   │   │   └── providers
│       │   │   │       ├── base-provider.ts
│       │   │   │       └── index.ts
│       │   │   ├── auth
│       │   │   │   ├── auth-domain.spec.ts
│       │   │   │   ├── auth-domain.ts
│       │   │   │   ├── config.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── managers
│       │   │   │   │   ├── auth-manager.spec.ts
│       │   │   │   │   └── auth-manager.ts
│       │   │   │   ├── services
│       │   │   │   │   ├── context-store.ts
│       │   │   │   │   ├── oauth-service.ts
│       │   │   │   │   ├── organization.service.ts
│       │   │   │   │   ├── supabase-session-storage.spec.ts
│       │   │   │   │   └── supabase-session-storage.ts
│       │   │   │   └── types.ts
│       │   │   ├── briefs
│       │   │   │   ├── briefs-domain.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── services
│       │   │   │   │   └── brief-service.ts
│       │   │   │   ├── types.ts
│       │   │   │   └── utils
│       │   │   │       └── url-parser.ts
│       │   │   ├── commands
│       │   │   │   └── index.ts
│       │   │   ├── config
│       │   │   │   ├── config-domain.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── managers
│       │   │   │   │   ├── config-manager.spec.ts
│       │   │   │   │   └── config-manager.ts
│       │   │   │   └── services
│       │   │   │       ├── config-loader.service.spec.ts
│       │   │   │       ├── config-loader.service.ts
│       │   │   │       ├── config-merger.service.spec.ts
│       │   │   │       ├── config-merger.service.ts
│       │   │   │       ├── config-persistence.service.spec.ts
│       │   │   │       ├── config-persistence.service.ts
│       │   │   │       ├── environment-config-provider.service.spec.ts
│       │   │   │       ├── environment-config-provider.service.ts
│       │   │   │       ├── index.ts
│       │   │   │       ├── runtime-state-manager.service.spec.ts
│       │   │   │       └── runtime-state-manager.service.ts
│       │   │   ├── dependencies
│       │   │   │   └── index.ts
│       │   │   ├── execution
│       │   │   │   ├── executors
│       │   │   │   │   ├── base-executor.ts
│       │   │   │   │   ├── claude-executor.ts
│       │   │   │   │   └── executor-factory.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── services
│       │   │   │   │   └── executor-service.ts
│       │   │   │   └── types.ts
│       │   │   ├── git
│       │   │   │   ├── adapters
│       │   │   │   │   ├── git-adapter.test.ts
│       │   │   │   │   └── git-adapter.ts
│       │   │   │   ├── git-domain.ts
│       │   │   │   ├── index.ts
│       │   │   │   └── services
│       │   │   │       ├── branch-name-generator.spec.ts
│       │   │   │       ├── branch-name-generator.ts
│       │   │   │       ├── commit-message-generator.test.ts
│       │   │   │       ├── commit-message-generator.ts
│       │   │   │       ├── scope-detector.test.ts
│       │   │   │       ├── scope-detector.ts
│       │   │   │       ├── template-engine.test.ts
│       │   │   │       └── template-engine.ts
│       │   │   ├── integration
│       │   │   │   ├── clients
│       │   │   │   │   ├── index.ts
│       │   │   │   │   └── supabase-client.ts
│       │   │   │   ├── integration-domain.ts
│       │   │   │   └── services
│       │   │   │       ├── export.service.ts
│       │   │   │       ├── task-expansion.service.ts
│       │   │   │       └── task-retrieval.service.ts
│       │   │   ├── reports
│       │   │   │   ├── index.ts
│       │   │   │   ├── managers
│       │   │   │   │   └── complexity-report-manager.ts
│       │   │   │   └── types.ts
│       │   │   ├── storage
│       │   │   │   ├── adapters
│       │   │   │   │   ├── activity-logger.ts
│       │   │   │   │   ├── api-storage.ts
│       │   │   │   │   └── file-storage
│       │   │   │   │       ├── file-operations.ts
│       │   │   │   │       ├── file-storage.ts
│       │   │   │   │       ├── format-handler.ts
│       │   │   │   │       ├── index.ts
│       │   │   │   │       └── path-resolver.ts
│       │   │   │   ├── index.ts
│       │   │   │   ├── services
│       │   │   │   │   └── storage-factory.ts
│       │   │   │   └── utils
│       │   │   │       └── api-client.ts
│       │   │   ├── tasks
│       │   │   │   ├── entities
│       │   │   │   │   └── task.entity.ts
│       │   │   │   ├── parser
│       │   │   │   │   └── index.ts
│       │   │   │   ├── repositories
│       │   │   │   │   ├── supabase
│       │   │   │   │   │   ├── dependency-fetcher.ts
│       │   │   │   │   │   ├── index.ts
│       │   │   │   │   │   └── supabase-repository.ts
│       │   │   │   │   └── task-repository.interface.ts
│       │   │   │   ├── services
│       │   │   │   │   ├── preflight-checker.service.ts
│       │   │   │   │   ├── tag.service.ts
│       │   │   │   │   ├── task-execution-service.ts
│       │   │   │   │   ├── task-loader.service.ts
│       │   │   │   │   └── task-service.ts
│       │   │   │   └── tasks-domain.ts
│       │   │   ├── ui
│       │   │   │   └── index.ts
│       │   │   └── workflow
│       │   │       ├── managers
│       │   │       │   ├── workflow-state-manager.spec.ts
│       │   │       │   └── workflow-state-manager.ts
│       │   │       ├── orchestrators
│       │   │       │   ├── workflow-orchestrator.test.ts
│       │   │       │   └── workflow-orchestrator.ts
│       │   │       ├── services
│       │   │       │   ├── test-result-validator.test.ts
│       │   │       │   ├── test-result-validator.ts
│       │   │       │   ├── test-result-validator.types.ts
│       │   │       │   ├── workflow-activity-logger.ts
│       │   │       │   └── workflow.service.ts
│       │   │       ├── types.ts
│       │   │       └── workflow-domain.ts
│       │   ├── subpath-exports.test.ts
│       │   ├── tm-core.ts
│       │   └── utils
│       │       └── time.utils.ts
│       ├── tests
│       │   ├── auth
│       │   │   └── auth-refresh.test.ts
│       │   ├── integration
│       │   │   ├── auth-token-refresh.test.ts
│       │   │   ├── list-tasks.test.ts
│       │   │   └── storage
│       │   │       └── activity-logger.test.ts
│       │   ├── mocks
│       │   │   └── mock-provider.ts
│       │   ├── setup.ts
│       │   └── unit
│       │       ├── base-provider.test.ts
│       │       ├── executor.test.ts
│       │       └── smoke.test.ts
│       ├── tsconfig.json
│       └── vitest.config.ts
├── README-task-master.md
├── README.md
├── scripts
│   ├── create-worktree.sh
│   ├── dev.js
│   ├── init.js
│   ├── list-worktrees.sh
│   ├── modules
│   │   ├── ai-services-unified.js
│   │   ├── bridge-utils.js
│   │   ├── commands.js
│   │   ├── config-manager.js
│   │   ├── dependency-manager.js
│   │   ├── index.js
│   │   ├── prompt-manager.js
│   │   ├── supported-models.json
│   │   ├── sync-readme.js
│   │   ├── task-manager
│   │   │   ├── add-subtask.js
│   │   │   ├── add-task.js
│   │   │   ├── analyze-task-complexity.js
│   │   │   ├── clear-subtasks.js
│   │   │   ├── expand-all-tasks.js
│   │   │   ├── expand-task.js
│   │   │   ├── find-next-task.js
│   │   │   ├── generate-task-files.js
│   │   │   ├── is-task-dependent.js
│   │   │   ├── list-tasks.js
│   │   │   ├── migrate.js
│   │   │   ├── models.js
│   │   │   ├── move-task.js
│   │   │   ├── parse-prd
│   │   │   │   ├── index.js
│   │   │   │   ├── parse-prd-config.js
│   │   │   │   ├── parse-prd-helpers.js
│   │   │   │   ├── parse-prd-non-streaming.js
│   │   │   │   ├── parse-prd-streaming.js
│   │   │   │   └── parse-prd.js
│   │   │   ├── remove-subtask.js
│   │   │   ├── remove-task.js
│   │   │   ├── research.js
│   │   │   ├── response-language.js
│   │   │   ├── scope-adjustment.js
│   │   │   ├── set-task-status.js
│   │   │   ├── tag-management.js
│   │   │   ├── task-exists.js
│   │   │   ├── update-single-task-status.js
│   │   │   ├── update-subtask-by-id.js
│   │   │   ├── update-task-by-id.js
│   │   │   └── update-tasks.js
│   │   ├── task-manager.js
│   │   ├── ui.js
│   │   ├── update-config-tokens.js
│   │   ├── utils
│   │   │   ├── contextGatherer.js
│   │   │   ├── fuzzyTaskSearch.js
│   │   │   └── git-utils.js
│   │   └── utils.js
│   ├── task-complexity-report.json
│   ├── test-claude-errors.js
│   └── test-claude.js
├── sonar-project.properties
├── src
│   ├── ai-providers
│   │   ├── anthropic.js
│   │   ├── azure.js
│   │   ├── base-provider.js
│   │   ├── bedrock.js
│   │   ├── claude-code.js
│   │   ├── codex-cli.js
│   │   ├── gemini-cli.js
│   │   ├── google-vertex.js
│   │   ├── google.js
│   │   ├── grok-cli.js
│   │   ├── groq.js
│   │   ├── index.js
│   │   ├── lmstudio.js
│   │   ├── ollama.js
│   │   ├── openai-compatible.js
│   │   ├── openai.js
│   │   ├── openrouter.js
│   │   ├── perplexity.js
│   │   ├── xai.js
│   │   ├── zai-coding.js
│   │   └── zai.js
│   ├── constants
│   │   ├── commands.js
│   │   ├── paths.js
│   │   ├── profiles.js
│   │   ├── rules-actions.js
│   │   ├── task-priority.js
│   │   └── task-status.js
│   ├── profiles
│   │   ├── amp.js
│   │   ├── base-profile.js
│   │   ├── claude.js
│   │   ├── cline.js
│   │   ├── codex.js
│   │   ├── cursor.js
│   │   ├── gemini.js
│   │   ├── index.js
│   │   ├── kilo.js
│   │   ├── kiro.js
│   │   ├── opencode.js
│   │   ├── roo.js
│   │   ├── trae.js
│   │   ├── vscode.js
│   │   ├── windsurf.js
│   │   └── zed.js
│   ├── progress
│   │   ├── base-progress-tracker.js
│   │   ├── cli-progress-factory.js
│   │   ├── parse-prd-tracker.js
│   │   ├── progress-tracker-builder.js
│   │   └── tracker-ui.js
│   ├── prompts
│   │   ├── add-task.json
│   │   ├── analyze-complexity.json
│   │   ├── expand-task.json
│   │   ├── parse-prd.json
│   │   ├── README.md
│   │   ├── research.json
│   │   ├── schemas
│   │   │   ├── parameter.schema.json
│   │   │   ├── prompt-template.schema.json
│   │   │   ├── README.md
│   │   │   └── variant.schema.json
│   │   ├── update-subtask.json
│   │   ├── update-task.json
│   │   └── update-tasks.json
│   ├── provider-registry
│   │   └── index.js
│   ├── schemas
│   │   ├── add-task.js
│   │   ├── analyze-complexity.js
│   │   ├── base-schemas.js
│   │   ├── expand-task.js
│   │   ├── parse-prd.js
│   │   ├── registry.js
│   │   ├── update-subtask.js
│   │   ├── update-task.js
│   │   └── update-tasks.js
│   ├── task-master.js
│   ├── ui
│   │   ├── confirm.js
│   │   ├── indicators.js
│   │   └── parse-prd.js
│   └── utils
│       ├── asset-resolver.js
│       ├── create-mcp-config.js
│       ├── format.js
│       ├── getVersion.js
│       ├── logger-utils.js
│       ├── manage-gitignore.js
│       ├── path-utils.js
│       ├── profiles.js
│       ├── rule-transformer.js
│       ├── stream-parser.js
│       └── timeout-manager.js
├── test-clean-tags.js
├── test-config-manager.js
├── test-prd.txt
├── test-tag-functions.js
├── test-version-check-full.js
├── test-version-check.js
├── tests
│   ├── e2e
│   │   ├── e2e_helpers.sh
│   │   ├── parse_llm_output.cjs
│   │   ├── run_e2e.sh
│   │   ├── run_fallback_verification.sh
│   │   └── test_llm_analysis.sh
│   ├── fixtures
│   │   ├── .taskmasterconfig
│   │   ├── sample-claude-response.js
│   │   ├── sample-prd.txt
│   │   └── sample-tasks.js
│   ├── helpers
│   │   └── tool-counts.js
│   ├── integration
│   │   ├── claude-code-error-handling.test.js
│   │   ├── claude-code-optional.test.js
│   │   ├── cli
│   │   │   ├── commands.test.js
│   │   │   ├── complex-cross-tag-scenarios.test.js
│   │   │   └── move-cross-tag.test.js
│   │   ├── manage-gitignore.test.js
│   │   ├── mcp-server
│   │   │   └── direct-functions.test.js
│   │   ├── move-task-cross-tag.integration.test.js
│   │   ├── move-task-simple.integration.test.js
│   │   ├── profiles
│   │   │   ├── amp-init-functionality.test.js
│   │   │   ├── claude-init-functionality.test.js
│   │   │   ├── cline-init-functionality.test.js
│   │   │   ├── codex-init-functionality.test.js
│   │   │   ├── cursor-init-functionality.test.js
│   │   │   ├── gemini-init-functionality.test.js
│   │   │   ├── opencode-init-functionality.test.js
│   │   │   ├── roo-files-inclusion.test.js
│   │   │   ├── roo-init-functionality.test.js
│   │   │   ├── rules-files-inclusion.test.js
│   │   │   ├── trae-init-functionality.test.js
│   │   │   ├── vscode-init-functionality.test.js
│   │   │   └── windsurf-init-functionality.test.js
│   │   └── providers
│   │       └── temperature-support.test.js
│   ├── manual
│   │   ├── progress
│   │   │   ├── parse-prd-analysis.js
│   │   │   ├── test-parse-prd.js
│   │   │   └── TESTING_GUIDE.md
│   │   └── prompts
│   │       ├── prompt-test.js
│   │       └── README.md
│   ├── README.md
│   ├── setup.js
│   └── unit
│       ├── ai-providers
│       │   ├── base-provider.test.js
│       │   ├── claude-code.test.js
│       │   ├── codex-cli.test.js
│       │   ├── gemini-cli.test.js
│       │   ├── lmstudio.test.js
│       │   ├── mcp-components.test.js
│       │   ├── openai-compatible.test.js
│       │   ├── openai.test.js
│       │   ├── provider-registry.test.js
│       │   ├── zai-coding.test.js
│       │   ├── zai-provider.test.js
│       │   ├── zai-schema-introspection.test.js
│       │   └── zai.test.js
│       ├── ai-services-unified.test.js
│       ├── commands.test.js
│       ├── config-manager.test.js
│       ├── config-manager.test.mjs
│       ├── dependency-manager.test.js
│       ├── init.test.js
│       ├── initialize-project.test.js
│       ├── kebab-case-validation.test.js
│       ├── manage-gitignore.test.js
│       ├── mcp
│       │   └── tools
│       │       ├── __mocks__
│       │       │   └── move-task.js
│       │       ├── add-task.test.js
│       │       ├── analyze-complexity.test.js
│       │       ├── expand-all.test.js
│       │       ├── get-tasks.test.js
│       │       ├── initialize-project.test.js
│       │       ├── move-task-cross-tag-options.test.js
│       │       ├── move-task-cross-tag.test.js
│       │       ├── remove-task.test.js
│       │       └── tool-registration.test.js
│       ├── mcp-providers
│       │   ├── mcp-components.test.js
│       │   └── mcp-provider.test.js
│       ├── parse-prd.test.js
│       ├── profiles
│       │   ├── amp-integration.test.js
│       │   ├── claude-integration.test.js
│       │   ├── cline-integration.test.js
│       │   ├── codex-integration.test.js
│       │   ├── cursor-integration.test.js
│       │   ├── gemini-integration.test.js
│       │   ├── kilo-integration.test.js
│       │   ├── kiro-integration.test.js
│       │   ├── mcp-config-validation.test.js
│       │   ├── opencode-integration.test.js
│       │   ├── profile-safety-check.test.js
│       │   ├── roo-integration.test.js
│       │   ├── rule-transformer-cline.test.js
│       │   ├── rule-transformer-cursor.test.js
│       │   ├── rule-transformer-gemini.test.js
│       │   ├── rule-transformer-kilo.test.js
│       │   ├── rule-transformer-kiro.test.js
│       │   ├── rule-transformer-opencode.test.js
│       │   ├── rule-transformer-roo.test.js
│       │   ├── rule-transformer-trae.test.js
│       │   ├── rule-transformer-vscode.test.js
│       │   ├── rule-transformer-windsurf.test.js
│       │   ├── rule-transformer-zed.test.js
│       │   ├── rule-transformer.test.js
│       │   ├── selective-profile-removal.test.js
│       │   ├── subdirectory-support.test.js
│       │   ├── trae-integration.test.js
│       │   ├── vscode-integration.test.js
│       │   ├── windsurf-integration.test.js
│       │   └── zed-integration.test.js
│       ├── progress
│       │   └── base-progress-tracker.test.js
│       ├── prompt-manager.test.js
│       ├── prompts
│       │   ├── expand-task-prompt.test.js
│       │   └── prompt-migration.test.js
│       ├── scripts
│       │   └── modules
│       │       ├── commands
│       │       │   ├── move-cross-tag.test.js
│       │       │   └── README.md
│       │       ├── dependency-manager
│       │       │   ├── circular-dependencies.test.js
│       │       │   ├── cross-tag-dependencies.test.js
│       │       │   └── fix-dependencies-command.test.js
│       │       ├── task-manager
│       │       │   ├── add-subtask.test.js
│       │       │   ├── add-task.test.js
│       │       │   ├── analyze-task-complexity.test.js
│       │       │   ├── clear-subtasks.test.js
│       │       │   ├── complexity-report-tag-isolation.test.js
│       │       │   ├── expand-all-tasks.test.js
│       │       │   ├── expand-task.test.js
│       │       │   ├── find-next-task.test.js
│       │       │   ├── generate-task-files.test.js
│       │       │   ├── list-tasks.test.js
│       │       │   ├── models-baseurl.test.js
│       │       │   ├── move-task-cross-tag.test.js
│       │       │   ├── move-task.test.js
│       │       │   ├── parse-prd-schema.test.js
│       │       │   ├── parse-prd.test.js
│       │       │   ├── remove-subtask.test.js
│       │       │   ├── remove-task.test.js
│       │       │   ├── research.test.js
│       │       │   ├── scope-adjustment.test.js
│       │       │   ├── set-task-status.test.js
│       │       │   ├── setup.js
│       │       │   ├── update-single-task-status.test.js
│       │       │   ├── update-subtask-by-id.test.js
│       │       │   ├── update-task-by-id.test.js
│       │       │   └── update-tasks.test.js
│       │       ├── ui
│       │       │   └── cross-tag-error-display.test.js
│       │       └── utils-tag-aware-paths.test.js
│       ├── task-finder.test.js
│       ├── task-manager
│       │   ├── clear-subtasks.test.js
│       │   ├── move-task.test.js
│       │   ├── tag-boundary.test.js
│       │   └── tag-management.test.js
│       ├── task-master.test.js
│       ├── ui
│       │   └── indicators.test.js
│       ├── ui.test.js
│       ├── utils-strip-ansi.test.js
│       └── utils.test.js
├── tsconfig.json
├── tsdown.config.ts
├── turbo.json
└── update-task-migration-plan.md
```

# Files

--------------------------------------------------------------------------------
/tests/manual/progress/test-parse-prd.js:
--------------------------------------------------------------------------------

```javascript
#!/usr/bin/env node

/**
 * test-parse-prd.js
 *
 * Comprehensive integration test for parse-prd functionality.
 * Tests MCP streaming, CLI streaming, and non-streaming modes.
 * Validates token tracking, message formats, and priority indicators across all contexts.
 */

import fs from 'fs';
import path from 'path';
import { fileURLToPath, pathToFileURL } from 'url';
import chalk from 'chalk';

// Get current directory
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);

// Get project root (three levels up from tests/manual/progress/)
const PROJECT_ROOT = path.resolve(__dirname, '..', '..', '..');

// Import the parse-prd function
import parsePRD from '../../../scripts/modules/task-manager/parse-prd/index.js';

/**
 * Mock progress reporter that captures parse-prd progress callbacks.
 * Each report is stored with a timestamp relative to construction time;
 * printSummary() replays the history and validates the expected message
 * formats (initial, per-task, completion).
 */
class MockProgressReporter {
	constructor(enableDebug = true) {
		this.enableDebug = enableDebug;
		this.progressHistory = [];
		this.startTime = Date.now();
	}

	/**
	 * Record one progress report; echo it to the console when debug is on.
	 * @param {{progress?: number, total?: number, message: string}} data
	 */
	async reportProgress(data) {
		const elapsed = Date.now() - this.startTime;
		this.progressHistory.push({ timestamp: elapsed, ...data });

		if (!this.enableDebug) {
			return;
		}

		const percentage = data.total
			? Math.round((data.progress / data.total) * 100)
			: 0;
		console.log(
			chalk.blue(`[${elapsed}ms]`),
			chalk.green(`${percentage}%`),
			chalk.yellow(data.message)
		);
	}

	/** @returns {Array<object>} all captured progress entries, in order */
	getProgressHistory() {
		return this.progressHistory;
	}

	/**
	 * Print every captured report and a PASS/FAIL check of the three
	 * expected message formats.
	 */
	printSummary() {
		console.log(chalk.green('\n=== Progress Summary ==='));
		console.log(`Total progress reports: ${this.progressHistory.length}`);
		const lastEntry = this.progressHistory[this.progressHistory.length - 1];
		console.log(`Duration: ${lastEntry?.timestamp || 0}ms`);

		for (const [index, entry] of this.progressHistory.entries()) {
			const percentage = entry.total
				? Math.round((entry.progress / entry.total) * 100)
				: 0;
			console.log(
				`${index + 1}. [${entry.timestamp}ms] ${percentage}% - ${entry.message}`
			);
		}

		const history = this.progressHistory;
		const hasInitialMessage = history.some(
			(entry) =>
				entry.message.includes('Starting PRD analysis') &&
				entry.message.includes('Input:') &&
				entry.message.includes('tokens')
		);
		// Flexible pattern to tolerate whitespace variations in task lines
		const taskLinePattern =
			/^[🔴🟠🟢⚪]{3} Task \d+\/\d+ - .+ \| ~Output: \d+ tokens/u;
		const hasTaskMessages = history.some((entry) =>
			taskLinePattern.test(entry.message.trim())
		);
		const hasCompletionMessage = history.some(
			(entry) =>
				entry.message.includes('✅ Task Generation Completed') &&
				entry.message.includes('Tokens (I/O):')
		);

		console.log(chalk.cyan('\n=== Message Format Validation ==='));
		console.log(
			`✅ Initial message format: ${hasInitialMessage ? 'PASS' : 'FAIL'}`
		);
		console.log(`✅ Task message format: ${hasTaskMessages ? 'PASS' : 'FAIL'}`);
		console.log(
			`✅ Completion message format: ${hasCompletionMessage ? 'PASS' : 'FAIL'}`
		);
	}
}

/**
 * Mock MCP logger that captures log calls for later inspection.
 * Implements the mcpLog interface parse-prd expects
 * (info/warn/error/debug/success) and records each call with its level,
 * a timestamp, and the space-joined message.
 */
class MockMCPLogger {
	constructor(enableDebug = true) {
		this.enableDebug = enableDebug;
		this.logs = [];
	}

	/**
	 * Record a log entry; echo it in color when debug output is enabled.
	 * @param {string} level - one of info|warn|error|debug|success
	 * @param {...*} args - message parts, joined with spaces for storage
	 */
	_log(level, ...args) {
		this.logs.push({
			level,
			timestamp: Date.now(),
			message: args.join(' ')
		});

		if (!this.enableDebug) {
			return;
		}

		const colorMap = {
			info: chalk.blue,
			warn: chalk.yellow,
			error: chalk.red,
			debug: chalk.gray,
			success: chalk.green
		};
		const colorize = colorMap[level] || chalk.white;
		console.log(colorize(`[${level.toUpperCase()}]`), ...args);
	}

	info(...args) {
		this._log('info', ...args);
	}

	warn(...args) {
		this._log('warn', ...args);
	}

	error(...args) {
		this._log('error', ...args);
	}

	debug(...args) {
		this._log('debug', ...args);
	}

	success(...args) {
		this._log('success', ...args);
	}

	/** @returns {Array<object>} all captured log entries, in order */
	getLogs() {
		return this.logs;
	}
}

/**
 * Absolute path to the shared sample PRD fixture used by every test mode.
 * @returns {string} path to tests/fixtures/sample-prd.txt under the project root
 */
function getSamplePRDPath() {
	const fixtureSegments = ['tests', 'fixtures', 'sample-prd.txt'];
	return path.resolve(PROJECT_ROOT, ...fixtureSegments);
}

/**
 * Write a minimal Task Master config under this directory's .taskmaster/
 * folder so parse-prd can resolve model settings during the test run.
 * @returns {string} absolute path to the written config.json
 */
function createTestConfig() {
	// main and fallback intentionally share the same Anthropic settings
	const anthropicModel = {
		provider: 'anthropic',
		modelId: 'claude-3-5-sonnet',
		maxTokens: 64000,
		temperature: 0.2
	};

	const testConfig = {
		models: {
			main: { ...anthropicModel },
			research: {
				provider: 'perplexity',
				modelId: 'sonar-pro',
				maxTokens: 8700,
				temperature: 0.1
			},
			fallback: { ...anthropicModel }
		},
		global: {
			logLevel: 'info',
			debug: false,
			defaultSubtasks: 5,
			defaultPriority: 'medium',
			projectName: 'Task Master Test',
			ollamaBaseURL: 'http://localhost:11434/api',
			bedrockBaseURL: 'https://bedrock.us-east-1.amazonaws.com'
		}
	};

	const taskmasterDir = path.join(__dirname, '.taskmaster');
	const configPath = path.join(taskmasterDir, 'config.json');

	// recursive: true is a no-op when the directory already exists
	fs.mkdirSync(taskmasterDir, { recursive: true });
	fs.writeFileSync(configPath, JSON.stringify(testConfig, null, 2));
	return configPath;
}

/**
 * Prepare the inputs for one test run: the PRD fixture path, a per-test
 * tasks output path, and a freshly written config file. Any stale tasks
 * output from a previous run is removed.
 * @param {string} testName - unique suffix for the tasks output filename
 * @returns {{testPRDPath: string, testTasksPath: string, configPath: string}}
 */
function setupTestFiles(testName) {
	const testPRDPath = getSamplePRDPath();
	const testTasksPath = path.join(__dirname, `test-${testName}-tasks.json`);
	const configPath = createTestConfig();

	// force: true makes removal a no-op when the file does not exist
	fs.rmSync(testTasksPath, { force: true });

	return { testPRDPath, testTasksPath, configPath };
}

/**
 * Remove the generated tasks file and the temporary config file, if present.
 * Safe to call when either file is already gone.
 * @param {string} testTasksPath - tasks output file to delete
 * @param {string} configPath - config file to delete
 */
function cleanupTestFiles(testTasksPath, configPath) {
	for (const filePath of [testTasksPath, configPath]) {
		if (fs.existsSync(filePath)) {
			fs.unlinkSync(filePath);
		}
	}
}

/**
 * Invoke parsePRD with the standard test defaults, timing the call.
 * @param {string} testPRDPath - input PRD file
 * @param {string} testTasksPath - output tasks file
 * @param {number} numTasks - number of tasks to generate
 * @param {object} [options] - extra parsePRD options (reportProgress, mcpLog, ...);
 *   overrides the defaults on key collision
 * @returns {Promise<{result: object, duration: number}>} parsePRD result and elapsed ms
 */
async function runParsePRD(testPRDPath, testTasksPath, numTasks, options = {}) {
	const defaults = {
		force: true,
		append: false,
		research: false,
		projectRoot: PROJECT_ROOT
	};

	const startTime = Date.now();
	const result = await parsePRD(testPRDPath, testTasksPath, numTasks, {
		...defaults,
		...options
	});
	const duration = Date.now() - startTime;

	return { result, duration };
}

/**
 * Check that the tasks file was written and that its first task carries the
 * expected fields (id, title, description). Logs the outcome either way.
 * @param {string} testTasksPath - tasks file to inspect
 * @returns {boolean} true when the file exists and looks structurally valid
 */
function verifyTaskResults(testTasksPath) {
	if (!fs.existsSync(testTasksPath)) {
		console.log(chalk.red('❌ Tasks file was not created'));
		return false;
	}

	const tasksData = JSON.parse(fs.readFileSync(testTasksPath, 'utf8'));
	console.log(
		chalk.green(`\n✅ Tasks file created with ${tasksData.tasks.length} tasks`)
	);

	// Spot-check the first task for the required fields
	const [firstTask] = tasksData.tasks;
	if (firstTask && firstTask.id && firstTask.title && firstTask.description) {
		console.log(chalk.green('✅ Task structure is valid'));
		return true;
	}

	console.log(chalk.red('❌ Task structure is invalid'));
	return false;
}

/**
 * Print the progress summary and the captured MCP logs, then validate that
 * MCP progress messages carried emoji priority indicators.
 * @param {MockMCPLogger} mcpLogger - logger whose entries are replayed
 * @param {MockProgressReporter} progressReporter - reporter whose history is checked
 * @returns {{hasEmojiIndicators: boolean, logs: Array<object>}}
 */
function printMCPResults(mcpLogger, progressReporter) {
	progressReporter.printSummary();

	const colorMap = {
		info: chalk.blue,
		warn: chalk.yellow,
		error: chalk.red,
		debug: chalk.gray,
		success: chalk.green
	};

	console.log(chalk.cyan('\n=== MCP Logs ==='));
	const logs = mcpLogger.getLogs();
	for (const [index, log] of logs.entries()) {
		const colorize = colorMap[log.level] || chalk.white;
		console.log(
			`${index + 1}. ${colorize(`[${log.level.toUpperCase()}]`)} ${log.message}`
		);
	}

	// MCP-specific format: progress messages should use emoji priority markers
	const hasEmojiIndicators = progressReporter
		.getProgressHistory()
		.some((entry) => /[🔴🟠🟢]/u.test(entry.message));

	console.log(chalk.cyan('\n=== MCP-Specific Validation ==='));
	console.log(
		`✅ Emoji priority indicators: ${hasEmojiIndicators ? 'PASS' : 'FAIL'}`
	);

	return { hasEmojiIndicators, logs };
}

/**
 * Test MCP streaming: runs parse-prd with both a progress reporter and an
 * MCP logger attached, then validates message formats and the output tasks.
 * @param {number} [numTasks=10] - number of tasks to request
 * @returns {Promise<object>} summary: {success, duration, progressHistory,
 *   mcpLogs, hasEmojiIndicators, tasksValid, result} on success, or
 *   {success: false, error} on failure
 */
async function testMCPStreaming(numTasks = 10) {
	console.log(chalk.cyan('🧪 Testing MCP Streaming Functionality\n'));

	const { testPRDPath, testTasksPath, configPath } = setupTestFiles('mcp');
	const progressReporter = new MockProgressReporter(true);
	const mcpLogger = new MockMCPLogger(true); // Enable debug for MCP context

	try {
		console.log(chalk.yellow('Starting MCP streaming test...'));

		const { result, duration } = await runParsePRD(
			testPRDPath,
			testTasksPath,
			numTasks,
			{
				reportProgress: progressReporter.reportProgress.bind(progressReporter),
				mcpLog: mcpLogger // Add MCP context - this is the key difference
			}
		);

		console.log(
			chalk.green(`\n✅ MCP streaming test completed in ${duration}ms`)
		);

		const { hasEmojiIndicators, logs } = printMCPResults(
			mcpLogger,
			progressReporter
		);
		const isValidStructure = verifyTaskResults(testTasksPath);

		return {
			success: true,
			duration,
			progressHistory: progressReporter.getProgressHistory(),
			mcpLogs: logs,
			hasEmojiIndicators,
			// Expose the structure check so callers can report on it
			tasksValid: isValidStructure,
			result
		};
	} catch (error) {
		console.error(chalk.red(`❌ MCP streaming test failed: ${error.message}`));
		return {
			success: false,
			error: error.message
		};
	} finally {
		cleanupTestFiles(testTasksPath, configPath);
	}
}

/**
 * Test CLI streaming: no reportProgress is provided, so parse-prd falls back
 * to the default CLI text-mode streaming reporter.
 * @param {number} [numTasks=10] - number of tasks to request
 * @returns {Promise<object>} summary: {success, duration, tasksValid, result}
 *   on success, or {success: false, error} on failure
 */
async function testCLIStreaming(numTasks = 10) {
	console.log(chalk.cyan('🧪 Testing CLI Streaming (No Progress Reporter)\n'));

	const { testPRDPath, testTasksPath, configPath } = setupTestFiles('cli');

	try {
		console.log(chalk.yellow('Starting CLI streaming test...'));

		// No reportProgress provided; CLI text mode uses the default streaming reporter
		const { result, duration } = await runParsePRD(
			testPRDPath,
			testTasksPath,
			numTasks
		);

		console.log(
			chalk.green(`\n✅ CLI streaming test completed in ${duration}ms`)
		);

		const isValidStructure = verifyTaskResults(testTasksPath);

		return {
			success: true,
			duration,
			// Expose the structure check so callers can report on it
			tasksValid: isValidStructure,
			result
		};
	} catch (error) {
		console.error(chalk.red(`❌ CLI streaming test failed: ${error.message}`));
		return {
			success: false,
			error: error.message
		};
	} finally {
		cleanupTestFiles(testTasksPath, configPath);
	}
}

/**
 * Test non-streaming mode: no reportProgress is provided, exercising the
 * non-streaming parse-prd path.
 * @param {number} [numTasks=10] - number of tasks to request
 * @returns {Promise<object>} summary: {success, duration, tasksValid, result}
 *   on success, or {success: false, error} on failure
 */
async function testNonStreaming(numTasks = 10) {
	console.log(chalk.cyan('🧪 Testing Non-Streaming Functionality\n'));

	const { testPRDPath, testTasksPath, configPath } =
		setupTestFiles('non-streaming');

	try {
		console.log(chalk.yellow('Starting non-streaming test...'));

		// Force non-streaming by not providing reportProgress
		const { result, duration } = await runParsePRD(
			testPRDPath,
			testTasksPath,
			numTasks
		);

		console.log(
			chalk.green(`\n✅ Non-streaming test completed in ${duration}ms`)
		);

		const isValidStructure = verifyTaskResults(testTasksPath);

		return {
			success: true,
			duration,
			// Expose the structure check so callers can report on it
			tasksValid: isValidStructure,
			result
		};
	} catch (error) {
		console.error(chalk.red(`❌ Non-streaming test failed: ${error.message}`));
		return {
			success: false,
			error: error.message
		};
	} finally {
		cleanupTestFiles(testTasksPath, configPath);
	}
}

/**
 * Print a timing comparison between a streaming run and a non-streaming run.
 * Bails out early when either run failed.
 * @param {object} streamingResult - summary returned by a streaming test
 * @param {object} nonStreamingResult - summary returned by the non-streaming test
 */
function compareResults(streamingResult, nonStreamingResult) {
	console.log(chalk.cyan('\n=== Results Comparison ==='));

	if (!streamingResult.success || !nonStreamingResult.success) {
		console.log(chalk.red('❌ Cannot compare - one or both tests failed'));
		return;
	}

	const streamMs = streamingResult.duration;
	const plainMs = nonStreamingResult.duration;

	console.log(`Streaming duration: ${streamMs}ms`);
	console.log(`Non-streaming duration: ${plainMs}ms`);

	const durationDiff = Math.abs(streamMs - plainMs);
	const durationDiffPercent = Math.round(
		(durationDiff / Math.max(streamMs, plainMs)) * 100
	);
	console.log(
		`Duration difference: ${durationDiff}ms (${durationDiffPercent}%)`
	);

	if (streamingResult.progressHistory) {
		console.log(
			`Streaming progress reports: ${streamingResult.progressHistory.length}`
		);
	}

	console.log(chalk.green('✅ Both methods completed successfully'));
}

/**
 * Main test runner.
 * Usage: node test-parse-prd.js [testType] [numTasks]
 *   testType: streaming | mcp | mcp-streaming | cli | cli-streaming |
 *             non | non-streaming | both | all  (default: streaming)
 *   numTasks: number of tasks to request (default: 8)
 */
async function main() {
	const args = process.argv.slice(2);
	const testType = args[0] || 'streaming';
	// Always pass a radix; fall back to 8 when the argument is missing/invalid.
	const numTasks = Number.parseInt(args[1], 10) || 8;

	console.log(chalk.bold.cyan('🚀 Task Master PRD Streaming Tests\n'));
	console.log(chalk.blue(`Test type: ${testType}`));
	console.log(chalk.blue(`Number of tasks: ${numTasks}\n`));

	try {
		switch (testType.toLowerCase()) {
			// 'streaming' is the default testType; treat it as MCP streaming so
			// running with no arguments does not hit the unknown-type error path.
			case 'streaming':
			case 'mcp':
			case 'mcp-streaming':
				await testMCPStreaming(numTasks);
				break;

			case 'cli':
			case 'cli-streaming':
				await testCLIStreaming(numTasks);
				break;

			case 'non-streaming':
			case 'non':
				await testNonStreaming(numTasks);
				break;

			case 'both': {
				console.log(
					chalk.yellow(
						'Running both MCP streaming and non-streaming tests...\n'
					)
				);
				const mcpStreamingResult = await testMCPStreaming(numTasks);
				console.log('\n' + '='.repeat(60) + '\n');
				const nonStreamingResult = await testNonStreaming(numTasks);
				compareResults(mcpStreamingResult, nonStreamingResult);
				break;
			}

			case 'all': {
				console.log(chalk.yellow('Running all test types...\n'));
				const mcpResult = await testMCPStreaming(numTasks);
				console.log('\n' + '='.repeat(60) + '\n');
				const cliResult = await testCLIStreaming(numTasks);
				console.log('\n' + '='.repeat(60) + '\n');
				const nonStreamResult = await testNonStreaming(numTasks);

				console.log(chalk.cyan('\n=== All Tests Summary ==='));
				console.log(
					`MCP Streaming: ${mcpResult.success ? '✅ PASS' : '❌ FAIL'} ${mcpResult.hasEmojiIndicators ? '(✅ Emojis)' : '(❌ No Emojis)'}`
				);
				console.log(
					`CLI Streaming: ${cliResult.success ? '✅ PASS' : '❌ FAIL'}`
				);
				console.log(
					`Non-streaming: ${nonStreamResult.success ? '✅ PASS' : '❌ FAIL'}`
				);
				break;
			}

			default:
				console.log(chalk.red(`Unknown test type: ${testType}`));
				console.log(
					chalk.yellow(
						'Available options: mcp-streaming, cli-streaming, non-streaming, both, all'
					)
				);
				process.exit(1);
		}

		console.log(chalk.green('\n🎉 Tests completed successfully!'));
	} catch (error) {
		console.error(chalk.red(`\n❌ Test failed: ${error.message}`));
		console.error(chalk.red(error.stack));
		process.exit(1);
	}
}

// Invoke the CLI entry point only when this module is executed directly
// (not when it is imported by another module).
// NOTE(review): comparing against a hand-built `file://` string misses
// Windows drive-letter paths and percent-encoded characters;
// `url.pathToFileURL(process.argv[1]).href` would be more robust — confirm.
const isDirectInvocation = import.meta.url === `file://${process.argv[1]}`;
if (isDirectInvocation) {
	main();
}

```

--------------------------------------------------------------------------------
/.taskmaster/templates/example_prd_rpg.md:
--------------------------------------------------------------------------------

```markdown
<rpg-method>
# Repository Planning Graph (RPG) Method - PRD Template

This template teaches you (AI or human) how to create structured, dependency-aware PRDs using the RPG methodology from Microsoft Research. The key insight: separate WHAT (functional) from HOW (structural), then connect them with explicit dependencies.

## Core Principles

1. **Dual-Semantics**: Think functional (capabilities) AND structural (code organization) separately, then map them
2. **Explicit Dependencies**: Never assume - always state what depends on what
3. **Topological Order**: Build foundation first, then layers on top
4. **Progressive Refinement**: Start broad, refine iteratively

## How to Use This Template

- Follow the instructions in each `<instruction>` block
- Look at `<example>` blocks to see good vs bad patterns
- Fill in the content sections with your project details
- The AI reading this will learn the RPG method by following along
- Task Master will parse the resulting PRD into dependency-aware tasks

## Recommended Tools for Creating PRDs

When using this template to **create** a PRD (not parse it), use **code-context-aware AI assistants** for best results:

**Why?** The AI needs to understand your existing codebase to make good architectural decisions about modules, dependencies, and integration points.

**Recommended tools:**
- **Claude Code** (claude-code CLI) - Best for structured reasoning and large contexts
- **Cursor/Windsurf** - IDE integration with full codebase context
- **Gemini CLI** (gemini-cli) - Massive context window for large codebases
- **Codex/Grok CLI** - Strong code generation with context awareness

**Note:** Once your PRD is created, `task-master parse-prd` works with any configured AI model - it just needs to read the PRD text itself, not your codebase.
</rpg-method>

---

<overview>
<instruction>
Start with the problem, not the solution. Be specific about:
- What pain point exists?
- Who experiences it?
- Why existing solutions don't work?
- What success looks like (measurable outcomes)?

Keep this section focused - don't jump into implementation details yet.
</instruction>

## Problem Statement
[Describe the core problem. Be concrete about user pain points.]

## Target Users
[Define personas, their workflows, and what they're trying to achieve.]

## Success Metrics
[Quantifiable outcomes. Examples: "80% task completion via autopilot", "< 5% manual intervention rate"]

</overview>

---

<functional-decomposition>
<instruction>
Now think about CAPABILITIES (what the system DOES), not code structure yet.

Step 1: Identify high-level capability domains
- Think: "What major things does this system do?"
- Examples: Data Management, Core Processing, Presentation Layer

Step 2: For each capability, enumerate specific features
- Use explore-exploit strategy:
  * Exploit: What features are REQUIRED for core value?
  * Explore: What features make this domain COMPLETE?

Step 3: For each feature, define:
- Description: What it does in one sentence
- Inputs: What data/context it needs
- Outputs: What it produces/returns
- Behavior: Key logic or transformations

<example type="good">
Capability: Data Validation
  Feature: Schema validation
    - Description: Validate JSON payloads against defined schemas
    - Inputs: JSON object, schema definition
    - Outputs: Validation result (pass/fail) + error details
    - Behavior: Iterate fields, check types, enforce constraints

  Feature: Business rule validation
    - Description: Apply domain-specific validation rules
    - Inputs: Validated data object, rule set
    - Outputs: Boolean + list of violated rules
    - Behavior: Execute rules sequentially, short-circuit on failure
</example>

<example type="bad">
Capability: validation.js
  (Problem: This is a FILE, not a CAPABILITY. Mixing structure into functional thinking.)

Capability: Validation
  Feature: Make sure data is good
  (Problem: Too vague. No inputs/outputs. Not actionable.)
</example>
</instruction>

## Capability Tree

### Capability: [Name]
[Brief description of what this capability domain covers]

#### Feature: [Name]
- **Description**: [One sentence]
- **Inputs**: [What it needs]
- **Outputs**: [What it produces]
- **Behavior**: [Key logic]

#### Feature: [Name]
- **Description**:
- **Inputs**:
- **Outputs**:
- **Behavior**:

### Capability: [Name]
...

</functional-decomposition>

---

<structural-decomposition>
<instruction>
NOW think about code organization. Map capabilities to actual file/folder structure.

Rules:
1. Each capability maps to a module (folder or file)
2. Features within a capability map to functions/classes
3. Use clear module boundaries - each module has ONE responsibility
4. Define what each module exports (public interface)

The goal: Create a clear mapping between "what it does" (functional) and "where it lives" (structural).

<example type="good">
Capability: Data Validation
  → Maps to: src/validation/
    ├── schema-validator.js      (Schema validation feature)
    ├── rule-validator.js         (Business rule validation feature)
    └── index.js                  (Public exports)

Exports:
  - validateSchema(data, schema)
  - validateRules(data, rules)
</example>

<example type="bad">
Capability: Data Validation
  → Maps to: src/utils.js
  (Problem: "utils" is not a clear module boundary. Where do I find validation logic?)

Capability: Data Validation
  → Maps to: src/validation/everything.js
  (Problem: One giant file. Features should map to separate files for maintainability.)
</example>
</instruction>

## Repository Structure

```
project-root/
├── src/
│   ├── [module-name]/       # Maps to: [Capability Name]
│   │   ├── [file].js        # Maps to: [Feature Name]
│   │   └── index.js         # Public exports
│   └── [module-name]/
├── tests/
└── docs/
```

## Module Definitions

### Module: [Name]
- **Maps to capability**: [Capability from functional decomposition]
- **Responsibility**: [Single clear purpose]
- **File structure**:
  ```
  module-name/
  ├── feature1.js
  ├── feature2.js
  └── index.js
  ```
- **Exports**:
  - `functionName()` - [what it does]
  - `ClassName` - [what it does]

</structural-decomposition>

---

<dependency-graph>
<instruction>
This is THE CRITICAL SECTION for Task Master parsing.

Define explicit dependencies between modules. This creates the topological order for task execution.

Rules:
1. List modules in dependency order (foundation first)
2. For each module, state what it depends on
3. Foundation modules should have NO dependencies
4. Every non-foundation module should depend on at least one other module
5. Think: "What must EXIST before I can build this module?"

<example type="good">
Foundation Layer (no dependencies):
  - error-handling: No dependencies
  - config-manager: No dependencies
  - base-types: No dependencies

Data Layer:
  - schema-validator: Depends on [base-types, error-handling]
  - data-ingestion: Depends on [schema-validator, config-manager]

Core Layer:
  - algorithm-engine: Depends on [base-types, error-handling]
  - pipeline-orchestrator: Depends on [algorithm-engine, data-ingestion]
</example>

<example type="bad">
- validation: Depends on API
- API: Depends on validation
(Problem: Circular dependency. This will cause build/runtime issues.)

- user-auth: Depends on everything
(Problem: Too many dependencies. Should be more focused.)
</example>
</instruction>

## Dependency Chain

### Foundation Layer (Phase 0)
No dependencies - these are built first.

- **[Module Name]**: [What it provides]
- **[Module Name]**: [What it provides]

### [Layer Name] (Phase 1)
- **[Module Name]**: Depends on [[module-from-phase-0], [module-from-phase-0]]
- **[Module Name]**: Depends on [[module-from-phase-0]]

### [Layer Name] (Phase 2)
- **[Module Name]**: Depends on [[module-from-phase-1], [module-from-foundation]]

[Continue building up layers...]

</dependency-graph>

---

<implementation-roadmap>
<instruction>
Turn the dependency graph into concrete development phases.

Each phase should:
1. Have clear entry criteria (what must exist before starting)
2. Contain tasks that can be parallelized (no inter-dependencies within phase)
3. Have clear exit criteria (how do we know phase is complete?)
4. Build toward something USABLE (not just infrastructure)

Phase ordering follows topological sort of dependency graph.

<example type="good">
Phase 0: Foundation
  Entry: Clean repository
  Tasks:
    - Implement error handling utilities
    - Create base type definitions
    - Setup configuration system
  Exit: Other modules can import foundation without errors

Phase 1: Data Layer
  Entry: Phase 0 complete
  Tasks:
    - Implement schema validator (uses: base types, error handling)
    - Build data ingestion pipeline (uses: validator, config)
  Exit: End-to-end data flow from input to validated output
</example>

<example type="bad">
Phase 1: Build Everything
  Tasks:
    - API
    - Database
    - UI
    - Tests
  (Problem: No clear focus. Too broad. Dependencies not considered.)
</example>
</instruction>

## Development Phases

### Phase 0: [Foundation Name]
**Goal**: [What foundational capability this establishes]

**Entry Criteria**: [What must be true before starting]

**Tasks**:
- [ ] [Task name] (depends on: [none or list])
  - Acceptance criteria: [How we know it's done]
  - Test strategy: [What tests prove it works]

- [ ] [Task name] (depends on: [none or list])

**Exit Criteria**: [Observable outcome that proves phase complete]

**Delivers**: [What can users/developers do after this phase?]

---

### Phase 1: [Layer Name]
**Goal**:

**Entry Criteria**: Phase 0 complete

**Tasks**:
- [ ] [Task name] (depends on: [[tasks-from-phase-0]])
- [ ] [Task name] (depends on: [[tasks-from-phase-0]])

**Exit Criteria**:

**Delivers**:

---

[Continue with more phases...]

</implementation-roadmap>

---

<test-strategy>
<instruction>
Define how testing will be integrated throughout development (TDD approach).

Specify:
1. Test pyramid ratios (unit vs integration vs e2e)
2. Coverage requirements
3. Critical test scenarios
4. Test generation guidelines for Surgical Test Generator

This section guides the AI when generating tests during the RED phase of TDD.

<example type="good">
Critical Test Scenarios for Data Validation module:
  - Happy path: Valid data passes all checks
  - Edge cases: Empty strings, null values, boundary numbers
  - Error cases: Invalid types, missing required fields
  - Integration: Validator works with ingestion pipeline
</example>
</instruction>

## Test Pyramid

```
        /\
       /E2E\       ← [X]% (End-to-end, slow, comprehensive)
      /------\
     /Integration\ ← [Y]% (Module interactions)
    /------------\
   /  Unit Tests  \ ← [Z]% (Fast, isolated, deterministic)
  /----------------\
```

## Coverage Requirements
- Line coverage: [X]% minimum
- Branch coverage: [X]% minimum
- Function coverage: [X]% minimum
- Statement coverage: [X]% minimum

## Critical Test Scenarios

### [Module/Feature Name]
**Happy path**:
- [Scenario description]
- Expected: [What should happen]

**Edge cases**:
- [Scenario description]
- Expected: [What should happen]

**Error cases**:
- [Scenario description]
- Expected: [How system handles failure]

**Integration points**:
- [What interactions to test]
- Expected: [End-to-end behavior]

## Test Generation Guidelines
[Specific instructions for Surgical Test Generator about what to focus on, what patterns to follow, project-specific test conventions]

</test-strategy>

---

<architecture>
<instruction>
Describe technical architecture, data models, and key design decisions.

Keep this section AFTER functional/structural decomposition - implementation details come after understanding structure.
</instruction>

## System Components
[Major architectural pieces and their responsibilities]

## Data Models
[Core data structures, schemas, database design]

## Technology Stack
[Languages, frameworks, key libraries]

**Decision: [Technology/Pattern]**
- **Rationale**: [Why chosen]
- **Trade-offs**: [What we're giving up]
- **Alternatives considered**: [What else we looked at]

</architecture>

---

<risks>
<instruction>
Identify risks that could derail development and how to mitigate them.

Categories:
- Technical risks (complexity, unknowns)
- Dependency risks (blocking issues)
- Scope risks (creep, underestimation)
</instruction>

## Technical Risks
**Risk**: [Description]
- **Impact**: [High/Medium/Low - effect on project]
- **Likelihood**: [High/Medium/Low]
- **Mitigation**: [How to address]
- **Fallback**: [Plan B if mitigation fails]

## Dependency Risks
[External dependencies, blocking issues]

## Scope Risks
[Scope creep, underestimation, unclear requirements]

</risks>

---

<appendix>
## References
[Papers, documentation, similar systems]

## Glossary
[Domain-specific terms]

## Open Questions
[Things to resolve during development]
</appendix>

---

<task-master-integration>
# How Task Master Uses This PRD

When you run `task-master parse-prd <file>.txt`, the parser:

1. **Extracts capabilities** → Main tasks
   - Each `### Capability:` becomes a top-level task

2. **Extracts features** → Subtasks
   - Each `#### Feature:` becomes a subtask under its capability

3. **Parses dependencies** → Task dependencies
   - `Depends on: [X, Y]` sets task.dependencies = ["X", "Y"]

4. **Orders by phases** → Task priorities
   - Phase 0 tasks = highest priority
   - Phase N tasks = lower priority, properly sequenced

5. **Uses test strategy** → Test generation context
   - Feeds test scenarios to Surgical Test Generator during implementation

**Result**: A dependency-aware task graph that can be executed in topological order.

## Why RPG Structure Matters

Traditional flat PRDs lead to:
- ❌ Unclear task dependencies
- ❌ Arbitrary task ordering
- ❌ Circular dependencies discovered late
- ❌ Poorly scoped tasks

RPG-structured PRDs provide:
- ✅ Explicit dependency chains
- ✅ Topological execution order
- ✅ Clear module boundaries
- ✅ Validated task graph before implementation

## Tips for Best Results

1. **Spend time on dependency graph** - This is the most valuable section for Task Master
2. **Keep features atomic** - Each feature should be independently testable
3. **Progressive refinement** - Start broad, use `task-master expand` to break down complex tasks
4. **Use research mode** - `task-master parse-prd --research` leverages AI for better task generation
</task-master-integration>

```

--------------------------------------------------------------------------------
/assets/example_prd_rpg.txt:
--------------------------------------------------------------------------------

```
<rpg-method>
# Repository Planning Graph (RPG) Method - PRD Template

This template teaches you (AI or human) how to create structured, dependency-aware PRDs using the RPG methodology from Microsoft Research. The key insight: separate WHAT (functional) from HOW (structural), then connect them with explicit dependencies.

## Core Principles

1. **Dual-Semantics**: Think functional (capabilities) AND structural (code organization) separately, then map them
2. **Explicit Dependencies**: Never assume - always state what depends on what
3. **Topological Order**: Build foundation first, then layers on top
4. **Progressive Refinement**: Start broad, refine iteratively

## How to Use This Template

- Follow the instructions in each `<instruction>` block
- Look at `<example>` blocks to see good vs bad patterns
- Fill in the content sections with your project details
- The AI reading this will learn the RPG method by following along
- Task Master will parse the resulting PRD into dependency-aware tasks

## Recommended Tools for Creating PRDs

When using this template to **create** a PRD (not parse it), use **code-context-aware AI assistants** for best results:

**Why?** The AI needs to understand your existing codebase to make good architectural decisions about modules, dependencies, and integration points.

**Recommended tools:**
- **Claude Code** (claude-code CLI) - Best for structured reasoning and large contexts
- **Cursor/Windsurf** - IDE integration with full codebase context
- **Gemini CLI** (gemini-cli) - Massive context window for large codebases
- **Codex/Grok CLI** - Strong code generation with context awareness

**Note:** Once your PRD is created, `task-master parse-prd` works with any configured AI model - it just needs to read the PRD text itself, not your codebase.
</rpg-method>

---

<overview>
<instruction>
Start with the problem, not the solution. Be specific about:
- What pain point exists?
- Who experiences it?
- Why existing solutions don't work?
- What success looks like (measurable outcomes)?

Keep this section focused - don't jump into implementation details yet.
</instruction>

## Problem Statement
[Describe the core problem. Be concrete about user pain points.]

## Target Users
[Define personas, their workflows, and what they're trying to achieve.]

## Success Metrics
[Quantifiable outcomes. Examples: "80% task completion via autopilot", "< 5% manual intervention rate"]

</overview>

---

<functional-decomposition>
<instruction>
Now think about CAPABILITIES (what the system DOES), not code structure yet.

Step 1: Identify high-level capability domains
- Think: "What major things does this system do?"
- Examples: Data Management, Core Processing, Presentation Layer

Step 2: For each capability, enumerate specific features
- Use explore-exploit strategy:
  * Exploit: What features are REQUIRED for core value?
  * Explore: What features make this domain COMPLETE?

Step 3: For each feature, define:
- Description: What it does in one sentence
- Inputs: What data/context it needs
- Outputs: What it produces/returns
- Behavior: Key logic or transformations

<example type="good">
Capability: Data Validation
  Feature: Schema validation
    - Description: Validate JSON payloads against defined schemas
    - Inputs: JSON object, schema definition
    - Outputs: Validation result (pass/fail) + error details
    - Behavior: Iterate fields, check types, enforce constraints

  Feature: Business rule validation
    - Description: Apply domain-specific validation rules
    - Inputs: Validated data object, rule set
    - Outputs: Boolean + list of violated rules
    - Behavior: Execute rules sequentially, short-circuit on failure
</example>

<example type="bad">
Capability: validation.js
  (Problem: This is a FILE, not a CAPABILITY. Mixing structure into functional thinking.)

Capability: Validation
  Feature: Make sure data is good
  (Problem: Too vague. No inputs/outputs. Not actionable.)
</example>
</instruction>

## Capability Tree

### Capability: [Name]
[Brief description of what this capability domain covers]

#### Feature: [Name]
- **Description**: [One sentence]
- **Inputs**: [What it needs]
- **Outputs**: [What it produces]
- **Behavior**: [Key logic]

#### Feature: [Name]
- **Description**:
- **Inputs**:
- **Outputs**:
- **Behavior**:

### Capability: [Name]
...

</functional-decomposition>

---

<structural-decomposition>
<instruction>
NOW think about code organization. Map capabilities to actual file/folder structure.

Rules:
1. Each capability maps to a module (folder or file)
2. Features within a capability map to functions/classes
3. Use clear module boundaries - each module has ONE responsibility
4. Define what each module exports (public interface)

The goal: Create a clear mapping between "what it does" (functional) and "where it lives" (structural).

<example type="good">
Capability: Data Validation
  → Maps to: src/validation/
    ├── schema-validator.js      (Schema validation feature)
    ├── rule-validator.js         (Business rule validation feature)
    └── index.js                  (Public exports)

Exports:
  - validateSchema(data, schema)
  - validateRules(data, rules)
</example>

<example type="bad">
Capability: Data Validation
  → Maps to: src/utils.js
  (Problem: "utils" is not a clear module boundary. Where do I find validation logic?)

Capability: Data Validation
  → Maps to: src/validation/everything.js
  (Problem: One giant file. Features should map to separate files for maintainability.)
</example>
</instruction>

## Repository Structure

```
project-root/
├── src/
│   ├── [module-name]/       # Maps to: [Capability Name]
│   │   ├── [file].js        # Maps to: [Feature Name]
│   │   └── index.js         # Public exports
│   └── [module-name]/
├── tests/
└── docs/
```

## Module Definitions

### Module: [Name]
- **Maps to capability**: [Capability from functional decomposition]
- **Responsibility**: [Single clear purpose]
- **File structure**:
  ```
  module-name/
  ├── feature1.js
  ├── feature2.js
  └── index.js
  ```
- **Exports**:
  - `functionName()` - [what it does]
  - `ClassName` - [what it does]

</structural-decomposition>

---

<dependency-graph>
<instruction>
This is THE CRITICAL SECTION for Task Master parsing.

Define explicit dependencies between modules. This creates the topological order for task execution.

Rules:
1. List modules in dependency order (foundation first)
2. For each module, state what it depends on
3. Foundation modules should have NO dependencies
4. Every non-foundation module should depend on at least one other module
5. Think: "What must EXIST before I can build this module?"

<example type="good">
Foundation Layer (no dependencies):
  - error-handling: No dependencies
  - config-manager: No dependencies
  - base-types: No dependencies

Data Layer:
  - schema-validator: Depends on [base-types, error-handling]
  - data-ingestion: Depends on [schema-validator, config-manager]

Core Layer:
  - algorithm-engine: Depends on [base-types, error-handling]
  - pipeline-orchestrator: Depends on [algorithm-engine, data-ingestion]
</example>

<example type="bad">
- validation: Depends on API
- API: Depends on validation
(Problem: Circular dependency. This will cause build/runtime issues.)

- user-auth: Depends on everything
(Problem: Too many dependencies. Should be more focused.)
</example>
</instruction>

## Dependency Chain

### Foundation Layer (Phase 0)
No dependencies - these are built first.

- **[Module Name]**: [What it provides]
- **[Module Name]**: [What it provides]

### [Layer Name] (Phase 1)
- **[Module Name]**: Depends on [[module-from-phase-0], [module-from-phase-0]]
- **[Module Name]**: Depends on [[module-from-phase-0]]

### [Layer Name] (Phase 2)
- **[Module Name]**: Depends on [[module-from-phase-1], [module-from-foundation]]

[Continue building up layers...]

</dependency-graph>

---

<implementation-roadmap>
<instruction>
Turn the dependency graph into concrete development phases.

Each phase should:
1. Have clear entry criteria (what must exist before starting)
2. Contain tasks that can be parallelized (no inter-dependencies within phase)
3. Have clear exit criteria (how do we know phase is complete?)
4. Build toward something USABLE (not just infrastructure)

Phase ordering follows topological sort of dependency graph.

<example type="good">
Phase 0: Foundation
  Entry: Clean repository
  Tasks:
    - Implement error handling utilities
    - Create base type definitions
    - Setup configuration system
  Exit: Other modules can import foundation without errors

Phase 1: Data Layer
  Entry: Phase 0 complete
  Tasks:
    - Implement schema validator (uses: base types, error handling)
    - Build data ingestion pipeline (uses: validator, config)
  Exit: End-to-end data flow from input to validated output
</example>

<example type="bad">
Phase 1: Build Everything
  Tasks:
    - API
    - Database
    - UI
    - Tests
  (Problem: No clear focus. Too broad. Dependencies not considered.)
</example>
</instruction>

## Development Phases

### Phase 0: [Foundation Name]
**Goal**: [What foundational capability this establishes]

**Entry Criteria**: [What must be true before starting]

**Tasks**:
- [ ] [Task name] (depends on: [none or list])
  - Acceptance criteria: [How we know it's done]
  - Test strategy: [What tests prove it works]

- [ ] [Task name] (depends on: [none or list])

**Exit Criteria**: [Observable outcome that proves phase complete]

**Delivers**: [What can users/developers do after this phase?]

---

### Phase 1: [Layer Name]
**Goal**:

**Entry Criteria**: Phase 0 complete

**Tasks**:
- [ ] [Task name] (depends on: [[tasks-from-phase-0]])
- [ ] [Task name] (depends on: [[tasks-from-phase-0]])

**Exit Criteria**:

**Delivers**:

---

[Continue with more phases...]

</implementation-roadmap>

---

<test-strategy>
<instruction>
Define how testing will be integrated throughout development (TDD approach).

Specify:
1. Test pyramid ratios (unit vs integration vs e2e)
2. Coverage requirements
3. Critical test scenarios
4. Test generation guidelines for Surgical Test Generator

This section guides the AI when generating tests during the RED phase of TDD.

<example type="good">
Critical Test Scenarios for Data Validation module:
  - Happy path: Valid data passes all checks
  - Edge cases: Empty strings, null values, boundary numbers
  - Error cases: Invalid types, missing required fields
  - Integration: Validator works with ingestion pipeline
</example>
</instruction>

## Test Pyramid

```
        /\
       /E2E\       ← [X]% (End-to-end, slow, comprehensive)
      /------\
     /Integration\ ← [Y]% (Module interactions)
    /------------\
   /  Unit Tests  \ ← [Z]% (Fast, isolated, deterministic)
  /----------------\
```

## Coverage Requirements
- Line coverage: [X]% minimum
- Branch coverage: [X]% minimum
- Function coverage: [X]% minimum
- Statement coverage: [X]% minimum

## Critical Test Scenarios

### [Module/Feature Name]
**Happy path**:
- [Scenario description]
- Expected: [What should happen]

**Edge cases**:
- [Scenario description]
- Expected: [What should happen]

**Error cases**:
- [Scenario description]
- Expected: [How system handles failure]

**Integration points**:
- [What interactions to test]
- Expected: [End-to-end behavior]

## Test Generation Guidelines
[Specific instructions for Surgical Test Generator about what to focus on, what patterns to follow, project-specific test conventions]

</test-strategy>

---

<architecture>
<instruction>
Describe technical architecture, data models, and key design decisions.

Keep this section AFTER functional/structural decomposition - implementation details come after understanding structure.
</instruction>

## System Components
[Major architectural pieces and their responsibilities]

## Data Models
[Core data structures, schemas, database design]

## Technology Stack
[Languages, frameworks, key libraries]

**Decision: [Technology/Pattern]**
- **Rationale**: [Why chosen]
- **Trade-offs**: [What we're giving up]
- **Alternatives considered**: [What else we looked at]

</architecture>

---

<risks>
<instruction>
Identify risks that could derail development and how to mitigate them.

Categories:
- Technical risks (complexity, unknowns)
- Dependency risks (blocking issues)
- Scope risks (creep, underestimation)
</instruction>

## Technical Risks
**Risk**: [Description]
- **Impact**: [High/Medium/Low - effect on project]
- **Likelihood**: [High/Medium/Low]
- **Mitigation**: [How to address]
- **Fallback**: [Plan B if mitigation fails]

## Dependency Risks
[External dependencies, blocking issues]

## Scope Risks
[Scope creep, underestimation, unclear requirements]

</risks>

---

<appendix>
## References
[Papers, documentation, similar systems]

## Glossary
[Domain-specific terms]

## Open Questions
[Things to resolve during development]
</appendix>

---

<task-master-integration>
# How Task Master Uses This PRD

When you run `task-master parse-prd <file>.txt`, the parser:

1. **Extracts capabilities** → Main tasks
   - Each `### Capability:` becomes a top-level task

2. **Extracts features** → Subtasks
   - Each `#### Feature:` becomes a subtask under its capability

3. **Parses dependencies** → Task dependencies
   - `Depends on: [X, Y]` sets task.dependencies = ["X", "Y"]

4. **Orders by phases** → Task priorities
   - Phase 0 tasks = highest priority
   - Phase N tasks = lower priority, properly sequenced

5. **Uses test strategy** → Test generation context
   - Feeds test scenarios to Surgical Test Generator during implementation

**Result**: A dependency-aware task graph that can be executed in topological order.

## Why RPG Structure Matters

Traditional flat PRDs lead to:
- ❌ Unclear task dependencies
- ❌ Arbitrary task ordering
- ❌ Circular dependencies discovered late
- ❌ Poorly scoped tasks

RPG-structured PRDs provide:
- ✅ Explicit dependency chains
- ✅ Topological execution order
- ✅ Clear module boundaries
- ✅ Validated task graph before implementation

## Tips for Best Results

1. **Spend time on dependency graph** - This is the most valuable section for Task Master
2. **Keep features atomic** - Each feature should be independently testable
3. **Progressive refinement** - Start broad, use `task-master expand` to break down complex tasks
4. **Use research mode** - `task-master parse-prd --research` leverages AI for better task generation
</task-master-integration>

```

--------------------------------------------------------------------------------
/packages/tm-core/src/common/interfaces/configuration.interface.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * @fileoverview Configuration interface definitions for the tm-core package
 * This file defines the contract for configuration management
 */

import type {
	StorageType,
	TaskComplexity,
	TaskPriority
} from '../types/index.js';

/**
 * The set of Conventional Commit prefixes that workflow-generated
 * commits are allowed to use (e.g. `feat: add login flow`).
 */
export type CommitType =
	| 'feat'
	| 'fix'
	| 'refactor'
	| 'test'
	| 'docs'
	| 'chore';

/**
 * Maps each AI role to the model identifier it should use.
 */
export interface ModelConfig {
	/** Model used for primary work: task generation and updates */
	main: string;
	/** Optional model dedicated to research-enhanced task analysis */
	research?: string;
	/** Model to fall back to when the main model fails */
	fallback: string;
}

/**
 * Connection settings for a single AI provider.
 */
export interface ProviderConfig {
	/** Provider identifier (e.g. 'anthropic', 'openai', 'perplexity') */
	name: string;
	/** API key used to authenticate with the provider */
	apiKey?: string;
	/** Override for the provider's default base URL */
	baseUrl?: string;
	/** Arbitrary provider-specific options */
	options?: Record<string, unknown>;
	/** Set to false to disable this provider */
	enabled?: boolean;
}

/**
 * Preferences governing how tasks are generated and managed.
 */
export interface TaskSettings {
	/** Priority assigned to newly created tasks */
	defaultPriority: TaskPriority;
	/** Complexity assumed when analyzing tasks */
	defaultComplexity: TaskComplexity;
	/**
	 * Upper bound on subtasks allowed under a single task
	 * @minimum 1
	 */
	maxSubtasks: number;
	/**
	 * Upper bound on tasks that may run concurrently
	 * @minimum 1
	 */
	maxConcurrentTasks: number;
	/** When true, task IDs are generated automatically */
	autoGenerateIds: boolean;
	/** Optional prefix prepended to task IDs (e.g. 'TASK-', 'TM-') */
	taskIdPrefix?: string;
	/** When true, task dependencies are validated */
	validateDependencies: boolean;
	/** When true, tasks carry automatic timestamps */
	enableTimestamps: boolean;
	/** When true, effort tracking is enabled for tasks */
	enableEffortTracking: boolean;
}

/**
 * Preferences for tag-based task organization and context management.
 */
export interface TagSettings {
	/** When true, tasks can be organized by tags */
	enableTags: boolean;
	/** Tag applied to tasks that specify none */
	defaultTag: string;
	/**
	 * Upper bound on tags attached to a single task
	 * @minimum 1
	 */
	maxTagsPerTask: number;
	/** When true, tags are created automatically from Git branch names */
	autoCreateFromBranch: boolean;
	/** Casing convention enforced for tag names */
	tagNamingConvention: 'kebab-case' | 'camelCase' | 'snake_case';
}

/**
 * Storage configuration resolved at runtime and used to select a
 * storage backend. This is the shape returned by getStorageConfig()
 * and consumed by StorageFactory.
 */
export interface RuntimeStorageConfig {
	/** Which storage backend to use */
	type: StorageType;
	/** Root directory for file-based storage, when configured */
	basePath?: string;
	/** Endpoint of the API backend (Hamster integration) */
	apiEndpoint?: string;
	/** Token presented when authenticating against the API */
	apiAccessToken?: string;
	/**
	 * True when the API side is configured (an endpoint or token is present)
	 * @computed Derived automatically from presence of apiEndpoint or apiAccessToken
	 * @internal Should not be set manually - computed by ConfigManager
	 */
	readonly apiConfigured: boolean;
}

/**
 * Storage and persistence settings
 * Extended storage settings including file operation preferences
 */
export interface StorageSettings
	extends Omit<RuntimeStorageConfig, 'apiConfigured'> {
	/** Base path for file storage (re-declared from RuntimeStorageConfig for documentation) */
	basePath?: string;
	/**
	 * Indicates whether API is configured
	 * Optional here (unlike RuntimeStorageConfig) because user config omits it
	 * @computed Derived automatically from presence of apiEndpoint or apiAccessToken
	 * @internal Should not be set manually in user config - computed by ConfigManager
	 */
	readonly apiConfigured?: boolean;
	/** Enable automatic backups */
	enableBackup: boolean;
	/**
	 * Maximum number of backups to retain
	 * @minimum 0
	 */
	maxBackups: number;
	/** Enable compression for storage */
	enableCompression: boolean;
	/** File encoding for text files */
	encoding: BufferEncoding;
	/** Enable atomic file operations */
	atomicOperations: boolean;
}

/**
 * Retry and resilience settings
 */
export interface RetrySettings {
	/**
	 * Number of retry attempts for failed operations
	 * @minimum 0
	 */
	retryAttempts: number;
	/**
	 * Base delay between retries in milliseconds
	 * (presumably scaled by backoffMultiplier per attempt — confirm in retry implementation)
	 * @minimum 0
	 */
	retryDelay: number;
	/**
	 * Maximum delay between retries in milliseconds
	 * @minimum 0
	 */
	maxRetryDelay: number;
	/**
	 * Exponential backoff multiplier
	 * @minimum 1
	 */
	backoffMultiplier: number;
	/**
	 * Request timeout in milliseconds
	 * @minimum 0
	 */
	requestTimeout: number;
	/** Enable retry for network errors */
	retryOnNetworkError: boolean;
	/** Enable retry for rate limit errors */
	retryOnRateLimit: boolean;
}

/**
 * Logging and debugging settings
 */
export interface LoggingSettings {
	/** Enable logging */
	enabled: boolean;
	/** Log level (error, warn, info, debug) */
	level: 'error' | 'warn' | 'info' | 'debug';
	/** Log file path (optional; unset presumably means console-only — confirm in logger) */
	filePath?: string;
	/** Enable request/response logging */
	logRequests: boolean;
	/** Enable performance metrics logging */
	logPerformance: boolean;
	/** Enable error stack traces */
	logStackTraces: boolean;
	/**
	 * Maximum log file size in MB
	 * @minimum 1
	 */
	maxFileSize: number;
	/**
	 * Maximum number of log files to retain
	 * @minimum 1
	 */
	maxFiles: number;
}

/**
 * Security and validation settings
 */
export interface SecuritySettings {
	/** Enable API key validation */
	validateApiKeys: boolean;
	/** Enable request rate limiting */
	enableRateLimit: boolean;
	/**
	 * Maximum requests per minute
	 * @minimum 1
	 */
	maxRequestsPerMinute: number;
	/** Enable input sanitization */
	sanitizeInputs: boolean;
	/**
	 * Maximum prompt length in characters
	 * @minimum 1
	 */
	maxPromptLength: number;
	/** Allowed file extensions for imports (e.g., '.txt', '.md', '.json') */
	allowedFileExtensions: string[];
	/** Enable CORS protection */
	enableCors: boolean;
}

/**
 * Workflow and autopilot TDD settings
 */
export interface WorkflowSettings {
	/** Enable autopilot/TDD workflow features */
	enableAutopilot: boolean;
	/**
	 * Maximum retry attempts for phase validation
	 * @minimum 1
	 * @maximum 10
	 */
	maxPhaseAttempts: number;
	/** Branch naming pattern for workflow branches (e.g., 'task-{taskId}') */
	branchPattern: string;
	/** Require clean working tree before starting workflow */
	requireCleanWorkingTree: boolean;
	/** Automatically stage all changes during commit phase */
	autoStageChanges: boolean;
	/** Include co-author attribution in commits */
	includeCoAuthor: boolean;
	/** Co-author name for commit messages */
	coAuthorName: string;
	/** Co-author email for commit messages (defaults to [email protected]) */
	coAuthorEmail: string;
	/** Test result thresholds for phase validation */
	testThresholds: {
		/**
		 * Minimum test count for valid RED phase
		 * @minimum 0
		 */
		minTests: number;
		/**
		 * Maximum allowed failing tests in GREEN phase
		 * @minimum 0
		 */
		maxFailuresInGreen: number;
	};
	/** Commit message template pattern */
	commitMessageTemplate: string;
	/** Conventional commit types allowed */
	allowedCommitTypes: readonly CommitType[];
	/**
	 * Default commit type for autopilot
	 * @validation Must be present in allowedCommitTypes array
	 */
	defaultCommitType: CommitType;
	/**
	 * Timeout for workflow operations in milliseconds
	 * @minimum 0
	 */
	operationTimeout: number;
	/** Enable activity logging for workflow events */
	enableActivityLogging: boolean;
	/** Path to store workflow activity logs */
	activityLogPath: string;
	/** Enable automatic backup of workflow state */
	enableStateBackup: boolean;
	/**
	 * Maximum workflow state backups to retain
	 * @minimum 0
	 */
	maxStateBackups: number;
	/** Abort workflow if validation fails after max attempts */
	abortOnMaxAttempts: boolean;
}

/**
 * Main configuration interface for Task Master core
 */
export interface IConfiguration {
	/** Project root path */
	projectPath: string;

	/** Current AI provider name */
	aiProvider: string;

	/** API keys for different providers, keyed by provider name */
	apiKeys: Record<string, string>;

	/** Model configuration for different roles */
	models: ModelConfig;

	/** Provider configurations, keyed by provider name */
	providers: Record<string, ProviderConfig>;

	/** Task management settings */
	tasks: TaskSettings;

	/** Tag and context settings */
	tags: TagSettings;

	/** Workflow and autopilot settings */
	workflow: WorkflowSettings;

	/** Storage configuration */
	storage: StorageSettings;

	/** Retry and resilience settings */
	retry: RetrySettings;

	/** Logging configuration */
	logging: LoggingSettings;

	/** Security settings */
	security: SecuritySettings;

	/** Custom user-defined settings */
	custom?: Record<string, unknown>;

	/** Configuration version for migration purposes */
	version: string;

	/** Last updated timestamp */
	lastUpdated: string;
}

/**
 * Partial configuration for updates (all fields optional)
 * Note: TypeScript's Partial is shallow — nested settings objects
 * (tasks, storage, etc.) must still be complete when provided.
 */
export type PartialConfiguration = Partial<IConfiguration>;

/**
 * Configuration validation result
 */
export interface ConfigValidationResult {
	/** Whether the configuration is valid */
	isValid: boolean;
	/** Array of error messages */
	errors: string[];
	/** Array of warning messages */
	warnings: string[];
	/** Suggested fixes */
	suggestions?: string[];
}

/**
 * Environment variable configuration mapping
 */
export interface EnvironmentConfig {
	/** Mapping of environment variables to config paths */
	variables: Record<string, string>;
	/** Prefix for environment variables */
	prefix: string;
	/** Whether to override existing config with env vars */
	override: boolean;
}

/**
 * Configuration schema definition for validation
 */
export interface ConfigSchema {
	/** Schema for the main configuration */
	properties: Record<string, ConfigProperty>;
	/** Required properties */
	required: string[];
	/** Additional properties allowed */
	additionalProperties: boolean;
}

/**
 * Configuration property schema
 * Mirrors a subset of JSON Schema keywords
 */
export interface ConfigProperty {
	/** Property type */
	type: 'string' | 'number' | 'boolean' | 'object' | 'array';
	/** Property description */
	description?: string;
	/** Default value */
	default?: unknown;
	/** Allowed values for enums */
	enum?: unknown[];
	/** Minimum value (for numbers) */
	minimum?: number;
	/** Maximum value (for numbers) */
	maximum?: number;
	/** Pattern for string validation */
	pattern?: string;
	/** Nested properties (for objects) */
	properties?: Record<string, ConfigProperty>;
	/** Array item type (for arrays) */
	items?: ConfigProperty;
	/** Whether the property is required */
	required?: boolean;
}

/**
 * Default configuration factory
 */
export interface IConfigurationFactory {
	/**
	 * Create a default configuration
	 * @param projectPath - Project root path
	 * @returns Default configuration object
	 */
	createDefault(projectPath: string): IConfiguration;

	/**
	 * Merge configurations with precedence (override wins over base)
	 * @param base - Base configuration
	 * @param override - Override configuration
	 * @returns Merged configuration
	 */
	merge(base: IConfiguration, override: PartialConfiguration): IConfiguration;

	/**
	 * Validate configuration against schema
	 * @param config - Configuration to validate
	 * @returns Validation result
	 */
	validate(config: IConfiguration): ConfigValidationResult;

	/**
	 * Load configuration from environment variables
	 * @param envConfig - Environment variable mapping
	 * @returns Partial configuration from environment
	 */
	loadFromEnvironment(envConfig: EnvironmentConfig): PartialConfiguration;

	/**
	 * Get configuration schema
	 * @returns Configuration schema definition
	 */
	getSchema(): ConfigSchema;
}

/**
 * Configuration manager interface
 */
export interface IConfigurationManager {
	/**
	 * Load configuration from file or create default
	 * @param configPath - Path to configuration file
	 * @returns Promise that resolves to configuration
	 */
	load(configPath?: string): Promise<IConfiguration>;

	/**
	 * Save configuration to file
	 * @param config - Configuration to save
	 * @param configPath - Optional path override
	 * @returns Promise that resolves when save is complete
	 */
	save(config: IConfiguration, configPath?: string): Promise<void>;

	/**
	 * Update configuration with partial changes
	 * @param updates - Partial configuration updates
	 * @returns Promise that resolves to updated configuration
	 */
	update(updates: PartialConfiguration): Promise<IConfiguration>;

	/**
	 * Get current configuration
	 * @returns Current configuration object
	 */
	getConfig(): IConfiguration;

	/**
	 * Watch for configuration changes
	 * @param callback - Function to call when config changes
	 * @returns Function to stop watching (unsubscribe)
	 */
	watch(callback: (config: IConfiguration) => void): () => void;

	/**
	 * Validate current configuration
	 * @returns Validation result
	 */
	validate(): ConfigValidationResult;

	/**
	 * Reset configuration to defaults
	 * @returns Promise that resolves when reset is complete
	 */
	reset(): Promise<void>;
}

/**
 * Constants for default configuration values
 * Units follow the corresponding settings interfaces above
 * (delays/timeouts in milliseconds, log file size in MB).
 */
export const DEFAULT_CONFIG_VALUES = {
	MODELS: {
		MAIN: 'claude-sonnet-4-20250514',
		FALLBACK: 'claude-3-7-sonnet-20250219'
	},
	TASKS: {
		DEFAULT_PRIORITY: 'medium' as TaskPriority,
		DEFAULT_COMPLEXITY: 'moderate' as TaskComplexity,
		MAX_SUBTASKS: 20,
		MAX_CONCURRENT: 5,
		TASK_ID_PREFIX: 'TASK-'
	},
	TAGS: {
		DEFAULT_TAG: 'master',
		MAX_TAGS_PER_TASK: 10,
		NAMING_CONVENTION: 'kebab-case' as const
	},
	WORKFLOW: {
		ENABLE_AUTOPILOT: true,
		MAX_PHASE_ATTEMPTS: 3,
		// {taskId} placeholder is filled in at runtime
		BRANCH_PATTERN: 'task-{taskId}',
		REQUIRE_CLEAN_WORKING_TREE: true,
		AUTO_STAGE_CHANGES: true,
		INCLUDE_CO_AUTHOR: true,
		CO_AUTHOR_NAME: 'TaskMaster AI',
		CO_AUTHOR_EMAIL: '[email protected]',
		MIN_TESTS: 1,
		MAX_FAILURES_IN_GREEN: 0,
		COMMIT_MESSAGE_TEMPLATE:
			'{type}({scope}): {description} (Task {taskId}.{subtaskIndex})',
		ALLOWED_COMMIT_TYPES: [
			'feat',
			'fix',
			'refactor',
			'test',
			'docs',
			'chore'
		] as const satisfies readonly CommitType[],
		DEFAULT_COMMIT_TYPE: 'feat' as CommitType,
		OPERATION_TIMEOUT: 60000, // milliseconds
		ENABLE_ACTIVITY_LOGGING: true,
		ACTIVITY_LOG_PATH: '.taskmaster/logs/workflow-activity.log',
		ENABLE_STATE_BACKUP: true,
		MAX_STATE_BACKUPS: 5,
		ABORT_ON_MAX_ATTEMPTS: false
	},
	STORAGE: {
		TYPE: 'auto' as const,
		ENCODING: 'utf8' as BufferEncoding,
		MAX_BACKUPS: 5
	},
	RETRY: {
		ATTEMPTS: 3,
		DELAY: 1000, // milliseconds
		MAX_DELAY: 30000, // milliseconds
		BACKOFF_MULTIPLIER: 2,
		TIMEOUT: 30000 // milliseconds
	},
	LOGGING: {
		LEVEL: 'info' as const,
		MAX_FILE_SIZE: 10, // MB
		MAX_FILES: 5
	},
	SECURITY: {
		MAX_REQUESTS_PER_MINUTE: 60,
		MAX_PROMPT_LENGTH: 100000, // characters
		ALLOWED_EXTENSIONS: ['.txt', '.md', '.json']
	},
	VERSION: '1.0.0'
} as const;

```

--------------------------------------------------------------------------------
/scripts/modules/prompt-manager.js:
--------------------------------------------------------------------------------

```javascript
import { log } from './utils.js';
import Ajv from 'ajv';
import addFormats from 'ajv-formats';

// Import all prompt templates directly
import analyzeComplexityPrompt from '../../src/prompts/analyze-complexity.json' with {
	type: 'json'
};
import expandTaskPrompt from '../../src/prompts/expand-task.json' with {
	type: 'json'
};
import addTaskPrompt from '../../src/prompts/add-task.json' with {
	type: 'json'
};
import researchPrompt from '../../src/prompts/research.json' with {
	type: 'json'
};
import parsePrdPrompt from '../../src/prompts/parse-prd.json' with {
	type: 'json'
};
import updateTaskPrompt from '../../src/prompts/update-task.json' with {
	type: 'json'
};
import updateTasksPrompt from '../../src/prompts/update-tasks.json' with {
	type: 'json'
};
import updateSubtaskPrompt from '../../src/prompts/update-subtask.json' with {
	type: 'json'
};

// Import schema for validation
import promptTemplateSchema from '../../src/prompts/schemas/prompt-template.schema.json' with {
	type: 'json'
};

/**
 * Manages prompt templates for AI interactions
 */
/**
 * Manages prompt templates for AI interactions.
 *
 * Templates are the JSON documents imported at the top of this module, keyed
 * by prompt ID. Each template holds one or more named "variants" (system/user
 * prompt pairs). Rendering supports a small Handlebars-like syntax:
 * `{{variable}}`, `{{#if cond}}...{{else}}...{{/if}}`,
 * `{{#each array}}...{{/each}}`, `{{{json value}}}`, and the helpers
 * `(eq ...)`, `(not ...)`, `(gt ...)`, `(gte ...)`.
 */
export class PromptManager {
	constructor() {
		// Store all prompts in a map for easy lookup by prompt ID
		this.prompts = new Map([
			['analyze-complexity', analyzeComplexityPrompt],
			['expand-task', expandTaskPrompt],
			['add-task', addTaskPrompt],
			['research', researchPrompt],
			['parse-prd', parsePrdPrompt],
			['update-task', updateTaskPrompt],
			['update-tasks', updateTasksPrompt],
			['update-subtask', updateSubtaskPrompt]
		]);

		// Cache of fully-rendered prompts keyed by (promptId, variables, variant).
		// Unbounded, which is acceptable given the small fixed set of templates.
		this.cache = new Map();
		this.setupValidation();
	}

	/**
	 * Set up JSON schema validation.
	 * Compiles the bundled prompt-template schema with Ajv; on failure,
	 * degrades gracefully to a no-op validator so prompts still load.
	 * @private
	 */
	setupValidation() {
		this.ajv = new Ajv({ allErrors: true, strict: false });
		addFormats(this.ajv);

		try {
			// Use the imported schema directly
			this.validatePrompt = this.ajv.compile(promptTemplateSchema);
			log('debug', '✓ JSON schema validation enabled');
		} catch (error) {
			log('warn', `⚠ Schema validation disabled: ${error.message}`);
			this.validatePrompt = () => true; // Fallback to no validation
		}
	}

	/**
	 * Load a prompt template and render it with variables
	 * @param {string} promptId - The prompt template ID
	 * @param {Object} variables - Variables to inject into the template
	 * @param {string} [variantKey] - Optional specific variant to use
	 * @returns {{systemPrompt: string, userPrompt: string, metadata: Object}}
	 * @throws {Error} If the template or requested variant does not exist,
	 *   or parameter/schema validation fails
	 */
	loadPrompt(promptId, variables = {}, variantKey = null) {
		try {
			// Check cache first (a null variantKey stringifies to "null", which
			// is fine because the key is always built the same way)
			const cacheKey = `${promptId}-${JSON.stringify(variables)}-${variantKey}`;
			if (this.cache.has(cacheKey)) {
				return this.cache.get(cacheKey);
			}

			// Load template (throws if unknown or structurally invalid)
			const template = this.loadTemplate(promptId);

			// Validate parameters if schema validation is available
			if (this.validatePrompt && this.validatePrompt !== true) {
				this.validateParameters(template, variables);
			}

			// Select the variant - use specified key or select based on conditions
			let variant;
			if (variantKey) {
				// Fail fast with a clear error instead of crashing later inside
				// renderTemplate when the requested variant does not exist
				const explicitVariant = template.prompts[variantKey];
				if (!explicitVariant) {
					throw new Error(
						`Variant '${variantKey}' not found in prompt template '${promptId}'`
					);
				}
				variant = { ...explicitVariant, name: variantKey };
			} else {
				variant = this.selectVariant(template, variables);
			}

			// Render the prompts with variables
			const rendered = {
				systemPrompt: this.renderTemplate(variant.system, variables),
				userPrompt: this.renderTemplate(variant.user, variables),
				metadata: {
					templateId: template.id,
					version: template.version,
					variant: variant.name || 'default',
					parameters: variables
				}
			};

			// Cache the result
			this.cache.set(cacheKey, rendered);

			return rendered;
		} catch (error) {
			log('error', `Failed to load prompt ${promptId}: ${error.message}`);
			throw error;
		}
	}

	/**
	 * Load a prompt template from the imported prompts
	 * @param {string} promptId - The prompt template ID
	 * @returns {Object} The validated template
	 * @throws {Error} If the template is unknown or fails validation
	 * @private
	 */
	loadTemplate(promptId) {
		// Get template from the map
		const template = this.prompts.get(promptId);

		if (!template) {
			throw new Error(`Prompt template '${promptId}' not found`);
		}

		// Schema validation if available (do this first for detailed errors)
		if (this.validatePrompt && this.validatePrompt !== true) {
			const valid = this.validatePrompt(template);
			if (!valid) {
				const errors = this.validatePrompt.errors
					.map((err) => `${err.instancePath || 'root'}: ${err.message}`)
					.join(', ');
				throw new Error(`Schema validation failed: ${errors}`);
			}
		} else {
			// Defensive fallback basic validation; in practice validatePrompt is
			// always a function (either compiled or the no-op fallback)
			if (!template.id || !template.prompts || !template.prompts.default) {
				throw new Error(
					'Invalid template structure: missing required fields (id, prompts.default)'
				);
			}
		}

		return template;
	}

	/**
	 * Validate supplied variables against the template's declared parameters
	 * (required/type/enum/pattern/range checks).
	 * @param {Object} template - The prompt template
	 * @param {Object} variables - Variables to validate
	 * @throws {Error} Aggregated message listing every validation failure
	 * @private
	 */
	validateParameters(template, variables) {
		if (!template.parameters) return;

		const errors = [];

		for (const [paramName, paramConfig] of Object.entries(
			template.parameters
		)) {
			const value = variables[paramName];

			// Check required parameters
			if (paramConfig.required && value === undefined) {
				errors.push(`Required parameter '${paramName}' missing`);
				continue;
			}

			// Skip validation for undefined optional parameters
			if (value === undefined) continue;

			// Type validation
			if (!this.validateParameterType(value, paramConfig.type)) {
				errors.push(
					`Parameter '${paramName}' expected ${paramConfig.type}, got ${typeof value}`
				);
			}

			// Enum validation
			if (paramConfig.enum && !paramConfig.enum.includes(value)) {
				errors.push(
					`Parameter '${paramName}' must be one of: ${paramConfig.enum.join(', ')}`
				);
			}

			// Pattern validation for strings
			if (paramConfig.pattern && typeof value === 'string') {
				const regex = new RegExp(paramConfig.pattern);
				if (!regex.test(value)) {
					errors.push(
						`Parameter '${paramName}' does not match required pattern: ${paramConfig.pattern}`
					);
				}
			}

			// Range validation for numbers
			if (typeof value === 'number') {
				if (paramConfig.minimum !== undefined && value < paramConfig.minimum) {
					errors.push(
						`Parameter '${paramName}' must be >= ${paramConfig.minimum}`
					);
				}
				if (paramConfig.maximum !== undefined && value > paramConfig.maximum) {
					errors.push(
						`Parameter '${paramName}' must be <= ${paramConfig.maximum}`
					);
				}
			}
		}

		if (errors.length > 0) {
			throw new Error(`Parameter validation failed: ${errors.join('; ')}`);
		}
	}

	/**
	 * Validate a single value against a declared parameter type
	 * @param {*} value - The value to check
	 * @param {string} expectedType - 'string'|'number'|'boolean'|'array'|'object'
	 * @returns {boolean} True if the value matches (unknown types pass)
	 * @private
	 */
	validateParameterType(value, expectedType) {
		switch (expectedType) {
			case 'string':
				return typeof value === 'string';
			case 'number':
				return typeof value === 'number';
			case 'boolean':
				return typeof value === 'boolean';
			case 'array':
				return Array.isArray(value);
			case 'object':
				return (
					typeof value === 'object' && value !== null && !Array.isArray(value)
				);
			default:
				return true;
		}
	}

	/**
	 * Select the best variant based on each variant's condition expression.
	 * The first non-default variant whose condition evaluates truthy wins;
	 * otherwise the 'default' variant is used.
	 * @private
	 */
	selectVariant(template, variables) {
		// Check each variant's condition
		for (const [name, variant] of Object.entries(template.prompts)) {
			if (name === 'default') continue;

			if (
				variant.condition &&
				this.evaluateCondition(variant.condition, variables)
			) {
				return { ...variant, name };
			}
		}

		// Fall back to default
		return { ...template.prompts.default, name: 'default' };
	}

	/**
	 * Evaluate a condition string against the supplied variables.
	 * SECURITY NOTE: uses the Function constructor (eval-like). Acceptable only
	 * because conditions come from templates bundled with the application —
	 * never pass untrusted input here.
	 * @returns {boolean} Result of the condition, or false on evaluation error
	 * @private
	 */
	evaluateCondition(condition, variables) {
		try {
			// Create a safe evaluation context
			const context = { ...variables };

			// Simple condition evaluation (can be enhanced)
			// For now, supports basic comparisons
			const func = new Function(...Object.keys(context), `return ${condition}`);
			return func(...Object.values(context));
		} catch (error) {
			log('warn', `Failed to evaluate condition: ${condition}`);
			return false;
		}
	}

	/**
	 * Render a template string with variables.
	 * The replacement passes below are order-dependent: helper expressions
	 * ((eq)/(not)/(gt)/(gte)) are rewritten to 'true'/'false' first so the
	 * {{#if}} pass can consume their results, then loops, then the raw json
	 * helper, and finally plain {{variable}} substitution.
	 * @private
	 */
	renderTemplate(template, variables) {
		let rendered = template;

		// Handle helper functions like (eq variable "value")
		rendered = rendered.replace(
			/\(eq\s+(\w+(?:\.\w+)*)\s+"([^"]+)"\)/g,
			(match, path, compareValue) => {
				const value = this.getNestedValue(variables, path);
				return value === compareValue ? 'true' : 'false';
			}
		);

		// Handle not helper function like (not variable)
		rendered = rendered.replace(/\(not\s+(\w+(?:\.\w+)*)\)/g, (match, path) => {
			const value = this.getNestedValue(variables, path);
			return !value ? 'true' : 'false';
		});

		// Handle gt (greater than) helper function like (gt variable 0)
		rendered = rendered.replace(
			/\(gt\s+(\w+(?:\.\w+)*)\s+(\d+(?:\.\d+)?)\)/g,
			(match, path, compareValue) => {
				const value = this.getNestedValue(variables, path);
				const numValue = parseFloat(compareValue);
				return typeof value === 'number' && value > numValue ? 'true' : 'false';
			}
		);

		// Handle gte (greater than or equal) helper function like (gte variable 0)
		rendered = rendered.replace(
			/\(gte\s+(\w+(?:\.\w+)*)\s+(\d+(?:\.\d+)?)\)/g,
			(match, path, compareValue) => {
				const value = this.getNestedValue(variables, path);
				const numValue = parseFloat(compareValue);
				return typeof value === 'number' && value >= numValue
					? 'true'
					: 'false';
			}
		);

		// Handle conditionals with else {{#if variable}}...{{else}}...{{/if}}
		rendered = rendered.replace(
			/\{\{#if\s+([^}]+)\}\}([\s\S]*?)(?:\{\{else\}\}([\s\S]*?))?\{\{\/if\}\}/g,
			(match, condition, trueContent, falseContent = '') => {
				// Handle boolean values and helper function results
				let value;
				if (condition === 'true') {
					value = true;
				} else if (condition === 'false') {
					value = false;
				} else {
					value = this.getNestedValue(variables, condition);
				}
				return value ? trueContent : falseContent;
			}
		);

		// Handle each loops {{#each array}}...{{/each}}
		rendered = rendered.replace(
			/\{\{#each\s+(\w+(?:\.\w+)*)\}\}([\s\S]*?)\{\{\/each\}\}/g,
			(match, path, content) => {
				const array = this.getNestedValue(variables, path);
				if (!Array.isArray(array)) return '';

				return array
					.map((item, index) => {
						// Create a context with item properties and special variables.
						// NOTE: '@index'/'@first'/'@last' are reachable from {{#if ...}}
						// conditions, but not from plain {{...}} substitution (its regex
						// only matches \w characters, not '@').
						const itemContext = {
							...variables,
							...item,
							'@index': index,
							'@first': index === 0,
							'@last': index === array.length - 1
						};

						// Recursively render the content with item context
						return this.renderTemplate(content, itemContext);
					})
					.join('');
			}
		);

		// Handle json helper {{{json variable}}} (triple braces for raw output)
		rendered = rendered.replace(
			/\{\{\{json\s+(\w+(?:\.\w+)*)\}\}\}/g,
			(match, path) => {
				const value = this.getNestedValue(variables, path);
				return value !== undefined ? JSON.stringify(value, null, 2) : '';
			}
		);

		// Handle variable substitution {{variable}}
		rendered = rendered.replace(/\{\{(\w+(?:\.\w+)*)\}\}/g, (match, path) => {
			const value = this.getNestedValue(variables, path);
			return value !== undefined ? value : '';
		});

		return rendered;
	}

	/**
	 * Get nested value from object using dot notation (e.g. 'task.title').
	 * Returns undefined if any segment along the path is missing.
	 * @private
	 */
	getNestedValue(obj, path) {
		return path
			.split('.')
			.reduce(
				(current, key) =>
					current && current[key] !== undefined ? current[key] : undefined,
				obj
			);
	}

	/**
	 * Validate all prompt templates against the compiled schema
	 * @returns {{total: number, errors: string[], valid: string[]}}
	 */
	validateAllPrompts() {
		const results = { total: 0, errors: [], valid: [] };

		// Iterate through all imported prompts
		for (const [promptId, template] of this.prompts.entries()) {
			results.total++;

			try {
				// Validate the template
				if (this.validatePrompt && this.validatePrompt !== true) {
					const valid = this.validatePrompt(template);
					if (!valid) {
						const errors = this.validatePrompt.errors
							.map((err) => `${err.instancePath || 'root'}: ${err.message}`)
							.join(', ');
						throw new Error(`Schema validation failed: ${errors}`);
					}
				}
				results.valid.push(promptId);
			} catch (error) {
				results.errors.push(`${promptId}: ${error.message}`);
			}
		}

		return results;
	}

	/**
	 * List all available prompt templates
	 * @returns {Array<Object>} Summary (id, description, version, parameters, tags)
	 */
	listPrompts() {
		const prompts = [];

		// Iterate through all imported prompts
		for (const [promptId, template] of this.prompts.entries()) {
			try {
				prompts.push({
					id: template.id,
					description: template.description,
					version: template.version,
					parameters: template.parameters,
					tags: template.metadata?.tags || []
				});
			} catch (error) {
				log('warn', `Failed to process template ${promptId}: ${error.message}`);
			}
		}

		return prompts;
	}

	/**
	 * Validate template structure
	 * @param {string|Object} templateOrId - Either a template ID or a template object
	 * @returns {{valid: boolean, error?: string}} Never throws; errors are returned
	 */
	validateTemplate(templateOrId) {
		try {
			let template;

			// Handle both template ID and direct template object
			if (typeof templateOrId === 'string') {
				template = this.prompts.get(templateOrId);
				if (!template) {
					return {
						valid: false,
						error: `Template '${templateOrId}' not found`
					};
				}
			} else {
				template = templateOrId;
			}

			// Check required fields
			const required = ['id', 'version', 'description', 'prompts'];
			for (const field of required) {
				if (!template[field]) {
					return { valid: false, error: `Missing required field: ${field}` };
				}
			}

			// Check default prompt exists
			if (!template.prompts.default) {
				return { valid: false, error: 'Missing default prompt variant' };
			}

			// Check each variant has required fields
			for (const [name, variant] of Object.entries(template.prompts)) {
				if (!variant.system || !variant.user) {
					return {
						valid: false,
						error: `Variant '${name}' missing system or user prompt`
					};
				}
			}

			// Schema validation if available
			if (this.validatePrompt && this.validatePrompt !== true) {
				const valid = this.validatePrompt(template);
				if (!valid) {
					const errors = this.validatePrompt.errors
						.map((err) => `${err.instancePath || 'root'}: ${err.message}`)
						.join(', ');
					return { valid: false, error: `Schema validation failed: ${errors}` };
				}
			}

			return { valid: true };
		} catch (error) {
			return { valid: false, error: error.message };
		}
	}
}

// Lazily-created module-level singleton
let promptManagerInstance = null;

/**
 * Get or create the prompt manager instance
 * @returns {PromptManager}
 */
export function getPromptManager() {
	promptManagerInstance ??= new PromptManager();
	return promptManagerInstance;
}

```

--------------------------------------------------------------------------------
/docs/command-reference.md:
--------------------------------------------------------------------------------

```markdown
# Task Master Command Reference

Here's a comprehensive reference of all available commands:

## Parse PRD

```bash
# Parse a PRD file and generate tasks
task-master parse-prd <prd-file.txt>

# Limit the number of tasks generated (default is 10)
task-master parse-prd <prd-file.txt> --num-tasks=5

# Allow task master to determine the number of tasks based on complexity
task-master parse-prd <prd-file.txt> --num-tasks=0
```

## List Tasks

```bash
# List all tasks
task-master list

# List tasks with a specific status
task-master list --status=<status>

# List tasks with subtasks
task-master list --with-subtasks

# List tasks with a specific status and include subtasks
task-master list --status=<status> --with-subtasks
```

## Show Next Task

```bash
# Show the next task to work on based on dependencies and status
task-master next
```

## Show Specific Task

```bash
# Show details of a specific task
task-master show <id>
# or
task-master show --id=<id>

# View multiple tasks with comma-separated IDs
task-master show 1,3,5
task-master show 44,55

# View a specific subtask (e.g., subtask 2 of task 1)
task-master show 1.2

# Mix parent tasks and subtasks
task-master show 44,44.1,55,55.2
```

**Multiple Task Display:**

- **Single ID**: Shows detailed task view with full implementation details
- **Multiple IDs**: Shows compact summary table with interactive action menu
- **Action Menu**: Provides copy-paste ready commands for batch operations:
  - Mark all as in-progress/done
  - Show next available task
  - Expand all tasks (generate subtasks)
  - View dependency relationships
  - Generate task files

## Update Tasks

```bash
# Update tasks from a specific ID and provide context
task-master update --from=<id> --prompt="<prompt>"

# Update tasks using research role
task-master update --from=<id> --prompt="<prompt>" --research
```

## Update a Specific Task

```bash
# Update a single task by ID with new information
task-master update-task --id=<id> --prompt="<prompt>"

# Use research-backed updates
task-master update-task --id=<id> --prompt="<prompt>" --research
```

## Update a Subtask

```bash
# Append additional information to a specific subtask
task-master update-subtask --id=<parentId.subtaskId> --prompt="<prompt>"

# Example: Add details about API rate limiting to subtask 2 of task 5
task-master update-subtask --id=5.2 --prompt="Add rate limiting of 100 requests per minute"

# Use research-backed updates
task-master update-subtask --id=<parentId.subtaskId> --prompt="<prompt>" --research
```

Unlike the `update-task` command which replaces task information, the `update-subtask` command _appends_ new information to the existing subtask details, marking it with a timestamp. This is useful for iteratively enhancing subtasks while preserving the original content.

## Generate Task Files

```bash
# Generate individual task files from tasks.json
task-master generate
```

## Set Task Status

```bash
# Set status of a single task
task-master set-status --id=<id> --status=<status>

# Set status for multiple tasks
task-master set-status --id=1,2,3 --status=<status>

# Set status for subtasks
task-master set-status --id=1.1,1.2 --status=<status>
```

When marking a task as "done", all of its subtasks will automatically be marked as "done" as well.

## Expand Tasks

```bash
# Expand a specific task with subtasks
task-master expand --id=<id> --num=<number>

# Expand a task with a dynamic number of subtasks (ignoring complexity report)
task-master expand --id=<id> --num=0

# Expand with additional context
task-master expand --id=<id> --prompt="<context>"

# Expand all pending tasks
task-master expand --all

# Force regeneration of subtasks for tasks that already have them
task-master expand --all --force

# Research-backed subtask generation for a specific task
task-master expand --id=<id> --research

# Research-backed generation for all tasks
task-master expand --all --research
```

## Clear Subtasks

```bash
# Clear subtasks from a specific task
task-master clear-subtasks --id=<id>

# Clear subtasks from multiple tasks
task-master clear-subtasks --id=1,2,3

# Clear subtasks from all tasks
task-master clear-subtasks --all
```

## Analyze Task Complexity

```bash
# Analyze complexity of all tasks
task-master analyze-complexity

# Save report to a custom location
task-master analyze-complexity --output=my-report.json

# Use a specific LLM model
task-master analyze-complexity --model=claude-3-opus-20240229

# Set a custom complexity threshold (1-10)
task-master analyze-complexity --threshold=6

# Use an alternative tasks file
task-master analyze-complexity --file=custom-tasks.json

# Use Perplexity AI for research-backed complexity analysis
task-master analyze-complexity --research
```

## View Complexity Report

```bash
# Display the task complexity analysis report
task-master complexity-report

# View a report at a custom location
task-master complexity-report --file=my-report.json
```

## Managing Task Dependencies

```bash
# Add a dependency to a task
task-master add-dependency --id=<id> --depends-on=<id>

# Remove a dependency from a task
task-master remove-dependency --id=<id> --depends-on=<id>

# Validate dependencies without fixing them
task-master validate-dependencies

# Find and fix invalid dependencies automatically
task-master fix-dependencies
```

## Move Tasks

```bash
# Move a task or subtask to a new position
task-master move --from=<id> --to=<id>

# Examples:
# Move task to become a subtask
task-master move --from=5 --to=7

# Move subtask to become a standalone task
task-master move --from=5.2 --to=7

# Move subtask to a different parent
task-master move --from=5.2 --to=7.3

# Reorder subtasks within the same parent
task-master move --from=5.2 --to=5.4

# Move a task to a new ID position (creates placeholder if doesn't exist)
task-master move --from=5 --to=25

# Move multiple tasks at once (must have the same number of IDs)
task-master move --from=10,11,12 --to=16,17,18
```

## Add a New Task

```bash
# Add a new task using AI (main role)
task-master add-task --prompt="Description of the new task"

# Add a new task using AI (research role)
task-master add-task --prompt="Description of the new task" --research

# Add a task with dependencies
task-master add-task --prompt="Description" --dependencies=1,2,3

# Add a task with priority
task-master add-task --prompt="Description" --priority=high
```

## Tag Management

Task Master supports tagged task lists for multi-context task management. Each tag represents a separate, isolated context for tasks.

```bash
# List all available tags with task counts and status
task-master tags

# List tags with detailed metadata
task-master tags --show-metadata

# Create a new empty tag
task-master add-tag <tag-name>

# Create a new tag with a description
task-master add-tag <tag-name> --description="Feature development tasks"

# Create a tag based on current git branch name
task-master add-tag --from-branch

# Create a new tag by copying tasks from the current tag
task-master add-tag <new-tag> --copy-from-current

# Create a new tag by copying from a specific tag
task-master add-tag <new-tag> --copy-from=<source-tag>

# Switch to a different tag context
task-master use-tag <tag-name>

# Rename an existing tag
task-master rename-tag <old-name> <new-name>

# Copy an entire tag to create a new one
task-master copy-tag <source-tag> <target-tag>

# Copy a tag with a description
task-master copy-tag <source-tag> <target-tag> --description="Copied for testing"

# Delete a tag and all its tasks (with confirmation)
task-master delete-tag <tag-name>

# Delete a tag without confirmation prompt
task-master delete-tag <tag-name> --yes
```

**Tag Context:**
- All task operations (list, show, add, update, etc.) work within the currently active tag
- Use `--tag=<name>` flag with most commands to operate on a specific tag context
- Tags provide complete isolation - tasks in different tags don't interfere with each other

## Initialize a Project

```bash
# Initialize a new project with Task Master structure
task-master init

# Initialize a new project applying specific rules
task-master init --rules cursor,windsurf,vscode
```

- The `--rules` flag allows you to specify one or more rule profiles (e.g., `cursor`, `roo`, `windsurf`, `cline`) to apply during initialization.
- If omitted, all available rule profiles are installed by default (claude, cline, codex, cursor, roo, trae, vscode, windsurf).
- You can use multiple comma-separated profiles in a single command.

## Manage Rules

```bash
# Add rule profiles to your project
# (e.g., .roo/rules, .windsurf/rules)
task-master rules add <profile1,profile2,...>

# Remove rule sets from your project
task-master rules remove <profile1,profile2,...>

# Remove rule sets bypassing safety check (dangerous)
task-master rules remove <profile1,profile2,...> --force

# Launch interactive rules setup to select rules
# (does not re-initialize project or ask about shell aliases)
task-master rules setup
```

- Adding rules creates the profile and rules directory (e.g., `.roo/rules`) and copies/initializes the rules.
- Removing rules deletes the profile and rules directory and associated MCP config.
- **Safety Check**: Attempting to remove rule profiles will trigger a critical warning requiring confirmation. Use `--force` to bypass.
- You can use multiple comma-separated rules in a single command.
- The `setup` action launches an interactive prompt to select which rules to apply. The list of rules is always current with the available profiles, and no manual updates are needed. This command does **not** re-initialize your project or affect shell aliases; it only manages rules interactively.

**Examples:**

```bash
task-master rules add windsurf,roo,vscode
task-master rules remove windsurf
task-master rules setup
```

### Interactive Rules Setup

You can launch the interactive rules setup at any time with:

```bash
task-master rules setup
```

This command opens a prompt where you can select which rule profiles (e.g., Cursor, Roo, Windsurf) you want to add to your project. This does **not** re-initialize your project or ask about shell aliases; it only manages rules.

- Use this command to add rule profiles interactively after project creation.
- The same interactive prompt is also used during `init` if you don't specify rules with `--rules`.

## Configure AI Models

```bash
# View current AI model configuration and API key status
task-master models

# Set the primary model for generation/updates (provider inferred if known)
task-master models --set-main=claude-3-opus-20240229

# Set the research model
task-master models --set-research=sonar-pro

# Set the fallback model
task-master models --set-fallback=claude-3-haiku-20240307

# Set a custom Ollama model for the main role
task-master models --set-main=my-local-llama --ollama

# Set a custom OpenRouter model for the research role
task-master models --set-research=google/gemini-pro --openrouter

# Set Codex CLI model for the main role (uses ChatGPT subscription via OAuth)
task-master models --set-main=gpt-5-codex --codex-cli

# Set Codex CLI model for the fallback role
task-master models --set-fallback=gpt-5 --codex-cli

# Run interactive setup to configure models, including custom ones
task-master models --setup
```

Configuration is stored in `.taskmaster/config.json` in your project root (legacy `.taskmasterconfig` files are automatically migrated). API keys are still managed via `.env` or MCP configuration. Use `task-master models` without flags to see available built-in models. Use `--setup` for a guided experience.

State is stored in `.taskmaster/state.json` in your project root. It maintains important information like the current tag. Do not manually edit this file.

## Research Fresh Information

```bash
# Perform AI-powered research with fresh, up-to-date information
task-master research "What are the latest best practices for JWT authentication in Node.js?"

# Research with specific task context
task-master research "How to implement OAuth 2.0?" --id=15,16

# Research with file context for code-aware suggestions
task-master research "How can I optimize this API implementation?" --files=src/api.js,src/auth.js

# Research with custom context and project tree
task-master research "Best practices for error handling" --context="We're using Express.js" --tree

# Research with different detail levels
task-master research "React Query v5 migration guide" --detail=high

# Interactive follow-up questions can be disabled (useful for scripting; this is the default for MCP)

# Use a custom tasks file location
task-master research "How to implement this feature?" --file=custom-tasks.json

# Research within a specific tag context
task-master research "Database optimization strategies" --tag=feature-branch

# Save research conversation to .taskmaster/docs/research/ directory (for later reference)
task-master research "Database optimization techniques" --save-file

# Save key findings directly to a task or subtask (recommended for actionable insights)
task-master research "How to implement OAuth?" --save-to=15
task-master research "API optimization strategies" --save-to=15.2

# Combine context gathering with automatic saving of findings
task-master research "Best practices for this implementation" --id=15,16 --files=src/auth.js --save-to=15.3
```

**The research command is a powerful exploration tool that provides:**

- **Fresh information beyond AI knowledge cutoffs**
- **Project-aware context** from your tasks and files
- **Automatic task discovery** using fuzzy search
- **Multiple detail levels** (low, medium, high)
- **Token counting and cost tracking**
- **Interactive follow-up questions** for deep exploration
- **Flexible save options** (commit findings to tasks or preserve conversations)
- **Iterative discovery** through continuous questioning and refinement

**Use research frequently to:**

- Get current best practices before implementing features
- Research new technologies and libraries
- Find solutions to complex problems
- Validate your implementation approaches
- Stay updated with latest security recommendations

**Interactive Features (CLI):**

- **Follow-up questions** that maintain conversation context and allow deep exploration
- **Save menu** during or after research with flexible options:
  - **Save to task/subtask**: Commit key findings and actionable insights (recommended)
  - **Save to file**: Preserve entire conversation for later reference if needed
  - **Continue exploring**: Ask more follow-up questions to dig deeper
- **Automatic file naming** with timestamps and query-based slugs when saving conversations

```

--------------------------------------------------------------------------------
/docs/task-structure.md:
--------------------------------------------------------------------------------

```markdown
# Task Structure

Tasks in Task Master follow a specific format designed to provide comprehensive information for both humans and AI assistants.

## Task Fields in tasks.json

Tasks in tasks.json have the following structure:

- `id`: Unique identifier for the task (Example: `1`)
- `title`: Brief, descriptive title of the task (Example: `"Initialize Repo"`)
- `description`: Concise description of what the task involves (Example: `"Create a new repository, set up initial structure."`)
- `status`: Current state of the task (Example: `"pending"`, `"done"`, `"deferred"`)
- `dependencies`: IDs of tasks that must be completed before this task (Example: `[1, 2]`)
  - Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending)
  - This helps quickly identify which prerequisite tasks are blocking work
- `priority`: Importance level of the task (Example: `"high"`, `"medium"`, `"low"`)
- `details`: In-depth implementation instructions (Example: `"Use GitHub client ID/secret, handle callback, set session token."`)
- `testStrategy`: Verification approach (Example: `"Deploy and call endpoint to confirm 'Hello World' response."`)
- `subtasks`: List of smaller, more specific tasks that make up the main task (Example: `[{"id": 1, "title": "Configure OAuth", ...}]`)

## Task File Format

Individual task files follow this format:

```
# Task ID: <id>
# Title: <title>
# Status: <status>
# Dependencies: <comma-separated list of dependency IDs>
# Priority: <priority>
# Description: <brief description>
# Details:
<detailed implementation notes>

# Test Strategy:
<verification approach>
```

## Features in Detail

### Analyzing Task Complexity

The `analyze-complexity` command:

- Analyzes each task using AI to assess its complexity on a scale of 1-10
- Recommends optimal number of subtasks based on configured DEFAULT_SUBTASKS
- Generates tailored prompts for expanding each task
- Creates a comprehensive JSON report with ready-to-use commands
- Saves the report to scripts/task-complexity-report.json by default

The generated report contains:

- Complexity analysis for each task (scored 1-10)
- Recommended number of subtasks based on complexity
- AI-generated expansion prompts customized for each task
- Ready-to-run expansion commands directly within each task analysis

### Viewing Complexity Report

The `complexity-report` command:

- Displays a formatted, easy-to-read version of the complexity analysis report
- Shows tasks organized by complexity score (highest to lowest)
- Provides complexity distribution statistics (low, medium, high)
- Highlights tasks recommended for expansion based on threshold score
- Includes ready-to-use expansion commands for each complex task
- If no report exists, offers to generate one on the spot

### Smart Task Expansion

The `expand` command automatically checks for and uses the complexity report:

When a complexity report exists:

- Tasks are automatically expanded using the recommended subtask count and prompts
- When expanding all tasks, they're processed in order of complexity (highest first)
- Research-backed generation is preserved from the complexity analysis
- You can still override recommendations with explicit command-line options

Example workflow:

```bash
# Generate the complexity analysis report with research capabilities
task-master analyze-complexity --research

# Review the report in a readable format
task-master complexity-report

# Expand tasks using the optimized recommendations
task-master expand --id=8
# or expand all tasks
task-master expand --all
```

### Finding the Next Task

The `next` command:

- Identifies tasks that are pending/in-progress and have all dependencies satisfied
- Prioritizes tasks by priority level, dependency count, and task ID
- Displays comprehensive information about the selected task:
  - Basic task details (ID, title, priority, dependencies)
  - Implementation details
  - Subtasks (if they exist)
- Provides contextual suggested actions:
  - Command to mark the task as in-progress
  - Command to mark the task as done
  - Commands for working with subtasks

### Viewing Specific Task Details

The `show` command:

- Displays comprehensive details about a specific task or subtask
- Shows task status, priority, dependencies, and detailed implementation notes
- For parent tasks, displays all subtasks and their status
- For subtasks, shows parent task relationship
- Provides contextual action suggestions based on the task's state
- Works with both regular tasks and subtasks (using the format taskId.subtaskId)

## Best Practices for AI-Driven Development

1. **Start with a detailed PRD**: The more detailed your PRD, the better the generated tasks will be.

2. **Review generated tasks**: After parsing the PRD, review the tasks to ensure they make sense and have appropriate dependencies.

3. **Analyze task complexity**: Use the complexity analysis feature to identify which tasks should be broken down further.

4. **Follow the dependency chain**: Always respect task dependencies - the Cursor agent will help with this.

5. **Update as you go**: If your implementation diverges from the plan, use the update command to keep future tasks aligned with your current approach.

6. **Break down complex tasks**: Use the expand command to break down complex tasks into manageable subtasks.

7. **Regenerate task files**: After any updates to tasks.json, regenerate the task files to keep them in sync.

8. **Communicate context to the agent**: When asking the Cursor agent to help with a task, provide context about what you're trying to achieve.

9. **Validate dependencies**: Periodically run the validate-dependencies command to check for invalid or circular dependencies.

# Task Structure Documentation

Task Master uses a structured JSON format to organize and manage tasks. As of version 0.16.2, Task Master introduces **Tagged Task Lists** for multi-context task management while maintaining full backward compatibility.

## Tagged Task Lists System

Task Master now organizes tasks into separate contexts called **tags**. This enables working across multiple contexts such as different branches, environments, or project phases without conflicts.

### Data Structure Overview

**Tagged Format (Current)**:

```json
{
  "master": {
    "tasks": [
      { "id": 1, "title": "Setup API", "status": "pending", ... }
    ]
  },
  "feature-branch": {
    "tasks": [
      { "id": 1, "title": "New Feature", "status": "pending", ... }
    ]
  }
}
```

**Legacy Format (Automatically Migrated)**:

```json
{
  "tasks": [
    { "id": 1, "title": "Setup API", "status": "pending", ... }
  ]
}
```

### Tag-based Task Lists (v0.17+) and Compatibility

- **Seamless Migration**: Existing `tasks.json` files are automatically migrated to use a "master" tag
- **Zero Disruption**: All existing commands continue to work exactly as before
- **Backward Compatibility**: Existing workflows remain unchanged
- **Silent Process**: Migration happens transparently on first use with a friendly notification

## Core Task Properties

Each task within a tag context contains the following properties:

### Required Properties

- **`id`** (number): Unique identifier within the tag context

  ```json
  "id": 1
  ```

- **`title`** (string): Brief, descriptive title

  ```json
  "title": "Implement user authentication"
  ```

- **`description`** (string): Concise summary of what the task involves

  ```json
  "description": "Create a secure authentication system using JWT tokens"
  ```

- **`status`** (string): Current state of the task
  - Valid values: `"pending"`, `"in-progress"`, `"done"`, `"review"`, `"deferred"`, `"cancelled"`
  ```json
  "status": "pending"
  ```

### Optional Properties

- **`dependencies`** (array): IDs of prerequisite tasks that must be completed first

  ```json
  "dependencies": [2, 3]
  ```

- **`priority`** (string): Importance level

  - Valid values: `"high"`, `"medium"`, `"low"`
  - Default: `"medium"`

  ```json
  "priority": "high"
  ```

- **`details`** (string): In-depth implementation instructions

  ```json
  "details": "Use GitHub OAuth client ID/secret, handle callback, set session token"
  ```

- **`testStrategy`** (string): Verification approach

  ```json
  "testStrategy": "Deploy and call endpoint to confirm authentication flow"
  ```

- **`subtasks`** (array): List of smaller, more specific tasks
  ```json
  "subtasks": [
    {
      "id": 1,
      "title": "Configure OAuth",
      "description": "Set up OAuth configuration",
      "status": "pending",
      "dependencies": [],
      "details": "Configure GitHub OAuth app and store credentials"
    }
  ]
  ```

## Subtask Structure

Subtasks follow a similar structure to main tasks but with some differences:

### Subtask Properties

- **`id`** (number): Unique identifier within the parent task
- **`title`** (string): Brief, descriptive title
- **`description`** (string): Concise summary of the subtask
- **`status`** (string): Current state (same values as main tasks)
- **`dependencies`** (array): Can reference other subtasks or main task IDs
- **`details`** (string): Implementation instructions and notes

### Subtask Example

```json
{
  "id": 2,
  "title": "Handle OAuth callback",
  "description": "Process the OAuth callback and extract user data",
  "status": "pending",
  "dependencies": [1],
  "details": "Parse callback parameters, exchange code for token, fetch user profile"
}
```

## Complete Example

Here's a complete example showing the tagged task structure:

```json
{
  "master": {
    "tasks": [
      {
        "id": 1,
        "title": "Setup Express Server",
        "description": "Initialize and configure Express.js server with middleware",
        "status": "done",
        "dependencies": [],
        "priority": "high",
        "details": "Create Express app with CORS, body parser, and error handling",
        "testStrategy": "Start server and verify health check endpoint responds",
        "subtasks": [
          {
            "id": 1,
            "title": "Initialize npm project",
            "description": "Set up package.json and install dependencies",
            "status": "done",
            "dependencies": [],
            "details": "Run npm init, install express, cors, body-parser"
          },
          {
            "id": 2,
            "title": "Configure middleware",
            "description": "Set up CORS and body parsing middleware",
            "status": "done",
            "dependencies": [1],
            "details": "Add app.use() calls for cors() and express.json()"
          }
        ]
      },
      {
        "id": 2,
        "title": "Implement user authentication",
        "description": "Create secure authentication system",
        "status": "pending",
        "dependencies": [1],
        "priority": "high",
        "details": "Use JWT tokens for session management",
        "testStrategy": "Test login/logout flow with valid and invalid credentials",
        "subtasks": []
      }
    ]
  },
  "feature-auth": {
    "tasks": [
      {
        "id": 1,
        "title": "OAuth Integration",
        "description": "Add OAuth authentication support",
        "status": "pending",
        "dependencies": [],
        "priority": "medium",
        "details": "Integrate with GitHub OAuth for user authentication",
        "testStrategy": "Test OAuth flow with GitHub account",
        "subtasks": []
      }
    ]
  }
}
```

## Tag Context Management

### Current Tag Resolution

Task Master automatically determines the current tag context based on:

1. **State Configuration**: Current tag stored in `.taskmaster/state.json`
2. **Default Fallback**: "master" tag when no context is specified
3. **Future Enhancement**: Git branch-based tag switching (Part 2)

### Tag Isolation

- **Context Separation**: Tasks in different tags are completely isolated
- **Independent Numbering**: Each tag has its own task ID sequence starting from 1
- **Parallel Development**: Multiple team members can work on separate tags without conflicts

## Data Validation

Task Master validates the following aspects of task data:

### Required Validations

- **Unique IDs**: Task IDs must be unique within each tag context
- **Valid Status**: Status values must be from the allowed set
- **Dependency References**: Dependencies must reference existing task IDs within the same tag
- **Subtask IDs**: Subtask IDs must be unique within their parent task

### Optional Validations

- **Circular Dependencies**: System detects and prevents circular dependency chains
- **Priority Values**: Priority must be one of the allowed values if specified
- **Data Types**: All properties must match their expected data types

## File Generation

Task Master can generate individual markdown files for each task based on the JSON structure. These files include:

- **Task Overview**: ID, title, status, dependencies
- **Tag Context**: Which tag the task belongs to
- **Implementation Details**: Full task details and test strategy
- **Subtask Breakdown**: All subtasks with their current status
- **Dependency Status**: Visual indicators showing which dependencies are complete

## Migration Process

When Task Master encounters a legacy format `tasks.json` file:

1. **Detection**: Automatically detects `{"tasks": [...]}` format
2. **Transformation**: Converts to `{"master": {"tasks": [...]}}` format
3. **Configuration**: Updates `.taskmaster/config.json` with tagged system settings
4. **State Creation**: Creates `.taskmaster/state.json` for tag management
5. **Notification**: Shows one-time friendly notice about the new system
6. **Preservation**: All existing task data is preserved exactly as-is

## Best Practices

### Task Organization

- **Logical Grouping**: Use tags to group related tasks (e.g., by feature, branch, or milestone)
- **Clear Titles**: Use descriptive titles that explain the task's purpose
- **Proper Dependencies**: Define dependencies to ensure correct execution order
- **Detailed Instructions**: Include sufficient detail in the `details` field for implementation

### Tag Management

- **Meaningful Names**: Use descriptive tag names that reflect their purpose
- **Consistent Naming**: Establish naming conventions for tags (e.g., branch names, feature names)
- **Context Switching**: Be aware of which tag context you're working in
- **Isolation Benefits**: Leverage tag isolation to prevent merge conflicts

### Subtask Design

- **Granular Tasks**: Break down complex tasks into manageable subtasks
- **Clear Dependencies**: Define subtask dependencies to show implementation order
- **Implementation Notes**: Use subtask details to track progress and decisions
- **Status Tracking**: Keep subtask status updated as work progresses

```

--------------------------------------------------------------------------------
/tests/unit/scripts/modules/task-manager/set-task-status.test.js:
--------------------------------------------------------------------------------

```javascript
/**
 * Tests for the set-task-status.js module
 */
import { jest } from '@jest/globals';

// Mock the dependencies before importing the module under test.
// utils.js is the primary collaborator: file IO, logging, and task lookup.
jest.unstable_mockModule('../../../../../scripts/modules/utils.js', () => ({
	readJSON: jest.fn(),
	writeJSON: jest.fn(),
	log: jest.fn(),
	CONFIG: {
		model: 'mock-claude-model',
		maxTokens: 4000,
		temperature: 0.7,
		debug: false
	},
	sanitizePrompt: jest.fn((prompt) => prompt),
	truncate: jest.fn((text) => text),
	isSilentMode: jest.fn(() => false),
	// Minimal stand-in for the real helper: match a task by numeric id.
	// Radix 10 is passed explicitly so ids like '08' parse as decimal.
	findTaskById: jest.fn((tasks, id) =>
		tasks.find((t) => t.id === parseInt(id, 10))
	),
	ensureTagMetadata: jest.fn((tagObj) => tagObj),
	getCurrentTag: jest.fn(() => 'master')
}));

// Stub file generation so tests never touch the real filesystem.
jest.unstable_mockModule(
	'../../../../../scripts/modules/task-manager/generate-task-files.js',
	() => ({
		default: jest.fn().mockResolvedValue()
	})
);

// UI helpers become no-ops; getStatusWithColor passes status through unchanged.
jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({
	formatDependenciesWithStatus: jest.fn(),
	displayBanner: jest.fn(),
	displayTaskList: jest.fn(),
	startLoadingIndicator: jest.fn(() => ({ stop: jest.fn() })),
	stopLoadingIndicator: jest.fn(),
	getStatusWithColor: jest.fn((status) => status)
}));

// Mirror the task-status constants so status validation behaves normally.
jest.unstable_mockModule('../../../../../src/constants/task-status.js', () => ({
	isValidTaskStatus: jest.fn((status) =>
		[
			'pending',
			'done',
			'in-progress',
			'review',
			'deferred',
			'cancelled'
		].includes(status)
	),
	TASK_STATUS_OPTIONS: [
		'pending',
		'done',
		'in-progress',
		'review',
		'deferred',
		'cancelled'
	]
}));

// The per-task status updater is mocked so individual tests can control it.
jest.unstable_mockModule(
	'../../../../../scripts/modules/task-manager/update-single-task-status.js',
	() => ({
		default: jest.fn()
	})
);

// Dependency validation is stubbed out; it is not under test here.
jest.unstable_mockModule(
	'../../../../../scripts/modules/dependency-manager.js',
	() => ({
		validateTaskDependencies: jest.fn()
	})
);

// Config lookups: debug mode is always off in tests.
jest.unstable_mockModule(
	'../../../../../scripts/modules/config-manager.js',
	() => ({
		getDebugFlag: jest.fn(() => false)
	})
);

// Import the mocked modules
const { readJSON, writeJSON, log, findTaskById } = await import(
	'../../../../../scripts/modules/utils.js'
);

const generateTaskFiles = (
	await import(
		'../../../../../scripts/modules/task-manager/generate-task-files.js'
	)
).default;

const updateSingleTaskStatus = (
	await import(
		'../../../../../scripts/modules/task-manager/update-single-task-status.js'
	)
).default;

// Import the module under test
const { default: setTaskStatus } = await import(
	'../../../../../scripts/modules/task-manager/set-task-status.js'
);

// Sample data for tests (from main test file) - TAGGED FORMAT
const sampleTasks = {
	master: {
		tasks: [
			{
				id: 1,
				title: 'Task 1',
				description: 'First task description',
				status: 'pending',
				dependencies: [],
				priority: 'high',
				details: 'Detailed information for task 1',
				testStrategy: 'Test strategy for task 1'
			},
			{
				id: 2,
				title: 'Task 2',
				description: 'Second task description',
				status: 'pending',
				dependencies: [1],
				priority: 'medium',
				details: 'Detailed information for task 2',
				testStrategy: 'Test strategy for task 2'
			},
			{
				id: 3,
				title: 'Task with Subtasks',
				description: 'Task with subtasks description',
				status: 'pending',
				dependencies: [1, 2],
				priority: 'high',
				details: 'Detailed information for task 3',
				testStrategy: 'Test strategy for task 3',
				subtasks: [
					{
						id: 1,
						title: 'Subtask 1',
						description: 'First subtask',
						status: 'pending',
						dependencies: [],
						details: 'Details for subtask 1'
					},
					{
						id: 2,
						title: 'Subtask 2',
						description: 'Second subtask',
						status: 'pending',
						dependencies: [1],
						details: 'Details for subtask 2'
					}
				]
			}
		]
	}
};

// Test suite for setTaskStatus. Each test deep-clones sampleTasks, mocks
// readJSON to return a "resolved view" (the tag's tasks plus the raw tagged
// data under `_rawTaggedData`), then asserts on the writeJSON call.
describe('setTaskStatus', () => {
	beforeEach(() => {
		jest.clearAllMocks();

		// Mock console methods to suppress output
		jest.spyOn(console, 'log').mockImplementation(() => {});
		jest.spyOn(console, 'error').mockImplementation(() => {});

		// Mock process.exit to prevent actual exit
		jest.spyOn(process, 'exit').mockImplementation((code) => {
			throw new Error(`process.exit: ${code}`);
		});

		// Set up updateSingleTaskStatus mock to actually update the data.
		// It mutates the tasks array in place: dot notation ("3.1") targets a
		// subtask, a plain id targets a task, and marking a parent 'done'
		// cascades 'done' to all of its subtasks.
		updateSingleTaskStatus.mockImplementation(
			async (tasksPath, taskId, newStatus, data) => {
				// This mock now operates on the tasks array passed in the `data` object
				const { tasks } = data;
				// Handle subtask notation (e.g., "3.1")
				if (taskId.includes('.')) {
					const [parentId, subtaskId] = taskId
						.split('.')
						.map((id) => parseInt(id, 10));
					const parentTask = tasks.find((t) => t.id === parentId);
					if (!parentTask) {
						throw new Error(`Parent task ${parentId} not found`);
					}
					if (!parentTask.subtasks) {
						throw new Error(`Parent task ${parentId} has no subtasks`);
					}
					const subtask = parentTask.subtasks.find((st) => st.id === subtaskId);
					if (!subtask) {
						throw new Error(
							`Subtask ${subtaskId} not found in parent task ${parentId}`
						);
					}
					subtask.status = newStatus;
				} else {
					// Handle regular task
					const task = tasks.find((t) => t.id === parseInt(taskId, 10));
					if (!task) {
						throw new Error(`Task ${taskId} not found`);
					}
					task.status = newStatus;

					// If marking parent as done, mark all subtasks as done too
					if (newStatus === 'done' && task.subtasks) {
						task.subtasks.forEach((subtask) => {
							subtask.status = 'done';
						});
					}
				}
			}
		);
	});

	afterEach(() => {
		// Restore console methods
		jest.restoreAllMocks();
	});

	test('should update task status in tasks.json', async () => {
		// Arrange
		const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
		const tasksPath = '/mock/path/tasks.json';

		// readJSON returns the resolved 'master' view plus the raw tagged data
		readJSON.mockReturnValue({
			...testTasksData.master,
			tag: 'master',
			_rawTaggedData: testTasksData
		});

		// Act
		await setTaskStatus(tasksPath, '2', 'done', {
			tag: 'master',
			mcpLog: { info: jest.fn() }
		});

		// Assert: writeJSON receives the full tagged structure, not the view
		expect(readJSON).toHaveBeenCalledWith(tasksPath, undefined, 'master');
		expect(writeJSON).toHaveBeenCalledWith(
			tasksPath,
			expect.objectContaining({
				master: expect.objectContaining({
					tasks: expect.arrayContaining([
						expect.objectContaining({ id: 2, status: 'done' })
					])
				})
			}),
			undefined,
			'master'
		);
		// expect(generateTaskFiles).toHaveBeenCalledWith(
		// 	tasksPath,
		// 	expect.any(String),
		// 	expect.any(Object)
		// );
	});

	test('should update subtask status when using dot notation', async () => {
		// Arrange
		const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
		const tasksPath = '/mock/path/tasks.json';

		readJSON.mockReturnValue({
			...testTasksData.master,
			tag: 'master',
			_rawTaggedData: testTasksData
		});

		// Act: "3.1" addresses subtask 1 of task 3
		await setTaskStatus(tasksPath, '3.1', 'done', {
			tag: 'master',
			mcpLog: { info: jest.fn() }
		});

		// Assert
		expect(readJSON).toHaveBeenCalledWith(tasksPath, undefined, 'master');
		expect(writeJSON).toHaveBeenCalledWith(
			tasksPath,
			expect.objectContaining({
				master: expect.objectContaining({
					tasks: expect.arrayContaining([
						expect.objectContaining({
							id: 3,
							subtasks: expect.arrayContaining([
								expect.objectContaining({ id: 1, status: 'done' })
							])
						})
					])
				})
			}),
			undefined,
			'master'
		);
	});

	test('should update multiple tasks when given comma-separated IDs', async () => {
		// Arrange
		const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
		const tasksPath = '/mock/path/tasks.json';

		readJSON.mockReturnValue({
			...testTasksData.master,
			tag: 'master',
			_rawTaggedData: testTasksData
		});

		// Act
		await setTaskStatus(tasksPath, '1,2', 'done', {
			tag: 'master',
			mcpLog: { info: jest.fn() }
		});

		// Assert
		expect(readJSON).toHaveBeenCalledWith(tasksPath, undefined, 'master');
		expect(writeJSON).toHaveBeenCalledWith(
			tasksPath,
			expect.objectContaining({
				master: expect.objectContaining({
					tasks: expect.arrayContaining([
						expect.objectContaining({ id: 1, status: 'done' }),
						expect.objectContaining({ id: 2, status: 'done' })
					])
				})
			}),
			undefined,
			'master'
		);
	});

	test('should automatically mark subtasks as done when parent is marked done', async () => {
		// Arrange
		const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
		const tasksPath = '/mock/path/tasks.json';

		readJSON.mockReturnValue({
			...testTasksData.master,
			tag: 'master',
			_rawTaggedData: testTasksData
		});

		// Act: marking parent task 3 'done' should cascade to its subtasks
		// (cascade behavior is implemented in the updateSingleTaskStatus mock)
		await setTaskStatus(tasksPath, '3', 'done', {
			tag: 'master',
			mcpLog: { info: jest.fn() }
		});

		// Assert
		expect(writeJSON).toHaveBeenCalledWith(
			tasksPath,
			expect.objectContaining({
				master: expect.objectContaining({
					tasks: expect.arrayContaining([
						expect.objectContaining({
							id: 3,
							status: 'done',
							subtasks: expect.arrayContaining([
								expect.objectContaining({ id: 1, status: 'done' }),
								expect.objectContaining({ id: 2, status: 'done' })
							])
						})
					])
				})
			}),
			undefined,
			'master'
		);
	});

	test('should throw error for non-existent task ID', async () => {
		// Arrange
		const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
		const tasksPath = '/mock/path/tasks.json';

		readJSON.mockReturnValue({
			...testTasksData.master,
			tag: 'master',
			_rawTaggedData: testTasksData
		});

		// Act & Assert: error message originates from the mock implementation
		await expect(
			setTaskStatus(tasksPath, '99', 'done', {
				tag: 'master',
				mcpLog: { info: jest.fn() }
			})
		).rejects.toThrow('Task 99 not found');
	});

	test('should throw error for invalid status', async () => {
		// Arrange
		const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
		const tasksPath = '/mock/path/tasks.json';

		readJSON.mockReturnValue({
			...testTasksData.master,
			tag: 'master',
			_rawTaggedData: testTasksData
		});

		// Act & Assert: 'InvalidStatus' is rejected by the mocked
		// isValidTaskStatus before any task lookup happens
		await expect(
			setTaskStatus(tasksPath, '2', 'InvalidStatus', {
				mcpLog: { info: jest.fn() }
			})
		).rejects.toThrow(/Invalid status value: InvalidStatus/);
	});

	test('should handle parent tasks without subtasks when updating subtask', async () => {
		// Arrange
		const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
		// Remove subtasks from task 3
		const { subtasks, ...taskWithoutSubtasks } = testTasksData.master.tasks[2];
		testTasksData.master.tasks[2] = taskWithoutSubtasks;

		const tasksPath = '/mock/path/tasks.json';
		readJSON.mockReturnValue({
			...testTasksData.master,
			tag: 'master',
			_rawTaggedData: testTasksData
		});

		// Act & Assert
		await expect(
			setTaskStatus(tasksPath, '3.1', 'done', {
				tag: 'master',
				mcpLog: { info: jest.fn() }
			})
		).rejects.toThrow('has no subtasks');
	});

	test('should handle non-existent subtask ID', async () => {
		// Arrange
		const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
		const tasksPath = '/mock/path/tasks.json';

		readJSON.mockReturnValue({
			...testTasksData.master,
			tag: 'master',
			_rawTaggedData: testTasksData
		});

		// Act & Assert
		await expect(
			setTaskStatus(tasksPath, '3.99', 'done', {
				tag: 'master',
				mcpLog: { info: jest.fn() }
			})
		).rejects.toThrow('Subtask 99 not found');
	});

	test('should handle file read errors', async () => {
		// Arrange
		const tasksPath = 'tasks/tasks.json';
		const taskId = '2';
		const newStatus = 'done';

		readJSON.mockImplementation(() => {
			throw new Error('File not found');
		});

		// Act & Assert: the read error propagates unchanged
		await expect(
			setTaskStatus(tasksPath, taskId, newStatus, {
				mcpLog: { info: jest.fn() }
			})
		).rejects.toThrow('File not found');

		// Verify that writeJSON was not called due to read error
		expect(writeJSON).not.toHaveBeenCalled();
	});

	test('should handle empty task ID input', async () => {
		// Arrange
		const tasksPath = 'tasks/tasks.json';
		const emptyTaskId = '';
		const newStatus = 'done';

		// Act & Assert: any rejection is acceptable; the key invariant is
		// that no per-task update was attempted
		await expect(
			setTaskStatus(tasksPath, emptyTaskId, newStatus, {
				mcpLog: { info: jest.fn() }
			})
		).rejects.toThrow();

		// Verify that updateSingleTaskStatus was not called
		expect(updateSingleTaskStatus).not.toHaveBeenCalled();
	});

	test('should handle whitespace in comma-separated IDs', async () => {
		// Arrange
		const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
		const tasksPath = 'tasks/tasks.json';
		const taskIds = ' 1 , 2 , 3 '; // IDs with whitespace
		const newStatus = 'in-progress';

		readJSON.mockReturnValue({
			...testTasksData.master,
			tag: 'master',
			_rawTaggedData: testTasksData
		});

		// Act
		const result = await setTaskStatus(tasksPath, taskIds, newStatus, {
			tag: 'master',
			mcpLog: { info: jest.fn() }
		});

		// Assert: each id is trimmed before being passed through
		expect(updateSingleTaskStatus).toHaveBeenCalledTimes(3);
		expect(updateSingleTaskStatus).toHaveBeenCalledWith(
			tasksPath,
			'1',
			newStatus,
			expect.objectContaining({
				tasks: expect.any(Array),
				tag: 'master',
				_rawTaggedData: expect.any(Object)
			}),
			false
		);
		expect(updateSingleTaskStatus).toHaveBeenCalledWith(
			tasksPath,
			'2',
			newStatus,
			expect.objectContaining({
				tasks: expect.any(Array),
				tag: 'master',
				_rawTaggedData: expect.any(Object)
			}),
			false
		);
		expect(updateSingleTaskStatus).toHaveBeenCalledWith(
			tasksPath,
			'3',
			newStatus,
			expect.objectContaining({
				tasks: expect.any(Array),
				tag: 'master',
				_rawTaggedData: expect.any(Object)
			}),
			false
		);
		expect(result).toBeDefined();
	});

	// Regression test to ensure tag preservation when updating in multi-tag environment
	test('should preserve other tags when updating task status', async () => {
		// Arrange
		const multiTagData = {
			master: JSON.parse(JSON.stringify(sampleTasks.master)),
			'feature-branch': {
				tasks: [
					{ id: 10, title: 'FB Task', status: 'pending', dependencies: [] }
				],
				metadata: { description: 'Feature branch tasks' }
			}
		};
		const tasksPath = '/mock/path/tasks.json';

		readJSON.mockReturnValue({
			...multiTagData.master, // resolved view not used
			tag: 'master',
			_rawTaggedData: multiTagData
		});

		// Act
		await setTaskStatus(tasksPath, '1', 'done', {
			tag: 'master',
			mcpLog: { info: jest.fn() }
		});

		// Assert: writeJSON should be called with data containing both tags intact
		const writeArgs = writeJSON.mock.calls[0];
		expect(writeArgs[0]).toBe(tasksPath);
		const writtenData = writeArgs[1];
		expect(writtenData).toHaveProperty('master');
		expect(writtenData).toHaveProperty('feature-branch');
		// master task updated
		const updatedTask = writtenData.master.tasks.find((t) => t.id === 1);
		expect(updatedTask.status).toBe('done');
		// feature-branch untouched
		expect(writtenData['feature-branch'].tasks[0].status).toBe('pending');
		// ensure additional args (projectRoot undefined, tag 'master') present
		expect(writeArgs[2]).toBeUndefined();
		expect(writeArgs[3]).toBe('master');
	});
});

```

--------------------------------------------------------------------------------
/tests/integration/manage-gitignore.test.js:
--------------------------------------------------------------------------------

```javascript
/**
 * Integration tests for manage-gitignore.js module
 * Tests actual file system operations in a temporary directory
 */

import fs from 'fs';
import path from 'path';
import os from 'os';
import manageGitignoreFile from '../../src/utils/manage-gitignore.js';

// Integration suite: exercises manageGitignoreFile against a real temp
// directory. NOTE: the trailing space in the literal "tasks/ " template
// entries is intentional — the regex assertions below (which end in
// "tasks\/ /") match it literally, so keep the whitespace byte-for-byte.
describe('manage-gitignore.js Integration Tests', () => {
	let tempDir;
	let testGitignorePath;

	beforeEach(() => {
		// Create a temporary directory for each test
		tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'gitignore-test-'));
		testGitignorePath = path.join(tempDir, '.gitignore');
	});

	afterEach(() => {
		// Clean up temporary directory after each test
		if (fs.existsSync(tempDir)) {
			fs.rmSync(tempDir, { recursive: true, force: true });
		}
	});

	describe('New File Creation', () => {
		const templateContent = `# Logs
logs
*.log
npm-debug.log*

# Dependencies
node_modules/
jspm_packages/

# Environment variables
.env
.env.local

# Task files
tasks.json
tasks/ `;

		test('should create new .gitignore file with commented task lines (storeTasksInGit = true)', () => {
			const logs = [];
			const mockLog = (level, message) => logs.push({ level, message });

			manageGitignoreFile(testGitignorePath, templateContent, true, mockLog);

			// Verify file was created
			expect(fs.existsSync(testGitignorePath)).toBe(true);

			// Verify content
			const content = fs.readFileSync(testGitignorePath, 'utf8');
			expect(content).toContain('# Logs');
			expect(content).toContain('logs');
			expect(content).toContain('# Dependencies');
			expect(content).toContain('node_modules/');
			expect(content).toContain('# Task files');
			expect(content).toContain('tasks.json');
			expect(content).toContain('tasks/');

			// Verify task lines are commented (storeTasksInGit = true)
			expect(content).toMatch(
				/# Task files\s*[\r\n]+# tasks\.json\s*[\r\n]+# tasks\/ /
			);

			// Verify log message
			expect(logs).toContainEqual({
				level: 'success',
				message: expect.stringContaining('Created')
			});
		});

		test('should create new .gitignore file with uncommented task lines (storeTasksInGit = false)', () => {
			const logs = [];
			const mockLog = (level, message) => logs.push({ level, message });

			manageGitignoreFile(testGitignorePath, templateContent, false, mockLog);

			// Verify file was created
			expect(fs.existsSync(testGitignorePath)).toBe(true);

			// Verify content
			const content = fs.readFileSync(testGitignorePath, 'utf8');
			expect(content).toContain('# Task files');

			// Verify task lines are uncommented (storeTasksInGit = false)
			expect(content).toMatch(
				/# Task files\s*[\r\n]+tasks\.json\s*[\r\n]+tasks\/ /
			);

			// Verify log message
			expect(logs).toContainEqual({
				level: 'success',
				message: expect.stringContaining('Created')
			});
		});

		test('should work without log function', () => {
			// The log callback is optional — omitting it must not throw
			expect(() => {
				manageGitignoreFile(testGitignorePath, templateContent, false);
			}).not.toThrow();

			expect(fs.existsSync(testGitignorePath)).toBe(true);
		});
	});

	describe('File Merging', () => {
		const templateContent = `# Logs
logs
*.log

# Dependencies
node_modules/

# Environment variables
.env

# Task files
tasks.json
tasks/ `;

		test('should merge template with existing file content', () => {
			// Create existing .gitignore file
			const existingContent = `# Existing content
old-files.txt
*.backup

# Old task files (to be replaced)
# Task files
# tasks.json
# tasks/ 

# More existing content
cache/`;

			fs.writeFileSync(testGitignorePath, existingContent);

			const logs = [];
			const mockLog = (level, message) => logs.push({ level, message });

			manageGitignoreFile(testGitignorePath, templateContent, false, mockLog);

			// Verify file still exists
			expect(fs.existsSync(testGitignorePath)).toBe(true);

			const content = fs.readFileSync(testGitignorePath, 'utf8');

			// Should retain existing non-task content
			expect(content).toContain('# Existing content');
			expect(content).toContain('old-files.txt');
			expect(content).toContain('*.backup');
			expect(content).toContain('# More existing content');
			expect(content).toContain('cache/');

			// Should add new template content
			expect(content).toContain('# Logs');
			expect(content).toContain('logs');
			expect(content).toContain('# Dependencies');
			expect(content).toContain('node_modules/');
			expect(content).toContain('# Environment variables');
			expect(content).toContain('.env');

			// Should replace task section with new preference (storeTasksInGit = false means uncommented)
			expect(content).toMatch(
				/# Task files\s*[\r\n]+tasks\.json\s*[\r\n]+tasks\/ /
			);

			// Verify log message
			expect(logs).toContainEqual({
				level: 'success',
				message: expect.stringContaining('Updated')
			});
		});

		test('should handle switching task preferences from commented to uncommented', () => {
			// Create existing file with commented task lines
			const existingContent = `# Existing
existing.txt

# Task files
# tasks.json
# tasks/ `;

			fs.writeFileSync(testGitignorePath, existingContent);

			// Update with storeTasksInGit = true (commented)
			manageGitignoreFile(testGitignorePath, templateContent, true);

			const content = fs.readFileSync(testGitignorePath, 'utf8');

			// Should retain existing content
			expect(content).toContain('# Existing');
			expect(content).toContain('existing.txt');

			// Should have commented task lines (storeTasksInGit = true)
			expect(content).toMatch(
				/# Task files\s*[\r\n]+# tasks\.json\s*[\r\n]+# tasks\/ /
			);
		});

		test('should handle switching task preferences from uncommented to commented', () => {
			// Create existing file with uncommented task lines
			const existingContent = `# Existing
existing.txt

# Task files
tasks.json
tasks/ `;

			fs.writeFileSync(testGitignorePath, existingContent);

			// Update with storeTasksInGit = false (uncommented)
			manageGitignoreFile(testGitignorePath, templateContent, false);

			const content = fs.readFileSync(testGitignorePath, 'utf8');

			// Should retain existing content
			expect(content).toContain('# Existing');
			expect(content).toContain('existing.txt');

			// Should have uncommented task lines (storeTasksInGit = false)
			expect(content).toMatch(
				/# Task files\s*[\r\n]+tasks\.json\s*[\r\n]+tasks\/ /
			);
		});

		test('should not duplicate existing template content', () => {
			// Create existing file that already has some template content
			const existingContent = `# Logs
logs
*.log

# Dependencies
node_modules/

# Custom content
custom.txt

# Task files
# tasks.json
# tasks/ `;

			fs.writeFileSync(testGitignorePath, existingContent);

			manageGitignoreFile(testGitignorePath, templateContent, false);

			const content = fs.readFileSync(testGitignorePath, 'utf8');

			// Should not duplicate logs section
			const logsMatches = content.match(/# Logs/g);
			expect(logsMatches).toHaveLength(1);

			// Should not duplicate dependencies section
			const depsMatches = content.match(/# Dependencies/g);
			expect(depsMatches).toHaveLength(1);

			// Should retain custom content
			expect(content).toContain('# Custom content');
			expect(content).toContain('custom.txt');

			// Should add new template content that wasn't present
			expect(content).toContain('# Environment variables');
			expect(content).toContain('.env');
		});

		test('should handle empty existing file', () => {
			// Create empty file
			fs.writeFileSync(testGitignorePath, '');

			manageGitignoreFile(testGitignorePath, templateContent, false);

			expect(fs.existsSync(testGitignorePath)).toBe(true);

			const content = fs.readFileSync(testGitignorePath, 'utf8');
			expect(content).toContain('# Logs');
			expect(content).toContain('# Task files');
			expect(content).toMatch(
				/# Task files\s*[\r\n]+tasks\.json\s*[\r\n]+tasks\/ /
			);
		});

		test('should handle file with only whitespace', () => {
			// Create file with only whitespace
			fs.writeFileSync(testGitignorePath, '   \n\n  \n');

			manageGitignoreFile(testGitignorePath, templateContent, true);

			const content = fs.readFileSync(testGitignorePath, 'utf8');
			expect(content).toContain('# Logs');
			expect(content).toContain('# Task files');
			expect(content).toMatch(
				/# Task files\s*[\r\n]+# tasks\.json\s*[\r\n]+# tasks\/ /
			);
		});
	});

	describe('Complex Task Section Handling', () => {
		test('should remove task section with mixed comments and spacing', () => {
			// Existing task section mixes commented/uncommented lines and
			// stray blank lines — it should be replaced wholesale.
			const existingContent = `# Dependencies
node_modules/

# Task files

# tasks.json
tasks/


# More content
more.txt`;

			const templateContent = `# New content
new.txt

# Task files
tasks.json
tasks/ `;

			fs.writeFileSync(testGitignorePath, existingContent);

			manageGitignoreFile(testGitignorePath, templateContent, false);

			const content = fs.readFileSync(testGitignorePath, 'utf8');

			// Should retain non-task content
			expect(content).toContain('# Dependencies');
			expect(content).toContain('node_modules/');
			expect(content).toContain('# More content');
			expect(content).toContain('more.txt');

			// Should add new content
			expect(content).toContain('# New content');
			expect(content).toContain('new.txt');

			// Should have clean task section (storeTasksInGit = false means uncommented)
			expect(content).toMatch(
				/# Task files\s*[\r\n]+tasks\.json\s*[\r\n]+tasks\/ /
			);
		});

		test('should handle multiple task file variations', () => {
			// Duplicated/variant task entries (with and without '#', with
			// trailing spaces) should collapse to a single clean section.
			const existingContent = `# Existing
existing.txt

# Task files
tasks.json
# tasks.json  
# tasks/ 
tasks/ 
#tasks.json

# More content
more.txt`;

			const templateContent = `# Task files
tasks.json
tasks/ `;

			fs.writeFileSync(testGitignorePath, existingContent);

			manageGitignoreFile(testGitignorePath, templateContent, true);

			const content = fs.readFileSync(testGitignorePath, 'utf8');

			// Should retain non-task content
			expect(content).toContain('# Existing');
			expect(content).toContain('existing.txt');
			expect(content).toContain('# More content');
			expect(content).toContain('more.txt');

			// Should have clean task section with preference applied (storeTasksInGit = true means commented)
			expect(content).toMatch(
				/# Task files\s*[\r\n]+# tasks\.json\s*[\r\n]+# tasks\/ /
			);

			// Should not have multiple task sections
			const taskFileMatches = content.match(/# Task files/g);
			expect(taskFileMatches).toHaveLength(1);
		});
	});

	describe('Error Handling', () => {
		// NOTE(review): these chmod-based tests rely on the OS enforcing file
		// permissions; they may not behave as expected when run as root or on
		// Windows — confirm the CI environment before relying on them.
		test('should handle permission errors gracefully', () => {
			// Create a directory where we would create the file, then remove write permissions
			const readOnlyDir = path.join(tempDir, 'readonly');
			fs.mkdirSync(readOnlyDir);
			fs.chmodSync(readOnlyDir, 0o444); // Read-only

			const readOnlyGitignorePath = path.join(readOnlyDir, '.gitignore');
			const templateContent = `# Test
test.txt

# Task files
tasks.json
tasks/ `;

			const logs = [];
			const mockLog = (level, message) => logs.push({ level, message });

			expect(() => {
				manageGitignoreFile(
					readOnlyGitignorePath,
					templateContent,
					false,
					mockLog
				);
			}).toThrow();

			// Verify error was logged
			expect(logs).toContainEqual({
				level: 'error',
				message: expect.stringContaining('Failed to create')
			});

			// Restore permissions for cleanup
			fs.chmodSync(readOnlyDir, 0o755);
		});

		test('should handle read errors on existing files', () => {
			// Create a file then remove read permissions
			fs.writeFileSync(testGitignorePath, 'existing content');
			fs.chmodSync(testGitignorePath, 0o000); // No permissions

			const templateContent = `# Test
test.txt

# Task files
tasks.json
tasks/ `;

			const logs = [];
			const mockLog = (level, message) => logs.push({ level, message });

			expect(() => {
				manageGitignoreFile(testGitignorePath, templateContent, false, mockLog);
			}).toThrow();

			// Verify error was logged
			expect(logs).toContainEqual({
				level: 'error',
				message: expect.stringContaining('Failed to merge content')
			});

			// Restore permissions for cleanup
			fs.chmodSync(testGitignorePath, 0o644);
		});
	});

	describe('Real-world Scenarios', () => {
		test('should handle typical Node.js project .gitignore', () => {
			const existingNodeGitignore = `# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*

# Runtime data
pids
*.pid
*.seed
*.pid.lock

# Dependency directories
node_modules/
jspm_packages/

# Optional npm cache directory
.npm

# Output of 'npm pack'
*.tgz

# Yarn Integrity file
.yarn-integrity

# dotenv environment variables file
.env

# next.js build output
.next`;

			const taskMasterTemplate = `# Logs
logs
*.log

# Dependencies  
node_modules/

# Environment variables
.env

# Build output
dist/
build/

# Task files
tasks.json
tasks/ `;

			fs.writeFileSync(testGitignorePath, existingNodeGitignore);

			manageGitignoreFile(testGitignorePath, taskMasterTemplate, false);

			const content = fs.readFileSync(testGitignorePath, 'utf8');

			// Should retain existing Node.js specific entries
			expect(content).toContain('npm-debug.log*');
			expect(content).toContain('yarn-debug.log*');
			expect(content).toContain('*.pid');
			expect(content).toContain('jspm_packages/');
			expect(content).toContain('.npm');
			expect(content).toContain('*.tgz');
			expect(content).toContain('.yarn-integrity');
			expect(content).toContain('.next');

			// Should add new content from template that wasn't present
			expect(content).toContain('dist/');
			expect(content).toContain('build/');

			// Should add task files section with correct preference (storeTasksInGit = false means uncommented)
			expect(content).toMatch(
				/# Task files\s*[\r\n]+tasks\.json\s*[\r\n]+tasks\/ /
			);

			// Should not duplicate common entries
			const nodeModulesMatches = content.match(/node_modules\//g);
			expect(nodeModulesMatches).toHaveLength(1);

			const logsMatches = content.match(/# Logs/g);
			expect(logsMatches).toHaveLength(1);
		});

		test('should handle project with existing task files in git', () => {
			const existingContent = `# Dependencies
node_modules/

# Logs
*.log

# Current task setup - keeping in git
# Task files
tasks.json
tasks/ 

# Build output
dist/`;

			const templateContent = `# New template
# Dependencies
node_modules/

# Task files
tasks.json
tasks/ `;

			fs.writeFileSync(testGitignorePath, existingContent);

			// Change preference to exclude tasks from git (storeTasksInGit = false means uncommented/ignored)
			manageGitignoreFile(testGitignorePath, templateContent, false);

			const content = fs.readFileSync(testGitignorePath, 'utf8');

			// Should retain existing content
			expect(content).toContain('# Dependencies');
			expect(content).toContain('node_modules/');
			expect(content).toContain('# Logs');
			expect(content).toContain('*.log');
			expect(content).toContain('# Build output');
			expect(content).toContain('dist/');

			// Should update task preference to uncommented (storeTasksInGit = false)
			expect(content).toMatch(
				/# Task files\s*[\r\n]+tasks\.json\s*[\r\n]+tasks\/ /
			);
		});
	});
});

```
Page 28/50FirstPrevNextLast