This is page 41 of 50. Use http://codebase.md/eyaltoledano/claude-task-master?lines=false&page={x} to view the full context.
# Directory Structure
```
├── .changeset
│ ├── config.json
│ └── README.md
├── .claude
│ ├── commands
│ │ └── dedupe.md
│ └── TM_COMMANDS_GUIDE.md
├── .claude-plugin
│ └── marketplace.json
├── .coderabbit.yaml
├── .cursor
│ ├── mcp.json
│ └── rules
│ ├── ai_providers.mdc
│ ├── ai_services.mdc
│ ├── architecture.mdc
│ ├── changeset.mdc
│ ├── commands.mdc
│ ├── context_gathering.mdc
│ ├── cursor_rules.mdc
│ ├── dependencies.mdc
│ ├── dev_workflow.mdc
│ ├── git_workflow.mdc
│ ├── glossary.mdc
│ ├── mcp.mdc
│ ├── new_features.mdc
│ ├── self_improve.mdc
│ ├── tags.mdc
│ ├── taskmaster.mdc
│ ├── tasks.mdc
│ ├── telemetry.mdc
│ ├── test_workflow.mdc
│ ├── tests.mdc
│ ├── ui.mdc
│ └── utilities.mdc
├── .cursorignore
├── .env.example
├── .github
│ ├── ISSUE_TEMPLATE
│ │ ├── bug_report.md
│ │ ├── enhancements---feature-requests.md
│ │ └── feedback.md
│ ├── PULL_REQUEST_TEMPLATE
│ │ ├── bugfix.md
│ │ ├── config.yml
│ │ ├── feature.md
│ │ └── integration.md
│ ├── PULL_REQUEST_TEMPLATE.md
│ ├── scripts
│ │ ├── auto-close-duplicates.mjs
│ │ ├── backfill-duplicate-comments.mjs
│ │ ├── check-pre-release-mode.mjs
│ │ ├── parse-metrics.mjs
│ │ ├── release.mjs
│ │ ├── tag-extension.mjs
│ │ ├── utils.mjs
│ │ └── validate-changesets.mjs
│ └── workflows
│ ├── auto-close-duplicates.yml
│ ├── backfill-duplicate-comments.yml
│ ├── ci.yml
│ ├── claude-dedupe-issues.yml
│ ├── claude-docs-trigger.yml
│ ├── claude-docs-updater.yml
│ ├── claude-issue-triage.yml
│ ├── claude.yml
│ ├── extension-ci.yml
│ ├── extension-release.yml
│ ├── log-issue-events.yml
│ ├── pre-release.yml
│ ├── release-check.yml
│ ├── release.yml
│ ├── update-models-md.yml
│ └── weekly-metrics-discord.yml
├── .gitignore
├── .kiro
│ ├── hooks
│ │ ├── tm-code-change-task-tracker.kiro.hook
│ │ ├── tm-complexity-analyzer.kiro.hook
│ │ ├── tm-daily-standup-assistant.kiro.hook
│ │ ├── tm-git-commit-task-linker.kiro.hook
│ │ ├── tm-pr-readiness-checker.kiro.hook
│ │ ├── tm-task-dependency-auto-progression.kiro.hook
│ │ └── tm-test-success-task-completer.kiro.hook
│ ├── settings
│ │ └── mcp.json
│ └── steering
│ ├── dev_workflow.md
│ ├── kiro_rules.md
│ ├── self_improve.md
│ ├── taskmaster_hooks_workflow.md
│ └── taskmaster.md
├── .manypkg.json
├── .mcp.json
├── .npmignore
├── .nvmrc
├── .taskmaster
│ ├── CLAUDE.md
│ ├── config.json
│ ├── docs
│ │ ├── autonomous-tdd-git-workflow.md
│ │ ├── MIGRATION-ROADMAP.md
│ │ ├── prd-tm-start.txt
│ │ ├── prd.txt
│ │ ├── README.md
│ │ ├── research
│ │ │ ├── 2025-06-14_how-can-i-improve-the-scope-up-and-scope-down-comm.md
│ │ │ ├── 2025-06-14_should-i-be-using-any-specific-libraries-for-this.md
│ │ │ ├── 2025-06-14_test-save-functionality.md
│ │ │ ├── 2025-06-14_test-the-fix-for-duplicate-saves-final-test.md
│ │ │ └── 2025-08-01_do-we-need-to-add-new-commands-or-can-we-just-weap.md
│ │ ├── task-template-importing-prd.txt
│ │ ├── tdd-workflow-phase-0-spike.md
│ │ ├── tdd-workflow-phase-1-core-rails.md
│ │ ├── tdd-workflow-phase-1-orchestrator.md
│ │ ├── tdd-workflow-phase-2-pr-resumability.md
│ │ ├── tdd-workflow-phase-3-extensibility-guardrails.md
│ │ ├── test-prd.txt
│ │ └── tm-core-phase-1.txt
│ ├── reports
│ │ ├── task-complexity-report_autonomous-tdd-git-workflow.json
│ │ ├── task-complexity-report_cc-kiro-hooks.json
│ │ ├── task-complexity-report_tdd-phase-1-core-rails.json
│ │ ├── task-complexity-report_tdd-workflow-phase-0.json
│ │ ├── task-complexity-report_test-prd-tag.json
│ │ ├── task-complexity-report_tm-core-phase-1.json
│ │ ├── task-complexity-report.json
│ │ └── tm-core-complexity.json
│ ├── state.json
│ ├── tasks
│ │ ├── task_001_tm-start.txt
│ │ ├── task_002_tm-start.txt
│ │ ├── task_003_tm-start.txt
│ │ ├── task_004_tm-start.txt
│ │ ├── task_007_tm-start.txt
│ │ └── tasks.json
│ └── templates
│ ├── example_prd_rpg.md
│ └── example_prd.md
├── .vscode
│ ├── extensions.json
│ └── settings.json
├── apps
│ ├── cli
│ │ ├── CHANGELOG.md
│ │ ├── package.json
│ │ ├── src
│ │ │ ├── command-registry.ts
│ │ │ ├── commands
│ │ │ │ ├── auth.command.ts
│ │ │ │ ├── autopilot
│ │ │ │ │ ├── abort.command.ts
│ │ │ │ │ ├── commit.command.ts
│ │ │ │ │ ├── complete.command.ts
│ │ │ │ │ ├── index.ts
│ │ │ │ │ ├── next.command.ts
│ │ │ │ │ ├── resume.command.ts
│ │ │ │ │ ├── shared.ts
│ │ │ │ │ ├── start.command.ts
│ │ │ │ │ └── status.command.ts
│ │ │ │ ├── briefs.command.ts
│ │ │ │ ├── context.command.ts
│ │ │ │ ├── export.command.ts
│ │ │ │ ├── list.command.ts
│ │ │ │ ├── models
│ │ │ │ │ ├── custom-providers.ts
│ │ │ │ │ ├── fetchers.ts
│ │ │ │ │ ├── index.ts
│ │ │ │ │ ├── prompts.ts
│ │ │ │ │ ├── setup.ts
│ │ │ │ │ └── types.ts
│ │ │ │ ├── next.command.ts
│ │ │ │ ├── set-status.command.ts
│ │ │ │ ├── show.command.ts
│ │ │ │ ├── start.command.ts
│ │ │ │ └── tags.command.ts
│ │ │ ├── index.ts
│ │ │ ├── lib
│ │ │ │ └── model-management.ts
│ │ │ ├── types
│ │ │ │ └── tag-management.d.ts
│ │ │ ├── ui
│ │ │ │ ├── components
│ │ │ │ │ ├── cardBox.component.ts
│ │ │ │ │ ├── dashboard.component.ts
│ │ │ │ │ ├── header.component.ts
│ │ │ │ │ ├── index.ts
│ │ │ │ │ ├── next-task.component.ts
│ │ │ │ │ ├── suggested-steps.component.ts
│ │ │ │ │ └── task-detail.component.ts
│ │ │ │ ├── display
│ │ │ │ │ ├── messages.ts
│ │ │ │ │ └── tables.ts
│ │ │ │ ├── formatters
│ │ │ │ │ ├── complexity-formatters.ts
│ │ │ │ │ ├── dependency-formatters.ts
│ │ │ │ │ ├── priority-formatters.ts
│ │ │ │ │ ├── status-formatters.spec.ts
│ │ │ │ │ └── status-formatters.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── layout
│ │ │ │ ├── helpers.spec.ts
│ │ │ │ └── helpers.ts
│ │ │ └── utils
│ │ │ ├── auth-helpers.ts
│ │ │ ├── auto-update.ts
│ │ │ ├── brief-selection.ts
│ │ │ ├── display-helpers.ts
│ │ │ ├── error-handler.ts
│ │ │ ├── index.ts
│ │ │ ├── project-root.ts
│ │ │ ├── task-status.ts
│ │ │ ├── ui.spec.ts
│ │ │ └── ui.ts
│ │ ├── tests
│ │ │ ├── integration
│ │ │ │ └── commands
│ │ │ │ └── autopilot
│ │ │ │ └── workflow.test.ts
│ │ │ └── unit
│ │ │ ├── commands
│ │ │ │ ├── autopilot
│ │ │ │ │ └── shared.test.ts
│ │ │ │ ├── list.command.spec.ts
│ │ │ │ └── show.command.spec.ts
│ │ │ └── ui
│ │ │ └── dashboard.component.spec.ts
│ │ ├── tsconfig.json
│ │ └── vitest.config.ts
│ ├── docs
│ │ ├── archive
│ │ │ ├── ai-client-utils-example.mdx
│ │ │ ├── ai-development-workflow.mdx
│ │ │ ├── command-reference.mdx
│ │ │ ├── configuration.mdx
│ │ │ ├── cursor-setup.mdx
│ │ │ ├── examples.mdx
│ │ │ └── Installation.mdx
│ │ ├── best-practices
│ │ │ ├── advanced-tasks.mdx
│ │ │ ├── configuration-advanced.mdx
│ │ │ └── index.mdx
│ │ ├── capabilities
│ │ │ ├── cli-root-commands.mdx
│ │ │ ├── index.mdx
│ │ │ ├── mcp.mdx
│ │ │ ├── rpg-method.mdx
│ │ │ └── task-structure.mdx
│ │ ├── CHANGELOG.md
│ │ ├── command-reference.mdx
│ │ ├── configuration.mdx
│ │ ├── docs.json
│ │ ├── favicon.svg
│ │ ├── getting-started
│ │ │ ├── api-keys.mdx
│ │ │ ├── contribute.mdx
│ │ │ ├── faq.mdx
│ │ │ └── quick-start
│ │ │ ├── configuration-quick.mdx
│ │ │ ├── execute-quick.mdx
│ │ │ ├── installation.mdx
│ │ │ ├── moving-forward.mdx
│ │ │ ├── prd-quick.mdx
│ │ │ ├── quick-start.mdx
│ │ │ ├── requirements.mdx
│ │ │ ├── rules-quick.mdx
│ │ │ └── tasks-quick.mdx
│ │ ├── introduction.mdx
│ │ ├── licensing.md
│ │ ├── logo
│ │ │ ├── dark.svg
│ │ │ ├── light.svg
│ │ │ └── task-master-logo.png
│ │ ├── package.json
│ │ ├── README.md
│ │ ├── style.css
│ │ ├── tdd-workflow
│ │ │ ├── ai-agent-integration.mdx
│ │ │ └── quickstart.mdx
│ │ ├── vercel.json
│ │ └── whats-new.mdx
│ ├── extension
│ │ ├── .vscodeignore
│ │ ├── assets
│ │ │ ├── banner.png
│ │ │ ├── icon-dark.svg
│ │ │ ├── icon-light.svg
│ │ │ ├── icon.png
│ │ │ ├── screenshots
│ │ │ │ ├── kanban-board.png
│ │ │ │ └── task-details.png
│ │ │ └── sidebar-icon.svg
│ │ ├── CHANGELOG.md
│ │ ├── components.json
│ │ ├── docs
│ │ │ ├── extension-CI-setup.md
│ │ │ └── extension-development-guide.md
│ │ ├── esbuild.js
│ │ ├── LICENSE
│ │ ├── package.json
│ │ ├── package.mjs
│ │ ├── package.publish.json
│ │ ├── README.md
│ │ ├── src
│ │ │ ├── components
│ │ │ │ ├── ConfigView.tsx
│ │ │ │ ├── constants.ts
│ │ │ │ ├── TaskDetails
│ │ │ │ │ ├── AIActionsSection.tsx
│ │ │ │ │ ├── DetailsSection.tsx
│ │ │ │ │ ├── PriorityBadge.tsx
│ │ │ │ │ ├── SubtasksSection.tsx
│ │ │ │ │ ├── TaskMetadataSidebar.tsx
│ │ │ │ │ └── useTaskDetails.ts
│ │ │ │ ├── TaskDetailsView.tsx
│ │ │ │ ├── TaskMasterLogo.tsx
│ │ │ │ └── ui
│ │ │ │ ├── badge.tsx
│ │ │ │ ├── breadcrumb.tsx
│ │ │ │ ├── button.tsx
│ │ │ │ ├── card.tsx
│ │ │ │ ├── collapsible.tsx
│ │ │ │ ├── CollapsibleSection.tsx
│ │ │ │ ├── dropdown-menu.tsx
│ │ │ │ ├── label.tsx
│ │ │ │ ├── scroll-area.tsx
│ │ │ │ ├── separator.tsx
│ │ │ │ ├── shadcn-io
│ │ │ │ │ └── kanban
│ │ │ │ │ └── index.tsx
│ │ │ │ └── textarea.tsx
│ │ │ ├── extension.ts
│ │ │ ├── index.ts
│ │ │ ├── lib
│ │ │ │ └── utils.ts
│ │ │ ├── services
│ │ │ │ ├── config-service.ts
│ │ │ │ ├── error-handler.ts
│ │ │ │ ├── notification-preferences.ts
│ │ │ │ ├── polling-service.ts
│ │ │ │ ├── polling-strategies.ts
│ │ │ │ ├── sidebar-webview-manager.ts
│ │ │ │ ├── task-repository.ts
│ │ │ │ ├── terminal-manager.ts
│ │ │ │ └── webview-manager.ts
│ │ │ ├── test
│ │ │ │ └── extension.test.ts
│ │ │ ├── utils
│ │ │ │ ├── configManager.ts
│ │ │ │ ├── connectionManager.ts
│ │ │ │ ├── errorHandler.ts
│ │ │ │ ├── event-emitter.ts
│ │ │ │ ├── logger.ts
│ │ │ │ ├── mcpClient.ts
│ │ │ │ ├── notificationPreferences.ts
│ │ │ │ └── task-master-api
│ │ │ │ ├── cache
│ │ │ │ │ └── cache-manager.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── mcp-client.ts
│ │ │ │ ├── transformers
│ │ │ │ │ └── task-transformer.ts
│ │ │ │ └── types
│ │ │ │ └── index.ts
│ │ │ └── webview
│ │ │ ├── App.tsx
│ │ │ ├── components
│ │ │ │ ├── AppContent.tsx
│ │ │ │ ├── EmptyState.tsx
│ │ │ │ ├── ErrorBoundary.tsx
│ │ │ │ ├── PollingStatus.tsx
│ │ │ │ ├── PriorityBadge.tsx
│ │ │ │ ├── SidebarView.tsx
│ │ │ │ ├── TagDropdown.tsx
│ │ │ │ ├── TaskCard.tsx
│ │ │ │ ├── TaskEditModal.tsx
│ │ │ │ ├── TaskMasterKanban.tsx
│ │ │ │ ├── ToastContainer.tsx
│ │ │ │ └── ToastNotification.tsx
│ │ │ ├── constants
│ │ │ │ └── index.ts
│ │ │ ├── contexts
│ │ │ │ └── VSCodeContext.tsx
│ │ │ ├── hooks
│ │ │ │ ├── useTaskQueries.ts
│ │ │ │ ├── useVSCodeMessages.ts
│ │ │ │ └── useWebviewHeight.ts
│ │ │ ├── index.css
│ │ │ ├── index.tsx
│ │ │ ├── providers
│ │ │ │ └── QueryProvider.tsx
│ │ │ ├── reducers
│ │ │ │ └── appReducer.ts
│ │ │ ├── sidebar.tsx
│ │ │ ├── types
│ │ │ │ └── index.ts
│ │ │ └── utils
│ │ │ ├── logger.ts
│ │ │ └── toast.ts
│ │ └── tsconfig.json
│ └── mcp
│ ├── CHANGELOG.md
│ ├── package.json
│ ├── src
│ │ ├── index.ts
│ │ ├── shared
│ │ │ ├── types.ts
│ │ │ └── utils.ts
│ │ └── tools
│ │ ├── autopilot
│ │ │ ├── abort.tool.ts
│ │ │ ├── commit.tool.ts
│ │ │ ├── complete.tool.ts
│ │ │ ├── finalize.tool.ts
│ │ │ ├── index.ts
│ │ │ ├── next.tool.ts
│ │ │ ├── resume.tool.ts
│ │ │ ├── start.tool.ts
│ │ │ └── status.tool.ts
│ │ ├── README-ZOD-V3.md
│ │ └── tasks
│ │ ├── get-task.tool.ts
│ │ ├── get-tasks.tool.ts
│ │ └── index.ts
│ ├── tsconfig.json
│ └── vitest.config.ts
├── assets
│ ├── .windsurfrules
│ ├── AGENTS.md
│ ├── claude
│ │ └── TM_COMMANDS_GUIDE.md
│ ├── config.json
│ ├── env.example
│ ├── example_prd_rpg.txt
│ ├── example_prd.txt
│ ├── GEMINI.md
│ ├── gitignore
│ ├── kiro-hooks
│ │ ├── tm-code-change-task-tracker.kiro.hook
│ │ ├── tm-complexity-analyzer.kiro.hook
│ │ ├── tm-daily-standup-assistant.kiro.hook
│ │ ├── tm-git-commit-task-linker.kiro.hook
│ │ ├── tm-pr-readiness-checker.kiro.hook
│ │ ├── tm-task-dependency-auto-progression.kiro.hook
│ │ └── tm-test-success-task-completer.kiro.hook
│ ├── roocode
│ │ ├── .roo
│ │ │ ├── rules-architect
│ │ │ │ └── architect-rules
│ │ │ ├── rules-ask
│ │ │ │ └── ask-rules
│ │ │ ├── rules-code
│ │ │ │ └── code-rules
│ │ │ ├── rules-debug
│ │ │ │ └── debug-rules
│ │ │ ├── rules-orchestrator
│ │ │ │ └── orchestrator-rules
│ │ │ └── rules-test
│ │ │ └── test-rules
│ │ └── .roomodes
│ ├── rules
│ │ ├── cursor_rules.mdc
│ │ ├── dev_workflow.mdc
│ │ ├── self_improve.mdc
│ │ ├── taskmaster_hooks_workflow.mdc
│ │ └── taskmaster.mdc
│ └── scripts_README.md
├── bin
│ └── task-master.js
├── biome.json
├── CHANGELOG.md
├── CLAUDE_CODE_PLUGIN.md
├── CLAUDE.md
├── context
│ ├── chats
│ │ ├── add-task-dependencies-1.md
│ │ └── max-min-tokens.txt.md
│ ├── fastmcp-core.txt
│ ├── fastmcp-docs.txt
│ ├── MCP_INTEGRATION.md
│ ├── mcp-js-sdk-docs.txt
│ ├── mcp-protocol-repo.txt
│ ├── mcp-protocol-schema-03262025.json
│ └── mcp-protocol-spec.txt
├── CONTRIBUTING.md
├── docs
│ ├── claude-code-integration.md
│ ├── CLI-COMMANDER-PATTERN.md
│ ├── command-reference.md
│ ├── configuration.md
│ ├── contributor-docs
│ │ ├── testing-roo-integration.md
│ │ └── worktree-setup.md
│ ├── cross-tag-task-movement.md
│ ├── examples
│ │ ├── claude-code-usage.md
│ │ └── codex-cli-usage.md
│ ├── examples.md
│ ├── licensing.md
│ ├── mcp-provider-guide.md
│ ├── mcp-provider.md
│ ├── migration-guide.md
│ ├── models.md
│ ├── providers
│ │ ├── codex-cli.md
│ │ └── gemini-cli.md
│ ├── README.md
│ ├── scripts
│ │ └── models-json-to-markdown.js
│ ├── task-structure.md
│ └── tutorial.md
├── images
│ ├── hamster-hiring.png
│ └── logo.png
├── index.js
├── jest.config.js
├── jest.resolver.cjs
├── LICENSE
├── llms-install.md
├── mcp-server
│ ├── server.js
│ └── src
│ ├── core
│ │ ├── __tests__
│ │ │ └── context-manager.test.js
│ │ ├── context-manager.js
│ │ ├── direct-functions
│ │ │ ├── add-dependency.js
│ │ │ ├── add-subtask.js
│ │ │ ├── add-tag.js
│ │ │ ├── add-task.js
│ │ │ ├── analyze-task-complexity.js
│ │ │ ├── cache-stats.js
│ │ │ ├── clear-subtasks.js
│ │ │ ├── complexity-report.js
│ │ │ ├── copy-tag.js
│ │ │ ├── create-tag-from-branch.js
│ │ │ ├── delete-tag.js
│ │ │ ├── expand-all-tasks.js
│ │ │ ├── expand-task.js
│ │ │ ├── fix-dependencies.js
│ │ │ ├── generate-task-files.js
│ │ │ ├── initialize-project.js
│ │ │ ├── list-tags.js
│ │ │ ├── models.js
│ │ │ ├── move-task-cross-tag.js
│ │ │ ├── move-task.js
│ │ │ ├── next-task.js
│ │ │ ├── parse-prd.js
│ │ │ ├── remove-dependency.js
│ │ │ ├── remove-subtask.js
│ │ │ ├── remove-task.js
│ │ │ ├── rename-tag.js
│ │ │ ├── research.js
│ │ │ ├── response-language.js
│ │ │ ├── rules.js
│ │ │ ├── scope-down.js
│ │ │ ├── scope-up.js
│ │ │ ├── set-task-status.js
│ │ │ ├── update-subtask-by-id.js
│ │ │ ├── update-task-by-id.js
│ │ │ ├── update-tasks.js
│ │ │ ├── use-tag.js
│ │ │ └── validate-dependencies.js
│ │ ├── task-master-core.js
│ │ └── utils
│ │ ├── env-utils.js
│ │ └── path-utils.js
│ ├── custom-sdk
│ │ ├── errors.js
│ │ ├── index.js
│ │ ├── json-extractor.js
│ │ ├── language-model.js
│ │ ├── message-converter.js
│ │ └── schema-converter.js
│ ├── index.js
│ ├── logger.js
│ ├── providers
│ │ └── mcp-provider.js
│ └── tools
│ ├── add-dependency.js
│ ├── add-subtask.js
│ ├── add-tag.js
│ ├── add-task.js
│ ├── analyze.js
│ ├── clear-subtasks.js
│ ├── complexity-report.js
│ ├── copy-tag.js
│ ├── delete-tag.js
│ ├── expand-all.js
│ ├── expand-task.js
│ ├── fix-dependencies.js
│ ├── generate.js
│ ├── get-operation-status.js
│ ├── index.js
│ ├── initialize-project.js
│ ├── list-tags.js
│ ├── models.js
│ ├── move-task.js
│ ├── next-task.js
│ ├── parse-prd.js
│ ├── README-ZOD-V3.md
│ ├── remove-dependency.js
│ ├── remove-subtask.js
│ ├── remove-task.js
│ ├── rename-tag.js
│ ├── research.js
│ ├── response-language.js
│ ├── rules.js
│ ├── scope-down.js
│ ├── scope-up.js
│ ├── set-task-status.js
│ ├── tool-registry.js
│ ├── update-subtask.js
│ ├── update-task.js
│ ├── update.js
│ ├── use-tag.js
│ ├── utils.js
│ └── validate-dependencies.js
├── mcp-test.js
├── output.json
├── package-lock.json
├── package.json
├── packages
│ ├── ai-sdk-provider-grok-cli
│ │ ├── CHANGELOG.md
│ │ ├── package.json
│ │ ├── README.md
│ │ ├── src
│ │ │ ├── errors.test.ts
│ │ │ ├── errors.ts
│ │ │ ├── grok-cli-language-model.ts
│ │ │ ├── grok-cli-provider.test.ts
│ │ │ ├── grok-cli-provider.ts
│ │ │ ├── index.ts
│ │ │ ├── json-extractor.test.ts
│ │ │ ├── json-extractor.ts
│ │ │ ├── message-converter.test.ts
│ │ │ ├── message-converter.ts
│ │ │ └── types.ts
│ │ └── tsconfig.json
│ ├── build-config
│ │ ├── CHANGELOG.md
│ │ ├── package.json
│ │ ├── src
│ │ │ └── tsdown.base.ts
│ │ └── tsconfig.json
│ ├── claude-code-plugin
│ │ ├── .claude-plugin
│ │ │ └── plugin.json
│ │ ├── .gitignore
│ │ ├── agents
│ │ │ ├── task-checker.md
│ │ │ ├── task-executor.md
│ │ │ └── task-orchestrator.md
│ │ ├── CHANGELOG.md
│ │ ├── commands
│ │ │ ├── add-dependency.md
│ │ │ ├── add-subtask.md
│ │ │ ├── add-task.md
│ │ │ ├── analyze-complexity.md
│ │ │ ├── analyze-project.md
│ │ │ ├── auto-implement-tasks.md
│ │ │ ├── command-pipeline.md
│ │ │ ├── complexity-report.md
│ │ │ ├── convert-task-to-subtask.md
│ │ │ ├── expand-all-tasks.md
│ │ │ ├── expand-task.md
│ │ │ ├── fix-dependencies.md
│ │ │ ├── generate-tasks.md
│ │ │ ├── help.md
│ │ │ ├── init-project-quick.md
│ │ │ ├── init-project.md
│ │ │ ├── install-taskmaster.md
│ │ │ ├── learn.md
│ │ │ ├── list-tasks-by-status.md
│ │ │ ├── list-tasks-with-subtasks.md
│ │ │ ├── list-tasks.md
│ │ │ ├── next-task.md
│ │ │ ├── parse-prd-with-research.md
│ │ │ ├── parse-prd.md
│ │ │ ├── project-status.md
│ │ │ ├── quick-install-taskmaster.md
│ │ │ ├── remove-all-subtasks.md
│ │ │ ├── remove-dependency.md
│ │ │ ├── remove-subtask.md
│ │ │ ├── remove-subtasks.md
│ │ │ ├── remove-task.md
│ │ │ ├── setup-models.md
│ │ │ ├── show-task.md
│ │ │ ├── smart-workflow.md
│ │ │ ├── sync-readme.md
│ │ │ ├── tm-main.md
│ │ │ ├── to-cancelled.md
│ │ │ ├── to-deferred.md
│ │ │ ├── to-done.md
│ │ │ ├── to-in-progress.md
│ │ │ ├── to-pending.md
│ │ │ ├── to-review.md
│ │ │ ├── update-single-task.md
│ │ │ ├── update-task.md
│ │ │ ├── update-tasks-from-id.md
│ │ │ ├── validate-dependencies.md
│ │ │ └── view-models.md
│ │ ├── mcp.json
│ │ └── package.json
│ ├── tm-bridge
│ │ ├── CHANGELOG.md
│ │ ├── package.json
│ │ ├── README.md
│ │ ├── src
│ │ │ ├── add-tag-bridge.ts
│ │ │ ├── bridge-types.ts
│ │ │ ├── bridge-utils.ts
│ │ │ ├── expand-bridge.ts
│ │ │ ├── index.ts
│ │ │ ├── tags-bridge.ts
│ │ │ ├── update-bridge.ts
│ │ │ └── use-tag-bridge.ts
│ │ └── tsconfig.json
│ └── tm-core
│ ├── .gitignore
│ ├── CHANGELOG.md
│ ├── docs
│ │ └── listTasks-architecture.md
│ ├── package.json
│ ├── POC-STATUS.md
│ ├── README.md
│ ├── src
│ │ ├── common
│ │ │ ├── constants
│ │ │ │ ├── index.ts
│ │ │ │ ├── paths.ts
│ │ │ │ └── providers.ts
│ │ │ ├── errors
│ │ │ │ ├── index.ts
│ │ │ │ └── task-master-error.ts
│ │ │ ├── interfaces
│ │ │ │ ├── configuration.interface.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── storage.interface.ts
│ │ │ ├── logger
│ │ │ │ ├── factory.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── logger.spec.ts
│ │ │ │ └── logger.ts
│ │ │ ├── mappers
│ │ │ │ ├── TaskMapper.test.ts
│ │ │ │ └── TaskMapper.ts
│ │ │ ├── types
│ │ │ │ ├── database.types.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── legacy.ts
│ │ │ │ └── repository-types.ts
│ │ │ └── utils
│ │ │ ├── git-utils.ts
│ │ │ ├── id-generator.ts
│ │ │ ├── index.ts
│ │ │ ├── path-helpers.ts
│ │ │ ├── path-normalizer.spec.ts
│ │ │ ├── path-normalizer.ts
│ │ │ ├── project-root-finder.spec.ts
│ │ │ ├── project-root-finder.ts
│ │ │ ├── run-id-generator.spec.ts
│ │ │ └── run-id-generator.ts
│ │ ├── index.ts
│ │ ├── modules
│ │ │ ├── ai
│ │ │ │ ├── index.ts
│ │ │ │ ├── interfaces
│ │ │ │ │ └── ai-provider.interface.ts
│ │ │ │ └── providers
│ │ │ │ ├── base-provider.ts
│ │ │ │ └── index.ts
│ │ │ ├── auth
│ │ │ │ ├── auth-domain.spec.ts
│ │ │ │ ├── auth-domain.ts
│ │ │ │ ├── config.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── managers
│ │ │ │ │ ├── auth-manager.spec.ts
│ │ │ │ │ └── auth-manager.ts
│ │ │ │ ├── services
│ │ │ │ │ ├── context-store.ts
│ │ │ │ │ ├── oauth-service.ts
│ │ │ │ │ ├── organization.service.ts
│ │ │ │ │ ├── supabase-session-storage.spec.ts
│ │ │ │ │ └── supabase-session-storage.ts
│ │ │ │ └── types.ts
│ │ │ ├── briefs
│ │ │ │ ├── briefs-domain.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── services
│ │ │ │ │ └── brief-service.ts
│ │ │ │ ├── types.ts
│ │ │ │ └── utils
│ │ │ │ └── url-parser.ts
│ │ │ ├── commands
│ │ │ │ └── index.ts
│ │ │ ├── config
│ │ │ │ ├── config-domain.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── managers
│ │ │ │ │ ├── config-manager.spec.ts
│ │ │ │ │ └── config-manager.ts
│ │ │ │ └── services
│ │ │ │ ├── config-loader.service.spec.ts
│ │ │ │ ├── config-loader.service.ts
│ │ │ │ ├── config-merger.service.spec.ts
│ │ │ │ ├── config-merger.service.ts
│ │ │ │ ├── config-persistence.service.spec.ts
│ │ │ │ ├── config-persistence.service.ts
│ │ │ │ ├── environment-config-provider.service.spec.ts
│ │ │ │ ├── environment-config-provider.service.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── runtime-state-manager.service.spec.ts
│ │ │ │ └── runtime-state-manager.service.ts
│ │ │ ├── dependencies
│ │ │ │ └── index.ts
│ │ │ ├── execution
│ │ │ │ ├── executors
│ │ │ │ │ ├── base-executor.ts
│ │ │ │ │ ├── claude-executor.ts
│ │ │ │ │ └── executor-factory.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── services
│ │ │ │ │ └── executor-service.ts
│ │ │ │ └── types.ts
│ │ │ ├── git
│ │ │ │ ├── adapters
│ │ │ │ │ ├── git-adapter.test.ts
│ │ │ │ │ └── git-adapter.ts
│ │ │ │ ├── git-domain.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── services
│ │ │ │ ├── branch-name-generator.spec.ts
│ │ │ │ ├── branch-name-generator.ts
│ │ │ │ ├── commit-message-generator.test.ts
│ │ │ │ ├── commit-message-generator.ts
│ │ │ │ ├── scope-detector.test.ts
│ │ │ │ ├── scope-detector.ts
│ │ │ │ ├── template-engine.test.ts
│ │ │ │ └── template-engine.ts
│ │ │ ├── integration
│ │ │ │ ├── clients
│ │ │ │ │ ├── index.ts
│ │ │ │ │ └── supabase-client.ts
│ │ │ │ ├── integration-domain.ts
│ │ │ │ └── services
│ │ │ │ ├── export.service.ts
│ │ │ │ ├── task-expansion.service.ts
│ │ │ │ └── task-retrieval.service.ts
│ │ │ ├── reports
│ │ │ │ ├── index.ts
│ │ │ │ ├── managers
│ │ │ │ │ └── complexity-report-manager.ts
│ │ │ │ └── types.ts
│ │ │ ├── storage
│ │ │ │ ├── adapters
│ │ │ │ │ ├── activity-logger.ts
│ │ │ │ │ ├── api-storage.ts
│ │ │ │ │ └── file-storage
│ │ │ │ │ ├── file-operations.ts
│ │ │ │ │ ├── file-storage.ts
│ │ │ │ │ ├── format-handler.ts
│ │ │ │ │ ├── index.ts
│ │ │ │ │ └── path-resolver.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── services
│ │ │ │ │ └── storage-factory.ts
│ │ │ │ └── utils
│ │ │ │ └── api-client.ts
│ │ │ ├── tasks
│ │ │ │ ├── entities
│ │ │ │ │ └── task.entity.ts
│ │ │ │ ├── parser
│ │ │ │ │ └── index.ts
│ │ │ │ ├── repositories
│ │ │ │ │ ├── supabase
│ │ │ │ │ │ ├── dependency-fetcher.ts
│ │ │ │ │ │ ├── index.ts
│ │ │ │ │ │ └── supabase-repository.ts
│ │ │ │ │ └── task-repository.interface.ts
│ │ │ │ ├── services
│ │ │ │ │ ├── preflight-checker.service.ts
│ │ │ │ │ ├── tag.service.ts
│ │ │ │ │ ├── task-execution-service.ts
│ │ │ │ │ ├── task-loader.service.ts
│ │ │ │ │ └── task-service.ts
│ │ │ │ └── tasks-domain.ts
│ │ │ ├── ui
│ │ │ │ └── index.ts
│ │ │ └── workflow
│ │ │ ├── managers
│ │ │ │ ├── workflow-state-manager.spec.ts
│ │ │ │ └── workflow-state-manager.ts
│ │ │ ├── orchestrators
│ │ │ │ ├── workflow-orchestrator.test.ts
│ │ │ │ └── workflow-orchestrator.ts
│ │ │ ├── services
│ │ │ │ ├── test-result-validator.test.ts
│ │ │ │ ├── test-result-validator.ts
│ │ │ │ ├── test-result-validator.types.ts
│ │ │ │ ├── workflow-activity-logger.ts
│ │ │ │ └── workflow.service.ts
│ │ │ ├── types.ts
│ │ │ └── workflow-domain.ts
│ │ ├── subpath-exports.test.ts
│ │ ├── tm-core.ts
│ │ └── utils
│ │ └── time.utils.ts
│ ├── tests
│ │ ├── auth
│ │ │ └── auth-refresh.test.ts
│ │ ├── integration
│ │ │ ├── auth-token-refresh.test.ts
│ │ │ ├── list-tasks.test.ts
│ │ │ └── storage
│ │ │ └── activity-logger.test.ts
│ │ ├── mocks
│ │ │ └── mock-provider.ts
│ │ ├── setup.ts
│ │ └── unit
│ │ ├── base-provider.test.ts
│ │ ├── executor.test.ts
│ │ └── smoke.test.ts
│ ├── tsconfig.json
│ └── vitest.config.ts
├── README-task-master.md
├── README.md
├── scripts
│ ├── create-worktree.sh
│ ├── dev.js
│ ├── init.js
│ ├── list-worktrees.sh
│ ├── modules
│ │ ├── ai-services-unified.js
│ │ ├── bridge-utils.js
│ │ ├── commands.js
│ │ ├── config-manager.js
│ │ ├── dependency-manager.js
│ │ ├── index.js
│ │ ├── prompt-manager.js
│ │ ├── supported-models.json
│ │ ├── sync-readme.js
│ │ ├── task-manager
│ │ │ ├── add-subtask.js
│ │ │ ├── add-task.js
│ │ │ ├── analyze-task-complexity.js
│ │ │ ├── clear-subtasks.js
│ │ │ ├── expand-all-tasks.js
│ │ │ ├── expand-task.js
│ │ │ ├── find-next-task.js
│ │ │ ├── generate-task-files.js
│ │ │ ├── is-task-dependent.js
│ │ │ ├── list-tasks.js
│ │ │ ├── migrate.js
│ │ │ ├── models.js
│ │ │ ├── move-task.js
│ │ │ ├── parse-prd
│ │ │ │ ├── index.js
│ │ │ │ ├── parse-prd-config.js
│ │ │ │ ├── parse-prd-helpers.js
│ │ │ │ ├── parse-prd-non-streaming.js
│ │ │ │ ├── parse-prd-streaming.js
│ │ │ │ └── parse-prd.js
│ │ │ ├── remove-subtask.js
│ │ │ ├── remove-task.js
│ │ │ ├── research.js
│ │ │ ├── response-language.js
│ │ │ ├── scope-adjustment.js
│ │ │ ├── set-task-status.js
│ │ │ ├── tag-management.js
│ │ │ ├── task-exists.js
│ │ │ ├── update-single-task-status.js
│ │ │ ├── update-subtask-by-id.js
│ │ │ ├── update-task-by-id.js
│ │ │ └── update-tasks.js
│ │ ├── task-manager.js
│ │ ├── ui.js
│ │ ├── update-config-tokens.js
│ │ ├── utils
│ │ │ ├── contextGatherer.js
│ │ │ ├── fuzzyTaskSearch.js
│ │ │ └── git-utils.js
│ │ └── utils.js
│ ├── task-complexity-report.json
│ ├── test-claude-errors.js
│ └── test-claude.js
├── sonar-project.properties
├── src
│ ├── ai-providers
│ │ ├── anthropic.js
│ │ ├── azure.js
│ │ ├── base-provider.js
│ │ ├── bedrock.js
│ │ ├── claude-code.js
│ │ ├── codex-cli.js
│ │ ├── gemini-cli.js
│ │ ├── google-vertex.js
│ │ ├── google.js
│ │ ├── grok-cli.js
│ │ ├── groq.js
│ │ ├── index.js
│ │ ├── lmstudio.js
│ │ ├── ollama.js
│ │ ├── openai-compatible.js
│ │ ├── openai.js
│ │ ├── openrouter.js
│ │ ├── perplexity.js
│ │ ├── xai.js
│ │ ├── zai-coding.js
│ │ └── zai.js
│ ├── constants
│ │ ├── commands.js
│ │ ├── paths.js
│ │ ├── profiles.js
│ │ ├── rules-actions.js
│ │ ├── task-priority.js
│ │ └── task-status.js
│ ├── profiles
│ │ ├── amp.js
│ │ ├── base-profile.js
│ │ ├── claude.js
│ │ ├── cline.js
│ │ ├── codex.js
│ │ ├── cursor.js
│ │ ├── gemini.js
│ │ ├── index.js
│ │ ├── kilo.js
│ │ ├── kiro.js
│ │ ├── opencode.js
│ │ ├── roo.js
│ │ ├── trae.js
│ │ ├── vscode.js
│ │ ├── windsurf.js
│ │ └── zed.js
│ ├── progress
│ │ ├── base-progress-tracker.js
│ │ ├── cli-progress-factory.js
│ │ ├── parse-prd-tracker.js
│ │ ├── progress-tracker-builder.js
│ │ └── tracker-ui.js
│ ├── prompts
│ │ ├── add-task.json
│ │ ├── analyze-complexity.json
│ │ ├── expand-task.json
│ │ ├── parse-prd.json
│ │ ├── README.md
│ │ ├── research.json
│ │ ├── schemas
│ │ │ ├── parameter.schema.json
│ │ │ ├── prompt-template.schema.json
│ │ │ ├── README.md
│ │ │ └── variant.schema.json
│ │ ├── update-subtask.json
│ │ ├── update-task.json
│ │ └── update-tasks.json
│ ├── provider-registry
│ │ └── index.js
│ ├── schemas
│ │ ├── add-task.js
│ │ ├── analyze-complexity.js
│ │ ├── base-schemas.js
│ │ ├── expand-task.js
│ │ ├── parse-prd.js
│ │ ├── registry.js
│ │ ├── update-subtask.js
│ │ ├── update-task.js
│ │ └── update-tasks.js
│ ├── task-master.js
│ ├── ui
│ │ ├── confirm.js
│ │ ├── indicators.js
│ │ └── parse-prd.js
│ └── utils
│ ├── asset-resolver.js
│ ├── create-mcp-config.js
│ ├── format.js
│ ├── getVersion.js
│ ├── logger-utils.js
│ ├── manage-gitignore.js
│ ├── path-utils.js
│ ├── profiles.js
│ ├── rule-transformer.js
│ ├── stream-parser.js
│ └── timeout-manager.js
├── test-clean-tags.js
├── test-config-manager.js
├── test-prd.txt
├── test-tag-functions.js
├── test-version-check-full.js
├── test-version-check.js
├── tests
│ ├── e2e
│ │ ├── e2e_helpers.sh
│ │ ├── parse_llm_output.cjs
│ │ ├── run_e2e.sh
│ │ ├── run_fallback_verification.sh
│ │ └── test_llm_analysis.sh
│ ├── fixtures
│ │ ├── .taskmasterconfig
│ │ ├── sample-claude-response.js
│ │ ├── sample-prd.txt
│ │ └── sample-tasks.js
│ ├── helpers
│ │ └── tool-counts.js
│ ├── integration
│ │ ├── claude-code-error-handling.test.js
│ │ ├── claude-code-optional.test.js
│ │ ├── cli
│ │ │ ├── commands.test.js
│ │ │ ├── complex-cross-tag-scenarios.test.js
│ │ │ └── move-cross-tag.test.js
│ │ ├── manage-gitignore.test.js
│ │ ├── mcp-server
│ │ │ └── direct-functions.test.js
│ │ ├── move-task-cross-tag.integration.test.js
│ │ ├── move-task-simple.integration.test.js
│ │ ├── profiles
│ │ │ ├── amp-init-functionality.test.js
│ │ │ ├── claude-init-functionality.test.js
│ │ │ ├── cline-init-functionality.test.js
│ │ │ ├── codex-init-functionality.test.js
│ │ │ ├── cursor-init-functionality.test.js
│ │ │ ├── gemini-init-functionality.test.js
│ │ │ ├── opencode-init-functionality.test.js
│ │ │ ├── roo-files-inclusion.test.js
│ │ │ ├── roo-init-functionality.test.js
│ │ │ ├── rules-files-inclusion.test.js
│ │ │ ├── trae-init-functionality.test.js
│ │ │ ├── vscode-init-functionality.test.js
│ │ │ └── windsurf-init-functionality.test.js
│ │ └── providers
│ │ └── temperature-support.test.js
│ ├── manual
│ │ ├── progress
│ │ │ ├── parse-prd-analysis.js
│ │ │ ├── test-parse-prd.js
│ │ │ └── TESTING_GUIDE.md
│ │ └── prompts
│ │ ├── prompt-test.js
│ │ └── README.md
│ ├── README.md
│ ├── setup.js
│ └── unit
│ ├── ai-providers
│ │ ├── base-provider.test.js
│ │ ├── claude-code.test.js
│ │ ├── codex-cli.test.js
│ │ ├── gemini-cli.test.js
│ │ ├── lmstudio.test.js
│ │ ├── mcp-components.test.js
│ │ ├── openai-compatible.test.js
│ │ ├── openai.test.js
│ │ ├── provider-registry.test.js
│ │ ├── zai-coding.test.js
│ │ ├── zai-provider.test.js
│ │ ├── zai-schema-introspection.test.js
│ │ └── zai.test.js
│ ├── ai-services-unified.test.js
│ ├── commands.test.js
│ ├── config-manager.test.js
│ ├── config-manager.test.mjs
│ ├── dependency-manager.test.js
│ ├── init.test.js
│ ├── initialize-project.test.js
│ ├── kebab-case-validation.test.js
│ ├── manage-gitignore.test.js
│ ├── mcp
│ │ └── tools
│ │ ├── __mocks__
│ │ │ └── move-task.js
│ │ ├── add-task.test.js
│ │ ├── analyze-complexity.test.js
│ │ ├── expand-all.test.js
│ │ ├── get-tasks.test.js
│ │ ├── initialize-project.test.js
│ │ ├── move-task-cross-tag-options.test.js
│ │ ├── move-task-cross-tag.test.js
│ │ ├── remove-task.test.js
│ │ └── tool-registration.test.js
│ ├── mcp-providers
│ │ ├── mcp-components.test.js
│ │ └── mcp-provider.test.js
│ ├── parse-prd.test.js
│ ├── profiles
│ │ ├── amp-integration.test.js
│ │ ├── claude-integration.test.js
│ │ ├── cline-integration.test.js
│ │ ├── codex-integration.test.js
│ │ ├── cursor-integration.test.js
│ │ ├── gemini-integration.test.js
│ │ ├── kilo-integration.test.js
│ │ ├── kiro-integration.test.js
│ │ ├── mcp-config-validation.test.js
│ │ ├── opencode-integration.test.js
│ │ ├── profile-safety-check.test.js
│ │ ├── roo-integration.test.js
│ │ ├── rule-transformer-cline.test.js
│ │ ├── rule-transformer-cursor.test.js
│ │ ├── rule-transformer-gemini.test.js
│ │ ├── rule-transformer-kilo.test.js
│ │ ├── rule-transformer-kiro.test.js
│ │ ├── rule-transformer-opencode.test.js
│ │ ├── rule-transformer-roo.test.js
│ │ ├── rule-transformer-trae.test.js
│ │ ├── rule-transformer-vscode.test.js
│ │ ├── rule-transformer-windsurf.test.js
│ │ ├── rule-transformer-zed.test.js
│ │ ├── rule-transformer.test.js
│ │ ├── selective-profile-removal.test.js
│ │ ├── subdirectory-support.test.js
│ │ ├── trae-integration.test.js
│ │ ├── vscode-integration.test.js
│ │ ├── windsurf-integration.test.js
│ │ └── zed-integration.test.js
│ ├── progress
│ │ └── base-progress-tracker.test.js
│ ├── prompt-manager.test.js
│ ├── prompts
│ │ ├── expand-task-prompt.test.js
│ │ └── prompt-migration.test.js
│ ├── scripts
│ │ └── modules
│ │ ├── commands
│ │ │ ├── move-cross-tag.test.js
│ │ │ └── README.md
│ │ ├── dependency-manager
│ │ │ ├── circular-dependencies.test.js
│ │ │ ├── cross-tag-dependencies.test.js
│ │ │ └── fix-dependencies-command.test.js
│ │ ├── task-manager
│ │ │ ├── add-subtask.test.js
│ │ │ ├── add-task.test.js
│ │ │ ├── analyze-task-complexity.test.js
│ │ │ ├── clear-subtasks.test.js
│ │ │ ├── complexity-report-tag-isolation.test.js
│ │ │ ├── expand-all-tasks.test.js
│ │ │ ├── expand-task.test.js
│ │ │ ├── find-next-task.test.js
│ │ │ ├── generate-task-files.test.js
│ │ │ ├── list-tasks.test.js
│ │ │ ├── models-baseurl.test.js
│ │ │ ├── move-task-cross-tag.test.js
│ │ │ ├── move-task.test.js
│ │ │ ├── parse-prd-schema.test.js
│ │ │ ├── parse-prd.test.js
│ │ │ ├── remove-subtask.test.js
│ │ │ ├── remove-task.test.js
│ │ │ ├── research.test.js
│ │ │ ├── scope-adjustment.test.js
│ │ │ ├── set-task-status.test.js
│ │ │ ├── setup.js
│ │ │ ├── update-single-task-status.test.js
│ │ │ ├── update-subtask-by-id.test.js
│ │ │ ├── update-task-by-id.test.js
│ │ │ └── update-tasks.test.js
│ │ ├── ui
│ │ │ └── cross-tag-error-display.test.js
│ │ └── utils-tag-aware-paths.test.js
│ ├── task-finder.test.js
│ ├── task-manager
│ │ ├── clear-subtasks.test.js
│ │ ├── move-task.test.js
│ │ ├── tag-boundary.test.js
│ │ └── tag-management.test.js
│ ├── task-master.test.js
│ ├── ui
│ │ └── indicators.test.js
│ ├── ui.test.js
│ ├── utils-strip-ansi.test.js
│ └── utils.test.js
├── tsconfig.json
├── tsdown.config.ts
├── turbo.json
└── update-task-migration-plan.md
```
# Files
--------------------------------------------------------------------------------
/tests/e2e/run_e2e.sh:
--------------------------------------------------------------------------------
```bash
#!/bin/bash
# E2E test runner for task-master. Parses a small set of flags up front,
# then (later in the file) runs the full end-to-end suite piped through tee.
# Treat unset variables as an error when substituting.
set -u
# Prevent errors in pipelines from being masked.
set -o pipefail
# NOTE(review): 'set -e' is deliberately not used — many steps below tolerate
# individual command failures and log them instead of aborting; confirm before adding it.
# --- Default Settings ---
run_verification_test=true
# --- Argument Parsing ---
# Simple loop to check for the skip flag
# Note: This needs to happen *before* the main block piped to tee
# if we want the decision logged early. Or handle args inside.
# Let's handle it before for clarity.
# Strategy: consume --skip-verification here; collect every other argument into
# processed_args and restore them to $@ afterwards so later code (e.g. the
# --analyze-log branch) can still see them.
processed_args=()
while [[ $# -gt 0 ]]; do
case "$1" in
--skip-verification)
run_verification_test=false
echo "[INFO] Argument '--skip-verification' detected. Fallback verification will be skipped."
shift # Consume the flag
;;
--analyze-log)
# Keep the analyze-log flag handling separate for now
# It exits early, so doesn't conflict with the main run flags
# Preserve the flag plus its optional log-file argument (if present).
processed_args+=("$1")
if [[ $# -gt 1 ]]; then
processed_args+=("$2")
shift 2
else
shift 1
fi
;;
*)
# Unknown argument, pass it along or handle error
# For now, just pass it along in case --analyze-log needs it later
processed_args+=("$1")
shift
;;
esac
done
# Restore processed arguments ONLY if the array is not empty
# (expanding an empty array under 'set -u' on older bash can error).
if [ ${#processed_args[@]} -gt 0 ]; then
set -- "${processed_args[@]}"
fi
# --- Configuration ---
# Assumes script is run from the project root (claude-task-master)
TASKMASTER_SOURCE_DIR="." # Current directory is the source
# Base directory for test runs, relative to project root
BASE_TEST_DIR="$TASKMASTER_SOURCE_DIR/tests/e2e/_runs"
# Log directory, relative to project root
LOG_DIR="$TASKMASTER_SOURCE_DIR/tests/e2e/log"
# Path to the sample PRD, relative to project root
SAMPLE_PRD_SOURCE="$TASKMASTER_SOURCE_DIR/tests/fixtures/sample-prd.txt"
# Path to the main .env file in the source directory
MAIN_ENV_FILE="$TASKMASTER_SOURCE_DIR/.env"
# ---
# <<< Source the helper script >>>
# Provides at least extract_and_sum_cost (exported below) and
# analyze_log_with_llm (called in the --analyze-log branch).
# shellcheck source=tests/e2e/e2e_helpers.sh
source "$TASKMASTER_SOURCE_DIR/tests/e2e/e2e_helpers.sh"
# ==========================================
# >>> Global Helper Functions Defined in run_e2e.sh <<<
# --- Helper Functions (Define globally before export) ---
# Render a whole-second duration as "<minutes>m<seconds>s", seconds zero-padded
# to two digits (e.g. 125 -> "2m05s"). Echoes the result to stdout.
_format_duration() {
    local secs=$1
    printf "%dm%02ds" $((secs / 60)) $((secs % 60))
}
# Note: relies on the global 'overall_start_time' (epoch seconds) being set
# before this is called. Prints the elapsed time formatted by _format_duration.
_get_elapsed_time_for_log() {
    local now
    now=$(date +%s)
    # overall_start_time is the script-wide start; start_time_for_helpers is
    # only meaningful inside the piped block, so it is not used here.
    _format_duration $((now - overall_start_time))
}
# Log an informational line prefixed with elapsed time and wall-clock timestamp.
log_info() {
    local stamp
    stamp="[$(_get_elapsed_time_for_log)] $(date +"%Y-%m-%d %H:%M:%S")"
    echo "[INFO] $stamp $1"
}
# Log a success line with the same elapsed/wall-clock prefix.
log_success() {
    local stamp
    stamp="[$(_get_elapsed_time_for_log)] $(date +"%Y-%m-%d %H:%M:%S")"
    echo "[SUCCESS] $stamp $1"
}
# Log an error line to stderr with the same elapsed/wall-clock prefix.
log_error() {
    local stamp
    stamp="[$(_get_elapsed_time_for_log)] $(date +"%Y-%m-%d %H:%M:%S")"
    echo "[ERROR] $stamp $1" >&2
}
# Print a numbered step banner; increments the global test_step_count.
log_step() {
    test_step_count=$((test_step_count + 1))
    local stamp
    stamp="[$(_get_elapsed_time_for_log)] $(date +"%Y-%m-%d %H:%M:%S")"
    echo ""
    echo "============================================="
    echo " STEP ${test_step_count}: $stamp $1"
    echo "============================================="
}
# ==========================================
# <<< Export helper functions for subshells >>>
# 'export -f' makes these visible to child bash processes (e.g. the
# verification script invoked later). extract_and_sum_cost comes from
# the sourced e2e_helpers.sh.
export -f log_info log_success log_error log_step _format_duration _get_elapsed_time_for_log extract_and_sum_cost
# --- Argument Parsing for Analysis-Only Mode ---
# This remains the same, as it exits early if matched
# When invoked as "run_e2e.sh --analyze-log [logfile]", skip the whole E2E run
# and only analyze an existing log: locate the log, derive its matching run
# directory from the timestamp in the filename, cd there, call the LLM
# analysis helper, and exit with the helper's exit code.
if [ "$#" -ge 1 ] && [ "$1" == "--analyze-log" ]; then
LOG_TO_ANALYZE=""
# Check if a log file path was provided as the second argument
if [ "$#" -ge 2 ] && [ -n "$2" ]; then
LOG_TO_ANALYZE="$2"
echo "[INFO] Using specified log file for analysis: $LOG_TO_ANALYZE"
else
echo "[INFO] Log file not specified. Attempting to find the latest log..."
# Find the latest log file in the LOG_DIR
# Ensure LOG_DIR is absolute for ls to work correctly regardless of PWD
# NOTE(review): LOG_DIR already starts with $TASKMASTER_SOURCE_DIR ("."), so
# this prepends it twice; harmless when both are ".", but worth confirming.
ABS_LOG_DIR="$(cd "$TASKMASTER_SOURCE_DIR/$LOG_DIR" && pwd)"
# 'ls -t' sorts newest-first; take the first match.
LATEST_LOG=$(ls -t "$ABS_LOG_DIR"/e2e_run_*.log 2>/dev/null | head -n 1)
if [ -z "$LATEST_LOG" ]; then
echo "[ERROR] No log files found matching 'e2e_run_*.log' in $ABS_LOG_DIR. Cannot analyze." >&2
exit 1
fi
LOG_TO_ANALYZE="$LATEST_LOG"
echo "[INFO] Found latest log file: $LOG_TO_ANALYZE"
fi
# Ensure the log path is absolute (it should be if found by ls, but double-check)
if [[ "$LOG_TO_ANALYZE" != /* ]]; then
LOG_TO_ANALYZE="$(pwd)/$LOG_TO_ANALYZE" # Fallback if relative path somehow occurred
fi
echo "[INFO] Running in analysis-only mode for log: $LOG_TO_ANALYZE"
# --- Derive TEST_RUN_DIR from log file path ---
# Extract timestamp like YYYYMMDD_HHMMSS from e2e_run_YYYYMMDD_HHMMSS.log
log_basename=$(basename "$LOG_TO_ANALYZE")
# Ensure the sed command matches the .log suffix correctly
timestamp_match=$(echo "$log_basename" | sed -n 's/^e2e_run_\([0-9]\{8\}_[0-9]\{6\}\)\.log$/\1/p')
if [ -z "$timestamp_match" ]; then
echo "[ERROR] Could not extract timestamp from log file name: $log_basename" >&2
echo "[ERROR] Expected format: e2e_run_YYYYMMDD_HHMMSS.log" >&2
exit 1
fi
# Construct the expected run directory path relative to project root
# NOTE(review): EXPECTED_RUN_DIR is assigned but never used below; only the
# absolute EXPECTED_RUN_DIR_ABS is checked. Candidate for removal.
EXPECTED_RUN_DIR="$TASKMASTER_SOURCE_DIR/tests/e2e/_runs/run_$timestamp_match"
# Make it absolute
EXPECTED_RUN_DIR_ABS="$(cd "$TASKMASTER_SOURCE_DIR" && pwd)/tests/e2e/_runs/run_$timestamp_match"
if [ ! -d "$EXPECTED_RUN_DIR_ABS" ]; then
echo "[ERROR] Corresponding test run directory not found: $EXPECTED_RUN_DIR_ABS" >&2
exit 1
fi
# Save original dir before changing
ORIGINAL_DIR=$(pwd)
echo "[INFO] Changing directory to $EXPECTED_RUN_DIR_ABS for analysis context..."
cd "$EXPECTED_RUN_DIR_ABS"
# Call the analysis function (sourced from helpers)
echo "[INFO] Calling analyze_log_with_llm function..."
# Second argument is the absolute project root, resolved from the saved dir.
analyze_log_with_llm "$LOG_TO_ANALYZE" "$(cd "$ORIGINAL_DIR/$TASKMASTER_SOURCE_DIR" && pwd)" # Pass absolute project root
ANALYSIS_EXIT_CODE=$?
# Return to original directory
cd "$ORIGINAL_DIR"
# Propagate the analysis helper's exit status as this script's exit status.
exit $ANALYSIS_EXIT_CODE
fi
# --- End Analysis-Only Mode Logic ---
# --- Normal Execution Starts Here (if not in analysis-only mode) ---
# --- Test State Variables ---
# Note: These are mainly for step numbering within the log now, not for final summary
test_step_count=0
start_time_for_helpers=0 # Separate start time for helper functions inside the pipe
total_e2e_cost="0.0" # Initialize total E2E cost (accumulated by extract_and_sum_cost)
# ---
# --- Log File Setup ---
# Create the log directory if it doesn't exist
mkdir -p "$LOG_DIR"
# Define timestamped log file path
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
# <<< Use pwd to create an absolute path AND add .log extension >>>
LOG_FILE="$(pwd)/$LOG_DIR/e2e_run_${TIMESTAMP}.log"
# Define and create the test run directory *before* the main pipe
# (subshells inside the pipe can then rely on it existing).
mkdir -p "$BASE_TEST_DIR" # Ensure base exists first
TEST_RUN_DIR="$BASE_TEST_DIR/run_$TIMESTAMP"
mkdir -p "$TEST_RUN_DIR"
# Echo starting message to the original terminal BEFORE the main piped block
echo "Starting E2E test. Output will be shown here and saved to: $LOG_FILE"
echo "Running from directory: $(pwd)"
echo "--- Starting E2E Run ---" # Separator before piped output starts
# Record start time for overall duration *before* the pipe
# (read by _get_elapsed_time_for_log for every log line's elapsed prefix).
overall_start_time=$(date +%s)
# <<< DEFINE ORIGINAL_DIR GLOBALLY HERE >>>
# Saved so the piped block can resolve paths back to the project root after cd.
ORIGINAL_DIR=$(pwd)
# ==========================================
# NOTE: A second, byte-for-byte-equivalent copy of _format_duration,
# _get_elapsed_time_for_log, log_info, log_success, log_error and log_step
# used to be redefined here. The functions are already defined earlier in this
# script and exported with 'export -f' (which survives redefinition-free use),
# so the redundant duplicate definitions have been removed to keep a single
# source of truth for the logging helpers.
# ==========================================
# ==========================================
# --- Main Execution Block (Piped to tee) ---
# Wrap the main part of the script in braces and pipe its output (stdout and stderr) to tee
{
# Note: Helper functions are now defined globally above,
# but we still need start_time_for_helpers if any logging functions
# called *inside* this block depend on it. If not, it can be removed.
start_time_for_helpers=$(date +%s) # Keep if needed by helpers called inside this block
# Log the verification decision
if [ "$run_verification_test" = true ]; then
log_info "Fallback verification test will be run as part of this E2E test."
else
log_info "Fallback verification test will be SKIPPED (--skip-verification flag detected)."
fi
# --- Dependency Checks ---
log_step "Checking for dependencies (jq, bc)"
if ! command -v jq &> /dev/null; then
log_error "Dependency 'jq' is not installed or not found in PATH. Please install jq (e.g., 'brew install jq' or 'sudo apt-get install jq')."
exit 1
fi
if ! command -v bc &> /dev/null; then
log_error "Dependency 'bc' not installed (for cost calculation). Please install bc (e.g., 'brew install bc' or 'sudo apt-get install bc')."
exit 1
fi
log_success "Dependencies 'jq' and 'bc' found."
# --- Test Setup (Output to tee) ---
log_step "Setting up test environment"
log_step "Creating global npm link for task-master-ai"
if npm link; then
log_success "Global link created/updated."
else
log_error "Failed to run 'npm link'. Check permissions or output for details."
exit 1
fi
log_info "Ensured base test directory exists: $BASE_TEST_DIR"
log_info "Using test run directory (created earlier): $TEST_RUN_DIR"
# Check if source .env file exists
if [ ! -f "$MAIN_ENV_FILE" ]; then
log_error "Source .env file not found at $MAIN_ENV_FILE. Cannot proceed with API-dependent tests."
exit 1
fi
log_info "Source .env file found at $MAIN_ENV_FILE."
# Check if sample PRD exists
if [ ! -f "$SAMPLE_PRD_SOURCE" ]; then
log_error "Sample PRD not found at $SAMPLE_PRD_SOURCE. Please check path."
exit 1
fi
log_info "Copying sample PRD to test directory..."
cp "$SAMPLE_PRD_SOURCE" "$TEST_RUN_DIR/prd.txt"
if [ ! -f "$TEST_RUN_DIR/prd.txt" ]; then
log_error "Failed to copy sample PRD to $TEST_RUN_DIR."
exit 1
fi
log_success "Sample PRD copied."
# ORIGINAL_DIR=$(pwd) # Save original dir # <<< REMOVED FROM HERE
cd "$TEST_RUN_DIR"
log_info "Changed directory to $(pwd)"
# === Copy .env file BEFORE init ===
log_step "Copying source .env file for API keys"
if cp "$ORIGINAL_DIR/.env" ".env"; then
log_success ".env file copied successfully."
else
log_error "Failed to copy .env file from $ORIGINAL_DIR/.env"
exit 1
fi
# ========================================
# --- Test Execution (Output to tee) ---
log_step "Linking task-master-ai package locally"
npm link task-master-ai
log_success "Package linked locally."
log_step "Initializing Task Master project (non-interactive)"
task-master init -y --name="E2E Test $TIMESTAMP" --description="Automated E2E test run"
if [ ! -f ".taskmaster/config.json" ]; then
log_error "Initialization failed: .taskmaster/config.json not found."
exit 1
fi
log_success "Project initialized."
log_step "Parsing PRD"
cmd_output_prd=$(task-master parse-prd ./prd.txt --force 2>&1)
exit_status_prd=$?
echo "$cmd_output_prd"
extract_and_sum_cost "$cmd_output_prd"
if [ $exit_status_prd -ne 0 ] || [ ! -s ".taskmaster/tasks/tasks.json" ]; then
log_error "Parsing PRD failed: .taskmaster/tasks/tasks.json not found or is empty. Exit status: $exit_status_prd"
exit 1
else
log_success "PRD parsed successfully."
fi
log_step "Expanding Task 1 (to ensure subtask 1.1 exists)"
cmd_output_analyze=$(task-master analyze-complexity --research --output complexity_results.json 2>&1)
exit_status_analyze=$?
echo "$cmd_output_analyze"
extract_and_sum_cost "$cmd_output_analyze"
if [ $exit_status_analyze -ne 0 ] || [ ! -f "complexity_results.json" ]; then
log_error "Complexity analysis failed: complexity_results.json not found. Exit status: $exit_status_analyze"
exit 1
else
log_success "Complexity analysis saved to complexity_results.json"
fi
log_step "Generating complexity report"
task-master complexity-report --file complexity_results.json > complexity_report_formatted.log
log_success "Formatted complexity report saved to complexity_report_formatted.log"
log_step "Expanding Task 1 (assuming it exists)"
cmd_output_expand1=$(task-master expand --id=1 --cr complexity_results.json 2>&1)
exit_status_expand1=$?
echo "$cmd_output_expand1"
extract_and_sum_cost "$cmd_output_expand1"
if [ $exit_status_expand1 -ne 0 ]; then
log_error "Expanding Task 1 failed. Exit status: $exit_status_expand1"
else
log_success "Attempted to expand Task 1."
fi
log_step "Setting status for Subtask 1.1 (assuming it exists)"
task-master set-status --id=1.1 --status=done
log_success "Attempted to set status for Subtask 1.1 to 'done'."
log_step "Listing tasks again (after changes)"
task-master list --with-subtasks > task_list_after_changes.log
log_success "Task list after changes saved to task_list_after_changes.log"
# === Start New Test Section: Tag-Aware Expand Testing ===
log_step "Creating additional tag for expand testing"
task-master add-tag feature-expand --description="Tag for testing expand command with tag preservation"
log_success "Created feature-expand tag."
log_step "Adding task to feature-expand tag"
task-master add-task --tag=feature-expand --prompt="Test task for tag-aware expansion" --priority=medium
# Get the new task ID dynamically
new_expand_task_id=$(jq -r '.["feature-expand"].tasks[-1].id' .taskmaster/tasks/tasks.json)
log_success "Added task $new_expand_task_id to feature-expand tag."
log_step "Verifying tags exist before expand test"
task-master tags > tags_before_expand.log
tag_count_before=$(jq 'keys | length' .taskmaster/tasks/tasks.json)
log_success "Tag count before expand: $tag_count_before"
log_step "Expanding task in feature-expand tag (testing tag corruption fix)"
cmd_output_expand_tagged=$(task-master expand --tag=feature-expand --id="$new_expand_task_id" 2>&1)
exit_status_expand_tagged=$?
echo "$cmd_output_expand_tagged"
extract_and_sum_cost "$cmd_output_expand_tagged"
if [ $exit_status_expand_tagged -ne 0 ]; then
log_error "Tagged expand failed. Exit status: $exit_status_expand_tagged"
else
log_success "Tagged expand completed."
fi
log_step "Verifying tag preservation after expand"
task-master tags > tags_after_expand.log
tag_count_after=$(jq 'keys | length' .taskmaster/tasks/tasks.json)
if [ "$tag_count_before" -eq "$tag_count_after" ]; then
log_success "Tag count preserved: $tag_count_after (no corruption detected)"
else
log_error "Tag corruption detected! Before: $tag_count_before, After: $tag_count_after"
fi
log_step "Verifying master tag still exists and has tasks"
master_task_count=$(jq -r '.master.tasks | length' .taskmaster/tasks/tasks.json 2>/dev/null || echo "0")
if [ "$master_task_count" -gt "0" ]; then
log_success "Master tag preserved with $master_task_count tasks"
else
log_error "Master tag corrupted or empty after tagged expand"
fi
log_step "Verifying feature-expand tag has expanded subtasks"
expanded_subtask_count=$(jq -r ".\"feature-expand\".tasks[] | select(.id == $new_expand_task_id) | .subtasks | length" .taskmaster/tasks/tasks.json 2>/dev/null || echo "0")
if [ "$expanded_subtask_count" -gt "0" ]; then
log_success "Expand successful: $expanded_subtask_count subtasks created in feature-expand tag"
else
log_error "Expand failed: No subtasks found in feature-expand tag"
fi
log_step "Testing force expand with tag preservation"
cmd_output_force_expand=$(task-master expand --tag=feature-expand --id="$new_expand_task_id" --force 2>&1)
exit_status_force_expand=$?
echo "$cmd_output_force_expand"
extract_and_sum_cost "$cmd_output_force_expand"
# Verify tags still preserved after force expand
tag_count_after_force=$(jq 'keys | length' .taskmaster/tasks/tasks.json)
if [ "$tag_count_before" -eq "$tag_count_after_force" ]; then
log_success "Force expand preserved all tags"
else
log_error "Force expand caused tag corruption"
fi
log_step "Testing expand --all with tag preservation"
# Add another task to feature-expand for expand-all testing
task-master add-task --tag=feature-expand --prompt="Second task for expand-all testing" --priority=low
second_expand_task_id=$(jq -r '.["feature-expand"].tasks[-1].id' .taskmaster/tasks/tasks.json)
cmd_output_expand_all=$(task-master expand --tag=feature-expand --all 2>&1)
exit_status_expand_all=$?
echo "$cmd_output_expand_all"
extract_and_sum_cost "$cmd_output_expand_all"
# Verify tags preserved after expand-all
tag_count_after_all=$(jq 'keys | length' .taskmaster/tasks/tasks.json)
if [ "$tag_count_before" -eq "$tag_count_after_all" ]; then
log_success "Expand --all preserved all tags"
else
log_error "Expand --all caused tag corruption"
fi
log_success "Completed expand --all tag preservation test."
# === End New Test Section: Tag-Aware Expand Testing ===
# === Test Model Commands ===
log_step "Checking initial model configuration"
task-master models > models_initial_config.log
log_success "Initial model config saved to models_initial_config.log"
log_step "Setting main model"
task-master models --set-main claude-3-7-sonnet-20250219
log_success "Set main model."
log_step "Setting research model"
task-master models --set-research sonar-pro
log_success "Set research model."
log_step "Setting fallback model"
task-master models --set-fallback claude-3-5-sonnet-20241022
log_success "Set fallback model."
log_step "Checking final model configuration"
task-master models > models_final_config.log
log_success "Final model config saved to models_final_config.log"
log_step "Resetting main model to default (Claude Sonnet) before provider tests"
task-master models --set-main claude-3-7-sonnet-20250219
log_success "Main model reset to claude-3-7-sonnet-20250219."
# === End Model Commands Test ===
# === Fallback Model generateObjectService Verification ===
if [ "$run_verification_test" = true ]; then
log_step "Starting Fallback Model (generateObjectService) Verification (Calls separate script)"
verification_script_path="$ORIGINAL_DIR/tests/e2e/run_fallback_verification.sh"
if [ -x "$verification_script_path" ]; then
log_info "--- Executing Fallback Verification Script: $verification_script_path ---"
verification_output=$("$verification_script_path" "$(pwd)" 2>&1)
verification_exit_code=$?
echo "$verification_output"
extract_and_sum_cost "$verification_output"
log_info "--- Finished Fallback Verification Script Execution (Exit Code: $verification_exit_code) ---"
# Log success/failure based on captured exit code
if [ $verification_exit_code -eq 0 ]; then
log_success "Fallback verification script reported success."
else
log_error "Fallback verification script reported FAILURE (Exit Code: $verification_exit_code)."
fi
else
log_error "Fallback verification script not found or not executable at $verification_script_path. Skipping verification."
fi
else
log_info "Skipping Fallback Verification test as requested by flag."
fi
# === END Verification Section ===
# === Multi-Provider Add-Task Test (Keep as is) ===
log_step "Starting Multi-Provider Add-Task Test Sequence"
# Define providers, models, and flags
# Array order matters: providers[i] corresponds to models[i] and flags[i]
declare -a providers=("anthropic" "openai" "google" "perplexity" "xai" "openrouter")
declare -a models=(
"claude-3-7-sonnet-20250219"
"gpt-4o"
"gemini-2.5-pro-preview-05-06"
"sonar-pro" # Note: This is research-only, add-task might fail if not using research model
"grok-3"
"anthropic/claude-3.7-sonnet" # OpenRouter uses Claude 3.7
)
# Flags: Add provider-specific flags here, e.g., --openrouter. Use empty string if none.
declare -a flags=("" "" "" "" "" "--openrouter")
# Consistent prompt for all providers
add_task_prompt="Create a task to implement user authentication using OAuth 2.0 with Google as the provider. Include steps for registering the app, handling the callback, and storing user sessions."
log_info "Using consistent prompt for add-task tests: \"$add_task_prompt\""
echo "--- Multi-Provider Add Task Summary ---" > provider_add_task_summary.log # Initialize summary log
for i in "${!providers[@]}"; do
provider="${providers[$i]}"
model="${models[$i]}"
flag="${flags[$i]}"
log_step "Testing Add-Task with Provider: $provider (Model: $model)"
# 1. Set the main model for this provider
log_info "Setting main model to $model for $provider ${flag:+using flag $flag}..."
set_model_cmd="task-master models --set-main \"$model\" $flag"
echo "Executing: $set_model_cmd"
if eval $set_model_cmd; then
log_success "Successfully set main model for $provider."
else
log_error "Failed to set main model for $provider. Skipping add-task for this provider."
# Optionally save failure info here if needed for LLM analysis
echo "Provider $provider set-main FAILED" >> provider_add_task_summary.log
continue # Skip to the next provider
fi
# 2. Run add-task
log_info "Running add-task with prompt..."
add_task_output_file="add_task_raw_output_${provider}_${model//\//_}.log" # Sanitize ID
# Run add-task and capture ALL output (stdout & stderr) to a file AND a variable
add_task_cmd_output=$(task-master add-task --prompt "$add_task_prompt" 2>&1 | tee "$add_task_output_file")
add_task_exit_code=${PIPESTATUS[0]}
# 3. Check for success and extract task ID
new_task_id=""
extract_and_sum_cost "$add_task_cmd_output"
if [ $add_task_exit_code -eq 0 ] && (echo "$add_task_cmd_output" | grep -q "✓ Added new task #" || echo "$add_task_cmd_output" | grep -q "✅ New task created successfully:" || echo "$add_task_cmd_output" | grep -q "Task [0-9]\+ Created Successfully"); then
new_task_id=$(echo "$add_task_cmd_output" | grep -o -E "(Task |#)[0-9.]+" | grep -o -E "[0-9.]+" | head -n 1)
if [ -n "$new_task_id" ]; then
log_success "Add-task succeeded for $provider. New task ID: $new_task_id"
echo "Provider $provider add-task SUCCESS (ID: $new_task_id)" >> provider_add_task_summary.log
else
# Succeeded but couldn't parse ID - treat as warning/anomaly
log_error "Add-task command succeeded for $provider, but failed to extract task ID from output."
echo "Provider $provider add-task SUCCESS (ID extraction FAILED)" >> provider_add_task_summary.log
new_task_id="UNKNOWN_ID_EXTRACTION_FAILED"
fi
else
log_error "Add-task command failed for $provider (Exit Code: $add_task_exit_code). See $add_task_output_file for details."
echo "Provider $provider add-task FAILED (Exit Code: $add_task_exit_code)" >> provider_add_task_summary.log
new_task_id="FAILED"
fi
# 4. Run task show if ID was obtained (even if extraction failed, use placeholder)
if [ "$new_task_id" != "FAILED" ] && [ "$new_task_id" != "UNKNOWN_ID_EXTRACTION_FAILED" ]; then
log_info "Running task show for new task ID: $new_task_id"
show_output_file="add_task_show_output_${provider}_id_${new_task_id}.log"
if task-master show "$new_task_id" > "$show_output_file"; then
log_success "Task show output saved to $show_output_file"
else
log_error "task show command failed for ID $new_task_id. Check log."
# Still keep the file, it might contain error output
fi
elif [ "$new_task_id" == "UNKNOWN_ID_EXTRACTION_FAILED" ]; then
log_info "Skipping task show for $provider due to ID extraction failure."
else
log_info "Skipping task show for $provider due to add-task failure."
fi
done # End of provider loop
log_step "Finished Multi-Provider Add-Task Test Sequence"
echo "Provider add-task summary log available at: provider_add_task_summary.log"
# === End Multi-Provider Add-Task Test ===
log_step "Listing tasks again (after multi-add)"
task-master list --with-subtasks > task_list_after_multi_add.log
log_success "Task list after multi-add saved to task_list_after_multi_add.log"
# === Resume Core Task Commands Test ===
log_step "Listing tasks (for core tests)"
task-master list > task_list_core_test_start.log
log_success "Core test initial task list saved."
log_step "Getting next task"
task-master next > next_task_core_test.log
log_success "Core test next task saved."
log_step "Showing Task 1 details"
task-master show 1 > task_1_details_core_test.log
log_success "Task 1 details saved."
log_step "Adding dependency (Task 2 depends on Task 1)"
task-master add-dependency --id=2 --depends-on=1
log_success "Added dependency 2->1."
log_step "Validating dependencies (after add)"
task-master validate-dependencies > validate_dependencies_after_add_core.log
log_success "Dependency validation after add saved."
log_step "Removing dependency (Task 2 depends on Task 1)"
task-master remove-dependency --id=2 --depends-on=1
log_success "Removed dependency 2->1."
log_step "Fixing dependencies (should be no-op now)"
task-master fix-dependencies > fix_dependencies_output_core.log
log_success "Fix dependencies attempted."
# === Start New Test Section: Validate/Fix Bad Dependencies ===
log_step "Intentionally adding non-existent dependency (1 -> 999)"
task-master add-dependency --id=1 --depends-on=999 || log_error "Failed to add non-existent dependency (unexpected)"
# Don't exit even if the above fails, the goal is to test validation
log_success "Attempted to add dependency 1 -> 999."
log_step "Validating dependencies (expecting non-existent error)"
task-master validate-dependencies > validate_deps_non_existent.log 2>&1 || true # Allow command to fail without exiting script
if grep -q "Non-existent dependency ID: 999" validate_deps_non_existent.log; then
log_success "Validation correctly identified non-existent dependency 999."
else
log_error "Validation DID NOT report non-existent dependency 999 as expected. Check validate_deps_non_existent.log"
fi
log_step "Fixing dependencies (should remove 1 -> 999)"
task-master fix-dependencies > fix_deps_after_non_existent.log
log_success "Attempted to fix dependencies."
log_step "Validating dependencies (after fix)"
task-master validate-dependencies > validate_deps_after_fix_non_existent.log 2>&1 || true # Allow potential failure
if grep -q "Non-existent dependency ID: 999" validate_deps_after_fix_non_existent.log; then
log_error "Validation STILL reports non-existent dependency 999 after fix. Check logs."
else
log_success "Validation shows non-existent dependency 999 was removed."
fi
log_step "Intentionally adding circular dependency (4 -> 5 -> 4)"
task-master add-dependency --id=4 --depends-on=5 || log_error "Failed to add dependency 4->5"
task-master add-dependency --id=5 --depends-on=4 || log_error "Failed to add dependency 5->4"
log_success "Attempted to add dependencies 4 -> 5 and 5 -> 4."
log_step "Validating dependencies (expecting circular error)"
task-master validate-dependencies > validate_deps_circular.log 2>&1 || true # Allow command to fail
# Note: Adjust the grep pattern based on the EXACT error message from validate-dependencies
if grep -q -E "Circular dependency detected involving task IDs: (4, 5|5, 4)" validate_deps_circular.log; then
log_success "Validation correctly identified circular dependency between 4 and 5."
else
log_error "Validation DID NOT report circular dependency 4<->5 as expected. Check validate_deps_circular.log"
fi
log_step "Fixing dependencies (should remove one side of 4 <-> 5)"
task-master fix-dependencies > fix_deps_after_circular.log
log_success "Attempted to fix dependencies."
log_step "Validating dependencies (after fix circular)"
task-master validate-dependencies > validate_deps_after_fix_circular.log 2>&1 || true # Allow potential failure
if grep -q -E "Circular dependency detected involving task IDs: (4, 5|5, 4)" validate_deps_after_fix_circular.log; then
log_error "Validation STILL reports circular dependency 4<->5 after fix. Check logs."
else
log_success "Validation shows circular dependency 4<->5 was resolved."
fi
# === End New Test Section ===
# Find the next available task ID dynamically instead of hardcoding 11, 12
# Assuming tasks are added sequentially and we didn't remove any core tasks yet
last_task_id=$(jq '[.master.tasks[].id] | max' .taskmaster/tasks/tasks.json)
manual_task_id=$((last_task_id + 1))
ai_task_id=$((manual_task_id + 1))
log_step "Adding Task $manual_task_id (Manual)"
task-master add-task --title="Manual E2E Task" --description="Add basic health check endpoint" --priority=low --dependencies=3 # Depends on backend setup
log_success "Added Task $manual_task_id manually."
log_step "Adding Task $ai_task_id (AI)"
# --- AI-driven commands: each captures stdout+stderr so extract_and_sum_cost
# --- can accumulate the reported AI cost, and each exit status is checked.
# NOTE(review): $ai_task_id (and $manual_task_id below) are assigned earlier
# in the script, outside this excerpt — confirm they are set before this point.
cmd_output_add_ai=$(task-master add-task --prompt="Implement basic UI styling using CSS variables for colors and spacing" --priority=medium --dependencies=1 2>&1)
exit_status_add_ai=$?
echo "$cmd_output_add_ai"
extract_and_sum_cost "$cmd_output_add_ai"
if [ $exit_status_add_ai -ne 0 ]; then
log_error "Adding AI Task $ai_task_id failed. Exit status: $exit_status_add_ai"
else
log_success "Added Task $ai_task_id via AI prompt."
fi
# Update a single task's details via AI prompt.
log_step "Updating Task 3 (update-task AI)"
cmd_output_update_task3=$(task-master update-task --id=3 --prompt="Update backend server setup: Ensure CORS is configured to allow requests from the frontend origin." 2>&1)
exit_status_update_task3=$?
echo "$cmd_output_update_task3"
extract_and_sum_cost "$cmd_output_update_task3"
if [ $exit_status_update_task3 -ne 0 ]; then
log_error "Updating Task 3 failed. Exit status: $exit_status_update_task3"
else
log_success "Attempted update for Task 3."
fi
# Bulk-update all tasks from ID 5 onward via a single AI prompt.
log_step "Updating Tasks from Task 5 (update AI)"
cmd_output_update_from5=$(task-master update --from=5 --prompt="Refactor the backend storage module to use a simple JSON file (storage.json) instead of an in-memory object for persistence. Update relevant tasks." 2>&1)
exit_status_update_from5=$?
echo "$cmd_output_update_from5"
extract_and_sum_cost "$cmd_output_update_from5"
if [ $exit_status_update_from5 -ne 0 ]; then
log_error "Updating from Task 5 failed. Exit status: $exit_status_update_from5"
else
log_success "Attempted update from Task 5 onwards."
fi
# Expand Task 8 into AI-generated subtasks (subtask 8.1 is used below).
log_step "Expanding Task 8 (AI)"
cmd_output_expand8=$(task-master expand --id=8 2>&1)
exit_status_expand8=$?
echo "$cmd_output_expand8"
extract_and_sum_cost "$cmd_output_expand8"
if [ $exit_status_expand8 -ne 0 ]; then
log_error "Expanding Task 8 failed. Exit status: $exit_status_expand8"
else
log_success "Attempted to expand Task 8."
fi
# Append an AI-generated implementation note to subtask 8.1.
log_step "Updating Subtask 8.1 (update-subtask AI)"
cmd_output_update_subtask81=$(task-master update-subtask --id=8.1 --prompt="Implementation note: Remember to handle potential API errors and display a user-friendly message." 2>&1)
exit_status_update_subtask81=$?
echo "$cmd_output_update_subtask81"
extract_and_sum_cost "$cmd_output_update_subtask81"
if [ $exit_status_update_subtask81 -ne 0 ]; then
log_error "Updating Subtask 8.1 failed. Exit status: $exit_status_update_subtask81"
else
log_success "Attempted update for Subtask 8.1."
fi
# Add a couple more subtasks for multi-remove test
log_step 'Adding subtasks to Task 2 (for multi-remove test)'
task-master add-subtask --parent=2 --title="Subtask 2.1 for removal"
task-master add-subtask --parent=2 --title="Subtask 2.2 for removal"
log_success "Added subtasks 2.1 and 2.2."
# Exercise comma-separated multi-ID removal of subtasks.
log_step "Removing Subtasks 2.1 and 2.2 (multi-ID)"
task-master remove-subtask --id=2.1,2.2
log_success "Removed subtasks 2.1 and 2.2."
log_step "Setting status for Task 1 to done"
task-master set-status --id=1 --status=done
log_success "Set status for Task 1 to done."
# "next" should now pick a task other than the completed Task 1.
log_step "Getting next task (after status change)"
task-master next > next_task_after_change_core.log
log_success "Next task after change saved."
# === Start New Test Section: List Filtering ===
log_step "Listing tasks filtered by status 'done'"
task-master list --status=done > task_list_status_done.log
log_success "Filtered list saved to task_list_status_done.log (Manual/LLM check recommended)"
# Optional assertion: Check if Task 1 ID exists and Task 2 ID does NOT
# if grep -q "^1\." task_list_status_done.log && ! grep -q "^2\." task_list_status_done.log; then
# log_success "Basic check passed: Task 1 found, Task 2 not found in 'done' list."
# else
# log_error "Basic check failed for list --status=done."
# fi
# === End New Test Section ===
log_step "Clearing subtasks from Task 8"
task-master clear-subtasks --id=8
log_success "Attempted to clear subtasks from Task 8."
log_step "Removing Tasks $manual_task_id and $ai_task_id (multi-ID)"
# Remove the tasks we added earlier
task-master remove-task --id="$manual_task_id,$ai_task_id" -y
log_success "Removed tasks $manual_task_id and $ai_task_id."
# === Start New Test Section: Subtasks & Dependencies ===
log_step "Expanding Task 2 (to ensure multiple tasks have subtasks)"
task-master expand --id=2 # Expand task 2: Backend setup
log_success "Attempted to expand Task 2."
log_step "Listing tasks with subtasks (Before Clear All)"
task-master list --with-subtasks > task_list_before_clear_all.log
log_success "Task list before clear-all saved."
log_step "Clearing ALL subtasks"
task-master clear-subtasks --all
log_success "Attempted to clear all subtasks."
log_step "Listing tasks with subtasks (After Clear All)"
task-master list --with-subtasks > task_list_after_clear_all.log
log_success "Task list after clear-all saved. (Manual/LLM check recommended to verify subtasks removed)"
log_step "Expanding Task 3 again (to have subtasks for next test)"
task-master expand --id=3
log_success "Attempted to expand Task 3."
# Verify 3.1 exists
# jq -e exits non-zero when the query yields no output, so a missing subtask
# fails the whole test run fast with a clear error.
if ! jq -e '.master.tasks[] | select(.id == 3) | .subtasks[] | select(.id == 1)' .taskmaster/tasks/tasks.json > /dev/null; then
log_error "Subtask 3.1 not found in tasks.json after expanding Task 3."
exit 1
fi
# Dependencies may reference subtasks via dotted IDs (e.g. 3.1), not just tasks.
log_step "Adding dependency: Task 4 depends on Subtask 3.1"
task-master add-dependency --id=4 --depends-on=3.1
log_success "Added dependency 4 -> 3.1."
log_step "Showing Task 4 details (after adding subtask dependency)"
task-master show 4 > task_4_details_after_dep_add.log
log_success "Task 4 details saved. (Manual/LLM check recommended for dependency [3.1])"
log_step "Removing dependency: Task 4 depends on Subtask 3.1"
task-master remove-dependency --id=4 --depends-on=3.1
log_success "Removed dependency 4 -> 3.1."
log_step "Showing Task 4 details (after removing subtask dependency)"
task-master show 4 > task_4_details_after_dep_remove.log
log_success "Task 4 details saved. (Manual/LLM check recommended to verify dependency removed)"
# === End New Test Section ===
log_step "Generating task files (final)"
task-master generate
log_success "Generated task files."
# === End Core Task Commands Test ===
# === AI Commands (Re-test some after changes) ===
log_step "Analyzing complexity (AI with Research - Final Check)"
cmd_output_analyze_final=$(task-master analyze-complexity --research --output complexity_results_final.json 2>&1)
exit_status_analyze_final=$?
echo "$cmd_output_analyze_final"
extract_and_sum_cost "$cmd_output_analyze_final"
# Both a zero exit status AND the output file are required; the report step
# below consumes the JSON file directly.
if [ $exit_status_analyze_final -ne 0 ] || [ ! -f "complexity_results_final.json" ]; then
log_error "Final Complexity analysis failed. Exit status: $exit_status_analyze_final. File found: $(test -f complexity_results_final.json && echo true || echo false)"
exit 1 # Critical for subsequent report step
else
log_success "Final Complexity analysis command executed and file created."
fi
log_step "Generating complexity report (Non-AI - Final Check)"
task-master complexity-report --file complexity_results_final.json > complexity_report_formatted_final.log
log_success "Final Formatted complexity report saved."
# === End AI Commands Re-test ===
log_step "Listing tasks again (final)"
task-master list --with-subtasks > task_list_final.log
log_success "Final task list saved to task_list_final.log"
# --- Test Completion (Output to tee) ---
log_step "E2E Test Steps Completed"
echo ""
ABS_TEST_RUN_DIR="$(pwd)"
echo "Test artifacts and logs are located in: $ABS_TEST_RUN_DIR"
echo "Key artifact files (within above dir):"
ls -1 # List files in the current directory
echo ""
echo "Full script log also available at: $LOG_FILE (relative to project root)"
# Optional: cd back to original directory
# cd "$ORIGINAL_DIR"
# End of the main execution block brace
} 2>&1 | tee "$LOG_FILE"
# --- Final Terminal Message ---
# PIPESTATUS[0] is the exit code of the { ... } block feeding tee, not tee's.
EXIT_CODE=${PIPESTATUS[0]}
overall_end_time=$(date +%s)
total_elapsed_seconds=$((overall_end_time - overall_start_time))
# Format total duration
total_minutes=$((total_elapsed_seconds / 60))
total_sec_rem=$((total_elapsed_seconds % 60))
formatted_total_time=$(printf "%dm%02ds" "$total_minutes" "$total_sec_rem")
# Count steps and successes from the log file *after* the pipe finishes
# Use grep -c for counting lines matching the pattern
# Corrected pattern to match ' STEP X:' format
# NOTE(review): \+ in a BRE is a GNU grep extension — confirm if BSD grep
# (macOS) must also be supported; grep -E would be portable.
final_step_count=$(grep -c '^[[:space:]]\+STEP [0-9]\+:' "$LOG_FILE" || true)
final_success_count=$(grep -c '\[SUCCESS\]' "$LOG_FILE" || true) # Count lines containing [SUCCESS]
echo "--- E2E Run Summary ---"
echo "Log File: $LOG_FILE"
echo "Total Elapsed Time: ${formatted_total_time}"
echo "Total Steps Executed: ${final_step_count}" # Use count from log
# Quote EXIT_CODE so the test cannot break into a syntax error if it is empty.
if [ "$EXIT_CODE" -eq 0 ]; then
echo "Status: SUCCESS"
# Use counts from log file
echo "Successful Steps: ${final_success_count}/${final_step_count}"
else
echo "Status: FAILED"
# Use count from log file for total steps attempted
echo "Failure likely occurred during/after Step: ${final_step_count}"
# Use count from log file for successes before failure
echo "Successful Steps Before Failure: ${final_success_count}"
echo "Please check the log file '$LOG_FILE' for error details."
fi
echo "-------------------------"
# --- Attempt LLM Analysis ---
# Run this *after* the main execution block and tee pipe finish writing the log file
if [ -d "$TEST_RUN_DIR" ]; then
# Define absolute path to source dir if not already defined (though it should be by setup)
TASKMASTER_SOURCE_DIR_ABS=${TASKMASTER_SOURCE_DIR_ABS:-$(cd "$ORIGINAL_DIR/$TASKMASTER_SOURCE_DIR" && pwd)}
cd "$TEST_RUN_DIR"
# Pass the absolute source directory path
analyze_log_with_llm "$LOG_FILE" "$TASKMASTER_SOURCE_DIR_ABS"
ANALYSIS_EXIT_CODE=$? # Capture the exit code of the analysis function
# Optional: cd back again if needed
cd "$ORIGINAL_DIR" # Ensure we change back to the original directory
else
formatted_duration_for_error=$(_format_duration "$total_elapsed_seconds")
echo "[ERROR] [$formatted_duration_for_error] $(date +"%Y-%m-%d %H:%M:%S") Test run directory $TEST_RUN_DIR not found. Cannot perform LLM analysis." >&2
fi
# Final cost formatting
# Default to 0 so printf "%.6f" does not fail if no AI cost was ever extracted
# (extract_and_sum_cost may never have run on a short-circuited failure path).
formatted_total_e2e_cost=$(printf "%.6f" "${total_e2e_cost:-0}")
echo "Total E2E AI Cost: $formatted_total_e2e_cost USD"
exit "$EXIT_CODE"
```
--------------------------------------------------------------------------------
/packages/tm-core/src/modules/workflow/orchestrators/workflow-orchestrator.test.ts:
--------------------------------------------------------------------------------
```typescript
import { beforeEach, describe, expect, it, vi } from 'vitest';
import { WorkflowOrchestrator } from '../orchestrators/workflow-orchestrator.js';
import { TestResultValidator } from '../services/test-result-validator.js';
import type { TestResult } from '../services/test-result-validator.types.js';
import type {
WorkflowContext,
WorkflowError,
WorkflowEventData,
WorkflowPhase
} from '../types.js';
describe('WorkflowOrchestrator - State Machine Structure', () => {
let orchestrator: WorkflowOrchestrator;
let initialContext: WorkflowContext;
beforeEach(() => {
// Fresh context for every test: one parent task with two pending subtasks.
initialContext = {
taskId: 'task-1',
subtasks: [
{ id: '1.1', title: 'Subtask 1', status: 'pending', attempts: 0 },
{ id: '1.2', title: 'Subtask 2', status: 'pending', attempts: 0 }
],
currentSubtaskIndex: 0,
errors: [],
metadata: {}
};
// A new orchestrator per test so no state leaks between tests.
orchestrator = new WorkflowOrchestrator(initialContext);
});
describe('Initial State', () => {
	it('should start in PREFLIGHT phase', () => {
		// A freshly constructed orchestrator begins at the first workflow phase.
		expect(orchestrator.getCurrentPhase()).toBe('PREFLIGHT');
	});

	it('should have the provided context', () => {
		// The context handed to the constructor is exposed unchanged.
		const { taskId, subtasks } = orchestrator.getContext();
		expect(taskId).toBe('task-1');
		expect(subtasks).toHaveLength(2);
	});
});
describe('State Transitions', () => {
	// Drives the orchestrator from PREFLIGHT into SUBTASK_LOOP.
	const reachSubtaskLoop = () => {
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		orchestrator.transition({
			type: 'BRANCH_CREATED',
			branchName: 'feature/test'
		});
	};

	it('should transition from PREFLIGHT to BRANCH_SETUP', () => {
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		expect(orchestrator.getCurrentPhase()).toBe('BRANCH_SETUP');
	});

	it('should transition from BRANCH_SETUP to SUBTASK_LOOP', () => {
		reachSubtaskLoop();
		expect(orchestrator.getCurrentPhase()).toBe('SUBTASK_LOOP');
	});

	it('should store branch name in context', () => {
		reachSubtaskLoop();
		expect(orchestrator.getContext().branchName).toBe('feature/test');
	});

	it('should transition from SUBTASK_LOOP to FINALIZE when all subtasks complete', () => {
		reachSubtaskLoop();
		orchestrator.transition({ type: 'ALL_SUBTASKS_COMPLETE' });
		expect(orchestrator.getCurrentPhase()).toBe('FINALIZE');
	});

	it('should transition from FINALIZE to COMPLETE', () => {
		reachSubtaskLoop();
		orchestrator.transition({ type: 'ALL_SUBTASKS_COMPLETE' });
		orchestrator.transition({ type: 'FINALIZE_COMPLETE' });
		expect(orchestrator.getCurrentPhase()).toBe('COMPLETE');
	});

	it('should reject invalid transitions', () => {
		// FINALIZE_COMPLETE is not a legal event from the initial PREFLIGHT phase.
		expect(() => {
			orchestrator.transition({ type: 'FINALIZE_COMPLETE' });
		}).toThrow('Invalid transition');
	});
});
describe('TDD Cycle in SUBTASK_LOOP', () => {
	// Canonical fixture runs for the RED (all failing) and GREEN (all passing)
	// halves of the TDD cycle.
	const failingRun = {
		total: 5,
		passed: 0,
		failed: 5,
		skipped: 0,
		phase: 'RED' as const
	};
	const passingRun = {
		total: 5,
		passed: 5,
		failed: 0,
		skipped: 0,
		phase: 'GREEN' as const
	};
	const completeRed = () =>
		orchestrator.transition({
			type: 'RED_PHASE_COMPLETE',
			testResults: failingRun
		});
	const completeGreen = () =>
		orchestrator.transition({
			type: 'GREEN_PHASE_COMPLETE',
			testResults: passingRun
		});

	beforeEach(() => {
		// Every test in this suite starts inside SUBTASK_LOOP.
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		orchestrator.transition({
			type: 'BRANCH_CREATED',
			branchName: 'feature/test'
		});
	});

	it('should start with RED phase when entering SUBTASK_LOOP', () => {
		expect(orchestrator.getCurrentTDDPhase()).toBe('RED');
	});

	it('should transition from RED to GREEN', () => {
		completeRed();
		expect(orchestrator.getCurrentTDDPhase()).toBe('GREEN');
	});

	it('should transition from GREEN to COMMIT', () => {
		completeRed();
		completeGreen();
		expect(orchestrator.getCurrentTDDPhase()).toBe('COMMIT');
	});

	it('should complete subtask after COMMIT', () => {
		completeRed();
		completeGreen();
		orchestrator.transition({ type: 'COMMIT_COMPLETE' });
		expect(orchestrator.getContext().subtasks[0].status).toBe('completed');
	});

	it('should move to next subtask after completion', () => {
		completeRed();
		completeGreen();
		orchestrator.transition({ type: 'COMMIT_COMPLETE' });
		orchestrator.transition({ type: 'SUBTASK_COMPLETE' });
		// The loop advances to subtask 1.2 and restarts at RED.
		expect(orchestrator.getContext().currentSubtaskIndex).toBe(1);
		expect(orchestrator.getCurrentTDDPhase()).toBe('RED');
	});
});
describe('State Serialization', () => {
	it('should serialize current state', () => {
		// A snapshot exposes both the phase and the full context.
		const snapshot = orchestrator.getState();
		expect(snapshot).toHaveProperty('phase');
		expect(snapshot).toHaveProperty('context');
		expect(snapshot.phase).toBe('PREFLIGHT');
	});

	it('should restore from serialized state', () => {
		// Advance to SUBTASK_LOOP, snapshot, then rebuild from the snapshot.
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		orchestrator.transition({
			type: 'BRANCH_CREATED',
			branchName: 'feature/test'
		});
		const snapshot = orchestrator.getState();
		const restored = new WorkflowOrchestrator(snapshot.context);
		restored.restoreState(snapshot);
		expect(restored.getCurrentPhase()).toBe('SUBTASK_LOOP');
		expect(restored.getContext().branchName).toBe('feature/test');
	});
});
describe('Event Emission', () => {
	// Drives the orchestrator from PREFLIGHT into SUBTASK_LOOP.
	const enterLoop = () => {
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		orchestrator.transition({
			type: 'BRANCH_CREATED',
			branchName: 'feature/test'
		});
	};
	const failingRun = {
		total: 5,
		passed: 0,
		failed: 5,
		skipped: 0,
		phase: 'RED' as const
	};
	const passingRun = {
		total: 5,
		passed: 5,
		failed: 0,
		skipped: 0,
		phase: 'GREEN' as const
	};

	it('should emit phase:entered event on state transition', () => {
		const received: WorkflowEventData[] = [];
		orchestrator.on('phase:entered', (event) => received.push(event));
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		expect(received).toHaveLength(1);
		expect(received[0].type).toBe('phase:entered');
		expect(received[0].phase).toBe('BRANCH_SETUP');
	});

	it('should emit phase:exited event on state transition', () => {
		const received: WorkflowEventData[] = [];
		orchestrator.on('phase:exited', (event) => received.push(event));
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		expect(received).toHaveLength(1);
		expect(received[0].type).toBe('phase:exited');
		// The exited event names the phase being left, not the one entered.
		expect(received[0].phase).toBe('PREFLIGHT');
	});

	it('should emit tdd phase events', () => {
		const received: WorkflowEventData[] = [];
		orchestrator.on('tdd:red:started', (event) => received.push(event));
		orchestrator.on('tdd:green:started', (event) => received.push(event));
		enterLoop();
		expect(received).toHaveLength(1);
		expect(received[0].type).toBe('tdd:red:started');
		orchestrator.transition({
			type: 'RED_PHASE_COMPLETE',
			testResults: failingRun
		});
		expect(received).toHaveLength(2);
		expect(received[1].type).toBe('tdd:green:started');
	});

	it('should emit subtask events', () => {
		const received: WorkflowEventData[] = [];
		orchestrator.on('subtask:started', (event) => received.push(event));
		orchestrator.on('subtask:completed', (event) => received.push(event));
		enterLoop();
		expect(received).toHaveLength(1);
		expect(received[0].type).toBe('subtask:started');
		expect(received[0].subtaskId).toBe('1.1');
		// Run one full TDD cycle so the first subtask completes and the
		// second one starts.
		orchestrator.transition({
			type: 'RED_PHASE_COMPLETE',
			testResults: failingRun
		});
		orchestrator.transition({
			type: 'GREEN_PHASE_COMPLETE',
			testResults: passingRun
		});
		orchestrator.transition({ type: 'COMMIT_COMPLETE' });
		orchestrator.transition({ type: 'SUBTASK_COMPLETE' });
		expect(received).toHaveLength(3);
		expect(received[1].type).toBe('subtask:completed');
		expect(received[2].type).toBe('subtask:started');
		expect(received[2].subtaskId).toBe('1.2');
	});

	it('should support multiple listeners for same event', () => {
		const first = vi.fn();
		const second = vi.fn();
		orchestrator.on('phase:entered', first);
		orchestrator.on('phase:entered', second);
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		expect(first).toHaveBeenCalledOnce();
		expect(second).toHaveBeenCalledOnce();
	});

	it('should allow removing event listeners', () => {
		const listener = vi.fn();
		orchestrator.on('phase:entered', listener);
		orchestrator.off('phase:entered', listener);
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		expect(listener).not.toHaveBeenCalled();
	});

	it('should include timestamp in all events', () => {
		const received: WorkflowEventData[] = [];
		orchestrator.on('phase:entered', (event) => received.push(event));
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		expect(received[0].timestamp).toBeInstanceOf(Date);
	});

	it('should include additional data in events', () => {
		const received: WorkflowEventData[] = [];
		orchestrator.on('git:branch:created', (event) => received.push(event));
		enterLoop();
		const branchEvent = received.find((e) => e.type === 'git:branch:created');
		expect(branchEvent).toBeDefined();
		expect(branchEvent?.data?.branchName).toBe('feature/test');
	});
});
describe('State Persistence', () => {
	// Fixed: this test was declared `async` but contains no `await` —
	// auto-persist fires synchronously on transition, so it is a plain test.
	it('should persist state after transitions when auto-persist enabled', () => {
		const persistMock = vi.fn();
		orchestrator.enableAutoPersist(persistMock);
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		expect(persistMock).toHaveBeenCalledOnce();
		// The persisted snapshot reflects the post-transition phase.
		const state = persistMock.mock.calls[0][0];
		expect(state.phase).toBe('BRANCH_SETUP');
	});

	it('should emit state:persisted event', async () => {
		const events: WorkflowEventData[] = [];
		orchestrator.on('state:persisted', (event) => events.push(event));
		// persistState is async, so this test genuinely awaits it.
		await orchestrator.persistState();
		expect(events).toHaveLength(1);
		expect(events[0].type).toBe('state:persisted');
	});

	it('should auto-persist after each transition when enabled', () => {
		const persistMock = vi.fn();
		orchestrator.enableAutoPersist(persistMock);
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		expect(persistMock).toHaveBeenCalledTimes(1);
		orchestrator.transition({
			type: 'BRANCH_CREATED',
			branchName: 'feature/test'
		});
		expect(persistMock).toHaveBeenCalledTimes(2);
	});

	it('should not auto-persist when disabled', () => {
		const persistMock = vi.fn();
		orchestrator.enableAutoPersist(persistMock);
		orchestrator.disableAutoPersist();
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		expect(persistMock).not.toHaveBeenCalled();
	});

	it('should serialize state with all context data', () => {
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		orchestrator.transition({
			type: 'BRANCH_CREATED',
			branchName: 'feature/test'
		});
		const state = orchestrator.getState();
		expect(state.phase).toBe('SUBTASK_LOOP');
		expect(state.context.branchName).toBe('feature/test');
		expect(state.context.currentTDDPhase).toBe('RED');
		expect(state.context.taskId).toBe('task-1');
	});
});
describe('Phase Transition Guards and Validation', () => {
	// Fixture runs reused across the validation tests.
	const failingRun = {
		total: 5,
		passed: 0,
		failed: 5,
		skipped: 0,
		phase: 'RED' as const
	};
	const passingRun = {
		total: 5,
		passed: 5,
		failed: 0,
		skipped: 0,
		phase: 'GREEN' as const
	};
	// Builds a minimal workflow context with the given subtasks/metadata.
	const makeContext = (
		subtasks: WorkflowContext['subtasks'],
		metadata: WorkflowContext['metadata'] = {}
	): WorkflowContext => ({
		taskId: 'task-1',
		subtasks,
		currentSubtaskIndex: 0,
		errors: [],
		metadata
	});
	// Drives any orchestrator through PREFLIGHT and BRANCH_SETUP.
	const enterSubtaskLoop = (target: WorkflowOrchestrator) => {
		target.transition({ type: 'PREFLIGHT_COMPLETE' });
		target.transition({ type: 'BRANCH_CREATED', branchName: 'feature/test' });
	};

	it('should enforce guard conditions on transitions', () => {
		// No subtasks, so a subtask-count guard must reject entry to SUBTASK_LOOP.
		const guarded = new WorkflowOrchestrator(
			makeContext([], { guardTest: true })
		);
		guarded.addGuard('SUBTASK_LOOP', (context) => {
			return context.subtasks.length > 0;
		});
		guarded.transition({ type: 'PREFLIGHT_COMPLETE' });
		expect(() => {
			guarded.transition({
				type: 'BRANCH_CREATED',
				branchName: 'feature/test'
			});
		}).toThrow('Guard condition failed');
	});

	it('should allow transition when guard condition passes', () => {
		const guarded = new WorkflowOrchestrator(
			makeContext([
				{ id: '1.1', title: 'Test', status: 'pending', attempts: 0 }
			])
		);
		guarded.addGuard('SUBTASK_LOOP', (context) => {
			return context.subtasks.length > 0;
		});
		enterSubtaskLoop(guarded);
		expect(guarded.getCurrentPhase()).toBe('SUBTASK_LOOP');
	});

	it('should validate test results before GREEN phase transition', () => {
		enterSubtaskLoop(orchestrator);
		// RED_PHASE_COMPLETE without testResults must be rejected.
		expect(() => {
			orchestrator.transition({ type: 'RED_PHASE_COMPLETE' });
		}).toThrow('Test results required');
	});

	it('should validate RED phase test results have failures', () => {
		enterSubtaskLoop(orchestrator);
		// All-passing results are invalid for the RED phase.
		expect(() => {
			orchestrator.transition({
				type: 'RED_PHASE_COMPLETE',
				testResults: { ...passingRun, phase: 'RED' as const }
			});
		}).toThrow('RED phase must have at least one failing test');
	});

	it('should allow RED to GREEN transition with valid failing tests', () => {
		enterSubtaskLoop(orchestrator);
		orchestrator.transition({
			type: 'RED_PHASE_COMPLETE',
			testResults: failingRun
		});
		expect(orchestrator.getCurrentTDDPhase()).toBe('GREEN');
	});

	it('should validate GREEN phase test results have no failures', () => {
		enterSubtaskLoop(orchestrator);
		orchestrator.transition({
			type: 'RED_PHASE_COMPLETE',
			testResults: failingRun
		});
		// Any remaining failure is invalid for the GREEN phase.
		expect(() => {
			orchestrator.transition({
				type: 'GREEN_PHASE_COMPLETE',
				testResults: {
					total: 5,
					passed: 3,
					failed: 2,
					skipped: 0,
					phase: 'GREEN' as const
				}
			});
		}).toThrow('GREEN phase must have zero failures');
	});

	it('should allow GREEN to COMMIT transition with all tests passing', () => {
		enterSubtaskLoop(orchestrator);
		orchestrator.transition({
			type: 'RED_PHASE_COMPLETE',
			testResults: failingRun
		});
		orchestrator.transition({
			type: 'GREEN_PHASE_COMPLETE',
			testResults: passingRun
		});
		expect(orchestrator.getCurrentTDDPhase()).toBe('COMMIT');
	});

	it('should store test results in context', () => {
		enterSubtaskLoop(orchestrator);
		orchestrator.transition({
			type: 'RED_PHASE_COMPLETE',
			testResults: failingRun
		});
		expect(orchestrator.getContext().lastTestResults).toEqual(failingRun);
	});

	it('should validate git repository state before BRANCH_SETUP', () => {
		// requireGit is false, so a guard demanding it must block BRANCH_SETUP.
		const gitOrchestrator = new WorkflowOrchestrator(
			makeContext(
				[{ id: '1.1', title: 'Test', status: 'pending', attempts: 0 }],
				{ requireGit: false }
			)
		);
		gitOrchestrator.addGuard('BRANCH_SETUP', (context) => {
			return context.metadata.requireGit === true;
		});
		expect(() => {
			gitOrchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		}).toThrow('Guard condition failed');
	});
});
describe('Subtask Iteration and Progress Tracking', () => {
	// One full RED -> GREEN -> COMMIT -> next-subtask pass.
	const completeCycle = () => {
		orchestrator.transition({
			type: 'RED_PHASE_COMPLETE',
			testResults: {
				total: 5,
				passed: 0,
				failed: 5,
				skipped: 0,
				phase: 'RED' as const
			}
		});
		orchestrator.transition({
			type: 'GREEN_PHASE_COMPLETE',
			testResults: {
				total: 5,
				passed: 5,
				failed: 0,
				skipped: 0,
				phase: 'GREEN' as const
			}
		});
		orchestrator.transition({ type: 'COMMIT_COMPLETE' });
		orchestrator.transition({ type: 'SUBTASK_COMPLETE' });
	};

	beforeEach(() => {
		// Every test in this suite begins inside SUBTASK_LOOP.
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		orchestrator.transition({
			type: 'BRANCH_CREATED',
			branchName: 'feature/test'
		});
	});

	it('should return current subtask', () => {
		const current = orchestrator.getCurrentSubtask();
		expect(current).toBeDefined();
		expect(current?.id).toBe('1.1');
		expect(current?.title).toBe('Subtask 1');
	});

	it('should return undefined when no current subtask', () => {
		// Finish both subtasks; the cursor then points past the end.
		completeCycle();
		completeCycle();
		expect(orchestrator.getCurrentSubtask()).toBeUndefined();
	});

	it('should calculate workflow progress', () => {
		const progress = orchestrator.getProgress();
		expect(progress.completed).toBe(0);
		expect(progress.total).toBe(2);
		expect(progress.current).toBe(1);
		expect(progress.percentage).toBe(0);
	});

	it('should update progress as subtasks complete', () => {
		completeCycle();
		const progress = orchestrator.getProgress();
		expect(progress.completed).toBe(1);
		expect(progress.total).toBe(2);
		expect(progress.current).toBe(2);
		expect(progress.percentage).toBe(50);
	});

	it('should show 100% progress when all subtasks complete', () => {
		completeCycle();
		completeCycle();
		const progress = orchestrator.getProgress();
		expect(progress.completed).toBe(2);
		expect(progress.total).toBe(2);
		expect(progress.percentage).toBe(100);
	});

	it('should validate if can proceed to next phase', () => {
		// canProceed stays false through RED, GREEN and COMMIT, and only
		// flips to true once the commit for the current subtask lands.
		expect(orchestrator.canProceed()).toBe(false);
		orchestrator.transition({
			type: 'RED_PHASE_COMPLETE',
			testResults: {
				total: 5,
				passed: 0,
				failed: 5,
				skipped: 0,
				phase: 'RED' as const
			}
		});
		expect(orchestrator.canProceed()).toBe(false);
		orchestrator.transition({
			type: 'GREEN_PHASE_COMPLETE',
			testResults: {
				total: 5,
				passed: 5,
				failed: 0,
				skipped: 0,
				phase: 'GREEN' as const
			}
		});
		expect(orchestrator.canProceed()).toBe(false);
		orchestrator.transition({ type: 'COMMIT_COMPLETE' });
		expect(orchestrator.canProceed()).toBe(true);
	});

	it('should track subtask attempts', () => {
		expect(orchestrator.getContext().subtasks[0].attempts).toBe(0);
		orchestrator.incrementAttempts();
		expect(orchestrator.getContext().subtasks[0].attempts).toBe(1);
	});

	it('should enforce max attempts limit', () => {
		const limited = new WorkflowOrchestrator({
			taskId: 'task-1',
			subtasks: [
				{
					id: '1.1',
					title: 'Subtask 1',
					status: 'pending',
					attempts: 0,
					maxAttempts: 3
				}
			],
			currentSubtaskIndex: 0,
			errors: [],
			metadata: {}
		});
		limited.transition({ type: 'PREFLIGHT_COMPLETE' });
		limited.transition({ type: 'BRANCH_CREATED', branchName: 'feature/test' });
		// Reaching the limit exactly is still within bounds...
		for (let attempt = 0; attempt < 3; attempt++) {
			limited.incrementAttempts();
		}
		expect(limited.hasExceededMaxAttempts()).toBe(false);
		// ...but one more attempt exceeds it.
		limited.incrementAttempts();
		expect(limited.hasExceededMaxAttempts()).toBe(true);
	});

	it('should allow unlimited attempts when maxAttempts is undefined', () => {
		for (let attempt = 0; attempt < 100; attempt++) {
			orchestrator.incrementAttempts();
		}
		expect(orchestrator.hasExceededMaxAttempts()).toBe(false);
	});

	it('should emit progress events on subtask completion', () => {
		const received: WorkflowEventData[] = [];
		orchestrator.on('progress:updated', (event) => received.push(event));
		completeCycle();
		expect(received).toHaveLength(1);
		expect(received[0].type).toBe('progress:updated');
		expect(received[0].data?.completed).toBe(1);
		expect(received[0].data?.total).toBe(2);
	});
});
// Error handling within SUBTASK_LOOP: error capture and history, retry and
// attempt accounting, max-attempt failure, and abort semantics.
describe('Error Handling and Recovery', () => {
	beforeEach(() => {
		// Navigate to SUBTASK_LOOP for all tests
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		orchestrator.transition({
			type: 'BRANCH_CREATED',
			branchName: 'feature/test'
		});
	});

	it('should handle errors with ERROR event', () => {
		const error: WorkflowError = {
			phase: 'SUBTASK_LOOP',
			message: 'Test execution failed',
			timestamp: new Date(),
			recoverable: true
		};
		orchestrator.transition({ type: 'ERROR', error });
		// Errors are accumulated on the context rather than thrown.
		const context = orchestrator.getContext();
		expect(context.errors).toHaveLength(1);
		expect(context.errors[0].message).toBe('Test execution failed');
	});

	it('should emit error:occurred event', () => {
		const events: WorkflowEventData[] = [];
		orchestrator.on('error:occurred', (event) => events.push(event));
		const error: WorkflowError = {
			phase: 'SUBTASK_LOOP',
			message: 'Test execution failed',
			timestamp: new Date(),
			recoverable: true
		};
		orchestrator.transition({ type: 'ERROR', error });
		expect(events).toHaveLength(1);
		expect(events[0].type).toBe('error:occurred');
		// The emitted payload carries the original error object.
		expect(events[0].data?.error).toEqual(error);
	});

	it('should support retry attempts', () => {
		const currentSubtask = orchestrator.getCurrentSubtask();
		expect(currentSubtask?.attempts).toBe(0);
		// Simulate failed attempt
		orchestrator.incrementAttempts();
		orchestrator.retryCurrentSubtask();
		// Retrying restarts the TDD cycle at RED while keeping the attempt count.
		const context = orchestrator.getContext();
		expect(context.currentTDDPhase).toBe('RED');
		expect(context.subtasks[0].attempts).toBe(1);
	});

	it('should mark subtask as failed when max attempts exceeded', () => {
		// Fresh context with a low maxAttempts so the limit is easy to exceed.
		const limitedContext: WorkflowContext = {
			taskId: 'task-1',
			subtasks: [
				{
					id: '1.1',
					title: 'Subtask 1',
					status: 'pending',
					attempts: 0,
					maxAttempts: 2
				}
			],
			currentSubtaskIndex: 0,
			errors: [],
			metadata: {}
		};
		const limitedOrchestrator = new WorkflowOrchestrator(limitedContext);
		limitedOrchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		limitedOrchestrator.transition({
			type: 'BRANCH_CREATED',
			branchName: 'feature/test'
		});
		// Exceed max attempts
		for (let i = 0; i < 3; i++) {
			limitedOrchestrator.incrementAttempts();
		}
		limitedOrchestrator.handleMaxAttemptsExceeded();
		const context = limitedOrchestrator.getContext();
		expect(context.subtasks[0].status).toBe('failed');
	});

	it('should emit subtask:failed event when max attempts exceeded', () => {
		const events: WorkflowEventData[] = [];
		// NOTE(review): this listener on the outer `orchestrator` never fires —
		// the failure below happens on `limitedOrchestrator` — so it looks removable.
		orchestrator.on('subtask:failed', (event) => events.push(event));
		const limitedContext: WorkflowContext = {
			taskId: 'task-1',
			subtasks: [
				{
					id: '1.1',
					title: 'Subtask 1',
					status: 'pending',
					attempts: 0,
					maxAttempts: 2
				}
			],
			currentSubtaskIndex: 0,
			errors: [],
			metadata: {}
		};
		const limitedOrchestrator = new WorkflowOrchestrator(limitedContext);
		limitedOrchestrator.on('subtask:failed', (event) => events.push(event));
		limitedOrchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		limitedOrchestrator.transition({
			type: 'BRANCH_CREATED',
			branchName: 'feature/test'
		});
		// Exceed max attempts
		for (let i = 0; i < 3; i++) {
			limitedOrchestrator.incrementAttempts();
		}
		limitedOrchestrator.handleMaxAttemptsExceeded();
		expect(events).toHaveLength(1);
		expect(events[0].type).toBe('subtask:failed');
	});

	it('should support abort workflow', () => {
		orchestrator.transition({ type: 'ABORT' });
		// Should still be in SUBTASK_LOOP but workflow should be aborted
		expect(orchestrator.getCurrentPhase()).toBe('SUBTASK_LOOP');
		expect(orchestrator.isAborted()).toBe(true);
	});

	it('should prevent transitions after abort', () => {
		orchestrator.transition({ type: 'ABORT' });
		// Any further transition after ABORT must be rejected.
		expect(() => {
			orchestrator.transition({
				type: 'RED_PHASE_COMPLETE',
				testResults: {
					total: 5,
					passed: 0,
					failed: 5,
					skipped: 0,
					phase: 'RED'
				}
			});
		}).toThrow('Workflow has been aborted');
	});

	it('should allow retry after recoverable error', () => {
		const error: WorkflowError = {
			phase: 'SUBTASK_LOOP',
			message: 'Temporary failure',
			timestamp: new Date(),
			recoverable: true
		};
		orchestrator.transition({ type: 'ERROR', error });
		// Should be able to retry
		expect(() => {
			orchestrator.transition({ type: 'RETRY' });
		}).not.toThrow();
		// RETRY restarts the current subtask's TDD cycle at RED.
		expect(orchestrator.getCurrentTDDPhase()).toBe('RED');
	});

	it('should track error history in context', () => {
		const error1: WorkflowError = {
			phase: 'SUBTASK_LOOP',
			message: 'Error 1',
			timestamp: new Date(),
			recoverable: true
		};
		const error2: WorkflowError = {
			phase: 'SUBTASK_LOOP',
			message: 'Error 2',
			timestamp: new Date(),
			recoverable: false
		};
		orchestrator.transition({ type: 'ERROR', error: error1 });
		orchestrator.transition({ type: 'RETRY' });
		orchestrator.transition({ type: 'ERROR', error: error2 });
		// Both errors are kept in order; RETRY does not clear the history.
		const context = orchestrator.getContext();
		expect(context.errors).toHaveLength(2);
		expect(context.errors[0].message).toBe('Error 1');
		expect(context.errors[1].message).toBe('Error 2');
	});
});
// Checkpoint save/restore: getState()/restoreState() round-trips must preserve
// phase, TDD phase, subtask index, attempts, errors, and derived progress.
describe('Resume Functionality from Checkpoints', () => {
	it('should restore state from checkpoint', () => {
		// Advance to SUBTASK_LOOP and complete first subtask
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		orchestrator.transition({
			type: 'BRANCH_CREATED',
			branchName: 'feature/test'
		});
		orchestrator.transition({
			type: 'RED_PHASE_COMPLETE',
			testResults: {
				total: 5,
				passed: 0,
				failed: 5,
				skipped: 0,
				phase: 'RED'
			}
		});
		orchestrator.transition({
			type: 'GREEN_PHASE_COMPLETE',
			testResults: {
				total: 5,
				passed: 5,
				failed: 0,
				skipped: 0,
				phase: 'GREEN'
			}
		});
		orchestrator.transition({ type: 'COMMIT_COMPLETE' });
		orchestrator.transition({ type: 'SUBTASK_COMPLETE' });
		// Save state
		const state = orchestrator.getState();
		// Create new orchestrator and restore
		const restored = new WorkflowOrchestrator(state.context);
		restored.restoreState(state);
		expect(restored.getCurrentPhase()).toBe('SUBTASK_LOOP');
		expect(restored.getContext().currentSubtaskIndex).toBe(1);
		expect(restored.getContext().branchName).toBe('feature/test');
	});

	it('should resume from mid-TDD cycle', () => {
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		orchestrator.transition({
			type: 'BRANCH_CREATED',
			branchName: 'feature/test'
		});
		orchestrator.transition({
			type: 'RED_PHASE_COMPLETE',
			testResults: {
				total: 5,
				passed: 0,
				failed: 5,
				skipped: 0,
				phase: 'RED'
			}
		});
		// Save state in GREEN phase
		const state = orchestrator.getState();
		// Restore and verify in GREEN phase
		const restored = new WorkflowOrchestrator(state.context);
		restored.restoreState(state);
		expect(restored.getCurrentPhase()).toBe('SUBTASK_LOOP');
		expect(restored.getCurrentTDDPhase()).toBe('GREEN');
	});

	it('should validate restored state integrity', () => {
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		orchestrator.transition({
			type: 'BRANCH_CREATED',
			branchName: 'feature/test'
		});
		const state = orchestrator.getState();
		// Validate state structure
		expect(orchestrator.canResumeFromState(state)).toBe(true);
	});

	it('should reject invalid checkpoint state', () => {
		// A phase outside the known WorkflowPhase set must not be resumable.
		const invalidState = {
			phase: 'INVALID_PHASE' as WorkflowPhase,
			context: initialContext
		};
		expect(orchestrator.canResumeFromState(invalidState)).toBe(false);
	});

	it('should preserve subtask attempts on resume', () => {
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		orchestrator.transition({
			type: 'BRANCH_CREATED',
			branchName: 'feature/test'
		});
		// Increment attempts
		orchestrator.incrementAttempts();
		orchestrator.incrementAttempts();
		const state = orchestrator.getState();
		// Restore
		const restored = new WorkflowOrchestrator(state.context);
		restored.restoreState(state);
		const currentSubtask = restored.getCurrentSubtask();
		expect(currentSubtask?.attempts).toBe(2);
	});

	it('should preserve errors on resume', () => {
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		orchestrator.transition({
			type: 'BRANCH_CREATED',
			branchName: 'feature/test'
		});
		const error: WorkflowError = {
			phase: 'SUBTASK_LOOP',
			message: 'Test error',
			timestamp: new Date(),
			recoverable: true
		};
		orchestrator.transition({ type: 'ERROR', error });
		const state = orchestrator.getState();
		// Restore
		const restored = new WorkflowOrchestrator(state.context);
		restored.restoreState(state);
		expect(restored.getContext().errors).toHaveLength(1);
		expect(restored.getContext().errors[0].message).toBe('Test error');
	});

	it('should preserve completed subtask statuses on resume', () => {
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		orchestrator.transition({
			type: 'BRANCH_CREATED',
			branchName: 'feature/test'
		});
		// Complete first subtask
		orchestrator.transition({
			type: 'RED_PHASE_COMPLETE',
			testResults: {
				total: 5,
				passed: 0,
				failed: 5,
				skipped: 0,
				phase: 'RED'
			}
		});
		orchestrator.transition({
			type: 'GREEN_PHASE_COMPLETE',
			testResults: {
				total: 5,
				passed: 5,
				failed: 0,
				skipped: 0,
				phase: 'GREEN'
			}
		});
		orchestrator.transition({ type: 'COMMIT_COMPLETE' });
		orchestrator.transition({ type: 'SUBTASK_COMPLETE' });
		const state = orchestrator.getState();
		// Restore
		const restored = new WorkflowOrchestrator(state.context);
		restored.restoreState(state);
		// Progress is derived from restored statuses: 1 done, now on subtask 2.
		const progress = restored.getProgress();
		expect(progress.completed).toBe(1);
		expect(progress.current).toBe(2);
	});

	it('should emit workflow:resumed event on restore', () => {
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		orchestrator.transition({
			type: 'BRANCH_CREATED',
			branchName: 'feature/test'
		});
		const state = orchestrator.getState();
		// Create new orchestrator with event listener
		const events: WorkflowEventData[] = [];
		const restored = new WorkflowOrchestrator(state.context);
		restored.on('workflow:resumed', (event) => events.push(event));
		restored.restoreState(state);
		expect(events).toHaveLength(1);
		expect(events[0].type).toBe('workflow:resumed');
		expect(events[0].phase).toBe('SUBTASK_LOOP');
	});

	it('should calculate correct progress after resume', () => {
		// Complete first subtask
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		orchestrator.transition({
			type: 'BRANCH_CREATED',
			branchName: 'feature/test'
		});
		orchestrator.transition({
			type: 'RED_PHASE_COMPLETE',
			testResults: {
				total: 5,
				passed: 0,
				failed: 5,
				skipped: 0,
				phase: 'RED'
			}
		});
		orchestrator.transition({
			type: 'GREEN_PHASE_COMPLETE',
			testResults: {
				total: 5,
				passed: 5,
				failed: 0,
				skipped: 0,
				phase: 'GREEN'
			}
		});
		orchestrator.transition({ type: 'COMMIT_COMPLETE' });
		orchestrator.transition({ type: 'SUBTASK_COMPLETE' });
		const state = orchestrator.getState();
		// Restore and check progress
		const restored = new WorkflowOrchestrator(state.context);
		restored.restoreState(state);
		// 1 of 2 subtasks complete => 50%.
		const progress = restored.getProgress();
		expect(progress.completed).toBe(1);
		expect(progress.total).toBe(2);
		expect(progress.percentage).toBe(50);
	});
});
// Optional adapter wiring: TestResultValidator gating of RED/GREEN transitions,
// git/executor operation hooks, and adapter lifecycle (configure/remove) events.
describe('Adapter Integration', () => {
	let testValidator: TestResultValidator;

	beforeEach(() => {
		testValidator = new TestResultValidator();
	});

	it('should integrate with TestResultValidator', () => {
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		orchestrator.transition({
			type: 'BRANCH_CREATED',
			branchName: 'feature/test'
		});
		// Set validator
		orchestrator.setTestResultValidator(testValidator);
		// Validator should be used internally
		expect(orchestrator.hasTestResultValidator()).toBe(true);
	});

	it('should use TestResultValidator to validate RED phase', () => {
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		orchestrator.transition({
			type: 'BRANCH_CREATED',
			branchName: 'feature/test'
		});
		orchestrator.setTestResultValidator(testValidator);
		// Should reject passing tests in RED phase
		expect(() => {
			orchestrator.transition({
				type: 'RED_PHASE_COMPLETE',
				testResults: {
					total: 5,
					passed: 5,
					failed: 0,
					skipped: 0,
					phase: 'RED'
				}
			});
		}).toThrow('RED phase must have at least one failing test');
	});

	it('should use TestResultValidator to validate GREEN phase', () => {
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		orchestrator.transition({
			type: 'BRANCH_CREATED',
			branchName: 'feature/test'
		});
		orchestrator.setTestResultValidator(testValidator);
		// A valid RED phase (all failing) gets us into GREEN.
		orchestrator.transition({
			type: 'RED_PHASE_COMPLETE',
			testResults: {
				total: 5,
				passed: 0,
				failed: 5,
				skipped: 0,
				phase: 'RED'
			}
		});
		// Should reject failing tests in GREEN phase
		expect(() => {
			orchestrator.transition({
				type: 'GREEN_PHASE_COMPLETE',
				testResults: {
					total: 5,
					passed: 3,
					failed: 2,
					skipped: 0,
					phase: 'GREEN'
				}
			});
		}).toThrow('GREEN phase must have zero failures');
	});

	it('should support git adapter hooks', () => {
		const gitOperations: string[] = [];
		orchestrator.onGitOperation((operation, data) => {
			gitOperations.push(operation);
		});
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		orchestrator.transition({
			type: 'BRANCH_CREATED',
			branchName: 'feature/test'
		});
		// Verify git operation hook was called
		expect(gitOperations).toContain('branch:created');
	});

	it('should support executor adapter hooks', () => {
		const executions: string[] = [];
		orchestrator.onExecute((command, context) => {
			executions.push(command);
		});
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		orchestrator.transition({
			type: 'BRANCH_CREATED',
			branchName: 'feature/test'
		});
		orchestrator.executeCommand('run-tests');
		expect(executions).toContain('run-tests');
	});

	it('should provide adapter context in events', () => {
		const events: WorkflowEventData[] = [];
		orchestrator.on('phase:entered', (event) => events.push(event));
		orchestrator.setTestResultValidator(testValidator);
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		// Event should include adapter availability
		expect(events[0].data?.adapters).toBeDefined();
	});

	it('should allow adapter reconfiguration', () => {
		// Adapters can be attached and detached at runtime.
		orchestrator.setTestResultValidator(testValidator);
		expect(orchestrator.hasTestResultValidator()).toBe(true);
		orchestrator.removeTestResultValidator();
		expect(orchestrator.hasTestResultValidator()).toBe(false);
	});

	it('should work without adapters (optional integration)', () => {
		// Should work fine without adapters
		orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
		orchestrator.transition({
			type: 'BRANCH_CREATED',
			branchName: 'feature/test'
		});
		expect(orchestrator.getCurrentPhase()).toBe('SUBTASK_LOOP');
	});

	it('should emit adapter-related events', () => {
		const events: WorkflowEventData[] = [];
		orchestrator.on('adapter:configured', (event) => events.push(event));
		orchestrator.setTestResultValidator(testValidator);
		expect(events).toHaveLength(1);
		expect(events[0].type).toBe('adapter:configured');
		expect(events[0].data?.adapterType).toBe('test-validator');
	});
});
});
```
--------------------------------------------------------------------------------
/update-task-migration-plan.md:
--------------------------------------------------------------------------------
```markdown
# Update Task Migration Plan
## Overview
Migrate and unify `update-tasks.js` and `update-subtask-by-id.js` into a single `update-task` command that handles both task and subtask updates. This migration will move from the legacy `scripts/modules/task-manager/` structure to the new `apps/cli` and `packages/tm-core` architecture.
## Current State Analysis
### `update-tasks.js` - Bulk Task Updates
**Purpose**: Update multiple tasks from a specified ID onwards
**Input Format**: `--from=<id> --prompt="context"`
**AI Service**: `generateObjectService` with structured schema
### `update-subtask-by-id.js` - Single Subtask Updates
**Purpose**: Append timestamped information to a specific subtask
**Input Format**: `--id=<parentId.subtaskId> --prompt="notes"`
**AI Service**: `generateTextService` for freeform content
## Unified Command Design
### New Command: `update-task`
```bash
# Update single task (replaces update-task)
task-master update-task --id=3 --prompt="changes"
# Update single subtask (replaces update-subtask)
task-master update-task --id=3.2 --prompt="implementation notes"
# Update multiple tasks from ID onwards (replaces update --from)
task-master update-task --from=3 --prompt="changes"
```
### Intelligent Behavior Detection
The command should automatically determine behavior based on:
1. **ID format**: Contains `.` → subtask mode
2. **--from flag**: Present → bulk update mode
3. **Default**: Single task update mode
---
## Functionality Checklist
### Core Functionality
#### Input Validation & Parsing
- [ ] Validate `tasksPath` exists
- [ ] Validate `id` parameter (task: integer, subtask: "parent.child" format)
- [ ] Validate `fromId` parameter (integer, positive)
- [ ] Validate `prompt` parameter (non-empty string)
- [ ] Parse subtask ID format: split "parentId.subtaskId" and validate both parts
- [ ] Determine project root (from context or `findProjectRoot()`)
- [ ] Support both MCP and CLI modes (detect via `mcpLog` presence)
- [ ] Handle `outputFormat` ('text' or 'json', auto-detect for MCP)
#### Task Loading & Filtering
- [ ] Load tasks from `tasks.json` using `readJSON(tasksPath, projectRoot, tag)`
- [ ] Validate tasks data structure exists
- [ ] **Bulk mode**: Filter tasks where `id >= fromId AND status !== 'done'`
- [ ] **Single task mode**: Find specific task by ID
- [ ] **Subtask mode**: Find parent task, validate subtasks array, find specific subtask
- [ ] Handle "no tasks to update" scenario gracefully
#### Context Gathering
- [ ] Initialize `ContextGatherer` with projectRoot and tag
- [ ] Flatten all tasks with subtasks using `flattenTasksWithSubtasks()`
- [ ] Initialize `FuzzyTaskSearch` with appropriate command type:
- `'update'` for bulk/single task mode
- `'update-subtask'` for subtask mode
- [ ] **Bulk/Single task**: Search with prompt, max 5 results, include self
- [ ] **Subtask mode**: Search with combined query: `${parentTask.title} ${subtask.title} ${prompt}`
- [ ] Merge task IDs to update with relevant context task IDs
- [ ] Gather context in 'research' format
- [ ] Handle context gathering errors gracefully (log warning, continue)
#### Prompt Building
- [ ] Initialize `PromptManager` via `getPromptManager()`
- [ ] **Bulk/Single task mode**: Load 'update-tasks' prompt template with params:
- `tasks` (array of tasks to update)
- `updatePrompt`
- `useResearch`
- `projectContext` (gathered context)
- `hasCodebaseAnalysis` (from config)
- `projectRoot`
- [ ] **Subtask mode**: Load 'update-subtask' prompt template with params:
- `parentTask` (id, title)
- `prevSubtask` (id, title, status) - if exists
- `nextSubtask` (id, title, status) - if exists
- `currentDetails` (existing subtask details or fallback)
- `updatePrompt`
- `useResearch`
- `gatheredContext`
- `hasCodebaseAnalysis`
- `projectRoot`
- [ ] **Subtask mode**: Support variant key ('research' or 'default')
- [ ] Extract `systemPrompt` and `userPrompt` from prompt manager
#### AI Service Integration
- [ ] Determine service role: `useResearch ? 'research' : 'main'`
- [ ] **Bulk/Single task mode**: Call `generateObjectService` with:
- `role`, `session`, `projectRoot`
- `systemPrompt`, `prompt` (userPrompt)
- `schema: COMMAND_SCHEMAS['update-tasks']`
- `objectName: 'tasks'`
- `commandName: 'update-tasks'`
- `outputType: isMCP ? 'mcp' : 'cli'`
- [ ] **Subtask mode**: Call `generateTextService` with:
- `prompt` (userPrompt), `systemPrompt`
- `role`, `session`, `projectRoot`
- `maxRetries: 2`
- `commandName: 'update-subtask'`
- `outputType: isMCP ? 'mcp' : 'cli'`
- [ ] Handle empty/invalid AI responses
- [ ] Capture `telemetryData` and `tagInfo` from response
#### Data Updates & Persistence
- [ ] **Bulk/Single task mode**:
- Parse `aiServiceResponse.mainResult.tasks` array
- Validate array structure
- Create Map for efficient lookup
- Merge updated tasks with existing, preserving subtasks field
- Track actual update count
- [ ] **Subtask mode**:
- Extract text string from `aiServiceResponse.mainResult`
- Generate ISO timestamp
- Format as: `<info added on ${timestamp}>\n${content}\n</info added on ${timestamp}>`
- Append to `subtask.details` (create if doesn't exist)
- Store newly added snippet separately for display
- If prompt < 100 chars: append `[Updated: ${date}]` to subtask.description
- [ ] Write updated data using `writeJSON(tasksPath, data, projectRoot, tag)`
- [ ] Optionally call `generateTaskFiles()` (currently commented out in both)
#### CLI Display & UX
- [ ] **Pre-update display** (CLI only, text mode):
- Create table with columns: ID, Title, Status
- Truncate titles appropriately (57 chars for tasks, 52 for subtasks)
- Apply status colors via `getStatusWithColor()`
- Show boxed header with update count/target
- **Bulk mode**: Show info box about completed subtasks handling
- Display table
- [ ] **Loading indicators** (CLI only, text mode):
- Start loading indicator before AI call
- Message: "Updating tasks with AI..." (bulk/single) or "Updating subtask..." (subtask)
- Support research variant message
- Stop indicator when complete or on error
- [ ] **Post-update display** (CLI only, text mode):
- **Bulk/Single task**: Success message with update count
- **Subtask mode**: Boxed success message with:
- Subtask ID
- Title
- "Newly Added Snippet" section showing timestamped content
- Display AI usage summary via `displayAiUsageSummary(telemetryData, 'cli')`
#### Logging & Debugging
- [ ] Use appropriate logger: `mcpLog` (MCP) or `consoleLog` (CLI)
- [ ] Log info messages with proper format (MCP vs CLI differences)
- [ ] Log start of operation with key parameters
- [ ] Log task counts and AI response details
- [ ] Log successful completion
- [ ] **Debug mode** (when `getDebugFlag(session)` true):
- Log subtask details before/after update
- Log writeJSON calls
- Log full error stack traces
#### Error Handling
- [ ] Catch and handle errors at multiple levels:
- Context gathering errors (warn and continue)
- AI service errors (stop and report)
- General operation errors (report and exit/throw)
- [ ] **CLI mode**:
- Print colored error messages
- Show helpful troubleshooting for common errors:
- API key missing/invalid
- Model overloaded
- Task/subtask not found
- Invalid ID format
- Empty prompt
- Empty AI response
- Exit with code 1
- [ ] **MCP mode**: Re-throw errors for caller handling
- [ ] Always stop loading indicators on error
#### Return Values
- [ ] **Success returns** (both modes):
```javascript
{
success: true, // bulk/single task only
updatedTasks: [...], // bulk/single task only
updatedSubtask: {...}, // subtask only
telemetryData: {...},
tagInfo: {...}
}
```
- [ ] **Failure returns**:
- CLI: exits with code 1
- MCP: throws error
- Subtask mode: returns `null` on error
### Special Features
#### Completed Subtasks Handling (Bulk Mode)
- [ ] Display informational box explaining:
- Done/completed subtasks are preserved
- New subtasks build upon completed work
- Revisions create new subtasks instead of modifying done items
- Maintains clear record of progress
#### Subtask Context Awareness
- [ ] Provide parent task context (id, title) to AI
- [ ] Provide previous subtask context (if exists) to AI
- [ ] Provide next subtask context (if exists) to AI
- [ ] Include current subtask details in prompt
#### Timestamp Tracking
- [ ] Use ISO format timestamps for subtask updates
- [ ] Wrap appended content in timestamped tags
- [ ] Update description field with simple date stamp (short prompts only)
---
## Migration Architecture
### Object-Oriented Design Philosophy
This migration will follow the established patterns in `tm-core` and `apps/cli`:
- **Domain separation** with clear bounded contexts
- **Dependency injection** for testability and flexibility
- **Abstract base classes** for shared behavior
- **Interfaces** for contracts and loose coupling
- **Service layer** for business logic orchestration
- **Factory pattern** for object creation
- **Single Responsibility Principle** throughout
### Package Structure
```
packages/tm-core/
src/
commands/
update-task/
# Core Interfaces & Types
types.ts # Shared types, enums, interfaces
interfaces/
update-strategy.interface.ts # IUpdateStrategy contract
update-context.interface.ts # IUpdateContext contract
display.interface.ts # IDisplayManager contract
# Services (Business Logic)
update-task.service.ts # Main orchestrator service
context-builder.service.ts # Builds AI context (uses ContextGatherer, FuzzySearch)
prompt-builder.service.ts # Builds prompts (uses PromptManager)
data-merger.service.ts # Merges AI results with existing data
# Strategies (Update Mode Logic)
strategies/
base-update.strategy.ts # Abstract base class for all strategies
bulk-update.strategy.ts # Bulk task update implementation
single-task-update.strategy.ts # Single task update implementation
subtask-update.strategy.ts # Subtask update implementation
# Utilities & Helpers
validators/
update-input.validator.ts # Validates all input parameters
task-id.validator.ts # Parses and validates task/subtask IDs
display/
cli-display.manager.ts # CLI output formatting
json-display.manager.ts # JSON output formatting
update-display.factory.ts # Creates appropriate display manager
factories/
update-strategy.factory.ts # Creates appropriate strategy based on mode
# Main Entry Point
index.ts # Public API export
apps/cli/
src/
commands/
update-task.command.ts # CLI command definition (uses UpdateTaskService)
```
### Core Classes & Their Responsibilities
#### 1. **UpdateTaskService** (Main Orchestrator)
```typescript
/**
* Main service that coordinates the entire update process
* Handles initialization, strategy selection, and result aggregation
*/
export class UpdateTaskService {
constructor(
private readonly configManager: ConfigManager,
private readonly storage: IStorage,
private readonly logger: Logger,
private readonly strategyFactory: UpdateStrategyFactory,
private readonly contextBuilder: ContextBuilderService,
private readonly displayFactory: UpdateDisplayFactory
) {}
async updateTask(options: UpdateTaskOptions): Promise<UpdateTaskResult> {
// 1. Validate inputs
// 2. Detect mode and create strategy
// 3. Build context
// 4. Execute strategy
// 5. Display results
// 6. Return result
}
}
```
**Uses (existing classes):**
- `ConfigManager` - Project configuration
- `IStorage` - Task persistence
- `Logger` - Logging
- `ContextGatherer` - Gather related context
- `FuzzyTaskSearch` - Find relevant tasks
- `PromptManager` - Load prompt templates
**Uses (new classes):**
- `UpdateStrategyFactory` - Create update strategy
- `ContextBuilderService` - Build AI context
- `UpdateDisplayFactory` - Create display manager
---
#### 2. **IUpdateStrategy** (Strategy Interface)
```typescript
/**
* Contract for all update strategies
* Defines the common interface for bulk, single, and subtask updates
*/
export interface IUpdateStrategy {
/**
* Validate that the strategy can handle the given context
*/
validate(context: IUpdateContext): Promise<void>;
/**
* Load and filter tasks that need updating
*/
loadTasks(context: IUpdateContext): Promise<TaskLoadResult>;
/**
* Build prompts for AI service
*/
buildPrompts(
context: IUpdateContext,
tasks: TaskLoadResult
): Promise<PromptResult>;
/**
* Call appropriate AI service
*/
callAIService(
context: IUpdateContext,
prompts: PromptResult
): Promise<AIServiceResult>;
/**
* Merge AI results with existing data
*/
mergeResults(
context: IUpdateContext,
aiResult: AIServiceResult,
originalTasks: TaskLoadResult
): Promise<MergeResult>;
/**
* Get the mode this strategy handles
*/
getMode(): UpdateMode;
}
```
---
#### 3. **BaseUpdateStrategy** (Abstract Base Class)
```typescript
/**
* Provides common functionality for all update strategies
* Implements template method pattern for the update workflow
*/
export abstract class BaseUpdateStrategy implements IUpdateStrategy {
protected readonly logger: Logger;
constructor(
protected readonly contextBuilder: ContextBuilderService,
protected readonly promptBuilder: PromptBuilderService,
protected readonly dataMerger: DataMergerService,
protected readonly aiService: AIService // wrapper around generate[Object|Text]Service
) {
this.logger = getLogger(`UpdateStrategy:${this.getMode()}`);
}
// Template method - defines the workflow
async execute(context: IUpdateContext): Promise<UpdateStrategyResult> {
await this.validate(context);
const tasks = await this.loadTasks(context);
const prompts = await this.buildPrompts(context, tasks);
const aiResult = await this.callAIService(context, prompts);
const merged = await this.mergeResults(context, aiResult, tasks);
return merged;
}
// Subclasses must implement these
abstract validate(context: IUpdateContext): Promise<void>;
abstract loadTasks(context: IUpdateContext): Promise<TaskLoadResult>;
abstract getMode(): UpdateMode;
// Shared implementations with extensibility
async buildPrompts(
context: IUpdateContext,
tasks: TaskLoadResult
): Promise<PromptResult> {
// Delegates to PromptBuilderService with mode-specific params
}
protected abstract getPromptParams(
context: IUpdateContext,
tasks: TaskLoadResult
): PromptParams;
}
```
---
#### 4. **BulkUpdateStrategy** (Concrete Strategy)
```typescript
/**
* Handles bulk task updates (--from flag)
* Uses generateObjectService for structured updates
*/
export class BulkUpdateStrategy extends BaseUpdateStrategy {
getMode(): UpdateMode {
return UpdateMode.BULK;
}
async validate(context: IUpdateContext): Promise<void> {
if (!context.options.from) {
throw new TaskMasterError('Bulk mode requires --from parameter');
}
// Additional validations...
}
async loadTasks(context: IUpdateContext): Promise<TaskLoadResult> {
// Filter tasks where id >= fromId AND status !== 'done'
}
async callAIService(
context: IUpdateContext,
prompts: PromptResult
): Promise<AIServiceResult> {
// Call generateObjectService with update-tasks schema
}
protected getPromptParams(
context: IUpdateContext,
tasks: TaskLoadResult
): PromptParams {
return {
tasks: tasks.tasks,
updatePrompt: context.options.prompt,
useResearch: context.options.useResearch,
projectContext: tasks.gatheredContext,
// ...
};
}
}
```
---
#### 5. **SubtaskUpdateStrategy** (Concrete Strategy)
```typescript
/**
* Handles single subtask updates (--id with dot notation)
* Uses generateTextService for timestamped appends
*/
export class SubtaskUpdateStrategy extends BaseUpdateStrategy {
getMode(): UpdateMode {
return UpdateMode.SUBTASK;
}
async validate(context: IUpdateContext): Promise<void> {
const parsed = TaskIdValidator.parseSubtaskId(context.options.id);
if (!parsed) {
throw new TaskMasterError('Invalid subtask ID format');
}
}
async loadTasks(context: IUpdateContext): Promise<TaskLoadResult> {
// Find parent task, locate specific subtask
// Build context with prev/next subtask info
}
async callAIService(
context: IUpdateContext,
prompts: PromptResult
): Promise<AIServiceResult> {
// Call generateTextService for freeform content
}
async mergeResults(
context: IUpdateContext,
aiResult: AIServiceResult,
originalTasks: TaskLoadResult
): Promise<MergeResult> {
// Append timestamped content to subtask.details
const timestamp = new Date().toISOString();
const formatted = `<info added on ${timestamp}>\n${aiResult.text}\n</info added on ${timestamp}>`;
// ...
}
}
```
---
#### 6. **SingleTaskUpdateStrategy** (Concrete Strategy)
```typescript
/**
* Handles single task updates (--id without dot)
* Uses generateObjectService for structured updates
*/
export class SingleTaskUpdateStrategy extends BaseUpdateStrategy {
getMode(): UpdateMode {
return UpdateMode.SINGLE;
}
async validate(context: IUpdateContext): Promise<void> {
TaskIdValidator.validateTaskId(context.options.id);
}
async loadTasks(context: IUpdateContext): Promise<TaskLoadResult> {
// Find single task by ID
}
// Similar to BulkUpdateStrategy but operates on single task
}
```
---
#### 7. **ContextBuilderService** (Helper Service)
```typescript
/**
* Builds context for AI prompts
* Coordinates ContextGatherer and FuzzyTaskSearch
*/
export class ContextBuilderService {
constructor(
private readonly logger: Logger
) {}
async buildContext(
options: ContextBuildOptions
): Promise<BuiltContext> {
try {
const gatherer = new ContextGatherer(
options.projectRoot,
options.tag
);
const allTasksFlat = flattenTasksWithSubtasks(options.allTasks);
const fuzzySearch = new FuzzyTaskSearch(
allTasksFlat,
options.searchMode // 'update' or 'update-subtask'
);
const searchResults = fuzzySearch.findRelevantTasks(
options.searchQuery,
{ maxResults: 5, includeSelf: true }
);
const relevantTaskIds = fuzzySearch.getTaskIds(searchResults);
const finalTaskIds = [
...new Set([...options.targetTaskIds, ...relevantTaskIds])
];
const contextResult = await gatherer.gather({
tasks: finalTaskIds,
format: 'research'
});
return {
context: contextResult.context || '',
taskIds: finalTaskIds
};
} catch (error) {
this.logger.warn(`Context gathering failed: ${error.message}`);
return { context: '', taskIds: options.targetTaskIds };
}
}
}
```
**Uses (existing):**
- `ContextGatherer`
- `FuzzyTaskSearch`
---
#### 8. **PromptBuilderService** (Helper Service)
```typescript
/**
* Builds system and user prompts for AI services
* Wraps PromptManager with strategy-specific logic
*/
export class PromptBuilderService {
constructor(
private readonly promptManager: PromptManager,
private readonly logger: Logger
) {}
async buildPrompt(
templateName: string,
params: PromptParams,
variant?: string
): Promise<PromptResult> {
const { systemPrompt, userPrompt } = await this.promptManager.loadPrompt(
templateName,
params,
variant
);
return {
systemPrompt,
userPrompt,
templateName,
params
};
}
}
```
**Uses (existing):**
- `PromptManager`
---
#### 9. **DataMergerService** (Helper Service)
```typescript
/**
* Merges AI service results with existing task data
* Handles different merge strategies for different modes
*/
export class DataMergerService {
constructor(private readonly logger: Logger) {}
/**
* Merge for bulk/single task mode (structured updates)
*/
mergeTasks(
existingTasks: Task[],
updatedTasks: Task[],
options: MergeOptions
): MergeResult {
const updatedTasksMap = new Map(
updatedTasks.map(t => [t.id, t])
);
let updateCount = 0;
const merged = existingTasks.map(task => {
if (updatedTasksMap.has(task.id)) {
const updated = updatedTasksMap.get(task.id)!;
updateCount++;
return {
...task,
...updated,
// Preserve subtasks if not provided by AI
subtasks: updated.subtasks !== undefined
? updated.subtasks
: task.subtasks
};
}
return task;
});
return {
tasks: merged,
updateCount,
mode: 'structured'
};
}
/**
* Merge for subtask mode (timestamped append)
*/
mergeSubtask(
parentTask: Task,
subtaskIndex: number,
newContent: string,
options: SubtaskMergeOptions
): SubtaskMergeResult {
const subtask = parentTask.subtasks![subtaskIndex];
const timestamp = new Date().toISOString();
const formatted = `<info added on ${timestamp}>\n${newContent.trim()}\n</info added on ${timestamp}>`;
subtask.details = (subtask.details ? subtask.details + '\n' : '') + formatted;
// Short prompts get description timestamp
if (options.prompt.length < 100 && subtask.description) {
subtask.description += ` [Updated: ${new Date().toLocaleDateString()}]`;
}
return {
updatedSubtask: subtask,
newlyAddedSnippet: formatted,
parentTask
};
}
}
```
---
#### 10. **IDisplayManager** (Display Interface)
```typescript
/**
 * Contract for display managers.
 * Allows different output formats (CLI, JSON, etc.) to be swapped in
 * without changing the update workflow that drives them.
 */
export interface IDisplayManager {
/**
 * Show the affected tasks before the update runs.
 */
showPreUpdate(tasks: Task[], mode: UpdateMode): void;
/**
 * Start a loading indicator with the given message.
 */
startLoading(message: string): void;
// Stop the indicator; `success` presumably selects success vs. failure
// styling — TODO confirm against the concrete implementations.
stopLoading(success?: boolean): void;
/**
 * Show post-update results for the strategy that ran.
 */
showPostUpdate(result: UpdateStrategyResult, mode: UpdateMode): void;
/**
 * Show telemetry/usage data (e.g. AI token usage).
 */
showTelemetry(telemetry: TelemetryData): void;
/**
 * Show errors to the user in the manager's output format.
 */
showError(error: Error): void;
}
```
---
#### 11. **CLIDisplayManager** (Concrete Display)
```typescript
/**
 * Formats output for CLI with colors, tables, and boxes.
 * Sketch only: method bodies delegate to chalk/boxen/cli-table3 helpers.
 */
export class CLIDisplayManager implements IDisplayManager {
constructor(
private readonly logger: Logger,
// isSilent: presumably suppresses all terminal output when true —
// TODO confirm against isSilentMode() usage in the ported code.
private readonly isSilent: boolean
) {}
showPreUpdate(tasks: Task[], mode: UpdateMode): void {
// Create table with ID, Title, Status columns
// Show boxed header
// For bulk mode: show completed subtasks info box
}
startLoading(message: string): void {
// startLoadingIndicator(message)
}
// ... implement other methods with chalk, boxen, cli-table3
}
```
---
#### 12. **UpdateStrategyFactory** (Factory)
```typescript
/**
 * Creates the appropriate update strategy based on mode.
 * Also owns mode detection from raw CLI/MCP options so that detection
 * and construction live in one place.
 */
export class UpdateStrategyFactory {
  constructor(
    private readonly contextBuilder: ContextBuilderService,
    private readonly promptBuilder: PromptBuilderService,
    private readonly dataMerger: DataMergerService,
    private readonly aiService: AIService
  ) {}

  /**
   * Instantiate the strategy for the given mode.
   * All three strategies take the same constructor dependencies, so the
   * switch only selects the concrete class.
   * @throws TaskMasterError for an unrecognized mode
   */
  createStrategy(mode: UpdateMode): IUpdateStrategy {
    // Shared dependency tuple — keeps the three branches from drifting apart.
    const deps = [
      this.contextBuilder,
      this.promptBuilder,
      this.dataMerger,
      this.aiService
    ] as const;
    switch (mode) {
      case UpdateMode.BULK:
        return new BulkUpdateStrategy(...deps);
      case UpdateMode.SINGLE:
        return new SingleTaskUpdateStrategy(...deps);
      case UpdateMode.SUBTASK:
        return new SubtaskUpdateStrategy(...deps);
      default:
        throw new TaskMasterError(`Unknown update mode: ${mode}`);
    }
  }

  /**
   * Infer the update mode from user-supplied options.
   * - `--from` → BULK (update all tasks from an id onward)
   * - `--id` containing a dot ("parent.sub") → SUBTASK
   * - plain `--id` → SINGLE
   * @throws TaskMasterError when neither selector is given, or when
   *         both are given (they are mutually exclusive)
   */
  detectMode(options: UpdateTaskOptions): UpdateMode {
    const hasFrom = options.from !== undefined;
    const hasId = options.id !== undefined;
    // Fix: previously `--from` silently won when both flags were passed,
    // masking a user error. Reject the ambiguous combination instead.
    if (hasFrom && hasId) {
      throw new TaskMasterError(
        'Provide either --id or --from, not both'
      );
    }
    if (hasFrom) {
      return UpdateMode.BULK;
    }
    if (hasId && typeof options.id === 'string' && options.id.includes('.')) {
      return UpdateMode.SUBTASK;
    }
    if (hasId) {
      return UpdateMode.SINGLE;
    }
    throw new TaskMasterError('Must provide either --id or --from parameter');
  }
}
```
---
#### 13. **Validators** (Utility Classes)
```typescript
/**
 * Validates all update task inputs.
 * Static-only utility; throws on invalid input rather than returning
 * error values — TODO confirm error type when porting.
 */
export class UpdateInputValidator {
static validate(options: UpdateTaskOptions): void {
// Validate tasksPath, prompt, etc.
}
}
/**
 * Parses and validates task/subtask IDs.
 */
export class TaskIdValidator {
// Coerce and validate a raw id value into a numeric task ID.
static validateTaskId(id: any): number {
// Parse and validate task ID
}
// Parse the dotted "parentId.subtaskId" format; returns null when the
// string is not a subtask reference.
static parseSubtaskId(id: string): SubtaskIdParts | null {
// Parse "parentId.subtaskId" format
}
}
```
---
### Class Diagram (Relationships)
```
┌─────────────────────────┐
│ UpdateTaskService │ ◄─── Main Orchestrator
│ (Coordinates) │
└───────┬─────────────────┘
│ uses
├──► UpdateStrategyFactory ──creates──► IUpdateStrategy
│ │
├──► ContextBuilderService │ implements
│ ▼
├──► IDisplayManager ◄──creates── UpdateDisplayFactory
│ │
│ ├── CLIDisplayManager
│ └── JSONDisplayManager
│
└──► ConfigManager (existing)
IStorage (existing)
Logger (existing)
┌────────────────────────────────────────────────────┐
│ IUpdateStrategy │
└────────────────────────────────────────────────────┘
△
│ extends
┌───────────┴────────────┐
│ │
┌───────────────────────┐ ┌──────────────────────┐
│ BaseUpdateStrategy │ │ Abstract base with │
│ (Template Method) │ │ common workflow │
└───────────┬───────────┘ └──────────────────────┘
│ extends
┌───────┼──────────┬─────────────┐
│ │ │ │
┌───▼───┐ ┌─▼────┐ ┌─▼──────────┐ │
│ Bulk │ │Single│ │ Subtask │ │
│Update │ │Task │ │ Update │ │
│ │ │Update│ │ │ │
└───────┘ └──────┘ └────────────┘ │
│
├──► ContextBuilderService
│ ├─uses─► ContextGatherer (existing)
│ └─uses─► FuzzyTaskSearch (existing)
│
├──► PromptBuilderService
│ └─uses─► PromptManager (existing)
│
└──► DataMergerService
```
---
### Dependency Injection & Initialization
```typescript
// In packages/tm-core/src/commands/update-task/index.ts
/**
* Factory function to create a fully initialized UpdateTaskService
*/
export async function createUpdateTaskService(
configManager: ConfigManager,
storage: IStorage
): Promise<UpdateTaskService> {
const logger = getLogger('UpdateTaskService');
// Create helper services
const contextBuilder = new ContextBuilderService(logger);
const promptManager = getPromptManager(); // existing
const promptBuilder = new PromptBuilderService(promptManager, logger);
const dataMerger = new DataMergerService(logger);
const aiService = new AIService(); // wrapper around generateObjectService/generateTextService
// Create factory
const strategyFactory = new UpdateStrategyFactory(
contextBuilder,
promptBuilder,
dataMerger,
aiService
);
// Create display factory
const displayFactory = new UpdateDisplayFactory();
// Create service
return new UpdateTaskService(
configManager,
storage,
logger,
strategyFactory,
contextBuilder,
displayFactory
);
}
```
---
### Types & Interfaces
```typescript
// packages/tm-core/src/commands/update-task/types.ts
// The three update modes, selected by UpdateStrategyFactory.detectMode()
export enum UpdateMode {
BULK = 'bulk',
SINGLE = 'single',
SUBTASK = 'subtask'
}
// Raw options accepted by the update-task command (CLI or MCP)
export interface UpdateTaskOptions {
// Path to the tasks.json file to update
tasksPath: string;
// Task or "parent.sub" subtask selector (mutually exclusive with `from`)
id?: number | string;
// Starting task id for bulk updates (mutually exclusive with `id`)
from?: number;
// User instruction passed to the AI service
prompt: string;
useResearch?: boolean;
context?: UpdateContext;
outputFormat?: 'text' | 'json';
}
// Caller-supplied context, mainly for MCP invocations
export interface UpdateContext {
session?: any;
mcpLog?: any;
projectRoot?: string;
tag?: string;
}
// Result returned to the caller after an update completes
export interface UpdateTaskResult {
success: boolean;
mode: UpdateMode;
updatedTasks?: Task[];
updatedSubtask?: Subtask;
updateCount?: number;
telemetryData?: TelemetryData;
tagInfo?: TagInfo;
}
// Per-invocation context threaded through the strategy workflow
export interface IUpdateContext {
options: UpdateTaskOptions;
projectRoot: string;
tag?: string;
mode: UpdateMode;
isMCP: boolean;
logger: Logger;
}
// Tasks plus gathered context, produced by the load phase
export interface TaskLoadResult {
tasks: Task[];
gatheredContext: string;
originalData: TasksData;
}
// Output of PromptBuilderService.buildPrompt()
export interface PromptResult {
systemPrompt: string;
userPrompt: string;
templateName: string;
params: PromptParams;
}
// Raw AI service output before merging
export interface AIServiceResult {
mainResult: any; // structured object or text string
telemetryData?: TelemetryData;
tagInfo?: TagInfo;
}
// Output of DataMergerService; `mode` distinguishes the merge strategy
export interface MergeResult {
tasks?: Task[];
updatedSubtask?: Subtask;
newlyAddedSnippet?: string;
updateCount: number;
mode: 'structured' | 'timestamped';
}
---
## Implementation Phases
### Phase 1: Foundation & Core Types
**Goal**: Establish the type system and interfaces
**New Files to Create**:
1. `packages/tm-core/src/commands/update-task/types.ts`
- Define `UpdateMode` enum
- Define all shared interfaces (`UpdateTaskOptions`, `UpdateTaskResult`, etc.)
2. `packages/tm-core/src/commands/update-task/interfaces/update-strategy.interface.ts`
- Define `IUpdateStrategy` interface
3. `packages/tm-core/src/commands/update-task/interfaces/update-context.interface.ts`
- Define `IUpdateContext` interface
4. `packages/tm-core/src/commands/update-task/interfaces/display.interface.ts`
- Define `IDisplayManager` interface
**Existing Classes to Study**:
- `BaseExecutor` - For abstract class patterns
- `TaskService` - For service patterns
- `IStorage` - For interface patterns
---
### Phase 2: Validator & Helper Utilities
**Goal**: Build validation and utility classes
**New Files to Create**:
1. `packages/tm-core/src/commands/update-task/validators/update-input.validator.ts`
- Create `UpdateInputValidator` class
- Port validation logic from both old files
2. `packages/tm-core/src/commands/update-task/validators/task-id.validator.ts`
- Create `TaskIdValidator` class
- Implement `validateTaskId()` and `parseSubtaskId()` methods
**Tests to Create**:
- `update-input.validator.spec.ts`
- `task-id.validator.spec.ts`
---
### Phase 3: Service Layer
**Goal**: Build the helper services that strategies will use
**New Files to Create**:
1. `packages/tm-core/src/commands/update-task/context-builder.service.ts`
- Create `ContextBuilderService` class
- **Uses existing**: `ContextGatherer`, `FuzzyTaskSearch`
- Port context gathering logic from both old files
2. `packages/tm-core/src/commands/update-task/prompt-builder.service.ts`
- Create `PromptBuilderService` class
- **Uses existing**: `PromptManager` (via `getPromptManager()`)
- Port prompt building logic
3. `packages/tm-core/src/commands/update-task/data-merger.service.ts`
- Create `DataMergerService` class
- Implement `mergeTasks()` method (from `update-tasks.js` lines 250-273)
- Implement `mergeSubtask()` method (from `update-subtask-by-id.js` lines 291-332)
**Tests to Create**:
- `context-builder.service.spec.ts`
- `prompt-builder.service.spec.ts`
- `data-merger.service.spec.ts`
**Existing Classes Used**:
- `ContextGatherer` (from `scripts/modules/utils/contextGatherer.js`)
- `FuzzyTaskSearch` (from `scripts/modules/utils/fuzzyTaskSearch.js`)
- `PromptManager` (from `scripts/modules/prompt-manager.js`)
---
### Phase 4: Strategy Pattern Implementation
**Goal**: Implement the update strategies
**New Files to Create**:
1. `packages/tm-core/src/commands/update-task/strategies/base-update.strategy.ts`
- Create `BaseUpdateStrategy` abstract class implementing `IUpdateStrategy`
- Implement template method pattern
- Define abstract methods for subclasses
2. `packages/tm-core/src/commands/update-task/strategies/bulk-update.strategy.ts`
- Create `BulkUpdateStrategy` class extending `BaseUpdateStrategy`
- Port logic from `update-tasks.js` lines 79-293
- **Uses**: `generateObjectService` with `COMMAND_SCHEMAS['update-tasks']`
3. `packages/tm-core/src/commands/update-task/strategies/single-task-update.strategy.ts`
- Create `SingleTaskUpdateStrategy` class extending `BaseUpdateStrategy`
- Similar to bulk but for single task
- **Uses**: `generateObjectService` with `COMMAND_SCHEMAS['update-tasks']`
4. `packages/tm-core/src/commands/update-task/strategies/subtask-update.strategy.ts`
- Create `SubtaskUpdateStrategy` class extending `BaseUpdateStrategy`
- Port logic from `update-subtask-by-id.js` lines 67-378
- **Uses**: `generateTextService` for freeform content
**Tests to Create**:
- `bulk-update.strategy.spec.ts`
- `single-task-update.strategy.spec.ts`
- `subtask-update.strategy.spec.ts`
**Existing Classes/Functions Used**:
- `generateObjectService` (from `scripts/modules/ai-services-unified.js`)
- `generateTextService` (from `scripts/modules/ai-services-unified.js`)
- `COMMAND_SCHEMAS` (from `src/schemas/registry.js`)
- `readJSON`, `writeJSON`, `flattenTasksWithSubtasks` (from `scripts/modules/utils.js`)
---
### Phase 5: Display Layer
**Goal**: Implement display managers for different output formats
**New Files to Create**:
1. `packages/tm-core/src/commands/update-task/display/cli-display.manager.ts`
- Create `CLIDisplayManager` class implementing `IDisplayManager`
- Port CLI display logic from both old files
- **Uses existing**: `chalk`, `boxen`, `cli-table3`, `getStatusWithColor`, `truncate`
2. `packages/tm-core/src/commands/update-task/display/json-display.manager.ts`
- Create `JSONDisplayManager` class implementing `IDisplayManager`
- Implement JSON output format (for MCP)
3. `packages/tm-core/src/commands/update-task/display/update-display.factory.ts`
- Create `UpdateDisplayFactory` class
- Factory method to create appropriate display manager
**Tests to Create**:
- `cli-display.manager.spec.ts`
- `json-display.manager.spec.ts`
**Existing Functions Used**:
- `getStatusWithColor`, `startLoadingIndicator`, `stopLoadingIndicator`, `displayAiUsageSummary` (from `scripts/modules/ui.js`)
- `truncate`, `isSilentMode` (from `scripts/modules/utils.js`)
---
### Phase 6: Factory Pattern
**Goal**: Implement factory for creating strategies
**New Files to Create**:
1. `packages/tm-core/src/commands/update-task/factories/update-strategy.factory.ts`
- Create `UpdateStrategyFactory` class
- Implement `createStrategy(mode)` method
- Implement `detectMode(options)` method
- Handles dependency injection for all strategies
**Tests to Create**:
- `update-strategy.factory.spec.ts` (test mode detection and strategy creation)
---
### Phase 7: Main Service Orchestrator
**Goal**: Create the main service that ties everything together
**New Files to Create**:
1. `packages/tm-core/src/commands/update-task/update-task.service.ts`
- Create `UpdateTaskService` class
- Main orchestrator that coordinates all components
- Implements high-level workflow
2. `packages/tm-core/src/commands/update-task/index.ts`
- Export all public types and interfaces
- Export `createUpdateTaskService()` factory function
- Export `UpdateTaskService` class
**Tests to Create**:
- `update-task.service.spec.ts` (integration tests)
**Existing Classes Used**:
- `ConfigManager` (from `packages/tm-core/src/config/config-manager.ts`)
- `IStorage` (from `packages/tm-core/src/interfaces/storage.interface.ts`)
- `Logger`, `getLogger` (from `packages/tm-core/src/logger/`)
---
### Phase 8: CLI Integration
**Goal**: Wire up the new service to the CLI
**New Files to Create**:
1. `apps/cli/src/commands/update-task.command.ts`
- CLI command definition using `commander`
- Calls `createUpdateTaskService()` and executes
- Handles CLI-specific argument parsing
**Files to Modify**:
1. `apps/cli/src/index.ts` (or main CLI entry point)
- Register new `update-task` command
- Optionally add aliases for backward compatibility
**Existing Patterns to Follow**:
- Study existing CLI commands in `apps/cli/src/commands/`
- Follow same pattern for option parsing and service invocation
---
### Phase 9: Integration & Testing
**Goal**: Ensure everything works together
**Tasks**:
1. Run full integration tests
- Test bulk update workflow end-to-end
- Test single task update workflow
- Test subtask update workflow
- Test MCP mode vs CLI mode
- Test all edge cases from checklist
2. Verify against original functionality
- Use the functionality checklist
- Ensure no regressions
- Test with real task data
3. Performance testing
- Compare execution time with old implementation
- Ensure context gathering performs well
**Tests to Create**:
- `update-task.integration.spec.ts` - Full workflow tests
- End-to-end tests with real task files
---
### Phase 10: Documentation & Migration
**Goal**: Document the new system and deprecate old code
**Tasks**:
1. Update documentation
- Update `apps/docs/command-reference.mdx`
- Add JSDoc comments to all public APIs
- Create migration guide for users
2. Add deprecation warnings
- Mark old `update` and `update-subtask` commands as deprecated
- Add console warnings directing users to new command
3. Create changeset
- Document breaking changes (if any)
- Document new features (unified command)
- Note backward compatibility
**Files to Modify**:
1. `apps/docs/command-reference.mdx` - Update command documentation
2. Legacy files (add deprecation warnings):
- `scripts/modules/task-manager/update-tasks.js`
- `scripts/modules/task-manager/update-subtask-by-id.js`
---
### Phase 11: Cleanup
**Goal**: Remove deprecated code (future version)
**Tasks**:
1. Remove old files:
- `scripts/modules/task-manager/update-tasks.js`
- `scripts/modules/task-manager/update-subtask-by-id.js`
- Any related old command handlers
2. Clean up any temporary compatibility shims
3. Update all references in codebase to use new command
---
## Testing Strategy
### Unit Tests
- [ ] Mode detection logic
- [ ] ID parsing and validation
- [ ] Context gathering integration
- [ ] Prompt building for each mode
- [ ] Data merging logic
### Integration Tests
- [ ] Bulk update workflow
- [ ] Single task update workflow
- [ ] Single subtask update workflow
- [ ] MCP mode operation
- [ ] CLI mode operation
### Edge Cases
- [ ] Empty tasks.json
- [ ] Invalid ID formats
- [ ] Non-existent IDs
- [ ] Tasks with no subtasks
- [ ] Empty AI responses
- [ ] Context gathering failures
---
## Backward Compatibility
### Deprecation Strategy
1. Keep old commands working initially
2. Add deprecation warnings
3. Update all documentation
4. Remove old commands in next major version
### Alias Support (Optional)
```bash
# Could maintain old command names as aliases
task-master update --from=3 --prompt="..." # Still works, calls update-task
task-master update-subtask --id=3.2 --prompt="..." # Still works, calls update-task
```
---
## Risk Mitigation
### High-Risk Areas
1. **Data integrity**: Ensure writeJSON doesn't corrupt existing data
2. **AI service compatibility**: Both generateObjectService and generateTextService must work
3. **Subtask detail format**: Maintain timestamp format consistency
4. **Context gathering**: Same behavior across all modes
### Rollback Plan
- Keep old files until new version is fully tested
- Version bump allows reverting if issues found
- Comprehensive test coverage before release
---
## Success Criteria
- [ ] All checklist items verified working
- [ ] Tests passing for all modes
- [ ] MCP integration functional
- [ ] CLI display matches existing behavior
- [ ] Documentation updated
- [ ] No regression in existing functionality
- [ ] Performance comparable or better than current implementation
```