This is page 57 of 69. Use http://codebase.md/eyaltoledano/claude-task-master?lines=true&page={x} to view the full context.
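The dump spans 69 pages. A minimal sketch for pulling every page into one local file, assuming the `page` parameter is 1-indexed and each page is served as plain text (the loop bounds and output filename are illustrative, not part of the dump itself):

```bash
# Illustrative only: fetch all 69 pages of the packed repo into a single file.
for page in $(seq 1 69); do
  curl -fsS "http://codebase.md/eyaltoledano/claude-task-master?lines=true&page=${page}"
  echo    # keep a newline between pages
done > claude-task-master-full.md
```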
# Directory Structure
```
├── .changeset
│ ├── config.json
│ └── README.md
├── .claude
│ ├── commands
│ │ └── dedupe.md
│ └── TM_COMMANDS_GUIDE.md
├── .claude-plugin
│ └── marketplace.json
├── .coderabbit.yaml
├── .cursor
│ ├── mcp.json
│ └── rules
│ ├── ai_providers.mdc
│ ├── ai_services.mdc
│ ├── architecture.mdc
│ ├── changeset.mdc
│ ├── commands.mdc
│ ├── context_gathering.mdc
│ ├── cursor_rules.mdc
│ ├── dependencies.mdc
│ ├── dev_workflow.mdc
│ ├── git_workflow.mdc
│ ├── glossary.mdc
│ ├── mcp.mdc
│ ├── new_features.mdc
│ ├── self_improve.mdc
│ ├── tags.mdc
│ ├── taskmaster.mdc
│ ├── tasks.mdc
│ ├── telemetry.mdc
│ ├── test_workflow.mdc
│ ├── tests.mdc
│ ├── ui.mdc
│ └── utilities.mdc
├── .cursorignore
├── .env.example
├── .github
│ ├── ISSUE_TEMPLATE
│ │ ├── bug_report.md
│ │ ├── enhancements---feature-requests.md
│ │ └── feedback.md
│ ├── PULL_REQUEST_TEMPLATE
│ │ ├── bugfix.md
│ │ ├── config.yml
│ │ ├── feature.md
│ │ └── integration.md
│ ├── PULL_REQUEST_TEMPLATE.md
│ ├── scripts
│ │ ├── auto-close-duplicates.mjs
│ │ ├── backfill-duplicate-comments.mjs
│ │ ├── check-pre-release-mode.mjs
│ │ ├── parse-metrics.mjs
│ │ ├── release.mjs
│ │ ├── tag-extension.mjs
│ │ ├── utils.mjs
│ │ └── validate-changesets.mjs
│ └── workflows
│ ├── auto-close-duplicates.yml
│ ├── backfill-duplicate-comments.yml
│ ├── ci.yml
│ ├── claude-dedupe-issues.yml
│ ├── claude-docs-trigger.yml
│ ├── claude-docs-updater.yml
│ ├── claude-issue-triage.yml
│ ├── claude.yml
│ ├── extension-ci.yml
│ ├── extension-release.yml
│ ├── log-issue-events.yml
│ ├── pre-release.yml
│ ├── release-check.yml
│ ├── release.yml
│ ├── update-models-md.yml
│ └── weekly-metrics-discord.yml
├── .gitignore
├── .kiro
│ ├── hooks
│ │ ├── tm-code-change-task-tracker.kiro.hook
│ │ ├── tm-complexity-analyzer.kiro.hook
│ │ ├── tm-daily-standup-assistant.kiro.hook
│ │ ├── tm-git-commit-task-linker.kiro.hook
│ │ ├── tm-pr-readiness-checker.kiro.hook
│ │ ├── tm-task-dependency-auto-progression.kiro.hook
│ │ └── tm-test-success-task-completer.kiro.hook
│ ├── settings
│ │ └── mcp.json
│ └── steering
│ ├── dev_workflow.md
│ ├── kiro_rules.md
│ ├── self_improve.md
│ ├── taskmaster_hooks_workflow.md
│ └── taskmaster.md
├── .manypkg.json
├── .mcp.json
├── .npmignore
├── .nvmrc
├── .taskmaster
│ ├── CLAUDE.md
│ ├── config.json
│ ├── docs
│ │ ├── autonomous-tdd-git-workflow.md
│ │ ├── MIGRATION-ROADMAP.md
│ │ ├── prd-tm-start.txt
│ │ ├── prd.txt
│ │ ├── README.md
│ │ ├── research
│ │ │ ├── 2025-06-14_how-can-i-improve-the-scope-up-and-scope-down-comm.md
│ │ │ ├── 2025-06-14_should-i-be-using-any-specific-libraries-for-this.md
│ │ │ ├── 2025-06-14_test-save-functionality.md
│ │ │ ├── 2025-06-14_test-the-fix-for-duplicate-saves-final-test.md
│ │ │ └── 2025-08-01_do-we-need-to-add-new-commands-or-can-we-just-weap.md
│ │ ├── task-template-importing-prd.txt
│ │ ├── tdd-workflow-phase-0-spike.md
│ │ ├── tdd-workflow-phase-1-core-rails.md
│ │ ├── tdd-workflow-phase-1-orchestrator.md
│ │ ├── tdd-workflow-phase-2-pr-resumability.md
│ │ ├── tdd-workflow-phase-3-extensibility-guardrails.md
│ │ ├── test-prd.txt
│ │ └── tm-core-phase-1.txt
│ ├── reports
│ │ ├── task-complexity-report_autonomous-tdd-git-workflow.json
│ │ ├── task-complexity-report_cc-kiro-hooks.json
│ │ ├── task-complexity-report_tdd-phase-1-core-rails.json
│ │ ├── task-complexity-report_tdd-workflow-phase-0.json
│ │ ├── task-complexity-report_test-prd-tag.json
│ │ ├── task-complexity-report_tm-core-phase-1.json
│ │ ├── task-complexity-report.json
│ │ └── tm-core-complexity.json
│ ├── state.json
│ ├── tasks
│ │ ├── task_001_tm-start.txt
│ │ ├── task_002_tm-start.txt
│ │ ├── task_003_tm-start.txt
│ │ ├── task_004_tm-start.txt
│ │ ├── task_007_tm-start.txt
│ │ └── tasks.json
│ └── templates
│ ├── example_prd_rpg.md
│ └── example_prd.md
├── .vscode
│ ├── extensions.json
│ └── settings.json
├── apps
│ ├── cli
│ │ ├── CHANGELOG.md
│ │ ├── package.json
│ │ ├── src
│ │ │ ├── command-registry.ts
│ │ │ ├── commands
│ │ │ │ ├── auth.command.ts
│ │ │ │ ├── autopilot
│ │ │ │ │ ├── abort.command.ts
│ │ │ │ │ ├── commit.command.ts
│ │ │ │ │ ├── complete.command.ts
│ │ │ │ │ ├── index.ts
│ │ │ │ │ ├── next.command.ts
│ │ │ │ │ ├── resume.command.ts
│ │ │ │ │ ├── shared.ts
│ │ │ │ │ ├── start.command.ts
│ │ │ │ │ └── status.command.ts
│ │ │ │ ├── briefs.command.ts
│ │ │ │ ├── context.command.ts
│ │ │ │ ├── export.command.ts
│ │ │ │ ├── list.command.ts
│ │ │ │ ├── models
│ │ │ │ │ ├── custom-providers.ts
│ │ │ │ │ ├── fetchers.ts
│ │ │ │ │ ├── index.ts
│ │ │ │ │ ├── prompts.ts
│ │ │ │ │ ├── setup.ts
│ │ │ │ │ └── types.ts
│ │ │ │ ├── next.command.ts
│ │ │ │ ├── set-status.command.ts
│ │ │ │ ├── show.command.ts
│ │ │ │ ├── start.command.ts
│ │ │ │ └── tags.command.ts
│ │ │ ├── index.ts
│ │ │ ├── lib
│ │ │ │ └── model-management.ts
│ │ │ ├── types
│ │ │ │ └── tag-management.d.ts
│ │ │ ├── ui
│ │ │ │ ├── components
│ │ │ │ │ ├── cardBox.component.ts
│ │ │ │ │ ├── dashboard.component.ts
│ │ │ │ │ ├── header.component.ts
│ │ │ │ │ ├── index.ts
│ │ │ │ │ ├── next-task.component.ts
│ │ │ │ │ ├── suggested-steps.component.ts
│ │ │ │ │ └── task-detail.component.ts
│ │ │ │ ├── display
│ │ │ │ │ ├── messages.ts
│ │ │ │ │ └── tables.ts
│ │ │ │ ├── formatters
│ │ │ │ │ ├── complexity-formatters.ts
│ │ │ │ │ ├── dependency-formatters.ts
│ │ │ │ │ ├── priority-formatters.ts
│ │ │ │ │ ├── status-formatters.spec.ts
│ │ │ │ │ └── status-formatters.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── layout
│ │ │ │ ├── helpers.spec.ts
│ │ │ │ └── helpers.ts
│ │ │ └── utils
│ │ │ ├── auth-helpers.ts
│ │ │ ├── auto-update.ts
│ │ │ ├── brief-selection.ts
│ │ │ ├── display-helpers.ts
│ │ │ ├── error-handler.ts
│ │ │ ├── index.ts
│ │ │ ├── project-root.ts
│ │ │ ├── task-status.ts
│ │ │ ├── ui.spec.ts
│ │ │ └── ui.ts
│ │ ├── tests
│ │ │ ├── integration
│ │ │ │ └── commands
│ │ │ │ └── autopilot
│ │ │ │ └── workflow.test.ts
│ │ │ └── unit
│ │ │ ├── commands
│ │ │ │ ├── autopilot
│ │ │ │ │ └── shared.test.ts
│ │ │ │ ├── list.command.spec.ts
│ │ │ │ └── show.command.spec.ts
│ │ │ └── ui
│ │ │ └── dashboard.component.spec.ts
│ │ ├── tsconfig.json
│ │ └── vitest.config.ts
│ ├── docs
│ │ ├── archive
│ │ │ ├── ai-client-utils-example.mdx
│ │ │ ├── ai-development-workflow.mdx
│ │ │ ├── command-reference.mdx
│ │ │ ├── configuration.mdx
│ │ │ ├── cursor-setup.mdx
│ │ │ ├── examples.mdx
│ │ │ └── Installation.mdx
│ │ ├── best-practices
│ │ │ ├── advanced-tasks.mdx
│ │ │ ├── configuration-advanced.mdx
│ │ │ └── index.mdx
│ │ ├── capabilities
│ │ │ ├── cli-root-commands.mdx
│ │ │ ├── index.mdx
│ │ │ ├── mcp.mdx
│ │ │ ├── rpg-method.mdx
│ │ │ └── task-structure.mdx
│ │ ├── CHANGELOG.md
│ │ ├── command-reference.mdx
│ │ ├── configuration.mdx
│ │ ├── docs.json
│ │ ├── favicon.svg
│ │ ├── getting-started
│ │ │ ├── api-keys.mdx
│ │ │ ├── contribute.mdx
│ │ │ ├── faq.mdx
│ │ │ └── quick-start
│ │ │ ├── configuration-quick.mdx
│ │ │ ├── execute-quick.mdx
│ │ │ ├── installation.mdx
│ │ │ ├── moving-forward.mdx
│ │ │ ├── prd-quick.mdx
│ │ │ ├── quick-start.mdx
│ │ │ ├── requirements.mdx
│ │ │ ├── rules-quick.mdx
│ │ │ └── tasks-quick.mdx
│ │ ├── introduction.mdx
│ │ ├── licensing.md
│ │ ├── logo
│ │ │ ├── dark.svg
│ │ │ ├── light.svg
│ │ │ └── task-master-logo.png
│ │ ├── package.json
│ │ ├── README.md
│ │ ├── style.css
│ │ ├── tdd-workflow
│ │ │ ├── ai-agent-integration.mdx
│ │ │ └── quickstart.mdx
│ │ ├── vercel.json
│ │ └── whats-new.mdx
│ ├── extension
│ │ ├── .vscodeignore
│ │ ├── assets
│ │ │ ├── banner.png
│ │ │ ├── icon-dark.svg
│ │ │ ├── icon-light.svg
│ │ │ ├── icon.png
│ │ │ ├── screenshots
│ │ │ │ ├── kanban-board.png
│ │ │ │ └── task-details.png
│ │ │ └── sidebar-icon.svg
│ │ ├── CHANGELOG.md
│ │ ├── components.json
│ │ ├── docs
│ │ │ ├── extension-CI-setup.md
│ │ │ └── extension-development-guide.md
│ │ ├── esbuild.js
│ │ ├── LICENSE
│ │ ├── package.json
│ │ ├── package.mjs
│ │ ├── package.publish.json
│ │ ├── README.md
│ │ ├── src
│ │ │ ├── components
│ │ │ │ ├── ConfigView.tsx
│ │ │ │ ├── constants.ts
│ │ │ │ ├── TaskDetails
│ │ │ │ │ ├── AIActionsSection.tsx
│ │ │ │ │ ├── DetailsSection.tsx
│ │ │ │ │ ├── PriorityBadge.tsx
│ │ │ │ │ ├── SubtasksSection.tsx
│ │ │ │ │ ├── TaskMetadataSidebar.tsx
│ │ │ │ │ └── useTaskDetails.ts
│ │ │ │ ├── TaskDetailsView.tsx
│ │ │ │ ├── TaskMasterLogo.tsx
│ │ │ │ └── ui
│ │ │ │ ├── badge.tsx
│ │ │ │ ├── breadcrumb.tsx
│ │ │ │ ├── button.tsx
│ │ │ │ ├── card.tsx
│ │ │ │ ├── collapsible.tsx
│ │ │ │ ├── CollapsibleSection.tsx
│ │ │ │ ├── dropdown-menu.tsx
│ │ │ │ ├── label.tsx
│ │ │ │ ├── scroll-area.tsx
│ │ │ │ ├── separator.tsx
│ │ │ │ ├── shadcn-io
│ │ │ │ │ └── kanban
│ │ │ │ │ └── index.tsx
│ │ │ │ └── textarea.tsx
│ │ │ ├── extension.ts
│ │ │ ├── index.ts
│ │ │ ├── lib
│ │ │ │ └── utils.ts
│ │ │ ├── services
│ │ │ │ ├── config-service.ts
│ │ │ │ ├── error-handler.ts
│ │ │ │ ├── notification-preferences.ts
│ │ │ │ ├── polling-service.ts
│ │ │ │ ├── polling-strategies.ts
│ │ │ │ ├── sidebar-webview-manager.ts
│ │ │ │ ├── task-repository.ts
│ │ │ │ ├── terminal-manager.ts
│ │ │ │ └── webview-manager.ts
│ │ │ ├── test
│ │ │ │ └── extension.test.ts
│ │ │ ├── utils
│ │ │ │ ├── configManager.ts
│ │ │ │ ├── connectionManager.ts
│ │ │ │ ├── errorHandler.ts
│ │ │ │ ├── event-emitter.ts
│ │ │ │ ├── logger.ts
│ │ │ │ ├── mcpClient.ts
│ │ │ │ ├── notificationPreferences.ts
│ │ │ │ └── task-master-api
│ │ │ │ ├── cache
│ │ │ │ │ └── cache-manager.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── mcp-client.ts
│ │ │ │ ├── transformers
│ │ │ │ │ └── task-transformer.ts
│ │ │ │ └── types
│ │ │ │ └── index.ts
│ │ │ └── webview
│ │ │ ├── App.tsx
│ │ │ ├── components
│ │ │ │ ├── AppContent.tsx
│ │ │ │ ├── EmptyState.tsx
│ │ │ │ ├── ErrorBoundary.tsx
│ │ │ │ ├── PollingStatus.tsx
│ │ │ │ ├── PriorityBadge.tsx
│ │ │ │ ├── SidebarView.tsx
│ │ │ │ ├── TagDropdown.tsx
│ │ │ │ ├── TaskCard.tsx
│ │ │ │ ├── TaskEditModal.tsx
│ │ │ │ ├── TaskMasterKanban.tsx
│ │ │ │ ├── ToastContainer.tsx
│ │ │ │ └── ToastNotification.tsx
│ │ │ ├── constants
│ │ │ │ └── index.ts
│ │ │ ├── contexts
│ │ │ │ └── VSCodeContext.tsx
│ │ │ ├── hooks
│ │ │ │ ├── useTaskQueries.ts
│ │ │ │ ├── useVSCodeMessages.ts
│ │ │ │ └── useWebviewHeight.ts
│ │ │ ├── index.css
│ │ │ ├── index.tsx
│ │ │ ├── providers
│ │ │ │ └── QueryProvider.tsx
│ │ │ ├── reducers
│ │ │ │ └── appReducer.ts
│ │ │ ├── sidebar.tsx
│ │ │ ├── types
│ │ │ │ └── index.ts
│ │ │ └── utils
│ │ │ ├── logger.ts
│ │ │ └── toast.ts
│ │ └── tsconfig.json
│ └── mcp
│ ├── CHANGELOG.md
│ ├── package.json
│ ├── src
│ │ ├── index.ts
│ │ ├── shared
│ │ │ ├── types.ts
│ │ │ └── utils.ts
│ │ └── tools
│ │ ├── autopilot
│ │ │ ├── abort.tool.ts
│ │ │ ├── commit.tool.ts
│ │ │ ├── complete.tool.ts
│ │ │ ├── finalize.tool.ts
│ │ │ ├── index.ts
│ │ │ ├── next.tool.ts
│ │ │ ├── resume.tool.ts
│ │ │ ├── start.tool.ts
│ │ │ └── status.tool.ts
│ │ ├── README-ZOD-V3.md
│ │ └── tasks
│ │ ├── get-task.tool.ts
│ │ ├── get-tasks.tool.ts
│ │ └── index.ts
│ ├── tsconfig.json
│ └── vitest.config.ts
├── assets
│ ├── .windsurfrules
│ ├── AGENTS.md
│ ├── claude
│ │ └── TM_COMMANDS_GUIDE.md
│ ├── config.json
│ ├── env.example
│ ├── example_prd_rpg.txt
│ ├── example_prd.txt
│ ├── GEMINI.md
│ ├── gitignore
│ ├── kiro-hooks
│ │ ├── tm-code-change-task-tracker.kiro.hook
│ │ ├── tm-complexity-analyzer.kiro.hook
│ │ ├── tm-daily-standup-assistant.kiro.hook
│ │ ├── tm-git-commit-task-linker.kiro.hook
│ │ ├── tm-pr-readiness-checker.kiro.hook
│ │ ├── tm-task-dependency-auto-progression.kiro.hook
│ │ └── tm-test-success-task-completer.kiro.hook
│ ├── roocode
│ │ ├── .roo
│ │ │ ├── rules-architect
│ │ │ │ └── architect-rules
│ │ │ ├── rules-ask
│ │ │ │ └── ask-rules
│ │ │ ├── rules-code
│ │ │ │ └── code-rules
│ │ │ ├── rules-debug
│ │ │ │ └── debug-rules
│ │ │ ├── rules-orchestrator
│ │ │ │ └── orchestrator-rules
│ │ │ └── rules-test
│ │ │ └── test-rules
│ │ └── .roomodes
│ ├── rules
│ │ ├── cursor_rules.mdc
│ │ ├── dev_workflow.mdc
│ │ ├── self_improve.mdc
│ │ ├── taskmaster_hooks_workflow.mdc
│ │ └── taskmaster.mdc
│ └── scripts_README.md
├── bin
│ └── task-master.js
├── biome.json
├── CHANGELOG.md
├── CLAUDE_CODE_PLUGIN.md
├── CLAUDE.md
├── context
│ ├── chats
│ │ ├── add-task-dependencies-1.md
│ │ └── max-min-tokens.txt.md
│ ├── fastmcp-core.txt
│ ├── fastmcp-docs.txt
│ ├── MCP_INTEGRATION.md
│ ├── mcp-js-sdk-docs.txt
│ ├── mcp-protocol-repo.txt
│ ├── mcp-protocol-schema-03262025.json
│ └── mcp-protocol-spec.txt
├── CONTRIBUTING.md
├── docs
│ ├── claude-code-integration.md
│ ├── CLI-COMMANDER-PATTERN.md
│ ├── command-reference.md
│ ├── configuration.md
│ ├── contributor-docs
│ │ ├── testing-roo-integration.md
│ │ └── worktree-setup.md
│ ├── cross-tag-task-movement.md
│ ├── examples
│ │ ├── claude-code-usage.md
│ │ └── codex-cli-usage.md
│ ├── examples.md
│ ├── licensing.md
│ ├── mcp-provider-guide.md
│ ├── mcp-provider.md
│ ├── migration-guide.md
│ ├── models.md
│ ├── providers
│ │ ├── codex-cli.md
│ │ └── gemini-cli.md
│ ├── README.md
│ ├── scripts
│ │ └── models-json-to-markdown.js
│ ├── task-structure.md
│ └── tutorial.md
├── images
│ ├── hamster-hiring.png
│ └── logo.png
├── index.js
├── jest.config.js
├── jest.resolver.cjs
├── LICENSE
├── llms-install.md
├── mcp-server
│ ├── server.js
│ └── src
│ ├── core
│ │ ├── __tests__
│ │ │ └── context-manager.test.js
│ │ ├── context-manager.js
│ │ ├── direct-functions
│ │ │ ├── add-dependency.js
│ │ │ ├── add-subtask.js
│ │ │ ├── add-tag.js
│ │ │ ├── add-task.js
│ │ │ ├── analyze-task-complexity.js
│ │ │ ├── cache-stats.js
│ │ │ ├── clear-subtasks.js
│ │ │ ├── complexity-report.js
│ │ │ ├── copy-tag.js
│ │ │ ├── create-tag-from-branch.js
│ │ │ ├── delete-tag.js
│ │ │ ├── expand-all-tasks.js
│ │ │ ├── expand-task.js
│ │ │ ├── fix-dependencies.js
│ │ │ ├── generate-task-files.js
│ │ │ ├── initialize-project.js
│ │ │ ├── list-tags.js
│ │ │ ├── models.js
│ │ │ ├── move-task-cross-tag.js
│ │ │ ├── move-task.js
│ │ │ ├── next-task.js
│ │ │ ├── parse-prd.js
│ │ │ ├── remove-dependency.js
│ │ │ ├── remove-subtask.js
│ │ │ ├── remove-task.js
│ │ │ ├── rename-tag.js
│ │ │ ├── research.js
│ │ │ ├── response-language.js
│ │ │ ├── rules.js
│ │ │ ├── scope-down.js
│ │ │ ├── scope-up.js
│ │ │ ├── set-task-status.js
│ │ │ ├── update-subtask-by-id.js
│ │ │ ├── update-task-by-id.js
│ │ │ ├── update-tasks.js
│ │ │ ├── use-tag.js
│ │ │ └── validate-dependencies.js
│ │ ├── task-master-core.js
│ │ └── utils
│ │ ├── env-utils.js
│ │ └── path-utils.js
│ ├── custom-sdk
│ │ ├── errors.js
│ │ ├── index.js
│ │ ├── json-extractor.js
│ │ ├── language-model.js
│ │ ├── message-converter.js
│ │ └── schema-converter.js
│ ├── index.js
│ ├── logger.js
│ ├── providers
│ │ └── mcp-provider.js
│ └── tools
│ ├── add-dependency.js
│ ├── add-subtask.js
│ ├── add-tag.js
│ ├── add-task.js
│ ├── analyze.js
│ ├── clear-subtasks.js
│ ├── complexity-report.js
│ ├── copy-tag.js
│ ├── delete-tag.js
│ ├── expand-all.js
│ ├── expand-task.js
│ ├── fix-dependencies.js
│ ├── generate.js
│ ├── get-operation-status.js
│ ├── index.js
│ ├── initialize-project.js
│ ├── list-tags.js
│ ├── models.js
│ ├── move-task.js
│ ├── next-task.js
│ ├── parse-prd.js
│ ├── README-ZOD-V3.md
│ ├── remove-dependency.js
│ ├── remove-subtask.js
│ ├── remove-task.js
│ ├── rename-tag.js
│ ├── research.js
│ ├── response-language.js
│ ├── rules.js
│ ├── scope-down.js
│ ├── scope-up.js
│ ├── set-task-status.js
│ ├── tool-registry.js
│ ├── update-subtask.js
│ ├── update-task.js
│ ├── update.js
│ ├── use-tag.js
│ ├── utils.js
│ └── validate-dependencies.js
├── mcp-test.js
├── output.json
├── package-lock.json
├── package.json
├── packages
│ ├── ai-sdk-provider-grok-cli
│ │ ├── CHANGELOG.md
│ │ ├── package.json
│ │ ├── README.md
│ │ ├── src
│ │ │ ├── errors.test.ts
│ │ │ ├── errors.ts
│ │ │ ├── grok-cli-language-model.ts
│ │ │ ├── grok-cli-provider.test.ts
│ │ │ ├── grok-cli-provider.ts
│ │ │ ├── index.ts
│ │ │ ├── json-extractor.test.ts
│ │ │ ├── json-extractor.ts
│ │ │ ├── message-converter.test.ts
│ │ │ ├── message-converter.ts
│ │ │ └── types.ts
│ │ └── tsconfig.json
│ ├── build-config
│ │ ├── CHANGELOG.md
│ │ ├── package.json
│ │ ├── src
│ │ │ └── tsdown.base.ts
│ │ └── tsconfig.json
│ ├── claude-code-plugin
│ │ ├── .claude-plugin
│ │ │ └── plugin.json
│ │ ├── .gitignore
│ │ ├── agents
│ │ │ ├── task-checker.md
│ │ │ ├── task-executor.md
│ │ │ └── task-orchestrator.md
│ │ ├── CHANGELOG.md
│ │ ├── commands
│ │ │ ├── add-dependency.md
│ │ │ ├── add-subtask.md
│ │ │ ├── add-task.md
│ │ │ ├── analyze-complexity.md
│ │ │ ├── analyze-project.md
│ │ │ ├── auto-implement-tasks.md
│ │ │ ├── command-pipeline.md
│ │ │ ├── complexity-report.md
│ │ │ ├── convert-task-to-subtask.md
│ │ │ ├── expand-all-tasks.md
│ │ │ ├── expand-task.md
│ │ │ ├── fix-dependencies.md
│ │ │ ├── generate-tasks.md
│ │ │ ├── help.md
│ │ │ ├── init-project-quick.md
│ │ │ ├── init-project.md
│ │ │ ├── install-taskmaster.md
│ │ │ ├── learn.md
│ │ │ ├── list-tasks-by-status.md
│ │ │ ├── list-tasks-with-subtasks.md
│ │ │ ├── list-tasks.md
│ │ │ ├── next-task.md
│ │ │ ├── parse-prd-with-research.md
│ │ │ ├── parse-prd.md
│ │ │ ├── project-status.md
│ │ │ ├── quick-install-taskmaster.md
│ │ │ ├── remove-all-subtasks.md
│ │ │ ├── remove-dependency.md
│ │ │ ├── remove-subtask.md
│ │ │ ├── remove-subtasks.md
│ │ │ ├── remove-task.md
│ │ │ ├── setup-models.md
│ │ │ ├── show-task.md
│ │ │ ├── smart-workflow.md
│ │ │ ├── sync-readme.md
│ │ │ ├── tm-main.md
│ │ │ ├── to-cancelled.md
│ │ │ ├── to-deferred.md
│ │ │ ├── to-done.md
│ │ │ ├── to-in-progress.md
│ │ │ ├── to-pending.md
│ │ │ ├── to-review.md
│ │ │ ├── update-single-task.md
│ │ │ ├── update-task.md
│ │ │ ├── update-tasks-from-id.md
│ │ │ ├── validate-dependencies.md
│ │ │ └── view-models.md
│ │ ├── mcp.json
│ │ └── package.json
│ ├── tm-bridge
│ │ ├── CHANGELOG.md
│ │ ├── package.json
│ │ ├── README.md
│ │ ├── src
│ │ │ ├── add-tag-bridge.ts
│ │ │ ├── bridge-types.ts
│ │ │ ├── bridge-utils.ts
│ │ │ ├── expand-bridge.ts
│ │ │ ├── index.ts
│ │ │ ├── tags-bridge.ts
│ │ │ ├── update-bridge.ts
│ │ │ └── use-tag-bridge.ts
│ │ └── tsconfig.json
│ └── tm-core
│ ├── .gitignore
│ ├── CHANGELOG.md
│ ├── docs
│ │ └── listTasks-architecture.md
│ ├── package.json
│ ├── POC-STATUS.md
│ ├── README.md
│ ├── src
│ │ ├── common
│ │ │ ├── constants
│ │ │ │ ├── index.ts
│ │ │ │ ├── paths.ts
│ │ │ │ └── providers.ts
│ │ │ ├── errors
│ │ │ │ ├── index.ts
│ │ │ │ └── task-master-error.ts
│ │ │ ├── interfaces
│ │ │ │ ├── configuration.interface.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── storage.interface.ts
│ │ │ ├── logger
│ │ │ │ ├── factory.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── logger.spec.ts
│ │ │ │ └── logger.ts
│ │ │ ├── mappers
│ │ │ │ ├── TaskMapper.test.ts
│ │ │ │ └── TaskMapper.ts
│ │ │ ├── types
│ │ │ │ ├── database.types.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── legacy.ts
│ │ │ │ └── repository-types.ts
│ │ │ └── utils
│ │ │ ├── git-utils.ts
│ │ │ ├── id-generator.ts
│ │ │ ├── index.ts
│ │ │ ├── path-helpers.ts
│ │ │ ├── path-normalizer.spec.ts
│ │ │ ├── path-normalizer.ts
│ │ │ ├── project-root-finder.spec.ts
│ │ │ ├── project-root-finder.ts
│ │ │ ├── run-id-generator.spec.ts
│ │ │ └── run-id-generator.ts
│ │ ├── index.ts
│ │ ├── modules
│ │ │ ├── ai
│ │ │ │ ├── index.ts
│ │ │ │ ├── interfaces
│ │ │ │ │ └── ai-provider.interface.ts
│ │ │ │ └── providers
│ │ │ │ ├── base-provider.ts
│ │ │ │ └── index.ts
│ │ │ ├── auth
│ │ │ │ ├── auth-domain.spec.ts
│ │ │ │ ├── auth-domain.ts
│ │ │ │ ├── config.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── managers
│ │ │ │ │ ├── auth-manager.spec.ts
│ │ │ │ │ └── auth-manager.ts
│ │ │ │ ├── services
│ │ │ │ │ ├── context-store.ts
│ │ │ │ │ ├── oauth-service.ts
│ │ │ │ │ ├── organization.service.ts
│ │ │ │ │ ├── supabase-session-storage.spec.ts
│ │ │ │ │ └── supabase-session-storage.ts
│ │ │ │ └── types.ts
│ │ │ ├── briefs
│ │ │ │ ├── briefs-domain.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── services
│ │ │ │ │ └── brief-service.ts
│ │ │ │ ├── types.ts
│ │ │ │ └── utils
│ │ │ │ └── url-parser.ts
│ │ │ ├── commands
│ │ │ │ └── index.ts
│ │ │ ├── config
│ │ │ │ ├── config-domain.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── managers
│ │ │ │ │ ├── config-manager.spec.ts
│ │ │ │ │ └── config-manager.ts
│ │ │ │ └── services
│ │ │ │ ├── config-loader.service.spec.ts
│ │ │ │ ├── config-loader.service.ts
│ │ │ │ ├── config-merger.service.spec.ts
│ │ │ │ ├── config-merger.service.ts
│ │ │ │ ├── config-persistence.service.spec.ts
│ │ │ │ ├── config-persistence.service.ts
│ │ │ │ ├── environment-config-provider.service.spec.ts
│ │ │ │ ├── environment-config-provider.service.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── runtime-state-manager.service.spec.ts
│ │ │ │ └── runtime-state-manager.service.ts
│ │ │ ├── dependencies
│ │ │ │ └── index.ts
│ │ │ ├── execution
│ │ │ │ ├── executors
│ │ │ │ │ ├── base-executor.ts
│ │ │ │ │ ├── claude-executor.ts
│ │ │ │ │ └── executor-factory.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── services
│ │ │ │ │ └── executor-service.ts
│ │ │ │ └── types.ts
│ │ │ ├── git
│ │ │ │ ├── adapters
│ │ │ │ │ ├── git-adapter.test.ts
│ │ │ │ │ └── git-adapter.ts
│ │ │ │ ├── git-domain.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── services
│ │ │ │ ├── branch-name-generator.spec.ts
│ │ │ │ ├── branch-name-generator.ts
│ │ │ │ ├── commit-message-generator.test.ts
│ │ │ │ ├── commit-message-generator.ts
│ │ │ │ ├── scope-detector.test.ts
│ │ │ │ ├── scope-detector.ts
│ │ │ │ ├── template-engine.test.ts
│ │ │ │ └── template-engine.ts
│ │ │ ├── integration
│ │ │ │ ├── clients
│ │ │ │ │ ├── index.ts
│ │ │ │ │ └── supabase-client.ts
│ │ │ │ ├── integration-domain.ts
│ │ │ │ └── services
│ │ │ │ ├── export.service.ts
│ │ │ │ ├── task-expansion.service.ts
│ │ │ │ └── task-retrieval.service.ts
│ │ │ ├── reports
│ │ │ │ ├── index.ts
│ │ │ │ ├── managers
│ │ │ │ │ └── complexity-report-manager.ts
│ │ │ │ └── types.ts
│ │ │ ├── storage
│ │ │ │ ├── adapters
│ │ │ │ │ ├── activity-logger.ts
│ │ │ │ │ ├── api-storage.ts
│ │ │ │ │ └── file-storage
│ │ │ │ │ ├── file-operations.ts
│ │ │ │ │ ├── file-storage.ts
│ │ │ │ │ ├── format-handler.ts
│ │ │ │ │ ├── index.ts
│ │ │ │ │ └── path-resolver.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── services
│ │ │ │ │ └── storage-factory.ts
│ │ │ │ └── utils
│ │ │ │ └── api-client.ts
│ │ │ ├── tasks
│ │ │ │ ├── entities
│ │ │ │ │ └── task.entity.ts
│ │ │ │ ├── parser
│ │ │ │ │ └── index.ts
│ │ │ │ ├── repositories
│ │ │ │ │ ├── supabase
│ │ │ │ │ │ ├── dependency-fetcher.ts
│ │ │ │ │ │ ├── index.ts
│ │ │ │ │ │ └── supabase-repository.ts
│ │ │ │ │ └── task-repository.interface.ts
│ │ │ │ ├── services
│ │ │ │ │ ├── preflight-checker.service.ts
│ │ │ │ │ ├── tag.service.ts
│ │ │ │ │ ├── task-execution-service.ts
│ │ │ │ │ ├── task-loader.service.ts
│ │ │ │ │ └── task-service.ts
│ │ │ │ └── tasks-domain.ts
│ │ │ ├── ui
│ │ │ │ └── index.ts
│ │ │ └── workflow
│ │ │ ├── managers
│ │ │ │ ├── workflow-state-manager.spec.ts
│ │ │ │ └── workflow-state-manager.ts
│ │ │ ├── orchestrators
│ │ │ │ ├── workflow-orchestrator.test.ts
│ │ │ │ └── workflow-orchestrator.ts
│ │ │ ├── services
│ │ │ │ ├── test-result-validator.test.ts
│ │ │ │ ├── test-result-validator.ts
│ │ │ │ ├── test-result-validator.types.ts
│ │ │ │ ├── workflow-activity-logger.ts
│ │ │ │ └── workflow.service.ts
│ │ │ ├── types.ts
│ │ │ └── workflow-domain.ts
│ │ ├── subpath-exports.test.ts
│ │ ├── tm-core.ts
│ │ └── utils
│ │ └── time.utils.ts
│ ├── tests
│ │ ├── auth
│ │ │ └── auth-refresh.test.ts
│ │ ├── integration
│ │ │ ├── auth-token-refresh.test.ts
│ │ │ ├── list-tasks.test.ts
│ │ │ └── storage
│ │ │ └── activity-logger.test.ts
│ │ ├── mocks
│ │ │ └── mock-provider.ts
│ │ ├── setup.ts
│ │ └── unit
│ │ ├── base-provider.test.ts
│ │ ├── executor.test.ts
│ │ └── smoke.test.ts
│ ├── tsconfig.json
│ └── vitest.config.ts
├── README-task-master.md
├── README.md
├── scripts
│ ├── create-worktree.sh
│ ├── dev.js
│ ├── init.js
│ ├── list-worktrees.sh
│ ├── modules
│ │ ├── ai-services-unified.js
│ │ ├── bridge-utils.js
│ │ ├── commands.js
│ │ ├── config-manager.js
│ │ ├── dependency-manager.js
│ │ ├── index.js
│ │ ├── prompt-manager.js
│ │ ├── supported-models.json
│ │ ├── sync-readme.js
│ │ ├── task-manager
│ │ │ ├── add-subtask.js
│ │ │ ├── add-task.js
│ │ │ ├── analyze-task-complexity.js
│ │ │ ├── clear-subtasks.js
│ │ │ ├── expand-all-tasks.js
│ │ │ ├── expand-task.js
│ │ │ ├── find-next-task.js
│ │ │ ├── generate-task-files.js
│ │ │ ├── is-task-dependent.js
│ │ │ ├── list-tasks.js
│ │ │ ├── migrate.js
│ │ │ ├── models.js
│ │ │ ├── move-task.js
│ │ │ ├── parse-prd
│ │ │ │ ├── index.js
│ │ │ │ ├── parse-prd-config.js
│ │ │ │ ├── parse-prd-helpers.js
│ │ │ │ ├── parse-prd-non-streaming.js
│ │ │ │ ├── parse-prd-streaming.js
│ │ │ │ └── parse-prd.js
│ │ │ ├── remove-subtask.js
│ │ │ ├── remove-task.js
│ │ │ ├── research.js
│ │ │ ├── response-language.js
│ │ │ ├── scope-adjustment.js
│ │ │ ├── set-task-status.js
│ │ │ ├── tag-management.js
│ │ │ ├── task-exists.js
│ │ │ ├── update-single-task-status.js
│ │ │ ├── update-subtask-by-id.js
│ │ │ ├── update-task-by-id.js
│ │ │ └── update-tasks.js
│ │ ├── task-manager.js
│ │ ├── ui.js
│ │ ├── update-config-tokens.js
│ │ ├── utils
│ │ │ ├── contextGatherer.js
│ │ │ ├── fuzzyTaskSearch.js
│ │ │ └── git-utils.js
│ │ └── utils.js
│ ├── task-complexity-report.json
│ ├── test-claude-errors.js
│ └── test-claude.js
├── sonar-project.properties
├── src
│ ├── ai-providers
│ │ ├── anthropic.js
│ │ ├── azure.js
│ │ ├── base-provider.js
│ │ ├── bedrock.js
│ │ ├── claude-code.js
│ │ ├── codex-cli.js
│ │ ├── gemini-cli.js
│ │ ├── google-vertex.js
│ │ ├── google.js
│ │ ├── grok-cli.js
│ │ ├── groq.js
│ │ ├── index.js
│ │ ├── lmstudio.js
│ │ ├── ollama.js
│ │ ├── openai-compatible.js
│ │ ├── openai.js
│ │ ├── openrouter.js
│ │ ├── perplexity.js
│ │ ├── xai.js
│ │ ├── zai-coding.js
│ │ └── zai.js
│ ├── constants
│ │ ├── commands.js
│ │ ├── paths.js
│ │ ├── profiles.js
│ │ ├── rules-actions.js
│ │ ├── task-priority.js
│ │ └── task-status.js
│ ├── profiles
│ │ ├── amp.js
│ │ ├── base-profile.js
│ │ ├── claude.js
│ │ ├── cline.js
│ │ ├── codex.js
│ │ ├── cursor.js
│ │ ├── gemini.js
│ │ ├── index.js
│ │ ├── kilo.js
│ │ ├── kiro.js
│ │ ├── opencode.js
│ │ ├── roo.js
│ │ ├── trae.js
│ │ ├── vscode.js
│ │ ├── windsurf.js
│ │ └── zed.js
│ ├── progress
│ │ ├── base-progress-tracker.js
│ │ ├── cli-progress-factory.js
│ │ ├── parse-prd-tracker.js
│ │ ├── progress-tracker-builder.js
│ │ └── tracker-ui.js
│ ├── prompts
│ │ ├── add-task.json
│ │ ├── analyze-complexity.json
│ │ ├── expand-task.json
│ │ ├── parse-prd.json
│ │ ├── README.md
│ │ ├── research.json
│ │ ├── schemas
│ │ │ ├── parameter.schema.json
│ │ │ ├── prompt-template.schema.json
│ │ │ ├── README.md
│ │ │ └── variant.schema.json
│ │ ├── update-subtask.json
│ │ ├── update-task.json
│ │ └── update-tasks.json
│ ├── provider-registry
│ │ └── index.js
│ ├── schemas
│ │ ├── add-task.js
│ │ ├── analyze-complexity.js
│ │ ├── base-schemas.js
│ │ ├── expand-task.js
│ │ ├── parse-prd.js
│ │ ├── registry.js
│ │ ├── update-subtask.js
│ │ ├── update-task.js
│ │ └── update-tasks.js
│ ├── task-master.js
│ ├── ui
│ │ ├── confirm.js
│ │ ├── indicators.js
│ │ └── parse-prd.js
│ └── utils
│ ├── asset-resolver.js
│ ├── create-mcp-config.js
│ ├── format.js
│ ├── getVersion.js
│ ├── logger-utils.js
│ ├── manage-gitignore.js
│ ├── path-utils.js
│ ├── profiles.js
│ ├── rule-transformer.js
│ ├── stream-parser.js
│ └── timeout-manager.js
├── test-clean-tags.js
├── test-config-manager.js
├── test-prd.txt
├── test-tag-functions.js
├── test-version-check-full.js
├── test-version-check.js
├── tests
│ ├── e2e
│ │ ├── e2e_helpers.sh
│ │ ├── parse_llm_output.cjs
│ │ ├── run_e2e.sh
│ │ ├── run_fallback_verification.sh
│ │ └── test_llm_analysis.sh
│ ├── fixtures
│ │ ├── .taskmasterconfig
│ │ ├── sample-claude-response.js
│ │ ├── sample-prd.txt
│ │ └── sample-tasks.js
│ ├── helpers
│ │ └── tool-counts.js
│ ├── integration
│ │ ├── claude-code-error-handling.test.js
│ │ ├── claude-code-optional.test.js
│ │ ├── cli
│ │ │ ├── commands.test.js
│ │ │ ├── complex-cross-tag-scenarios.test.js
│ │ │ └── move-cross-tag.test.js
│ │ ├── manage-gitignore.test.js
│ │ ├── mcp-server
│ │ │ └── direct-functions.test.js
│ │ ├── move-task-cross-tag.integration.test.js
│ │ ├── move-task-simple.integration.test.js
│ │ ├── profiles
│ │ │ ├── amp-init-functionality.test.js
│ │ │ ├── claude-init-functionality.test.js
│ │ │ ├── cline-init-functionality.test.js
│ │ │ ├── codex-init-functionality.test.js
│ │ │ ├── cursor-init-functionality.test.js
│ │ │ ├── gemini-init-functionality.test.js
│ │ │ ├── opencode-init-functionality.test.js
│ │ │ ├── roo-files-inclusion.test.js
│ │ │ ├── roo-init-functionality.test.js
│ │ │ ├── rules-files-inclusion.test.js
│ │ │ ├── trae-init-functionality.test.js
│ │ │ ├── vscode-init-functionality.test.js
│ │ │ └── windsurf-init-functionality.test.js
│ │ └── providers
│ │ └── temperature-support.test.js
│ ├── manual
│ │ ├── progress
│ │ │ ├── parse-prd-analysis.js
│ │ │ ├── test-parse-prd.js
│ │ │ └── TESTING_GUIDE.md
│ │ └── prompts
│ │ ├── prompt-test.js
│ │ └── README.md
│ ├── README.md
│ ├── setup.js
│ └── unit
│ ├── ai-providers
│ │ ├── base-provider.test.js
│ │ ├── claude-code.test.js
│ │ ├── codex-cli.test.js
│ │ ├── gemini-cli.test.js
│ │ ├── lmstudio.test.js
│ │ ├── mcp-components.test.js
│ │ ├── openai-compatible.test.js
│ │ ├── openai.test.js
│ │ ├── provider-registry.test.js
│ │ ├── zai-coding.test.js
│ │ ├── zai-provider.test.js
│ │ ├── zai-schema-introspection.test.js
│ │ └── zai.test.js
│ ├── ai-services-unified.test.js
│ ├── commands.test.js
│ ├── config-manager.test.js
│ ├── config-manager.test.mjs
│ ├── dependency-manager.test.js
│ ├── init.test.js
│ ├── initialize-project.test.js
│ ├── kebab-case-validation.test.js
│ ├── manage-gitignore.test.js
│ ├── mcp
│ │ └── tools
│ │ ├── __mocks__
│ │ │ └── move-task.js
│ │ ├── add-task.test.js
│ │ ├── analyze-complexity.test.js
│ │ ├── expand-all.test.js
│ │ ├── get-tasks.test.js
│ │ ├── initialize-project.test.js
│ │ ├── move-task-cross-tag-options.test.js
│ │ ├── move-task-cross-tag.test.js
│ │ ├── remove-task.test.js
│ │ └── tool-registration.test.js
│ ├── mcp-providers
│ │ ├── mcp-components.test.js
│ │ └── mcp-provider.test.js
│ ├── parse-prd.test.js
│ ├── profiles
│ │ ├── amp-integration.test.js
│ │ ├── claude-integration.test.js
│ │ ├── cline-integration.test.js
│ │ ├── codex-integration.test.js
│ │ ├── cursor-integration.test.js
│ │ ├── gemini-integration.test.js
│ │ ├── kilo-integration.test.js
│ │ ├── kiro-integration.test.js
│ │ ├── mcp-config-validation.test.js
│ │ ├── opencode-integration.test.js
│ │ ├── profile-safety-check.test.js
│ │ ├── roo-integration.test.js
│ │ ├── rule-transformer-cline.test.js
│ │ ├── rule-transformer-cursor.test.js
│ │ ├── rule-transformer-gemini.test.js
│ │ ├── rule-transformer-kilo.test.js
│ │ ├── rule-transformer-kiro.test.js
│ │ ├── rule-transformer-opencode.test.js
│ │ ├── rule-transformer-roo.test.js
│ │ ├── rule-transformer-trae.test.js
│ │ ├── rule-transformer-vscode.test.js
│ │ ├── rule-transformer-windsurf.test.js
│ │ ├── rule-transformer-zed.test.js
│ │ ├── rule-transformer.test.js
│ │ ├── selective-profile-removal.test.js
│ │ ├── subdirectory-support.test.js
│ │ ├── trae-integration.test.js
│ │ ├── vscode-integration.test.js
│ │ ├── windsurf-integration.test.js
│ │ └── zed-integration.test.js
│ ├── progress
│ │ └── base-progress-tracker.test.js
│ ├── prompt-manager.test.js
│ ├── prompts
│ │ ├── expand-task-prompt.test.js
│ │ └── prompt-migration.test.js
│ ├── scripts
│ │ └── modules
│ │ ├── commands
│ │ │ ├── move-cross-tag.test.js
│ │ │ └── README.md
│ │ ├── dependency-manager
│ │ │ ├── circular-dependencies.test.js
│ │ │ ├── cross-tag-dependencies.test.js
│ │ │ └── fix-dependencies-command.test.js
│ │ ├── task-manager
│ │ │ ├── add-subtask.test.js
│ │ │ ├── add-task.test.js
│ │ │ ├── analyze-task-complexity.test.js
│ │ │ ├── clear-subtasks.test.js
│ │ │ ├── complexity-report-tag-isolation.test.js
│ │ │ ├── expand-all-tasks.test.js
│ │ │ ├── expand-task.test.js
│ │ │ ├── find-next-task.test.js
│ │ │ ├── generate-task-files.test.js
│ │ │ ├── list-tasks.test.js
│ │ │ ├── models-baseurl.test.js
│ │ │ ├── move-task-cross-tag.test.js
│ │ │ ├── move-task.test.js
│ │ │ ├── parse-prd-schema.test.js
│ │ │ ├── parse-prd.test.js
│ │ │ ├── remove-subtask.test.js
│ │ │ ├── remove-task.test.js
│ │ │ ├── research.test.js
│ │ │ ├── scope-adjustment.test.js
│ │ │ ├── set-task-status.test.js
│ │ │ ├── setup.js
│ │ │ ├── update-single-task-status.test.js
│ │ │ ├── update-subtask-by-id.test.js
│ │ │ ├── update-task-by-id.test.js
│ │ │ └── update-tasks.test.js
│ │ ├── ui
│ │ │ └── cross-tag-error-display.test.js
│ │ └── utils-tag-aware-paths.test.js
│ ├── task-finder.test.js
│ ├── task-manager
│ │ ├── clear-subtasks.test.js
│ │ ├── move-task.test.js
│ │ ├── tag-boundary.test.js
│ │ └── tag-management.test.js
│ ├── task-master.test.js
│ ├── ui
│ │ └── indicators.test.js
│ ├── ui.test.js
│ ├── utils-strip-ansi.test.js
│ └── utils.test.js
├── tsconfig.json
├── tsdown.config.ts
├── turbo.json
└── update-task-migration-plan.md
```
# Files
--------------------------------------------------------------------------------
/tests/e2e/run_e2e.sh:
--------------------------------------------------------------------------------
```bash
1 | #!/bin/bash
2 |
3 | # Treat unset variables as an error when substituting.
4 | set -u
5 | # Prevent errors in pipelines from being masked.
6 | set -o pipefail
7 |
8 | # --- Default Settings ---
9 | run_verification_test=true
10 |
11 | # --- Argument Parsing ---
12 | # Simple loop to check for the skip flag
13 | # Note: This parsing needs to happen *before* the main block that is piped to tee
14 | # so that the skip decision can be logged early; handling the arguments here,
15 | # rather than inside that block, keeps things clear.
16 | processed_args=()
17 | while [[ $# -gt 0 ]]; do
18 | case "$1" in
19 | --skip-verification)
20 | run_verification_test=false
21 | echo "[INFO] Argument '--skip-verification' detected. Fallback verification will be skipped."
22 | shift # Consume the flag
23 | ;;
24 | --analyze-log)
25 | # Keep the analyze-log flag handling separate for now
26 | # It exits early, so doesn't conflict with the main run flags
27 | processed_args+=("$1")
28 | if [[ $# -gt 1 ]]; then
29 | processed_args+=("$2")
30 | shift 2
31 | else
32 | shift 1
33 | fi
34 | ;;
35 | *)
36 | # Unknown argument, pass it along or handle error
37 | # For now, just pass it along in case --analyze-log needs it later
38 | processed_args+=("$1")
39 | shift
40 | ;;
41 | esac
42 | done
43 | # Restore processed arguments ONLY if the array is not empty
44 | if [ ${#processed_args[@]} -gt 0 ]; then
45 | set -- "${processed_args[@]}"
46 | fi
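# Usage sketch (derived from the flag handling above; run from the project root):
#   ./tests/e2e/run_e2e.sh                       # full run, including fallback verification
#   ./tests/e2e/run_e2e.sh --skip-verification   # skip the fallback-verification step
#   ./tests/e2e/run_e2e.sh --analyze-log [file]  # analysis-only mode; defaults to the latest log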
47 |
48 |
49 | # --- Configuration ---
50 | # Assumes script is run from the project root (claude-task-master)
51 | TASKMASTER_SOURCE_DIR="." # Current directory is the source
52 | # Base directory for test runs, relative to project root
53 | BASE_TEST_DIR="$TASKMASTER_SOURCE_DIR/tests/e2e/_runs"
54 | # Log directory, relative to project root
55 | LOG_DIR="$TASKMASTER_SOURCE_DIR/tests/e2e/log"
56 | # Path to the sample PRD, relative to project root
57 | SAMPLE_PRD_SOURCE="$TASKMASTER_SOURCE_DIR/tests/fixtures/sample-prd.txt"
58 | # Path to the main .env file in the source directory
59 | MAIN_ENV_FILE="$TASKMASTER_SOURCE_DIR/.env"
60 | # ---
61 |
62 | # <<< Source the helper script >>>
63 | # shellcheck source=tests/e2e/e2e_helpers.sh
64 | source "$TASKMASTER_SOURCE_DIR/tests/e2e/e2e_helpers.sh"
65 |
66 | # ==========================================
67 | # >>> Global Helper Functions Defined in run_e2e.sh <<<
68 | # --- Helper Functions (Define globally before export) ---
69 | _format_duration() {
70 | local total_seconds=$1
71 | local minutes=$((total_seconds / 60))
72 | local seconds=$((total_seconds % 60))
73 | printf "%dm%02ds" "$minutes" "$seconds"
74 | }
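# e.g. _format_duration 125 -> "2m05s"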
75 |
76 | # Note: This relies on 'overall_start_time' being set globally before the function is called
77 | _get_elapsed_time_for_log() {
78 | local current_time
79 | current_time=$(date +%s)
80 | # Use overall_start_time here, as start_time_for_helpers might not be relevant globally
81 | local elapsed_seconds
82 | elapsed_seconds=$((current_time - overall_start_time))
83 | _format_duration "$elapsed_seconds"
84 | }
85 |
86 | log_info() {
87 | echo "[INFO] [$(_get_elapsed_time_for_log)] $(date +"%Y-%m-%d %H:%M:%S") $1"
88 | }
89 |
90 | log_success() {
91 | echo "[SUCCESS] [$(_get_elapsed_time_for_log)] $(date +"%Y-%m-%d %H:%M:%S") $1"
92 | }
93 |
94 | log_error() {
95 | echo "[ERROR] [$(_get_elapsed_time_for_log)] $(date +"%Y-%m-%d %H:%M:%S") $1" >&2
96 | }
97 |
98 | log_step() {
99 | test_step_count=$((test_step_count + 1))
100 | echo ""
101 | echo "============================================="
102 | echo " STEP ${test_step_count}: [$(_get_elapsed_time_for_log)] $(date +"%Y-%m-%d %H:%M:%S") $1"
103 | echo "============================================="
104 | }
105 | # ==========================================
106 |
107 | # <<< Export helper functions for subshells >>>
108 | export -f log_info log_success log_error log_step _format_duration _get_elapsed_time_for_log extract_and_sum_cost
109 |
110 | # --- Argument Parsing for Analysis-Only Mode ---
111 | # This remains the same, as it exits early if matched
112 | if [ "$#" -ge 1 ] && [ "$1" == "--analyze-log" ]; then
113 | LOG_TO_ANALYZE=""
114 | # Check if a log file path was provided as the second argument
115 | if [ "$#" -ge 2 ] && [ -n "$2" ]; then
116 | LOG_TO_ANALYZE="$2"
117 | echo "[INFO] Using specified log file for analysis: $LOG_TO_ANALYZE"
118 | else
119 | echo "[INFO] Log file not specified. Attempting to find the latest log..."
120 | # Find the latest log file in the LOG_DIR
121 | # Ensure LOG_DIR is absolute for ls to work correctly regardless of PWD
122 | ABS_LOG_DIR="$(cd "$TASKMASTER_SOURCE_DIR/$LOG_DIR" && pwd)"
123 | LATEST_LOG=$(ls -t "$ABS_LOG_DIR"/e2e_run_*.log 2>/dev/null | head -n 1)
124 |
125 | if [ -z "$LATEST_LOG" ]; then
126 | echo "[ERROR] No log files found matching 'e2e_run_*.log' in $ABS_LOG_DIR. Cannot analyze." >&2
127 | exit 1
128 | fi
129 | LOG_TO_ANALYZE="$LATEST_LOG"
130 | echo "[INFO] Found latest log file: $LOG_TO_ANALYZE"
131 | fi
132 |
133 | # Ensure the log path is absolute (it should be if found by ls, but double-check)
134 | if [[ "$LOG_TO_ANALYZE" != /* ]]; then
135 | LOG_TO_ANALYZE="$(pwd)/$LOG_TO_ANALYZE" # Fallback if relative path somehow occurred
136 | fi
137 | echo "[INFO] Running in analysis-only mode for log: $LOG_TO_ANALYZE"
138 |
139 | # --- Derive TEST_RUN_DIR from log file path ---
140 | # Extract timestamp like YYYYMMDD_HHMMSS from e2e_run_YYYYMMDD_HHMMSS.log
141 | log_basename=$(basename "$LOG_TO_ANALYZE")
142 | # Ensure the sed command matches the .log suffix correctly
143 | timestamp_match=$(echo "$log_basename" | sed -n 's/^e2e_run_\([0-9]\{8\}_[0-9]\{6\}\)\.log$/\1/p')
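# e.g. "e2e_run_20250101_123045.log" -> "20250101_123045"; yields an empty string if the name doesn't match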
144 |
145 | if [ -z "$timestamp_match" ]; then
146 | echo "[ERROR] Could not extract timestamp from log file name: $log_basename" >&2
147 | echo "[ERROR] Expected format: e2e_run_YYYYMMDD_HHMMSS.log" >&2
148 | exit 1
149 | fi
150 |
151 | # Construct the expected run directory path relative to project root
152 | EXPECTED_RUN_DIR="$TASKMASTER_SOURCE_DIR/tests/e2e/_runs/run_$timestamp_match"
153 | # Make it absolute
154 | EXPECTED_RUN_DIR_ABS="$(cd "$TASKMASTER_SOURCE_DIR" && pwd)/tests/e2e/_runs/run_$timestamp_match"
155 |
156 | if [ ! -d "$EXPECTED_RUN_DIR_ABS" ]; then
157 | echo "[ERROR] Corresponding test run directory not found: $EXPECTED_RUN_DIR_ABS" >&2
158 | exit 1
159 | fi
160 |
161 | # Save original dir before changing
162 | ORIGINAL_DIR=$(pwd)
163 |
164 | echo "[INFO] Changing directory to $EXPECTED_RUN_DIR_ABS for analysis context..."
165 | cd "$EXPECTED_RUN_DIR_ABS"
166 |
167 | # Call the analysis function (sourced from helpers)
168 | echo "[INFO] Calling analyze_log_with_llm function..."
169 | analyze_log_with_llm "$LOG_TO_ANALYZE" "$(cd "$ORIGINAL_DIR/$TASKMASTER_SOURCE_DIR" && pwd)" # Pass absolute project root
170 | ANALYSIS_EXIT_CODE=$?
171 |
172 | # Return to original directory
173 | cd "$ORIGINAL_DIR"
174 | exit $ANALYSIS_EXIT_CODE
175 | fi
176 | # --- End Analysis-Only Mode Logic ---
177 |
178 | # --- Normal Execution Starts Here (if not in analysis-only mode) ---
179 |
180 | # --- Test State Variables ---
181 | # Note: These are mainly for step numbering within the log now, not for final summary
182 | test_step_count=0
183 | start_time_for_helpers=0 # Separate start time for helper functions inside the pipe
184 | total_e2e_cost="0.0" # Initialize total E2E cost
185 | # ---
186 |
187 | # --- Log File Setup ---
188 | # Create the log directory if it doesn't exist
189 | mkdir -p "$LOG_DIR"
190 | # Define timestamped log file path
191 | TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
192 | # <<< Use pwd to create an absolute path AND add .log extension >>>
193 | LOG_FILE="$(pwd)/$LOG_DIR/e2e_run_${TIMESTAMP}.log"
194 |
195 | # Define and create the test run directory *before* the main pipe
196 | mkdir -p "$BASE_TEST_DIR" # Ensure base exists first
197 | TEST_RUN_DIR="$BASE_TEST_DIR/run_$TIMESTAMP"
198 | mkdir -p "$TEST_RUN_DIR"
199 |
200 | # Echo starting message to the original terminal BEFORE the main piped block
201 | echo "Starting E2E test. Output will be shown here and saved to: $LOG_FILE"
202 | echo "Running from directory: $(pwd)"
203 | echo "--- Starting E2E Run ---" # Separator before piped output starts
204 |
205 | # Record start time for overall duration *before* the pipe
206 | overall_start_time=$(date +%s)
207 |
208 | # <<< DEFINE ORIGINAL_DIR GLOBALLY HERE >>>
209 | ORIGINAL_DIR=$(pwd)
210 |
211 | # ==========================================
212 | # >>> MOVE FUNCTION DEFINITION HERE <<<
213 | # --- Helper Functions (Define globally) ---
214 | _format_duration() {
215 | local total_seconds=$1
216 | local minutes=$((total_seconds / 60))
217 | local seconds=$((total_seconds % 60))
218 | printf "%dm%02ds" "$minutes" "$seconds"
219 | }
220 |
221 | # Note: This relies on 'overall_start_time' being set globally before the function is called
222 | _get_elapsed_time_for_log() {
223 | local current_time=$(date +%s)
224 | # Use overall_start_time here, as start_time_for_helpers might not be relevant globally
225 | local elapsed_seconds=$((current_time - overall_start_time))
226 | _format_duration "$elapsed_seconds"
227 | }
228 |
229 | log_info() {
230 | echo "[INFO] [$(_get_elapsed_time_for_log)] $(date +"%Y-%m-%d %H:%M:%S") $1"
231 | }
232 |
233 | log_success() {
234 | echo "[SUCCESS] [$(_get_elapsed_time_for_log)] $(date +"%Y-%m-%d %H:%M:%S") $1"
235 | }
236 |
237 | log_error() {
238 | echo "[ERROR] [$(_get_elapsed_time_for_log)] $(date +"%Y-%m-%d %H:%M:%S") $1" >&2
239 | }
240 |
241 | log_step() {
242 | test_step_count=$((test_step_count + 1))
243 | echo ""
244 | echo "============================================="
245 | echo " STEP ${test_step_count}: [$(_get_elapsed_time_for_log)] $(date +"%Y-%m-%d %H:%M:%S") $1"
246 | echo "============================================="
247 | }
248 |
249 | # ==========================================
250 |
251 | # --- Main Execution Block (Piped to tee) ---
252 | # Wrap the main part of the script in braces and pipe its output (stdout and stderr) to tee
253 | {
254 | # Note: Helper functions are now defined globally above,
255 | # but we still need start_time_for_helpers if any logging functions
256 | # called *inside* this block depend on it. If not, it can be removed.
257 | start_time_for_helpers=$(date +%s) # Keep if needed by helpers called inside this block
258 |
259 | # Log the verification decision
260 | if [ "$run_verification_test" = true ]; then
261 | log_info "Fallback verification test will be run as part of this E2E test."
262 | else
263 | log_info "Fallback verification test will be SKIPPED (--skip-verification flag detected)."
264 | fi
265 |
266 | # --- Dependency Checks ---
267 | log_step "Checking for dependencies (jq, bc)"
268 | if ! command -v jq &> /dev/null; then
269 | log_error "Dependency 'jq' is not installed or not found in PATH. Please install jq (e.g., 'brew install jq' or 'sudo apt-get install jq')."
270 | exit 1
271 | fi
272 | if ! command -v bc &> /dev/null; then
273 | log_error "Dependency 'bc' not installed (for cost calculation). Please install bc (e.g., 'brew install bc' or 'sudo apt-get install bc')."
274 | exit 1
275 | fi
276 | log_success "Dependencies 'jq' and 'bc' found."
277 |
278 | # --- Test Setup (Output to tee) ---
279 | log_step "Setting up test environment"
280 |
281 | log_step "Creating global npm link for task-master-ai"
282 | if npm link; then
283 | log_success "Global link created/updated."
284 | else
285 | log_error "Failed to run 'npm link'. Check permissions or output for details."
286 | exit 1
287 | fi
288 |
289 | log_info "Ensured base test directory exists: $BASE_TEST_DIR"
290 |
291 | log_info "Using test run directory (created earlier): $TEST_RUN_DIR"
292 |
293 | # Check if source .env file exists
294 | if [ ! -f "$MAIN_ENV_FILE" ]; then
295 | log_error "Source .env file not found at $MAIN_ENV_FILE. Cannot proceed with API-dependent tests."
296 | exit 1
297 | fi
298 | log_info "Source .env file found at $MAIN_ENV_FILE."
299 |
300 | # Check if sample PRD exists
301 | if [ ! -f "$SAMPLE_PRD_SOURCE" ]; then
302 | log_error "Sample PRD not found at $SAMPLE_PRD_SOURCE. Please check path."
303 | exit 1
304 | fi
305 |
306 | log_info "Copying sample PRD to test directory..."
307 | cp "$SAMPLE_PRD_SOURCE" "$TEST_RUN_DIR/prd.txt"
308 | if [ ! -f "$TEST_RUN_DIR/prd.txt" ]; then
309 | log_error "Failed to copy sample PRD to $TEST_RUN_DIR."
310 | exit 1
311 | fi
312 | log_success "Sample PRD copied."
313 |
314 | # ORIGINAL_DIR=$(pwd) # Save original dir # <<< REMOVED FROM HERE
315 | cd "$TEST_RUN_DIR"
316 | log_info "Changed directory to $(pwd)"
317 |
318 | # === Copy .env file BEFORE init ===
319 | log_step "Copying source .env file for API keys"
320 | if cp "$ORIGINAL_DIR/.env" ".env"; then
321 | log_success ".env file copied successfully."
322 | else
323 | log_error "Failed to copy .env file from $ORIGINAL_DIR/.env"
324 | exit 1
325 | fi
326 | # ========================================
327 |
328 | # --- Test Execution (Output to tee) ---
329 |
330 | log_step "Linking task-master-ai package locally"
331 | npm link task-master-ai
332 | log_success "Package linked locally."
333 |
334 | log_step "Initializing Task Master project (non-interactive)"
335 | task-master init -y --name="E2E Test $TIMESTAMP" --description="Automated E2E test run"
336 | if [ ! -f ".taskmaster/config.json" ]; then
337 | log_error "Initialization failed: .taskmaster/config.json not found."
338 | exit 1
339 | fi
340 | log_success "Project initialized."
341 |
342 | log_step "Parsing PRD"
343 | cmd_output_prd=$(task-master parse-prd ./prd.txt --force 2>&1)
344 | exit_status_prd=$?
345 | echo "$cmd_output_prd"
346 | extract_and_sum_cost "$cmd_output_prd"
347 | if [ $exit_status_prd -ne 0 ] || [ ! -s ".taskmaster/tasks/tasks.json" ]; then
348 | log_error "Parsing PRD failed: .taskmaster/tasks/tasks.json not found or is empty. Exit status: $exit_status_prd"
349 | exit 1
350 | else
351 | log_success "PRD parsed successfully."
352 | fi
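# Pattern used for each AI-backed command below: capture stdout+stderr into a variable, echo it so it
# still reaches the tee'd log, then hand it to extract_and_sum_cost (sourced from e2e_helpers.sh),
# which presumably parses the reported AI cost and adds it to total_e2e_cost.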
353 |
354 | log_step "Expanding Task 1 (to ensure subtask 1.1 exists)"
355 | cmd_output_analyze=$(task-master analyze-complexity --research --output complexity_results.json 2>&1)
356 | exit_status_analyze=$?
357 | echo "$cmd_output_analyze"
358 | extract_and_sum_cost "$cmd_output_analyze"
359 | if [ $exit_status_analyze -ne 0 ] || [ ! -f "complexity_results.json" ]; then
360 | log_error "Complexity analysis failed: complexity_results.json not found. Exit status: $exit_status_analyze"
361 | exit 1
362 | else
363 | log_success "Complexity analysis saved to complexity_results.json"
364 | fi
365 |
366 | log_step "Generating complexity report"
367 | task-master complexity-report --file complexity_results.json > complexity_report_formatted.log
368 | log_success "Formatted complexity report saved to complexity_report_formatted.log"
369 |
370 | log_step "Expanding Task 1 (assuming it exists)"
371 | cmd_output_expand1=$(task-master expand --id=1 --cr complexity_results.json 2>&1)
372 | exit_status_expand1=$?
373 | echo "$cmd_output_expand1"
374 | extract_and_sum_cost "$cmd_output_expand1"
375 | if [ $exit_status_expand1 -ne 0 ]; then
376 | log_error "Expanding Task 1 failed. Exit status: $exit_status_expand1"
377 | else
378 | log_success "Attempted to expand Task 1."
379 | fi
380 |
381 | log_step "Setting status for Subtask 1.1 (assuming it exists)"
382 | task-master set-status --id=1.1 --status=done
383 | log_success "Attempted to set status for Subtask 1.1 to 'done'."
384 |
385 | log_step "Listing tasks again (after changes)"
386 | task-master list --with-subtasks > task_list_after_changes.log
387 | log_success "Task list after changes saved to task_list_after_changes.log"
388 |
389 | # === Start New Test Section: Tag-Aware Expand Testing ===
390 | log_step "Creating additional tag for expand testing"
391 | task-master add-tag feature-expand --description="Tag for testing expand command with tag preservation"
392 | log_success "Created feature-expand tag."
393 |
394 | log_step "Adding task to feature-expand tag"
395 | task-master add-task --tag=feature-expand --prompt="Test task for tag-aware expansion" --priority=medium
396 | # Get the new task ID dynamically
397 | new_expand_task_id=$(jq -r '.["feature-expand"].tasks[-1].id' .taskmaster/tasks/tasks.json)
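# tasks.json is keyed by tag name (e.g. .master, ."feature-expand"), each tag holding a "tasks" array,
# so .tasks[-1].id is the id of the most recently added task in that tag.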
398 | log_success "Added task $new_expand_task_id to feature-expand tag."
399 |
400 | log_step "Verifying tags exist before expand test"
401 | task-master tags > tags_before_expand.log
402 | tag_count_before=$(jq 'keys | length' .taskmaster/tasks/tasks.json)
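# 'keys | length' counts the top-level tag keys; comparing the count before and after expand
# is how the script detects tags being silently dropped (the "corruption" checked below).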
403 | log_success "Tag count before expand: $tag_count_before"
404 |
405 | log_step "Expanding task in feature-expand tag (testing tag corruption fix)"
406 | cmd_output_expand_tagged=$(task-master expand --tag=feature-expand --id="$new_expand_task_id" 2>&1)
407 | exit_status_expand_tagged=$?
408 | echo "$cmd_output_expand_tagged"
409 | extract_and_sum_cost "$cmd_output_expand_tagged"
410 | if [ $exit_status_expand_tagged -ne 0 ]; then
411 | log_error "Tagged expand failed. Exit status: $exit_status_expand_tagged"
412 | else
413 | log_success "Tagged expand completed."
414 | fi
415 |
416 | log_step "Verifying tag preservation after expand"
417 | task-master tags > tags_after_expand.log
418 | tag_count_after=$(jq 'keys | length' .taskmaster/tasks/tasks.json)
419 |
420 | if [ "$tag_count_before" -eq "$tag_count_after" ]; then
421 | log_success "Tag count preserved: $tag_count_after (no corruption detected)"
422 | else
423 | log_error "Tag corruption detected! Before: $tag_count_before, After: $tag_count_after"
424 | fi
425 |
426 | log_step "Verifying master tag still exists and has tasks"
427 | master_task_count=$(jq -r '.master.tasks | length' .taskmaster/tasks/tasks.json 2>/dev/null || echo "0")
428 | if [ "$master_task_count" -gt "0" ]; then
429 | log_success "Master tag preserved with $master_task_count tasks"
430 | else
431 | log_error "Master tag corrupted or empty after tagged expand"
432 | fi
433 |
434 | log_step "Verifying feature-expand tag has expanded subtasks"
435 | expanded_subtask_count=$(jq -r ".\"feature-expand\".tasks[] | select(.id == $new_expand_task_id) | .subtasks | length" .taskmaster/tasks/tasks.json 2>/dev/null || echo "0")
436 | if [ "$expanded_subtask_count" -gt "0" ]; then
437 | log_success "Expand successful: $expanded_subtask_count subtasks created in feature-expand tag"
438 | else
439 | log_error "Expand failed: No subtasks found in feature-expand tag"
440 | fi
441 |
442 | log_step "Testing force expand with tag preservation"
443 | cmd_output_force_expand=$(task-master expand --tag=feature-expand --id="$new_expand_task_id" --force 2>&1)
444 | exit_status_force_expand=$?
445 | echo "$cmd_output_force_expand"
446 | extract_and_sum_cost "$cmd_output_force_expand"
447 |
448 | # Verify tags still preserved after force expand
449 | tag_count_after_force=$(jq 'keys | length' .taskmaster/tasks/tasks.json)
450 | if [ "$tag_count_before" -eq "$tag_count_after_force" ]; then
451 | log_success "Force expand preserved all tags"
452 | else
453 | log_error "Force expand caused tag corruption"
454 | fi
455 |
456 | log_step "Testing expand --all with tag preservation"
457 | # Add another task to feature-expand for expand-all testing
458 | task-master add-task --tag=feature-expand --prompt="Second task for expand-all testing" --priority=low
459 | second_expand_task_id=$(jq -r '.["feature-expand"].tasks[-1].id' .taskmaster/tasks/tasks.json)
460 |
461 | cmd_output_expand_all=$(task-master expand --tag=feature-expand --all 2>&1)
462 | exit_status_expand_all=$?
463 | echo "$cmd_output_expand_all"
464 | extract_and_sum_cost "$cmd_output_expand_all"
465 |
466 | # Verify tags preserved after expand-all
467 | tag_count_after_all=$(jq 'keys | length' .taskmaster/tasks/tasks.json)
468 | if [ "$tag_count_before" -eq "$tag_count_after_all" ]; then
469 | log_success "Expand --all preserved all tags"
470 | else
471 | log_error "Expand --all caused tag corruption"
472 | fi
473 |
474 | log_success "Completed expand --all tag preservation test."
475 |
476 | # === End New Test Section: Tag-Aware Expand Testing ===
477 |
478 | # === Test Model Commands ===
479 | log_step "Checking initial model configuration"
480 | task-master models > models_initial_config.log
481 | log_success "Initial model config saved to models_initial_config.log"
482 |
483 | log_step "Setting main model"
484 | task-master models --set-main claude-3-7-sonnet-20250219
485 | log_success "Set main model."
486 |
487 | log_step "Setting research model"
488 | task-master models --set-research sonar-pro
489 | log_success "Set research model."
490 |
491 | log_step "Setting fallback model"
492 | task-master models --set-fallback claude-3-5-sonnet-20241022
493 | log_success "Set fallback model."
494 |
495 | log_step "Checking final model configuration"
496 | task-master models > models_final_config.log
497 | log_success "Final model config saved to models_final_config.log"
498 |
499 | log_step "Resetting main model to default (Claude Sonnet) before provider tests"
500 | task-master models --set-main claude-3-7-sonnet-20250219
501 | log_success "Main model reset to claude-3-7-sonnet-20250219."
502 |
503 | # === End Model Commands Test ===
504 |
505 | # === Fallback Model generateObjectService Verification ===
506 | if [ "$run_verification_test" = true ]; then
507 | log_step "Starting Fallback Model (generateObjectService) Verification (Calls separate script)"
508 | verification_script_path="$ORIGINAL_DIR/tests/e2e/run_fallback_verification.sh"
509 |
510 | if [ -x "$verification_script_path" ]; then
511 | log_info "--- Executing Fallback Verification Script: $verification_script_path ---"
512 | verification_output=$("$verification_script_path" "$(pwd)" 2>&1)
513 | verification_exit_code=$?
514 | echo "$verification_output"
515 | extract_and_sum_cost "$verification_output"
516 |
517 | log_info "--- Finished Fallback Verification Script Execution (Exit Code: $verification_exit_code) ---"
518 |
519 | # Log success/failure based on captured exit code
520 | if [ $verification_exit_code -eq 0 ]; then
521 | log_success "Fallback verification script reported success."
522 | else
523 | log_error "Fallback verification script reported FAILURE (Exit Code: $verification_exit_code)."
524 | fi
525 | else
526 | log_error "Fallback verification script not found or not executable at $verification_script_path. Skipping verification."
527 | fi
528 | else
529 | log_info "Skipping Fallback Verification test as requested by flag."
530 | fi
531 | # === END Verification Section ===
532 |
533 |
534 | # === Multi-Provider Add-Task Test (Keep as is) ===
535 | log_step "Starting Multi-Provider Add-Task Test Sequence"
536 |
537 | # Define providers, models, and flags
538 | # Array order matters: providers[i] corresponds to models[i] and flags[i]
539 | declare -a providers=("anthropic" "openai" "google" "perplexity" "xai" "openrouter")
540 | declare -a models=(
541 | "claude-3-7-sonnet-20250219"
542 | "gpt-4o"
543 | "gemini-2.5-pro-preview-05-06"
544 | "sonar-pro" # Note: This is research-only, add-task might fail if not using research model
545 | "grok-3"
546 | "anthropic/claude-3.7-sonnet" # OpenRouter uses Claude 3.7
547 | )
548 | # Flags: Add provider-specific flags here, e.g., --openrouter. Use empty string if none.
549 | declare -a flags=("" "" "" "" "" "--openrouter")
550 |
551 | # Consistent prompt for all providers
552 | add_task_prompt="Create a task to implement user authentication using OAuth 2.0 with Google as the provider. Include steps for registering the app, handling the callback, and storing user sessions."
553 | log_info "Using consistent prompt for add-task tests: \"$add_task_prompt\""
554 | echo "--- Multi-Provider Add Task Summary ---" > provider_add_task_summary.log # Initialize summary log
555 |
556 | for i in "${!providers[@]}"; do
557 | provider="${providers[$i]}"
558 | model="${models[$i]}"
559 | flag="${flags[$i]}"
560 |
561 | log_step "Testing Add-Task with Provider: $provider (Model: $model)"
562 |
563 | # 1. Set the main model for this provider
564 | log_info "Setting main model to $model for $provider ${flag:+using flag $flag}..."
565 | set_model_cmd="task-master models --set-main \"$model\" $flag"
566 | echo "Executing: $set_model_cmd"
567 |     if eval "$set_model_cmd"; then
568 | log_success "Successfully set main model for $provider."
569 | else
570 | log_error "Failed to set main model for $provider. Skipping add-task for this provider."
571 | # Optionally save failure info here if needed for LLM analysis
572 | echo "Provider $provider set-main FAILED" >> provider_add_task_summary.log
573 | continue # Skip to the next provider
574 | fi
575 |
576 | # 2. Run add-task
577 | log_info "Running add-task with prompt..."
578 |     add_task_output_file="add_task_raw_output_${provider}_${model//\//_}.log" # Sanitize model name (slashes) for the filename
579 |     # Capture ALL output (stdout & stderr) and the command's real exit code, then save the output to a file ($? here is task-master's status; piping to tee inside $(...) would make PIPESTATUS report tee's status instead)
580 |     add_task_cmd_output=$(task-master add-task --prompt "$add_task_prompt" 2>&1); add_task_exit_code=$?
581 |     echo "$add_task_cmd_output" > "$add_task_output_file"
582 |
583 | # 3. Check for success and extract task ID
584 | new_task_id=""
585 | extract_and_sum_cost "$add_task_cmd_output"
586 | if [ $add_task_exit_code -eq 0 ] && (echo "$add_task_cmd_output" | grep -q "✓ Added new task #" || echo "$add_task_cmd_output" | grep -q "✅ New task created successfully:" || echo "$add_task_cmd_output" | grep -q "Task [0-9]\+ Created Successfully"); then
587 | new_task_id=$(echo "$add_task_cmd_output" | grep -o -E "(Task |#)[0-9.]+" | grep -o -E "[0-9.]+" | head -n 1)
588 | if [ -n "$new_task_id" ]; then
589 | log_success "Add-task succeeded for $provider. New task ID: $new_task_id"
590 | echo "Provider $provider add-task SUCCESS (ID: $new_task_id)" >> provider_add_task_summary.log
591 | else
592 | # Succeeded but couldn't parse ID - treat as warning/anomaly
593 | log_error "Add-task command succeeded for $provider, but failed to extract task ID from output."
594 | echo "Provider $provider add-task SUCCESS (ID extraction FAILED)" >> provider_add_task_summary.log
595 | new_task_id="UNKNOWN_ID_EXTRACTION_FAILED"
596 | fi
597 | else
598 | log_error "Add-task command failed for $provider (Exit Code: $add_task_exit_code). See $add_task_output_file for details."
599 | echo "Provider $provider add-task FAILED (Exit Code: $add_task_exit_code)" >> provider_add_task_summary.log
600 | new_task_id="FAILED"
601 | fi
602 |
603 | # 4. Run task show if ID was obtained (even if extraction failed, use placeholder)
604 | if [ "$new_task_id" != "FAILED" ] && [ "$new_task_id" != "UNKNOWN_ID_EXTRACTION_FAILED" ]; then
605 | log_info "Running task show for new task ID: $new_task_id"
606 | show_output_file="add_task_show_output_${provider}_id_${new_task_id}.log"
607 | if task-master show "$new_task_id" > "$show_output_file"; then
608 | log_success "Task show output saved to $show_output_file"
609 | else
610 | log_error "task show command failed for ID $new_task_id. Check log."
611 | # Still keep the file, it might contain error output
612 | fi
613 | elif [ "$new_task_id" == "UNKNOWN_ID_EXTRACTION_FAILED" ]; then
614 | log_info "Skipping task show for $provider due to ID extraction failure."
615 | else
616 | log_info "Skipping task show for $provider due to add-task failure."
617 | fi
618 |
619 | done # End of provider loop
620 |
621 | log_step "Finished Multi-Provider Add-Task Test Sequence"
622 | echo "Provider add-task summary log available at: provider_add_task_summary.log"
623 | # === End Multi-Provider Add-Task Test ===
624 |
625 | log_step "Listing tasks again (after multi-add)"
626 | task-master list --with-subtasks > task_list_after_multi_add.log
627 | log_success "Task list after multi-add saved to task_list_after_multi_add.log"
628 |
629 |
630 | # === Resume Core Task Commands Test ===
631 | log_step "Listing tasks (for core tests)"
632 | task-master list > task_list_core_test_start.log
633 | log_success "Core test initial task list saved."
634 |
635 | log_step "Getting next task"
636 | task-master next > next_task_core_test.log
637 | log_success "Core test next task saved."
638 |
639 | log_step "Showing Task 1 details"
640 | task-master show 1 > task_1_details_core_test.log
641 | log_success "Task 1 details saved."
642 |
643 | log_step "Adding dependency (Task 2 depends on Task 1)"
644 | task-master add-dependency --id=2 --depends-on=1
645 | log_success "Added dependency 2->1."
646 |
647 | log_step "Validating dependencies (after add)"
648 | task-master validate-dependencies > validate_dependencies_after_add_core.log
649 | log_success "Dependency validation after add saved."
650 |
651 | log_step "Removing dependency (Task 2 depends on Task 1)"
652 | task-master remove-dependency --id=2 --depends-on=1
653 | log_success "Removed dependency 2->1."
654 |
655 | log_step "Fixing dependencies (should be no-op now)"
656 | task-master fix-dependencies > fix_dependencies_output_core.log
657 | log_success "Fix dependencies attempted."
658 |
659 | # === Start New Test Section: Validate/Fix Bad Dependencies ===
660 |
661 | log_step "Intentionally adding non-existent dependency (1 -> 999)"
662 | task-master add-dependency --id=1 --depends-on=999 || log_error "Failed to add non-existent dependency (unexpected)"
663 | # Don't exit even if the above fails; the goal is to test validation
664 | log_success "Attempted to add dependency 1 -> 999."
665 |
666 | log_step "Validating dependencies (expecting non-existent error)"
667 | task-master validate-dependencies > validate_deps_non_existent.log 2>&1 || true # Allow command to fail without exiting script
668 | if grep -q "Non-existent dependency ID: 999" validate_deps_non_existent.log; then
669 | log_success "Validation correctly identified non-existent dependency 999."
670 | else
671 | log_error "Validation DID NOT report non-existent dependency 999 as expected. Check validate_deps_non_existent.log"
672 | fi
673 |
674 | log_step "Fixing dependencies (should remove 1 -> 999)"
675 | task-master fix-dependencies > fix_deps_after_non_existent.log
676 | log_success "Attempted to fix dependencies."
677 |
678 | log_step "Validating dependencies (after fix)"
679 | task-master validate-dependencies > validate_deps_after_fix_non_existent.log 2>&1 || true # Allow potential failure
680 | if grep -q "Non-existent dependency ID: 999" validate_deps_after_fix_non_existent.log; then
681 | log_error "Validation STILL reports non-existent dependency 999 after fix. Check logs."
682 | else
683 | log_success "Validation shows non-existent dependency 999 was removed."
684 | fi
685 |
686 |
687 | log_step "Intentionally adding circular dependency (4 -> 5 -> 4)"
688 | task-master add-dependency --id=4 --depends-on=5 || log_error "Failed to add dependency 4->5"
689 | task-master add-dependency --id=5 --depends-on=4 || log_error "Failed to add dependency 5->4"
690 | log_success "Attempted to add dependencies 4 -> 5 and 5 -> 4."
691 |
692 |
693 | log_step "Validating dependencies (expecting circular error)"
694 | task-master validate-dependencies > validate_deps_circular.log 2>&1 || true # Allow command to fail
695 | # Note: Adjust the grep pattern based on the EXACT error message from validate-dependencies
696 | if grep -q -E "Circular dependency detected involving task IDs: (4, 5|5, 4)" validate_deps_circular.log; then
697 | log_success "Validation correctly identified circular dependency between 4 and 5."
698 | else
699 | log_error "Validation DID NOT report circular dependency 4<->5 as expected. Check validate_deps_circular.log"
700 | fi
701 |
702 | log_step "Fixing dependencies (should remove one side of 4 <-> 5)"
703 | task-master fix-dependencies > fix_deps_after_circular.log
704 | log_success "Attempted to fix dependencies."
705 |
706 | log_step "Validating dependencies (after fix circular)"
707 | task-master validate-dependencies > validate_deps_after_fix_circular.log 2>&1 || true # Allow potential failure
708 | if grep -q -E "Circular dependency detected involving task IDs: (4, 5|5, 4)" validate_deps_after_fix_circular.log; then
709 | log_error "Validation STILL reports circular dependency 4<->5 after fix. Check logs."
710 | else
711 | log_success "Validation shows circular dependency 4<->5 was resolved."
712 | fi
713 |
714 | # === End New Test Section ===
715 |
716 | # Find the next available task ID dynamically instead of hardcoding 11, 12
717 | # Assuming tasks are added sequentially and we didn't remove any core tasks yet
718 | last_task_id=$(jq '[.master.tasks[].id] | max' .taskmaster/tasks/tasks.json)
719 | manual_task_id=$((last_task_id + 1))
720 | ai_task_id=$((manual_task_id + 1))
721 |
722 | log_step "Adding Task $manual_task_id (Manual)"
723 | task-master add-task --title="Manual E2E Task" --description="Add basic health check endpoint" --priority=low --dependencies=3 # Depends on backend setup
724 | log_success "Added Task $manual_task_id manually."
725 |
726 | log_step "Adding Task $ai_task_id (AI)"
727 | cmd_output_add_ai=$(task-master add-task --prompt="Implement basic UI styling using CSS variables for colors and spacing" --priority=medium --dependencies=1 2>&1)
728 | exit_status_add_ai=$?
729 | echo "$cmd_output_add_ai"
730 | extract_and_sum_cost "$cmd_output_add_ai"
731 | if [ $exit_status_add_ai -ne 0 ]; then
732 | log_error "Adding AI Task $ai_task_id failed. Exit status: $exit_status_add_ai"
733 | else
734 | log_success "Added Task $ai_task_id via AI prompt."
735 | fi
736 |
737 |
738 | log_step "Updating Task 3 (update-task AI)"
739 | cmd_output_update_task3=$(task-master update-task --id=3 --prompt="Update backend server setup: Ensure CORS is configured to allow requests from the frontend origin." 2>&1)
740 | exit_status_update_task3=$?
741 | echo "$cmd_output_update_task3"
742 | extract_and_sum_cost "$cmd_output_update_task3"
743 | if [ $exit_status_update_task3 -ne 0 ]; then
744 | log_error "Updating Task 3 failed. Exit status: $exit_status_update_task3"
745 | else
746 | log_success "Attempted update for Task 3."
747 | fi
748 |
749 | log_step "Updating Tasks from Task 5 (update AI)"
750 | cmd_output_update_from5=$(task-master update --from=5 --prompt="Refactor the backend storage module to use a simple JSON file (storage.json) instead of an in-memory object for persistence. Update relevant tasks." 2>&1)
751 | exit_status_update_from5=$?
752 | echo "$cmd_output_update_from5"
753 | extract_and_sum_cost "$cmd_output_update_from5"
754 | if [ $exit_status_update_from5 -ne 0 ]; then
755 | log_error "Updating from Task 5 failed. Exit status: $exit_status_update_from5"
756 | else
757 | log_success "Attempted update from Task 5 onwards."
758 | fi
759 |
760 | log_step "Expanding Task 8 (AI)"
761 | cmd_output_expand8=$(task-master expand --id=8 2>&1)
762 | exit_status_expand8=$?
763 | echo "$cmd_output_expand8"
764 | extract_and_sum_cost "$cmd_output_expand8"
765 | if [ $exit_status_expand8 -ne 0 ]; then
766 | log_error "Expanding Task 8 failed. Exit status: $exit_status_expand8"
767 | else
768 | log_success "Attempted to expand Task 8."
769 | fi
770 |
771 | log_step "Updating Subtask 8.1 (update-subtask AI)"
772 | cmd_output_update_subtask81=$(task-master update-subtask --id=8.1 --prompt="Implementation note: Remember to handle potential API errors and display a user-friendly message." 2>&1)
773 | exit_status_update_subtask81=$?
774 | echo "$cmd_output_update_subtask81"
775 | extract_and_sum_cost "$cmd_output_update_subtask81"
776 | if [ $exit_status_update_subtask81 -ne 0 ]; then
777 | log_error "Updating Subtask 8.1 failed. Exit status: $exit_status_update_subtask81"
778 | else
779 | log_success "Attempted update for Subtask 8.1."
780 | fi
781 |
782 | # Add a couple more subtasks for multi-remove test
783 | log_step 'Adding subtasks to Task 2 (for multi-remove test)'
784 | task-master add-subtask --parent=2 --title="Subtask 2.1 for removal"
785 | task-master add-subtask --parent=2 --title="Subtask 2.2 for removal"
786 | log_success "Added subtasks 2.1 and 2.2."
787 |
788 | log_step "Removing Subtasks 2.1 and 2.2 (multi-ID)"
789 | task-master remove-subtask --id=2.1,2.2
790 | log_success "Removed subtasks 2.1 and 2.2."
791 |
792 | log_step "Setting status for Task 1 to done"
793 | task-master set-status --id=1 --status=done
794 | log_success "Set status for Task 1 to done."
795 |
796 | log_step "Getting next task (after status change)"
797 | task-master next > next_task_after_change_core.log
798 | log_success "Next task after change saved."
799 |
800 | # === Start New Test Section: List Filtering ===
801 | log_step "Listing tasks filtered by status 'done'"
802 | task-master list --status=done > task_list_status_done.log
803 | log_success "Filtered list saved to task_list_status_done.log (Manual/LLM check recommended)"
804 | # Optional assertion: Check if Task 1 ID exists and Task 2 ID does NOT
805 | # if grep -q "^1\." task_list_status_done.log && ! grep -q "^2\." task_list_status_done.log; then
806 | # log_success "Basic check passed: Task 1 found, Task 2 not found in 'done' list."
807 | # else
808 | # log_error "Basic check failed for list --status=done."
809 | # fi
810 | # === End New Test Section ===
811 |
812 | log_step "Clearing subtasks from Task 8"
813 | task-master clear-subtasks --id=8
814 | log_success "Attempted to clear subtasks from Task 8."
815 |
816 | log_step "Removing Tasks $manual_task_id and $ai_task_id (multi-ID)"
817 | # Remove the tasks we added earlier
818 | task-master remove-task --id="$manual_task_id,$ai_task_id" -y
819 | log_success "Removed tasks $manual_task_id and $ai_task_id."
820 |
821 | # === Start New Test Section: Subtasks & Dependencies ===
822 |
823 | log_step "Expanding Task 2 (to ensure multiple tasks have subtasks)"
824 | task-master expand --id=2 # Expand task 2: Backend setup
825 | log_success "Attempted to expand Task 2."
826 |
827 | log_step "Listing tasks with subtasks (Before Clear All)"
828 | task-master list --with-subtasks > task_list_before_clear_all.log
829 | log_success "Task list before clear-all saved."
830 |
831 | log_step "Clearing ALL subtasks"
832 | task-master clear-subtasks --all
833 | log_success "Attempted to clear all subtasks."
834 |
835 | log_step "Listing tasks with subtasks (After Clear All)"
836 | task-master list --with-subtasks > task_list_after_clear_all.log
837 | log_success "Task list after clear-all saved. (Manual/LLM check recommended to verify subtasks removed)"
838 |
839 | log_step "Expanding Task 3 again (to have subtasks for next test)"
840 | task-master expand --id=3
841 | log_success "Attempted to expand Task 3."
842 | # Verify 3.1 exists
843 | if ! jq -e '.master.tasks[] | select(.id == 3) | .subtasks[] | select(.id == 1)' .taskmaster/tasks/tasks.json > /dev/null; then
844 | log_error "Subtask 3.1 not found in tasks.json after expanding Task 3."
845 | exit 1
846 | fi
847 |
848 | log_step "Adding dependency: Task 4 depends on Subtask 3.1"
849 | task-master add-dependency --id=4 --depends-on=3.1
850 | log_success "Added dependency 4 -> 3.1."
851 |
852 | log_step "Showing Task 4 details (after adding subtask dependency)"
853 | task-master show 4 > task_4_details_after_dep_add.log
854 | log_success "Task 4 details saved. (Manual/LLM check recommended for dependency [3.1])"
855 |
856 | log_step "Removing dependency: Task 4 depends on Subtask 3.1"
857 | task-master remove-dependency --id=4 --depends-on=3.1
858 | log_success "Removed dependency 4 -> 3.1."
859 |
860 | log_step "Showing Task 4 details (after removing subtask dependency)"
861 | task-master show 4 > task_4_details_after_dep_remove.log
862 | log_success "Task 4 details saved. (Manual/LLM check recommended to verify dependency removed)"
863 |
864 | # === End New Test Section ===
865 |
866 | log_step "Generating task files (final)"
867 | task-master generate
868 | log_success "Generated task files."
869 | # === End Core Task Commands Test ===
870 |
871 | # === AI Commands (Re-test some after changes) ===
872 | log_step "Analyzing complexity (AI with Research - Final Check)"
873 | cmd_output_analyze_final=$(task-master analyze-complexity --research --output complexity_results_final.json 2>&1)
874 | exit_status_analyze_final=$?
875 | echo "$cmd_output_analyze_final"
876 | extract_and_sum_cost "$cmd_output_analyze_final"
877 | if [ $exit_status_analyze_final -ne 0 ] || [ ! -f "complexity_results_final.json" ]; then
878 | log_error "Final Complexity analysis failed. Exit status: $exit_status_analyze_final. File found: $(test -f complexity_results_final.json && echo true || echo false)"
879 | exit 1 # Critical for subsequent report step
880 | else
881 | log_success "Final Complexity analysis command executed and file created."
882 | fi
883 |
884 | log_step "Generating complexity report (Non-AI - Final Check)"
885 | task-master complexity-report --file complexity_results_final.json > complexity_report_formatted_final.log
886 | log_success "Final Formatted complexity report saved."
887 |
888 | # === End AI Commands Re-test ===
889 |
890 | log_step "Listing tasks again (final)"
891 | task-master list --with-subtasks > task_list_final.log
892 | log_success "Final task list saved to task_list_final.log"
893 |
894 | # --- Test Completion (Output to tee) ---
895 | log_step "E2E Test Steps Completed"
896 | echo ""
897 | ABS_TEST_RUN_DIR="$(pwd)"
898 | echo "Test artifacts and logs are located in: $ABS_TEST_RUN_DIR"
899 | echo "Key artifact files (within above dir):"
900 | ls -1 # List files in the current directory
901 | echo ""
902 | echo "Full script log also available at: $LOG_FILE (relative to project root)"
903 |
904 | # Optional: cd back to original directory
905 | # cd "$ORIGINAL_DIR"
906 |
907 | # End of the main execution block brace
908 | } 2>&1 | tee "$LOG_FILE"
909 |
910 | # --- Final Terminal Message ---
911 | EXIT_CODE=${PIPESTATUS[0]} # exit status of the braced block above (first command in the "| tee" pipeline)
912 | overall_end_time=$(date +%s)
913 | total_elapsed_seconds=$((overall_end_time - overall_start_time))
914 |
915 | # Format total duration
916 | total_minutes=$((total_elapsed_seconds / 60))
917 | total_sec_rem=$((total_elapsed_seconds % 60))
918 | formatted_total_time=$(printf "%dm%02ds" "$total_minutes" "$total_sec_rem")
919 |
920 | # Count steps and successes from the log file *after* the pipe finishes
921 | # Use grep -c for counting lines matching the pattern
922 | # Corrected pattern to match ' STEP X:' format
923 | final_step_count=$(grep -c '^[[:space:]]\+STEP [0-9]\+:' "$LOG_FILE" || true)
924 | final_success_count=$(grep -c '\[SUCCESS\]' "$LOG_FILE" || true) # Count lines containing [SUCCESS]
925 |
926 | echo "--- E2E Run Summary ---"
927 | echo "Log File: $LOG_FILE"
928 | echo "Total Elapsed Time: ${formatted_total_time}"
929 | echo "Total Steps Executed: ${final_step_count}" # Use count from log
930 |
931 | if [ $EXIT_CODE -eq 0 ]; then
932 | echo "Status: SUCCESS"
933 | # Use counts from log file
934 | echo "Successful Steps: ${final_success_count}/${final_step_count}"
935 | else
936 | echo "Status: FAILED"
937 | # Use count from log file for total steps attempted
938 | echo "Failure likely occurred during/after Step: ${final_step_count}"
939 | # Use count from log file for successes before failure
940 | echo "Successful Steps Before Failure: ${final_success_count}"
941 | echo "Please check the log file '$LOG_FILE' for error details."
942 | fi
943 | echo "-------------------------"
944 |
945 | # --- Attempt LLM Analysis ---
946 | # Run this *after* the main execution block and tee pipe finish writing the log file
947 | if [ -d "$TEST_RUN_DIR" ]; then
948 | # Define absolute path to source dir if not already defined (though it should be by setup)
949 | TASKMASTER_SOURCE_DIR_ABS=${TASKMASTER_SOURCE_DIR_ABS:-$(cd "$ORIGINAL_DIR/$TASKMASTER_SOURCE_DIR" && pwd)}
950 |
951 | cd "$TEST_RUN_DIR"
952 | # Pass the absolute source directory path
953 | analyze_log_with_llm "$LOG_FILE" "$TASKMASTER_SOURCE_DIR_ABS"
954 | ANALYSIS_EXIT_CODE=$? # Capture the exit code of the analysis function
955 | # Optional: cd back again if needed
956 | cd "$ORIGINAL_DIR" # Ensure we change back to the original directory
957 | else
958 | formatted_duration_for_error=$(_format_duration "$total_elapsed_seconds")
959 | echo "[ERROR] [$formatted_duration_for_error] $(date +"%Y-%m-%d %H:%M:%S") Test run directory $TEST_RUN_DIR not found. Cannot perform LLM analysis." >&2
960 | fi
961 |
962 | # Final cost formatting
963 | formatted_total_e2e_cost=$(printf "%.6f" "$total_e2e_cost")
964 | echo "Total E2E AI Cost: $formatted_total_e2e_cost USD"
965 |
966 | exit $EXIT_CODE
```
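
A minimal sketch of the per-step capture pattern the script above repeats: run a command, log its combined output, keep the command's own exit status, and add any reported AI cost to a running total. The helper names, the dollar-amount regex, and the use of `bc` are illustrative assumptions; the script's actual `extract_and_sum_cost` helper is defined in an earlier, unshown part of the file and may parse output differently.

```bash
#!/usr/bin/env bash
# Sketch only: names and output format below are assumptions, not the
# script's real implementation.

total_e2e_cost="0"

# Add the first dollar amount found in a command's output to the running total.
sum_cost_sketch() {
  local cost
  cost=$(printf '%s\n' "$1" | grep -oE '\$[0-9]+(\.[0-9]+)?' | head -n 1 | tr -d '$')
  if [ -n "$cost" ]; then
    total_e2e_cost=$(echo "$total_e2e_cost + $cost" | bc)
  fi
}

# Run a step: log its combined output, keep its real exit status, sum its cost.
run_step_sketch() {
  local logfile="$1"; shift
  local output exit_code
  output=$("$@" 2>&1)   # capture first...
  exit_code=$?          # ...so this is the command's status, not tee's
  printf '%s\n' "$output" | tee "$logfile"
  sum_cost_sketch "$output"
  return "$exit_code"
}

# Hypothetical usage:
# run_step_sketch next_task.log task-master next || echo "step failed"
```

Capturing the output in a variable before teeing it is what keeps `$?` tied to the command under test; piping directly into `tee` inside `$(...)` would surface tee's exit status instead.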
--------------------------------------------------------------------------------
/packages/tm-core/src/modules/workflow/orchestrators/workflow-orchestrator.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { beforeEach, describe, expect, it, vi } from 'vitest';
2 | import { WorkflowOrchestrator } from '../orchestrators/workflow-orchestrator.js';
3 | import { TestResultValidator } from '../services/test-result-validator.js';
4 | import type { TestResult } from '../services/test-result-validator.types.js';
5 | import type {
6 | WorkflowContext,
7 | WorkflowError,
8 | WorkflowEventData,
9 | WorkflowPhase
10 | } from '../types.js';
11 |
12 | describe('WorkflowOrchestrator - State Machine Structure', () => {
13 | let orchestrator: WorkflowOrchestrator;
14 | let initialContext: WorkflowContext;
15 |
16 | beforeEach(() => {
17 | initialContext = {
18 | taskId: 'task-1',
19 | subtasks: [
20 | { id: '1.1', title: 'Subtask 1', status: 'pending', attempts: 0 },
21 | { id: '1.2', title: 'Subtask 2', status: 'pending', attempts: 0 }
22 | ],
23 | currentSubtaskIndex: 0,
24 | errors: [],
25 | metadata: {}
26 | };
27 | orchestrator = new WorkflowOrchestrator(initialContext);
28 | });
29 |
30 | describe('Initial State', () => {
31 | it('should start in PREFLIGHT phase', () => {
32 | expect(orchestrator.getCurrentPhase()).toBe('PREFLIGHT');
33 | });
34 |
35 | it('should have the provided context', () => {
36 | const context = orchestrator.getContext();
37 | expect(context.taskId).toBe('task-1');
38 | expect(context.subtasks).toHaveLength(2);
39 | });
40 | });
41 |
42 | describe('State Transitions', () => {
43 | it('should transition from PREFLIGHT to BRANCH_SETUP', () => {
44 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
45 | expect(orchestrator.getCurrentPhase()).toBe('BRANCH_SETUP');
46 | });
47 |
48 | it('should transition from BRANCH_SETUP to SUBTASK_LOOP', () => {
49 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
50 | orchestrator.transition({
51 | type: 'BRANCH_CREATED',
52 | branchName: 'feature/test'
53 | });
54 | expect(orchestrator.getCurrentPhase()).toBe('SUBTASK_LOOP');
55 | });
56 |
57 | it('should store branch name in context', () => {
58 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
59 | orchestrator.transition({
60 | type: 'BRANCH_CREATED',
61 | branchName: 'feature/test'
62 | });
63 | expect(orchestrator.getContext().branchName).toBe('feature/test');
64 | });
65 |
66 | it('should transition from SUBTASK_LOOP to FINALIZE when all subtasks complete', () => {
67 | // Navigate to SUBTASK_LOOP
68 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
69 | orchestrator.transition({
70 | type: 'BRANCH_CREATED',
71 | branchName: 'feature/test'
72 | });
73 |
74 | // Complete all subtasks
75 | orchestrator.transition({ type: 'ALL_SUBTASKS_COMPLETE' });
76 | expect(orchestrator.getCurrentPhase()).toBe('FINALIZE');
77 | });
78 |
79 | it('should transition from FINALIZE to COMPLETE', () => {
80 | // Navigate to FINALIZE
81 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
82 | orchestrator.transition({
83 | type: 'BRANCH_CREATED',
84 | branchName: 'feature/test'
85 | });
86 | orchestrator.transition({ type: 'ALL_SUBTASKS_COMPLETE' });
87 |
88 | // Complete finalization
89 | orchestrator.transition({ type: 'FINALIZE_COMPLETE' });
90 | expect(orchestrator.getCurrentPhase()).toBe('COMPLETE');
91 | });
92 |
93 | it('should reject invalid transitions', () => {
94 | expect(() => {
95 | orchestrator.transition({ type: 'FINALIZE_COMPLETE' });
96 | }).toThrow('Invalid transition');
97 | });
98 | });
99 |
100 | describe('TDD Cycle in SUBTASK_LOOP', () => {
101 | beforeEach(() => {
102 | // Navigate to SUBTASK_LOOP
103 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
104 | orchestrator.transition({
105 | type: 'BRANCH_CREATED',
106 | branchName: 'feature/test'
107 | });
108 | });
109 |
110 | it('should start with RED phase when entering SUBTASK_LOOP', () => {
111 | expect(orchestrator.getCurrentTDDPhase()).toBe('RED');
112 | });
113 |
114 | it('should transition from RED to GREEN', () => {
115 | orchestrator.transition({
116 | type: 'RED_PHASE_COMPLETE',
117 | testResults: {
118 | total: 5,
119 | passed: 0,
120 | failed: 5,
121 | skipped: 0,
122 | phase: 'RED'
123 | }
124 | });
125 | expect(orchestrator.getCurrentTDDPhase()).toBe('GREEN');
126 | });
127 |
128 | it('should transition from GREEN to COMMIT', () => {
129 | orchestrator.transition({
130 | type: 'RED_PHASE_COMPLETE',
131 | testResults: {
132 | total: 5,
133 | passed: 0,
134 | failed: 5,
135 | skipped: 0,
136 | phase: 'RED'
137 | }
138 | });
139 | orchestrator.transition({
140 | type: 'GREEN_PHASE_COMPLETE',
141 | testResults: {
142 | total: 5,
143 | passed: 5,
144 | failed: 0,
145 | skipped: 0,
146 | phase: 'GREEN'
147 | }
148 | });
149 | expect(orchestrator.getCurrentTDDPhase()).toBe('COMMIT');
150 | });
151 |
152 | it('should complete subtask after COMMIT', () => {
153 | orchestrator.transition({
154 | type: 'RED_PHASE_COMPLETE',
155 | testResults: {
156 | total: 5,
157 | passed: 0,
158 | failed: 5,
159 | skipped: 0,
160 | phase: 'RED'
161 | }
162 | });
163 | orchestrator.transition({
164 | type: 'GREEN_PHASE_COMPLETE',
165 | testResults: {
166 | total: 5,
167 | passed: 5,
168 | failed: 0,
169 | skipped: 0,
170 | phase: 'GREEN'
171 | }
172 | });
173 | orchestrator.transition({ type: 'COMMIT_COMPLETE' });
174 |
175 | const context = orchestrator.getContext();
176 | expect(context.subtasks[0].status).toBe('completed');
177 | });
178 |
179 | it('should move to next subtask after completion', () => {
180 | orchestrator.transition({
181 | type: 'RED_PHASE_COMPLETE',
182 | testResults: {
183 | total: 5,
184 | passed: 0,
185 | failed: 5,
186 | skipped: 0,
187 | phase: 'RED'
188 | }
189 | });
190 | orchestrator.transition({
191 | type: 'GREEN_PHASE_COMPLETE',
192 | testResults: {
193 | total: 5,
194 | passed: 5,
195 | failed: 0,
196 | skipped: 0,
197 | phase: 'GREEN'
198 | }
199 | });
200 | orchestrator.transition({ type: 'COMMIT_COMPLETE' });
201 | orchestrator.transition({ type: 'SUBTASK_COMPLETE' });
202 |
203 | expect(orchestrator.getContext().currentSubtaskIndex).toBe(1);
204 | expect(orchestrator.getCurrentTDDPhase()).toBe('RED');
205 | });
206 | });
207 |
208 | describe('State Serialization', () => {
209 | it('should serialize current state', () => {
210 | const state = orchestrator.getState();
211 | expect(state).toHaveProperty('phase');
212 | expect(state).toHaveProperty('context');
213 | expect(state.phase).toBe('PREFLIGHT');
214 | });
215 |
216 | it('should restore from serialized state', () => {
217 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
218 | orchestrator.transition({
219 | type: 'BRANCH_CREATED',
220 | branchName: 'feature/test'
221 | });
222 |
223 | const state = orchestrator.getState();
224 | const restored = new WorkflowOrchestrator(state.context);
225 | restored.restoreState(state);
226 |
227 | expect(restored.getCurrentPhase()).toBe('SUBTASK_LOOP');
228 | expect(restored.getContext().branchName).toBe('feature/test');
229 | });
230 | });
231 |
232 | describe('Event Emission', () => {
233 | it('should emit phase:entered event on state transition', () => {
234 | const events: WorkflowEventData[] = [];
235 | orchestrator.on('phase:entered', (event) => events.push(event));
236 |
237 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
238 |
239 | expect(events).toHaveLength(1);
240 | expect(events[0].type).toBe('phase:entered');
241 | expect(events[0].phase).toBe('BRANCH_SETUP');
242 | });
243 |
244 | it('should emit phase:exited event on state transition', () => {
245 | const events: WorkflowEventData[] = [];
246 | orchestrator.on('phase:exited', (event) => events.push(event));
247 |
248 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
249 |
250 | expect(events).toHaveLength(1);
251 | expect(events[0].type).toBe('phase:exited');
252 | expect(events[0].phase).toBe('PREFLIGHT');
253 | });
254 |
255 | it('should emit tdd phase events', () => {
256 | const events: WorkflowEventData[] = [];
257 | orchestrator.on('tdd:red:started', (event) => events.push(event));
258 | orchestrator.on('tdd:green:started', (event) => events.push(event));
259 |
260 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
261 | orchestrator.transition({
262 | type: 'BRANCH_CREATED',
263 | branchName: 'feature/test'
264 | });
265 | expect(events).toHaveLength(1);
266 | expect(events[0].type).toBe('tdd:red:started');
267 |
268 | orchestrator.transition({
269 | type: 'RED_PHASE_COMPLETE',
270 | testResults: {
271 | total: 5,
272 | passed: 0,
273 | failed: 5,
274 | skipped: 0,
275 | phase: 'RED'
276 | }
277 | });
278 | expect(events).toHaveLength(2);
279 | expect(events[1].type).toBe('tdd:green:started');
280 | });
281 |
282 | it('should emit subtask events', () => {
283 | const events: WorkflowEventData[] = [];
284 | orchestrator.on('subtask:started', (event) => events.push(event));
285 | orchestrator.on('subtask:completed', (event) => events.push(event));
286 |
287 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
288 | orchestrator.transition({
289 | type: 'BRANCH_CREATED',
290 | branchName: 'feature/test'
291 | });
292 | expect(events).toHaveLength(1);
293 | expect(events[0].type).toBe('subtask:started');
294 | expect(events[0].subtaskId).toBe('1.1');
295 |
296 | // Complete TDD cycle
297 | orchestrator.transition({
298 | type: 'RED_PHASE_COMPLETE',
299 | testResults: {
300 | total: 5,
301 | passed: 0,
302 | failed: 5,
303 | skipped: 0,
304 | phase: 'RED'
305 | }
306 | });
307 | orchestrator.transition({
308 | type: 'GREEN_PHASE_COMPLETE',
309 | testResults: {
310 | total: 5,
311 | passed: 5,
312 | failed: 0,
313 | skipped: 0,
314 | phase: 'GREEN'
315 | }
316 | });
317 | orchestrator.transition({ type: 'COMMIT_COMPLETE' });
318 | orchestrator.transition({ type: 'SUBTASK_COMPLETE' });
319 |
320 | expect(events).toHaveLength(3);
321 | expect(events[1].type).toBe('subtask:completed');
322 | expect(events[2].type).toBe('subtask:started');
323 | expect(events[2].subtaskId).toBe('1.2');
324 | });
325 |
326 | it('should support multiple listeners for same event', () => {
327 | const listener1 = vi.fn();
328 | const listener2 = vi.fn();
329 |
330 | orchestrator.on('phase:entered', listener1);
331 | orchestrator.on('phase:entered', listener2);
332 |
333 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
334 |
335 | expect(listener1).toHaveBeenCalledOnce();
336 | expect(listener2).toHaveBeenCalledOnce();
337 | });
338 |
339 | it('should allow removing event listeners', () => {
340 | const listener = vi.fn();
341 | orchestrator.on('phase:entered', listener);
342 | orchestrator.off('phase:entered', listener);
343 |
344 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
345 |
346 | expect(listener).not.toHaveBeenCalled();
347 | });
348 |
349 | it('should include timestamp in all events', () => {
350 | const events: WorkflowEventData[] = [];
351 | orchestrator.on('phase:entered', (event) => events.push(event));
352 |
353 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
354 |
355 | expect(events[0].timestamp).toBeInstanceOf(Date);
356 | });
357 |
358 | it('should include additional data in events', () => {
359 | const events: WorkflowEventData[] = [];
360 | orchestrator.on('git:branch:created', (event) => events.push(event));
361 |
362 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
363 | orchestrator.transition({
364 | type: 'BRANCH_CREATED',
365 | branchName: 'feature/test'
366 | });
367 |
368 | const branchEvent = events.find((e) => e.type === 'git:branch:created');
369 | expect(branchEvent).toBeDefined();
370 | expect(branchEvent?.data?.branchName).toBe('feature/test');
371 | });
372 | });
373 |
374 | describe('State Persistence', () => {
375 | it('should persist state after transitions when auto-persist enabled', async () => {
376 | const persistMock = vi.fn();
377 | orchestrator.enableAutoPersist(persistMock);
378 |
379 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
380 |
381 | expect(persistMock).toHaveBeenCalledOnce();
382 | const state = persistMock.mock.calls[0][0];
383 | expect(state.phase).toBe('BRANCH_SETUP');
384 | });
385 |
386 | it('should emit state:persisted event', async () => {
387 | const events: WorkflowEventData[] = [];
388 | orchestrator.on('state:persisted', (event) => events.push(event));
389 |
390 | await orchestrator.persistState();
391 |
392 | expect(events).toHaveLength(1);
393 | expect(events[0].type).toBe('state:persisted');
394 | });
395 |
396 | it('should auto-persist after each transition when enabled', () => {
397 | const persistMock = vi.fn();
398 | orchestrator.enableAutoPersist(persistMock);
399 |
400 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
401 | expect(persistMock).toHaveBeenCalledTimes(1);
402 |
403 | orchestrator.transition({
404 | type: 'BRANCH_CREATED',
405 | branchName: 'feature/test'
406 | });
407 | expect(persistMock).toHaveBeenCalledTimes(2);
408 | });
409 |
410 | it('should not auto-persist when disabled', () => {
411 | const persistMock = vi.fn();
412 | orchestrator.enableAutoPersist(persistMock);
413 | orchestrator.disableAutoPersist();
414 |
415 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
416 |
417 | expect(persistMock).not.toHaveBeenCalled();
418 | });
419 |
420 | it('should serialize state with all context data', () => {
421 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
422 | orchestrator.transition({
423 | type: 'BRANCH_CREATED',
424 | branchName: 'feature/test'
425 | });
426 |
427 | const state = orchestrator.getState();
428 |
429 | expect(state.phase).toBe('SUBTASK_LOOP');
430 | expect(state.context.branchName).toBe('feature/test');
431 | expect(state.context.currentTDDPhase).toBe('RED');
432 | expect(state.context.taskId).toBe('task-1');
433 | });
434 | });
435 |
436 | describe('Phase Transition Guards and Validation', () => {
437 | it('should enforce guard conditions on transitions', () => {
438 | // Create orchestrator with guard condition that should fail
439 | const guardedContext: WorkflowContext = {
440 | taskId: 'task-1',
441 | subtasks: [],
442 | currentSubtaskIndex: 0,
443 | errors: [],
444 | metadata: { guardTest: true }
445 | };
446 |
447 | const guardedOrchestrator = new WorkflowOrchestrator(guardedContext);
448 |
449 | // Add guard that checks for subtasks (should fail since we have no subtasks)
450 | guardedOrchestrator.addGuard('SUBTASK_LOOP', (context) => {
451 | return context.subtasks.length > 0;
452 | });
453 |
454 | guardedOrchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
455 |
456 | expect(() => {
457 | guardedOrchestrator.transition({
458 | type: 'BRANCH_CREATED',
459 | branchName: 'feature/test'
460 | });
461 | }).toThrow('Guard condition failed');
462 | });
463 |
464 | it('should allow transition when guard condition passes', () => {
465 | const guardedContext: WorkflowContext = {
466 | taskId: 'task-1',
467 | subtasks: [
468 | { id: '1.1', title: 'Test', status: 'pending', attempts: 0 }
469 | ],
470 | currentSubtaskIndex: 0,
471 | errors: [],
472 | metadata: {}
473 | };
474 |
475 | const guardedOrchestrator = new WorkflowOrchestrator(guardedContext);
476 |
477 | guardedOrchestrator.addGuard('SUBTASK_LOOP', (context) => {
478 | return context.subtasks.length > 0;
479 | });
480 |
481 | guardedOrchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
482 | guardedOrchestrator.transition({
483 | type: 'BRANCH_CREATED',
484 | branchName: 'feature/test'
485 | });
486 |
487 | expect(guardedOrchestrator.getCurrentPhase()).toBe('SUBTASK_LOOP');
488 | });
489 |
490 | it('should validate test results before GREEN phase transition', () => {
491 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
492 | orchestrator.transition({
493 | type: 'BRANCH_CREATED',
494 | branchName: 'feature/test'
495 | });
496 |
497 | // Attempt to transition to GREEN without test results
498 | expect(() => {
499 | orchestrator.transition({ type: 'RED_PHASE_COMPLETE' });
500 | }).toThrow('Test results required');
501 | });
502 |
503 | it('should validate RED phase test results have failures', () => {
504 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
505 | orchestrator.transition({
506 | type: 'BRANCH_CREATED',
507 | branchName: 'feature/test'
508 | });
509 |
510 | // Provide passing test results (should fail RED phase validation)
511 | expect(() => {
512 | orchestrator.transition({
513 | type: 'RED_PHASE_COMPLETE',
514 | testResults: {
515 | total: 5,
516 | passed: 5,
517 | failed: 0,
518 | skipped: 0,
519 | phase: 'RED'
520 | }
521 | });
522 | }).toThrow('RED phase must have at least one failing test');
523 | });
524 |
525 | it('should allow RED to GREEN transition with valid failing tests', () => {
526 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
527 | orchestrator.transition({
528 | type: 'BRANCH_CREATED',
529 | branchName: 'feature/test'
530 | });
531 |
532 | orchestrator.transition({
533 | type: 'RED_PHASE_COMPLETE',
534 | testResults: {
535 | total: 5,
536 | passed: 0,
537 | failed: 5,
538 | skipped: 0,
539 | phase: 'RED'
540 | }
541 | });
542 |
543 | expect(orchestrator.getCurrentTDDPhase()).toBe('GREEN');
544 | });
545 |
546 | it('should validate GREEN phase test results have no failures', () => {
547 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
548 | orchestrator.transition({
549 | type: 'BRANCH_CREATED',
550 | branchName: 'feature/test'
551 | });
552 |
553 | orchestrator.transition({
554 | type: 'RED_PHASE_COMPLETE',
555 | testResults: {
556 | total: 5,
557 | passed: 0,
558 | failed: 5,
559 | skipped: 0,
560 | phase: 'RED'
561 | }
562 | });
563 |
564 | // Provide test results with failures (should fail GREEN phase validation)
565 | expect(() => {
566 | orchestrator.transition({
567 | type: 'GREEN_PHASE_COMPLETE',
568 | testResults: {
569 | total: 5,
570 | passed: 3,
571 | failed: 2,
572 | skipped: 0,
573 | phase: 'GREEN'
574 | }
575 | });
576 | }).toThrow('GREEN phase must have zero failures');
577 | });
578 |
579 | it('should allow GREEN to COMMIT transition with all tests passing', () => {
580 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
581 | orchestrator.transition({
582 | type: 'BRANCH_CREATED',
583 | branchName: 'feature/test'
584 | });
585 |
586 | orchestrator.transition({
587 | type: 'RED_PHASE_COMPLETE',
588 | testResults: {
589 | total: 5,
590 | passed: 0,
591 | failed: 5,
592 | skipped: 0,
593 | phase: 'RED'
594 | }
595 | });
596 |
597 | orchestrator.transition({
598 | type: 'GREEN_PHASE_COMPLETE',
599 | testResults: {
600 | total: 5,
601 | passed: 5,
602 | failed: 0,
603 | skipped: 0,
604 | phase: 'GREEN'
605 | }
606 | });
607 |
608 | expect(orchestrator.getCurrentTDDPhase()).toBe('COMMIT');
609 | });
610 |
611 | it('should store test results in context', () => {
612 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
613 | orchestrator.transition({
614 | type: 'BRANCH_CREATED',
615 | branchName: 'feature/test'
616 | });
617 |
618 | const redResults = {
619 | total: 5,
620 | passed: 0,
621 | failed: 5,
622 | skipped: 0,
623 | phase: 'RED' as const
624 | };
625 |
626 | orchestrator.transition({
627 | type: 'RED_PHASE_COMPLETE',
628 | testResults: redResults
629 | });
630 |
631 | const context = orchestrator.getContext();
632 | expect(context.lastTestResults).toEqual(redResults);
633 | });
634 |
635 | it('should validate git repository state before BRANCH_SETUP', () => {
636 | // Set up orchestrator with git validation enabled
637 | const gitContext: WorkflowContext = {
638 | taskId: 'task-1',
639 | subtasks: [
640 | { id: '1.1', title: 'Test', status: 'pending', attempts: 0 }
641 | ],
642 | currentSubtaskIndex: 0,
643 | errors: [],
644 | metadata: { requireGit: false }
645 | };
646 |
647 | const gitOrchestrator = new WorkflowOrchestrator(gitContext);
648 |
649 | // Guard that requires git to be true (but it's false)
650 | gitOrchestrator.addGuard('BRANCH_SETUP', (context) => {
651 | return context.metadata.requireGit === true;
652 | });
653 |
654 | expect(() => {
655 | gitOrchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
656 | }).toThrow('Guard condition failed');
657 | });
658 | });
659 |
660 | describe('Subtask Iteration and Progress Tracking', () => {
661 | beforeEach(() => {
662 | // Navigate to SUBTASK_LOOP for all tests
663 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
664 | orchestrator.transition({
665 | type: 'BRANCH_CREATED',
666 | branchName: 'feature/test'
667 | });
668 | });
669 |
670 | it('should return current subtask', () => {
671 | const currentSubtask = orchestrator.getCurrentSubtask();
672 | expect(currentSubtask).toBeDefined();
673 | expect(currentSubtask?.id).toBe('1.1');
674 | expect(currentSubtask?.title).toBe('Subtask 1');
675 | });
676 |
677 | it('should return undefined when no current subtask', () => {
678 | // Complete all subtasks
679 | orchestrator.transition({
680 | type: 'RED_PHASE_COMPLETE',
681 | testResults: {
682 | total: 5,
683 | passed: 0,
684 | failed: 5,
685 | skipped: 0,
686 | phase: 'RED'
687 | }
688 | });
689 | orchestrator.transition({
690 | type: 'GREEN_PHASE_COMPLETE',
691 | testResults: {
692 | total: 5,
693 | passed: 5,
694 | failed: 0,
695 | skipped: 0,
696 | phase: 'GREEN'
697 | }
698 | });
699 | orchestrator.transition({ type: 'COMMIT_COMPLETE' });
700 | orchestrator.transition({ type: 'SUBTASK_COMPLETE' });
701 |
702 | orchestrator.transition({
703 | type: 'RED_PHASE_COMPLETE',
704 | testResults: {
705 | total: 5,
706 | passed: 0,
707 | failed: 5,
708 | skipped: 0,
709 | phase: 'RED'
710 | }
711 | });
712 | orchestrator.transition({
713 | type: 'GREEN_PHASE_COMPLETE',
714 | testResults: {
715 | total: 5,
716 | passed: 5,
717 | failed: 0,
718 | skipped: 0,
719 | phase: 'GREEN'
720 | }
721 | });
722 | orchestrator.transition({ type: 'COMMIT_COMPLETE' });
723 | orchestrator.transition({ type: 'SUBTASK_COMPLETE' });
724 |
725 | const currentSubtask = orchestrator.getCurrentSubtask();
726 | expect(currentSubtask).toBeUndefined();
727 | });
728 |
729 | it('should calculate workflow progress', () => {
730 | const progress = orchestrator.getProgress();
731 | expect(progress.completed).toBe(0);
732 | expect(progress.total).toBe(2);
733 | expect(progress.current).toBe(1);
734 | expect(progress.percentage).toBe(0);
735 | });
736 |
737 | it('should update progress as subtasks complete', () => {
738 | // Complete first subtask
739 | orchestrator.transition({
740 | type: 'RED_PHASE_COMPLETE',
741 | testResults: {
742 | total: 5,
743 | passed: 0,
744 | failed: 5,
745 | skipped: 0,
746 | phase: 'RED'
747 | }
748 | });
749 | orchestrator.transition({
750 | type: 'GREEN_PHASE_COMPLETE',
751 | testResults: {
752 | total: 5,
753 | passed: 5,
754 | failed: 0,
755 | skipped: 0,
756 | phase: 'GREEN'
757 | }
758 | });
759 | orchestrator.transition({ type: 'COMMIT_COMPLETE' });
760 | orchestrator.transition({ type: 'SUBTASK_COMPLETE' });
761 |
762 | const progress = orchestrator.getProgress();
763 | expect(progress.completed).toBe(1);
764 | expect(progress.total).toBe(2);
765 | expect(progress.current).toBe(2);
766 | expect(progress.percentage).toBe(50);
767 | });
768 |
769 | it('should show 100% progress when all subtasks complete', () => {
770 | // Complete all subtasks
771 | for (let i = 0; i < 2; i++) {
772 | orchestrator.transition({
773 | type: 'RED_PHASE_COMPLETE',
774 | testResults: {
775 | total: 5,
776 | passed: 0,
777 | failed: 5,
778 | skipped: 0,
779 | phase: 'RED'
780 | }
781 | });
782 | orchestrator.transition({
783 | type: 'GREEN_PHASE_COMPLETE',
784 | testResults: {
785 | total: 5,
786 | passed: 5,
787 | failed: 0,
788 | skipped: 0,
789 | phase: 'GREEN'
790 | }
791 | });
792 | orchestrator.transition({ type: 'COMMIT_COMPLETE' });
793 | orchestrator.transition({ type: 'SUBTASK_COMPLETE' });
794 | }
795 |
796 | const progress = orchestrator.getProgress();
797 | expect(progress.completed).toBe(2);
798 | expect(progress.total).toBe(2);
799 | expect(progress.percentage).toBe(100);
800 | });
801 |
802 | it('should validate if can proceed to next phase', () => {
803 | // In RED phase - cannot proceed without completing TDD cycle
804 | expect(orchestrator.canProceed()).toBe(false);
805 |
806 | // Complete RED phase
807 | orchestrator.transition({
808 | type: 'RED_PHASE_COMPLETE',
809 | testResults: {
810 | total: 5,
811 | passed: 0,
812 | failed: 5,
813 | skipped: 0,
814 | phase: 'RED'
815 | }
816 | });
817 |
818 | // In GREEN phase - still cannot proceed
819 | expect(orchestrator.canProceed()).toBe(false);
820 |
821 | // Complete GREEN phase
822 | orchestrator.transition({
823 | type: 'GREEN_PHASE_COMPLETE',
824 | testResults: {
825 | total: 5,
826 | passed: 5,
827 | failed: 0,
828 | skipped: 0,
829 | phase: 'GREEN'
830 | }
831 | });
832 |
833 | // In COMMIT phase - still cannot proceed
834 | expect(orchestrator.canProceed()).toBe(false);
835 |
836 | // Complete COMMIT phase
837 | orchestrator.transition({ type: 'COMMIT_COMPLETE' });
838 |
839 | // Subtask complete - can proceed
840 | expect(orchestrator.canProceed()).toBe(true);
841 | });
842 |
843 | it('should track subtask attempts', () => {
844 | const context = orchestrator.getContext();
845 | expect(context.subtasks[0].attempts).toBe(0);
846 |
847 | // Increment attempt on starting RED phase
848 | orchestrator.incrementAttempts();
849 | expect(orchestrator.getContext().subtasks[0].attempts).toBe(1);
850 | });
851 |
852 | it('should enforce max attempts limit', () => {
853 | // Set max attempts to 3
854 | const limitedContext: WorkflowContext = {
855 | taskId: 'task-1',
856 | subtasks: [
857 | {
858 | id: '1.1',
859 | title: 'Subtask 1',
860 | status: 'pending',
861 | attempts: 0,
862 | maxAttempts: 3
863 | }
864 | ],
865 | currentSubtaskIndex: 0,
866 | errors: [],
867 | metadata: {}
868 | };
869 |
870 | const limitedOrchestrator = new WorkflowOrchestrator(limitedContext);
871 | limitedOrchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
872 | limitedOrchestrator.transition({
873 | type: 'BRANCH_CREATED',
874 | branchName: 'feature/test'
875 | });
876 |
877 | // Increment attempts to max
878 | for (let i = 0; i < 3; i++) {
879 | limitedOrchestrator.incrementAttempts();
880 | }
881 |
882 | expect(limitedOrchestrator.hasExceededMaxAttempts()).toBe(false);
883 |
884 | // One more attempt should exceed
885 | limitedOrchestrator.incrementAttempts();
886 | expect(limitedOrchestrator.hasExceededMaxAttempts()).toBe(true);
887 | });
888 |
889 | it('should allow unlimited attempts when maxAttempts is undefined', () => {
890 | for (let i = 0; i < 100; i++) {
891 | orchestrator.incrementAttempts();
892 | }
893 |
894 | expect(orchestrator.hasExceededMaxAttempts()).toBe(false);
895 | });
896 |
897 | it('should emit progress events on subtask completion', () => {
898 | const events: WorkflowEventData[] = [];
899 | orchestrator.on('progress:updated', (event) => events.push(event));
900 |
901 | // Complete first subtask
902 | orchestrator.transition({
903 | type: 'RED_PHASE_COMPLETE',
904 | testResults: {
905 | total: 5,
906 | passed: 0,
907 | failed: 5,
908 | skipped: 0,
909 | phase: 'RED'
910 | }
911 | });
912 | orchestrator.transition({
913 | type: 'GREEN_PHASE_COMPLETE',
914 | testResults: {
915 | total: 5,
916 | passed: 5,
917 | failed: 0,
918 | skipped: 0,
919 | phase: 'GREEN'
920 | }
921 | });
922 | orchestrator.transition({ type: 'COMMIT_COMPLETE' });
923 | orchestrator.transition({ type: 'SUBTASK_COMPLETE' });
924 |
925 | expect(events).toHaveLength(1);
926 | expect(events[0].type).toBe('progress:updated');
927 | expect(events[0].data?.completed).toBe(1);
928 | expect(events[0].data?.total).toBe(2);
929 | });
930 | });
931 |
932 | describe('Error Handling and Recovery', () => {
933 | beforeEach(() => {
934 | // Navigate to SUBTASK_LOOP for all tests
935 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
936 | orchestrator.transition({
937 | type: 'BRANCH_CREATED',
938 | branchName: 'feature/test'
939 | });
940 | });
941 |
942 | it('should handle errors with ERROR event', () => {
943 | const error: WorkflowError = {
944 | phase: 'SUBTASK_LOOP',
945 | message: 'Test execution failed',
946 | timestamp: new Date(),
947 | recoverable: true
948 | };
949 |
950 | orchestrator.transition({ type: 'ERROR', error });
951 |
952 | const context = orchestrator.getContext();
953 | expect(context.errors).toHaveLength(1);
954 | expect(context.errors[0].message).toBe('Test execution failed');
955 | });
956 |
957 | it('should emit error:occurred event', () => {
958 | const events: WorkflowEventData[] = [];
959 | orchestrator.on('error:occurred', (event) => events.push(event));
960 |
961 | const error: WorkflowError = {
962 | phase: 'SUBTASK_LOOP',
963 | message: 'Test execution failed',
964 | timestamp: new Date(),
965 | recoverable: true
966 | };
967 |
968 | orchestrator.transition({ type: 'ERROR', error });
969 |
970 | expect(events).toHaveLength(1);
971 | expect(events[0].type).toBe('error:occurred');
972 | expect(events[0].data?.error).toEqual(error);
973 | });
974 |
975 | it('should support retry attempts', () => {
976 | const currentSubtask = orchestrator.getCurrentSubtask();
977 | expect(currentSubtask?.attempts).toBe(0);
978 |
979 | // Simulate failed attempt
980 | orchestrator.incrementAttempts();
981 | orchestrator.retryCurrentSubtask();
982 |
983 | const context = orchestrator.getContext();
984 | expect(context.currentTDDPhase).toBe('RED');
985 | expect(context.subtasks[0].attempts).toBe(1);
986 | });
987 |
988 | it('should mark subtask as failed when max attempts exceeded', () => {
989 | const limitedContext: WorkflowContext = {
990 | taskId: 'task-1',
991 | subtasks: [
992 | {
993 | id: '1.1',
994 | title: 'Subtask 1',
995 | status: 'pending',
996 | attempts: 0,
997 | maxAttempts: 2
998 | }
999 | ],
1000 | currentSubtaskIndex: 0,
1001 | errors: [],
1002 | metadata: {}
1003 | };
1004 |
1005 | const limitedOrchestrator = new WorkflowOrchestrator(limitedContext);
1006 | limitedOrchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
1007 | limitedOrchestrator.transition({
1008 | type: 'BRANCH_CREATED',
1009 | branchName: 'feature/test'
1010 | });
1011 |
1012 | // Exceed max attempts
1013 | for (let i = 0; i < 3; i++) {
1014 | limitedOrchestrator.incrementAttempts();
1015 | }
1016 |
1017 | limitedOrchestrator.handleMaxAttemptsExceeded();
1018 |
1019 | const context = limitedOrchestrator.getContext();
1020 | expect(context.subtasks[0].status).toBe('failed');
1021 | });
1022 |
1023 | it('should emit subtask:failed event when max attempts exceeded', () => {
1024 | const events: WorkflowEventData[] = [];
1025 | orchestrator.on('subtask:failed', (event) => events.push(event));
1026 |
1027 | const limitedContext: WorkflowContext = {
1028 | taskId: 'task-1',
1029 | subtasks: [
1030 | {
1031 | id: '1.1',
1032 | title: 'Subtask 1',
1033 | status: 'pending',
1034 | attempts: 0,
1035 | maxAttempts: 2
1036 | }
1037 | ],
1038 | currentSubtaskIndex: 0,
1039 | errors: [],
1040 | metadata: {}
1041 | };
1042 |
1043 | const limitedOrchestrator = new WorkflowOrchestrator(limitedContext);
1044 | limitedOrchestrator.on('subtask:failed', (event) => events.push(event));
1045 |
1046 | limitedOrchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
1047 | limitedOrchestrator.transition({
1048 | type: 'BRANCH_CREATED',
1049 | branchName: 'feature/test'
1050 | });
1051 |
1052 | // Exceed max attempts
1053 | for (let i = 0; i < 3; i++) {
1054 | limitedOrchestrator.incrementAttempts();
1055 | }
1056 |
1057 | limitedOrchestrator.handleMaxAttemptsExceeded();
1058 |
1059 | expect(events).toHaveLength(1);
1060 | expect(events[0].type).toBe('subtask:failed');
1061 | });
1062 |
1063 | it('should support abort workflow', () => {
1064 | orchestrator.transition({ type: 'ABORT' });
1065 |
1066 | // Should still be in SUBTASK_LOOP but workflow should be aborted
1067 | expect(orchestrator.getCurrentPhase()).toBe('SUBTASK_LOOP');
1068 | expect(orchestrator.isAborted()).toBe(true);
1069 | });
1070 |
1071 | it('should prevent transitions after abort', () => {
1072 | orchestrator.transition({ type: 'ABORT' });
1073 |
1074 | expect(() => {
1075 | orchestrator.transition({
1076 | type: 'RED_PHASE_COMPLETE',
1077 | testResults: {
1078 | total: 5,
1079 | passed: 0,
1080 | failed: 5,
1081 | skipped: 0,
1082 | phase: 'RED'
1083 | }
1084 | });
1085 | }).toThrow('Workflow has been aborted');
1086 | });
1087 |
1088 | it('should allow retry after recoverable error', () => {
1089 | const error: WorkflowError = {
1090 | phase: 'SUBTASK_LOOP',
1091 | message: 'Temporary failure',
1092 | timestamp: new Date(),
1093 | recoverable: true
1094 | };
1095 |
1096 | orchestrator.transition({ type: 'ERROR', error });
1097 |
1098 | // Should be able to retry
1099 | expect(() => {
1100 | orchestrator.transition({ type: 'RETRY' });
1101 | }).not.toThrow();
1102 |
1103 | expect(orchestrator.getCurrentTDDPhase()).toBe('RED');
1104 | });
1105 |
1106 | it('should track error history in context', () => {
1107 | const error1: WorkflowError = {
1108 | phase: 'SUBTASK_LOOP',
1109 | message: 'Error 1',
1110 | timestamp: new Date(),
1111 | recoverable: true
1112 | };
1113 |
1114 | const error2: WorkflowError = {
1115 | phase: 'SUBTASK_LOOP',
1116 | message: 'Error 2',
1117 | timestamp: new Date(),
1118 | recoverable: false
1119 | };
1120 |
1121 | orchestrator.transition({ type: 'ERROR', error: error1 });
1122 | orchestrator.transition({ type: 'RETRY' });
1123 | orchestrator.transition({ type: 'ERROR', error: error2 });
1124 |
1125 | const context = orchestrator.getContext();
1126 | expect(context.errors).toHaveLength(2);
1127 | expect(context.errors[0].message).toBe('Error 1');
1128 | expect(context.errors[1].message).toBe('Error 2');
1129 | });
1130 | });
1131 |
1132 | describe('Resume Functionality from Checkpoints', () => {
1133 | it('should restore state from checkpoint', () => {
1134 | // Advance to SUBTASK_LOOP and complete first subtask
1135 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
1136 | orchestrator.transition({
1137 | type: 'BRANCH_CREATED',
1138 | branchName: 'feature/test'
1139 | });
1140 | orchestrator.transition({
1141 | type: 'RED_PHASE_COMPLETE',
1142 | testResults: {
1143 | total: 5,
1144 | passed: 0,
1145 | failed: 5,
1146 | skipped: 0,
1147 | phase: 'RED'
1148 | }
1149 | });
1150 | orchestrator.transition({
1151 | type: 'GREEN_PHASE_COMPLETE',
1152 | testResults: {
1153 | total: 5,
1154 | passed: 5,
1155 | failed: 0,
1156 | skipped: 0,
1157 | phase: 'GREEN'
1158 | }
1159 | });
1160 | orchestrator.transition({ type: 'COMMIT_COMPLETE' });
1161 | orchestrator.transition({ type: 'SUBTASK_COMPLETE' });
1162 |
1163 | // Save state
1164 | const state = orchestrator.getState();
1165 |
1166 | // Create new orchestrator and restore
1167 | const restored = new WorkflowOrchestrator(state.context);
1168 | restored.restoreState(state);
1169 |
1170 | expect(restored.getCurrentPhase()).toBe('SUBTASK_LOOP');
1171 | expect(restored.getContext().currentSubtaskIndex).toBe(1);
1172 | expect(restored.getContext().branchName).toBe('feature/test');
1173 | });
1174 |
1175 | it('should resume from mid-TDD cycle', () => {
1176 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
1177 | orchestrator.transition({
1178 | type: 'BRANCH_CREATED',
1179 | branchName: 'feature/test'
1180 | });
1181 | orchestrator.transition({
1182 | type: 'RED_PHASE_COMPLETE',
1183 | testResults: {
1184 | total: 5,
1185 | passed: 0,
1186 | failed: 5,
1187 | skipped: 0,
1188 | phase: 'RED'
1189 | }
1190 | });
1191 |
1192 | // Save state in GREEN phase
1193 | const state = orchestrator.getState();
1194 |
1195 | // Restore and verify in GREEN phase
1196 | const restored = new WorkflowOrchestrator(state.context);
1197 | restored.restoreState(state);
1198 |
1199 | expect(restored.getCurrentPhase()).toBe('SUBTASK_LOOP');
1200 | expect(restored.getCurrentTDDPhase()).toBe('GREEN');
1201 | });
1202 |
1203 | it('should validate restored state integrity', () => {
1204 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
1205 | orchestrator.transition({
1206 | type: 'BRANCH_CREATED',
1207 | branchName: 'feature/test'
1208 | });
1209 |
1210 | const state = orchestrator.getState();
1211 |
1212 | // Validate state structure
1213 | expect(orchestrator.canResumeFromState(state)).toBe(true);
1214 | });
1215 |
1216 | it('should reject invalid checkpoint state', () => {
1217 | const invalidState = {
1218 | phase: 'INVALID_PHASE' as WorkflowPhase,
1219 | context: initialContext
1220 | };
1221 |
1222 | expect(orchestrator.canResumeFromState(invalidState)).toBe(false);
1223 | });
1224 |
1225 | it('should preserve subtask attempts on resume', () => {
1226 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
1227 | orchestrator.transition({
1228 | type: 'BRANCH_CREATED',
1229 | branchName: 'feature/test'
1230 | });
1231 |
1232 | // Increment attempts
1233 | orchestrator.incrementAttempts();
1234 | orchestrator.incrementAttempts();
1235 |
1236 | const state = orchestrator.getState();
1237 |
1238 | // Restore
1239 | const restored = new WorkflowOrchestrator(state.context);
1240 | restored.restoreState(state);
1241 |
1242 | const currentSubtask = restored.getCurrentSubtask();
1243 | expect(currentSubtask?.attempts).toBe(2);
1244 | });
1245 |
1246 | it('should preserve errors on resume', () => {
1247 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
1248 | orchestrator.transition({
1249 | type: 'BRANCH_CREATED',
1250 | branchName: 'feature/test'
1251 | });
1252 |
1253 | const error: WorkflowError = {
1254 | phase: 'SUBTASK_LOOP',
1255 | message: 'Test error',
1256 | timestamp: new Date(),
1257 | recoverable: true
1258 | };
1259 |
1260 | orchestrator.transition({ type: 'ERROR', error });
1261 |
1262 | const state = orchestrator.getState();
1263 |
1264 | // Restore
1265 | const restored = new WorkflowOrchestrator(state.context);
1266 | restored.restoreState(state);
1267 |
1268 | expect(restored.getContext().errors).toHaveLength(1);
1269 | expect(restored.getContext().errors[0].message).toBe('Test error');
1270 | });
1271 |
1272 | it('should preserve completed subtask statuses on resume', () => {
1273 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
1274 | orchestrator.transition({
1275 | type: 'BRANCH_CREATED',
1276 | branchName: 'feature/test'
1277 | });
1278 |
1279 | // Complete first subtask
1280 | orchestrator.transition({
1281 | type: 'RED_PHASE_COMPLETE',
1282 | testResults: {
1283 | total: 5,
1284 | passed: 0,
1285 | failed: 5,
1286 | skipped: 0,
1287 | phase: 'RED'
1288 | }
1289 | });
1290 | orchestrator.transition({
1291 | type: 'GREEN_PHASE_COMPLETE',
1292 | testResults: {
1293 | total: 5,
1294 | passed: 5,
1295 | failed: 0,
1296 | skipped: 0,
1297 | phase: 'GREEN'
1298 | }
1299 | });
1300 | orchestrator.transition({ type: 'COMMIT_COMPLETE' });
1301 | orchestrator.transition({ type: 'SUBTASK_COMPLETE' });
1302 |
1303 | const state = orchestrator.getState();
1304 |
1305 | // Restore
1306 | const restored = new WorkflowOrchestrator(state.context);
1307 | restored.restoreState(state);
1308 |
1309 | const progress = restored.getProgress();
1310 | expect(progress.completed).toBe(1);
1311 | expect(progress.current).toBe(2);
1312 | });
1313 |
1314 | it('should emit workflow:resumed event on restore', () => {
1315 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
1316 | orchestrator.transition({
1317 | type: 'BRANCH_CREATED',
1318 | branchName: 'feature/test'
1319 | });
1320 |
1321 | const state = orchestrator.getState();
1322 |
1323 | // Create new orchestrator with event listener
1324 | const events: WorkflowEventData[] = [];
1325 | const restored = new WorkflowOrchestrator(state.context);
1326 | restored.on('workflow:resumed', (event) => events.push(event));
1327 |
1328 | restored.restoreState(state);
1329 |
1330 | expect(events).toHaveLength(1);
1331 | expect(events[0].type).toBe('workflow:resumed');
1332 | expect(events[0].phase).toBe('SUBTASK_LOOP');
1333 | });
1334 |
1335 | it('should calculate correct progress after resume', () => {
1336 | // Complete first subtask
1337 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
1338 | orchestrator.transition({
1339 | type: 'BRANCH_CREATED',
1340 | branchName: 'feature/test'
1341 | });
1342 | orchestrator.transition({
1343 | type: 'RED_PHASE_COMPLETE',
1344 | testResults: {
1345 | total: 5,
1346 | passed: 0,
1347 | failed: 5,
1348 | skipped: 0,
1349 | phase: 'RED'
1350 | }
1351 | });
1352 | orchestrator.transition({
1353 | type: 'GREEN_PHASE_COMPLETE',
1354 | testResults: {
1355 | total: 5,
1356 | passed: 5,
1357 | failed: 0,
1358 | skipped: 0,
1359 | phase: 'GREEN'
1360 | }
1361 | });
1362 | orchestrator.transition({ type: 'COMMIT_COMPLETE' });
1363 | orchestrator.transition({ type: 'SUBTASK_COMPLETE' });
1364 |
1365 | const state = orchestrator.getState();
1366 |
1367 | // Restore and check progress
1368 | const restored = new WorkflowOrchestrator(state.context);
1369 | restored.restoreState(state);
1370 |
1371 | const progress = restored.getProgress();
1372 | expect(progress.completed).toBe(1);
1373 | expect(progress.total).toBe(2);
1374 | expect(progress.percentage).toBe(50);
1375 | });
1376 | });
1377 |
1378 | describe('Adapter Integration', () => {
1379 | let testValidator: TestResultValidator;
1380 |
1381 | beforeEach(() => {
1382 | testValidator = new TestResultValidator();
1383 | });
1384 |
1385 | it('should integrate with TestResultValidator', () => {
1386 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
1387 | orchestrator.transition({
1388 | type: 'BRANCH_CREATED',
1389 | branchName: 'feature/test'
1390 | });
1391 |
1392 | // Set validator
1393 | orchestrator.setTestResultValidator(testValidator);
1394 |
1395 | // Validator should be used internally
1396 | expect(orchestrator.hasTestResultValidator()).toBe(true);
1397 | });
1398 |
1399 | it('should use TestResultValidator to validate RED phase', () => {
1400 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
1401 | orchestrator.transition({
1402 | type: 'BRANCH_CREATED',
1403 | branchName: 'feature/test'
1404 | });
1405 |
1406 | orchestrator.setTestResultValidator(testValidator);
1407 |
1408 | // Should reject passing tests in RED phase
1409 | expect(() => {
1410 | orchestrator.transition({
1411 | type: 'RED_PHASE_COMPLETE',
1412 | testResults: {
1413 | total: 5,
1414 | passed: 5,
1415 | failed: 0,
1416 | skipped: 0,
1417 | phase: 'RED'
1418 | }
1419 | });
1420 | }).toThrow('RED phase must have at least one failing test');
1421 | });
1422 |
1423 | it('should use TestResultValidator to validate GREEN phase', () => {
1424 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
1425 | orchestrator.transition({
1426 | type: 'BRANCH_CREATED',
1427 | branchName: 'feature/test'
1428 | });
1429 |
1430 | orchestrator.setTestResultValidator(testValidator);
1431 |
1432 | orchestrator.transition({
1433 | type: 'RED_PHASE_COMPLETE',
1434 | testResults: {
1435 | total: 5,
1436 | passed: 0,
1437 | failed: 5,
1438 | skipped: 0,
1439 | phase: 'RED'
1440 | }
1441 | });
1442 |
1443 | // Should reject failing tests in GREEN phase
1444 | expect(() => {
1445 | orchestrator.transition({
1446 | type: 'GREEN_PHASE_COMPLETE',
1447 | testResults: {
1448 | total: 5,
1449 | passed: 3,
1450 | failed: 2,
1451 | skipped: 0,
1452 | phase: 'GREEN'
1453 | }
1454 | });
1455 | }).toThrow('GREEN phase must have zero failures');
1456 | });
1457 |
1458 | it('should support git adapter hooks', () => {
1459 | const gitOperations: string[] = [];
1460 |
1461 | orchestrator.onGitOperation((operation, data) => {
1462 | gitOperations.push(operation);
1463 | });
1464 |
1465 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
1466 | orchestrator.transition({
1467 | type: 'BRANCH_CREATED',
1468 | branchName: 'feature/test'
1469 | });
1470 |
1471 | // Verify git operation hook was called
1472 | expect(gitOperations).toContain('branch:created');
1473 | });
1474 |
1475 | it('should support executor adapter hooks', () => {
1476 | const executions: string[] = [];
1477 |
1478 | orchestrator.onExecute((command, context) => {
1479 | executions.push(command);
1480 | });
1481 |
1482 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
1483 | orchestrator.transition({
1484 | type: 'BRANCH_CREATED',
1485 | branchName: 'feature/test'
1486 | });
1487 |
1488 | orchestrator.executeCommand('run-tests');
1489 |
1490 | expect(executions).toContain('run-tests');
1491 | });
1492 |
1493 | it('should provide adapter context in events', () => {
1494 | const events: WorkflowEventData[] = [];
1495 | orchestrator.on('phase:entered', (event) => events.push(event));
1496 |
1497 | orchestrator.setTestResultValidator(testValidator);
1498 |
1499 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
1500 |
1501 | // Event should include adapter availability
1502 | expect(events[0].data?.adapters).toBeDefined();
1503 | });
1504 |
1505 | it('should allow adapter reconfiguration', () => {
1506 | orchestrator.setTestResultValidator(testValidator);
1507 | expect(orchestrator.hasTestResultValidator()).toBe(true);
1508 |
1509 | orchestrator.removeTestResultValidator();
1510 | expect(orchestrator.hasTestResultValidator()).toBe(false);
1511 | });
1512 |
1513 | it('should work without adapters (optional integration)', () => {
1514 | // Should work fine without adapters
1515 | orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
1516 | orchestrator.transition({
1517 | type: 'BRANCH_CREATED',
1518 | branchName: 'feature/test'
1519 | });
1520 |
1521 | expect(orchestrator.getCurrentPhase()).toBe('SUBTASK_LOOP');
1522 | });
1523 |
1524 | it('should emit adapter-related events', () => {
1525 | const events: WorkflowEventData[] = [];
1526 | orchestrator.on('adapter:configured', (event) => events.push(event));
1527 |
1528 | orchestrator.setTestResultValidator(testValidator);
1529 |
1530 | expect(events).toHaveLength(1);
1531 | expect(events[0].type).toBe('adapter:configured');
1532 | expect(events[0].data?.adapterType).toBe('test-validator');
1533 | });
1534 | });
1535 | });
1536 |
```
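The tests above exercise a checkpoint/resume pattern on `WorkflowOrchestrator`: `getState()` snapshots the orchestrator (phase, TDD sub-phase, errors, attempts, completed subtasks), `canResumeFromState()` guards against invalid snapshots, and `restoreState()` re-enters the saved phase while emitting a `workflow:resumed` event. A minimal sketch of how a caller might wire this up outside the test suite, assuming a hypothetical `loadCheckpoint` persistence helper and an illustrative import path (neither appears in the tests):

```typescript
// Sketch only: `loadCheckpoint` and the import path are assumptions, not part of
// the API exercised above. Only getState/canResumeFromState/restoreState and the
// 'workflow:resumed' event are taken from the tests.
import { WorkflowOrchestrator } from './workflow-orchestrator';

async function resumeWorkflow(loadCheckpoint: () => Promise<any>) {
  // A checkpoint is whatever orchestrator.getState() returned earlier.
  const state = await loadCheckpoint();

  const restored = new WorkflowOrchestrator(state.context);

  // Reject corrupt or incompatible checkpoints before mutating any state.
  if (!restored.canResumeFromState(state)) {
    throw new Error('Checkpoint is not resumable');
  }

  restored.on('workflow:resumed', (event) => {
    console.log(`Resumed in phase ${event.phase}`); // e.g. 'SUBTASK_LOOP'
  });

  // Errors, subtask attempts, and completed-subtask progress are preserved.
  restored.restoreState(state);
  return restored;
}
```

As the adapter-integration tests show, validators and hooks (`setTestResultValidator`, `onGitOperation`, `onExecute`) are optional and can be attached to the restored instance afterwards; the orchestrator works without them.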