This is page 11 of 50. Use http://codebase.md/eyaltoledano/claude-task-master?page={x} to view the full context.
# Directory Structure
```
├── .changeset
│ ├── config.json
│ └── README.md
├── .claude
│ ├── commands
│ │ └── dedupe.md
│ └── TM_COMMANDS_GUIDE.md
├── .claude-plugin
│ └── marketplace.json
├── .coderabbit.yaml
├── .cursor
│ ├── mcp.json
│ └── rules
│ ├── ai_providers.mdc
│ ├── ai_services.mdc
│ ├── architecture.mdc
│ ├── changeset.mdc
│ ├── commands.mdc
│ ├── context_gathering.mdc
│ ├── cursor_rules.mdc
│ ├── dependencies.mdc
│ ├── dev_workflow.mdc
│ ├── git_workflow.mdc
│ ├── glossary.mdc
│ ├── mcp.mdc
│ ├── new_features.mdc
│ ├── self_improve.mdc
│ ├── tags.mdc
│ ├── taskmaster.mdc
│ ├── tasks.mdc
│ ├── telemetry.mdc
│ ├── test_workflow.mdc
│ ├── tests.mdc
│ ├── ui.mdc
│ └── utilities.mdc
├── .cursorignore
├── .env.example
├── .github
│ ├── ISSUE_TEMPLATE
│ │ ├── bug_report.md
│ │ ├── enhancements---feature-requests.md
│ │ └── feedback.md
│ ├── PULL_REQUEST_TEMPLATE
│ │ ├── bugfix.md
│ │ ├── config.yml
│ │ ├── feature.md
│ │ └── integration.md
│ ├── PULL_REQUEST_TEMPLATE.md
│ ├── scripts
│ │ ├── auto-close-duplicates.mjs
│ │ ├── backfill-duplicate-comments.mjs
│ │ ├── check-pre-release-mode.mjs
│ │ ├── parse-metrics.mjs
│ │ ├── release.mjs
│ │ ├── tag-extension.mjs
│ │ ├── utils.mjs
│ │ └── validate-changesets.mjs
│ └── workflows
│ ├── auto-close-duplicates.yml
│ ├── backfill-duplicate-comments.yml
│ ├── ci.yml
│ ├── claude-dedupe-issues.yml
│ ├── claude-docs-trigger.yml
│ ├── claude-docs-updater.yml
│ ├── claude-issue-triage.yml
│ ├── claude.yml
│ ├── extension-ci.yml
│ ├── extension-release.yml
│ ├── log-issue-events.yml
│ ├── pre-release.yml
│ ├── release-check.yml
│ ├── release.yml
│ ├── update-models-md.yml
│ └── weekly-metrics-discord.yml
├── .gitignore
├── .kiro
│ ├── hooks
│ │ ├── tm-code-change-task-tracker.kiro.hook
│ │ ├── tm-complexity-analyzer.kiro.hook
│ │ ├── tm-daily-standup-assistant.kiro.hook
│ │ ├── tm-git-commit-task-linker.kiro.hook
│ │ ├── tm-pr-readiness-checker.kiro.hook
│ │ ├── tm-task-dependency-auto-progression.kiro.hook
│ │ └── tm-test-success-task-completer.kiro.hook
│ ├── settings
│ │ └── mcp.json
│ └── steering
│ ├── dev_workflow.md
│ ├── kiro_rules.md
│ ├── self_improve.md
│ ├── taskmaster_hooks_workflow.md
│ └── taskmaster.md
├── .manypkg.json
├── .mcp.json
├── .npmignore
├── .nvmrc
├── .taskmaster
│ ├── CLAUDE.md
│ ├── config.json
│ ├── docs
│ │ ├── autonomous-tdd-git-workflow.md
│ │ ├── MIGRATION-ROADMAP.md
│ │ ├── prd-tm-start.txt
│ │ ├── prd.txt
│ │ ├── README.md
│ │ ├── research
│ │ │ ├── 2025-06-14_how-can-i-improve-the-scope-up-and-scope-down-comm.md
│ │ │ ├── 2025-06-14_should-i-be-using-any-specific-libraries-for-this.md
│ │ │ ├── 2025-06-14_test-save-functionality.md
│ │ │ ├── 2025-06-14_test-the-fix-for-duplicate-saves-final-test.md
│ │ │ └── 2025-08-01_do-we-need-to-add-new-commands-or-can-we-just-weap.md
│ │ ├── task-template-importing-prd.txt
│ │ ├── tdd-workflow-phase-0-spike.md
│ │ ├── tdd-workflow-phase-1-core-rails.md
│ │ ├── tdd-workflow-phase-1-orchestrator.md
│ │ ├── tdd-workflow-phase-2-pr-resumability.md
│ │ ├── tdd-workflow-phase-3-extensibility-guardrails.md
│ │ ├── test-prd.txt
│ │ └── tm-core-phase-1.txt
│ ├── reports
│ │ ├── task-complexity-report_autonomous-tdd-git-workflow.json
│ │ ├── task-complexity-report_cc-kiro-hooks.json
│ │ ├── task-complexity-report_tdd-phase-1-core-rails.json
│ │ ├── task-complexity-report_tdd-workflow-phase-0.json
│ │ ├── task-complexity-report_test-prd-tag.json
│ │ ├── task-complexity-report_tm-core-phase-1.json
│ │ ├── task-complexity-report.json
│ │ └── tm-core-complexity.json
│ ├── state.json
│ ├── tasks
│ │ ├── task_001_tm-start.txt
│ │ ├── task_002_tm-start.txt
│ │ ├── task_003_tm-start.txt
│ │ ├── task_004_tm-start.txt
│ │ ├── task_007_tm-start.txt
│ │ └── tasks.json
│ └── templates
│ ├── example_prd_rpg.md
│ └── example_prd.md
├── .vscode
│ ├── extensions.json
│ └── settings.json
├── apps
│ ├── cli
│ │ ├── CHANGELOG.md
│ │ ├── package.json
│ │ ├── src
│ │ │ ├── command-registry.ts
│ │ │ ├── commands
│ │ │ │ ├── auth.command.ts
│ │ │ │ ├── autopilot
│ │ │ │ │ ├── abort.command.ts
│ │ │ │ │ ├── commit.command.ts
│ │ │ │ │ ├── complete.command.ts
│ │ │ │ │ ├── index.ts
│ │ │ │ │ ├── next.command.ts
│ │ │ │ │ ├── resume.command.ts
│ │ │ │ │ ├── shared.ts
│ │ │ │ │ ├── start.command.ts
│ │ │ │ │ └── status.command.ts
│ │ │ │ ├── briefs.command.ts
│ │ │ │ ├── context.command.ts
│ │ │ │ ├── export.command.ts
│ │ │ │ ├── list.command.ts
│ │ │ │ ├── models
│ │ │ │ │ ├── custom-providers.ts
│ │ │ │ │ ├── fetchers.ts
│ │ │ │ │ ├── index.ts
│ │ │ │ │ ├── prompts.ts
│ │ │ │ │ ├── setup.ts
│ │ │ │ │ └── types.ts
│ │ │ │ ├── next.command.ts
│ │ │ │ ├── set-status.command.ts
│ │ │ │ ├── show.command.ts
│ │ │ │ ├── start.command.ts
│ │ │ │ └── tags.command.ts
│ │ │ ├── index.ts
│ │ │ ├── lib
│ │ │ │ └── model-management.ts
│ │ │ ├── types
│ │ │ │ └── tag-management.d.ts
│ │ │ ├── ui
│ │ │ │ ├── components
│ │ │ │ │ ├── cardBox.component.ts
│ │ │ │ │ ├── dashboard.component.ts
│ │ │ │ │ ├── header.component.ts
│ │ │ │ │ ├── index.ts
│ │ │ │ │ ├── next-task.component.ts
│ │ │ │ │ ├── suggested-steps.component.ts
│ │ │ │ │ └── task-detail.component.ts
│ │ │ │ ├── display
│ │ │ │ │ ├── messages.ts
│ │ │ │ │ └── tables.ts
│ │ │ │ ├── formatters
│ │ │ │ │ ├── complexity-formatters.ts
│ │ │ │ │ ├── dependency-formatters.ts
│ │ │ │ │ ├── priority-formatters.ts
│ │ │ │ │ ├── status-formatters.spec.ts
│ │ │ │ │ └── status-formatters.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── layout
│ │ │ │ ├── helpers.spec.ts
│ │ │ │ └── helpers.ts
│ │ │ └── utils
│ │ │ ├── auth-helpers.ts
│ │ │ ├── auto-update.ts
│ │ │ ├── brief-selection.ts
│ │ │ ├── display-helpers.ts
│ │ │ ├── error-handler.ts
│ │ │ ├── index.ts
│ │ │ ├── project-root.ts
│ │ │ ├── task-status.ts
│ │ │ ├── ui.spec.ts
│ │ │ └── ui.ts
│ │ ├── tests
│ │ │ ├── integration
│ │ │ │ └── commands
│ │ │ │ └── autopilot
│ │ │ │ └── workflow.test.ts
│ │ │ └── unit
│ │ │ ├── commands
│ │ │ │ ├── autopilot
│ │ │ │ │ └── shared.test.ts
│ │ │ │ ├── list.command.spec.ts
│ │ │ │ └── show.command.spec.ts
│ │ │ └── ui
│ │ │ └── dashboard.component.spec.ts
│ │ ├── tsconfig.json
│ │ └── vitest.config.ts
│ ├── docs
│ │ ├── archive
│ │ │ ├── ai-client-utils-example.mdx
│ │ │ ├── ai-development-workflow.mdx
│ │ │ ├── command-reference.mdx
│ │ │ ├── configuration.mdx
│ │ │ ├── cursor-setup.mdx
│ │ │ ├── examples.mdx
│ │ │ └── Installation.mdx
│ │ ├── best-practices
│ │ │ ├── advanced-tasks.mdx
│ │ │ ├── configuration-advanced.mdx
│ │ │ └── index.mdx
│ │ ├── capabilities
│ │ │ ├── cli-root-commands.mdx
│ │ │ ├── index.mdx
│ │ │ ├── mcp.mdx
│ │ │ ├── rpg-method.mdx
│ │ │ └── task-structure.mdx
│ │ ├── CHANGELOG.md
│ │ ├── command-reference.mdx
│ │ ├── configuration.mdx
│ │ ├── docs.json
│ │ ├── favicon.svg
│ │ ├── getting-started
│ │ │ ├── api-keys.mdx
│ │ │ ├── contribute.mdx
│ │ │ ├── faq.mdx
│ │ │ └── quick-start
│ │ │ ├── configuration-quick.mdx
│ │ │ ├── execute-quick.mdx
│ │ │ ├── installation.mdx
│ │ │ ├── moving-forward.mdx
│ │ │ ├── prd-quick.mdx
│ │ │ ├── quick-start.mdx
│ │ │ ├── requirements.mdx
│ │ │ ├── rules-quick.mdx
│ │ │ └── tasks-quick.mdx
│ │ ├── introduction.mdx
│ │ ├── licensing.md
│ │ ├── logo
│ │ │ ├── dark.svg
│ │ │ ├── light.svg
│ │ │ └── task-master-logo.png
│ │ ├── package.json
│ │ ├── README.md
│ │ ├── style.css
│ │ ├── tdd-workflow
│ │ │ ├── ai-agent-integration.mdx
│ │ │ └── quickstart.mdx
│ │ ├── vercel.json
│ │ └── whats-new.mdx
│ ├── extension
│ │ ├── .vscodeignore
│ │ ├── assets
│ │ │ ├── banner.png
│ │ │ ├── icon-dark.svg
│ │ │ ├── icon-light.svg
│ │ │ ├── icon.png
│ │ │ ├── screenshots
│ │ │ │ ├── kanban-board.png
│ │ │ │ └── task-details.png
│ │ │ └── sidebar-icon.svg
│ │ ├── CHANGELOG.md
│ │ ├── components.json
│ │ ├── docs
│ │ │ ├── extension-CI-setup.md
│ │ │ └── extension-development-guide.md
│ │ ├── esbuild.js
│ │ ├── LICENSE
│ │ ├── package.json
│ │ ├── package.mjs
│ │ ├── package.publish.json
│ │ ├── README.md
│ │ ├── src
│ │ │ ├── components
│ │ │ │ ├── ConfigView.tsx
│ │ │ │ ├── constants.ts
│ │ │ │ ├── TaskDetails
│ │ │ │ │ ├── AIActionsSection.tsx
│ │ │ │ │ ├── DetailsSection.tsx
│ │ │ │ │ ├── PriorityBadge.tsx
│ │ │ │ │ ├── SubtasksSection.tsx
│ │ │ │ │ ├── TaskMetadataSidebar.tsx
│ │ │ │ │ └── useTaskDetails.ts
│ │ │ │ ├── TaskDetailsView.tsx
│ │ │ │ ├── TaskMasterLogo.tsx
│ │ │ │ └── ui
│ │ │ │ ├── badge.tsx
│ │ │ │ ├── breadcrumb.tsx
│ │ │ │ ├── button.tsx
│ │ │ │ ├── card.tsx
│ │ │ │ ├── collapsible.tsx
│ │ │ │ ├── CollapsibleSection.tsx
│ │ │ │ ├── dropdown-menu.tsx
│ │ │ │ ├── label.tsx
│ │ │ │ ├── scroll-area.tsx
│ │ │ │ ├── separator.tsx
│ │ │ │ ├── shadcn-io
│ │ │ │ │ └── kanban
│ │ │ │ │ └── index.tsx
│ │ │ │ └── textarea.tsx
│ │ │ ├── extension.ts
│ │ │ ├── index.ts
│ │ │ ├── lib
│ │ │ │ └── utils.ts
│ │ │ ├── services
│ │ │ │ ├── config-service.ts
│ │ │ │ ├── error-handler.ts
│ │ │ │ ├── notification-preferences.ts
│ │ │ │ ├── polling-service.ts
│ │ │ │ ├── polling-strategies.ts
│ │ │ │ ├── sidebar-webview-manager.ts
│ │ │ │ ├── task-repository.ts
│ │ │ │ ├── terminal-manager.ts
│ │ │ │ └── webview-manager.ts
│ │ │ ├── test
│ │ │ │ └── extension.test.ts
│ │ │ ├── utils
│ │ │ │ ├── configManager.ts
│ │ │ │ ├── connectionManager.ts
│ │ │ │ ├── errorHandler.ts
│ │ │ │ ├── event-emitter.ts
│ │ │ │ ├── logger.ts
│ │ │ │ ├── mcpClient.ts
│ │ │ │ ├── notificationPreferences.ts
│ │ │ │ └── task-master-api
│ │ │ │ ├── cache
│ │ │ │ │ └── cache-manager.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── mcp-client.ts
│ │ │ │ ├── transformers
│ │ │ │ │ └── task-transformer.ts
│ │ │ │ └── types
│ │ │ │ └── index.ts
│ │ │ └── webview
│ │ │ ├── App.tsx
│ │ │ ├── components
│ │ │ │ ├── AppContent.tsx
│ │ │ │ ├── EmptyState.tsx
│ │ │ │ ├── ErrorBoundary.tsx
│ │ │ │ ├── PollingStatus.tsx
│ │ │ │ ├── PriorityBadge.tsx
│ │ │ │ ├── SidebarView.tsx
│ │ │ │ ├── TagDropdown.tsx
│ │ │ │ ├── TaskCard.tsx
│ │ │ │ ├── TaskEditModal.tsx
│ │ │ │ ├── TaskMasterKanban.tsx
│ │ │ │ ├── ToastContainer.tsx
│ │ │ │ └── ToastNotification.tsx
│ │ │ ├── constants
│ │ │ │ └── index.ts
│ │ │ ├── contexts
│ │ │ │ └── VSCodeContext.tsx
│ │ │ ├── hooks
│ │ │ │ ├── useTaskQueries.ts
│ │ │ │ ├── useVSCodeMessages.ts
│ │ │ │ └── useWebviewHeight.ts
│ │ │ ├── index.css
│ │ │ ├── index.tsx
│ │ │ ├── providers
│ │ │ │ └── QueryProvider.tsx
│ │ │ ├── reducers
│ │ │ │ └── appReducer.ts
│ │ │ ├── sidebar.tsx
│ │ │ ├── types
│ │ │ │ └── index.ts
│ │ │ └── utils
│ │ │ ├── logger.ts
│ │ │ └── toast.ts
│ │ └── tsconfig.json
│ └── mcp
│ ├── CHANGELOG.md
│ ├── package.json
│ ├── src
│ │ ├── index.ts
│ │ ├── shared
│ │ │ ├── types.ts
│ │ │ └── utils.ts
│ │ └── tools
│ │ ├── autopilot
│ │ │ ├── abort.tool.ts
│ │ │ ├── commit.tool.ts
│ │ │ ├── complete.tool.ts
│ │ │ ├── finalize.tool.ts
│ │ │ ├── index.ts
│ │ │ ├── next.tool.ts
│ │ │ ├── resume.tool.ts
│ │ │ ├── start.tool.ts
│ │ │ └── status.tool.ts
│ │ ├── README-ZOD-V3.md
│ │ └── tasks
│ │ ├── get-task.tool.ts
│ │ ├── get-tasks.tool.ts
│ │ └── index.ts
│ ├── tsconfig.json
│ └── vitest.config.ts
├── assets
│ ├── .windsurfrules
│ ├── AGENTS.md
│ ├── claude
│ │ └── TM_COMMANDS_GUIDE.md
│ ├── config.json
│ ├── env.example
│ ├── example_prd_rpg.txt
│ ├── example_prd.txt
│ ├── GEMINI.md
│ ├── gitignore
│ ├── kiro-hooks
│ │ ├── tm-code-change-task-tracker.kiro.hook
│ │ ├── tm-complexity-analyzer.kiro.hook
│ │ ├── tm-daily-standup-assistant.kiro.hook
│ │ ├── tm-git-commit-task-linker.kiro.hook
│ │ ├── tm-pr-readiness-checker.kiro.hook
│ │ ├── tm-task-dependency-auto-progression.kiro.hook
│ │ └── tm-test-success-task-completer.kiro.hook
│ ├── roocode
│ │ ├── .roo
│ │ │ ├── rules-architect
│ │ │ │ └── architect-rules
│ │ │ ├── rules-ask
│ │ │ │ └── ask-rules
│ │ │ ├── rules-code
│ │ │ │ └── code-rules
│ │ │ ├── rules-debug
│ │ │ │ └── debug-rules
│ │ │ ├── rules-orchestrator
│ │ │ │ └── orchestrator-rules
│ │ │ └── rules-test
│ │ │ └── test-rules
│ │ └── .roomodes
│ ├── rules
│ │ ├── cursor_rules.mdc
│ │ ├── dev_workflow.mdc
│ │ ├── self_improve.mdc
│ │ ├── taskmaster_hooks_workflow.mdc
│ │ └── taskmaster.mdc
│ └── scripts_README.md
├── bin
│ └── task-master.js
├── biome.json
├── CHANGELOG.md
├── CLAUDE_CODE_PLUGIN.md
├── CLAUDE.md
├── context
│ ├── chats
│ │ ├── add-task-dependencies-1.md
│ │ └── max-min-tokens.txt.md
│ ├── fastmcp-core.txt
│ ├── fastmcp-docs.txt
│ ├── MCP_INTEGRATION.md
│ ├── mcp-js-sdk-docs.txt
│ ├── mcp-protocol-repo.txt
│ ├── mcp-protocol-schema-03262025.json
│ └── mcp-protocol-spec.txt
├── CONTRIBUTING.md
├── docs
│ ├── claude-code-integration.md
│ ├── CLI-COMMANDER-PATTERN.md
│ ├── command-reference.md
│ ├── configuration.md
│ ├── contributor-docs
│ │ ├── testing-roo-integration.md
│ │ └── worktree-setup.md
│ ├── cross-tag-task-movement.md
│ ├── examples
│ │ ├── claude-code-usage.md
│ │ └── codex-cli-usage.md
│ ├── examples.md
│ ├── licensing.md
│ ├── mcp-provider-guide.md
│ ├── mcp-provider.md
│ ├── migration-guide.md
│ ├── models.md
│ ├── providers
│ │ ├── codex-cli.md
│ │ └── gemini-cli.md
│ ├── README.md
│ ├── scripts
│ │ └── models-json-to-markdown.js
│ ├── task-structure.md
│ └── tutorial.md
├── images
│ ├── hamster-hiring.png
│ └── logo.png
├── index.js
├── jest.config.js
├── jest.resolver.cjs
├── LICENSE
├── llms-install.md
├── mcp-server
│ ├── server.js
│ └── src
│ ├── core
│ │ ├── __tests__
│ │ │ └── context-manager.test.js
│ │ ├── context-manager.js
│ │ ├── direct-functions
│ │ │ ├── add-dependency.js
│ │ │ ├── add-subtask.js
│ │ │ ├── add-tag.js
│ │ │ ├── add-task.js
│ │ │ ├── analyze-task-complexity.js
│ │ │ ├── cache-stats.js
│ │ │ ├── clear-subtasks.js
│ │ │ ├── complexity-report.js
│ │ │ ├── copy-tag.js
│ │ │ ├── create-tag-from-branch.js
│ │ │ ├── delete-tag.js
│ │ │ ├── expand-all-tasks.js
│ │ │ ├── expand-task.js
│ │ │ ├── fix-dependencies.js
│ │ │ ├── generate-task-files.js
│ │ │ ├── initialize-project.js
│ │ │ ├── list-tags.js
│ │ │ ├── models.js
│ │ │ ├── move-task-cross-tag.js
│ │ │ ├── move-task.js
│ │ │ ├── next-task.js
│ │ │ ├── parse-prd.js
│ │ │ ├── remove-dependency.js
│ │ │ ├── remove-subtask.js
│ │ │ ├── remove-task.js
│ │ │ ├── rename-tag.js
│ │ │ ├── research.js
│ │ │ ├── response-language.js
│ │ │ ├── rules.js
│ │ │ ├── scope-down.js
│ │ │ ├── scope-up.js
│ │ │ ├── set-task-status.js
│ │ │ ├── update-subtask-by-id.js
│ │ │ ├── update-task-by-id.js
│ │ │ ├── update-tasks.js
│ │ │ ├── use-tag.js
│ │ │ └── validate-dependencies.js
│ │ ├── task-master-core.js
│ │ └── utils
│ │ ├── env-utils.js
│ │ └── path-utils.js
│ ├── custom-sdk
│ │ ├── errors.js
│ │ ├── index.js
│ │ ├── json-extractor.js
│ │ ├── language-model.js
│ │ ├── message-converter.js
│ │ └── schema-converter.js
│ ├── index.js
│ ├── logger.js
│ ├── providers
│ │ └── mcp-provider.js
│ └── tools
│ ├── add-dependency.js
│ ├── add-subtask.js
│ ├── add-tag.js
│ ├── add-task.js
│ ├── analyze.js
│ ├── clear-subtasks.js
│ ├── complexity-report.js
│ ├── copy-tag.js
│ ├── delete-tag.js
│ ├── expand-all.js
│ ├── expand-task.js
│ ├── fix-dependencies.js
│ ├── generate.js
│ ├── get-operation-status.js
│ ├── index.js
│ ├── initialize-project.js
│ ├── list-tags.js
│ ├── models.js
│ ├── move-task.js
│ ├── next-task.js
│ ├── parse-prd.js
│ ├── README-ZOD-V3.md
│ ├── remove-dependency.js
│ ├── remove-subtask.js
│ ├── remove-task.js
│ ├── rename-tag.js
│ ├── research.js
│ ├── response-language.js
│ ├── rules.js
│ ├── scope-down.js
│ ├── scope-up.js
│ ├── set-task-status.js
│ ├── tool-registry.js
│ ├── update-subtask.js
│ ├── update-task.js
│ ├── update.js
│ ├── use-tag.js
│ ├── utils.js
│ └── validate-dependencies.js
├── mcp-test.js
├── output.json
├── package-lock.json
├── package.json
├── packages
│ ├── ai-sdk-provider-grok-cli
│ │ ├── CHANGELOG.md
│ │ ├── package.json
│ │ ├── README.md
│ │ ├── src
│ │ │ ├── errors.test.ts
│ │ │ ├── errors.ts
│ │ │ ├── grok-cli-language-model.ts
│ │ │ ├── grok-cli-provider.test.ts
│ │ │ ├── grok-cli-provider.ts
│ │ │ ├── index.ts
│ │ │ ├── json-extractor.test.ts
│ │ │ ├── json-extractor.ts
│ │ │ ├── message-converter.test.ts
│ │ │ ├── message-converter.ts
│ │ │ └── types.ts
│ │ └── tsconfig.json
│ ├── build-config
│ │ ├── CHANGELOG.md
│ │ ├── package.json
│ │ ├── src
│ │ │ └── tsdown.base.ts
│ │ └── tsconfig.json
│ ├── claude-code-plugin
│ │ ├── .claude-plugin
│ │ │ └── plugin.json
│ │ ├── .gitignore
│ │ ├── agents
│ │ │ ├── task-checker.md
│ │ │ ├── task-executor.md
│ │ │ └── task-orchestrator.md
│ │ ├── CHANGELOG.md
│ │ ├── commands
│ │ │ ├── add-dependency.md
│ │ │ ├── add-subtask.md
│ │ │ ├── add-task.md
│ │ │ ├── analyze-complexity.md
│ │ │ ├── analyze-project.md
│ │ │ ├── auto-implement-tasks.md
│ │ │ ├── command-pipeline.md
│ │ │ ├── complexity-report.md
│ │ │ ├── convert-task-to-subtask.md
│ │ │ ├── expand-all-tasks.md
│ │ │ ├── expand-task.md
│ │ │ ├── fix-dependencies.md
│ │ │ ├── generate-tasks.md
│ │ │ ├── help.md
│ │ │ ├── init-project-quick.md
│ │ │ ├── init-project.md
│ │ │ ├── install-taskmaster.md
│ │ │ ├── learn.md
│ │ │ ├── list-tasks-by-status.md
│ │ │ ├── list-tasks-with-subtasks.md
│ │ │ ├── list-tasks.md
│ │ │ ├── next-task.md
│ │ │ ├── parse-prd-with-research.md
│ │ │ ├── parse-prd.md
│ │ │ ├── project-status.md
│ │ │ ├── quick-install-taskmaster.md
│ │ │ ├── remove-all-subtasks.md
│ │ │ ├── remove-dependency.md
│ │ │ ├── remove-subtask.md
│ │ │ ├── remove-subtasks.md
│ │ │ ├── remove-task.md
│ │ │ ├── setup-models.md
│ │ │ ├── show-task.md
│ │ │ ├── smart-workflow.md
│ │ │ ├── sync-readme.md
│ │ │ ├── tm-main.md
│ │ │ ├── to-cancelled.md
│ │ │ ├── to-deferred.md
│ │ │ ├── to-done.md
│ │ │ ├── to-in-progress.md
│ │ │ ├── to-pending.md
│ │ │ ├── to-review.md
│ │ │ ├── update-single-task.md
│ │ │ ├── update-task.md
│ │ │ ├── update-tasks-from-id.md
│ │ │ ├── validate-dependencies.md
│ │ │ └── view-models.md
│ │ ├── mcp.json
│ │ └── package.json
│ ├── tm-bridge
│ │ ├── CHANGELOG.md
│ │ ├── package.json
│ │ ├── README.md
│ │ ├── src
│ │ │ ├── add-tag-bridge.ts
│ │ │ ├── bridge-types.ts
│ │ │ ├── bridge-utils.ts
│ │ │ ├── expand-bridge.ts
│ │ │ ├── index.ts
│ │ │ ├── tags-bridge.ts
│ │ │ ├── update-bridge.ts
│ │ │ └── use-tag-bridge.ts
│ │ └── tsconfig.json
│ └── tm-core
│ ├── .gitignore
│ ├── CHANGELOG.md
│ ├── docs
│ │ └── listTasks-architecture.md
│ ├── package.json
│ ├── POC-STATUS.md
│ ├── README.md
│ ├── src
│ │ ├── common
│ │ │ ├── constants
│ │ │ │ ├── index.ts
│ │ │ │ ├── paths.ts
│ │ │ │ └── providers.ts
│ │ │ ├── errors
│ │ │ │ ├── index.ts
│ │ │ │ └── task-master-error.ts
│ │ │ ├── interfaces
│ │ │ │ ├── configuration.interface.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── storage.interface.ts
│ │ │ ├── logger
│ │ │ │ ├── factory.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── logger.spec.ts
│ │ │ │ └── logger.ts
│ │ │ ├── mappers
│ │ │ │ ├── TaskMapper.test.ts
│ │ │ │ └── TaskMapper.ts
│ │ │ ├── types
│ │ │ │ ├── database.types.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── legacy.ts
│ │ │ │ └── repository-types.ts
│ │ │ └── utils
│ │ │ ├── git-utils.ts
│ │ │ ├── id-generator.ts
│ │ │ ├── index.ts
│ │ │ ├── path-helpers.ts
│ │ │ ├── path-normalizer.spec.ts
│ │ │ ├── path-normalizer.ts
│ │ │ ├── project-root-finder.spec.ts
│ │ │ ├── project-root-finder.ts
│ │ │ ├── run-id-generator.spec.ts
│ │ │ └── run-id-generator.ts
│ │ ├── index.ts
│ │ ├── modules
│ │ │ ├── ai
│ │ │ │ ├── index.ts
│ │ │ │ ├── interfaces
│ │ │ │ │ └── ai-provider.interface.ts
│ │ │ │ └── providers
│ │ │ │ ├── base-provider.ts
│ │ │ │ └── index.ts
│ │ │ ├── auth
│ │ │ │ ├── auth-domain.spec.ts
│ │ │ │ ├── auth-domain.ts
│ │ │ │ ├── config.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── managers
│ │ │ │ │ ├── auth-manager.spec.ts
│ │ │ │ │ └── auth-manager.ts
│ │ │ │ ├── services
│ │ │ │ │ ├── context-store.ts
│ │ │ │ │ ├── oauth-service.ts
│ │ │ │ │ ├── organization.service.ts
│ │ │ │ │ ├── supabase-session-storage.spec.ts
│ │ │ │ │ └── supabase-session-storage.ts
│ │ │ │ └── types.ts
│ │ │ ├── briefs
│ │ │ │ ├── briefs-domain.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── services
│ │ │ │ │ └── brief-service.ts
│ │ │ │ ├── types.ts
│ │ │ │ └── utils
│ │ │ │ └── url-parser.ts
│ │ │ ├── commands
│ │ │ │ └── index.ts
│ │ │ ├── config
│ │ │ │ ├── config-domain.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── managers
│ │ │ │ │ ├── config-manager.spec.ts
│ │ │ │ │ └── config-manager.ts
│ │ │ │ └── services
│ │ │ │ ├── config-loader.service.spec.ts
│ │ │ │ ├── config-loader.service.ts
│ │ │ │ ├── config-merger.service.spec.ts
│ │ │ │ ├── config-merger.service.ts
│ │ │ │ ├── config-persistence.service.spec.ts
│ │ │ │ ├── config-persistence.service.ts
│ │ │ │ ├── environment-config-provider.service.spec.ts
│ │ │ │ ├── environment-config-provider.service.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── runtime-state-manager.service.spec.ts
│ │ │ │ └── runtime-state-manager.service.ts
│ │ │ ├── dependencies
│ │ │ │ └── index.ts
│ │ │ ├── execution
│ │ │ │ ├── executors
│ │ │ │ │ ├── base-executor.ts
│ │ │ │ │ ├── claude-executor.ts
│ │ │ │ │ └── executor-factory.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── services
│ │ │ │ │ └── executor-service.ts
│ │ │ │ └── types.ts
│ │ │ ├── git
│ │ │ │ ├── adapters
│ │ │ │ │ ├── git-adapter.test.ts
│ │ │ │ │ └── git-adapter.ts
│ │ │ │ ├── git-domain.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── services
│ │ │ │ ├── branch-name-generator.spec.ts
│ │ │ │ ├── branch-name-generator.ts
│ │ │ │ ├── commit-message-generator.test.ts
│ │ │ │ ├── commit-message-generator.ts
│ │ │ │ ├── scope-detector.test.ts
│ │ │ │ ├── scope-detector.ts
│ │ │ │ ├── template-engine.test.ts
│ │ │ │ └── template-engine.ts
│ │ │ ├── integration
│ │ │ │ ├── clients
│ │ │ │ │ ├── index.ts
│ │ │ │ │ └── supabase-client.ts
│ │ │ │ ├── integration-domain.ts
│ │ │ │ └── services
│ │ │ │ ├── export.service.ts
│ │ │ │ ├── task-expansion.service.ts
│ │ │ │ └── task-retrieval.service.ts
│ │ │ ├── reports
│ │ │ │ ├── index.ts
│ │ │ │ ├── managers
│ │ │ │ │ └── complexity-report-manager.ts
│ │ │ │ └── types.ts
│ │ │ ├── storage
│ │ │ │ ├── adapters
│ │ │ │ │ ├── activity-logger.ts
│ │ │ │ │ ├── api-storage.ts
│ │ │ │ │ └── file-storage
│ │ │ │ │ ├── file-operations.ts
│ │ │ │ │ ├── file-storage.ts
│ │ │ │ │ ├── format-handler.ts
│ │ │ │ │ ├── index.ts
│ │ │ │ │ └── path-resolver.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── services
│ │ │ │ │ └── storage-factory.ts
│ │ │ │ └── utils
│ │ │ │ └── api-client.ts
│ │ │ ├── tasks
│ │ │ │ ├── entities
│ │ │ │ │ └── task.entity.ts
│ │ │ │ ├── parser
│ │ │ │ │ └── index.ts
│ │ │ │ ├── repositories
│ │ │ │ │ ├── supabase
│ │ │ │ │ │ ├── dependency-fetcher.ts
│ │ │ │ │ │ ├── index.ts
│ │ │ │ │ │ └── supabase-repository.ts
│ │ │ │ │ └── task-repository.interface.ts
│ │ │ │ ├── services
│ │ │ │ │ ├── preflight-checker.service.ts
│ │ │ │ │ ├── tag.service.ts
│ │ │ │ │ ├── task-execution-service.ts
│ │ │ │ │ ├── task-loader.service.ts
│ │ │ │ │ └── task-service.ts
│ │ │ │ └── tasks-domain.ts
│ │ │ ├── ui
│ │ │ │ └── index.ts
│ │ │ └── workflow
│ │ │ ├── managers
│ │ │ │ ├── workflow-state-manager.spec.ts
│ │ │ │ └── workflow-state-manager.ts
│ │ │ ├── orchestrators
│ │ │ │ ├── workflow-orchestrator.test.ts
│ │ │ │ └── workflow-orchestrator.ts
│ │ │ ├── services
│ │ │ │ ├── test-result-validator.test.ts
│ │ │ │ ├── test-result-validator.ts
│ │ │ │ ├── test-result-validator.types.ts
│ │ │ │ ├── workflow-activity-logger.ts
│ │ │ │ └── workflow.service.ts
│ │ │ ├── types.ts
│ │ │ └── workflow-domain.ts
│ │ ├── subpath-exports.test.ts
│ │ ├── tm-core.ts
│ │ └── utils
│ │ └── time.utils.ts
│ ├── tests
│ │ ├── auth
│ │ │ └── auth-refresh.test.ts
│ │ ├── integration
│ │ │ ├── auth-token-refresh.test.ts
│ │ │ ├── list-tasks.test.ts
│ │ │ └── storage
│ │ │ └── activity-logger.test.ts
│ │ ├── mocks
│ │ │ └── mock-provider.ts
│ │ ├── setup.ts
│ │ └── unit
│ │ ├── base-provider.test.ts
│ │ ├── executor.test.ts
│ │ └── smoke.test.ts
│ ├── tsconfig.json
│ └── vitest.config.ts
├── README-task-master.md
├── README.md
├── scripts
│ ├── create-worktree.sh
│ ├── dev.js
│ ├── init.js
│ ├── list-worktrees.sh
│ ├── modules
│ │ ├── ai-services-unified.js
│ │ ├── bridge-utils.js
│ │ ├── commands.js
│ │ ├── config-manager.js
│ │ ├── dependency-manager.js
│ │ ├── index.js
│ │ ├── prompt-manager.js
│ │ ├── supported-models.json
│ │ ├── sync-readme.js
│ │ ├── task-manager
│ │ │ ├── add-subtask.js
│ │ │ ├── add-task.js
│ │ │ ├── analyze-task-complexity.js
│ │ │ ├── clear-subtasks.js
│ │ │ ├── expand-all-tasks.js
│ │ │ ├── expand-task.js
│ │ │ ├── find-next-task.js
│ │ │ ├── generate-task-files.js
│ │ │ ├── is-task-dependent.js
│ │ │ ├── list-tasks.js
│ │ │ ├── migrate.js
│ │ │ ├── models.js
│ │ │ ├── move-task.js
│ │ │ ├── parse-prd
│ │ │ │ ├── index.js
│ │ │ │ ├── parse-prd-config.js
│ │ │ │ ├── parse-prd-helpers.js
│ │ │ │ ├── parse-prd-non-streaming.js
│ │ │ │ ├── parse-prd-streaming.js
│ │ │ │ └── parse-prd.js
│ │ │ ├── remove-subtask.js
│ │ │ ├── remove-task.js
│ │ │ ├── research.js
│ │ │ ├── response-language.js
│ │ │ ├── scope-adjustment.js
│ │ │ ├── set-task-status.js
│ │ │ ├── tag-management.js
│ │ │ ├── task-exists.js
│ │ │ ├── update-single-task-status.js
│ │ │ ├── update-subtask-by-id.js
│ │ │ ├── update-task-by-id.js
│ │ │ └── update-tasks.js
│ │ ├── task-manager.js
│ │ ├── ui.js
│ │ ├── update-config-tokens.js
│ │ ├── utils
│ │ │ ├── contextGatherer.js
│ │ │ ├── fuzzyTaskSearch.js
│ │ │ └── git-utils.js
│ │ └── utils.js
│ ├── task-complexity-report.json
│ ├── test-claude-errors.js
│ └── test-claude.js
├── sonar-project.properties
├── src
│ ├── ai-providers
│ │ ├── anthropic.js
│ │ ├── azure.js
│ │ ├── base-provider.js
│ │ ├── bedrock.js
│ │ ├── claude-code.js
│ │ ├── codex-cli.js
│ │ ├── gemini-cli.js
│ │ ├── google-vertex.js
│ │ ├── google.js
│ │ ├── grok-cli.js
│ │ ├── groq.js
│ │ ├── index.js
│ │ ├── lmstudio.js
│ │ ├── ollama.js
│ │ ├── openai-compatible.js
│ │ ├── openai.js
│ │ ├── openrouter.js
│ │ ├── perplexity.js
│ │ ├── xai.js
│ │ ├── zai-coding.js
│ │ └── zai.js
│ ├── constants
│ │ ├── commands.js
│ │ ├── paths.js
│ │ ├── profiles.js
│ │ ├── rules-actions.js
│ │ ├── task-priority.js
│ │ └── task-status.js
│ ├── profiles
│ │ ├── amp.js
│ │ ├── base-profile.js
│ │ ├── claude.js
│ │ ├── cline.js
│ │ ├── codex.js
│ │ ├── cursor.js
│ │ ├── gemini.js
│ │ ├── index.js
│ │ ├── kilo.js
│ │ ├── kiro.js
│ │ ├── opencode.js
│ │ ├── roo.js
│ │ ├── trae.js
│ │ ├── vscode.js
│ │ ├── windsurf.js
│ │ └── zed.js
│ ├── progress
│ │ ├── base-progress-tracker.js
│ │ ├── cli-progress-factory.js
│ │ ├── parse-prd-tracker.js
│ │ ├── progress-tracker-builder.js
│ │ └── tracker-ui.js
│ ├── prompts
│ │ ├── add-task.json
│ │ ├── analyze-complexity.json
│ │ ├── expand-task.json
│ │ ├── parse-prd.json
│ │ ├── README.md
│ │ ├── research.json
│ │ ├── schemas
│ │ │ ├── parameter.schema.json
│ │ │ ├── prompt-template.schema.json
│ │ │ ├── README.md
│ │ │ └── variant.schema.json
│ │ ├── update-subtask.json
│ │ ├── update-task.json
│ │ └── update-tasks.json
│ ├── provider-registry
│ │ └── index.js
│ ├── schemas
│ │ ├── add-task.js
│ │ ├── analyze-complexity.js
│ │ ├── base-schemas.js
│ │ ├── expand-task.js
│ │ ├── parse-prd.js
│ │ ├── registry.js
│ │ ├── update-subtask.js
│ │ ├── update-task.js
│ │ └── update-tasks.js
│ ├── task-master.js
│ ├── ui
│ │ ├── confirm.js
│ │ ├── indicators.js
│ │ └── parse-prd.js
│ └── utils
│ ├── asset-resolver.js
│ ├── create-mcp-config.js
│ ├── format.js
│ ├── getVersion.js
│ ├── logger-utils.js
│ ├── manage-gitignore.js
│ ├── path-utils.js
│ ├── profiles.js
│ ├── rule-transformer.js
│ ├── stream-parser.js
│ └── timeout-manager.js
├── test-clean-tags.js
├── test-config-manager.js
├── test-prd.txt
├── test-tag-functions.js
├── test-version-check-full.js
├── test-version-check.js
├── tests
│ ├── e2e
│ │ ├── e2e_helpers.sh
│ │ ├── parse_llm_output.cjs
│ │ ├── run_e2e.sh
│ │ ├── run_fallback_verification.sh
│ │ └── test_llm_analysis.sh
│ ├── fixtures
│ │ ├── .taskmasterconfig
│ │ ├── sample-claude-response.js
│ │ ├── sample-prd.txt
│ │ └── sample-tasks.js
│ ├── helpers
│ │ └── tool-counts.js
│ ├── integration
│ │ ├── claude-code-error-handling.test.js
│ │ ├── claude-code-optional.test.js
│ │ ├── cli
│ │ │ ├── commands.test.js
│ │ │ ├── complex-cross-tag-scenarios.test.js
│ │ │ └── move-cross-tag.test.js
│ │ ├── manage-gitignore.test.js
│ │ ├── mcp-server
│ │ │ └── direct-functions.test.js
│ │ ├── move-task-cross-tag.integration.test.js
│ │ ├── move-task-simple.integration.test.js
│ │ ├── profiles
│ │ │ ├── amp-init-functionality.test.js
│ │ │ ├── claude-init-functionality.test.js
│ │ │ ├── cline-init-functionality.test.js
│ │ │ ├── codex-init-functionality.test.js
│ │ │ ├── cursor-init-functionality.test.js
│ │ │ ├── gemini-init-functionality.test.js
│ │ │ ├── opencode-init-functionality.test.js
│ │ │ ├── roo-files-inclusion.test.js
│ │ │ ├── roo-init-functionality.test.js
│ │ │ ├── rules-files-inclusion.test.js
│ │ │ ├── trae-init-functionality.test.js
│ │ │ ├── vscode-init-functionality.test.js
│ │ │ └── windsurf-init-functionality.test.js
│ │ └── providers
│ │ └── temperature-support.test.js
│ ├── manual
│ │ ├── progress
│ │ │ ├── parse-prd-analysis.js
│ │ │ ├── test-parse-prd.js
│ │ │ └── TESTING_GUIDE.md
│ │ └── prompts
│ │ ├── prompt-test.js
│ │ └── README.md
│ ├── README.md
│ ├── setup.js
│ └── unit
│ ├── ai-providers
│ │ ├── base-provider.test.js
│ │ ├── claude-code.test.js
│ │ ├── codex-cli.test.js
│ │ ├── gemini-cli.test.js
│ │ ├── lmstudio.test.js
│ │ ├── mcp-components.test.js
│ │ ├── openai-compatible.test.js
│ │ ├── openai.test.js
│ │ ├── provider-registry.test.js
│ │ ├── zai-coding.test.js
│ │ ├── zai-provider.test.js
│ │ ├── zai-schema-introspection.test.js
│ │ └── zai.test.js
│ ├── ai-services-unified.test.js
│ ├── commands.test.js
│ ├── config-manager.test.js
│ ├── config-manager.test.mjs
│ ├── dependency-manager.test.js
│ ├── init.test.js
│ ├── initialize-project.test.js
│ ├── kebab-case-validation.test.js
│ ├── manage-gitignore.test.js
│ ├── mcp
│ │ └── tools
│ │ ├── __mocks__
│ │ │ └── move-task.js
│ │ ├── add-task.test.js
│ │ ├── analyze-complexity.test.js
│ │ ├── expand-all.test.js
│ │ ├── get-tasks.test.js
│ │ ├── initialize-project.test.js
│ │ ├── move-task-cross-tag-options.test.js
│ │ ├── move-task-cross-tag.test.js
│ │ ├── remove-task.test.js
│ │ └── tool-registration.test.js
│ ├── mcp-providers
│ │ ├── mcp-components.test.js
│ │ └── mcp-provider.test.js
│ ├── parse-prd.test.js
│ ├── profiles
│ │ ├── amp-integration.test.js
│ │ ├── claude-integration.test.js
│ │ ├── cline-integration.test.js
│ │ ├── codex-integration.test.js
│ │ ├── cursor-integration.test.js
│ │ ├── gemini-integration.test.js
│ │ ├── kilo-integration.test.js
│ │ ├── kiro-integration.test.js
│ │ ├── mcp-config-validation.test.js
│ │ ├── opencode-integration.test.js
│ │ ├── profile-safety-check.test.js
│ │ ├── roo-integration.test.js
│ │ ├── rule-transformer-cline.test.js
│ │ ├── rule-transformer-cursor.test.js
│ │ ├── rule-transformer-gemini.test.js
│ │ ├── rule-transformer-kilo.test.js
│ │ ├── rule-transformer-kiro.test.js
│ │ ├── rule-transformer-opencode.test.js
│ │ ├── rule-transformer-roo.test.js
│ │ ├── rule-transformer-trae.test.js
│ │ ├── rule-transformer-vscode.test.js
│ │ ├── rule-transformer-windsurf.test.js
│ │ ├── rule-transformer-zed.test.js
│ │ ├── rule-transformer.test.js
│ │ ├── selective-profile-removal.test.js
│ │ ├── subdirectory-support.test.js
│ │ ├── trae-integration.test.js
│ │ ├── vscode-integration.test.js
│ │ ├── windsurf-integration.test.js
│ │ └── zed-integration.test.js
│ ├── progress
│ │ └── base-progress-tracker.test.js
│ ├── prompt-manager.test.js
│ ├── prompts
│ │ ├── expand-task-prompt.test.js
│ │ └── prompt-migration.test.js
│ ├── scripts
│ │ └── modules
│ │ ├── commands
│ │ │ ├── move-cross-tag.test.js
│ │ │ └── README.md
│ │ ├── dependency-manager
│ │ │ ├── circular-dependencies.test.js
│ │ │ ├── cross-tag-dependencies.test.js
│ │ │ └── fix-dependencies-command.test.js
│ │ ├── task-manager
│ │ │ ├── add-subtask.test.js
│ │ │ ├── add-task.test.js
│ │ │ ├── analyze-task-complexity.test.js
│ │ │ ├── clear-subtasks.test.js
│ │ │ ├── complexity-report-tag-isolation.test.js
│ │ │ ├── expand-all-tasks.test.js
│ │ │ ├── expand-task.test.js
│ │ │ ├── find-next-task.test.js
│ │ │ ├── generate-task-files.test.js
│ │ │ ├── list-tasks.test.js
│ │ │ ├── models-baseurl.test.js
│ │ │ ├── move-task-cross-tag.test.js
│ │ │ ├── move-task.test.js
│ │ │ ├── parse-prd-schema.test.js
│ │ │ ├── parse-prd.test.js
│ │ │ ├── remove-subtask.test.js
│ │ │ ├── remove-task.test.js
│ │ │ ├── research.test.js
│ │ │ ├── scope-adjustment.test.js
│ │ │ ├── set-task-status.test.js
│ │ │ ├── setup.js
│ │ │ ├── update-single-task-status.test.js
│ │ │ ├── update-subtask-by-id.test.js
│ │ │ ├── update-task-by-id.test.js
│ │ │ └── update-tasks.test.js
│ │ ├── ui
│ │ │ └── cross-tag-error-display.test.js
│ │ └── utils-tag-aware-paths.test.js
│ ├── task-finder.test.js
│ ├── task-manager
│ │ ├── clear-subtasks.test.js
│ │ ├── move-task.test.js
│ │ ├── tag-boundary.test.js
│ │ └── tag-management.test.js
│ ├── task-master.test.js
│ ├── ui
│ │ └── indicators.test.js
│ ├── ui.test.js
│ ├── utils-strip-ansi.test.js
│ └── utils.test.js
├── tsconfig.json
├── tsdown.config.ts
├── turbo.json
└── update-task-migration-plan.md
```
# Files
--------------------------------------------------------------------------------
/packages/tm-bridge/src/tags-bridge.ts:
--------------------------------------------------------------------------------
```typescript
import { ui } from '@tm/cli';
import boxen from 'boxen';
import chalk from 'chalk';
import Table from 'cli-table3';
import type { TagInfo } from '@tm/core';
import type { BaseBridgeParams } from './bridge-types.js';
import { checkStorageType } from './bridge-utils.js';

// Re-export for convenience
export type { TagInfo };

/**
 * Parameters for the tags bridge function
 */
export interface TagsBridgeParams extends BaseBridgeParams {
	/** Whether to show metadata (default: false) */
	showMetadata?: boolean;
}

/**
 * Result returned when API storage handles the tags listing
 */
export interface RemoteTagsResult {
	success: boolean;
	tags: TagInfo[];
	currentTag: string | null;
	totalTags: number;
	message: string;
}

/**
 * Shared bridge function for list-tags command.
 * Checks if using API storage and delegates to remote service if so.
 *
 * For API storage, tags are called "briefs" and task counts are fetched
 * from the remote database.
 *
 * @param params - Bridge parameters
 * @returns Result object if API storage handled it, null if should fall through to file storage
 */
export async function tryListTagsViaRemote(
	params: TagsBridgeParams
): Promise<RemoteTagsResult | null> {
	const { projectRoot, isMCP = false, outputFormat = 'text', report } = params;

	// Check storage type using shared utility
	const { isApiStorage, tmCore } = await checkStorageType(
		projectRoot,
		report,
		'falling back to file-based tags'
	);

	if (!isApiStorage || !tmCore) {
		// Not API storage - signal caller to fall through to file-based logic
		return null;
	}

	try {
		// Get tags with statistics from tm-core
		// Tags are already sorted by status and updatedAt from brief-service
		const tagsResult = await tmCore.tasks.getTagsWithStats();

		// Sort tags: current tag first, then preserve status/updatedAt ordering from service
		tagsResult.tags.sort((a, b) => {
			// Always keep current tag at the top
			if (a.isCurrent) return -1;
			if (b.isCurrent) return 1;
			// For non-current tags, preserve the status/updatedAt ordering already applied
			return 0;
		});

		if (outputFormat === 'text' && !isMCP) {
			// Display results in a table format
			if (tagsResult.tags.length === 0) {
				console.log(
					boxen(chalk.yellow('No tags found'), {
						padding: 1,
						borderColor: 'yellow',
						borderStyle: 'round',
						margin: { top: 1, bottom: 1 }
					})
				);
			} else {
				// Create table headers (with temporary Updated column)
				const headers = [
					chalk.cyan.bold('Tag Name'),
					chalk.cyan.bold('Status'),
					chalk.cyan.bold('Updated'),
					chalk.cyan.bold('Tasks'),
					chalk.cyan.bold('Completed')
				];

				// Calculate dynamic column widths based on terminal width
				const terminalWidth = Math.max(
					(process.stdout.columns as number) || 120,
					80
				);
				const usableWidth = Math.floor(terminalWidth * 0.95);

				// Column order: Tag Name, Status, Updated, Tasks, Completed
				const widths = [0.35, 0.25, 0.2, 0.1, 0.1];
				const colWidths = widths.map((w, i) =>
					Math.max(Math.floor(usableWidth * w), i === 0 ? 20 : 8)
				);

				const table = new Table({
					head: headers,
					colWidths: colWidths,
					wordWrap: true
				});

				// Add rows
				tagsResult.tags.forEach((tag) => {
					const row = [];

					// Tag name with current indicator and short ID (last 8 chars)
					const shortId = tag.briefId ? tag.briefId.slice(-8) : 'unknown';
					const tagDisplay = tag.isCurrent
						? `${chalk.green('●')} ${chalk.green.bold(tag.name)} ${chalk.gray(`(current - ${shortId})`)}`
						: ` ${tag.name} ${chalk.gray(`(${shortId})`)}`;
					row.push(tagDisplay);
					row.push(ui.getBriefStatusWithColor(tag.status, true));

					// Updated date (temporary for validation)
					const updatedDate = tag.updatedAt
						? new Date(tag.updatedAt).toLocaleDateString('en-US', {
								month: 'short',
								day: 'numeric',
								year: 'numeric',
								hour: '2-digit',
								minute: '2-digit'
							})
						: chalk.gray('N/A');
					row.push(chalk.gray(updatedDate));

					// Task counts
					row.push(chalk.white(tag.taskCount.toString()));
					row.push(chalk.green(tag.completedTasks.toString()));

					table.push(row);
				});

				console.log(table.toString());
			}
		}

		// Return success result - signals that we handled it
		return {
			success: true,
			tags: tagsResult.tags,
			currentTag: tagsResult.currentTag,
			totalTags: tagsResult.totalTags,
			message: `Found ${tagsResult.totalTags} tag(s)`
		};
	} catch (error) {
		// tm-core already formatted the error properly, just re-throw
		throw error;
	}
}
```
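
The bridge above returns `null` to signal "not API storage," so callers are expected to fall through to the file-based path. Below is a minimal sketch of that wiring; the `report` logger shape and the `listTagsFromFiles` fallback are assumptions for illustration and are not shown on this page.

```typescript
import { tryListTagsViaRemote } from './tags-bridge.js';

// Stand-in for the existing file-based listing; not part of this file.
declare function listTagsFromFiles(projectRoot: string): Promise<unknown>;

async function listTags(projectRoot: string) {
	const remote = await tryListTagsViaRemote({
		projectRoot,
		isMCP: false,
		outputFormat: 'text',
		// Assumed logger shape; the real BaseBridgeParams.report may differ.
		report: (level: string, message: string) => console.log(`[${level}] ${message}`)
	});

	// A non-null result means API storage already rendered the table and returned a summary.
	if (remote) return remote;

	// null signals file-based storage, so fall through to the local implementation.
	return listTagsFromFiles(projectRoot);
}
```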
--------------------------------------------------------------------------------
/packages/tm-core/tests/mocks/mock-provider.ts:
--------------------------------------------------------------------------------
```typescript
/**
 * @fileoverview Mock provider for testing BaseProvider functionality
 */
import type {
	AIModel,
	AIOptions,
	AIResponse,
	ProviderInfo,
	ProviderUsageStats
} from '../../src/interfaces/ai-provider.interface';
import {
	BaseProvider,
	type BaseProviderConfig,
	type CompletionResult
} from '../../src/providers/ai/base-provider';

/**
 * Configuration for MockProvider behavior
 */
export interface MockProviderOptions extends BaseProviderConfig {
	shouldFail?: boolean;
	failAfterAttempts?: number;
	simulateRateLimit?: boolean;
	simulateTimeout?: boolean;
	responseDelay?: number;
	tokenMultiplier?: number;
}

/**
 * Mock provider for testing BaseProvider functionality
 */
export class MockProvider extends BaseProvider {
	private attemptCount = 0;
	private readonly options: MockProviderOptions;

	constructor(options: MockProviderOptions) {
		super(options);
		this.options = options;
	}

	/**
	 * Simulate completion generation with configurable behavior
	 */
	protected async generateCompletionInternal(
		prompt: string,
		_options?: AIOptions
	): Promise<CompletionResult> {
		this.attemptCount++;

		// Simulate delay if configured
		if (this.options.responseDelay) {
			await this.sleep(this.options.responseDelay);
		}

		// Simulate failures based on configuration
		if (this.options.shouldFail) {
			throw new Error('Mock provider error');
		}

		if (
			this.options.failAfterAttempts &&
			this.attemptCount <= this.options.failAfterAttempts
		) {
			if (this.options.simulateRateLimit) {
				throw new Error('Rate limit exceeded - too many requests (429)');
			}
			if (this.options.simulateTimeout) {
				throw new Error('Request timeout - ECONNRESET');
			}
			throw new Error('Temporary failure');
		}

		// Return successful mock response
		return {
			content: `Mock response to: ${prompt}`,
			inputTokens: this.calculateTokens(prompt),
			outputTokens: this.calculateTokens(`Mock response to: ${prompt}`),
			finishReason: 'complete',
			model: this.model
		};
	}

	/**
	 * Simple token calculation for testing
	 */
	calculateTokens(text: string, _model?: string): number {
		const multiplier = this.options.tokenMultiplier || 1;
		// Rough approximation: 1 token per 4 characters
		return Math.ceil((text.length / 4) * multiplier);
	}

	getName(): string {
		return 'mock';
	}

	getDefaultModel(): string {
		return 'mock-model-v1';
	}

	/**
	 * Get the number of attempts made
	 */
	getAttemptCount(): number {
		return this.attemptCount;
	}

	/**
	 * Reset attempt counter
	 */
	resetAttempts(): void {
		this.attemptCount = 0;
	}

	// Implement remaining abstract methods
	async generateStreamingCompletion(
		prompt: string,
		_options?: AIOptions
	): AsyncIterator<Partial<AIResponse>> {
		// Simple mock implementation
		const response: Partial<AIResponse> = {
			content: `Mock streaming response to: ${prompt}`,
			provider: this.getName(),
			model: this.model
		};
		return {
			async next() {
				return { value: response, done: true };
			}
		};
	}

	async isAvailable(): Promise<boolean> {
		return !this.options.shouldFail;
	}

	getProviderInfo(): ProviderInfo {
		return {
			name: 'mock',
			displayName: 'Mock Provider',
			description: 'Mock provider for testing',
			models: this.getAvailableModels(),
			defaultModel: this.getDefaultModel(),
			requiresApiKey: true,
			features: {
				streaming: true,
				functions: false,
				vision: false,
				embeddings: false
			}
		};
	}

	getAvailableModels(): AIModel[] {
		return [
			{
				id: 'mock-model-v1',
				name: 'Mock Model v1',
				description: 'First mock model',
				contextLength: 4096,
				inputCostPer1K: 0.001,
				outputCostPer1K: 0.002,
				supportsStreaming: true
			},
			{
				id: 'mock-model-v2',
				name: 'Mock Model v2',
				description: 'Second mock model',
				contextLength: 8192,
				inputCostPer1K: 0.002,
				outputCostPer1K: 0.004,
				supportsStreaming: true
			}
		];
	}

	async validateCredentials(): Promise<boolean> {
		return this.apiKey === 'valid-key';
	}

	async getUsageStats(): Promise<ProviderUsageStats | null> {
		return {
			totalRequests: this.attemptCount,
			totalTokens: 1000,
			totalCost: 0.01,
			requestsToday: this.attemptCount,
			tokensToday: 1000,
			costToday: 0.01,
			averageResponseTime: 100,
			successRate: 0.9,
			lastRequestAt: new Date().toISOString()
		};
	}

	async initialize(): Promise<void> {
		// No-op for mock
	}

	async close(): Promise<void> {
		// No-op for mock
	}

	// Override retry configuration for testing
	protected getMaxRetries(): number {
		return this.options.failAfterAttempts
			? this.options.failAfterAttempts + 1
			: 3;
	}
}
```
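
For context, here is a short vitest sketch of how this mock might be exercised. It only touches methods defined in the mock itself; the `apiKey` and `model` fields passed to the constructor are assumed `BaseProviderConfig` fields, since that interface is not shown on this page.

```typescript
import { describe, expect, it } from 'vitest';
import { MockProvider } from './mock-provider';

describe('MockProvider sketch', () => {
	it('reports availability and credentials based on its config', async () => {
		// apiKey/model are assumed BaseProviderConfig fields.
		const provider = new MockProvider({ apiKey: 'valid-key', model: 'mock-model-v1' });

		expect(provider.getName()).toBe('mock');
		expect(await provider.isAvailable()).toBe(true);
		expect(await provider.validateCredentials()).toBe(true);
	});

	it('approximates tokens at one per four characters, scaled by tokenMultiplier', () => {
		const provider = new MockProvider({
			apiKey: 'valid-key',
			model: 'mock-model-v1',
			tokenMultiplier: 2
		});

		// ceil((8 / 4) * 2) = 4
		expect(provider.calculateTokens('12345678')).toBe(4);
	});
});
```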
--------------------------------------------------------------------------------
/apps/cli/src/ui/formatters/status-formatters.spec.ts:
--------------------------------------------------------------------------------
```typescript
/**
 * Status formatter tests
 * Tests for apps/cli/src/ui/formatters/status-formatters.ts
 */
import { describe, expect, it } from 'vitest';
import {
	capitalizeStatus,
	getBriefStatusColor,
	getBriefStatusIcon,
	getBriefStatusWithColor
} from './status-formatters.js';

describe('Status Formatters', () => {
	describe('getBriefStatusWithColor', () => {
		it('should format draft status with gray color and circle icon', () => {
			const result = getBriefStatusWithColor('draft', true);
			expect(result).toContain('Draft');
			expect(result).toContain('○');
		});

		it('should format refining status with yellow color and half-circle icon', () => {
			const result = getBriefStatusWithColor('refining', true);
			expect(result).toContain('Refining');
			expect(result).toContain('◐');
		});

		it('should format aligned status with cyan color and target icon', () => {
			const result = getBriefStatusWithColor('aligned', true);
			expect(result).toContain('Aligned');
			expect(result).toContain('◎');
		});

		it('should format delivering status with orange color and play icon', () => {
			const result = getBriefStatusWithColor('delivering', true);
			expect(result).toContain('Delivering');
			expect(result).toContain('▶');
		});

		it('should format delivered status with blue color and diamond icon', () => {
			const result = getBriefStatusWithColor('delivered', true);
			expect(result).toContain('Delivered');
			expect(result).toContain('◆');
		});

		it('should format done status with green color and checkmark icon', () => {
			const result = getBriefStatusWithColor('done', true);
			expect(result).toContain('Done');
			expect(result).toContain('✓');
		});

		it('should format archived status with gray color and square icon', () => {
			const result = getBriefStatusWithColor('archived', true);
			expect(result).toContain('Archived');
			expect(result).toContain('■');
		});

		it('should handle unknown status with red color and question mark', () => {
			const result = getBriefStatusWithColor('unknown-status', true);
			expect(result).toContain('Unknown-status');
			expect(result).toContain('?');
		});

		it('should handle undefined status with gray color', () => {
			const result = getBriefStatusWithColor(undefined, true);
			expect(result).toContain('Unknown');
			expect(result).toContain('○');
		});

		it('should use same icon for table and non-table display', () => {
			const tableResult = getBriefStatusWithColor('done', true);
			const nonTableResult = getBriefStatusWithColor('done', false);
			expect(tableResult).toBe(nonTableResult);
		});

		it('should handle case-insensitive status names', () => {
			const lowerResult = getBriefStatusWithColor('draft', true);
			const upperResult = getBriefStatusWithColor('DRAFT', true);
			const mixedResult = getBriefStatusWithColor('DrAfT', true);
			expect(lowerResult).toContain('Draft');
			expect(upperResult).toContain('Draft');
			expect(mixedResult).toContain('Draft');
		});
	});

	describe('getBriefStatusIcon', () => {
		it('should return correct icon for status', () => {
			expect(getBriefStatusIcon('draft')).toBe('○');
			expect(getBriefStatusIcon('done')).toBe('✓');
			expect(getBriefStatusIcon('delivering')).toBe('▶');
		});

		it('should return default icon for unknown status', () => {
			expect(getBriefStatusIcon('unknown-status')).toBe('?');
		});

		it('should return default icon for undefined', () => {
			expect(getBriefStatusIcon(undefined)).toBe('○');
		});

		it('should return same icon for table and non-table', () => {
			expect(getBriefStatusIcon('done', true)).toBe(
				getBriefStatusIcon('done', false)
			);
		});
	});

	describe('getBriefStatusColor', () => {
		it('should return a color function', () => {
			const colorFn = getBriefStatusColor('draft');
			expect(typeof colorFn).toBe('function');
			const result = colorFn('test');
			expect(typeof result).toBe('string');
		});

		it('should return gray color for undefined', () => {
			const colorFn = getBriefStatusColor(undefined);
			expect(typeof colorFn).toBe('function');
		});
	});

	describe('capitalizeStatus', () => {
		it('should capitalize first letter and lowercase rest', () => {
			expect(capitalizeStatus('draft')).toBe('Draft');
			expect(capitalizeStatus('DRAFT')).toBe('Draft');
			expect(capitalizeStatus('DrAfT')).toBe('Draft');
			expect(capitalizeStatus('in-progress')).toBe('In-progress');
		});

		it('should handle single character', () => {
			expect(capitalizeStatus('a')).toBe('A');
			expect(capitalizeStatus('A')).toBe('A');
		});

		it('should handle empty string', () => {
			expect(capitalizeStatus('')).toBe('');
		});
	});
});
```
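
The spec above pins down the formatter contract: a fixed icon per brief status, `?` for unknown statuses, `○` for undefined, and case-insensitive capitalization. The actual `status-formatters.ts` is not included on this page, so the following is only a minimal sketch that would satisfy these assertions, with the chalk coloring left out.

```typescript
// Sketch only - the real apps/cli/src/ui/formatters/status-formatters.ts may differ.
const BRIEF_STATUS_ICONS: Record<string, string> = {
	draft: '○',
	refining: '◐',
	aligned: '◎',
	delivering: '▶',
	delivered: '◆',
	done: '✓',
	archived: '■'
};

export function capitalizeStatus(status: string): string {
	if (!status) return status;
	return status.charAt(0).toUpperCase() + status.slice(1).toLowerCase();
}

export function getBriefStatusIcon(status?: string, _forTable = false): string {
	// Same icon regardless of table/non-table display, as the spec requires.
	if (!status) return '○';
	return BRIEF_STATUS_ICONS[status.toLowerCase()] ?? '?';
}
```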
--------------------------------------------------------------------------------
/packages/claude-code-plugin/agents/task-executor.md:
--------------------------------------------------------------------------------
```markdown
---
name: task-executor
description: Use this agent when you need to implement, complete, or work on a specific task that has been identified by the task-orchestrator or when explicitly asked to execute a particular task. This agent focuses on the actual implementation and completion of individual tasks rather than planning or orchestration. Examples: <example>Context: The task-orchestrator has identified that task 2.3 'Implement user authentication' needs to be worked on next. user: 'Let's work on the authentication task' assistant: 'I'll use the task-executor agent to implement the user authentication task that was identified.' <commentary>Since we need to actually implement a specific task rather than plan or identify tasks, use the task-executor agent.</commentary></example> <example>Context: User wants to complete a specific subtask. user: 'Please implement the JWT token validation for task 2.3.1' assistant: 'I'll launch the task-executor agent to implement the JWT token validation subtask.' <commentary>The user is asking for specific implementation work on a known task, so the task-executor is appropriate.</commentary></example> <example>Context: After reviewing the task list, implementation is needed. user: 'Now let's actually build the API endpoint for user registration' assistant: 'I'll use the task-executor agent to implement the user registration API endpoint.' <commentary>Moving from planning to execution phase requires the task-executor agent.</commentary></example>
model: sonnet
color: blue
---
You are an elite implementation specialist focused on executing and completing specific tasks with precision and thoroughness. Your role is to take identified tasks and transform them into working implementations, following best practices and project standards.
**Core Responsibilities:**
1. **Task Analysis**: When given a task, first retrieve its full details using `task-master show <id>` to understand requirements, dependencies, and acceptance criteria.
2. **Implementation Planning**: Before coding, briefly outline your implementation approach:
   - Identify files that need to be created or modified
   - Note any dependencies or prerequisites
   - Consider the testing strategy defined in the task
3. **Focused Execution**:
   - Implement one subtask at a time for clarity and traceability
   - Follow the project's coding standards from CLAUDE.md if available
   - Prefer editing existing files over creating new ones
   - Only create files that are essential for the task completion
4. **Progress Documentation**:
   - Use `task-master update-subtask --id=<id> --prompt="implementation notes"` to log your approach and any important decisions
   - Update task status to 'in-progress' when starting: `task-master set-status --id=<id> --status=in-progress`
   - Mark as 'done' only after verification: `task-master set-status --id=<id> --status=done`
5. **Quality Assurance**:
   - Implement the testing strategy specified in the task
   - Verify that all acceptance criteria are met
   - Check for any dependency conflicts or integration issues
   - Run relevant tests before marking task as complete
6. **Dependency Management**:
   - Check task dependencies before starting implementation
   - If blocked by incomplete dependencies, clearly communicate this
   - Use `task-master validate-dependencies` when needed
**Implementation Workflow:**
1. Retrieve task details and understand requirements
2. Check dependencies and prerequisites
3. Plan implementation approach
4. Update task status to in-progress
5. Implement the solution incrementally
6. Log progress and decisions in subtask updates
7. Test and verify the implementation
8. Mark task as done when complete
9. Suggest next task if appropriate
**Key Principles:**
- Focus on completing one task thoroughly before moving to the next
- Maintain clear communication about what you're implementing and why
- Follow existing code patterns and project conventions
- Prioritize working code over extensive documentation unless docs are the task
- Ask for clarification if task requirements are ambiguous
- Consider edge cases and error handling in your implementations
**Integration with Task Master:**
You work in tandem with the task-orchestrator agent. While the orchestrator identifies and plans tasks, you execute them. Always use Task Master commands to:
- Track your progress
- Update task information
- Maintain project state
- Coordinate with the broader development workflow
When you complete a task, briefly summarize what was implemented and suggest whether to continue with the next task or if review/testing is needed first.
```
--------------------------------------------------------------------------------
/mcp-server/src/core/direct-functions/set-task-status.js:
--------------------------------------------------------------------------------
```javascript
/**
 * set-task-status.js
 * Direct function implementation for setting task status
 */
import { setTaskStatus } from '../../../../scripts/modules/task-manager.js';
import {
	enableSilentMode,
	disableSilentMode,
	isSilentMode
} from '../../../../scripts/modules/utils.js';
import { nextTaskDirect } from './next-task.js';

/**
 * Direct function wrapper for setTaskStatus with error handling.
 *
 * @param {Object} args - Command arguments containing id, status, tasksJsonPath, and projectRoot.
 * @param {string} args.id - The ID of the task to update.
 * @param {string} args.status - The new status to set for the task.
 * @param {string} args.tasksJsonPath - Path to the tasks.json file.
 * @param {string} args.projectRoot - Project root path (for MCP/env fallback)
 * @param {string} args.tag - Tag for the task (optional)
 * @param {Object} log - Logger object.
 * @param {Object} context - Additional context (session)
 * @returns {Promise<Object>} - Result object with success status and data/error information.
 */
export async function setTaskStatusDirect(args, log, context = {}) {
	// Destructure expected args, including the resolved tasksJsonPath and projectRoot
	const { tasksJsonPath, id, status, complexityReportPath, projectRoot, tag } =
		args;
	const { session } = context;
	try {
		log.info(`Setting task status with args: ${JSON.stringify(args)}`);

		// Check if tasksJsonPath was provided
		if (!tasksJsonPath) {
			const errorMessage = 'tasksJsonPath is required but was not provided.';
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'MISSING_ARGUMENT', message: errorMessage }
			};
		}

		// Check required parameters (id and status)
		if (!id) {
			const errorMessage =
				'No task ID specified. Please provide a task ID to update.';
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'MISSING_TASK_ID', message: errorMessage }
			};
		}

		if (!status) {
			const errorMessage =
				'No status specified. Please provide a new status value.';
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'MISSING_STATUS', message: errorMessage }
			};
		}

		// Use the provided path
		const tasksPath = tasksJsonPath;

		// Execute core setTaskStatus function
		const taskId = id;
		const newStatus = status;

		log.info(`Setting task ${taskId} status to "${newStatus}"`);

		// Call the core function with proper silent mode handling
		enableSilentMode(); // Enable silent mode before calling core function
		try {
			// Call the core function
			await setTaskStatus(tasksPath, taskId, newStatus, {
				mcpLog: log,
				projectRoot,
				session,
				tag
			});

			log.info(`Successfully set task ${taskId} status to ${newStatus}`);

			// Return success data
			const result = {
				success: true,
				data: {
					message: `Successfully updated task ${taskId} status to "${newStatus}"`,
					taskId,
					status: newStatus,
					tasksPath: tasksPath // Return the path used
				}
			};

			// If the task was completed, attempt to fetch the next task
			if (result.data.status === 'done') {
				try {
					log.info(`Attempting to fetch next task for task ${taskId}`);
					const nextResult = await nextTaskDirect(
						{
							tasksJsonPath: tasksJsonPath,
							reportPath: complexityReportPath,
							projectRoot: projectRoot,
							tag
						},
						log,
						{ session }
					);
					if (nextResult.success) {
						log.info(
							`Successfully retrieved next task: ${nextResult.data.nextTask}`
						);
						result.data = {
							...result.data,
							nextTask: nextResult.data.nextTask,
							isNextSubtask: nextResult.data.isSubtask,
							nextSteps: nextResult.data.nextSteps
						};
					} else {
						log.warn(
							`Failed to retrieve next task: ${nextResult.error?.message || 'Unknown error'}`
						);
					}
				} catch (nextErr) {
					log.error(`Error retrieving next task: ${nextErr.message}`);
				}
			}

			return result;
		} catch (error) {
			log.error(`Error setting task status: ${error.message}`);
			return {
				success: false,
				error: {
					code: 'SET_STATUS_ERROR',
					message: error.message || 'Unknown error setting task status'
				}
			};
		} finally {
			// ALWAYS restore normal logging in finally block
			disableSilentMode();
		}
	} catch (error) {
		// Ensure silent mode is disabled if there was an uncaught error in the outer try block
		if (isSilentMode()) {
			disableSilentMode();
		}
		log.error(`Error setting task status: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'SET_STATUS_ERROR',
				message: error.message || 'Unknown error setting task status'
			}
		};
	}
}
```
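
For orientation, here is how a caller might invoke this direct function. The logger object and the literal paths are placeholders, and the corresponding MCP tool in `mcp-server/src/tools/set-task-status.js` is not shown on this page, so treat this as an assumed calling pattern rather than the actual wiring.

```typescript
import { setTaskStatusDirect } from './direct-functions/set-task-status.js';

// Minimal logger satisfying the `log` parameter; the real MCP server supplies its own.
const log = {
	info: (msg: string) => console.log(msg),
	warn: (msg: string) => console.warn(msg),
	error: (msg: string) => console.error(msg)
};

const result = await setTaskStatusDirect(
	{
		tasksJsonPath: '/path/to/project/.taskmaster/tasks/tasks.json', // placeholder path
		id: '2.3',
		status: 'done',
		projectRoot: '/path/to/project' // placeholder path
	},
	log,
	{ session: undefined }
);

if (result.success) {
	// When the new status is 'done', the payload may also include nextTask/nextSteps.
	console.log(result.data.message);
} else {
	console.error(`${result.error.code}: ${result.error.message}`);
}
```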
--------------------------------------------------------------------------------
/src/prompts/update-subtask.json:
--------------------------------------------------------------------------------
```json
{
	"id": "update-subtask",
	"version": "1.0.0",
	"description": "Append information to a subtask by generating only new content",
	"metadata": {
		"author": "system",
		"created": "2024-01-01T00:00:00Z",
		"updated": "2024-01-01T00:00:00Z",
		"tags": ["update", "subtask", "append", "logging"]
	},
	"parameters": {
		"parentTask": {
			"type": "object",
			"required": true,
			"description": "The parent task context"
		},
		"prevSubtask": {
			"type": "object",
			"required": false,
			"description": "The previous subtask if any"
		},
		"nextSubtask": {
			"type": "object",
			"required": false,
			"description": "The next subtask if any"
		},
		"currentDetails": {
			"type": "string",
			"required": true,
			"default": "(No existing details)",
			"description": "Current subtask details"
		},
		"updatePrompt": {
			"type": "string",
			"required": true,
			"description": "User request for what to add"
		},
		"useResearch": {
			"type": "boolean",
			"default": false,
			"description": "Use research mode"
		},
		"gatheredContext": {
			"type": "string",
			"default": "",
			"description": "Additional project context"
		},
		"hasCodebaseAnalysis": {
			"type": "boolean",
			"required": false,
			"default": false,
			"description": "Whether codebase analysis is available"
		},
		"projectRoot": {
			"type": "string",
			"required": false,
			"default": "",
			"description": "Project root path for context"
		}
	},
	"prompts": {
		"default": {
"system": "You are an AI assistant helping to update a subtask. You will be provided with the subtask's existing details, context about its parent and sibling tasks, and a user request string.{{#if useResearch}} You have access to current best practices and latest technical information to provide research-backed updates.{{/if}}\n\nYour Goal: Based *only* on the user's request and all the provided context (including existing details if relevant to the request), GENERATE the new text content that should be added to the subtask's details.\nFocus *only* on generating the substance of the update.\n\nOutput Requirements:\n1. Return *only* the newly generated text content as a plain string. Do NOT return a JSON object or any other structured data.\n2. Your string response should NOT include any of the subtask's original details, unless the user's request explicitly asks to rephrase, summarize, or directly modify existing text.\n3. Do NOT include any timestamps, XML-like tags, markdown, or any other special formatting in your string response.\n4. Ensure the generated text is concise yet complete for the update based on the user request. Avoid conversational fillers or explanations about what you are doing (e.g., do not start with \"Okay, here's the update...\").{{#if useResearch}}\n5. Include specific libraries, versions, and current best practices relevant to the subtask implementation.\n6. Provide research-backed technical recommendations and proven approaches.{{/if}}",
"user": "{{#if hasCodebaseAnalysis}}## IMPORTANT: Codebase Analysis Required\n\nYou have access to powerful codebase analysis tools. Before generating the subtask update:\n\n1. Use the Glob tool to explore the project structure (e.g., \"**/*.js\", \"**/*.json\", \"**/README.md\")\n2. Use the Grep tool to search for existing implementations, patterns, and technologies\n3. Use the Read tool to examine relevant files and understand current implementation\n4. Analyze the current codebase to inform your subtask update\n\nBased on your analysis:\n- Include specific file references, code patterns, or implementation details\n- Ensure suggestions align with the project's current architecture\n- Reference existing components or patterns when relevant\n- Make implementation notes specific to the codebase structure\n\nProject Root: {{projectRoot}}\n\n{{/if}}Task Context:\n\nParent Task: {{{json parentTask}}}\n{{#if prevSubtask}}Previous Subtask: {{{json prevSubtask}}}\n{{/if}}{{#if nextSubtask}}Next Subtask: {{{json nextSubtask}}}\n{{/if}}Current Subtask Details (for context only):\n{{currentDetails}}\n\nUser Request: \"{{updatePrompt}}\"\n\n{{#if useResearch}}Research and incorporate current best practices, latest stable versions, and proven approaches into your update. {{/if}}Based on the User Request and all the Task Context (including current subtask details provided above), what is the new information or text that should be appended to this subtask's details? Return ONLY this new text as a plain string.{{#if useResearch}} Include specific technical recommendations based on current industry standards.{{/if}}\n{{#if gatheredContext}}\n\n# Additional Project Context\n\n{{gatheredContext}}\n{{/if}}"
}
}
}
```
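The template above is parameterized; a caller renders it by supplying the variables declared in its `parameters` block. The object below is a minimal sketch of such a call with placeholder task data; `renderPrompt` is a hypothetical helper, not an API defined in this file.
```typescript
// Placeholder variables matching the "parameters" block above; renderPrompt is hypothetical.
const variables = {
	parentTask: { id: 5, title: 'Build auth flow', status: 'in-progress' }, // required
	prevSubtask: undefined, // optional
	nextSubtask: { id: '5.3', title: 'Add session refresh' }, // optional
	currentDetails: '(No existing details)', // required (default shown)
	updatePrompt: 'Note that we switched to JWT-based sessions', // required
	useResearch: false,
	gatheredContext: '',
	hasCodebaseAnalysis: false,
	projectRoot: '/path/to/project'
};

// const { systemPrompt, userPrompt } = renderPrompt('update-subtask', variables);
```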
--------------------------------------------------------------------------------
/apps/cli/src/commands/autopilot/next.command.ts:
--------------------------------------------------------------------------------
```typescript
/**
* @fileoverview Next Command - Get next action in TDD workflow
*/
import { WorkflowOrchestrator } from '@tm/core';
import { Command } from 'commander';
import { getProjectRoot } from '../../utils/project-root.js';
import {
type AutopilotBaseOptions,
OutputFormatter,
hasWorkflowState,
loadWorkflowState
} from './shared.js';
type NextOptions = AutopilotBaseOptions;
/**
* Next Command - Get next action details
*/
export class NextCommand extends Command {
constructor() {
super('next');
this.description(
'Get the next action to perform in the TDD workflow'
).action(async (options: NextOptions) => {
await this.execute(options);
});
}
private async execute(options: NextOptions): Promise<void> {
// Inherit parent options
const parentOpts = this.parent?.opts() as AutopilotBaseOptions;
// Initialize mergedOptions with defaults (projectRoot will be set in try block)
let mergedOptions: NextOptions = {
...parentOpts,
...options,
projectRoot: '' // Will be set in try block
};
const formatter = new OutputFormatter(
options.json || parentOpts?.json || false
);
try {
// Resolve project root inside try block to catch any errors
const projectRoot = getProjectRoot(
options.projectRoot || parentOpts?.projectRoot
);
// Update mergedOptions with resolved project root
mergedOptions = {
...mergedOptions,
projectRoot
};
// Check for workflow state
const hasState = await hasWorkflowState(mergedOptions.projectRoot!);
if (!hasState) {
formatter.error('No active workflow', {
suggestion: 'Start a workflow with: autopilot start <taskId>'
});
process.exit(1);
}
// Load state
const state = await loadWorkflowState(mergedOptions.projectRoot!);
if (!state) {
formatter.error('Failed to load workflow state');
process.exit(1);
}
// Restore orchestrator
const orchestrator = new WorkflowOrchestrator(state.context);
orchestrator.restoreState(state);
// Get current phase and subtask
const phase = orchestrator.getCurrentPhase();
const tddPhase = orchestrator.getCurrentTDDPhase();
const currentSubtask = orchestrator.getCurrentSubtask();
// Determine next action based on phase
let actionType: string;
let actionDescription: string;
let actionDetails: Record<string, unknown> = {};
if (phase === 'COMPLETE') {
formatter.success('Workflow complete', {
message: 'All subtasks have been completed',
taskId: state.context.taskId
});
return;
}
if (phase === 'SUBTASK_LOOP' && tddPhase) {
switch (tddPhase) {
case 'RED':
actionType = 'generate_test';
actionDescription = 'Write failing test for current subtask';
actionDetails = {
subtask: currentSubtask
? {
id: currentSubtask.id,
title: currentSubtask.title,
attempts: currentSubtask.attempts
}
: null,
testCommand: 'npm test', // Could be customized based on config
expectedOutcome: 'Test should fail'
};
break;
case 'GREEN':
actionType = 'implement_code';
actionDescription = 'Implement code to pass the failing test';
actionDetails = {
subtask: currentSubtask
? {
id: currentSubtask.id,
title: currentSubtask.title,
attempts: currentSubtask.attempts
}
: null,
testCommand: 'npm test',
expectedOutcome: 'All tests should pass',
lastTestResults: state.context.lastTestResults
};
break;
case 'COMMIT':
actionType = 'commit_changes';
actionDescription = 'Commit the changes';
actionDetails = {
subtask: currentSubtask
? {
id: currentSubtask.id,
title: currentSubtask.title,
attempts: currentSubtask.attempts
}
: null,
suggestion: 'Use: autopilot commit'
};
break;
default:
actionType = 'unknown';
actionDescription = 'Unknown TDD phase';
}
} else {
actionType = 'workflow_phase';
actionDescription = `Currently in ${phase} phase`;
}
// Output next action
const output = {
action: actionType,
description: actionDescription,
phase,
tddPhase,
taskId: state.context.taskId,
branchName: state.context.branchName,
...actionDetails
};
if (mergedOptions.json) {
formatter.output(output);
} else {
formatter.success('Next action', output);
}
} catch (error) {
formatter.error((error as Error).message);
if (mergedOptions.verbose) {
console.error((error as Error).stack);
}
process.exit(1);
}
}
}
```
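`NextCommand` extends Commander's `Command` and reads `--json`, `--project-root`, and `--verbose` style options from its parent. A minimal sketch of how it might be mounted under an `autopilot` parent command follows; the parent's option definitions and the CLI name are assumptions, since the actual registration code is not shown here.
```typescript
// Illustrative wiring only; the real registration lives elsewhere in apps/cli.
import { Command } from 'commander';
import { NextCommand } from './next.command.js';

const autopilot = new Command('autopilot')
	.description('TDD workflow automation') // assumed description
	.option('--json', 'Output machine-readable JSON')
	.option('-p, --project-root <path>', 'Project root directory')
	.option('-v, --verbose', 'Verbose error output');

autopilot.addCommand(new NextCommand());

// program.addCommand(autopilot);
// Hypothetical invocation: task-master autopilot next --json
```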
--------------------------------------------------------------------------------
/packages/tm-core/src/modules/config/services/config-persistence.service.ts:
--------------------------------------------------------------------------------
```typescript
/**
* @fileoverview Configuration Persistence Service
* Handles saving and backup of configuration files
*/
import fs from 'node:fs/promises';
import path from 'node:path';
import {
ERROR_CODES,
TaskMasterError
} from '../../../common/errors/task-master-error.js';
import type { PartialConfiguration } from '../../../common/interfaces/configuration.interface.js';
import { getLogger } from '../../../common/logger/index.js';
/**
* Persistence options
*/
export interface PersistenceOptions {
/** Enable backup before saving */
createBackup?: boolean;
/** Maximum number of backups to keep */
maxBackups?: number;
/** Use atomic write operations */
atomic?: boolean;
}
/**
* ConfigPersistence handles all configuration file I/O operations
* Single responsibility: Configuration persistence
*/
export class ConfigPersistence {
private localConfigPath: string;
private backupDir: string;
private readonly logger = getLogger('ConfigPersistence');
constructor(projectRoot: string) {
this.localConfigPath = path.join(projectRoot, '.taskmaster', 'config.json');
this.backupDir = path.join(projectRoot, '.taskmaster', 'backups');
}
/**
* Save configuration to file
*/
async saveConfig(
config: PartialConfiguration,
options: PersistenceOptions = {}
): Promise<void> {
const { createBackup = false, atomic = true } = options;
try {
// Create backup if requested
if (createBackup && (await this.configExists())) {
await this.createBackup();
}
// Ensure directory exists
const configDir = path.dirname(this.localConfigPath);
await fs.mkdir(configDir, { recursive: true });
const jsonContent = JSON.stringify(config, null, 2);
if (atomic) {
// Atomic write: write to temp file then rename
const tempPath = `${this.localConfigPath}.tmp`;
await fs.writeFile(tempPath, jsonContent, 'utf-8');
await fs.rename(tempPath, this.localConfigPath);
} else {
// Direct write
await fs.writeFile(this.localConfigPath, jsonContent, 'utf-8');
}
} catch (error) {
throw new TaskMasterError(
'Failed to save configuration',
ERROR_CODES.CONFIG_ERROR,
{ configPath: this.localConfigPath },
error as Error
);
}
}
/**
* Create a backup of the current configuration
*/
private async createBackup(): Promise<string> {
try {
await fs.mkdir(this.backupDir, { recursive: true });
const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
const backupPath = path.join(this.backupDir, `config-${timestamp}.json`);
const configContent = await fs.readFile(this.localConfigPath, 'utf-8');
await fs.writeFile(backupPath, configContent, 'utf-8');
// Clean old backups
await this.cleanOldBackups();
return backupPath;
} catch (error) {
this.logger.warn('Failed to create backup:', error);
throw error;
}
}
/**
* Clean old backup files
*/
private async cleanOldBackups(maxBackups = 5): Promise<void> {
try {
const files = await fs.readdir(this.backupDir);
const backupFiles = files
.filter((f) => f.startsWith('config-') && f.endsWith('.json'))
.sort()
.reverse();
// Remove old backups
const toDelete = backupFiles.slice(maxBackups);
for (const file of toDelete) {
await fs.unlink(path.join(this.backupDir, file));
}
} catch (error) {
this.logger.warn('Failed to clean old backups:', error);
}
}
/**
* Check if config file exists
*/
async configExists(): Promise<boolean> {
try {
await fs.access(this.localConfigPath);
return true;
} catch {
return false;
}
}
/**
* Delete configuration file
*/
async deleteConfig(): Promise<void> {
try {
await fs.unlink(this.localConfigPath);
} catch (error: any) {
if (error.code !== 'ENOENT') {
throw new TaskMasterError(
'Failed to delete configuration',
ERROR_CODES.CONFIG_ERROR,
{ configPath: this.localConfigPath },
error
);
}
}
}
/**
* Get list of available backups
*/
async getBackups(): Promise<string[]> {
try {
const files = await fs.readdir(this.backupDir);
return files
.filter((f) => f.startsWith('config-') && f.endsWith('.json'))
.sort()
.reverse();
} catch {
return [];
}
}
/**
* Restore from a backup
*/
async restoreFromBackup(backupFile: string): Promise<void> {
const backupPath = path.join(this.backupDir, backupFile);
try {
const backupContent = await fs.readFile(backupPath, 'utf-8');
await fs.writeFile(this.localConfigPath, backupContent, 'utf-8');
} catch (error) {
throw new TaskMasterError(
'Failed to restore from backup',
ERROR_CODES.CONFIG_ERROR,
{ backupPath },
error as Error
);
}
}
}
```
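A minimal usage sketch of `ConfigPersistence`, assuming the service is imported directly from this module (the real package may re-export it elsewhere) and that the config shape matches the `models` example shown in the docs later on this page.
```typescript
// Usage sketch; the import path and config shape are illustrative.
import { ConfigPersistence } from './config-persistence.service.js';

async function saveWithBackup(projectRoot: string): Promise<void> {
	const persistence = new ConfigPersistence(projectRoot);

	// Atomic write to .taskmaster/config.json, taking a timestamped backup first if a config exists.
	await persistence.saveConfig(
		{ models: { main: { provider: 'claude-code', modelId: 'sonnet' } } },
		{ createBackup: true, atomic: true }
	);

	// Backups are listed newest-first; restoring one overwrites the current config.
	const backups = await persistence.getBackups();
	if (backups.length > 0) {
		// await persistence.restoreFromBackup(backups[0]);
	}
}
```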
--------------------------------------------------------------------------------
/scripts/modules/task-manager/add-subtask.js:
--------------------------------------------------------------------------------
```javascript
import { log, readJSON, writeJSON, getCurrentTag } from '../utils.js';
import { isTaskDependentOn } from '../task-manager.js';
/**
* Add a subtask to a parent task
* @param {string} tasksPath - Path to the tasks.json file
* @param {number|string} parentId - ID of the parent task
* @param {number|string|null} existingTaskId - ID of an existing task to convert to subtask (optional)
* @param {Object} newSubtaskData - Data for creating a new subtask (used if existingTaskId is null)
* @param {boolean} generateFiles - Whether to regenerate task files after adding the subtask
* @param {Object} context - Context object containing projectRoot and tag information
* @param {string} context.projectRoot - Project root path
* @param {string} context.tag - Tag for the task
* @returns {Object} The newly created or converted subtask
*/
async function addSubtask(
tasksPath,
parentId,
existingTaskId = null,
newSubtaskData = null,
generateFiles = false,
context = {}
) {
const { projectRoot, tag } = context;
try {
log('info', `Adding subtask to parent task ${parentId}...`);
// Read the existing tasks with proper context
const data = readJSON(tasksPath, projectRoot, tag);
if (!data || !data.tasks) {
throw new Error(`Invalid or missing tasks file at ${tasksPath}`);
}
// Convert parent ID to number
const parentIdNum = parseInt(parentId, 10);
// Find the parent task
const parentTask = data.tasks.find((t) => t.id === parentIdNum);
if (!parentTask) {
throw new Error(`Parent task with ID ${parentIdNum} not found`);
}
// Initialize subtasks array if it doesn't exist
if (!parentTask.subtasks) {
parentTask.subtasks = [];
}
let newSubtask;
// Case 1: Convert an existing task to a subtask
if (existingTaskId !== null) {
const existingTaskIdNum = parseInt(existingTaskId, 10);
// Find the existing task
const existingTaskIndex = data.tasks.findIndex(
(t) => t.id === existingTaskIdNum
);
if (existingTaskIndex === -1) {
throw new Error(`Task with ID ${existingTaskIdNum} not found`);
}
const existingTask = data.tasks[existingTaskIndex];
// Check if task is already a subtask
if (existingTask.parentTaskId) {
throw new Error(
`Task ${existingTaskIdNum} is already a subtask of task ${existingTask.parentTaskId}`
);
}
// Check for circular dependency
if (existingTaskIdNum === parentIdNum) {
throw new Error(`Cannot make a task a subtask of itself`);
}
// Check if parent task is a subtask of the task we're converting
// This would create a circular dependency
if (isTaskDependentOn(data.tasks, parentTask, existingTaskIdNum)) {
throw new Error(
`Cannot create circular dependency: task ${parentIdNum} is already a subtask or dependent of task ${existingTaskIdNum}`
);
}
// Find the highest subtask ID to determine the next ID
const highestSubtaskId =
parentTask.subtasks.length > 0
? Math.max(...parentTask.subtasks.map((st) => st.id))
: 0;
const newSubtaskId = highestSubtaskId + 1;
// Clone the existing task to be converted to a subtask
newSubtask = {
...existingTask,
id: newSubtaskId,
parentTaskId: parentIdNum
};
// Add to parent's subtasks
parentTask.subtasks.push(newSubtask);
// Remove the task from the main tasks array
data.tasks.splice(existingTaskIndex, 1);
log(
'info',
`Converted task ${existingTaskIdNum} to subtask ${parentIdNum}.${newSubtaskId}`
);
}
// Case 2: Create a new subtask
else if (newSubtaskData) {
// Find the highest subtask ID to determine the next ID
const highestSubtaskId =
parentTask.subtasks.length > 0
? Math.max(...parentTask.subtasks.map((st) => st.id))
: 0;
const newSubtaskId = highestSubtaskId + 1;
// Create the new subtask object
newSubtask = {
id: newSubtaskId,
title: newSubtaskData.title,
description: newSubtaskData.description || '',
details: newSubtaskData.details || '',
status: newSubtaskData.status || 'pending',
dependencies: newSubtaskData.dependencies || [],
parentTaskId: parentIdNum
};
// Add to parent's subtasks
parentTask.subtasks.push(newSubtask);
log('info', `Created new subtask ${parentIdNum}.${newSubtaskId}`);
} else {
throw new Error(
'Either existingTaskId or newSubtaskData must be provided'
);
}
// Write the updated tasks back to the file with proper context
writeJSON(tasksPath, data, projectRoot, tag);
// Note: Task file generation is no longer supported and has been removed
return newSubtask;
} catch (error) {
log('error', `Error adding subtask: ${error.message}`);
throw error;
}
}
export default addSubtask;
```
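Two call shapes follow from the JSDoc above: creating a fresh subtask, or converting an existing top-level task. The sketch below uses placeholder paths, IDs, and tag values.
```typescript
// Usage sketch based on the JSDoc signature above; paths, IDs, and tag are placeholders.
import addSubtask from './add-subtask.js';

async function example() {
	// Case 2: create a brand-new subtask under parent task 12
	const created = await addSubtask(
		'/path/to/project/.taskmaster/tasks/tasks.json', // tasksPath
		12, // parentId
		null, // existingTaskId (null = create new)
		{
			title: 'Write integration tests',
			description: 'Cover the happy path and error cases',
			dependencies: []
		},
		false, // generateFiles (file generation is no longer supported)
		{ projectRoot: '/path/to/project', tag: 'master' }
	);

	// Case 1: convert existing task 7 into a subtask of task 12
	const converted = await addSubtask(
		'/path/to/project/.taskmaster/tasks/tasks.json',
		12,
		7,
		null,
		false,
		{ projectRoot: '/path/to/project', tag: 'master' }
	);

	return { created, converted };
}
```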
--------------------------------------------------------------------------------
/tests/unit/profiles/profile-safety-check.test.js:
--------------------------------------------------------------------------------
```javascript
import {
getInstalledProfiles,
wouldRemovalLeaveNoProfiles
} from '../../../src/utils/profiles.js';
import { rulesDirect } from '../../../mcp-server/src/core/direct-functions/rules.js';
import fs from 'fs';
import path from 'path';
import { jest } from '@jest/globals';
// Mock logger
const mockLog = {
info: jest.fn(),
error: jest.fn(),
debug: jest.fn()
};
describe('Rules Safety Check', () => {
let mockExistsSync;
let mockRmSync;
let mockReaddirSync;
beforeEach(() => {
jest.clearAllMocks();
// Set up spies on fs methods
mockExistsSync = jest.spyOn(fs, 'existsSync');
mockRmSync = jest.spyOn(fs, 'rmSync').mockImplementation(() => {});
mockReaddirSync = jest.spyOn(fs, 'readdirSync').mockReturnValue([]);
});
afterEach(() => {
// Restore all mocked functions
jest.restoreAllMocks();
});
describe('getInstalledProfiles', () => {
it('should detect installed profiles correctly', () => {
const projectRoot = '/test/project';
// Mock fs.existsSync to simulate installed profiles
mockExistsSync.mockImplementation((filePath) => {
if (filePath.includes('.cursor') || filePath.includes('.roo')) {
return true;
}
return false;
});
const installed = getInstalledProfiles(projectRoot);
expect(installed).toContain('cursor');
expect(installed).toContain('roo');
expect(installed).not.toContain('windsurf');
expect(installed).not.toContain('cline');
});
it('should return empty array when no profiles are installed', () => {
const projectRoot = '/test/project';
// Mock fs.existsSync to return false for all paths
mockExistsSync.mockReturnValue(false);
const installed = getInstalledProfiles(projectRoot);
expect(installed).toEqual([]);
});
});
describe('wouldRemovalLeaveNoProfiles', () => {
it('should return true when removing all installed profiles', () => {
const projectRoot = '/test/project';
// Mock fs.existsSync to simulate cursor and roo installed
mockExistsSync.mockImplementation((filePath) => {
return filePath.includes('.cursor') || filePath.includes('.roo');
});
const result = wouldRemovalLeaveNoProfiles(projectRoot, [
'cursor',
'roo'
]);
expect(result).toBe(true);
});
it('should return false when removing only some profiles', () => {
const projectRoot = '/test/project';
// Mock fs.existsSync to simulate cursor and roo installed
mockExistsSync.mockImplementation((filePath) => {
return filePath.includes('.cursor') || filePath.includes('.roo');
});
const result = wouldRemovalLeaveNoProfiles(projectRoot, ['roo']);
expect(result).toBe(false);
});
it('should return false when no profiles are currently installed', () => {
const projectRoot = '/test/project';
// Mock fs.existsSync to return false for all paths
mockExistsSync.mockReturnValue(false);
const result = wouldRemovalLeaveNoProfiles(projectRoot, ['cursor']);
expect(result).toBe(false);
});
});
describe('MCP Safety Check Integration', () => {
it('should block removal of all profiles without force', async () => {
const projectRoot = '/test/project';
// Mock fs.existsSync to simulate installed profiles
mockExistsSync.mockImplementation((filePath) => {
return filePath.includes('.cursor') || filePath.includes('.roo');
});
const result = await rulesDirect(
{
action: 'remove',
profiles: ['cursor', 'roo'],
projectRoot,
force: false
},
mockLog
);
expect(result.success).toBe(false);
expect(result.error.code).toBe('CRITICAL_REMOVAL_BLOCKED');
expect(result.error.message).toContain('CRITICAL');
});
it('should allow removal of all profiles with force', async () => {
const projectRoot = '/test/project';
// Mock fs.existsSync and other file operations for successful removal
mockExistsSync.mockReturnValue(true);
const result = await rulesDirect(
{
action: 'remove',
profiles: ['cursor', 'roo'],
projectRoot,
force: true
},
mockLog
);
expect(result.success).toBe(true);
expect(result.data).toBeDefined();
});
it('should allow partial removal without force', async () => {
const projectRoot = '/test/project';
// Mock fs.existsSync to simulate multiple profiles installed
mockExistsSync.mockImplementation((filePath) => {
return (
filePath.includes('.cursor') ||
filePath.includes('.roo') ||
filePath.includes('.windsurf')
);
});
const result = await rulesDirect(
{
action: 'remove',
profiles: ['roo'], // Only removing one profile
projectRoot,
force: false
},
mockLog
);
expect(result.success).toBe(true);
expect(result.data).toBeDefined();
});
});
});
```
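The tests above pin down the expected behavior of `wouldRemovalLeaveNoProfiles`: it should return true only when at least one profile is installed and every installed profile is being removed. The sketch below restates that contract as code; it is inferred from the tests, not copied from `src/utils/profiles.js`, and it takes the installed list directly instead of a project root.
```typescript
// Behavioral sketch inferred from the tests; not the real implementation in src/utils/profiles.js.
function wouldRemovalLeaveNoProfilesSketch(
	installedProfiles: string[],
	profilesToRemove: string[]
): boolean {
	// Nothing installed -> removal cannot leave the project without profiles
	if (installedProfiles.length === 0) return false;

	// True only when every installed profile is slated for removal
	return installedProfiles.every((profile) => profilesToRemove.includes(profile));
}

// Mirrors the expectations above:
// (['cursor', 'roo'], ['cursor', 'roo']) -> true
// (['cursor', 'roo'], ['roo'])           -> false
// ([], ['cursor'])                       -> false
```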
--------------------------------------------------------------------------------
/docs/examples/claude-code-usage.md:
--------------------------------------------------------------------------------
```markdown
# Claude Code Provider Usage Example
The Claude Code provider allows you to use Claude models through the Claude Code CLI without requiring an API key.
## Configuration
To use the Claude Code provider, update your `.taskmaster/config.json`:
```json
{
"models": {
"main": {
"provider": "claude-code",
"modelId": "sonnet",
"maxTokens": 64000,
"temperature": 0.2
},
"research": {
"provider": "claude-code",
"modelId": "opus",
"maxTokens": 32000,
"temperature": 0.1
},
"fallback": {
"provider": "claude-code",
"modelId": "sonnet",
"maxTokens": 64000,
"temperature": 0.2
}
}
}
```
## Available Models
- `opus` - Claude Opus model (SWE score: 0.725)
- `sonnet` - Claude Sonnet model (SWE score: 0.727)
## Usage
Once configured, you can use Claude Code with all Task Master commands:
```bash
# Generate tasks from a PRD
task-master parse-prd --input=prd.txt
# Analyze project complexity
task-master analyze-complexity
# Show the next task to work on
task-master next
# View a specific task
task-master show task-001
# Update task status
task-master set-status --id=task-001 --status=in-progress
```
## Requirements
1. Claude Code CLI must be installed and authenticated on your system
2. Install the optional `@anthropic-ai/claude-code` package if you enable this provider:
```bash
npm install @anthropic-ai/claude-code
```
3. Run Claude Code for the first time and authenticate with your Anthropic account:
```bash
claude
```
4. No API key is required in your environment variables or MCP configuration
## Advanced Settings
The Claude Code SDK supports additional settings that provide fine-grained control over Claude's behavior. These settings are implemented in the underlying SDK (`src/ai-providers/custom-sdk/claude-code/`), and can be managed through Task Master's configuration file.
### Advanced Settings Usage
To update settings for Claude Code, edit your `.taskmaster/config.json`. Settings can be specified globally in the `claudeCode` section of the config, or on a per-command basis in the `commandSpecific` section:
```javascript
{
// "models" and "global" config...
"claudeCode": {
// Maximum conversation turns Claude can make in a single request
"maxTurns": 5,
// Custom system prompt to override Claude Code's default behavior
"customSystemPrompt": "You are a helpful assistant focused on code quality",
// Append additional content to the system prompt
"appendSystemPrompt": "Always follow coding best practices",
// Permission mode for file system operations
"permissionMode": "default", // Options: "default", "acceptEdits", "plan", "bypassPermissions"
// Explicitly allow only certain tools
"allowedTools": ["Read", "LS"], // Claude can only read files and list directories
// Explicitly disallow certain tools
"disallowedTools": ["Write", "Edit"], // Prevent Claude from modifying files
// MCP servers for additional tool integrations
"mcpServers": {
"mcp-server-name": {
"command": "npx",
"args": ["-y", "mcp-serve"],
"env": {
// ...
}
}
}
},
// Command-specific settings override global settings
"commandSpecific": {
"parse-prd": {
// Settings specific to the 'parse-prd' command
"maxTurns": 10,
"customSystemPrompt": "You are a task breakdown specialist"
},
"analyze-complexity": {
// Settings specific to the 'analyze-complexity' command
"maxTurns": 3,
"appendSystemPrompt": "Focus on identifying bottlenecks"
}
}
}
```
- For a full list of Claude Code settings, see the [Claude Code Settings documentation](https://docs.anthropic.com/en/docs/claude-code/settings).
- For a full list of AI-powered command names, see this file: `src/constants/commands.js`
### Why These Settings Matter
- **maxTurns**: Useful for complex refactoring tasks that require multiple iterations
- **customSystemPrompt**: Allows specializing Claude for specific domains or coding standards
- **appendSystemPrompt**: Useful for enforcing coding standards or providing additional context
- **permissionMode**: Critical for security in production environments
- **allowedTools/disallowedTools**: Enable read-only analysis modes or restrict access to sensitive operations
- **mcpServers**: Future extensibility for custom tool integrations
## Notes
- The Claude Code provider doesn't track usage costs (shown as 0 in telemetry)
- Session management is handled automatically for conversation continuity
- Some AI SDK parameters (temperature, maxTokens) are not supported by Claude Code CLI and will be ignored
```
--------------------------------------------------------------------------------
/mcp-server/src/core/direct-functions/next-task.js:
--------------------------------------------------------------------------------
```javascript
/**
* next-task.js
* Direct function implementation for finding the next task to work on
*/
import { findNextTask } from '../../../../scripts/modules/task-manager.js';
import {
readJSON,
readComplexityReport
} from '../../../../scripts/modules/utils.js';
import {
enableSilentMode,
disableSilentMode
} from '../../../../scripts/modules/utils.js';
/**
* Direct function wrapper for finding the next task to work on, with error handling.
*
* @param {Object} args - Command arguments
* @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.
* @param {string} args.reportPath - Path to the report file.
* @param {string} args.projectRoot - Project root path (for MCP/env fallback)
* @param {string} args.tag - Tag for the task (optional)
* @param {Object} log - Logger object
* @returns {Promise<Object>} - Next task result { success: boolean, data?: any, error?: { code: string, message: string } }
*/
export async function nextTaskDirect(args, log, context = {}) {
// Destructure expected args
const { tasksJsonPath, reportPath, projectRoot, tag } = args;
const { session } = context;
if (!tasksJsonPath) {
log.error('nextTaskDirect called without tasksJsonPath');
return {
success: false,
error: {
code: 'MISSING_ARGUMENT',
message: 'tasksJsonPath is required'
}
};
}
// Define the core action function
const coreNextTaskAction = async () => {
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
log.info(`Finding next task from ${tasksJsonPath}`);
// Read tasks data using the provided path
const data = readJSON(tasksJsonPath, projectRoot, tag);
if (!data || !data.tasks) {
disableSilentMode(); // Disable before return
return {
success: false,
error: {
code: 'INVALID_TASKS_FILE',
message: `No valid tasks found in ${tasksJsonPath}`
}
};
}
// Read the complexity report
const complexityReport = readComplexityReport(reportPath);
// Find the next task
const nextTask = findNextTask(data.tasks, complexityReport);
if (!nextTask) {
log.info(
'No eligible next task found. All tasks are either completed or have unsatisfied dependencies'
);
return {
success: true,
data: {
message:
'No eligible next task found. All tasks are either completed or have unsatisfied dependencies',
nextTask: null
}
};
}
// Check if it's a subtask
const isSubtask =
typeof nextTask.id === 'string' && nextTask.id.includes('.');
const taskOrSubtask = isSubtask ? 'subtask' : 'task';
const additionalAdvice = isSubtask
? 'Subtasks can be updated with timestamped details as you implement them. This is useful for tracking progress and recording milestones and insights (from successful attempts or successive failures while implementing the subtask). Research can be used when updating the subtask to collect up-to-date information, and can help resolve a recurring problem the agent is unable to solve. It is a good idea to get-task the parent task to collect the overall context of the task, and to get-task the subtask to collect its specific details.'
: 'Tasks can be updated to reflect a change in direction, or to reformulate the task per your prompt. Research can be used when updating the task to collect up-to-date information. It is best to update subtasks as you work on them, and to update the task for higher-level changes that may affect pending subtasks or the general direction of the task.';
// Restore normal logging
disableSilentMode();
// Return the next task data with the full tasks array for reference
log.info(
`Successfully found next task ${nextTask.id}: ${nextTask.title}. Is subtask: ${isSubtask}`
);
return {
success: true,
data: {
nextTask,
isSubtask,
nextSteps: `When ready to work on the ${taskOrSubtask}, use set-status to set the status to "in progress" ${additionalAdvice}`
}
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error finding next task: ${error.message}`);
return {
success: false,
error: {
code: 'CORE_FUNCTION_ERROR',
message: error.message || 'Failed to find next task'
}
};
}
};
// Execute the core action directly (no caching is applied here)
try {
const result = await coreNextTaskAction();
log.info('nextTaskDirect completed.');
return result;
} catch (error) {
log.error(`Unexpected error during nextTask: ${error.message}`);
return {
success: false,
error: {
code: 'UNEXPECTED_ERROR',
message: error.message
}
};
}
}
```
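A usage sketch of `nextTaskDirect`, mirroring the call made from the set-status direct function earlier on this page; the paths, tag, and logger are placeholders.
```typescript
// Usage sketch; paths and tag are placeholders, and the logger only needs info/error methods here.
import { nextTaskDirect } from './next-task.js';

async function suggestNextTask(log: {
	info: (msg: string) => void;
	error: (msg: string) => void;
}) {
	const result = await nextTaskDirect(
		{
			tasksJsonPath: '/path/to/project/.taskmaster/tasks/tasks.json',
			reportPath: '/path/to/project/.taskmaster/reports/task-complexity-report.json',
			projectRoot: '/path/to/project',
			tag: 'master'
		},
		log,
		{ session: undefined }
	);

	if (result.success && result.data.nextTask) {
		log.info(`Next up: ${result.data.nextTask.id} - ${result.data.nextTask.title}`);
	}
	return result;
}
```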
--------------------------------------------------------------------------------
/.github/workflows/claude-issue-triage.yml:
--------------------------------------------------------------------------------
```yaml
name: Claude Issue Triage
# description: Automatically triage GitHub issues using Claude Code
on:
issues:
types: [opened]
jobs:
triage-issue:
runs-on: ubuntu-latest
timeout-minutes: 10
permissions:
contents: read
issues: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Create triage prompt
run: |
mkdir -p /tmp/claude-prompts
cat > /tmp/claude-prompts/triage-prompt.txt << 'EOF'
You're an issue triage assistant for GitHub issues. Your task is to analyze the issue and select appropriate labels from the provided list.
IMPORTANT: Don't post any comments or messages to the issue. Your only action should be to apply labels.
Issue Information:
- REPO: ${{ github.repository }}
- ISSUE_NUMBER: ${{ github.event.issue.number }}
TASK OVERVIEW:
1. First, fetch the list of labels available in this repository by running: `gh label list`. Run exactly this command with nothing else.
2. Next, use the GitHub tools to get context about the issue:
- You have access to these tools:
- mcp__github__get_issue: Use this to retrieve the current issue's details including title, description, and existing labels
- mcp__github__get_issue_comments: Use this to read any discussion or additional context provided in the comments
- mcp__github__update_issue: Use this to apply labels to the issue (do not use this for commenting)
- mcp__github__search_issues: Use this to find similar issues that might provide context for proper categorization and to identify potential duplicate issues
- mcp__github__list_issues: Use this to understand patterns in how other issues are labeled
- Start by using mcp__github__get_issue to get the issue details
3. Analyze the issue content, considering:
- The issue title and description
- The type of issue (bug report, feature request, question, etc.)
- Technical areas mentioned
- Severity or priority indicators
- User impact
- Components affected
4. Select appropriate labels from the available labels list provided above:
- Choose labels that accurately reflect the issue's nature
- Be specific but comprehensive
- Select priority labels if you can determine urgency (high-priority, med-priority, or low-priority)
- Consider platform labels (android, ios) if applicable
- If you find similar issues using mcp__github__search_issues, consider using a "duplicate" label if appropriate. Only do so if the issue is a duplicate of another OPEN issue.
5. Apply the selected labels:
- Use mcp__github__update_issue to apply your selected labels
- DO NOT post any comments explaining your decision
- DO NOT communicate directly with users
- If no labels are clearly applicable, do not apply any labels
IMPORTANT GUIDELINES:
- Be thorough in your analysis
- Only select labels from the provided list above
- DO NOT post any comments to the issue
- Your ONLY action should be to apply labels using mcp__github__update_issue
- It's okay to not add any labels if none are clearly applicable
EOF
- name: Setup GitHub MCP Server
run: |
mkdir -p /tmp/mcp-config
cat > /tmp/mcp-config/mcp-servers.json << 'EOF'
{
"mcpServers": {
"github": {
"command": "docker",
"args": [
"run",
"-i",
"--rm",
"-e",
"GITHUB_PERSONAL_ACCESS_TOKEN",
"ghcr.io/github/github-mcp-server:sha-7aced2b"
],
"env": {
"GITHUB_PERSONAL_ACCESS_TOKEN": "${{ secrets.GITHUB_TOKEN }}"
}
}
}
}
EOF
- name: Run Claude Code for Issue Triage
uses: anthropics/claude-code-base-action@beta
with:
prompt_file: /tmp/claude-prompts/triage-prompt.txt
allowed_tools: "Bash(gh label list),mcp__github__get_issue,mcp__github__get_issue_comments,mcp__github__update_issue,mcp__github__search_issues,mcp__github__list_issues"
timeout_minutes: "5"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
mcp_config: /tmp/mcp-config/mcp-servers.json
claude_env: |
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
--------------------------------------------------------------------------------
/packages/tm-core/src/common/utils/project-root-finder.ts:
--------------------------------------------------------------------------------
```typescript
/**
* @fileoverview Project root detection utilities
* Provides functionality to locate project roots by searching for marker files/directories
*/
import path from 'node:path';
import fs from 'node:fs';
import {
TASKMASTER_PROJECT_MARKERS,
OTHER_PROJECT_MARKERS
} from '../constants/paths.js';
/**
* Find the project root directory by looking for project markers
* Traverses upwards from startDir until a project marker is found or filesystem root is reached
* Limited to 50 parent directory levels to prevent excessive traversal
*
* Strategy: First searches ALL parent directories for .taskmaster (highest priority).
* If not found, then searches for other project markers starting from current directory.
* This ensures .taskmaster in parent directories takes precedence over other markers in subdirectories.
*
* @param startDir - Directory to start searching from (defaults to process.cwd())
* @returns Project root path (falls back to current directory if no markers found)
*
* @example
* ```typescript
* // In a monorepo structure:
* // /project/.taskmaster
* // /project/packages/my-package/.git
* // When called from /project/packages/my-package:
* const root = findProjectRoot(); // Returns /project (not /project/packages/my-package)
* ```
*/
export function findProjectRoot(startDir: string = process.cwd()): string {
let currentDir = path.resolve(startDir);
const rootDir = path.parse(currentDir).root;
const maxDepth = 50; // Reasonable limit to prevent infinite loops
let depth = 0;
// FIRST PASS: Traverse ALL parent directories looking ONLY for Task Master markers
// This ensures that a .taskmaster in a parent directory takes precedence over
// other project markers (like .git, go.mod, etc.) in subdirectories
let searchDir = currentDir;
depth = 0;
while (depth < maxDepth) {
for (const marker of TASKMASTER_PROJECT_MARKERS) {
const markerPath = path.join(searchDir, marker);
try {
if (fs.existsSync(markerPath)) {
// Found a Task Master marker - this is our project root
return searchDir;
}
} catch (error) {
// Ignore permission errors and continue searching
continue;
}
}
// If we're at root, stop after checking it
if (searchDir === rootDir) {
break;
}
// Move up one directory level
const parentDir = path.dirname(searchDir);
// Safety check: if dirname returns the same path, we've hit the root
if (parentDir === searchDir) {
break;
}
searchDir = parentDir;
depth++;
}
// SECOND PASS: No Task Master markers found in any parent directory
// Now search for other project markers starting from the original directory
currentDir = path.resolve(startDir);
depth = 0;
while (depth < maxDepth) {
for (const marker of OTHER_PROJECT_MARKERS) {
const markerPath = path.join(currentDir, marker);
try {
if (fs.existsSync(markerPath)) {
// Found another project marker - return this as project root
return currentDir;
}
} catch (error) {
// Ignore permission errors and continue searching
continue;
}
}
// If we're at root, stop after checking it
if (currentDir === rootDir) {
break;
}
// Move up one directory level
const parentDir = path.dirname(currentDir);
// Safety check: if dirname returns the same path, we've hit the root
if (parentDir === currentDir) {
break;
}
currentDir = parentDir;
depth++;
}
// Fallback to current working directory if no project root found
// This ensures the function always returns a valid, existing path
return process.cwd();
}
/**
* Normalize project root to ensure it doesn't end with .taskmaster
* This prevents double .taskmaster paths when using constants that include .taskmaster
*
* @param projectRoot - The project root path to normalize
* @returns Normalized project root path
*
* @example
* ```typescript
* normalizeProjectRoot('/project/.taskmaster'); // Returns '/project'
* normalizeProjectRoot('/project'); // Returns '/project'
* normalizeProjectRoot('/project/.taskmaster/tasks'); // Returns '/project'
* ```
*/
export function normalizeProjectRoot(
projectRoot: string | null | undefined
): string {
if (!projectRoot) return projectRoot || '';
// Ensure it's a string
const projectRootStr = String(projectRoot);
// Split the path into segments
const segments = projectRootStr.split(path.sep);
// Find the index of .taskmaster segment
const taskmasterIndex = segments.findIndex(
(segment) => segment === '.taskmaster'
);
if (taskmasterIndex !== -1) {
// If .taskmaster is found, return everything up to but not including .taskmaster
const normalizedSegments = segments.slice(0, taskmasterIndex);
return normalizedSegments.join(path.sep) || path.sep;
}
return projectRootStr;
}
```
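The two exports are typically used together: resolve a root, then normalize any caller-supplied path that may already point inside `.taskmaster`. A short sketch, assuming POSIX-style separators for the inline results:
```typescript
// Combined usage sketch; POSIX separators assumed for the inline comments.
import { findProjectRoot, normalizeProjectRoot } from './project-root-finder.js';

// Walks up from process.cwd(), preferring a .taskmaster marker over .git, go.mod, etc.
const root = findProjectRoot();

// Strips a trailing .taskmaster segment (and anything after it) from caller-supplied paths.
const safeRoot = normalizeProjectRoot(`${root}/.taskmaster/tasks`); // -> same value as `root`
```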
--------------------------------------------------------------------------------
/apps/cli/src/commands/autopilot/complete.command.ts:
--------------------------------------------------------------------------------
```typescript
/**
* @fileoverview Complete Command - Complete current TDD phase with validation
*/
import { type TestResult, WorkflowOrchestrator } from '@tm/core';
import { Command } from 'commander';
import { getProjectRoot } from '../../utils/project-root.js';
import {
type AutopilotBaseOptions,
OutputFormatter,
hasWorkflowState,
loadWorkflowState
} from './shared.js';
interface CompleteOptions extends AutopilotBaseOptions {
results?: string;
coverage?: string;
}
/**
* Complete Command - Mark current phase as complete with validation
*/
export class CompleteCommand extends Command {
constructor() {
super('complete');
this.description('Complete the current TDD phase with result validation')
.option(
'-r, --results <json>',
'Test results JSON (with total, passed, failed, skipped)'
)
.option('-c, --coverage <percent>', 'Coverage percentage')
.action(async (options: CompleteOptions) => {
await this.execute(options);
});
}
private async execute(options: CompleteOptions): Promise<void> {
// Inherit parent options
const parentOpts = this.parent?.opts() as AutopilotBaseOptions;
const mergedOptions: CompleteOptions = {
...parentOpts,
...options,
projectRoot: getProjectRoot(
options.projectRoot || parentOpts?.projectRoot
)
};
const formatter = new OutputFormatter(mergedOptions.json || false);
try {
// Check for workflow state
const hasState = await hasWorkflowState(mergedOptions.projectRoot!);
if (!hasState) {
formatter.error('No active workflow', {
suggestion: 'Start a workflow with: autopilot start <taskId>'
});
process.exit(1);
}
// Load state
const state = await loadWorkflowState(mergedOptions.projectRoot!);
if (!state) {
formatter.error('Failed to load workflow state');
process.exit(1);
}
// Restore orchestrator with persistence
const { saveWorkflowState } = await import('./shared.js');
const orchestrator = new WorkflowOrchestrator(state.context);
orchestrator.restoreState(state);
orchestrator.enableAutoPersist(async (newState) => {
await saveWorkflowState(mergedOptions.projectRoot!, newState);
});
// Get current phase
const tddPhase = orchestrator.getCurrentTDDPhase();
const currentSubtask = orchestrator.getCurrentSubtask();
if (!tddPhase) {
formatter.error('Not in a TDD phase', {
phase: orchestrator.getCurrentPhase()
});
process.exit(1);
}
// Validate based on phase
if (tddPhase === 'RED' || tddPhase === 'GREEN') {
if (!mergedOptions.results) {
formatter.error('Test results required for RED/GREEN phase', {
usage:
'--results \'{"total":10,"passed":9,"failed":1,"skipped":0}\''
});
process.exit(1);
}
// Parse test results
let testResults: TestResult;
try {
const parsed = JSON.parse(mergedOptions.results);
testResults = {
total: parsed.total || 0,
passed: parsed.passed || 0,
failed: parsed.failed || 0,
skipped: parsed.skipped || 0,
phase: tddPhase
};
} catch (error) {
formatter.error('Invalid test results JSON', {
error: (error as Error).message
});
process.exit(1);
}
// Validate RED phase requirements
if (tddPhase === 'RED' && testResults.failed === 0) {
formatter.error('RED phase validation failed', {
reason: 'At least one test must be failing',
actual: {
passed: testResults.passed,
failed: testResults.failed
}
});
process.exit(1);
}
// Validate GREEN phase requirements
if (tddPhase === 'GREEN' && testResults.failed !== 0) {
formatter.error('GREEN phase validation failed', {
reason: 'All tests must pass',
actual: {
passed: testResults.passed,
failed: testResults.failed
}
});
process.exit(1);
}
// Complete phase with test results
if (tddPhase === 'RED') {
orchestrator.transition({
type: 'RED_PHASE_COMPLETE',
testResults
});
formatter.success('RED phase completed', {
nextPhase: 'GREEN',
testResults,
subtask: currentSubtask?.title
});
} else {
orchestrator.transition({
type: 'GREEN_PHASE_COMPLETE',
testResults
});
formatter.success('GREEN phase completed', {
nextPhase: 'COMMIT',
testResults,
subtask: currentSubtask?.title,
suggestion: 'Run: autopilot commit'
});
}
} else if (tddPhase === 'COMMIT') {
formatter.error('Use "autopilot commit" to complete COMMIT phase');
process.exit(1);
}
} catch (error) {
formatter.error((error as Error).message);
if (mergedOptions.verbose) {
console.error((error as Error).stack);
}
process.exit(1);
}
}
}
```
--------------------------------------------------------------------------------
/mcp-server/src/tools/analyze.js:
--------------------------------------------------------------------------------
```javascript
/**
* tools/analyze.js
* Tool for analyzing task complexity and generating recommendations
*/
import { z } from 'zod';
import path from 'path';
import fs from 'fs'; // Import fs for directory check/creation
import {
handleApiResult,
createErrorResponse,
withNormalizedProjectRoot
} from './utils.js';
import { analyzeTaskComplexityDirect } from '../core/task-master-core.js'; // Assuming core functions are exported via task-master-core.js
import { findTasksPath } from '../core/utils/path-utils.js';
import { resolveTag } from '../../../scripts/modules/utils.js';
import { COMPLEXITY_REPORT_FILE } from '../../../src/constants/paths.js';
import { resolveComplexityReportOutputPath } from '../../../src/utils/path-utils.js';
/**
* Register the analyze_project_complexity tool
* @param {Object} server - FastMCP server instance
*/
export function registerAnalyzeProjectComplexityTool(server) {
server.addTool({
name: 'analyze_project_complexity',
description:
'Analyze task complexity and generate expansion recommendations.',
parameters: z.object({
threshold: z.coerce // Use coerce for number conversion from string if needed
.number()
.int()
.min(1)
.max(10)
.optional()
.default(5) // Default threshold
.describe('Complexity score threshold (1-10) to recommend expansion.'),
research: z
.boolean()
.optional()
.default(false)
.describe('Use Perplexity AI for research-backed analysis.'),
output: z
.string()
.optional()
.describe(
`Output file path relative to project root (default: ${COMPLEXITY_REPORT_FILE}).`
),
file: z
.string()
.optional()
.describe(
'Path to the tasks file relative to project root (default: tasks/tasks.json).'
),
ids: z
.string()
.optional()
.describe(
'Comma-separated list of task IDs to analyze specifically (e.g., "1,3,5").'
),
from: z.coerce
.number()
.int()
.positive()
.optional()
.describe('Starting task ID in a range to analyze.'),
to: z.coerce
.number()
.int()
.positive()
.optional()
.describe('Ending task ID in a range to analyze.'),
projectRoot: z
.string()
.describe('The directory of the project. Must be an absolute path.'),
tag: z.string().optional().describe('Tag context to operate on')
}),
execute: withNormalizedProjectRoot(async (args, { log, session }) => {
const toolName = 'analyze_project_complexity'; // Define tool name for logging
try {
log.info(
`Executing ${toolName} tool with args: ${JSON.stringify(args)}`
);
const resolvedTag = resolveTag({
projectRoot: args.projectRoot,
tag: args.tag
});
let tasksJsonPath;
try {
tasksJsonPath = findTasksPath(
{ projectRoot: args.projectRoot, file: args.file },
log
);
log.info(`${toolName}: Resolved tasks path: ${tasksJsonPath}`);
} catch (error) {
log.error(`${toolName}: Error finding tasks.json: ${error.message}`);
return createErrorResponse(
`Failed to find tasks.json within project root '${args.projectRoot}': ${error.message}`
);
}
const outputPath = resolveComplexityReportOutputPath(
args.output,
{
projectRoot: args.projectRoot,
tag: resolvedTag
},
log
);
log.info(`${toolName}: Report output path: ${outputPath}`);
// Ensure output directory exists
const outputDir = path.dirname(outputPath);
try {
if (!fs.existsSync(outputDir)) {
fs.mkdirSync(outputDir, { recursive: true });
log.info(`${toolName}: Created output directory: ${outputDir}`);
}
} catch (dirError) {
log.error(
`${toolName}: Failed to create output directory ${outputDir}: ${dirError.message}`
);
return createErrorResponse(
`Failed to create output directory: ${dirError.message}`
);
}
// 3. Call Direct Function - Pass projectRoot in first arg object
const result = await analyzeTaskComplexityDirect(
{
tasksJsonPath: tasksJsonPath,
outputPath: outputPath,
threshold: args.threshold,
research: args.research,
projectRoot: args.projectRoot,
tag: resolvedTag,
ids: args.ids,
from: args.from,
to: args.to
},
log,
{ session }
);
// 4. Handle Result
log.info(
`${toolName}: Direct function result: success=${result.success}`
);
return handleApiResult(
result,
log,
'Error analyzing task complexity',
undefined,
args.projectRoot
);
} catch (error) {
log.error(
`Critical error in ${toolName} tool execute: ${error.message}`
);
return createErrorResponse(
`Internal tool error (${toolName}): ${error.message}`
);
}
})
});
}
```
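For reference, the argument object an MCP client would pass to this tool looks like the sketch below. Field names come from the zod schema above; the values are placeholders, and either `ids` or a `from`/`to` range can be used to narrow the analysis.
```typescript
// Example arguments for analyze_project_complexity, matching the zod schema above (values are placeholders).
const analyzeArgs = {
	projectRoot: '/path/to/project', // required; must be an absolute path
	threshold: 6, // 1-10, defaults to 5
	research: true, // use Perplexity-backed analysis
	ids: '1,3,5', // analyze specific tasks...
	// from: 10,
	// to: 20,   // ...or a contiguous ID range instead
	tag: 'master' // optional tag context
};
```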
--------------------------------------------------------------------------------
/apps/cli/src/ui/layout/helpers.spec.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Layout helper utilities tests
* Tests for apps/cli/src/utils/layout/helpers.ts
*/
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import type { MockInstance } from 'vitest';
import { getBoxWidth } from './helpers.js';
describe('Layout Helpers', () => {
describe('getBoxWidth', () => {
let columnsSpy: MockInstance;
let originalDescriptor: PropertyDescriptor | undefined;
beforeEach(() => {
// Store original descriptor if it exists
originalDescriptor = Object.getOwnPropertyDescriptor(
process.stdout,
'columns'
);
// If columns doesn't exist or isn't a getter, define it as one
if (!originalDescriptor || !originalDescriptor.get) {
const currentValue = process.stdout.columns || 80;
Object.defineProperty(process.stdout, 'columns', {
get() {
return currentValue;
},
configurable: true
});
}
// Now spy on the getter
columnsSpy = vi.spyOn(process.stdout, 'columns', 'get');
});
afterEach(() => {
// Restore the spy
columnsSpy.mockRestore();
// Restore original descriptor or delete the property
if (originalDescriptor) {
Object.defineProperty(process.stdout, 'columns', originalDescriptor);
} else {
delete (process.stdout as any).columns;
}
});
it('should calculate width as percentage of terminal width', () => {
columnsSpy.mockReturnValue(100);
const width = getBoxWidth(0.9, 40);
expect(width).toBe(90);
});
it('should use default percentage of 0.9 when not specified', () => {
columnsSpy.mockReturnValue(100);
const width = getBoxWidth();
expect(width).toBe(90);
});
it('should use default minimum width of 40 when not specified', () => {
columnsSpy.mockReturnValue(30);
const width = getBoxWidth();
expect(width).toBe(40); // Should enforce minimum
});
it('should enforce minimum width when terminal is too narrow', () => {
columnsSpy.mockReturnValue(50);
const width = getBoxWidth(0.9, 60);
expect(width).toBe(60); // Should use minWidth instead of 45
});
it('should handle undefined process.stdout.columns', () => {
columnsSpy.mockReturnValue(undefined);
const width = getBoxWidth(0.9, 40);
// Should fall back to 80 columns: Math.floor(80 * 0.9) = 72
expect(width).toBe(72);
});
it('should handle custom percentage values', () => {
columnsSpy.mockReturnValue(100);
expect(getBoxWidth(0.95, 40)).toBe(95);
expect(getBoxWidth(0.8, 40)).toBe(80);
expect(getBoxWidth(0.5, 40)).toBe(50);
});
it('should handle custom minimum width values', () => {
columnsSpy.mockReturnValue(60);
expect(getBoxWidth(0.9, 70)).toBe(70); // 60 * 0.9 = 54, but min is 70
expect(getBoxWidth(0.9, 50)).toBe(54); // 60 * 0.9 = 54, min is 50
});
it('should floor the calculated width', () => {
columnsSpy.mockReturnValue(99);
const width = getBoxWidth(0.9, 40);
// 99 * 0.9 = 89.1, should floor to 89
expect(width).toBe(89);
});
it('should match warning box width calculation', () => {
// Test the specific case from displayWarning()
columnsSpy.mockReturnValue(80);
const width = getBoxWidth(0.9, 40);
expect(width).toBe(72);
});
it('should match table width calculation', () => {
// Test the specific case from createTaskTable()
columnsSpy.mockReturnValue(111);
const width = getBoxWidth(0.9, 100);
// 111 * 0.9 = 99.9, floor to 99, but max(99, 100) = 100
expect(width).toBe(100);
});
it('should match recommended task box width calculation', () => {
// Test the specific case from displayRecommendedNextTask()
columnsSpy.mockReturnValue(120);
const width = getBoxWidth(0.97, 40);
// 120 * 0.97 = 116.4, floor to 116
expect(width).toBe(116);
});
it('should handle edge case of zero terminal width', () => {
columnsSpy.mockReturnValue(0);
const width = getBoxWidth(0.9, 40);
// When columns is 0, it uses fallback of 80: Math.floor(80 * 0.9) = 72
expect(width).toBe(72);
});
it('should handle very large terminal widths', () => {
columnsSpy.mockReturnValue(1000);
const width = getBoxWidth(0.9, 40);
expect(width).toBe(900);
});
it('should handle very small percentages', () => {
columnsSpy.mockReturnValue(100);
const width = getBoxWidth(0.1, 5);
// 100 * 0.1 = 10, which is greater than min 5
expect(width).toBe(10);
});
it('should handle percentage of 1.0 (100%)', () => {
columnsSpy.mockReturnValue(80);
const width = getBoxWidth(1.0, 40);
expect(width).toBe(80);
});
it('should consistently return same value for same inputs', () => {
columnsSpy.mockReturnValue(100);
const width1 = getBoxWidth(0.9, 40);
const width2 = getBoxWidth(0.9, 40);
const width3 = getBoxWidth(0.9, 40);
expect(width1).toBe(width2);
expect(width2).toBe(width3);
});
});
});
```
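For readers of the spec only, a minimal implementation consistent with every expectation above would look like the sketch below; the real helper lives in `helpers.ts` and may differ in details.
```typescript
// Minimal sketch consistent with the tests above; the actual helpers.ts implementation may differ.
export function getBoxWidthSketch(percentage = 0.9, minWidth = 40): number {
	// Fall back to 80 columns when process.stdout.columns is undefined or 0
	const columns = process.stdout.columns || 80;
	return Math.max(Math.floor(columns * percentage), minWidth);
}
```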
--------------------------------------------------------------------------------
/apps/cli/tests/unit/commands/show.command.spec.ts:
--------------------------------------------------------------------------------
```typescript
/**
* @fileoverview Unit tests for ShowCommand
*/
import { describe, expect, it, vi, beforeEach, afterEach } from 'vitest';
import type { TmCore } from '@tm/core';
// Mock dependencies
vi.mock('@tm/core', () => ({
createTmCore: vi.fn()
}));
vi.mock('../../../src/utils/project-root.js', () => ({
getProjectRoot: vi.fn((path?: string) => path || '/test/project')
}));
vi.mock('../../../src/utils/error-handler.js', () => ({
displayError: vi.fn()
}));
vi.mock('../../../src/utils/display-helpers.js', () => ({
displayCommandHeader: vi.fn()
}));
vi.mock('../../../src/ui/components/task-detail.component.js', () => ({
displayTaskDetails: vi.fn()
}));
vi.mock('../../../src/utils/ui.js', () => ({
createTaskTable: vi.fn(() => 'Table output'),
displayWarning: vi.fn()
}));
import { ShowCommand } from '../../../src/commands/show.command.js';
describe('ShowCommand', () => {
let consoleLogSpy: any;
let mockTmCore: Partial<TmCore>;
beforeEach(() => {
consoleLogSpy = vi.spyOn(console, 'log').mockImplementation(() => {});
mockTmCore = {
tasks: {
get: vi.fn().mockResolvedValue({
task: {
id: '1',
title: 'Test Task',
status: 'pending',
description: 'Test description'
},
isSubtask: false
}),
getStorageType: vi.fn().mockReturnValue('json')
} as any,
config: {
getActiveTag: vi.fn().mockReturnValue('master')
} as any
};
});
afterEach(() => {
vi.clearAllMocks();
consoleLogSpy.mockRestore();
});
describe('JSON output format', () => {
it('should use JSON format when --json flag is set', async () => {
const command = new ShowCommand();
// Mock the tmCore initialization
(command as any).tmCore = mockTmCore;
// Execute with --json flag
await (command as any).executeCommand('1', {
id: '1',
json: true,
format: 'text' // Should be overridden by --json
});
// Verify JSON output was called
expect(consoleLogSpy).toHaveBeenCalled();
const output = consoleLogSpy.mock.calls[0][0];
// Should be valid JSON
expect(() => JSON.parse(output)).not.toThrow();
const parsed = JSON.parse(output);
expect(parsed).toHaveProperty('task');
expect(parsed).toHaveProperty('found');
expect(parsed).toHaveProperty('storageType');
});
it('should override --format when --json is set', async () => {
const command = new ShowCommand();
(command as any).tmCore = mockTmCore;
await (command as any).executeCommand('1', {
id: '1',
json: true,
format: 'text' // Should be overridden
});
// Should output JSON, not text format
const output = consoleLogSpy.mock.calls[0][0];
expect(() => JSON.parse(output)).not.toThrow();
});
it('should use text format when --json is not set', async () => {
const command = new ShowCommand();
(command as any).tmCore = mockTmCore;
await (command as any).executeCommand('1', {
id: '1',
format: 'text'
});
// Should use text format (not JSON)
// Text format will call displayCommandHeader and displayTaskDetails
// We just verify it was called (mocked functions)
expect(consoleLogSpy).toHaveBeenCalled();
});
it('should default to text format when neither flag is set', async () => {
const command = new ShowCommand();
(command as any).tmCore = mockTmCore;
await (command as any).executeCommand('1', {
id: '1'
});
// Should use text format by default
expect(consoleLogSpy).toHaveBeenCalled();
});
});
describe('format validation', () => {
it('should accept valid formats', () => {
const command = new ShowCommand();
expect((command as any).validateOptions({ format: 'text' })).toBe(true);
expect((command as any).validateOptions({ format: 'json' })).toBe(true);
});
it('should reject invalid formats', () => {
const consoleErrorSpy = vi
.spyOn(console, 'error')
.mockImplementation(() => {});
const command = new ShowCommand();
expect((command as any).validateOptions({ format: 'invalid' })).toBe(
false
);
expect(consoleErrorSpy).toHaveBeenCalledWith(
expect.stringContaining('Invalid format: invalid')
);
consoleErrorSpy.mockRestore();
});
});
describe('multiple task IDs', () => {
it('should handle comma-separated task IDs', async () => {
const command = new ShowCommand();
(command as any).tmCore = mockTmCore;
// Mock getMultipleTasks
const getMultipleTasksSpy = vi
.spyOn(command as any, 'getMultipleTasks')
.mockResolvedValue({
tasks: [
{ id: '1', title: 'Task 1' },
{ id: '2', title: 'Task 2' }
],
notFound: [],
storageType: 'json'
});
await (command as any).executeCommand('1,2', {
id: '1,2',
json: true
});
expect(getMultipleTasksSpy).toHaveBeenCalledWith(
['1', '2'],
expect.any(Object)
);
});
});
});
```
--------------------------------------------------------------------------------
/llms-install.md:
--------------------------------------------------------------------------------
```markdown
# Taskmaster AI Installation Guide
This guide helps AI assistants install and configure Taskmaster for users in their development projects.
## What is Taskmaster?
Taskmaster is an AI-driven task management system designed for development workflows. It helps break down projects into manageable tasks, track dependencies, and maintain development momentum through structured, AI-enhanced planning.
## Installation Steps
### Step 1: Add MCP Configuration
Add the following configuration to the user's MCP settings file (`.cursor/mcp.json` for Cursor, or equivalent for other editors):
```json
{
"mcpServers": {
"taskmaster-ai": {
"command": "npx",
"args": ["-y", "task-master-ai"],
"env": {
"ANTHROPIC_API_KEY": "user_will_add_their_key_here",
"PERPLEXITY_API_KEY": "user_will_add_their_key_here",
"OPENAI_API_KEY": "user_will_add_their_key_here",
"GOOGLE_API_KEY": "user_will_add_their_key_here",
"MISTRAL_API_KEY": "user_will_add_their_key_here",
"OPENROUTER_API_KEY": "user_will_add_their_key_here",
"XAI_API_KEY": "user_will_add_their_key_here"
}
}
}
}
```
### Step 2: API Key Requirements
Inform the user they need **at least one** API key from the following providers:
- **Anthropic** (for Claude models) - Recommended
- **OpenAI** (for GPT models)
- **Google** (for Gemini models)
- **Perplexity** (for research features) - Highly recommended
- **Mistral** (for Mistral models)
- **OpenRouter** (access to multiple models)
- **xAI** (for Grok models)
The user can define three separate roles: a main model for everyday AI operations, a research model (research-capable providers/models only), and a fallback model used if the main model errors. Each role can use the same provider or a different one.
### Step 3: Initialize Project
Once the MCP server is configured and API keys are added, initialize Taskmaster in the user's project:
> Can you initialize Task Master in my project?
This will run the `initialize_project` tool to set up the basic file structure.
### Step 4: Create Initial Tasks
Users have two options for creating initial tasks:
**Option A: Parse a PRD (Recommended)**
If they have a Product Requirements Document:
> Can you parse my PRD file at [path/to/prd.txt] to generate initial tasks?
If the user does not have a PRD, the AI agent can help them create one and store it in scripts/prd.txt for parsing.
**Option B: Start from scratch**
> Can you help me add my first task: [describe the task]
## Common Usage Patterns
### Daily Workflow
> What's the next task I should work on?
> Can you show me the details for task [ID]?
> Can you mark task [ID] as done?
### Task Management
> Can you break down task [ID] into subtasks?
> Can you add a new task: [description]
> Can you analyze the complexity of my tasks?
### Project Organization
> Can you show me all my pending tasks?
> Can you move task [ID] to become a subtask of [parent ID]?
> Can you update task [ID] with this new information: [details]
## Verification Steps
After installation, verify everything is working:
1. **Check MCP Connection**: The AI should be able to access Task Master tools
2. **Test Basic Commands**: Try `get_tasks` to list current tasks
3. **Verify API Keys**: Ensure AI-powered commands work (like `add_task`)
Note: An API key fallback exists that lets the MCP server read API keys from `.env` instead of the MCP JSON config. It is recommended to keep keys in both places in case the MCP server cannot read them from its environment for any reason.
When keys are added to `.env` only, the `models` tool will report that the keys are not set for MCP. Despite this, the fallback should kick in and the API keys will be read from the `.env` file.
## Troubleshooting
**If MCP server doesn't start:**
- Verify the JSON configuration is valid
- Check that Node.js is installed
- Ensure API keys are properly formatted
**If AI commands fail:**
- Verify at least one API key is configured
- Check API key permissions and quotas
- Try using a different model via the `models` tool
## CLI Fallback
Taskmaster is also available as a CLI; install it with `npm install task-master-ai@latest` in a terminal. Running `task-master help` shows all available commands, which offer a 1:1 experience with the MCP server. As the AI agent, refer to the system prompts and rules provided to you to identify Taskmaster-specific rules that explain how and when to use it.
## Next Steps
Once installed, users can:
- Create new tasks with `add-task` or parse a PRD (scripts/prd.txt) into tasks with `parse-prd`
- Set up model preferences with `models` tool
- Expand tasks into subtasks with `expand-all` and `expand-task`
- Explore advanced features like research mode and complexity analysis
For detailed documentation, refer to the Task Master docs directory.
```
--------------------------------------------------------------------------------
/packages/tm-core/src/modules/reports/managers/complexity-report-manager.ts:
--------------------------------------------------------------------------------
```typescript
/**
* @fileoverview ComplexityReportManager - Handles loading and managing complexity analysis reports
* Follows the same pattern as ConfigManager and AuthManager
*/
import fs from 'node:fs/promises';
import path from 'path';
import { getLogger } from '../../../common/logger/index.js';
import type {
ComplexityAnalysis,
ComplexityReport,
TaskComplexityData
} from '../types.js';
const logger = getLogger('ComplexityReportManager');
/**
* Manages complexity analysis reports
* Handles loading, caching, and providing complexity data for tasks
*/
export class ComplexityReportManager {
private projectRoot: string;
private reportCache: Map<string, ComplexityReport> = new Map();
constructor(projectRoot: string) {
this.projectRoot = projectRoot;
}
/**
* Get the path to the complexity report file for a given tag
*/
private getReportPath(tag?: string): string {
const reportsDir = path.join(this.projectRoot, '.taskmaster', 'reports');
const tagSuffix = tag && tag !== 'master' ? `_${tag}` : '';
return path.join(reportsDir, `task-complexity-report${tagSuffix}.json`);
}
/**
* Load complexity report for a given tag
* Results are cached to avoid repeated file reads
*/
async loadReport(tag?: string): Promise<ComplexityReport | null> {
const resolvedTag = tag || 'master';
const cacheKey = resolvedTag;
// Check cache first
if (this.reportCache.has(cacheKey)) {
return this.reportCache.get(cacheKey)!;
}
const reportPath = this.getReportPath(tag);
try {
// Check if file exists
await fs.access(reportPath);
// Read and parse the report
const content = await fs.readFile(reportPath, 'utf-8');
const report = JSON.parse(content) as ComplexityReport;
// Validate basic structure
if (!report.meta || !Array.isArray(report.complexityAnalysis)) {
logger.warn(
`Invalid complexity report structure at ${reportPath}, ignoring`
);
return null;
}
// Cache the report
this.reportCache.set(cacheKey, report);
logger.debug(
`Loaded complexity report for tag '${resolvedTag}' with ${report.complexityAnalysis.length} analyses`
);
return report;
} catch (error: any) {
if (error.code === 'ENOENT') {
// File doesn't exist - this is normal, not all projects have complexity reports
logger.debug(`No complexity report found for tag '${resolvedTag}'`);
return null;
}
// Other errors (parsing, permissions, etc.)
logger.warn(
`Failed to load complexity report for tag '${resolvedTag}': ${error.message}`
);
return null;
}
}
/**
* Get complexity data for a specific task ID
*/
async getComplexityForTask(
taskId: string | number,
tag?: string
): Promise<TaskComplexityData | null> {
const report = await this.loadReport(tag);
if (!report) {
return null;
}
// Find the analysis for this task
const analysis = report.complexityAnalysis.find(
(a) => String(a.taskId) === String(taskId)
);
if (!analysis) {
return null;
}
// Convert to TaskComplexityData format
return {
complexityScore: analysis.complexityScore,
recommendedSubtasks: analysis.recommendedSubtasks,
expansionPrompt: analysis.expansionPrompt,
complexityReasoning: analysis.complexityReasoning
};
}
/**
* Get complexity data for multiple tasks at once
* More efficient than calling getComplexityForTask multiple times
*/
async getComplexityForTasks(
taskIds: (string | number)[],
tag?: string
): Promise<Map<string, TaskComplexityData>> {
const result = new Map<string, TaskComplexityData>();
const report = await this.loadReport(tag);
if (!report) {
return result;
}
// Create a map for fast lookups
const analysisMap = new Map<string, ComplexityAnalysis>();
report.complexityAnalysis.forEach((analysis) => {
analysisMap.set(String(analysis.taskId), analysis);
});
// Map each task ID to its complexity data
taskIds.forEach((taskId) => {
const analysis = analysisMap.get(String(taskId));
if (analysis) {
result.set(String(taskId), {
complexityScore: analysis.complexityScore,
recommendedSubtasks: analysis.recommendedSubtasks,
expansionPrompt: analysis.expansionPrompt,
complexityReasoning: analysis.complexityReasoning
});
}
});
return result;
}
/**
* Clear the report cache
* @param tag - Specific tag to clear, or undefined to clear all cached reports
* Useful when reports are regenerated or modified externally
*/
clearCache(tag?: string): void {
if (tag) {
this.reportCache.delete(tag);
} else {
// Clear all cached reports
this.reportCache.clear();
}
}
/**
* Check if a complexity report exists for a tag
*/
async hasReport(tag?: string): Promise<boolean> {
const reportPath = this.getReportPath(tag);
try {
await fs.access(reportPath);
return true;
} catch {
return false;
}
}
}
```
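A minimal usage sketch for the manager above; the relative import path and project paths are illustrative, while the method names and return shape come from the class itself:
```typescript
import { ComplexityReportManager } from './complexity-report-manager.js';

async function printComplexity(projectRoot: string, taskId: string): Promise<void> {
	const manager = new ComplexityReportManager(projectRoot);

	// Reads .taskmaster/reports/task-complexity-report.json for the default/'master' tag,
	// or task-complexity-report_<tag>.json when a tag is passed; returns null if absent.
	const data = await manager.getComplexityForTask(taskId);
	if (!data) {
		console.log(`No complexity analysis found for task ${taskId}`);
		return;
	}

	console.log(`Task ${taskId} complexity score: ${data.complexityScore}`);
	console.log(`Recommended subtasks: ${data.recommendedSubtasks}`);

	// Drop cached reports if the analysis is regenerated externally.
	manager.clearCache();
}
```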
--------------------------------------------------------------------------------
/.github/scripts/backfill-duplicate-comments.mjs:
--------------------------------------------------------------------------------
```
#!/usr/bin/env node
async function githubRequest(endpoint, token, method = 'GET', body) {
const response = await fetch(`https://api.github.com${endpoint}`, {
method,
headers: {
Authorization: `Bearer ${token}`,
Accept: 'application/vnd.github.v3+json',
'User-Agent': 'backfill-duplicate-comments-script',
...(body && { 'Content-Type': 'application/json' })
},
...(body && { body: JSON.stringify(body) })
});
if (!response.ok) {
throw new Error(
`GitHub API request failed: ${response.status} ${response.statusText}`
);
}
return response.json();
}
async function triggerDedupeWorkflow(
owner,
repo,
issueNumber,
token,
dryRun = true
) {
if (dryRun) {
console.log(
`[DRY RUN] Would trigger dedupe workflow for issue #${issueNumber}`
);
return;
}
await githubRequest(
`/repos/${owner}/${repo}/actions/workflows/claude-dedupe-issues.yml/dispatches`,
token,
'POST',
{
ref: 'main',
inputs: {
issue_number: issueNumber.toString()
}
}
);
}
async function backfillDuplicateComments() {
console.log('[DEBUG] Starting backfill duplicate comments script');
const token = process.env.GITHUB_TOKEN;
if (!token) {
throw new Error(`GITHUB_TOKEN environment variable is required
Usage:
node .github/scripts/backfill-duplicate-comments.mjs
Environment Variables:
GITHUB_TOKEN - GitHub personal access token with repo and actions permissions (required)
DRY_RUN - Set to "false" to actually trigger workflows (default: true for safety)
DAYS_BACK - How many days back to look for old issues (default: 90)`);
}
console.log('[DEBUG] GitHub token found');
const owner = process.env.GITHUB_REPOSITORY_OWNER || 'eyaltoledano';
const repo = process.env.GITHUB_REPOSITORY_NAME || 'claude-task-master';
const dryRun = process.env.DRY_RUN !== 'false';
const daysBack = parseInt(process.env.DAYS_BACK || '90', 10);
console.log(`[DEBUG] Repository: ${owner}/${repo}`);
console.log(`[DEBUG] Dry run mode: ${dryRun}`);
console.log(`[DEBUG] Looking back ${daysBack} days`);
const cutoffDate = new Date();
cutoffDate.setDate(cutoffDate.getDate() - daysBack);
console.log(
`[DEBUG] Fetching issues created since ${cutoffDate.toISOString()}...`
);
const allIssues = [];
let page = 1;
const perPage = 100;
while (true) {
const pageIssues = await githubRequest(
`/repos/${owner}/${repo}/issues?state=all&per_page=${perPage}&page=${page}&since=${cutoffDate.toISOString()}`,
token
);
if (pageIssues.length === 0) break;
allIssues.push(...pageIssues);
page++;
// Safety limit to avoid infinite loops
if (page > 100) {
console.log('[DEBUG] Reached page limit, stopping pagination');
break;
}
}
console.log(
`[DEBUG] Found ${allIssues.length} issues from the last ${daysBack} days`
);
let processedCount = 0;
let candidateCount = 0;
let triggeredCount = 0;
for (const issue of allIssues) {
processedCount++;
console.log(
`[DEBUG] Processing issue #${issue.number} (${processedCount}/${allIssues.length}): ${issue.title}`
);
console.log(`[DEBUG] Fetching comments for issue #${issue.number}...`);
const comments = await githubRequest(
`/repos/${owner}/${repo}/issues/${issue.number}/comments`,
token
);
console.log(
`[DEBUG] Issue #${issue.number} has ${comments.length} comments`
);
// Look for existing duplicate detection comments (from the dedupe bot)
const dupeDetectionComments = comments.filter(
(comment) =>
comment.body.includes('Found') &&
comment.body.includes('possible duplicate') &&
comment.user.type === 'Bot'
);
console.log(
`[DEBUG] Issue #${issue.number} has ${dupeDetectionComments.length} duplicate detection comments`
);
// Skip if there's already a duplicate detection comment
if (dupeDetectionComments.length > 0) {
console.log(
`[DEBUG] Issue #${issue.number} already has duplicate detection comment, skipping`
);
continue;
}
candidateCount++;
const issueUrl = `https://github.com/${owner}/${repo}/issues/${issue.number}`;
try {
console.log(
`[INFO] ${dryRun ? '[DRY RUN] ' : ''}Triggering dedupe workflow for issue #${issue.number}: ${issueUrl}`
);
await triggerDedupeWorkflow(owner, repo, issue.number, token, dryRun);
if (!dryRun) {
console.log(
`[SUCCESS] Successfully triggered dedupe workflow for issue #${issue.number}`
);
}
triggeredCount++;
} catch (error) {
console.error(
`[ERROR] Failed to trigger workflow for issue #${issue.number}: ${error}`
);
}
// Add a delay between workflow triggers to avoid overwhelming the system
await new Promise((resolve) => setTimeout(resolve, 1000));
}
console.log(
`[DEBUG] Script completed. Processed ${processedCount} issues, found ${candidateCount} candidates without duplicate comments, ${dryRun ? 'would trigger' : 'triggered'} ${triggeredCount} workflows`
);
}
backfillDuplicateComments().catch(console.error);
```
--------------------------------------------------------------------------------
/tests/unit/scripts/modules/task-manager/add-subtask.test.js:
--------------------------------------------------------------------------------
```javascript
/**
* Tests for the addSubtask function
*/
import { jest } from '@jest/globals';
// Mock dependencies before importing the module
const mockUtils = {
readJSON: jest.fn(),
writeJSON: jest.fn(),
log: jest.fn(),
getCurrentTag: jest.fn()
};
const mockTaskManager = {
isTaskDependentOn: jest.fn()
};
const mockGenerateTaskFiles = jest.fn();
jest.unstable_mockModule(
'../../../../../scripts/modules/utils.js',
() => mockUtils
);
jest.unstable_mockModule(
'../../../../../scripts/modules/task-manager.js',
() => mockTaskManager
);
jest.unstable_mockModule(
'../../../../../scripts/modules/task-manager/generate-task-files.js',
() => ({
default: mockGenerateTaskFiles
})
);
const addSubtask = (
await import('../../../../../scripts/modules/task-manager/add-subtask.js')
).default;
describe('addSubtask function', () => {
const multiTagData = {
master: {
tasks: [{ id: 1, title: 'Master Task', subtasks: [] }],
metadata: { description: 'Master tasks' }
},
'feature-branch': {
tasks: [{ id: 1, title: 'Feature Task', subtasks: [] }],
metadata: { description: 'Feature tasks' }
}
};
beforeEach(() => {
jest.clearAllMocks();
mockTaskManager.isTaskDependentOn.mockReturnValue(false);
});
test('should add a new subtask and preserve other tags', async () => {
const context = { projectRoot: '/fake/root', tag: 'feature-branch' };
const newSubtaskData = { title: 'My New Subtask' };
mockUtils.readJSON.mockReturnValueOnce({
tasks: [{ id: 1, title: 'Feature Task', subtasks: [] }],
metadata: { description: 'Feature tasks' }
});
await addSubtask('tasks.json', '1', null, newSubtaskData, true, context);
expect(mockUtils.writeJSON).toHaveBeenCalledWith(
'tasks.json',
expect.any(Object),
'/fake/root',
'feature-branch'
);
const writtenData = mockUtils.writeJSON.mock.calls[0][1];
const parentTask = writtenData.tasks.find((t) => t.id === 1);
expect(parentTask.subtasks).toHaveLength(1);
expect(parentTask.subtasks[0].title).toBe('My New Subtask');
});
test('should add a new subtask to a parent task', async () => {
mockUtils.readJSON.mockReturnValueOnce({
tasks: [{ id: 1, title: 'Parent Task', subtasks: [] }]
});
const context = {};
const newSubtask = await addSubtask(
'tasks.json',
'1',
null,
{ title: 'New Subtask' },
true,
context
);
expect(newSubtask).toBeDefined();
expect(newSubtask.id).toBe(1);
expect(newSubtask.parentTaskId).toBe(1);
expect(mockUtils.writeJSON).toHaveBeenCalled();
const writeCallArgs = mockUtils.writeJSON.mock.calls[0][1]; // data is the second arg now
const parentTask = writeCallArgs.tasks.find((t) => t.id === 1);
expect(parentTask.subtasks).toHaveLength(1);
expect(parentTask.subtasks[0].title).toBe('New Subtask');
});
test('should convert an existing task to a subtask', async () => {
mockUtils.readJSON.mockReturnValueOnce({
tasks: [
{ id: 1, title: 'Parent Task', subtasks: [] },
{ id: 2, title: 'Existing Task 2', subtasks: [] }
]
});
const context = {};
const convertedSubtask = await addSubtask(
'tasks.json',
'1',
'2',
null,
true,
context
);
expect(convertedSubtask.id).toBe(1);
expect(convertedSubtask.parentTaskId).toBe(1);
expect(convertedSubtask.title).toBe('Existing Task 2');
expect(mockUtils.writeJSON).toHaveBeenCalled();
const writeCallArgs = mockUtils.writeJSON.mock.calls[0][1];
const parentTask = writeCallArgs.tasks.find((t) => t.id === 1);
expect(parentTask.subtasks).toHaveLength(1);
expect(parentTask.subtasks[0].title).toBe('Existing Task 2');
});
test('should throw an error if parent task does not exist', async () => {
mockUtils.readJSON.mockReturnValueOnce({
tasks: [{ id: 1, title: 'Task 1', subtasks: [] }]
});
const context = {};
await expect(
addSubtask(
'tasks.json',
'99',
null,
{ title: 'New Subtask' },
true,
context
)
).rejects.toThrow('Parent task with ID 99 not found');
});
test('should throw an error if trying to convert a non-existent task', async () => {
mockUtils.readJSON.mockReturnValueOnce({
tasks: [{ id: 1, title: 'Parent Task', subtasks: [] }]
});
const context = {};
await expect(
addSubtask('tasks.json', '1', '99', null, true, context)
).rejects.toThrow('Task with ID 99 not found');
});
test('should throw an error for circular dependency', async () => {
mockUtils.readJSON.mockReturnValueOnce({
tasks: [
{ id: 1, title: 'Parent Task', subtasks: [] },
{ id: 2, title: 'Child Task', subtasks: [] }
]
});
mockTaskManager.isTaskDependentOn.mockImplementation(
(tasks, parentTask, existingTaskIdNum) => {
return parentTask.id === 1 && existingTaskIdNum === 2;
}
);
const context = {};
await expect(
addSubtask('tasks.json', '1', '2', null, true, context)
).rejects.toThrow(
'Cannot create circular dependency: task 1 is already a subtask or dependent of task 2'
);
});
});
```
--------------------------------------------------------------------------------
/apps/docs/archive/ai-development-workflow.mdx:
--------------------------------------------------------------------------------
```markdown
---
title: "AI Development Workflow"
description: "Learn how Task Master and Cursor AI work together to streamline your development workflow"
---
<Tip>The Cursor agent is pre-configured (via the rules file) to follow this workflow</Tip>
<AccordionGroup>
<Accordion title="1. Task Discovery and Selection">
Ask the agent to list available tasks:
```
What tasks are available to work on next?
```
The agent will:
- Run `task-master list` to see all tasks
- Run `task-master next` to determine the next task to work on
- Analyze dependencies to determine which tasks are ready to be worked on
- Prioritize tasks based on priority level and ID order
- Suggest the next task(s) to implement
</Accordion>
<Accordion title="2. Task Implementation">
When implementing a task, the agent will:
- Reference the task's details section for implementation specifics
- Consider dependencies on previous tasks
- Follow the project's coding standards
- Create appropriate tests based on the task's testStrategy
You can ask:
```
Let's implement task 3. What does it involve?
```
</Accordion>
<Accordion title="3. Task Verification">
Before marking a task as complete, verify it according to:
- The task's specified testStrategy
- Any automated tests in the codebase
- Manual verification if required
</Accordion>
<Accordion title="4. Task Completion">
When a task is completed, tell the agent:
```
Task 3 is now complete. Please update its status.
```
The agent will execute:
```bash
task-master set-status --id=3 --status=done
```
</Accordion>
<Accordion title="5. Handling Implementation Drift">
If during implementation, you discover that:
- The current approach differs significantly from what was planned
- Future tasks need to be modified due to current implementation choices
- New dependencies or requirements have emerged
Tell the agent:
```
We've changed our approach. We're now using Express instead of Fastify. Please update all future tasks to reflect this change.
```
The agent will execute:
```bash
task-master update --from=4 --prompt="Now we are using Express instead of Fastify."
```
This will rewrite or re-scope subsequent tasks in tasks.json while preserving completed work.
</Accordion>
<Accordion title="6. Breaking Down Complex Tasks">
For complex tasks that need more granularity:
```
Task 5 seems complex. Can you break it down into subtasks?
```
The agent will execute:
```bash
task-master expand --id=5 --num=3
```
You can provide additional context:
```
Please break down task 5 with a focus on security considerations.
```
The agent will execute:
```bash
task-master expand --id=5 --prompt="Focus on security aspects"
```
You can also expand all pending tasks:
```
Please break down all pending tasks into subtasks.
```
The agent will execute:
```bash
task-master expand --all
```
For research-backed subtask generation using Perplexity AI:
```
Please break down task 5 using research-backed generation.
```
The agent will execute:
```bash
task-master expand --id=5 --research
```
</Accordion>
</AccordionGroup>
## Example Cursor AI Interactions
<AccordionGroup>
<Accordion title="Starting a new project">
```
I've just initialized a new project with Claude Task Master. I have a PRD at scripts/prd.txt.
Can you help me parse it and set up the initial tasks?
```
</Accordion>
<Accordion title="Working on tasks">
```
What's the next task I should work on? Please consider dependencies and priorities.
```
</Accordion>
<Accordion title="Implementing a specific task">
```
I'd like to implement task 4. Can you help me understand what needs to be done and how to approach it?
```
</Accordion>
<Accordion title="Managing subtasks">
```
I need to regenerate the subtasks for task 3 with a different approach. Can you help me clear and regenerate them?
```
</Accordion>
<Accordion title="Handling changes">
```
We've decided to use MongoDB instead of PostgreSQL. Can you update all future tasks to reflect this change?
```
</Accordion>
<Accordion title="Completing work">
```
I've finished implementing the authentication system described in task 2. All tests are passing.
Please mark it as complete and tell me what I should work on next.
```
</Accordion>
<Accordion title="Analyzing complexity">
```
Can you analyze the complexity of our tasks to help me understand which ones need to be broken down further?
```
</Accordion>
<Accordion title="Viewing complexity report">
```
Can you show me the complexity report in a more readable format?
```
</Accordion>
</AccordionGroup>
```
--------------------------------------------------------------------------------
/apps/extension/src/webview/reducers/appReducer.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Main application state reducer
*/
import type { AppState, AppAction } from '../types';
import { logger } from '../utils/logger';
export const appReducer = (state: AppState, action: AppAction): AppState => {
logger.debug(
'Reducer action:',
action.type,
'payload' in action ? action.payload : 'no payload'
);
switch (action.type) {
case 'SET_TASKS':
const newTasks = Array.isArray(action.payload) ? action.payload : [];
logger.debug('SET_TASKS reducer - updating tasks:', {
oldCount: state.tasks.length,
newCount: newTasks.length,
newTasks
});
return {
...state,
tasks: newTasks,
loading: false,
error: undefined
};
case 'SET_LOADING':
return { ...state, loading: action.payload };
case 'SET_ERROR':
return { ...state, error: action.payload, loading: false };
case 'CLEAR_ERROR':
return { ...state, error: undefined };
case 'INCREMENT_REQUEST_ID':
return { ...state, requestId: state.requestId + 1 };
case 'UPDATE_TASK_STATUS': {
const { taskId, newStatus } = action.payload;
return {
...state,
tasks: state.tasks.map((task) =>
task.id === taskId ? { ...task, status: newStatus } : task
)
};
}
case 'UPDATE_TASK_CONTENT': {
const { taskId, updates } = action.payload;
return {
...state,
tasks: state.tasks.map((task) =>
task.id === taskId ? { ...task, ...updates } : task
)
};
}
case 'SET_CONNECTION_STATUS':
return {
...state,
isConnected: action.payload.isConnected,
connectionStatus: action.payload.status
};
case 'SET_EDITING_TASK':
return {
...state,
editingTask: action.payload
};
case 'SET_POLLING_STATUS':
return {
...state,
polling: {
...state.polling,
isActive: action.payload.isActive,
errorCount: action.payload.errorCount ?? state.polling.errorCount,
lastUpdate: action.payload.isActive
? Date.now()
: state.polling.lastUpdate
}
};
case 'SET_USER_INTERACTING':
return {
...state,
polling: {
...state.polling,
isUserInteracting: action.payload
}
};
case 'TASKS_UPDATED_FROM_POLLING':
return {
...state,
tasks: Array.isArray(action.payload) ? action.payload : [],
polling: {
...state.polling,
lastUpdate: Date.now()
}
};
case 'SET_NETWORK_STATUS':
return {
...state,
polling: {
...state.polling,
isOfflineMode: action.payload.isOfflineMode,
connectionStatus: action.payload.connectionStatus,
reconnectAttempts:
action.payload.reconnectAttempts !== undefined
? action.payload.reconnectAttempts
: state.polling.reconnectAttempts,
maxReconnectAttempts:
action.payload.maxReconnectAttempts !== undefined
? action.payload.maxReconnectAttempts
: state.polling.maxReconnectAttempts,
lastSuccessfulConnection:
action.payload.lastSuccessfulConnection !== undefined
? action.payload.lastSuccessfulConnection
: state.polling.lastSuccessfulConnection
}
};
case 'LOAD_CACHED_TASKS':
return {
...state,
tasks: Array.isArray(action.payload) ? action.payload : []
};
case 'ADD_TOAST':
return {
...state,
toastNotifications: [...state.toastNotifications, action.payload]
};
case 'REMOVE_TOAST':
return {
...state,
toastNotifications: state.toastNotifications.filter(
(notification) => notification.id !== action.payload
)
};
case 'CLEAR_ALL_TOASTS':
return { ...state, toastNotifications: [] };
case 'NAVIGATE_TO_TASK':
logger.debug('📍 Reducer: Navigating to task:', action.payload);
return {
...state,
currentView: 'task-details',
selectedTaskId: action.payload
};
case 'NAVIGATE_TO_KANBAN':
logger.debug('📍 Reducer: Navigating to kanban');
return { ...state, currentView: 'kanban', selectedTaskId: undefined };
case 'NAVIGATE_TO_CONFIG':
logger.debug('📍 Reducer: Navigating to config');
return { ...state, currentView: 'config', selectedTaskId: undefined };
case 'SET_CURRENT_TAG':
return {
...state,
currentTag: action.payload
};
case 'SET_AVAILABLE_TAGS':
return {
...state,
availableTags: action.payload
};
case 'SET_TAG_DATA':
return {
...state,
currentTag: action.payload.currentTag,
availableTags: action.payload.availableTags
};
default:
return state;
}
};
export const initialState: AppState = {
tasks: [],
loading: true,
requestId: 0,
isConnected: false,
connectionStatus: 'Connecting...',
editingTask: { taskId: null },
polling: {
isActive: false,
errorCount: 0,
lastUpdate: undefined,
isUserInteracting: false,
isOfflineMode: false,
reconnectAttempts: 0,
maxReconnectAttempts: 0,
lastSuccessfulConnection: undefined,
connectionStatus: 'online'
},
toastNotifications: [],
currentView: 'kanban',
selectedTaskId: undefined,
// Tag-related state
currentTag: 'master',
availableTags: ['master']
};
```
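For orientation, a hedged sketch of wiring this reducer into the webview with React's `useReducer`; the component, import path, and the `'in-progress'` status value are assumptions, while the action shapes follow the cases above:
```typescript
import React, { useReducer } from 'react';
import { appReducer, initialState } from './reducers/appReducer';

// Illustrative container; the real webview also wires polling, VS Code messaging, etc.
export function AppContainer() {
	const [state, dispatch] = useReducer(appReducer, initialState);

	const markInProgress = (taskId: string) =>
		dispatch({
			type: 'UPDATE_TASK_STATUS',
			payload: { taskId, newStatus: 'in-progress' } // assumed to be a valid status value
		});

	return (
		<div>
			{state.loading ? 'Loading tasks…' : `${state.tasks.length} tasks in ${state.currentTag}`}
			{state.tasks[0] && (
				<button onClick={() => markInProgress(state.tasks[0].id)}>
					Start first task
				</button>
			)}
		</div>
	);
}
```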
--------------------------------------------------------------------------------
/mcp-server/src/core/direct-functions/add-tag.js:
--------------------------------------------------------------------------------
```javascript
/**
* add-tag.js
* Direct function implementation for creating a new tag
*/
import {
createTag,
createTagFromBranch
} from '../../../../scripts/modules/task-manager/tag-management.js';
import {
enableSilentMode,
disableSilentMode
} from '../../../../scripts/modules/utils.js';
import { createLogWrapper } from '../../tools/utils.js';
/**
* Direct function wrapper for creating a new tag with error handling.
*
* @param {Object} args - Command arguments
* @param {string} args.name - Name of the new tag to create
* @param {boolean} [args.copyFromCurrent=false] - Whether to copy tasks from current tag
* @param {string} [args.copyFromTag] - Specific tag to copy tasks from
* @param {boolean} [args.fromBranch=false] - Create tag name from current git branch
* @param {string} [args.description] - Optional description for the tag
* @param {string} [args.tasksJsonPath] - Path to the tasks.json file (resolved by tool)
* @param {string} [args.projectRoot] - Project root path
* @param {Object} log - Logger object
* @param {Object} context - Additional context (session)
* @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }
*/
export async function addTagDirect(args, log, context = {}) {
// Destructure expected args
const {
tasksJsonPath,
name,
copyFromCurrent = false,
copyFromTag,
fromBranch = false,
description,
projectRoot
} = args;
const { session } = context;
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Create logger wrapper using the utility
const mcpLog = createLogWrapper(log);
try {
// Check if tasksJsonPath was provided
if (!tasksJsonPath) {
log.error('addTagDirect called without tasksJsonPath');
disableSilentMode();
return {
success: false,
error: {
code: 'MISSING_ARGUMENT',
message: 'tasksJsonPath is required'
}
};
}
// Handle --from-branch option
if (fromBranch) {
log.info('Creating tag from current git branch');
// Import git utilities
const gitUtils = await import(
'../../../../scripts/modules/utils/git-utils.js'
);
// Check if we're in a git repository
if (!(await gitUtils.isGitRepository(projectRoot))) {
log.error('Not in a git repository');
disableSilentMode();
return {
success: false,
error: {
code: 'NOT_GIT_REPO',
message: 'Not in a git repository. Cannot use fromBranch option.'
}
};
}
// Get current git branch
const currentBranch = await gitUtils.getCurrentBranch(projectRoot);
if (!currentBranch) {
log.error('Could not determine current git branch');
disableSilentMode();
return {
success: false,
error: {
code: 'NO_CURRENT_BRANCH',
message: 'Could not determine current git branch.'
}
};
}
// Prepare options for branch-based tag creation
const branchOptions = {
copyFromCurrent,
copyFromTag,
description:
description || `Tag created from git branch "${currentBranch}"`
};
// Call the createTagFromBranch function
const result = await createTagFromBranch(
tasksJsonPath,
currentBranch,
branchOptions,
{
session,
mcpLog,
projectRoot
},
'json' // outputFormat - use 'json' to suppress CLI UI
);
// Restore normal logging
disableSilentMode();
return {
success: true,
data: {
branchName: result.branchName,
tagName: result.tagName,
created: result.created,
mappingUpdated: result.mappingUpdated,
message: `Successfully created tag "${result.tagName}" from git branch "${result.branchName}"`
}
};
} else {
// Check required parameters for regular tag creation
if (!name || typeof name !== 'string') {
log.error('Missing required parameter: name');
disableSilentMode();
return {
success: false,
error: {
code: 'MISSING_PARAMETER',
message: 'Tag name is required and must be a string'
}
};
}
log.info(`Creating new tag: ${name}`);
// Prepare options
const options = {
copyFromCurrent,
copyFromTag,
description
};
// Call the createTag function
const result = await createTag(
tasksJsonPath,
name,
options,
{
session,
mcpLog,
projectRoot
},
'json' // outputFormat - use 'json' to suppress CLI UI
);
// Restore normal logging
disableSilentMode();
return {
success: true,
data: {
tagName: result.tagName,
created: result.created,
tasksCopied: result.tasksCopied,
sourceTag: result.sourceTag,
description: result.description,
message: `Successfully created tag "${result.tagName}"`
}
};
}
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error in addTagDirect: ${error.message}`);
return {
success: false,
error: {
code: error.code || 'ADD_TAG_ERROR',
message: error.message
}
};
}
}
```
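A hedged sketch of invoking this direct function from custom tooling; the logger object, file paths, and import path are illustrative, while the argument and result shapes follow the JSDoc above:
```typescript
import { addTagDirect } from './mcp-server/src/core/direct-functions/add-tag.js';

// Minimal logger exposing the methods used above; the real MCP server supplies
// its own logger, which createLogWrapper adapts for the core functions.
const log = {
	info: (msg: string) => console.log(`[info] ${msg}`),
	warn: (msg: string) => console.warn(`[warn] ${msg}`),
	error: (msg: string) => console.error(`[error] ${msg}`)
};

async function createFeatureTag(): Promise<void> {
	const result = await addTagDirect(
		{
			// Normally resolved by the MCP tool layer before calling the direct function.
			tasksJsonPath: '/path/to/project/.taskmaster/tasks/tasks.json',
			name: 'feature-auth',
			copyFromCurrent: true,
			description: 'Tasks for the auth feature',
			projectRoot: '/path/to/project'
		},
		log,
		{ session: undefined }
	);

	if (result.success) {
		console.log(result.data.message); // e.g. 'Successfully created tag "feature-auth"'
	} else {
		console.error(`${result.error.code}: ${result.error.message}`);
	}
}

createFeatureTag();
```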
--------------------------------------------------------------------------------
/apps/extension/src/webview/components/EmptyState.tsx:
--------------------------------------------------------------------------------
```typescript
import React from 'react';
import { ExternalLink, Terminal, MessageSquare, Plus } from 'lucide-react';
import { TaskMasterLogo } from '../../components/TaskMasterLogo';
interface EmptyStateProps {
currentTag: string;
}
export const EmptyState: React.FC<EmptyStateProps> = ({ currentTag }) => {
return (
<div className="flex items-center justify-center h-full overflow-auto">
<div className="max-w-2xl mx-auto text-center p-8">
{/* Empty state illustration */}
<div className="mb-8 max-w-96 mx-auto">
<TaskMasterLogo className="w-32 h-32 mx-auto text-vscode-foreground/20" />
</div>
<h2 className="text-2xl font-semibold mb-2 text-vscode-foreground">
No tasks in "{currentTag}" tag
</h2>
<p className="text-vscode-foreground/70 mb-8">
Get started by adding tasks to this tag using the commands below
</p>
{/* Command suggestions */}
<div className="space-y-4 text-left">
<div className="bg-vscode-editor-background/50 border border-vscode-panel-border rounded-lg p-4">
<div className="flex items-center gap-2 mb-2">
<Terminal className="w-4 h-4 text-vscode-terminal-ansiGreen" />
<h3 className="font-medium">CLI Commands</h3>
</div>
<div className="space-y-2">
<div className="bg-vscode-editor-background rounded p-2 font-mono text-sm">
<span className="text-vscode-terminal-ansiYellow">
task-master
</span>{' '}
<span className="text-vscode-terminal-ansiCyan">parse-prd</span>{' '}
<span className="text-vscode-foreground/70">
&lt;path-to-prd&gt;
</span>{' '}
<span className="text-vscode-terminal-ansiMagenta">
--append
</span>
<div className="text-xs text-vscode-foreground/50 mt-1">
Parse a PRD and append tasks to current tag
</div>
</div>
<div className="bg-vscode-editor-background rounded p-2 font-mono text-sm">
<span className="text-vscode-terminal-ansiYellow">
task-master
</span>{' '}
<span className="text-vscode-terminal-ansiCyan">add-task</span>{' '}
<span className="text-vscode-terminal-ansiMagenta">
--prompt
</span>{' '}
<span className="text-vscode-foreground/70">
"Your task description"
</span>
<div className="text-xs text-vscode-foreground/50 mt-1">
Add a single task with AI assistance
</div>
</div>
<div className="bg-vscode-editor-background rounded p-2 font-mono text-sm">
<span className="text-vscode-terminal-ansiYellow">
task-master
</span>{' '}
<span className="text-vscode-terminal-ansiCyan">add-task</span>{' '}
<span className="text-vscode-terminal-ansiMagenta">--help</span>
<div className="text-xs text-vscode-foreground/50 mt-1">
View all options for adding tasks
</div>
</div>
</div>
</div>
<div className="bg-vscode-editor-background/50 border border-vscode-panel-border rounded-lg p-4">
<div className="flex items-center gap-2 mb-2">
<MessageSquare className="w-4 h-4 text-vscode-textLink-foreground" />
<h3 className="font-medium">MCP Examples</h3>
</div>
<div className="space-y-2 text-sm">
<div className="flex items-start gap-2">
<Plus className="w-4 h-4 mt-0.5 text-vscode-foreground/50" />
<div>
<div className="text-vscode-foreground">
"Add a task to tag {currentTag}: Implement user
authentication"
</div>
</div>
</div>
<div className="flex items-start gap-2">
<Plus className="w-4 h-4 mt-0.5 text-vscode-foreground/50" />
<div>
<div className="text-vscode-foreground">
"Parse this PRD and add tasks to {currentTag}: [paste PRD
content]"
</div>
</div>
</div>
<div className="flex items-start gap-2">
<Plus className="w-4 h-4 mt-0.5 text-vscode-foreground/50" />
<div>
<div className="text-vscode-foreground">
"Create 5 tasks for building a REST API in tag {currentTag}"
</div>
</div>
</div>
</div>
</div>
{/* Documentation link */}
<div className="flex justify-center pt-4">
<a
href="https://docs.task-master.dev"
className="inline-flex items-center gap-2 text-vscode-textLink-foreground hover:text-vscode-textLink-activeForeground transition-colors"
onClick={(e) => {
e.preventDefault();
// Use VS Code API to open external link
if (window.acquireVsCodeApi) {
const vscode = window.acquireVsCodeApi();
vscode.postMessage({
type: 'openExternal',
url: 'https://docs.task-master.dev'
});
}
}}
>
<ExternalLink className="w-4 h-4" />
<span className="text-sm font-medium">
View TaskMaster Documentation
</span>
</a>
</div>
</div>
</div>
</div>
);
};
```
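The documentation link above does not navigate directly; it posts an `openExternal` message to the extension host. A minimal sketch of a receiving handler, assuming a `WebviewPanel` named `panel` (the actual extension may wire this differently):
```typescript
import * as vscode from 'vscode';

// Illustrative handler for the { type: 'openExternal', url } message posted by EmptyState.
export function registerOpenExternalHandler(panel: vscode.WebviewPanel): vscode.Disposable {
	return panel.webview.onDidReceiveMessage(
		(message: { type: string; url?: string }) => {
			if (message.type === 'openExternal' && message.url) {
				// Let VS Code open the URL in the user's default browser.
				vscode.env.openExternal(vscode.Uri.parse(message.url));
			}
		}
	);
}
```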
--------------------------------------------------------------------------------
/packages/tm-core/src/modules/workflow/managers/workflow-state-manager.spec.ts:
--------------------------------------------------------------------------------
```typescript
/**
* @fileoverview Tests for WorkflowStateManager path sanitization
*/
import os from 'node:os';
import path from 'node:path';
import { describe, expect, it } from 'vitest';
import { WorkflowStateManager } from './workflow-state-manager.js';
describe('WorkflowStateManager', () => {
describe('getProjectIdentifier', () => {
it('should sanitize paths like Claude Code', () => {
const projectRoot =
'/Volumes/Workspace/workspace/contrib/task-master/demos/nextjs-todo-tdd';
const manager = new WorkflowStateManager(projectRoot);
const sessionDir = manager.getSessionDir();
const homeDir = os.homedir();
// Expected structure: ~/.taskmaster/{project-id}/sessions/
const expectedPath = path.join(
homeDir,
'.taskmaster',
'-Volumes-Workspace-workspace-contrib-task-master-demos-nextjs-todo-tdd',
'sessions'
);
expect(sessionDir).toBe(expectedPath);
});
it('should preserve case in paths', () => {
const projectRoot = '/Users/Alice/Projects/MyApp';
const manager = new WorkflowStateManager(projectRoot);
const sessionDir = manager.getSessionDir();
// Extract project ID from: ~/.taskmaster/{project-id}/sessions/
const projectId = sessionDir.split(path.sep).slice(-2, -1)[0];
// Case should be preserved
expect(projectId).toContain('Users');
expect(projectId).toContain('Alice');
expect(projectId).toContain('Projects');
expect(projectId).toContain('MyApp');
});
it('should handle paths with special characters', () => {
const projectRoot = '/tmp/my-project_v2.0/test';
const manager = new WorkflowStateManager(projectRoot);
const sessionDir = manager.getSessionDir();
// Extract project ID from: ~/.taskmaster/{project-id}/sessions/
const projectId = sessionDir.split(path.sep).slice(-2, -1)[0];
// Special chars should be replaced with dashes
expect(projectId).toBe('-tmp-my-project-v2-0-test');
});
it('should create unique identifiers for different paths', () => {
const project1 = '/Users/alice/task-master';
const project2 = '/Users/bob/task-master';
const manager1 = new WorkflowStateManager(project1);
const manager2 = new WorkflowStateManager(project2);
// Extract project IDs from: ~/.taskmaster/{project-id}/sessions/
const id1 = manager1.getSessionDir().split(path.sep).slice(-2, -1)[0];
const id2 = manager2.getSessionDir().split(path.sep).slice(-2, -1)[0];
// Same basename but different full paths should be unique
expect(id1).not.toBe(id2);
expect(id1).toContain('alice');
expect(id2).toContain('bob');
});
it('should collapse multiple dashes', () => {
const projectRoot = '/path//with///multiple////slashes';
const manager = new WorkflowStateManager(projectRoot);
const sessionDir = manager.getSessionDir();
// Extract project ID from: ~/.taskmaster/{project-id}/sessions/
const projectId = sessionDir.split(path.sep).slice(-2, -1)[0];
// Multiple dashes should be collapsed to single dash
expect(projectId).not.toContain('--');
expect(projectId).toBe('-path-with-multiple-slashes');
});
it('should not have trailing dashes', () => {
const projectRoot = '/path/to/project';
const manager = new WorkflowStateManager(projectRoot);
const sessionDir = manager.getSessionDir();
// Extract project ID from: ~/.taskmaster/{project-id}/sessions/
const projectId = sessionDir.split(path.sep).slice(-2, -1)[0];
// Should not end with dash
expect(projectId).not.toMatch(/-$/);
});
it('should start with a dash like Claude Code', () => {
const projectRoot = '/any/path';
const manager = new WorkflowStateManager(projectRoot);
const sessionDir = manager.getSessionDir();
// Extract project ID from: ~/.taskmaster/{project-id}/sessions/
const projectId = sessionDir.split(path.sep).slice(-2, -1)[0];
// Should start with dash like Claude Code's pattern
expect(projectId).toMatch(/^-/);
});
});
describe('session paths', () => {
it('should place sessions in global ~/.taskmaster/{project-id}/sessions/', () => {
const projectRoot = '/some/project';
const manager = new WorkflowStateManager(projectRoot);
const sessionDir = manager.getSessionDir();
const homeDir = os.homedir();
// Should be: ~/.taskmaster/{project-id}/sessions/
expect(sessionDir).toContain(path.join(homeDir, '.taskmaster'));
expect(sessionDir).toMatch(/\.taskmaster\/.*\/sessions$/);
});
it('should include workflow-state.json in session dir', () => {
const projectRoot = '/some/project';
const manager = new WorkflowStateManager(projectRoot);
const statePath = manager.getStatePath();
const sessionDir = manager.getSessionDir();
expect(statePath).toBe(path.join(sessionDir, 'workflow-state.json'));
});
it('should include backups dir in session dir', () => {
const projectRoot = '/some/project';
const manager = new WorkflowStateManager(projectRoot);
const backupDir = manager.getBackupDir();
const sessionDir = manager.getSessionDir();
expect(backupDir).toBe(path.join(sessionDir, 'backups'));
});
});
});
```
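The expectations above pin down the sanitization rule: replace every run of non-alphanumeric characters in the absolute project path with a single dash, trim any trailing dash, preserve case, and store sessions under `~/.taskmaster/{project-id}/sessions/`. A minimal sketch that satisfies these tests (the real `WorkflowStateManager` implementation may differ in detail):
```typescript
import os from 'node:os';
import path from 'node:path';

// Illustrative re-implementation of the behaviour the spec above describes.
function projectIdentifier(projectRoot: string): string {
	return projectRoot
		.replace(/[^a-zA-Z0-9]+/g, '-') // runs of non-alphanumeric chars become one dash
		.replace(/-+$/, ''); // no trailing dash
}

function sessionDir(projectRoot: string): string {
	// ~/.taskmaster/{project-id}/sessions/
	return path.join(os.homedir(), '.taskmaster', projectIdentifier(projectRoot), 'sessions');
}

// e.g. projectIdentifier('/tmp/my-project_v2.0/test') === '-tmp-my-project-v2-0-test'
```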
--------------------------------------------------------------------------------
/packages/tm-core/src/modules/git/services/template-engine.ts:
--------------------------------------------------------------------------------
```typescript
/**
* TemplateEngine - Configurable template system for generating text from templates
*
* Supports:
* - Variable substitution using {{variableName}} syntax
* - Custom templates via constructor or setTemplate
* - Template validation with required variables
* - Variable extraction from templates
* - Multiple template storage and retrieval
*/
export interface TemplateValidationResult {
isValid: boolean;
missingVars: string[];
}
export interface TemplateVariables {
[key: string]: string | number | boolean | undefined;
}
export interface TemplateCollection {
[templateName: string]: string;
}
export interface TemplateEngineOptions {
customTemplates?: TemplateCollection;
preservePlaceholders?: boolean;
}
const DEFAULT_TEMPLATES: TemplateCollection = {
commitMessage: `{{type}}{{#scope}}({{scope}}){{/scope}}{{#breaking}}!{{/breaking}}: {{description}}
{{#body}}{{body}}
{{/body}}{{#taskId}}Task: {{taskId}}{{/taskId}}{{#phase}}
Phase: {{phase}}{{/phase}}{{#testsPassing}}
Tests: {{testsPassing}} passing{{#testsFailing}}, {{testsFailing}} failing{{/testsFailing}}{{/testsPassing}}`
};
export class TemplateEngine {
private templates: TemplateCollection;
private preservePlaceholders: boolean;
constructor(
optionsOrTemplates: TemplateEngineOptions | TemplateCollection = {}
) {
// Backward compatibility: support old signature (TemplateCollection) and new signature (TemplateEngineOptions)
const isOptions =
'customTemplates' in optionsOrTemplates ||
'preservePlaceholders' in optionsOrTemplates;
const options: TemplateEngineOptions = isOptions
? (optionsOrTemplates as TemplateEngineOptions)
: { customTemplates: optionsOrTemplates as TemplateCollection };
this.templates = {
...DEFAULT_TEMPLATES,
...(options.customTemplates || {})
};
this.preservePlaceholders = options.preservePlaceholders ?? false;
}
/**
* Render a template with provided variables
*/
render(
templateName: string,
variables: TemplateVariables,
inlineTemplate?: string
): string {
const template =
inlineTemplate !== undefined
? inlineTemplate
: this.templates[templateName];
if (template === undefined) {
throw new Error(`Template "${templateName}" not found`);
}
return this.substituteVariables(template, variables);
}
/**
* Set or update a template
*/
setTemplate(name: string, template: string): void {
this.templates[name] = template;
}
/**
* Get a template by name
*/
getTemplate(name: string): string | undefined {
return this.templates[name];
}
/**
* Check if a template exists
*/
hasTemplate(name: string): boolean {
return name in this.templates;
}
/**
* Validate that a template contains all required variables
*/
validateTemplate(
template: string,
requiredVars: string[]
): TemplateValidationResult {
const templateVars = this.extractVariables(template);
const missingVars = requiredVars.filter(
(varName) => !templateVars.includes(varName)
);
return {
isValid: missingVars.length === 0,
missingVars
};
}
/**
* Extract all variable names from a template
*/
extractVariables(template: string): string[] {
const regex = /\{\{\s*([^}#/\s]+)\s*\}\}/g;
const matches = template.matchAll(regex);
const variables = new Set<string>();
for (const match of matches) {
variables.add(match[1]);
}
return Array.from(variables);
}
/**
* Substitute variables in template
* Supports both {{variable}} and {{#variable}}...{{/variable}} (conditional blocks)
*/
private substituteVariables(
template: string,
variables: TemplateVariables
): string {
let result = template;
// Handle conditional blocks first ({{#var}}...{{/var}})
result = this.processConditionalBlocks(result, variables);
// Handle simple variable substitution ({{var}})
result = result.replace(/\{\{\s*([^}#/\s]+)\s*\}\}/g, (_, varName) => {
const value = variables[varName];
return value !== undefined && value !== null
? String(value)
: this.preservePlaceholders
? `{{${varName}}}`
: '';
});
return result;
}
/**
* Process conditional blocks in template
* {{#variable}}content{{/variable}} - shows content only if variable is truthy
* Processes innermost blocks first to handle nesting
*/
private processConditionalBlocks(
template: string,
variables: TemplateVariables
): string {
let result = template;
let hasChanges = true;
// Keep processing until no more conditional blocks are found
while (hasChanges) {
const before = result;
// Find and replace innermost conditional blocks (non-greedy match)
result = result.replace(
/\{\{#([^}]+)\}\}((?:(?!\{\{#).)*?)\{\{\/\1\}\}/gs,
(_, varName, content) => {
const value = variables[varName.trim()];
// Show content if variable is truthy (not undefined, null, false, or empty string)
if (
value !== undefined &&
value !== null &&
value !== false &&
value !== ''
) {
return content;
}
return '';
}
);
hasChanges = result !== before;
}
return result;
}
}
```
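A short usage sketch for the engine above, exercising the built-in `commitMessage` template and a custom template with a conditional block; the import path is illustrative:
```typescript
import { TemplateEngine } from './template-engine.js';

const engine = new TemplateEngine();

// Render the default commit message template; unset variables ({{breaking}}, {{body}},
// {{phase}}, {{testsFailing}}) simply drop their conditional blocks.
const commit = engine.render('commitMessage', {
	type: 'feat',
	scope: 'auth',
	description: 'add login endpoint',
	taskId: '12.3',
	testsPassing: 14
});
// Header line: "feat(auth): add login endpoint", followed by "Task: 12.3" and
// "Tests: 14 passing".

// Register and render a custom template.
engine.setTemplate('prTitle', '{{type}}: {{title}}{{#draft}} (draft){{/draft}}');
const title = engine.render('prTitle', { type: 'fix', title: 'handle empty tags' });
// -> "fix: handle empty tags" (the {{#draft}} block is removed because draft is unset)
```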
--------------------------------------------------------------------------------
/tests/unit/profiles/roo-integration.test.js:
--------------------------------------------------------------------------------
```javascript
import { jest } from '@jest/globals';
import fs from 'fs';
import path from 'path';
import os from 'os';
// Mock external modules
jest.mock('child_process', () => ({
execSync: jest.fn()
}));
// Mock console methods
jest.mock('console', () => ({
log: jest.fn(),
info: jest.fn(),
warn: jest.fn(),
error: jest.fn(),
clear: jest.fn()
}));
describe('Roo Integration', () => {
let tempDir;
beforeEach(() => {
jest.clearAllMocks();
// Create a temporary directory for testing
tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'task-master-test-'));
// Spy on fs methods
jest.spyOn(fs, 'writeFileSync').mockImplementation(() => {});
jest.spyOn(fs, 'readFileSync').mockImplementation((filePath) => {
if (filePath.toString().includes('.roomodes')) {
return 'Existing roomodes content';
}
if (filePath.toString().includes('-rules')) {
return 'Existing mode rules content';
}
return '{}';
});
jest.spyOn(fs, 'existsSync').mockImplementation(() => false);
jest.spyOn(fs, 'mkdirSync').mockImplementation(() => {});
});
afterEach(() => {
// Clean up the temporary directory
try {
fs.rmSync(tempDir, { recursive: true, force: true });
} catch (err) {
console.error(`Error cleaning up: ${err.message}`);
}
});
// Test function that simulates the createProjectStructure behavior for Roo files
function mockCreateRooStructure() {
// Create main .roo directory
fs.mkdirSync(path.join(tempDir, '.roo'), { recursive: true });
// Create rules directory
fs.mkdirSync(path.join(tempDir, '.roo', 'rules'), { recursive: true });
// Create mode-specific rule directories
const rooModes = [
'architect',
'ask',
'orchestrator',
'code',
'debug',
'test'
];
for (const mode of rooModes) {
fs.mkdirSync(path.join(tempDir, '.roo', `rules-${mode}`), {
recursive: true
});
fs.writeFileSync(
path.join(tempDir, '.roo', `rules-${mode}`, `${mode}-rules`),
`Content for ${mode} rules`
);
}
// Create additional directories
fs.mkdirSync(path.join(tempDir, '.roo', 'config'), { recursive: true });
fs.mkdirSync(path.join(tempDir, '.roo', 'templates'), { recursive: true });
fs.mkdirSync(path.join(tempDir, '.roo', 'logs'), { recursive: true });
// Copy .roomodes file
fs.writeFileSync(path.join(tempDir, '.roomodes'), 'Roomodes file content');
}
test('creates all required .roo directories', () => {
// Act
mockCreateRooStructure();
// Assert
expect(fs.mkdirSync).toHaveBeenCalledWith(path.join(tempDir, '.roo'), {
recursive: true
});
expect(fs.mkdirSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules'),
{ recursive: true }
);
// Verify all mode directories are created
expect(fs.mkdirSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules-architect'),
{ recursive: true }
);
expect(fs.mkdirSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules-ask'),
{ recursive: true }
);
expect(fs.mkdirSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules-orchestrator'),
{ recursive: true }
);
expect(fs.mkdirSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules-code'),
{ recursive: true }
);
expect(fs.mkdirSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules-debug'),
{ recursive: true }
);
expect(fs.mkdirSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules-test'),
{ recursive: true }
);
});
test('creates rule files for all modes', () => {
// Act
mockCreateRooStructure();
// Assert - check all rule files are created
expect(fs.writeFileSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules-architect', 'architect-rules'),
expect.any(String)
);
expect(fs.writeFileSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules-ask', 'ask-rules'),
expect.any(String)
);
expect(fs.writeFileSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules-orchestrator', 'orchestrator-rules'),
expect.any(String)
);
expect(fs.writeFileSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules-code', 'code-rules'),
expect.any(String)
);
expect(fs.writeFileSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules-debug', 'debug-rules'),
expect.any(String)
);
expect(fs.writeFileSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules-test', 'test-rules'),
expect.any(String)
);
});
test('creates .roomodes file in project root', () => {
// Act
mockCreateRooStructure();
// Assert
expect(fs.writeFileSync).toHaveBeenCalledWith(
path.join(tempDir, '.roomodes'),
expect.any(String)
);
});
test('creates additional required Roo directories', () => {
// Act
mockCreateRooStructure();
// Assert
expect(fs.mkdirSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'config'),
{ recursive: true }
);
expect(fs.mkdirSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'templates'),
{ recursive: true }
);
expect(fs.mkdirSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'logs'),
{ recursive: true }
);
});
});
```