This is page 40 of 50. Use http://codebase.md/eyaltoledano/claude-task-master?page={x} to view the full context.
# Directory Structure
```
├── .changeset
│ ├── config.json
│ └── README.md
├── .claude
│ ├── commands
│ │ └── dedupe.md
│ └── TM_COMMANDS_GUIDE.md
├── .claude-plugin
│ └── marketplace.json
├── .coderabbit.yaml
├── .cursor
│ ├── mcp.json
│ └── rules
│ ├── ai_providers.mdc
│ ├── ai_services.mdc
│ ├── architecture.mdc
│ ├── changeset.mdc
│ ├── commands.mdc
│ ├── context_gathering.mdc
│ ├── cursor_rules.mdc
│ ├── dependencies.mdc
│ ├── dev_workflow.mdc
│ ├── git_workflow.mdc
│ ├── glossary.mdc
│ ├── mcp.mdc
│ ├── new_features.mdc
│ ├── self_improve.mdc
│ ├── tags.mdc
│ ├── taskmaster.mdc
│ ├── tasks.mdc
│ ├── telemetry.mdc
│ ├── test_workflow.mdc
│ ├── tests.mdc
│ ├── ui.mdc
│ └── utilities.mdc
├── .cursorignore
├── .env.example
├── .github
│ ├── ISSUE_TEMPLATE
│ │ ├── bug_report.md
│ │ ├── enhancements---feature-requests.md
│ │ └── feedback.md
│ ├── PULL_REQUEST_TEMPLATE
│ │ ├── bugfix.md
│ │ ├── config.yml
│ │ ├── feature.md
│ │ └── integration.md
│ ├── PULL_REQUEST_TEMPLATE.md
│ ├── scripts
│ │ ├── auto-close-duplicates.mjs
│ │ ├── backfill-duplicate-comments.mjs
│ │ ├── check-pre-release-mode.mjs
│ │ ├── parse-metrics.mjs
│ │ ├── release.mjs
│ │ ├── tag-extension.mjs
│ │ ├── utils.mjs
│ │ └── validate-changesets.mjs
│ └── workflows
│ ├── auto-close-duplicates.yml
│ ├── backfill-duplicate-comments.yml
│ ├── ci.yml
│ ├── claude-dedupe-issues.yml
│ ├── claude-docs-trigger.yml
│ ├── claude-docs-updater.yml
│ ├── claude-issue-triage.yml
│ ├── claude.yml
│ ├── extension-ci.yml
│ ├── extension-release.yml
│ ├── log-issue-events.yml
│ ├── pre-release.yml
│ ├── release-check.yml
│ ├── release.yml
│ ├── update-models-md.yml
│ └── weekly-metrics-discord.yml
├── .gitignore
├── .kiro
│ ├── hooks
│ │ ├── tm-code-change-task-tracker.kiro.hook
│ │ ├── tm-complexity-analyzer.kiro.hook
│ │ ├── tm-daily-standup-assistant.kiro.hook
│ │ ├── tm-git-commit-task-linker.kiro.hook
│ │ ├── tm-pr-readiness-checker.kiro.hook
│ │ ├── tm-task-dependency-auto-progression.kiro.hook
│ │ └── tm-test-success-task-completer.kiro.hook
│ ├── settings
│ │ └── mcp.json
│ └── steering
│ ├── dev_workflow.md
│ ├── kiro_rules.md
│ ├── self_improve.md
│ ├── taskmaster_hooks_workflow.md
│ └── taskmaster.md
├── .manypkg.json
├── .mcp.json
├── .npmignore
├── .nvmrc
├── .taskmaster
│ ├── CLAUDE.md
│ ├── config.json
│ ├── docs
│ │ ├── autonomous-tdd-git-workflow.md
│ │ ├── MIGRATION-ROADMAP.md
│ │ ├── prd-tm-start.txt
│ │ ├── prd.txt
│ │ ├── README.md
│ │ ├── research
│ │ │ ├── 2025-06-14_how-can-i-improve-the-scope-up-and-scope-down-comm.md
│ │ │ ├── 2025-06-14_should-i-be-using-any-specific-libraries-for-this.md
│ │ │ ├── 2025-06-14_test-save-functionality.md
│ │ │ ├── 2025-06-14_test-the-fix-for-duplicate-saves-final-test.md
│ │ │ └── 2025-08-01_do-we-need-to-add-new-commands-or-can-we-just-weap.md
│ │ ├── task-template-importing-prd.txt
│ │ ├── tdd-workflow-phase-0-spike.md
│ │ ├── tdd-workflow-phase-1-core-rails.md
│ │ ├── tdd-workflow-phase-1-orchestrator.md
│ │ ├── tdd-workflow-phase-2-pr-resumability.md
│ │ ├── tdd-workflow-phase-3-extensibility-guardrails.md
│ │ ├── test-prd.txt
│ │ └── tm-core-phase-1.txt
│ ├── reports
│ │ ├── task-complexity-report_autonomous-tdd-git-workflow.json
│ │ ├── task-complexity-report_cc-kiro-hooks.json
│ │ ├── task-complexity-report_tdd-phase-1-core-rails.json
│ │ ├── task-complexity-report_tdd-workflow-phase-0.json
│ │ ├── task-complexity-report_test-prd-tag.json
│ │ ├── task-complexity-report_tm-core-phase-1.json
│ │ ├── task-complexity-report.json
│ │ └── tm-core-complexity.json
│ ├── state.json
│ ├── tasks
│ │ ├── task_001_tm-start.txt
│ │ ├── task_002_tm-start.txt
│ │ ├── task_003_tm-start.txt
│ │ ├── task_004_tm-start.txt
│ │ ├── task_007_tm-start.txt
│ │ └── tasks.json
│ └── templates
│ ├── example_prd_rpg.md
│ └── example_prd.md
├── .vscode
│ ├── extensions.json
│ └── settings.json
├── apps
│ ├── cli
│ │ ├── CHANGELOG.md
│ │ ├── package.json
│ │ ├── src
│ │ │ ├── command-registry.ts
│ │ │ ├── commands
│ │ │ │ ├── auth.command.ts
│ │ │ │ ├── autopilot
│ │ │ │ │ ├── abort.command.ts
│ │ │ │ │ ├── commit.command.ts
│ │ │ │ │ ├── complete.command.ts
│ │ │ │ │ ├── index.ts
│ │ │ │ │ ├── next.command.ts
│ │ │ │ │ ├── resume.command.ts
│ │ │ │ │ ├── shared.ts
│ │ │ │ │ ├── start.command.ts
│ │ │ │ │ └── status.command.ts
│ │ │ │ ├── briefs.command.ts
│ │ │ │ ├── context.command.ts
│ │ │ │ ├── export.command.ts
│ │ │ │ ├── list.command.ts
│ │ │ │ ├── models
│ │ │ │ │ ├── custom-providers.ts
│ │ │ │ │ ├── fetchers.ts
│ │ │ │ │ ├── index.ts
│ │ │ │ │ ├── prompts.ts
│ │ │ │ │ ├── setup.ts
│ │ │ │ │ └── types.ts
│ │ │ │ ├── next.command.ts
│ │ │ │ ├── set-status.command.ts
│ │ │ │ ├── show.command.ts
│ │ │ │ ├── start.command.ts
│ │ │ │ └── tags.command.ts
│ │ │ ├── index.ts
│ │ │ ├── lib
│ │ │ │ └── model-management.ts
│ │ │ ├── types
│ │ │ │ └── tag-management.d.ts
│ │ │ ├── ui
│ │ │ │ ├── components
│ │ │ │ │ ├── cardBox.component.ts
│ │ │ │ │ ├── dashboard.component.ts
│ │ │ │ │ ├── header.component.ts
│ │ │ │ │ ├── index.ts
│ │ │ │ │ ├── next-task.component.ts
│ │ │ │ │ ├── suggested-steps.component.ts
│ │ │ │ │ └── task-detail.component.ts
│ │ │ │ ├── display
│ │ │ │ │ ├── messages.ts
│ │ │ │ │ └── tables.ts
│ │ │ │ ├── formatters
│ │ │ │ │ ├── complexity-formatters.ts
│ │ │ │ │ ├── dependency-formatters.ts
│ │ │ │ │ ├── priority-formatters.ts
│ │ │ │ │ ├── status-formatters.spec.ts
│ │ │ │ │ └── status-formatters.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── layout
│ │ │ │ ├── helpers.spec.ts
│ │ │ │ └── helpers.ts
│ │ │ └── utils
│ │ │ ├── auth-helpers.ts
│ │ │ ├── auto-update.ts
│ │ │ ├── brief-selection.ts
│ │ │ ├── display-helpers.ts
│ │ │ ├── error-handler.ts
│ │ │ ├── index.ts
│ │ │ ├── project-root.ts
│ │ │ ├── task-status.ts
│ │ │ ├── ui.spec.ts
│ │ │ └── ui.ts
│ │ ├── tests
│ │ │ ├── integration
│ │ │ │ └── commands
│ │ │ │ └── autopilot
│ │ │ │ └── workflow.test.ts
│ │ │ └── unit
│ │ │ ├── commands
│ │ │ │ ├── autopilot
│ │ │ │ │ └── shared.test.ts
│ │ │ │ ├── list.command.spec.ts
│ │ │ │ └── show.command.spec.ts
│ │ │ └── ui
│ │ │ └── dashboard.component.spec.ts
│ │ ├── tsconfig.json
│ │ └── vitest.config.ts
│ ├── docs
│ │ ├── archive
│ │ │ ├── ai-client-utils-example.mdx
│ │ │ ├── ai-development-workflow.mdx
│ │ │ ├── command-reference.mdx
│ │ │ ├── configuration.mdx
│ │ │ ├── cursor-setup.mdx
│ │ │ ├── examples.mdx
│ │ │ └── Installation.mdx
│ │ ├── best-practices
│ │ │ ├── advanced-tasks.mdx
│ │ │ ├── configuration-advanced.mdx
│ │ │ └── index.mdx
│ │ ├── capabilities
│ │ │ ├── cli-root-commands.mdx
│ │ │ ├── index.mdx
│ │ │ ├── mcp.mdx
│ │ │ ├── rpg-method.mdx
│ │ │ └── task-structure.mdx
│ │ ├── CHANGELOG.md
│ │ ├── command-reference.mdx
│ │ ├── configuration.mdx
│ │ ├── docs.json
│ │ ├── favicon.svg
│ │ ├── getting-started
│ │ │ ├── api-keys.mdx
│ │ │ ├── contribute.mdx
│ │ │ ├── faq.mdx
│ │ │ └── quick-start
│ │ │ ├── configuration-quick.mdx
│ │ │ ├── execute-quick.mdx
│ │ │ ├── installation.mdx
│ │ │ ├── moving-forward.mdx
│ │ │ ├── prd-quick.mdx
│ │ │ ├── quick-start.mdx
│ │ │ ├── requirements.mdx
│ │ │ ├── rules-quick.mdx
│ │ │ └── tasks-quick.mdx
│ │ ├── introduction.mdx
│ │ ├── licensing.md
│ │ ├── logo
│ │ │ ├── dark.svg
│ │ │ ├── light.svg
│ │ │ └── task-master-logo.png
│ │ ├── package.json
│ │ ├── README.md
│ │ ├── style.css
│ │ ├── tdd-workflow
│ │ │ ├── ai-agent-integration.mdx
│ │ │ └── quickstart.mdx
│ │ ├── vercel.json
│ │ └── whats-new.mdx
│ ├── extension
│ │ ├── .vscodeignore
│ │ ├── assets
│ │ │ ├── banner.png
│ │ │ ├── icon-dark.svg
│ │ │ ├── icon-light.svg
│ │ │ ├── icon.png
│ │ │ ├── screenshots
│ │ │ │ ├── kanban-board.png
│ │ │ │ └── task-details.png
│ │ │ └── sidebar-icon.svg
│ │ ├── CHANGELOG.md
│ │ ├── components.json
│ │ ├── docs
│ │ │ ├── extension-CI-setup.md
│ │ │ └── extension-development-guide.md
│ │ ├── esbuild.js
│ │ ├── LICENSE
│ │ ├── package.json
│ │ ├── package.mjs
│ │ ├── package.publish.json
│ │ ├── README.md
│ │ ├── src
│ │ │ ├── components
│ │ │ │ ├── ConfigView.tsx
│ │ │ │ ├── constants.ts
│ │ │ │ ├── TaskDetails
│ │ │ │ │ ├── AIActionsSection.tsx
│ │ │ │ │ ├── DetailsSection.tsx
│ │ │ │ │ ├── PriorityBadge.tsx
│ │ │ │ │ ├── SubtasksSection.tsx
│ │ │ │ │ ├── TaskMetadataSidebar.tsx
│ │ │ │ │ └── useTaskDetails.ts
│ │ │ │ ├── TaskDetailsView.tsx
│ │ │ │ ├── TaskMasterLogo.tsx
│ │ │ │ └── ui
│ │ │ │ ├── badge.tsx
│ │ │ │ ├── breadcrumb.tsx
│ │ │ │ ├── button.tsx
│ │ │ │ ├── card.tsx
│ │ │ │ ├── collapsible.tsx
│ │ │ │ ├── CollapsibleSection.tsx
│ │ │ │ ├── dropdown-menu.tsx
│ │ │ │ ├── label.tsx
│ │ │ │ ├── scroll-area.tsx
│ │ │ │ ├── separator.tsx
│ │ │ │ ├── shadcn-io
│ │ │ │ │ └── kanban
│ │ │ │ │ └── index.tsx
│ │ │ │ └── textarea.tsx
│ │ │ ├── extension.ts
│ │ │ ├── index.ts
│ │ │ ├── lib
│ │ │ │ └── utils.ts
│ │ │ ├── services
│ │ │ │ ├── config-service.ts
│ │ │ │ ├── error-handler.ts
│ │ │ │ ├── notification-preferences.ts
│ │ │ │ ├── polling-service.ts
│ │ │ │ ├── polling-strategies.ts
│ │ │ │ ├── sidebar-webview-manager.ts
│ │ │ │ ├── task-repository.ts
│ │ │ │ ├── terminal-manager.ts
│ │ │ │ └── webview-manager.ts
│ │ │ ├── test
│ │ │ │ └── extension.test.ts
│ │ │ ├── utils
│ │ │ │ ├── configManager.ts
│ │ │ │ ├── connectionManager.ts
│ │ │ │ ├── errorHandler.ts
│ │ │ │ ├── event-emitter.ts
│ │ │ │ ├── logger.ts
│ │ │ │ ├── mcpClient.ts
│ │ │ │ ├── notificationPreferences.ts
│ │ │ │ └── task-master-api
│ │ │ │ ├── cache
│ │ │ │ │ └── cache-manager.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── mcp-client.ts
│ │ │ │ ├── transformers
│ │ │ │ │ └── task-transformer.ts
│ │ │ │ └── types
│ │ │ │ └── index.ts
│ │ │ └── webview
│ │ │ ├── App.tsx
│ │ │ ├── components
│ │ │ │ ├── AppContent.tsx
│ │ │ │ ├── EmptyState.tsx
│ │ │ │ ├── ErrorBoundary.tsx
│ │ │ │ ├── PollingStatus.tsx
│ │ │ │ ├── PriorityBadge.tsx
│ │ │ │ ├── SidebarView.tsx
│ │ │ │ ├── TagDropdown.tsx
│ │ │ │ ├── TaskCard.tsx
│ │ │ │ ├── TaskEditModal.tsx
│ │ │ │ ├── TaskMasterKanban.tsx
│ │ │ │ ├── ToastContainer.tsx
│ │ │ │ └── ToastNotification.tsx
│ │ │ ├── constants
│ │ │ │ └── index.ts
│ │ │ ├── contexts
│ │ │ │ └── VSCodeContext.tsx
│ │ │ ├── hooks
│ │ │ │ ├── useTaskQueries.ts
│ │ │ │ ├── useVSCodeMessages.ts
│ │ │ │ └── useWebviewHeight.ts
│ │ │ ├── index.css
│ │ │ ├── index.tsx
│ │ │ ├── providers
│ │ │ │ └── QueryProvider.tsx
│ │ │ ├── reducers
│ │ │ │ └── appReducer.ts
│ │ │ ├── sidebar.tsx
│ │ │ ├── types
│ │ │ │ └── index.ts
│ │ │ └── utils
│ │ │ ├── logger.ts
│ │ │ └── toast.ts
│ │ └── tsconfig.json
│ └── mcp
│ ├── CHANGELOG.md
│ ├── package.json
│ ├── src
│ │ ├── index.ts
│ │ ├── shared
│ │ │ ├── types.ts
│ │ │ └── utils.ts
│ │ └── tools
│ │ ├── autopilot
│ │ │ ├── abort.tool.ts
│ │ │ ├── commit.tool.ts
│ │ │ ├── complete.tool.ts
│ │ │ ├── finalize.tool.ts
│ │ │ ├── index.ts
│ │ │ ├── next.tool.ts
│ │ │ ├── resume.tool.ts
│ │ │ ├── start.tool.ts
│ │ │ └── status.tool.ts
│ │ ├── README-ZOD-V3.md
│ │ └── tasks
│ │ ├── get-task.tool.ts
│ │ ├── get-tasks.tool.ts
│ │ └── index.ts
│ ├── tsconfig.json
│ └── vitest.config.ts
├── assets
│ ├── .windsurfrules
│ ├── AGENTS.md
│ ├── claude
│ │ └── TM_COMMANDS_GUIDE.md
│ ├── config.json
│ ├── env.example
│ ├── example_prd_rpg.txt
│ ├── example_prd.txt
│ ├── GEMINI.md
│ ├── gitignore
│ ├── kiro-hooks
│ │ ├── tm-code-change-task-tracker.kiro.hook
│ │ ├── tm-complexity-analyzer.kiro.hook
│ │ ├── tm-daily-standup-assistant.kiro.hook
│ │ ├── tm-git-commit-task-linker.kiro.hook
│ │ ├── tm-pr-readiness-checker.kiro.hook
│ │ ├── tm-task-dependency-auto-progression.kiro.hook
│ │ └── tm-test-success-task-completer.kiro.hook
│ ├── roocode
│ │ ├── .roo
│ │ │ ├── rules-architect
│ │ │ │ └── architect-rules
│ │ │ ├── rules-ask
│ │ │ │ └── ask-rules
│ │ │ ├── rules-code
│ │ │ │ └── code-rules
│ │ │ ├── rules-debug
│ │ │ │ └── debug-rules
│ │ │ ├── rules-orchestrator
│ │ │ │ └── orchestrator-rules
│ │ │ └── rules-test
│ │ │ └── test-rules
│ │ └── .roomodes
│ ├── rules
│ │ ├── cursor_rules.mdc
│ │ ├── dev_workflow.mdc
│ │ ├── self_improve.mdc
│ │ ├── taskmaster_hooks_workflow.mdc
│ │ └── taskmaster.mdc
│ └── scripts_README.md
├── bin
│ └── task-master.js
├── biome.json
├── CHANGELOG.md
├── CLAUDE_CODE_PLUGIN.md
├── CLAUDE.md
├── context
│ ├── chats
│ │ ├── add-task-dependencies-1.md
│ │ └── max-min-tokens.txt.md
│ ├── fastmcp-core.txt
│ ├── fastmcp-docs.txt
│ ├── MCP_INTEGRATION.md
│ ├── mcp-js-sdk-docs.txt
│ ├── mcp-protocol-repo.txt
│ ├── mcp-protocol-schema-03262025.json
│ └── mcp-protocol-spec.txt
├── CONTRIBUTING.md
├── docs
│ ├── claude-code-integration.md
│ ├── CLI-COMMANDER-PATTERN.md
│ ├── command-reference.md
│ ├── configuration.md
│ ├── contributor-docs
│ │ ├── testing-roo-integration.md
│ │ └── worktree-setup.md
│ ├── cross-tag-task-movement.md
│ ├── examples
│ │ ├── claude-code-usage.md
│ │ └── codex-cli-usage.md
│ ├── examples.md
│ ├── licensing.md
│ ├── mcp-provider-guide.md
│ ├── mcp-provider.md
│ ├── migration-guide.md
│ ├── models.md
│ ├── providers
│ │ ├── codex-cli.md
│ │ └── gemini-cli.md
│ ├── README.md
│ ├── scripts
│ │ └── models-json-to-markdown.js
│ ├── task-structure.md
│ └── tutorial.md
├── images
│ ├── hamster-hiring.png
│ └── logo.png
├── index.js
├── jest.config.js
├── jest.resolver.cjs
├── LICENSE
├── llms-install.md
├── mcp-server
│ ├── server.js
│ └── src
│ ├── core
│ │ ├── __tests__
│ │ │ └── context-manager.test.js
│ │ ├── context-manager.js
│ │ ├── direct-functions
│ │ │ ├── add-dependency.js
│ │ │ ├── add-subtask.js
│ │ │ ├── add-tag.js
│ │ │ ├── add-task.js
│ │ │ ├── analyze-task-complexity.js
│ │ │ ├── cache-stats.js
│ │ │ ├── clear-subtasks.js
│ │ │ ├── complexity-report.js
│ │ │ ├── copy-tag.js
│ │ │ ├── create-tag-from-branch.js
│ │ │ ├── delete-tag.js
│ │ │ ├── expand-all-tasks.js
│ │ │ ├── expand-task.js
│ │ │ ├── fix-dependencies.js
│ │ │ ├── generate-task-files.js
│ │ │ ├── initialize-project.js
│ │ │ ├── list-tags.js
│ │ │ ├── models.js
│ │ │ ├── move-task-cross-tag.js
│ │ │ ├── move-task.js
│ │ │ ├── next-task.js
│ │ │ ├── parse-prd.js
│ │ │ ├── remove-dependency.js
│ │ │ ├── remove-subtask.js
│ │ │ ├── remove-task.js
│ │ │ ├── rename-tag.js
│ │ │ ├── research.js
│ │ │ ├── response-language.js
│ │ │ ├── rules.js
│ │ │ ├── scope-down.js
│ │ │ ├── scope-up.js
│ │ │ ├── set-task-status.js
│ │ │ ├── update-subtask-by-id.js
│ │ │ ├── update-task-by-id.js
│ │ │ ├── update-tasks.js
│ │ │ ├── use-tag.js
│ │ │ └── validate-dependencies.js
│ │ ├── task-master-core.js
│ │ └── utils
│ │ ├── env-utils.js
│ │ └── path-utils.js
│ ├── custom-sdk
│ │ ├── errors.js
│ │ ├── index.js
│ │ ├── json-extractor.js
│ │ ├── language-model.js
│ │ ├── message-converter.js
│ │ └── schema-converter.js
│ ├── index.js
│ ├── logger.js
│ ├── providers
│ │ └── mcp-provider.js
│ └── tools
│ ├── add-dependency.js
│ ├── add-subtask.js
│ ├── add-tag.js
│ ├── add-task.js
│ ├── analyze.js
│ ├── clear-subtasks.js
│ ├── complexity-report.js
│ ├── copy-tag.js
│ ├── delete-tag.js
│ ├── expand-all.js
│ ├── expand-task.js
│ ├── fix-dependencies.js
│ ├── generate.js
│ ├── get-operation-status.js
│ ├── index.js
│ ├── initialize-project.js
│ ├── list-tags.js
│ ├── models.js
│ ├── move-task.js
│ ├── next-task.js
│ ├── parse-prd.js
│ ├── README-ZOD-V3.md
│ ├── remove-dependency.js
│ ├── remove-subtask.js
│ ├── remove-task.js
│ ├── rename-tag.js
│ ├── research.js
│ ├── response-language.js
│ ├── rules.js
│ ├── scope-down.js
│ ├── scope-up.js
│ ├── set-task-status.js
│ ├── tool-registry.js
│ ├── update-subtask.js
│ ├── update-task.js
│ ├── update.js
│ ├── use-tag.js
│ ├── utils.js
│ └── validate-dependencies.js
├── mcp-test.js
├── output.json
├── package-lock.json
├── package.json
├── packages
│ ├── ai-sdk-provider-grok-cli
│ │ ├── CHANGELOG.md
│ │ ├── package.json
│ │ ├── README.md
│ │ ├── src
│ │ │ ├── errors.test.ts
│ │ │ ├── errors.ts
│ │ │ ├── grok-cli-language-model.ts
│ │ │ ├── grok-cli-provider.test.ts
│ │ │ ├── grok-cli-provider.ts
│ │ │ ├── index.ts
│ │ │ ├── json-extractor.test.ts
│ │ │ ├── json-extractor.ts
│ │ │ ├── message-converter.test.ts
│ │ │ ├── message-converter.ts
│ │ │ └── types.ts
│ │ └── tsconfig.json
│ ├── build-config
│ │ ├── CHANGELOG.md
│ │ ├── package.json
│ │ ├── src
│ │ │ └── tsdown.base.ts
│ │ └── tsconfig.json
│ ├── claude-code-plugin
│ │ ├── .claude-plugin
│ │ │ └── plugin.json
│ │ ├── .gitignore
│ │ ├── agents
│ │ │ ├── task-checker.md
│ │ │ ├── task-executor.md
│ │ │ └── task-orchestrator.md
│ │ ├── CHANGELOG.md
│ │ ├── commands
│ │ │ ├── add-dependency.md
│ │ │ ├── add-subtask.md
│ │ │ ├── add-task.md
│ │ │ ├── analyze-complexity.md
│ │ │ ├── analyze-project.md
│ │ │ ├── auto-implement-tasks.md
│ │ │ ├── command-pipeline.md
│ │ │ ├── complexity-report.md
│ │ │ ├── convert-task-to-subtask.md
│ │ │ ├── expand-all-tasks.md
│ │ │ ├── expand-task.md
│ │ │ ├── fix-dependencies.md
│ │ │ ├── generate-tasks.md
│ │ │ ├── help.md
│ │ │ ├── init-project-quick.md
│ │ │ ├── init-project.md
│ │ │ ├── install-taskmaster.md
│ │ │ ├── learn.md
│ │ │ ├── list-tasks-by-status.md
│ │ │ ├── list-tasks-with-subtasks.md
│ │ │ ├── list-tasks.md
│ │ │ ├── next-task.md
│ │ │ ├── parse-prd-with-research.md
│ │ │ ├── parse-prd.md
│ │ │ ├── project-status.md
│ │ │ ├── quick-install-taskmaster.md
│ │ │ ├── remove-all-subtasks.md
│ │ │ ├── remove-dependency.md
│ │ │ ├── remove-subtask.md
│ │ │ ├── remove-subtasks.md
│ │ │ ├── remove-task.md
│ │ │ ├── setup-models.md
│ │ │ ├── show-task.md
│ │ │ ├── smart-workflow.md
│ │ │ ├── sync-readme.md
│ │ │ ├── tm-main.md
│ │ │ ├── to-cancelled.md
│ │ │ ├── to-deferred.md
│ │ │ ├── to-done.md
│ │ │ ├── to-in-progress.md
│ │ │ ├── to-pending.md
│ │ │ ├── to-review.md
│ │ │ ├── update-single-task.md
│ │ │ ├── update-task.md
│ │ │ ├── update-tasks-from-id.md
│ │ │ ├── validate-dependencies.md
│ │ │ └── view-models.md
│ │ ├── mcp.json
│ │ └── package.json
│ ├── tm-bridge
│ │ ├── CHANGELOG.md
│ │ ├── package.json
│ │ ├── README.md
│ │ ├── src
│ │ │ ├── add-tag-bridge.ts
│ │ │ ├── bridge-types.ts
│ │ │ ├── bridge-utils.ts
│ │ │ ├── expand-bridge.ts
│ │ │ ├── index.ts
│ │ │ ├── tags-bridge.ts
│ │ │ ├── update-bridge.ts
│ │ │ └── use-tag-bridge.ts
│ │ └── tsconfig.json
│ └── tm-core
│ ├── .gitignore
│ ├── CHANGELOG.md
│ ├── docs
│ │ └── listTasks-architecture.md
│ ├── package.json
│ ├── POC-STATUS.md
│ ├── README.md
│ ├── src
│ │ ├── common
│ │ │ ├── constants
│ │ │ │ ├── index.ts
│ │ │ │ ├── paths.ts
│ │ │ │ └── providers.ts
│ │ │ ├── errors
│ │ │ │ ├── index.ts
│ │ │ │ └── task-master-error.ts
│ │ │ ├── interfaces
│ │ │ │ ├── configuration.interface.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── storage.interface.ts
│ │ │ ├── logger
│ │ │ │ ├── factory.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── logger.spec.ts
│ │ │ │ └── logger.ts
│ │ │ ├── mappers
│ │ │ │ ├── TaskMapper.test.ts
│ │ │ │ └── TaskMapper.ts
│ │ │ ├── types
│ │ │ │ ├── database.types.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── legacy.ts
│ │ │ │ └── repository-types.ts
│ │ │ └── utils
│ │ │ ├── git-utils.ts
│ │ │ ├── id-generator.ts
│ │ │ ├── index.ts
│ │ │ ├── path-helpers.ts
│ │ │ ├── path-normalizer.spec.ts
│ │ │ ├── path-normalizer.ts
│ │ │ ├── project-root-finder.spec.ts
│ │ │ ├── project-root-finder.ts
│ │ │ ├── run-id-generator.spec.ts
│ │ │ └── run-id-generator.ts
│ │ ├── index.ts
│ │ ├── modules
│ │ │ ├── ai
│ │ │ │ ├── index.ts
│ │ │ │ ├── interfaces
│ │ │ │ │ └── ai-provider.interface.ts
│ │ │ │ └── providers
│ │ │ │ ├── base-provider.ts
│ │ │ │ └── index.ts
│ │ │ ├── auth
│ │ │ │ ├── auth-domain.spec.ts
│ │ │ │ ├── auth-domain.ts
│ │ │ │ ├── config.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── managers
│ │ │ │ │ ├── auth-manager.spec.ts
│ │ │ │ │ └── auth-manager.ts
│ │ │ │ ├── services
│ │ │ │ │ ├── context-store.ts
│ │ │ │ │ ├── oauth-service.ts
│ │ │ │ │ ├── organization.service.ts
│ │ │ │ │ ├── supabase-session-storage.spec.ts
│ │ │ │ │ └── supabase-session-storage.ts
│ │ │ │ └── types.ts
│ │ │ ├── briefs
│ │ │ │ ├── briefs-domain.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── services
│ │ │ │ │ └── brief-service.ts
│ │ │ │ ├── types.ts
│ │ │ │ └── utils
│ │ │ │ └── url-parser.ts
│ │ │ ├── commands
│ │ │ │ └── index.ts
│ │ │ ├── config
│ │ │ │ ├── config-domain.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── managers
│ │ │ │ │ ├── config-manager.spec.ts
│ │ │ │ │ └── config-manager.ts
│ │ │ │ └── services
│ │ │ │ ├── config-loader.service.spec.ts
│ │ │ │ ├── config-loader.service.ts
│ │ │ │ ├── config-merger.service.spec.ts
│ │ │ │ ├── config-merger.service.ts
│ │ │ │ ├── config-persistence.service.spec.ts
│ │ │ │ ├── config-persistence.service.ts
│ │ │ │ ├── environment-config-provider.service.spec.ts
│ │ │ │ ├── environment-config-provider.service.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── runtime-state-manager.service.spec.ts
│ │ │ │ └── runtime-state-manager.service.ts
│ │ │ ├── dependencies
│ │ │ │ └── index.ts
│ │ │ ├── execution
│ │ │ │ ├── executors
│ │ │ │ │ ├── base-executor.ts
│ │ │ │ │ ├── claude-executor.ts
│ │ │ │ │ └── executor-factory.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── services
│ │ │ │ │ └── executor-service.ts
│ │ │ │ └── types.ts
│ │ │ ├── git
│ │ │ │ ├── adapters
│ │ │ │ │ ├── git-adapter.test.ts
│ │ │ │ │ └── git-adapter.ts
│ │ │ │ ├── git-domain.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── services
│ │ │ │ ├── branch-name-generator.spec.ts
│ │ │ │ ├── branch-name-generator.ts
│ │ │ │ ├── commit-message-generator.test.ts
│ │ │ │ ├── commit-message-generator.ts
│ │ │ │ ├── scope-detector.test.ts
│ │ │ │ ├── scope-detector.ts
│ │ │ │ ├── template-engine.test.ts
│ │ │ │ └── template-engine.ts
│ │ │ ├── integration
│ │ │ │ ├── clients
│ │ │ │ │ ├── index.ts
│ │ │ │ │ └── supabase-client.ts
│ │ │ │ ├── integration-domain.ts
│ │ │ │ └── services
│ │ │ │ ├── export.service.ts
│ │ │ │ ├── task-expansion.service.ts
│ │ │ │ └── task-retrieval.service.ts
│ │ │ ├── reports
│ │ │ │ ├── index.ts
│ │ │ │ ├── managers
│ │ │ │ │ └── complexity-report-manager.ts
│ │ │ │ └── types.ts
│ │ │ ├── storage
│ │ │ │ ├── adapters
│ │ │ │ │ ├── activity-logger.ts
│ │ │ │ │ ├── api-storage.ts
│ │ │ │ │ └── file-storage
│ │ │ │ │ ├── file-operations.ts
│ │ │ │ │ ├── file-storage.ts
│ │ │ │ │ ├── format-handler.ts
│ │ │ │ │ ├── index.ts
│ │ │ │ │ └── path-resolver.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── services
│ │ │ │ │ └── storage-factory.ts
│ │ │ │ └── utils
│ │ │ │ └── api-client.ts
│ │ │ ├── tasks
│ │ │ │ ├── entities
│ │ │ │ │ └── task.entity.ts
│ │ │ │ ├── parser
│ │ │ │ │ └── index.ts
│ │ │ │ ├── repositories
│ │ │ │ │ ├── supabase
│ │ │ │ │ │ ├── dependency-fetcher.ts
│ │ │ │ │ │ ├── index.ts
│ │ │ │ │ │ └── supabase-repository.ts
│ │ │ │ │ └── task-repository.interface.ts
│ │ │ │ ├── services
│ │ │ │ │ ├── preflight-checker.service.ts
│ │ │ │ │ ├── tag.service.ts
│ │ │ │ │ ├── task-execution-service.ts
│ │ │ │ │ ├── task-loader.service.ts
│ │ │ │ │ └── task-service.ts
│ │ │ │ └── tasks-domain.ts
│ │ │ ├── ui
│ │ │ │ └── index.ts
│ │ │ └── workflow
│ │ │ ├── managers
│ │ │ │ ├── workflow-state-manager.spec.ts
│ │ │ │ └── workflow-state-manager.ts
│ │ │ ├── orchestrators
│ │ │ │ ├── workflow-orchestrator.test.ts
│ │ │ │ └── workflow-orchestrator.ts
│ │ │ ├── services
│ │ │ │ ├── test-result-validator.test.ts
│ │ │ │ ├── test-result-validator.ts
│ │ │ │ ├── test-result-validator.types.ts
│ │ │ │ ├── workflow-activity-logger.ts
│ │ │ │ └── workflow.service.ts
│ │ │ ├── types.ts
│ │ │ └── workflow-domain.ts
│ │ ├── subpath-exports.test.ts
│ │ ├── tm-core.ts
│ │ └── utils
│ │ └── time.utils.ts
│ ├── tests
│ │ ├── auth
│ │ │ └── auth-refresh.test.ts
│ │ ├── integration
│ │ │ ├── auth-token-refresh.test.ts
│ │ │ ├── list-tasks.test.ts
│ │ │ └── storage
│ │ │ └── activity-logger.test.ts
│ │ ├── mocks
│ │ │ └── mock-provider.ts
│ │ ├── setup.ts
│ │ └── unit
│ │ ├── base-provider.test.ts
│ │ ├── executor.test.ts
│ │ └── smoke.test.ts
│ ├── tsconfig.json
│ └── vitest.config.ts
├── README-task-master.md
├── README.md
├── scripts
│ ├── create-worktree.sh
│ ├── dev.js
│ ├── init.js
│ ├── list-worktrees.sh
│ ├── modules
│ │ ├── ai-services-unified.js
│ │ ├── bridge-utils.js
│ │ ├── commands.js
│ │ ├── config-manager.js
│ │ ├── dependency-manager.js
│ │ ├── index.js
│ │ ├── prompt-manager.js
│ │ ├── supported-models.json
│ │ ├── sync-readme.js
│ │ ├── task-manager
│ │ │ ├── add-subtask.js
│ │ │ ├── add-task.js
│ │ │ ├── analyze-task-complexity.js
│ │ │ ├── clear-subtasks.js
│ │ │ ├── expand-all-tasks.js
│ │ │ ├── expand-task.js
│ │ │ ├── find-next-task.js
│ │ │ ├── generate-task-files.js
│ │ │ ├── is-task-dependent.js
│ │ │ ├── list-tasks.js
│ │ │ ├── migrate.js
│ │ │ ├── models.js
│ │ │ ├── move-task.js
│ │ │ ├── parse-prd
│ │ │ │ ├── index.js
│ │ │ │ ├── parse-prd-config.js
│ │ │ │ ├── parse-prd-helpers.js
│ │ │ │ ├── parse-prd-non-streaming.js
│ │ │ │ ├── parse-prd-streaming.js
│ │ │ │ └── parse-prd.js
│ │ │ ├── remove-subtask.js
│ │ │ ├── remove-task.js
│ │ │ ├── research.js
│ │ │ ├── response-language.js
│ │ │ ├── scope-adjustment.js
│ │ │ ├── set-task-status.js
│ │ │ ├── tag-management.js
│ │ │ ├── task-exists.js
│ │ │ ├── update-single-task-status.js
│ │ │ ├── update-subtask-by-id.js
│ │ │ ├── update-task-by-id.js
│ │ │ └── update-tasks.js
│ │ ├── task-manager.js
│ │ ├── ui.js
│ │ ├── update-config-tokens.js
│ │ ├── utils
│ │ │ ├── contextGatherer.js
│ │ │ ├── fuzzyTaskSearch.js
│ │ │ └── git-utils.js
│ │ └── utils.js
│ ├── task-complexity-report.json
│ ├── test-claude-errors.js
│ └── test-claude.js
├── sonar-project.properties
├── src
│ ├── ai-providers
│ │ ├── anthropic.js
│ │ ├── azure.js
│ │ ├── base-provider.js
│ │ ├── bedrock.js
│ │ ├── claude-code.js
│ │ ├── codex-cli.js
│ │ ├── gemini-cli.js
│ │ ├── google-vertex.js
│ │ ├── google.js
│ │ ├── grok-cli.js
│ │ ├── groq.js
│ │ ├── index.js
│ │ ├── lmstudio.js
│ │ ├── ollama.js
│ │ ├── openai-compatible.js
│ │ ├── openai.js
│ │ ├── openrouter.js
│ │ ├── perplexity.js
│ │ ├── xai.js
│ │ ├── zai-coding.js
│ │ └── zai.js
│ ├── constants
│ │ ├── commands.js
│ │ ├── paths.js
│ │ ├── profiles.js
│ │ ├── rules-actions.js
│ │ ├── task-priority.js
│ │ └── task-status.js
│ ├── profiles
│ │ ├── amp.js
│ │ ├── base-profile.js
│ │ ├── claude.js
│ │ ├── cline.js
│ │ ├── codex.js
│ │ ├── cursor.js
│ │ ├── gemini.js
│ │ ├── index.js
│ │ ├── kilo.js
│ │ ├── kiro.js
│ │ ├── opencode.js
│ │ ├── roo.js
│ │ ├── trae.js
│ │ ├── vscode.js
│ │ ├── windsurf.js
│ │ └── zed.js
│ ├── progress
│ │ ├── base-progress-tracker.js
│ │ ├── cli-progress-factory.js
│ │ ├── parse-prd-tracker.js
│ │ ├── progress-tracker-builder.js
│ │ └── tracker-ui.js
│ ├── prompts
│ │ ├── add-task.json
│ │ ├── analyze-complexity.json
│ │ ├── expand-task.json
│ │ ├── parse-prd.json
│ │ ├── README.md
│ │ ├── research.json
│ │ ├── schemas
│ │ │ ├── parameter.schema.json
│ │ │ ├── prompt-template.schema.json
│ │ │ ├── README.md
│ │ │ └── variant.schema.json
│ │ ├── update-subtask.json
│ │ ├── update-task.json
│ │ └── update-tasks.json
│ ├── provider-registry
│ │ └── index.js
│ ├── schemas
│ │ ├── add-task.js
│ │ ├── analyze-complexity.js
│ │ ├── base-schemas.js
│ │ ├── expand-task.js
│ │ ├── parse-prd.js
│ │ ├── registry.js
│ │ ├── update-subtask.js
│ │ ├── update-task.js
│ │ └── update-tasks.js
│ ├── task-master.js
│ ├── ui
│ │ ├── confirm.js
│ │ ├── indicators.js
│ │ └── parse-prd.js
│ └── utils
│ ├── asset-resolver.js
│ ├── create-mcp-config.js
│ ├── format.js
│ ├── getVersion.js
│ ├── logger-utils.js
│ ├── manage-gitignore.js
│ ├── path-utils.js
│ ├── profiles.js
│ ├── rule-transformer.js
│ ├── stream-parser.js
│ └── timeout-manager.js
├── test-clean-tags.js
├── test-config-manager.js
├── test-prd.txt
├── test-tag-functions.js
├── test-version-check-full.js
├── test-version-check.js
├── tests
│ ├── e2e
│ │ ├── e2e_helpers.sh
│ │ ├── parse_llm_output.cjs
│ │ ├── run_e2e.sh
│ │ ├── run_fallback_verification.sh
│ │ └── test_llm_analysis.sh
│ ├── fixtures
│ │ ├── .taskmasterconfig
│ │ ├── sample-claude-response.js
│ │ ├── sample-prd.txt
│ │ └── sample-tasks.js
│ ├── helpers
│ │ └── tool-counts.js
│ ├── integration
│ │ ├── claude-code-error-handling.test.js
│ │ ├── claude-code-optional.test.js
│ │ ├── cli
│ │ │ ├── commands.test.js
│ │ │ ├── complex-cross-tag-scenarios.test.js
│ │ │ └── move-cross-tag.test.js
│ │ ├── manage-gitignore.test.js
│ │ ├── mcp-server
│ │ │ └── direct-functions.test.js
│ │ ├── move-task-cross-tag.integration.test.js
│ │ ├── move-task-simple.integration.test.js
│ │ ├── profiles
│ │ │ ├── amp-init-functionality.test.js
│ │ │ ├── claude-init-functionality.test.js
│ │ │ ├── cline-init-functionality.test.js
│ │ │ ├── codex-init-functionality.test.js
│ │ │ ├── cursor-init-functionality.test.js
│ │ │ ├── gemini-init-functionality.test.js
│ │ │ ├── opencode-init-functionality.test.js
│ │ │ ├── roo-files-inclusion.test.js
│ │ │ ├── roo-init-functionality.test.js
│ │ │ ├── rules-files-inclusion.test.js
│ │ │ ├── trae-init-functionality.test.js
│ │ │ ├── vscode-init-functionality.test.js
│ │ │ └── windsurf-init-functionality.test.js
│ │ └── providers
│ │ └── temperature-support.test.js
│ ├── manual
│ │ ├── progress
│ │ │ ├── parse-prd-analysis.js
│ │ │ ├── test-parse-prd.js
│ │ │ └── TESTING_GUIDE.md
│ │ └── prompts
│ │ ├── prompt-test.js
│ │ └── README.md
│ ├── README.md
│ ├── setup.js
│ └── unit
│ ├── ai-providers
│ │ ├── base-provider.test.js
│ │ ├── claude-code.test.js
│ │ ├── codex-cli.test.js
│ │ ├── gemini-cli.test.js
│ │ ├── lmstudio.test.js
│ │ ├── mcp-components.test.js
│ │ ├── openai-compatible.test.js
│ │ ├── openai.test.js
│ │ ├── provider-registry.test.js
│ │ ├── zai-coding.test.js
│ │ ├── zai-provider.test.js
│ │ ├── zai-schema-introspection.test.js
│ │ └── zai.test.js
│ ├── ai-services-unified.test.js
│ ├── commands.test.js
│ ├── config-manager.test.js
│ ├── config-manager.test.mjs
│ ├── dependency-manager.test.js
│ ├── init.test.js
│ ├── initialize-project.test.js
│ ├── kebab-case-validation.test.js
│ ├── manage-gitignore.test.js
│ ├── mcp
│ │ └── tools
│ │ ├── __mocks__
│ │ │ └── move-task.js
│ │ ├── add-task.test.js
│ │ ├── analyze-complexity.test.js
│ │ ├── expand-all.test.js
│ │ ├── get-tasks.test.js
│ │ ├── initialize-project.test.js
│ │ ├── move-task-cross-tag-options.test.js
│ │ ├── move-task-cross-tag.test.js
│ │ ├── remove-task.test.js
│ │ └── tool-registration.test.js
│ ├── mcp-providers
│ │ ├── mcp-components.test.js
│ │ └── mcp-provider.test.js
│ ├── parse-prd.test.js
│ ├── profiles
│ │ ├── amp-integration.test.js
│ │ ├── claude-integration.test.js
│ │ ├── cline-integration.test.js
│ │ ├── codex-integration.test.js
│ │ ├── cursor-integration.test.js
│ │ ├── gemini-integration.test.js
│ │ ├── kilo-integration.test.js
│ │ ├── kiro-integration.test.js
│ │ ├── mcp-config-validation.test.js
│ │ ├── opencode-integration.test.js
│ │ ├── profile-safety-check.test.js
│ │ ├── roo-integration.test.js
│ │ ├── rule-transformer-cline.test.js
│ │ ├── rule-transformer-cursor.test.js
│ │ ├── rule-transformer-gemini.test.js
│ │ ├── rule-transformer-kilo.test.js
│ │ ├── rule-transformer-kiro.test.js
│ │ ├── rule-transformer-opencode.test.js
│ │ ├── rule-transformer-roo.test.js
│ │ ├── rule-transformer-trae.test.js
│ │ ├── rule-transformer-vscode.test.js
│ │ ├── rule-transformer-windsurf.test.js
│ │ ├── rule-transformer-zed.test.js
│ │ ├── rule-transformer.test.js
│ │ ├── selective-profile-removal.test.js
│ │ ├── subdirectory-support.test.js
│ │ ├── trae-integration.test.js
│ │ ├── vscode-integration.test.js
│ │ ├── windsurf-integration.test.js
│ │ └── zed-integration.test.js
│ ├── progress
│ │ └── base-progress-tracker.test.js
│ ├── prompt-manager.test.js
│ ├── prompts
│ │ ├── expand-task-prompt.test.js
│ │ └── prompt-migration.test.js
│ ├── scripts
│ │ └── modules
│ │ ├── commands
│ │ │ ├── move-cross-tag.test.js
│ │ │ └── README.md
│ │ ├── dependency-manager
│ │ │ ├── circular-dependencies.test.js
│ │ │ ├── cross-tag-dependencies.test.js
│ │ │ └── fix-dependencies-command.test.js
│ │ ├── task-manager
│ │ │ ├── add-subtask.test.js
│ │ │ ├── add-task.test.js
│ │ │ ├── analyze-task-complexity.test.js
│ │ │ ├── clear-subtasks.test.js
│ │ │ ├── complexity-report-tag-isolation.test.js
│ │ │ ├── expand-all-tasks.test.js
│ │ │ ├── expand-task.test.js
│ │ │ ├── find-next-task.test.js
│ │ │ ├── generate-task-files.test.js
│ │ │ ├── list-tasks.test.js
│ │ │ ├── models-baseurl.test.js
│ │ │ ├── move-task-cross-tag.test.js
│ │ │ ├── move-task.test.js
│ │ │ ├── parse-prd-schema.test.js
│ │ │ ├── parse-prd.test.js
│ │ │ ├── remove-subtask.test.js
│ │ │ ├── remove-task.test.js
│ │ │ ├── research.test.js
│ │ │ ├── scope-adjustment.test.js
│ │ │ ├── set-task-status.test.js
│ │ │ ├── setup.js
│ │ │ ├── update-single-task-status.test.js
│ │ │ ├── update-subtask-by-id.test.js
│ │ │ ├── update-task-by-id.test.js
│ │ │ └── update-tasks.test.js
│ │ ├── ui
│ │ │ └── cross-tag-error-display.test.js
│ │ └── utils-tag-aware-paths.test.js
│ ├── task-finder.test.js
│ ├── task-manager
│ │ ├── clear-subtasks.test.js
│ │ ├── move-task.test.js
│ │ ├── tag-boundary.test.js
│ │ └── tag-management.test.js
│ ├── task-master.test.js
│ ├── ui
│ │ └── indicators.test.js
│ ├── ui.test.js
│ ├── utils-strip-ansi.test.js
│ └── utils.test.js
├── tsconfig.json
├── tsdown.config.ts
├── turbo.json
└── update-task-migration-plan.md
```
# Files
--------------------------------------------------------------------------------
/tests/unit/config-manager.test.js:
--------------------------------------------------------------------------------
```javascript
import fs from 'fs';
import path from 'path';
import { jest } from '@jest/globals';
import { fileURLToPath } from 'url';
// Mock modules first before any imports
jest.mock('fs', () => ({
	// Always report paths as absent so config discovery never touches the real
	// filesystem. (The previous implementation special-cased Jest-internal and
	// node_modules paths, but both branches returned false, so the check was
	// redundant — a constant false is behaviorally identical.)
	existsSync: jest.fn(() => false),
	readFileSync: jest.fn(() => '{}'),
	writeFileSync: jest.fn(),
	mkdirSync: jest.fn()
}));
jest.mock('path', () => ({
	// Minimal posix-style implementations — just enough path behavior for the
	// config-manager code paths exercised in this suite.
	join: jest.fn((dir, file) => `${dir}/${file}`),
	dirname: jest.fn((filePath) => filePath.split('/').slice(0, -1).join('/')),
	resolve: jest.fn((...paths) => paths.join('/')),
	basename: jest.fn((filePath) => filePath.split('/').pop())
}));
jest.mock('chalk', () => ({
	// Pass-through "color" functions so any logged output stays plain text.
	red: jest.fn((text) => text),
	blue: jest.fn((text) => text),
	green: jest.fn((text) => text),
	yellow: jest.fn((text) => text),
	white: jest.fn(() => ({
		bold: jest.fn((text) => text)
	})),
	reset: jest.fn((text) => text),
	dim: jest.fn((text) => text) // Add dim function to prevent chalk errors
}));
// Mock console to prevent Jest internal access
const mockConsole = {
	log: jest.fn(),
	info: jest.fn(),
	warn: jest.fn(),
	error: jest.fn()
};
global.console = mockConsole;
// --- Define Mock Function Instances ---
// NOTE: the `mock` name prefix is required so Jest permits referencing this
// variable from inside the hoisted jest.mock() factory below.
const mockFindConfigPath = jest.fn(() => null); // Default to null, can be overridden in tests
// Mock path-utils to prevent config file path discovery and logging
jest.mock('../../src/utils/path-utils.js', () => ({
	__esModule: true,
	findProjectRoot: jest.fn(() => '/mock/project'),
	findConfigPath: mockFindConfigPath, // Use the mock function instance
	findTasksPath: jest.fn(() => '/mock/tasks.json'),
	findComplexityReportPath: jest.fn(() => null),
	resolveTasksOutputPath: jest.fn(() => '/mock/tasks.json'),
	resolveComplexityReportOutputPath: jest.fn(() => '/mock/report.json')
}));
// --- Read REAL supported-models.json data BEFORE mocks ---
// NOTE(review): jest.mock('fs') is hoisted above imports, so `fs` here may
// already be the mock — confirm this actually reads the real file on disk.
const __filename = fileURLToPath(import.meta.url); // Get current file path
const __dirname = path.dirname(__filename); // Get current directory
const realSupportedModelsPath = path.resolve(
	__dirname,
	'../../scripts/modules/supported-models.json'
);
let REAL_SUPPORTED_MODELS_CONTENT;
let REAL_SUPPORTED_MODELS_DATA;
try {
	REAL_SUPPORTED_MODELS_CONTENT = fs.readFileSync(
		realSupportedModelsPath,
		'utf-8'
	);
	REAL_SUPPORTED_MODELS_DATA = JSON.parse(REAL_SUPPORTED_MODELS_CONTENT);
} catch (err) {
	console.error(
		'FATAL TEST SETUP ERROR: Could not read or parse real supported-models.json',
		err
	);
	// The suite cannot run meaningfully without the real model data, so bail
	// out immediately. (The previous fallback assignments to '{}'/{} were dead
	// code: process.exit(1) made them unobservable.)
	process.exit(1); // Exit if essential test data can't be loaded
}
// --- Define Mock Function Instances ---
// (`mock` prefix required so Jest allows use inside the hoisted factory below)
const mockFindProjectRoot = jest.fn();
const mockLog = jest.fn();
// --- Mock Dependencies BEFORE importing the module under test ---
// Mock the 'utils.js' module using a factory function
jest.mock('../../scripts/modules/utils.js', () => ({
	__esModule: true, // Indicate it's an ES module mock
	findProjectRoot: mockFindProjectRoot, // Use the mock function instance
	log: mockLog, // Use the mock function instance
	// Include other necessary exports from utils if config-manager uses them directly
	resolveEnvVariable: jest.fn() // Example if needed
}));
// --- Import the module under test AFTER mocks are defined ---
import * as configManager from '../../scripts/modules/config-manager.js';
// Import the mocked 'fs' module to allow spying on its functions
import fsMocked from 'fs';
// --- Test Data (Keep as is, ensure DEFAULT_CONFIG is accurate) ---
const MOCK_PROJECT_ROOT = '/mock/project';
const MOCK_CONFIG_PATH = path.join(
	MOCK_PROJECT_ROOT,
	'.taskmaster/config.json'
);
// Updated DEFAULT_CONFIG reflecting the implementation
// NOTE(review): presumably mirrors the DEFAULTS object inside
// scripts/modules/config-manager.js — verify when that module changes.
const DEFAULT_CONFIG = {
	models: {
		main: {
			provider: 'anthropic',
			modelId: 'claude-sonnet-4-20250514',
			maxTokens: 64000,
			temperature: 0.2
		},
		research: {
			provider: 'perplexity',
			modelId: 'sonar',
			maxTokens: 8700,
			temperature: 0.1
		},
		fallback: {
			provider: 'anthropic',
			modelId: 'claude-3-7-sonnet-20250219',
			maxTokens: 120000,
			temperature: 0.2
		}
	},
	global: {
		logLevel: 'info',
		debug: false,
		defaultNumTasks: 10,
		defaultSubtasks: 5,
		defaultPriority: 'medium',
		projectName: 'Task Master',
		ollamaBaseURL: 'http://localhost:11434/api',
		bedrockBaseURL: 'https://bedrock.us-east-1.amazonaws.com',
		enableCodebaseAnalysis: true,
		enableProxy: false,
		responseLanguage: 'English'
	},
	claudeCode: {},
	codexCli: {},
	grokCli: {
		timeout: 120000,
		workingDirectory: null,
		defaultModel: 'grok-4-latest'
	}
};
// Other test data (VALID_CUSTOM_CONFIG, PARTIAL_CONFIG, INVALID_PROVIDER_CONFIG)
// Fully-specified override config; every provider/model pair below also appears
// in the per-test supported-models mocks so validation passes.
const VALID_CUSTOM_CONFIG = {
	models: {
		main: {
			provider: 'openai',
			modelId: 'gpt-4o',
			maxTokens: 4096,
			temperature: 0.5
		},
		research: {
			provider: 'google',
			modelId: 'gemini-1.5-pro-latest',
			maxTokens: 8192,
			temperature: 0.3
		},
		fallback: {
			provider: 'anthropic',
			modelId: 'claude-3-opus-20240229',
			maxTokens: 100000,
			temperature: 0.4
		}
	},
	global: {
		logLevel: 'debug',
		defaultPriority: 'high',
		projectName: 'My Custom Project'
	}
};
// Sparse config: only the main model and projectName are set; merge tests
// expect everything else to come from DEFAULT_CONFIG.
const PARTIAL_CONFIG = {
	models: {
		main: { provider: 'openai', modelId: 'gpt-4-turbo' }
	},
	global: {
		projectName: 'Partial Project'
	}
};
// Config with an unknown main provider; getConfig tests expect a warning and a
// fallback to the default main model.
const INVALID_PROVIDER_CONFIG = {
	models: {
		main: { provider: 'invalid-provider', modelId: 'some-model' },
		research: {
			provider: 'perplexity',
			modelId: 'llama-3-sonar-large-32k-online'
		}
	},
	global: {
		logLevel: 'warn'
	}
};
// Claude Code test data
const VALID_CLAUDE_CODE_CONFIG = {
	maxTurns: 5,
	customSystemPrompt: 'You are a helpful coding assistant',
	appendSystemPrompt: 'Always follow best practices',
	permissionMode: 'acceptEdits',
	allowedTools: ['Read', 'LS', 'Edit'],
	disallowedTools: ['Write'],
	mcpServers: {
		'test-server': {
			type: 'stdio',
			command: 'node',
			args: ['server.js'],
			env: { NODE_ENV: 'test' }
		}
	},
	commandSpecific: {
		'add-task': {
			maxTurns: 3,
			permissionMode: 'plan'
		},
		research: {
			customSystemPrompt: 'You are a research assistant'
		}
	}
};
// Every field below violates the settings schema in some way (see inline notes);
// validation tests expect the whole object to be rejected.
const INVALID_CLAUDE_CODE_CONFIG = {
	maxTurns: 'invalid', // Should be number
	permissionMode: 'invalid-mode', // Invalid enum value
	allowedTools: 'not-an-array', // Should be array
	mcpServers: {
		'invalid-server': {
			type: 'invalid-type', // Invalid enum value
			url: 'not-a-valid-url' // Invalid URL format
		}
	},
	commandSpecific: {
		'invalid-command': {
			// Invalid command name
			maxTurns: -1 // Invalid negative number
		}
	}
};
// Valid but minimal settings; used to verify partial configs pass validation
// unchanged and that unknown commands fall back to these global settings.
const PARTIAL_CLAUDE_CODE_CONFIG = {
	maxTurns: 10,
	permissionMode: 'default',
	commandSpecific: {
		'expand-task': {
			customSystemPrompt: 'Focus on task breakdown'
		}
	}
};
// Define spies globally to be restored in afterAll
let consoleErrorSpy;
let consoleWarnSpy;
let fsReadFileSyncSpy;
let fsWriteFileSyncSpy;
let fsExistsSyncSpy;
beforeAll(() => {
	// Set up console spies once for the whole suite; tests assert on these to
	// verify warning/error output without writing to the terminal.
	consoleErrorSpy = jest.spyOn(console, 'error').mockImplementation(() => {});
	consoleWarnSpy = jest.spyOn(console, 'warn').mockImplementation(() => {});
});
afterAll(() => {
	// Restore all spies
	jest.restoreAllMocks();
});
// Reset mocks before each test for isolation
beforeEach(() => {
	// Clear all mock calls and reset implementations between tests
	jest.clearAllMocks();
	// Reset the external mock instances for utils
	mockFindProjectRoot.mockReset();
	mockLog.mockReset();
	mockFindConfigPath.mockReset();
	// --- Set up spies ON the imported 'fs' mock ---
	// (fsMocked is the jest.mock'd 'fs' module, so these spies wrap the mocked
	// functions, letting individual tests override implementations per-test.)
	fsExistsSyncSpy = jest.spyOn(fsMocked, 'existsSync');
	fsReadFileSyncSpy = jest.spyOn(fsMocked, 'readFileSync');
	fsWriteFileSyncSpy = jest.spyOn(fsMocked, 'writeFileSync');
	// --- Default Mock Implementations ---
	mockFindProjectRoot.mockReturnValue(MOCK_PROJECT_ROOT); // Default for utils.findProjectRoot
	mockFindConfigPath.mockReturnValue(null); // Default to no config file found
	fsExistsSyncSpy.mockReturnValue(true); // Assume files exist by default
	// Default readFileSync: Return REAL models content, mocked config, or throw error
	fsReadFileSyncSpy.mockImplementation((filePath) => {
		const baseName = path.basename(filePath);
		if (baseName === 'supported-models.json') {
			// Return the REAL file content stringified
			return REAL_SUPPORTED_MODELS_CONTENT;
		} else if (filePath === MOCK_CONFIG_PATH) {
			// Still mock the .taskmasterconfig reads
			return JSON.stringify(DEFAULT_CONFIG); // Default behavior
		}
		// For Jest internal files or other unexpected files, return empty string instead of throwing
		// This prevents Jest's internal file operations from breaking tests
		if (
			filePath.includes('jest-message-util') ||
			filePath.includes('node_modules')
		) {
			return '{}'; // Return empty JSON for Jest internal files
		}
		// Throw for truly unexpected reads that should be caught in tests
		throw new Error(`Unexpected fs.readFileSync call in test: ${filePath}`);
	});
	// Default writeFileSync: Do nothing, just allow calls
	fsWriteFileSyncSpy.mockImplementation(() => {});
});
// --- Validation Functions ---
describe('Validation Functions', () => {
	// Tests for validateProvider and validateProviderModelCombination
	test('validateProvider should return true for valid providers', () => {
		expect(configManager.validateProvider('openai')).toBe(true);
		expect(configManager.validateProvider('anthropic')).toBe(true);
		expect(configManager.validateProvider('google')).toBe(true);
		expect(configManager.validateProvider('perplexity')).toBe(true);
		expect(configManager.validateProvider('ollama')).toBe(true);
		expect(configManager.validateProvider('openrouter')).toBe(true);
		expect(configManager.validateProvider('bedrock')).toBe(true);
	});
	test('validateProvider should return false for invalid providers', () => {
		expect(configManager.validateProvider('invalid-provider')).toBe(false);
		expect(configManager.validateProvider('grok')).toBe(false); // Not in mock map
		expect(configManager.validateProvider('')).toBe(false);
		expect(configManager.validateProvider(null)).toBe(false);
	});
	test('validateProviderModelCombination should validate known good combinations', () => {
		// Re-load config to ensure MODEL_MAP is populated from mock (now real data)
		configManager.getConfig(MOCK_PROJECT_ROOT, true);
		expect(
			configManager.validateProviderModelCombination('openai', 'gpt-4o')
		).toBe(true);
		expect(
			configManager.validateProviderModelCombination(
				'anthropic',
				'claude-3-5-sonnet-20241022'
			)
		).toBe(true);
	});
	test('validateProviderModelCombination should return false for known bad combinations', () => {
		// Re-load config to ensure MODEL_MAP is populated from mock (now real data)
		configManager.getConfig(MOCK_PROJECT_ROOT, true);
		expect(
			configManager.validateProviderModelCombination(
				'openai',
				'claude-3-opus-20240229'
			)
		).toBe(false);
	});
	// Title fixed: the assertions expect false (providers mapped to empty model
	// lists reject every model id), but the title previously claimed "true".
	test('validateProviderModelCombination should return false for ollama/openrouter (empty lists in map)', () => {
		// Re-load config to ensure MODEL_MAP is populated from mock (now real data)
		configManager.getConfig(MOCK_PROJECT_ROOT, true);
		expect(
			configManager.validateProviderModelCombination('ollama', 'any-model')
		).toBe(false);
		expect(
			configManager.validateProviderModelCombination('openrouter', 'any/model')
		).toBe(false);
	});
	test('validateProviderModelCombination should return true for providers not in map', () => {
		// Re-load config to ensure MODEL_MAP is populated from mock (now real data)
		configManager.getConfig(MOCK_PROJECT_ROOT, true);
		// The implementation returns true if the provider isn't in the map
		expect(
			configManager.validateProviderModelCombination(
				'unknown-provider',
				'some-model'
			)
		).toBe(true);
	});
});
// --- Claude Code Validation Tests ---
describe('Claude Code Validation', () => {
	test('validateClaudeCodeSettings should return valid settings for correct input', () => {
		// A fully valid settings object passes through unchanged, with no warning.
		expect(
			configManager.validateClaudeCodeSettings(VALID_CLAUDE_CODE_CONFIG)
		).toEqual(VALID_CLAUDE_CODE_CONFIG);
		expect(consoleWarnSpy).not.toHaveBeenCalled();
	});
	test('validateClaudeCodeSettings should return empty object for invalid input', () => {
		// Invalid settings are rejected wholesale and a warning is emitted.
		expect(
			configManager.validateClaudeCodeSettings(INVALID_CLAUDE_CODE_CONFIG)
		).toEqual({});
		expect(consoleWarnSpy).toHaveBeenCalledWith(
			expect.stringContaining('Warning: Invalid Claude Code settings in config')
		);
	});
	test('validateClaudeCodeSettings should handle partial valid configuration', () => {
		// A valid subset of fields is accepted as-is.
		expect(
			configManager.validateClaudeCodeSettings(PARTIAL_CLAUDE_CODE_CONFIG)
		).toEqual(PARTIAL_CLAUDE_CODE_CONFIG);
		expect(consoleWarnSpy).not.toHaveBeenCalled();
	});
	test('validateClaudeCodeSettings should return empty object for empty input', () => {
		expect(configManager.validateClaudeCodeSettings({})).toEqual({});
		expect(consoleWarnSpy).not.toHaveBeenCalled();
	});
	test('validateClaudeCodeSettings should handle null/undefined input', () => {
		// Both nullish inputs yield the empty settings object and each warns once.
		for (const nullishInput of [null, undefined]) {
			expect(configManager.validateClaudeCodeSettings(nullishInput)).toEqual(
				{}
			);
		}
		expect(consoleWarnSpy).toHaveBeenCalledTimes(2);
	});
});
// --- Claude Code Getter Tests ---
describe('Claude Code Getter Functions', () => {
	test('getClaudeCodeSettings should return default empty object when no config exists', () => {
		// No config file exists, should return empty object
		fsExistsSyncSpy.mockReturnValue(false);
		const settings = configManager.getClaudeCodeSettings(MOCK_PROJECT_ROOT);
		expect(settings).toEqual({});
	});
	test('getClaudeCodeSettings should return merged settings from config file', () => {
		// Config file with Claude Code settings
		const configWithClaudeCode = {
			...VALID_CUSTOM_CONFIG,
			claudeCode: VALID_CLAUDE_CODE_CONFIG
		};
		// Mock findConfigPath to return the mock config path
		mockFindConfigPath.mockReturnValue(MOCK_CONFIG_PATH);
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH)
				return JSON.stringify(configWithClaudeCode);
			if (path.basename(filePath) === 'supported-models.json') {
				return JSON.stringify({
					openai: [{ id: 'gpt-4o' }],
					google: [{ id: 'gemini-1.5-pro-latest' }],
					anthropic: [
						{ id: 'claude-3-opus-20240229' },
						{ id: 'claude-3-7-sonnet-20250219' },
						{ id: 'claude-3-5-sonnet' }
					],
					perplexity: [{ id: 'sonar-pro' }],
					ollama: [],
					openrouter: []
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);
		const settings = configManager.getClaudeCodeSettings(
			MOCK_PROJECT_ROOT,
			true
		); // Force reload
		expect(settings).toEqual(VALID_CLAUDE_CODE_CONFIG);
	});
	test('getClaudeCodeSettingsForCommand should return command-specific settings', () => {
		// Config with command-specific settings
		const configWithClaudeCode = {
			...VALID_CUSTOM_CONFIG,
			claudeCode: VALID_CLAUDE_CODE_CONFIG
		};
		// Mock findConfigPath to return the mock config path
		mockFindConfigPath.mockReturnValue(MOCK_CONFIG_PATH);
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (path.basename(filePath) === 'supported-models.json') return '{}';
			if (filePath === MOCK_CONFIG_PATH)
				return JSON.stringify(configWithClaudeCode);
			// (A duplicated, unreachable copy of this throw was removed.)
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);
		const settings = configManager.getClaudeCodeSettingsForCommand(
			'add-task',
			MOCK_PROJECT_ROOT,
			true
		); // Force reload
		// Should merge global settings with command-specific settings
		const expectedSettings = {
			...VALID_CLAUDE_CODE_CONFIG,
			...VALID_CLAUDE_CODE_CONFIG.commandSpecific['add-task']
		};
		expect(settings).toEqual(expectedSettings);
	});
	test('getClaudeCodeSettingsForCommand should return global settings for unknown command', () => {
		// Config with Claude Code settings
		const configWithClaudeCode = {
			...VALID_CUSTOM_CONFIG,
			claudeCode: PARTIAL_CLAUDE_CODE_CONFIG
		};
		// Mock findConfigPath to return the mock config path
		mockFindConfigPath.mockReturnValue(MOCK_CONFIG_PATH);
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (path.basename(filePath) === 'supported-models.json') return '{}';
			if (filePath === MOCK_CONFIG_PATH)
				return JSON.stringify(configWithClaudeCode);
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);
		const settings = configManager.getClaudeCodeSettingsForCommand(
			'unknown-command',
			MOCK_PROJECT_ROOT,
			true
		); // Force reload
		// Should return global settings only
		expect(settings).toEqual(PARTIAL_CLAUDE_CODE_CONFIG);
	});
});
// --- getConfig Tests ---
describe('getConfig Tests', () => {
	test('should return default config if .taskmasterconfig does not exist', () => {
		// Arrange
		fsExistsSyncSpy.mockReturnValue(false);
		// findProjectRoot mock is set in beforeEach
		// Act: Call getConfig with explicit root
		const config = configManager.getConfig(MOCK_PROJECT_ROOT, true); // Force reload
		// Assert
		expect(config).toEqual(DEFAULT_CONFIG);
		expect(mockFindProjectRoot).not.toHaveBeenCalled(); // Explicit root provided
		// The implementation checks for .taskmaster directory first
		expect(fsExistsSyncSpy).toHaveBeenCalledWith(
			path.join(MOCK_PROJECT_ROOT, '.taskmaster')
		);
		expect(fsReadFileSyncSpy).not.toHaveBeenCalled(); // No read if file doesn't exist
		expect(consoleWarnSpy).toHaveBeenCalledWith(
			expect.stringContaining('not found at provided project root')
		);
	});
	test.skip('should use findProjectRoot and return defaults if file not found', () => {
		// TODO: Fix mock interaction, findProjectRoot isn't being registered as called
		// Arrange
		fsExistsSyncSpy.mockReturnValue(false);
		// findProjectRoot mock is set in beforeEach
		// Act: Call getConfig without explicit root
		const config = configManager.getConfig(null, true); // Force reload
		// Assert
		expect(mockFindProjectRoot).toHaveBeenCalled(); // Should be called now
		expect(fsExistsSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
		expect(config).toEqual(DEFAULT_CONFIG);
		expect(fsReadFileSyncSpy).not.toHaveBeenCalled();
		expect(consoleWarnSpy).toHaveBeenCalledWith(
			expect.stringContaining('not found at derived root')
		); // Adjusted expected warning
	});
	test('should read and merge valid config file with defaults', () => {
		// Arrange: Override readFileSync for this test
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH)
				return JSON.stringify(VALID_CUSTOM_CONFIG);
			if (path.basename(filePath) === 'supported-models.json') {
				// Provide necessary models for validation within getConfig
				// (a duplicated 'claude-3-5-sonnet' entry was removed)
				return JSON.stringify({
					openai: [{ id: 'gpt-4o' }],
					google: [{ id: 'gemini-1.5-pro-latest' }],
					perplexity: [{ id: 'sonar-pro' }],
					anthropic: [
						{ id: 'claude-3-opus-20240229' },
						{ id: 'claude-3-5-sonnet' },
						{ id: 'claude-3-7-sonnet-20250219' }
					],
					ollama: [],
					openrouter: []
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);
		// findProjectRoot mock set in beforeEach
		// Act
		const config = configManager.getConfig(MOCK_PROJECT_ROOT, true); // Force reload
		// Assert: Construct expected merged config
		const expectedMergedConfig = {
			models: {
				main: {
					...DEFAULT_CONFIG.models.main,
					...VALID_CUSTOM_CONFIG.models.main
				},
				research: {
					...DEFAULT_CONFIG.models.research,
					...VALID_CUSTOM_CONFIG.models.research
				},
				fallback: {
					...DEFAULT_CONFIG.models.fallback,
					...VALID_CUSTOM_CONFIG.models.fallback
				}
			},
			global: { ...DEFAULT_CONFIG.global, ...VALID_CUSTOM_CONFIG.global },
			claudeCode: {
				...DEFAULT_CONFIG.claudeCode,
				...VALID_CUSTOM_CONFIG.claudeCode
			},
			grokCli: { ...DEFAULT_CONFIG.grokCli },
			codexCli: { ...DEFAULT_CONFIG.codexCli }
		};
		expect(config).toEqual(expectedMergedConfig);
		expect(fsExistsSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
		expect(fsReadFileSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH, 'utf-8');
	});
	test('should merge defaults for partial config file', () => {
		// Arrange
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH) return JSON.stringify(PARTIAL_CONFIG);
			if (path.basename(filePath) === 'supported-models.json') {
				return JSON.stringify({
					openai: [{ id: 'gpt-4-turbo' }],
					perplexity: [{ id: 'sonar-pro' }],
					anthropic: [
						{ id: 'claude-3-7-sonnet-20250219' },
						{ id: 'claude-3-5-sonnet' }
					],
					ollama: [],
					openrouter: []
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);
		// findProjectRoot mock set in beforeEach
		// Act
		const config = configManager.getConfig(MOCK_PROJECT_ROOT, true);
		// Assert: Construct expected merged config
		const expectedMergedConfig = {
			models: {
				main: { ...DEFAULT_CONFIG.models.main, ...PARTIAL_CONFIG.models.main },
				research: { ...DEFAULT_CONFIG.models.research },
				fallback: { ...DEFAULT_CONFIG.models.fallback }
			},
			global: { ...DEFAULT_CONFIG.global, ...PARTIAL_CONFIG.global },
			// Fixed copy-paste: spread the config under test (PARTIAL_CONFIG, which
			// has no claudeCode) instead of VALID_CUSTOM_CONFIG's.
			claudeCode: {
				...DEFAULT_CONFIG.claudeCode,
				...PARTIAL_CONFIG.claudeCode
			},
			grokCli: { ...DEFAULT_CONFIG.grokCli },
			codexCli: { ...DEFAULT_CONFIG.codexCli }
		};
		expect(config).toEqual(expectedMergedConfig);
		expect(fsReadFileSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH, 'utf-8');
	});
	test('should handle JSON parsing error and return defaults', () => {
		// Arrange
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH) return 'invalid json';
			// Mock models read needed for initial load before parse error
			if (path.basename(filePath) === 'supported-models.json') {
				return JSON.stringify({
					anthropic: [{ id: 'claude-3-7-sonnet-20250219' }],
					perplexity: [{ id: 'sonar-pro' }],
					fallback: [{ id: 'claude-3-5-sonnet' }],
					ollama: [],
					openrouter: []
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);
		// findProjectRoot mock set in beforeEach
		// Act
		const config = configManager.getConfig(MOCK_PROJECT_ROOT, true);
		// Assert
		expect(config).toEqual(DEFAULT_CONFIG);
		expect(consoleErrorSpy).toHaveBeenCalledWith(
			expect.stringContaining('Error reading or parsing')
		);
	});
	test('should handle file read error and return defaults', () => {
		// Arrange
		const readError = new Error('Permission denied');
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH) throw readError;
			// Mock models read needed for initial load before read error
			if (path.basename(filePath) === 'supported-models.json') {
				return JSON.stringify({
					anthropic: [{ id: 'claude-3-7-sonnet-20250219' }],
					perplexity: [{ id: 'sonar-pro' }],
					fallback: [{ id: 'claude-3-5-sonnet' }],
					ollama: [],
					openrouter: []
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);
		// findProjectRoot mock set in beforeEach
		// Act
		const config = configManager.getConfig(MOCK_PROJECT_ROOT, true);
		// Assert
		expect(config).toEqual(DEFAULT_CONFIG);
		expect(consoleErrorSpy).toHaveBeenCalledWith(
			expect.stringContaining('Permission denied. Using default configuration.')
		);
	});
	test('should validate provider and fallback to default if invalid', () => {
		// Arrange
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH)
				return JSON.stringify(INVALID_PROVIDER_CONFIG);
			if (path.basename(filePath) === 'supported-models.json') {
				return JSON.stringify({
					perplexity: [{ id: 'llama-3-sonar-large-32k-online' }],
					anthropic: [
						{ id: 'claude-3-7-sonnet-20250219' },
						{ id: 'claude-3-5-sonnet' }
					],
					ollama: [],
					openrouter: []
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);
		// findProjectRoot mock set in beforeEach
		// Act
		const config = configManager.getConfig(MOCK_PROJECT_ROOT, true);
		// Assert
		expect(consoleWarnSpy).toHaveBeenCalledWith(
			expect.stringContaining(
				'Warning: Invalid main provider "invalid-provider"'
			)
		);
		const expectedMergedConfig = {
			models: {
				main: { ...DEFAULT_CONFIG.models.main },
				research: {
					...DEFAULT_CONFIG.models.research,
					...INVALID_PROVIDER_CONFIG.models.research
				},
				fallback: { ...DEFAULT_CONFIG.models.fallback }
			},
			global: { ...DEFAULT_CONFIG.global, ...INVALID_PROVIDER_CONFIG.global },
			// Fixed copy-paste: spread the config under test (which has no
			// claudeCode) instead of VALID_CUSTOM_CONFIG's.
			claudeCode: {
				...DEFAULT_CONFIG.claudeCode,
				...INVALID_PROVIDER_CONFIG.claudeCode
			},
			grokCli: { ...DEFAULT_CONFIG.grokCli },
			codexCli: { ...DEFAULT_CONFIG.codexCli }
		};
		expect(config).toEqual(expectedMergedConfig);
	});
});
// --- writeConfig Tests ---
describe('writeConfig', () => {
	test('should write valid config to file', () => {
		// Arrange (Default mocks are sufficient)
		// findProjectRoot mock set in beforeEach
		fsWriteFileSyncSpy.mockImplementation(() => {}); // Ensure it doesn't throw
		// Act
		const success = configManager.writeConfig(
			VALID_CUSTOM_CONFIG,
			MOCK_PROJECT_ROOT
		);
		// Assert: the config is serialized with 2-space indentation to the
		// resolved config path, and no error is logged.
		expect(success).toBe(true);
		expect(fsWriteFileSyncSpy).toHaveBeenCalledWith(
			MOCK_CONFIG_PATH,
			JSON.stringify(VALID_CUSTOM_CONFIG, null, 2) // writeConfig stringifies
		);
		expect(consoleErrorSpy).not.toHaveBeenCalled();
	});
	test('should return false and log error if write fails', () => {
		// Arrange: simulate a filesystem failure during the write
		const mockWriteError = new Error('Disk full');
		fsWriteFileSyncSpy.mockImplementation(() => {
			throw mockWriteError;
		});
		// findProjectRoot mock set in beforeEach
		// Act
		const success = configManager.writeConfig(
			VALID_CUSTOM_CONFIG,
			MOCK_PROJECT_ROOT
		);
		// Assert: failure is reported via return value and console.error
		expect(success).toBe(false);
		expect(fsWriteFileSyncSpy).toHaveBeenCalled();
		expect(consoleErrorSpy).toHaveBeenCalledWith(
			expect.stringContaining('Disk full')
		);
	});
	test.skip('should return false if project root cannot be determined', () => {
		// TODO: Fix mock interaction or function logic, returns true unexpectedly in test
		// Arrange: Override mock for this specific test
		mockFindProjectRoot.mockReturnValue(null);
		// Act: Call without explicit root
		const success = configManager.writeConfig(VALID_CUSTOM_CONFIG);
		// Assert
		expect(success).toBe(false); // Function should return false if root is null
		expect(mockFindProjectRoot).toHaveBeenCalled();
		expect(fsWriteFileSyncSpy).not.toHaveBeenCalled();
		expect(consoleErrorSpy).toHaveBeenCalledWith(
			expect.stringContaining('Could not determine project root')
		);
	});
});
// --- Getter Functions ---
describe('Getter Functions', () => {
	test('getMainProvider should return provider from config', () => {
		// Arrange: Set up readFileSync to return VALID_CUSTOM_CONFIG
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH)
				return JSON.stringify(VALID_CUSTOM_CONFIG);
			if (path.basename(filePath) === 'supported-models.json') {
				return JSON.stringify({
					openai: [{ id: 'gpt-4o' }],
					google: [{ id: 'gemini-1.5-pro-latest' }],
					anthropic: [
						{ id: 'claude-3-opus-20240229' },
						{ id: 'claude-3-7-sonnet-20250219' },
						{ id: 'claude-3-5-sonnet' }
					],
					perplexity: [{ id: 'sonar-pro' }],
					ollama: [],
					openrouter: []
				}); // Added perplexity
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);
		// findProjectRoot mock set in beforeEach
		// Act
		const provider = configManager.getMainProvider(MOCK_PROJECT_ROOT);
		// Assert
		expect(provider).toBe(VALID_CUSTOM_CONFIG.models.main.provider);
	});
	test('getLogLevel should return logLevel from config', () => {
		// Arrange: Set up readFileSync to return VALID_CUSTOM_CONFIG
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH)
				return JSON.stringify(VALID_CUSTOM_CONFIG);
			if (path.basename(filePath) === 'supported-models.json') {
				// Provide enough mock model data for validation within getConfig
				return JSON.stringify({
					openai: [{ id: 'gpt-4o' }],
					google: [{ id: 'gemini-1.5-pro-latest' }],
					anthropic: [
						{ id: 'claude-3-opus-20240229' },
						{ id: 'claude-3-7-sonnet-20250219' },
						{ id: 'claude-3-5-sonnet' }
					],
					perplexity: [{ id: 'sonar-pro' }],
					ollama: [],
					openrouter: []
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);
		// findProjectRoot mock set in beforeEach
		// Act
		const logLevel = configManager.getLogLevel(MOCK_PROJECT_ROOT);
		// Assert
		expect(logLevel).toBe(VALID_CUSTOM_CONFIG.global.logLevel);
	});
	test('getResponseLanguage should return responseLanguage from config', () => {
		// Arrange
		// Prepare a config object with responseLanguage property for this test
		const configWithLanguage = JSON.stringify({
			models: {
				main: { provider: 'openai', modelId: 'gpt-4-turbo' }
			},
			global: {
				projectName: 'Test Project',
				responseLanguage: '中文'
			}
		});
		// Set up fs.readFileSync to return our test config
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH) {
				return configWithLanguage;
			}
			if (path.basename(filePath) === 'supported-models.json') {
				return JSON.stringify({
					openai: [{ id: 'gpt-4-turbo' }]
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);
		// Ensure getConfig returns new values instead of cached ones
		configManager.getConfig(MOCK_PROJECT_ROOT, true);
		// Act
		const responseLanguage =
			configManager.getResponseLanguage(MOCK_PROJECT_ROOT);
		// Assert
		expect(responseLanguage).toBe('中文');
	});
	// Title fixed: the assertion expects the merged default 'English', not
	// undefined, when the config omits responseLanguage.
	test('getResponseLanguage should return default language when responseLanguage is not in config', () => {
		// Arrange
		const configWithoutLanguage = JSON.stringify({
			models: {
				main: { provider: 'openai', modelId: 'gpt-4-turbo' }
			},
			global: {
				projectName: 'Test Project'
				// No responseLanguage property
			}
		});
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH) {
				return configWithoutLanguage;
			}
			if (path.basename(filePath) === 'supported-models.json') {
				return JSON.stringify({
					openai: [{ id: 'gpt-4-turbo' }]
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);
		// Ensure getConfig returns new values instead of cached ones
		configManager.getConfig(MOCK_PROJECT_ROOT, true);
		// Act
		const responseLanguage =
			configManager.getResponseLanguage(MOCK_PROJECT_ROOT);
		// Assert
		expect(responseLanguage).toBe('English');
	});
	// Add more tests for other getters (getResearchProvider, getProjectName, etc.)
});
// --- isConfigFilePresent Tests ---
describe('isConfigFilePresent', () => {
	test('should return true if config file exists', () => {
		fsExistsSyncSpy.mockReturnValue(true);
		// findProjectRoot mock set in beforeEach
		const present = configManager.isConfigFilePresent(MOCK_PROJECT_ROOT);
		expect(present).toBe(true);
		expect(fsExistsSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
	});
	test('should return false if config file does not exist', () => {
		fsExistsSyncSpy.mockReturnValue(false);
		// findProjectRoot mock set in beforeEach
		const present = configManager.isConfigFilePresent(MOCK_PROJECT_ROOT);
		expect(present).toBe(false);
		expect(fsExistsSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
	});
	test.skip('should use findProjectRoot if explicitRoot is not provided', () => {
		// TODO: Fix mock interaction, findProjectRoot isn't being registered as called
		fsExistsSyncSpy.mockReturnValue(true);
		// findProjectRoot mock set in beforeEach
		expect(configManager.isConfigFilePresent()).toBe(true);
		expect(mockFindProjectRoot).toHaveBeenCalled(); // Should be called now
	});
});
// --- getAllProviders Tests ---
describe('getAllProviders', () => {
	test('should return all providers from ALL_PROVIDERS constant', () => {
		// Arrange: force a config load using the mock that returns real model data
		configManager.getConfig(null, true);
		// Act
		const allProviders = configManager.getAllProviders();
		// Assert: the getter mirrors the ALL_PROVIDERS constant exactly...
		expect(allProviders).toEqual(configManager.ALL_PROVIDERS);
		expect(allProviders.length).toBe(configManager.ALL_PROVIDERS.length);
		// ...and therefore contains every validated provider and every custom one
		expect(allProviders).toEqual(
			expect.arrayContaining(configManager.VALIDATED_PROVIDERS)
		);
		expect(allProviders).toEqual(
			expect.arrayContaining(Object.values(configManager.CUSTOM_PROVIDERS))
		);
	});
});
// Add tests for getParametersForRole if needed
// --- defaultNumTasks Tests ---
// Exercises getDefaultNumTasks against valid, invalid, missing, and absent
// config file scenarios; each test mocks readFileSync/existsSync and forces
// a config reload so the module-level cache cannot leak between tests.
describe('Configuration Getters', () => {
	test('getDefaultNumTasks should return default value when config is valid', () => {
		// Arrange: Mock fs.readFileSync to return valid config when called with the expected path
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH) {
				return JSON.stringify({
					global: {
						defaultNumTasks: 15
					}
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);
		// Force reload to clear cache
		configManager.getConfig(MOCK_PROJECT_ROOT, true);
		// Act: Call getDefaultNumTasks with explicit root
		const result = configManager.getDefaultNumTasks(MOCK_PROJECT_ROOT);
		// Assert
		expect(result).toBe(15);
	});
	test('getDefaultNumTasks should return fallback when config value is invalid', () => {
		// Arrange: Mock fs.readFileSync to return invalid config (non-numeric value)
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH) {
				return JSON.stringify({
					global: {
						defaultNumTasks: 'invalid'
					}
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);
		// Force reload to clear cache
		configManager.getConfig(MOCK_PROJECT_ROOT, true);
		// Act: Call getDefaultNumTasks with explicit root
		const result = configManager.getDefaultNumTasks(MOCK_PROJECT_ROOT);
		// Assert
		expect(result).toBe(10); // Should fallback to DEFAULTS.global.defaultNumTasks
	});
	test('getDefaultNumTasks should return fallback when config value is missing', () => {
		// Arrange: Mock fs.readFileSync to return config without defaultNumTasks
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH) {
				return JSON.stringify({
					global: {}
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);
		// Force reload to clear cache
		configManager.getConfig(MOCK_PROJECT_ROOT, true);
		// Act: Call getDefaultNumTasks with explicit root
		const result = configManager.getDefaultNumTasks(MOCK_PROJECT_ROOT);
		// Assert
		expect(result).toBe(10); // Should fallback to DEFAULTS.global.defaultNumTasks
	});
	test('getDefaultNumTasks should handle non-existent config file', () => {
		// Arrange: Mock file not existing
		fsExistsSyncSpy.mockReturnValue(false);
		// Force reload to clear cache
		configManager.getConfig(MOCK_PROJECT_ROOT, true);
		// Act: Call getDefaultNumTasks with explicit root
		const result = configManager.getDefaultNumTasks(MOCK_PROJECT_ROOT);
		// Assert
		expect(result).toBe(10); // Should fallback to DEFAULTS.global.defaultNumTasks
	});
	test('getDefaultNumTasks should accept explicit project root', () => {
		// Arrange: Mock fs.readFileSync to return valid config with a distinct value
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH) {
				return JSON.stringify({
					global: {
						defaultNumTasks: 20
					}
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);
		// Force reload to clear cache
		configManager.getConfig(MOCK_PROJECT_ROOT, true);
		// Act: Call getDefaultNumTasks with explicit project root
		const result = configManager.getDefaultNumTasks(MOCK_PROJECT_ROOT);
		// Assert
		expect(result).toBe(20);
	});
});
// Note: Tests for setMainModel, setResearchModel were removed as the functions were removed in the implementation.
// If similar setter functions exist, add tests for them following the writeConfig pattern.
```
--------------------------------------------------------------------------------
/scripts/modules/config-manager.js:
--------------------------------------------------------------------------------
```javascript
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
import chalk from 'chalk';
import { z } from 'zod';
import { AI_COMMAND_NAMES } from '../../src/constants/commands.js';
import {
LEGACY_CONFIG_FILE,
TASKMASTER_DIR
} from '../../src/constants/paths.js';
import {
ALL_PROVIDERS,
CUSTOM_PROVIDERS,
CUSTOM_PROVIDERS_ARRAY,
VALIDATED_PROVIDERS
} from '@tm/core';
import { findConfigPath } from '../../src/utils/path-utils.js';
import { findProjectRoot, isEmpty, log, resolveEnvVariable } from './utils.js';
import MODEL_MAP from './supported-models.json' with { type: 'json' };
// Calculate __dirname in ESM (import.meta.url → file path)
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// Default configuration values (used if config file is missing or incomplete)
const DEFAULTS = {
	models: {
		// Primary model used for most generation commands
		main: {
			provider: 'anthropic',
			modelId: 'claude-sonnet-4-20250514',
			maxTokens: 64000,
			temperature: 0.2
		},
		// Model used for research-backed commands
		research: {
			provider: 'perplexity',
			modelId: 'sonar',
			maxTokens: 8700,
			temperature: 0.1
		},
		fallback: {
			// No default fallback provider/model initially
			provider: 'anthropic',
			modelId: 'claude-3-7-sonnet-20250219',
			maxTokens: 120000, // Default parameters if fallback IS configured
			temperature: 0.2
		}
	},
	global: {
		logLevel: 'info',
		debug: false,
		defaultNumTasks: 10,
		defaultSubtasks: 5,
		defaultPriority: 'medium',
		projectName: 'Task Master',
		ollamaBaseURL: 'http://localhost:11434/api',
		bedrockBaseURL: 'https://bedrock.us-east-1.amazonaws.com',
		responseLanguage: 'English',
		enableCodebaseAnalysis: true,
		enableProxy: false
	},
	// Provider-specific CLI settings (validated by zod schemas further below)
	claudeCode: {},
	codexCli: {},
	grokCli: {
		timeout: 120000,
		workingDirectory: null,
		defaultModel: 'grok-4-latest'
	}
};
// --- Internal Config Loading ---
// Module-level cache: last loaded config and the project root it came from.
let loadedConfig = null;
let loadedConfigRoot = null; // Track which root loaded the config
/**
 * Error type raised for configuration-related failures so callers can
 * distinguish them from generic errors by name.
 */
class ConfigurationError extends Error {
	/**
	 * @param {string} message - Description of the configuration problem.
	 */
	constructor(message) {
		super(message);
		// Tag the error so `err.name` identifies it in logs and catch blocks.
		this.name = 'ConfigurationError';
	}
}
/**
 * Loads config from disk (if found) and merges it over DEFAULTS.
 * Root resolution order: explicitRoot arg → findProjectRoot() → process.cwd().
 * Never throws; falls back to defaults (with a console warning) on any error.
 * @param {string|null} explicitRoot - Optional explicit project root path.
 * @returns {object} The merged, validated configuration object.
 */
function _loadAndValidateConfig(explicitRoot = null) {
	const defaults = DEFAULTS; // Use the defined defaults
	let rootToUse = explicitRoot;
	let configSource = explicitRoot
		? `explicit root (${explicitRoot})`
		: 'defaults (no root provided yet)';
	// ---> If no explicit root, TRY to find it <---
	if (!rootToUse) {
		rootToUse = findProjectRoot();
		if (rootToUse) {
			configSource = `found root (${rootToUse})`;
		} else {
			// No root found, use current working directory as fallback
			// This prevents infinite loops during initialization
			rootToUse = process.cwd();
			configSource = `current directory (${rootToUse}) - no project markers found`;
		}
	}
	// ---> End find project root logic <---
	// --- Find configuration file ---
	let configPath = null;
	// NOTE: shallow copy — nested objects are rebuilt below when a file merges
	let config = { ...defaults };
	let configExists = false;
	// During initialization (no project markers), skip config file search entirely
	const hasProjectMarkers =
		fs.existsSync(path.join(rootToUse, TASKMASTER_DIR)) ||
		fs.existsSync(path.join(rootToUse, LEGACY_CONFIG_FILE));
	if (hasProjectMarkers) {
		// Only try to find config if we have project markers
		// This prevents the repeated warnings during init
		configPath = findConfigPath(null, { projectRoot: rootToUse });
	}
	if (configPath) {
		configExists = true;
		const isLegacy = configPath.endsWith(LEGACY_CONFIG_FILE);
		try {
			const rawData = fs.readFileSync(configPath, 'utf-8');
			const parsedConfig = JSON.parse(rawData);
			// Deep merge parsed config onto defaults, per section, so partial
			// config files still pick up default values for missing keys
			config = {
				models: {
					main: { ...defaults.models.main, ...parsedConfig?.models?.main },
					research: {
						...defaults.models.research,
						...parsedConfig?.models?.research
					},
					// Fallback is only honored when BOTH provider and modelId are set
					fallback:
						parsedConfig?.models?.fallback?.provider &&
						parsedConfig?.models?.fallback?.modelId
							? { ...defaults.models.fallback, ...parsedConfig.models.fallback }
							: { ...defaults.models.fallback }
				},
				global: { ...defaults.global, ...parsedConfig?.global },
				claudeCode: { ...defaults.claudeCode, ...parsedConfig?.claudeCode },
				codexCli: { ...defaults.codexCli, ...parsedConfig?.codexCli },
				grokCli: { ...defaults.grokCli, ...parsedConfig?.grokCli }
			};
			configSource = `file (${configPath})`; // Update source info
			// Issue deprecation warning if using legacy config file
			if (isLegacy) {
				console.warn(
					chalk.yellow(
						`⚠️ DEPRECATION WARNING: Found configuration in legacy location '${configPath}'. Please migrate to .taskmaster/config.json. Run 'task-master migrate' to automatically migrate your project.`
					)
				);
			}
			// --- Validation (Warn if file content is invalid) ---
			// Use log.warn for consistency
			if (!validateProvider(config.models.main.provider)) {
				console.warn(
					chalk.yellow(
						`Warning: Invalid main provider "${config.models.main.provider}" in ${configPath}. Falling back to default.`
					)
				);
				config.models.main = { ...defaults.models.main };
			}
			if (!validateProvider(config.models.research.provider)) {
				console.warn(
					chalk.yellow(
						`Warning: Invalid research provider "${config.models.research.provider}" in ${configPath}. Falling back to default.`
					)
				);
				config.models.research = { ...defaults.models.research };
			}
			if (
				config.models.fallback?.provider &&
				!validateProvider(config.models.fallback.provider)
			) {
				console.warn(
					chalk.yellow(
						`Warning: Invalid fallback provider "${config.models.fallback.provider}" in ${configPath}. Fallback model configuration will be ignored.`
					)
				);
				config.models.fallback.provider = undefined;
				config.models.fallback.modelId = undefined;
			}
			// Provider-specific sections are validated against their zod schemas
			if (config.claudeCode && !isEmpty(config.claudeCode)) {
				config.claudeCode = validateClaudeCodeSettings(config.claudeCode);
			}
			if (config.codexCli && !isEmpty(config.codexCli)) {
				config.codexCli = validateCodexCliSettings(config.codexCli);
			}
		} catch (error) {
			// Use console.error for actual errors during parsing
			console.error(
				chalk.red(
					`Error reading or parsing ${configPath}: ${error.message}. Using default configuration.`
				)
			);
			config = { ...defaults }; // Reset to defaults on parse error
			configSource = `defaults (parse error at ${configPath})`;
		}
	} else {
		// Config file doesn't exist at the determined rootToUse.
		if (explicitRoot) {
			// Only warn if an explicit root was *expected*.
			console.warn(
				chalk.yellow(
					`Warning: Configuration file not found at provided project root (${explicitRoot}). Using default configuration. Run 'task-master models --setup' to configure.`
				)
			);
		} else {
			// Don't warn about missing config during initialization
			// Only warn if this looks like an existing project (has .taskmaster dir or legacy config marker)
			const hasTaskmasterDir = fs.existsSync(
				path.join(rootToUse, TASKMASTER_DIR)
			);
			const hasLegacyMarker = fs.existsSync(
				path.join(rootToUse, LEGACY_CONFIG_FILE)
			);
			if (hasTaskmasterDir || hasLegacyMarker) {
				console.warn(
					chalk.yellow(
						`Warning: Configuration file not found at derived root (${rootToUse}). Using defaults.`
					)
				);
			}
		}
		// Keep config as defaults
		config = { ...defaults };
		configSource = `defaults (no config file found at ${rootToUse})`;
	}
	return config;
}
/**
 * Gets the current configuration, loading it if necessary.
 * Handles MCP initialization context gracefully.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @param {boolean} forceReload - Force reloading the config file.
 * @returns {object} The loaded configuration object.
 */
function getConfig(explicitRoot = null, forceReload = false) {
	// The cache is usable only when something is cached, no reload was forced,
	// and the caller is not asking for a different project root.
	const cacheIsUsable =
		loadedConfig !== null &&
		!forceReload &&
		!(explicitRoot && explicitRoot !== loadedConfigRoot);
	if (cacheIsUsable) {
		return loadedConfig;
	}
	// (Re)load from disk; _loadAndValidateConfig tolerates a null root.
	const freshConfig = _loadAndValidateConfig(explicitRoot);
	// Cache only deliberate loads (forced, or tied to a specific root) so the
	// implicit default load during startup does not poison the cache.
	if (forceReload || explicitRoot) {
		loadedConfig = freshConfig;
		loadedConfigRoot = explicitRoot;
	}
	return freshConfig;
}
/**
 * Validates if a provider name is supported.
 * Custom providers (azure, vertex, bedrock, openrouter, ollama) are always allowed.
 * Validated providers must exist in the MODEL_MAP from supported-models.json.
 * @param {string} providerName The name of the provider.
 * @returns {boolean} True if the provider is valid, false otherwise.
 */
function validateProvider(providerName) {
	// Custom providers bypass the MODEL_MAP lookup entirely.
	if (CUSTOM_PROVIDERS_ARRAY.includes(providerName)) {
		return true;
	}
	// Validated providers must also have an entry in supported-models.json;
	// anything outside both lists is rejected.
	const isValidated = VALIDATED_PROVIDERS.includes(providerName);
	return isValidated ? Boolean(MODEL_MAP && MODEL_MAP[providerName]) : false;
}
/**
 * Optional: Validates if a modelId is known for a given provider based on MODEL_MAP.
 * This is a non-strict validation; an unknown model might still be valid.
 * @param {string} providerName The name of the provider.
 * @param {string} modelId The model ID.
 * @returns {boolean} True if the modelId is in the map for the provider, false otherwise.
 */
function validateProviderModelCombination(providerName, modelId) {
	const knownModels = MODEL_MAP[providerName];
	// Providers absent from the map cannot be checked — treat them as valid.
	if (!knownModels) {
		return true;
	}
	// An empty list for a known provider means "accept any model".
	if (knownModels.length === 0) {
		return true;
	}
	// Otherwise the model must appear in the provider's list (by 'id').
	return knownModels.some((entry) => entry.id === modelId);
}
/**
 * Gets the list of supported model IDs for a given provider from supported-models.json
 * @param {string} providerName - The name of the provider (e.g., 'claude-code', 'anthropic')
 * @returns {string[]} Array of supported model IDs, or empty array if provider not found
 */
export function getSupportedModelsForProvider(providerName) {
	const models = MODEL_MAP[providerName];
	if (!models) {
		return [];
	}
	// Keep only models not explicitly marked unsupported, and report their IDs.
	const supportedIds = [];
	for (const model of models) {
		if (model.supported !== false) {
			supportedIds.push(model.id);
		}
	}
	return supportedIds;
}
/**
 * Validates Claude Code AI provider custom settings
 * Parses the settings against a zod schema; on failure, warns and falls back
 * to an empty settings object rather than applying partial/invalid settings.
 * @param {object} settings The settings to validate
 * @returns {object} The validated settings
 */
function validateClaudeCodeSettings(settings) {
	// Define the base settings schema without commandSpecific first
	const BaseSettingsSchema = z.object({
		pathToClaudeCodeExecutable: z.string().optional(),
		// Use number().int() for integer validation in Zod
		maxTurns: z.number().int().positive().optional(),
		customSystemPrompt: z.string().optional(),
		appendSystemPrompt: z.string().optional(),
		permissionMode: z
			.enum(['default', 'acceptEdits', 'plan', 'bypassPermissions'])
			.optional(),
		allowedTools: z.array(z.string()).optional(),
		disallowedTools: z.array(z.string()).optional(),
		// Map of MCP server name → launch/connection config
		mcpServers: z
			.record(
				z.string(),
				z.object({
					type: z.enum(['stdio', 'sse']).optional(),
					command: z.string(),
					args: z.array(z.string()).optional(),
					env: z.record(z.string(), z.string()).optional(),
					url: z.url().optional(),
					headers: z.record(z.string(), z.string()).optional()
				})
			)
			.optional()
	});
	// Define CommandSpecificSchema using flexible keys, but restrict to known commands
	const CommandSpecificSchema = z
		.record(z.string(), BaseSettingsSchema)
		.refine(
			(obj) =>
				Object.keys(obj || {}).every((k) => AI_COMMAND_NAMES.includes(k)),
			{ message: 'Invalid command name in commandSpecific' }
		);
	// Define the full settings schema with commandSpecific
	const SettingsSchema = BaseSettingsSchema.extend({
		commandSpecific: CommandSpecificSchema.optional()
	});
	let validatedSettings = {};
	try {
		validatedSettings = SettingsSchema.parse(settings);
	} catch (error) {
		// Invalid settings are discarded wholesale, never partially applied.
		console.warn(
			chalk.yellow(
				`Warning: Invalid Claude Code settings in config: ${error.message}. Falling back to default.`
			)
		);
		validatedSettings = {};
	}
	return validatedSettings;
}
/**
 * Validates Codex CLI provider custom settings
 * Mirrors the ai-sdk-provider-codex-cli options
 * On schema failure, warns and returns an empty object (no partial settings).
 * @param {object} settings The settings to validate
 * @returns {object} The validated settings
 */
function validateCodexCliSettings(settings) {
	const BaseSettingsSchema = z.object({
		codexPath: z.string().optional(),
		cwd: z.string().optional(),
		approvalMode: z
			.enum(['untrusted', 'on-failure', 'on-request', 'never'])
			.optional(),
		sandboxMode: z
			.enum(['read-only', 'workspace-write', 'danger-full-access'])
			.optional(),
		fullAuto: z.boolean().optional(),
		dangerouslyBypassApprovalsAndSandbox: z.boolean().optional(),
		skipGitRepoCheck: z.boolean().optional(),
		color: z.enum(['always', 'never', 'auto']).optional(),
		allowNpx: z.boolean().optional(),
		outputLastMessageFile: z.string().optional(),
		env: z.record(z.string(), z.string()).optional(),
		verbose: z.boolean().optional(),
		// logger: any object, or `false` to disable logging entirely
		logger: z.union([z.object({}).passthrough(), z.literal(false)]).optional()
	});
	// Per-command overrides may only use known AI command names as keys
	const CommandSpecificSchema = z
		.record(z.string(), BaseSettingsSchema)
		.refine(
			(obj) =>
				Object.keys(obj || {}).every((k) => AI_COMMAND_NAMES.includes(k)),
			{ message: 'Invalid command name in commandSpecific' }
		);
	const SettingsSchema = BaseSettingsSchema.extend({
		commandSpecific: CommandSpecificSchema.optional()
	});
	try {
		return SettingsSchema.parse(settings);
	} catch (error) {
		console.warn(
			chalk.yellow(
				`Warning: Invalid Codex CLI settings in config: ${error.message}. Falling back to default.`
			)
		);
		return {};
	}
}
// --- Claude Code Settings Getters ---
/**
 * Returns Claude Code provider settings with defaults layered underneath.
 */
function getClaudeCodeSettings(explicitRoot = null, forceReload = false) {
	const config = getConfig(explicitRoot, forceReload);
	// Defaults first so configured values win.
	return Object.assign({}, DEFAULTS.claudeCode, config?.claudeCode ?? {});
}
// --- Codex CLI Settings Getters ---
function getCodexCliSettings(explicitRoot = null, forceReload = false) {
	const config = getConfig(explicitRoot, forceReload);
	return Object.assign({}, DEFAULTS.codexCli, config?.codexCli ?? {});
}
function getCodexCliSettingsForCommand(
	commandName,
	explicitRoot = null,
	forceReload = false
) {
	// Per-command overrides (commandSpecific[commandName]) win over base settings.
	const base = getCodexCliSettings(explicitRoot, forceReload);
	const overrides = (base?.commandSpecific ?? {})[commandName];
	return { ...base, ...overrides };
}
function getClaudeCodeSettingsForCommand(
	commandName,
	explicitRoot = null,
	forceReload = false
) {
	const base = getClaudeCodeSettings(explicitRoot, forceReload);
	const overrides = (base?.commandSpecific ?? {})[commandName];
	return { ...base, ...overrides };
}
function getGrokCliSettings(explicitRoot = null, forceReload = false) {
	const config = getConfig(explicitRoot, forceReload);
	// Grok CLI defaults supply timeout and default model when absent.
	return Object.assign({}, DEFAULTS.grokCli, config?.grokCli ?? {});
}
function getGrokCliSettingsForCommand(
	commandName,
	explicitRoot = null,
	forceReload = false
) {
	const base = getGrokCliSettings(explicitRoot, forceReload);
	const overrides = (base?.commandSpecific ?? {})[commandName];
	return { ...base, ...overrides };
}
// --- Role-Specific Getters ---
/**
 * Returns the model config object for a role ('main' | 'research' | 'fallback'),
 * falling back to DEFAULTS (with a warning) when the role is not configured.
 */
function getModelConfigForRole(role, explicitRoot = null) {
	const roleConfig = getConfig(explicitRoot)?.models?.[role];
	if (roleConfig) {
		return roleConfig;
	}
	log(
		'warn',
		`No model configuration found for role: ${role}. Returning default.`
	);
	return DEFAULTS.models[role] || {};
}
function getMainProvider(explicitRoot = null) {
	const { provider } = getModelConfigForRole('main', explicitRoot);
	return provider;
}
function getMainModelId(explicitRoot = null) {
	const { modelId } = getModelConfigForRole('main', explicitRoot);
	return modelId;
}
function getMainMaxTokens(explicitRoot = null) {
	// Value comes from the merged config, so defaults are already applied.
	const { maxTokens } = getModelConfigForRole('main', explicitRoot);
	return maxTokens;
}
function getMainTemperature(explicitRoot = null) {
	const { temperature } = getModelConfigForRole('main', explicitRoot);
	return temperature;
}
function getResearchProvider(explicitRoot = null) {
	const { provider } = getModelConfigForRole('research', explicitRoot);
	return provider;
}
/**
 * Check if codebase analysis feature flag is enabled across all sources
 * Priority: .env > MCP env > config.json
 * @param {object|null} session - MCP session object (optional)
 * @param {string|null} projectRoot - Project root path (optional)
 * @returns {boolean} True if codebase analysis is enabled
 */
function isCodebaseAnalysisEnabled(session = null, projectRoot = null) {
	// 'true' (any case) or '1' count as enabled.
	const truthyFlag = (value) => value.toLowerCase() === 'true' || value === '1';
	// Priority 1: Environment variable (.env / process.env)
	const envFlag = resolveEnvVariable(
		'TASKMASTER_ENABLE_CODEBASE_ANALYSIS',
		session,
		projectRoot
	);
	if (envFlag != null && envFlag !== '') {
		return truthyFlag(envFlag);
	}
	// Priority 2: MCP session environment
	const mcpFlag = session?.env?.TASKMASTER_ENABLE_CODEBASE_ANALYSIS;
	if (mcpFlag) {
		return truthyFlag(mcpFlag);
	}
	// Priority 3: Configuration file (enabled unless explicitly false)
	return getGlobalConfig(projectRoot).enableCodebaseAnalysis !== false;
}
/**
 * Check if codebase analysis is available and enabled
 * @param {boolean} useResearch - Whether to check research provider or main provider
 * @param {string|null} projectRoot - Project root path (optional)
 * @param {object|null} session - MCP session object (optional)
 * @returns {boolean} True if codebase analysis is available and enabled
 */
function hasCodebaseAnalysis(
	useResearch = false,
	projectRoot = null,
	session = null
) {
	// Feature flag gate comes first.
	if (!isCodebaseAnalysisEnabled(session, projectRoot)) {
		return false;
	}
	// The active provider for the requested role must be one of the local
	// CLI providers that can inspect the codebase.
	const activeProvider = useResearch
		? getResearchProvider(projectRoot)
		: getMainProvider(projectRoot);
	const analysisCapableProviders = [
		CUSTOM_PROVIDERS.CLAUDE_CODE,
		CUSTOM_PROVIDERS.GEMINI_CLI,
		CUSTOM_PROVIDERS.GROK_CLI,
		CUSTOM_PROVIDERS.CODEX_CLI
	];
	return analysisCapableProviders.includes(activeProvider);
}
function getResearchModelId(explicitRoot = null) {
	const { modelId } = getModelConfigForRole('research', explicitRoot);
	return modelId;
}
function getResearchMaxTokens(explicitRoot = null) {
	// Value comes from the merged config, so defaults are already applied.
	const { maxTokens } = getModelConfigForRole('research', explicitRoot);
	return maxTokens;
}
function getResearchTemperature(explicitRoot = null) {
	const { temperature } = getModelConfigForRole('research', explicitRoot);
	return temperature;
}
function getFallbackProvider(explicitRoot = null) {
	// undefined when no fallback is configured.
	const { provider } = getModelConfigForRole('fallback', explicitRoot);
	return provider;
}
function getFallbackModelId(explicitRoot = null) {
	const { modelId } = getModelConfigForRole('fallback', explicitRoot);
	return modelId;
}
function getFallbackMaxTokens(explicitRoot = null) {
	const { maxTokens } = getModelConfigForRole('fallback', explicitRoot);
	return maxTokens;
}
function getFallbackTemperature(explicitRoot = null) {
	const { temperature } = getModelConfigForRole('fallback', explicitRoot);
	return temperature;
}
// --- Global Settings Getters ---
/**
 * Returns the global config section with DEFAULTS.global layered underneath.
 */
function getGlobalConfig(explicitRoot = null) {
	const loaded = getConfig(explicitRoot);
	// Defaults first so any configured values win.
	return Object.assign({}, DEFAULTS.global, loaded?.global ?? {});
}
/**
 * Gets the configured log level, normalized to lowercase.
 * Falls back to the shipped default when the configured value is null or
 * undefined, so a malformed config file (e.g. `"logLevel": null`) cannot
 * crash with "Cannot read properties of null (reading 'toLowerCase')".
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {string} The lowercase log level (e.g. 'info').
 */
function getLogLevel(explicitRoot = null) {
	const level = getGlobalConfig(explicitRoot).logLevel;
	// ?? keeps legitimate falsy-but-present values out of scope; only
	// null/undefined fall back to the default.
	return String(level ?? DEFAULTS.global.logLevel).toLowerCase();
}
function getDebugFlag(explicitRoot = null) {
	// Strict boolean: only literal true enables debug.
	return getGlobalConfig(explicitRoot).debug === true;
}
function getDefaultSubtasks(explicitRoot = null) {
	const raw = getGlobalConfig(explicitRoot).defaultSubtasks;
	const parsed = Number.parseInt(raw, 10);
	// Non-numeric config values fall back to the shipped default.
	return Number.isNaN(parsed) ? DEFAULTS.global.defaultSubtasks : parsed;
}
function getDefaultNumTasks(explicitRoot = null) {
	const raw = getGlobalConfig(explicitRoot).defaultNumTasks;
	const parsed = Number.parseInt(raw, 10);
	return Number.isNaN(parsed) ? DEFAULTS.global.defaultNumTasks : parsed;
}
function getDefaultPriority(explicitRoot = null) {
	const { defaultPriority } = getGlobalConfig(explicitRoot);
	return defaultPriority;
}
function getProjectName(explicitRoot = null) {
	const { projectName } = getGlobalConfig(explicitRoot);
	return projectName;
}
function getOllamaBaseURL(explicitRoot = null) {
	const { ollamaBaseURL } = getGlobalConfig(explicitRoot);
	return ollamaBaseURL;
}
function getAzureBaseURL(explicitRoot = null) {
	// No default exists for this key; undefined when not configured.
	const { azureBaseURL } = getGlobalConfig(explicitRoot);
	return azureBaseURL;
}
function getBedrockBaseURL(explicitRoot = null) {
	const { bedrockBaseURL } = getGlobalConfig(explicitRoot);
	return bedrockBaseURL;
}
/**
 * Gets the Google Cloud project ID for Vertex AI from configuration
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {string|null} The project ID or null if not configured
 */
function getVertexProjectId(explicitRoot = null) {
	const { vertexProjectId } = getGlobalConfig(explicitRoot);
	return vertexProjectId;
}
/**
 * Gets the Google Cloud location for Vertex AI from configuration
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {string} The location or default value of "us-central1"
 */
function getVertexLocation(explicitRoot = null) {
	const { vertexLocation } = getGlobalConfig(explicitRoot);
	return vertexLocation || 'us-central1';
}
function getResponseLanguage(explicitRoot = null) {
	const { responseLanguage } = getGlobalConfig(explicitRoot);
	return responseLanguage;
}
function getCodebaseAnalysisEnabled(explicitRoot = null) {
	// Enabled unless explicitly set to false.
	return getGlobalConfig(explicitRoot).enableCodebaseAnalysis !== false;
}
function getProxyEnabled(explicitRoot = null) {
	// Disabled unless explicitly set to true.
	return getGlobalConfig(explicitRoot).enableProxy === true;
}
/**
 * Check if the proxy feature flag is enabled across all sources.
 * Priority: .env > MCP env > config.json
 */
function isProxyEnabled(session = null, projectRoot = null) {
	// 'true' (any case) or '1' count as enabled.
	const truthyFlag = (value) => value.toLowerCase() === 'true' || value === '1';
	// Priority 1: Environment variable (.env / process.env)
	const envFlag = resolveEnvVariable(
		'TASKMASTER_ENABLE_PROXY',
		session,
		projectRoot
	);
	if (envFlag != null && envFlag !== '') {
		return truthyFlag(envFlag);
	}
	// Priority 2: MCP session environment (explicit check for parity with other flags)
	const mcpFlag = session?.env?.TASKMASTER_ENABLE_PROXY;
	if (mcpFlag) {
		return truthyFlag(mcpFlag);
	}
	// Priority 3: Configuration file
	return getProxyEnabled(projectRoot);
}
/**
 * Gets model parameters (maxTokens, temperature) for a specific role,
 * considering model-specific overrides from supported-models.json.
 * Never throws: lookup errors fall back to the role's configured defaults.
 * @param {string} role - The role ('main', 'research', 'fallback').
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {{maxTokens: number, temperature: number}}
 */
function getParametersForRole(role, explicitRoot = null) {
	const roleConfig = getModelConfigForRole(role, explicitRoot);
	const roleMaxTokens = roleConfig.maxTokens;
	const roleTemperature = roleConfig.temperature;
	const modelId = roleConfig.modelId;
	const providerName = roleConfig.provider;
	let effectiveMaxTokens = roleMaxTokens; // Start with the role's default
	let effectiveTemperature = roleTemperature; // Start with the role's default
	try {
		// Find the model definition in MODEL_MAP
		const providerModels = MODEL_MAP[providerName];
		if (providerModels && Array.isArray(providerModels)) {
			const modelDefinition = providerModels.find((m) => m.id === modelId);
			// Check if a model-specific max_tokens is defined and valid
			if (
				modelDefinition &&
				typeof modelDefinition.max_tokens === 'number' &&
				modelDefinition.max_tokens > 0
			) {
				const modelSpecificMaxTokens = modelDefinition.max_tokens;
				// Use the minimum of the role default and the model specific limit
				// (the override can only tighten the limit, never raise it)
				effectiveMaxTokens = Math.min(roleMaxTokens, modelSpecificMaxTokens);
				log(
					'debug',
					`Applying model-specific max_tokens (${modelSpecificMaxTokens}) for ${modelId}. Effective limit: ${effectiveMaxTokens}`
				);
			} else {
				log(
					'debug',
					`No valid model-specific max_tokens override found for ${modelId}. Using role default: ${roleMaxTokens}`
				);
			}
			// Check if a model-specific temperature is defined (must be in [0, 1])
			if (
				modelDefinition &&
				typeof modelDefinition.temperature === 'number' &&
				modelDefinition.temperature >= 0 &&
				modelDefinition.temperature <= 1
			) {
				effectiveTemperature = modelDefinition.temperature;
				log(
					'debug',
					`Applying model-specific temperature (${modelDefinition.temperature}) for ${modelId}`
				);
			}
		} else {
			// Special handling for custom OpenRouter models
			if (providerName === CUSTOM_PROVIDERS.OPENROUTER) {
				// Use a conservative default for OpenRouter models not in our list
				const openrouterDefault = 32768;
				effectiveMaxTokens = Math.min(roleMaxTokens, openrouterDefault);
				log(
					'debug',
					`Custom OpenRouter model ${modelId} detected. Using conservative max_tokens: ${effectiveMaxTokens}`
				);
			} else {
				log(
					'debug',
					`No model definitions found for provider ${providerName} in MODEL_MAP. Using role default maxTokens: ${roleMaxTokens}`
				);
			}
		}
	} catch (lookupError) {
		log(
			'warn',
			`Error looking up model-specific parameters for ${modelId}: ${lookupError.message}. Using role defaults.`
		);
		// Fallback to role defaults on error
		effectiveMaxTokens = roleMaxTokens;
		effectiveTemperature = roleTemperature;
	}
	return {
		maxTokens: effectiveMaxTokens,
		temperature: effectiveTemperature
	};
}
/**
 * Checks if the API key for a given provider is set in the environment.
 * Checks process.env first, then session.env if session is provided, then .env file if projectRoot provided.
 * @param {string} providerName - The name of the provider (e.g., 'openai', 'anthropic').
 * @param {object|null} [session=null] - The MCP session object (optional).
 * @param {string|null} [projectRoot=null] - The project root directory (optional, for .env file check).
 * @returns {boolean} True if the API key is set, false otherwise.
 */
function isApiKeySet(providerName, session = null, projectRoot = null) {
	const normalized = providerName?.toLowerCase();
	// Providers that don't require API keys for authentication
	const providersWithoutApiKeys = [
		CUSTOM_PROVIDERS.OLLAMA,
		CUSTOM_PROVIDERS.BEDROCK,
		CUSTOM_PROVIDERS.GEMINI_CLI,
		CUSTOM_PROVIDERS.GROK_CLI,
		CUSTOM_PROVIDERS.MCP,
		CUSTOM_PROVIDERS.CODEX_CLI
	];
	if (providersWithoutApiKeys.includes(normalized)) {
		return true; // Indicate key status is effectively "OK"
	}
	// Claude Code needs no key; Codex CLI supports OAuth via `codex login`,
	// so an API key is optional for both.
	if (normalized === 'claude-code' || normalized === 'codex-cli') {
		return true;
	}
	// Expected environment variable name for each keyed provider
	const keyMap = {
		openai: 'OPENAI_API_KEY',
		anthropic: 'ANTHROPIC_API_KEY',
		google: 'GOOGLE_API_KEY',
		perplexity: 'PERPLEXITY_API_KEY',
		mistral: 'MISTRAL_API_KEY',
		azure: 'AZURE_OPENAI_API_KEY',
		openrouter: 'OPENROUTER_API_KEY',
		xai: 'XAI_API_KEY',
		zai: 'ZAI_API_KEY',
		'zai-coding': 'ZAI_API_KEY',
		groq: 'GROQ_API_KEY',
		vertex: 'GOOGLE_API_KEY', // Vertex uses the same key as Google
		'claude-code': 'CLAUDE_CODE_API_KEY', // Not actually used, but included for consistency
		bedrock: 'AWS_ACCESS_KEY_ID' // Bedrock uses AWS credentials
		// Add other providers as needed
	};
	if (!normalized || !keyMap[normalized]) {
		log('warn', `Unknown provider name: ${providerName} in isApiKeySet check.`);
		return false;
	}
	const apiKeyValue = resolveEnvVariable(
		keyMap[normalized],
		session,
		projectRoot
	);
	// Key must exist, be non-blank, and not be a placeholder.
	// Bug fix: coerce to a real boolean — the bare `&&` chain previously leaked
	// null/'' to callers despite the documented boolean return type.
	return Boolean(
		apiKeyValue &&
			apiKeyValue.trim() !== '' &&
			!/YOUR_.*_API_KEY_HERE/.test(apiKeyValue) && // General placeholder check
			!apiKeyValue.includes('KEY_HERE') // Another common placeholder pattern
	);
}
/**
 * Checks the API key status within .cursor/mcp.json for a given provider.
 * Reads the mcp.json file, finds the taskmaster-ai server config, and checks
 * the relevant env var for that provider.
 * @param {string} providerName The name of the provider (case-sensitive, e.g. 'openai').
 * @param {string|null} projectRoot - Optional explicit path to the project root.
 * @returns {boolean} True if the key exists and is not a placeholder, false otherwise.
 */
function getMcpApiKeyStatus(providerName, projectRoot = null) {
  const rootDir = projectRoot || findProjectRoot(); // Use existing root finding
  if (!rootDir) {
    console.warn(
      chalk.yellow('Warning: Could not find project root to check mcp.json.')
    );
    return false; // Cannot check without root
  }
  const mcpConfigPath = path.join(rootDir, '.cursor', 'mcp.json');
  if (!fs.existsSync(mcpConfigPath)) {
    return false; // File doesn't exist
  }
  try {
    const mcpConfigRaw = fs.readFileSync(mcpConfigPath, 'utf-8');
    const mcpConfig = JSON.parse(mcpConfigRaw);
    // Support both historical server names for the taskmaster MCP entry.
    const mcpEnv =
      mcpConfig?.mcpServers?.['task-master-ai']?.env ||
      mcpConfig?.mcpServers?.['taskmaster-ai']?.env;
    if (!mcpEnv) {
      return false;
    }
    // Providers that need no API key in mcp.json:
    // ollama (local), claude-code (no key), codex-cli (OAuth/subscription).
    if (['ollama', 'claude-code', 'codex-cli'].includes(providerName)) {
      return true;
    }
    // Provider -> env var lookup (mirrors the keyMap used by isApiKeySet).
    const envVarByProvider = {
      anthropic: 'ANTHROPIC_API_KEY',
      openai: 'OPENAI_API_KEY',
      openrouter: 'OPENROUTER_API_KEY',
      google: 'GOOGLE_API_KEY',
      perplexity: 'PERPLEXITY_API_KEY',
      xai: 'XAI_API_KEY',
      zai: 'ZAI_API_KEY',
      'zai-coding': 'ZAI_API_KEY', // Shares the standard Z.ai key
      groq: 'GROQ_API_KEY',
      mistral: 'MISTRAL_API_KEY',
      azure: 'AZURE_OPENAI_API_KEY',
      vertex: 'GOOGLE_API_KEY', // Vertex uses Google API key
      bedrock: 'AWS_ACCESS_KEY_ID' // Bedrock uses AWS credentials
    };
    const envVarName = envVarByProvider[providerName];
    if (!envVarName) {
      return false; // Unknown provider
    }
    const apiKeyToCheck = mcpEnv[envVarName];
    // Reject missing values and 'YOUR_..._KEY_HERE' style placeholders.
    return !!apiKeyToCheck && !/KEY_HERE$/.test(apiKeyToCheck);
  } catch (error) {
    console.error(
      chalk.red(`Error reading or parsing .cursor/mcp.json: ${error.message}`)
    );
    return false;
  }
}
/**
 * Gets a list of available models based on the MODEL_MAP.
 * Unsupported models are filtered out; providers with no curated model list
 * (e.g. ollama) contribute a single wildcard placeholder entry.
 * @returns {Array<{id: string, name: string, provider: string, swe_score: number|null, cost_per_1m_tokens: {input: number|null, output: number|null}|null, allowed_roles: string[]}>}
 */
function getAvailableModels() {
  // Friendly display names for ids that the generic title-casing below
  // would render poorly.
  const displayNameOverrides = {
    'claude-3.5-sonnet-20240620': 'Claude 3.5 Sonnet',
    'claude-3-7-sonnet-20250219': 'Claude 3.7 Sonnet',
    'gpt-4o': 'GPT-4o',
    'gpt-4-turbo': 'GPT-4 Turbo',
    'sonar-pro': 'Perplexity Sonar Pro',
    'sonar-mini': 'Perplexity Sonar Mini'
  };
  // Default name: title-case each dash-separated segment of the id.
  const prettify = (modelId) =>
    displayNameOverrides[modelId] ??
    modelId
      .split('-')
      .map((segment) => segment.charAt(0).toUpperCase() + segment.slice(1))
      .join(' ');
  const available = [];
  for (const [provider, models] of Object.entries(MODEL_MAP)) {
    if (models.length === 0) {
      // Placeholder entry for providers with an empty model list.
      available.push({
        id: `[${provider}-any]`,
        name: `Any (${provider})`,
        provider: provider
      });
      continue;
    }
    for (const modelObj of models) {
      if (!modelObj.supported) {
        continue; // Skip models not flagged as supported
      }
      available.push({
        id: modelObj.id,
        name: prettify(modelObj.id),
        provider: provider,
        swe_score: modelObj.swe_score,
        cost_per_1m_tokens: modelObj.cost_per_1m_tokens,
        allowed_roles: modelObj.allowed_roles || ['main', 'fallback'],
        max_tokens: modelObj.max_tokens
      });
    }
  }
  return available;
}
/**
 * Writes the configuration object to the file (.taskmaster/config.json).
 * @param {Object} config The configuration object to write.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {boolean} True if successful, false otherwise.
 */
function writeConfig(config, explicitRoot = null) {
  // Resolve the project root: honor an explicit root when given, otherwise
  // discover it the same way _loadAndValidateConfig does.
  let rootPath = explicitRoot;
  if (rootPath == null) {
    rootPath = findProjectRoot();
    if (!rootPath) {
      console.error(
        chalk.red(
          'Error: Could not determine project root. Configuration not saved.'
        )
      );
      return false;
    }
  }
  // New config location: .taskmaster/config.json
  const taskmasterDir = path.join(rootPath, '.taskmaster');
  const configPath = path.join(taskmasterDir, 'config.json');
  try {
    // Ensure the .taskmaster directory exists before writing.
    if (!fs.existsSync(taskmasterDir)) {
      fs.mkdirSync(taskmasterDir, { recursive: true });
    }
    fs.writeFileSync(configPath, JSON.stringify(config, null, 2));
    loadedConfig = config; // Keep the in-memory cache in sync with disk
    return true;
  } catch (error) {
    console.error(
      chalk.red(
        `Error writing configuration to ${configPath}: ${error.message}`
      )
    );
    return false;
  }
}
/**
 * Checks if a configuration file exists at the project root (new or legacy location).
 * @param {string|null} explicitRoot - Optional explicit path to the project root
 * @returns {boolean} True if the file exists, false otherwise
 */
function isConfigFilePresent(explicitRoot = null) {
  // findConfigPath returns null when neither location has a config file.
  const configPath = findConfigPath(null, { projectRoot: explicitRoot });
  return configPath !== null;
}
/**
 * Gets the user ID from the configuration, creating a default one if missing.
 * When no userId exists, a default of '1234567890' is set in memory and an
 * attempt is made to persist it via writeConfig.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {string} The user ID (the in-memory default if persisting failed).
 */
function getUserId(explicitRoot = null) {
  const config = getConfig(explicitRoot);
  if (!config.global) {
    config.global = {}; // Ensure global object exists
  }
  if (!config.global.userId) {
    config.global.userId = '1234567890'; // Default placeholder id
    // Attempt to write the updated config. writeConfig resolves the path
    // using explicitRoot, similar to how getConfig does.
    const success = writeConfig(config, explicitRoot);
    if (!success) {
      // Write failed: proceed with the in-memory default but warn the user.
      // Level 'warn' matches the log level name used elsewhere in this module.
      log(
        'warn',
        'Failed to write updated configuration with new userId. Please let the developers know.'
      );
    }
  }
  return config.global.userId;
}
/**
 * Gets a list of all known provider names (both validated and custom).
 * Returns the shared ALL_PROVIDERS array directly; callers should treat it
 * as read-only.
 * @returns {string[]} An array of all provider names.
 */
function getAllProviders() {
  return ALL_PROVIDERS;
}
/**
 * Resolves the base URL for a given model role.
 * Prefers an explicit baseURL on the role's model config; otherwise falls
 * back to a provider-specific env var (e.g. OPENAI_BASE_URL).
 * @param {string} role - The model role to look up.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {string|undefined} The base URL, or undefined when none is configured.
 */
function getBaseUrlForRole(role, explicitRoot = null) {
  const roleConfig = getModelConfigForRole(role, explicitRoot);
  if (typeof roleConfig?.baseURL === 'string') {
    return roleConfig.baseURL;
  }
  const provider = roleConfig?.provider;
  if (!provider) {
    return undefined;
  }
  // Env var convention: <PROVIDER>_BASE_URL, e.g. OPENAI_BASE_URL.
  const envVarName = `${provider.toUpperCase()}_BASE_URL`;
  return resolveEnvVariable(envVarName, null, explicitRoot);
}
// Export the providers-without-API-keys array for use in other modules.
// These providers authenticate without a single *_API_KEY env var
// (local runtimes, AWS credentials, or CLI-managed OAuth/subscription).
export const providersWithoutApiKeys = [
  CUSTOM_PROVIDERS.OLLAMA,
  CUSTOM_PROVIDERS.BEDROCK,
  CUSTOM_PROVIDERS.GEMINI_CLI,
  CUSTOM_PROVIDERS.GROK_CLI,
  CUSTOM_PROVIDERS.MCP,
  CUSTOM_PROVIDERS.CODEX_CLI
];
export {
  // Core config access
  getConfig,
  writeConfig,
  ConfigurationError,
  isConfigFilePresent,
  // Claude Code settings
  getClaudeCodeSettings,
  getClaudeCodeSettingsForCommand,
  // Codex CLI settings
  getCodexCliSettings,
  getCodexCliSettingsForCommand,
  // Grok CLI settings
  getGrokCliSettings,
  getGrokCliSettingsForCommand,
  // Validation
  validateProvider,
  validateProviderModelCombination,
  validateClaudeCodeSettings,
  validateCodexCliSettings,
  VALIDATED_PROVIDERS,
  CUSTOM_PROVIDERS,
  ALL_PROVIDERS,
  MODEL_MAP,
  getAvailableModels,
  // Role-specific getters (No env var overrides)
  getMainProvider,
  getMainModelId,
  getMainMaxTokens,
  getMainTemperature,
  getResearchProvider,
  getResearchModelId,
  getResearchMaxTokens,
  getResearchTemperature,
  hasCodebaseAnalysis,
  getFallbackProvider,
  getFallbackModelId,
  getFallbackMaxTokens,
  getFallbackTemperature,
  getBaseUrlForRole,
  // Global setting getters (No env var overrides)
  getLogLevel,
  getDebugFlag,
  getDefaultNumTasks,
  getDefaultSubtasks,
  getDefaultPriority,
  getProjectName,
  getOllamaBaseURL,
  getAzureBaseURL,
  getBedrockBaseURL,
  getResponseLanguage,
  getCodebaseAnalysisEnabled,
  isCodebaseAnalysisEnabled,
  getProxyEnabled,
  isProxyEnabled,
  getParametersForRole,
  getUserId,
  // API Key Checkers (still relevant)
  isApiKeySet,
  getMcpApiKeyStatus,
  // Provider name listing (validated + custom)
  getAllProviders,
  getVertexProjectId,
  getVertexLocation
};
```
--------------------------------------------------------------------------------
/tests/unit/scripts/modules/task-manager/expand-task.test.js:
--------------------------------------------------------------------------------
```javascript
import fs from 'fs';
/**
* Tests for the expand-task.js module
*/
import { jest } from '@jest/globals';
import {
createGetTagAwareFilePathMock,
createSlugifyTagForFilePathMock
} from './setup.js';
// Mock the dependencies before importing the module under test
jest.unstable_mockModule('../../../../../scripts/modules/utils.js', () => ({
  readJSON: jest.fn(),
  writeJSON: jest.fn(),
  log: jest.fn(),
  CONFIG: {
    model: 'mock-claude-model',
    maxTokens: 4000,
    temperature: 0.7,
    debug: false
  },
  sanitizePrompt: jest.fn((prompt) => prompt),
  truncate: jest.fn((text) => text),
  isSilentMode: jest.fn(() => false),
  findTaskById: jest.fn(),
  findProjectRoot: jest.fn((tasksPath) => '/mock/project/root'),
  getCurrentTag: jest.fn(() => 'master'),
  resolveTag: jest.fn(() => 'master'),
  addComplexityToTask: jest.fn((task, complexity) => ({ ...task, complexity })),
  ensureTagMetadata: jest.fn((tagObj) => tagObj),
  // Breadth-first flatten of tasks and their subtasks; subtask ids are
  // rewritten to the dotted "<parentId>.<subId>" form.
  flattenTasksWithSubtasks: jest.fn((tasks) => {
    const allTasks = [];
    const queue = [...(tasks || [])];
    while (queue.length > 0) {
      const task = queue.shift();
      allTasks.push(task);
      if (task.subtasks) {
        for (const subtask of task.subtasks) {
          queue.push({ ...subtask, id: `${task.id}.${subtask.id}` });
        }
      }
    }
    return allTasks;
  }),
  getTagAwareFilePath: createGetTagAwareFilePathMock(),
  slugifyTagForFilePath: createSlugifyTagForFilePathMock(),
  readComplexityReport: jest.fn(),
  markMigrationForNotice: jest.fn(),
  performCompleteTagMigration: jest.fn(),
  setTasksForTag: jest.fn(),
  getTasksForTag: jest.fn((data, tag) => data[tag]?.tasks || [])
}));
// Mock ui.js so no terminal rendering or spinners run during tests.
jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({
  displayBanner: jest.fn(),
  getStatusWithColor: jest.fn((status) => status),
  startLoadingIndicator: jest.fn(),
  stopLoadingIndicator: jest.fn(),
  succeedLoadingIndicator: jest.fn(),
  failLoadingIndicator: jest.fn(),
  warnLoadingIndicator: jest.fn(),
  infoLoadingIndicator: jest.fn(),
  displayAiUsageSummary: jest.fn(),
  displayContextAnalysis: jest.fn()
}));
// Mock the unified AI service: generateObjectService resolves with three
// deterministic subtasks plus a telemetry record used by the assertions below.
jest.unstable_mockModule(
  '../../../../../scripts/modules/ai-services-unified.js',
  () => ({
    generateObjectService: jest.fn().mockResolvedValue({
      mainResult: {
        subtasks: [
          {
            id: 1,
            title: 'Set up project structure',
            description:
              'Create the basic project directory structure and configuration files',
            dependencies: [],
            details:
              'Initialize package.json, create src/ and test/ directories, set up linting configuration',
            status: 'pending',
            testStrategy:
              'Verify all expected files and directories are created'
          },
          {
            id: 2,
            title: 'Implement core functionality',
            description: 'Develop the main application logic and core features',
            dependencies: [1],
            details:
              'Create main classes, implement business logic, set up data models',
            status: 'pending',
            testStrategy: 'Unit tests for all core functions and classes'
          },
          {
            id: 3,
            title: 'Add user interface',
            description: 'Create the user interface components and layouts',
            dependencies: [2],
            details:
              'Design UI components, implement responsive layouts, add user interactions',
            status: 'pending',
            testStrategy: 'UI tests and visual regression testing'
          }
        ]
      },
      telemetryData: {
        timestamp: new Date().toISOString(),
        userId: '1234567890',
        commandName: 'expand-task',
        modelUsed: 'claude-3-5-sonnet',
        providerName: 'anthropic',
        inputTokens: 1000,
        outputTokens: 500,
        totalTokens: 1500,
        totalCost: 0.012414,
        currency: 'USD'
      }
    })
  })
);
// Mock config-manager with fixed defaults and provider names.
jest.unstable_mockModule(
  '../../../../../scripts/modules/config-manager.js',
  () => ({
    getDefaultSubtasks: jest.fn(() => 3),
    getDebugFlag: jest.fn(() => false),
    getDefaultNumTasks: jest.fn(() => 10),
    getMainProvider: jest.fn(() => 'openai'),
    getResearchProvider: jest.fn(() => 'perplexity'),
    hasCodebaseAnalysis: jest.fn(() => false)
  })
);
// Context gatherer resolves with canned project context.
jest.unstable_mockModule(
  '../../../../../scripts/modules/utils/contextGatherer.js',
  () => ({
    ContextGatherer: jest.fn().mockImplementation(() => ({
      gather: jest.fn().mockResolvedValue({
        context: 'Mock project context from files'
      })
    }))
  })
);
// Fuzzy task search finds no related tasks by default.
jest.unstable_mockModule(
  '../../../../../scripts/modules/utils/fuzzyTaskSearch.js',
  () => ({
    FuzzyTaskSearch: jest.fn().mockImplementation(() => ({
      findRelevantTasks: jest.fn().mockReturnValue([]),
      getTaskIds: jest.fn().mockReturnValue([])
    }))
  })
);
// Task-file generation is a resolved no-op.
jest.unstable_mockModule(
  '../../../../../scripts/modules/task-manager/generate-task-files.js',
  () => ({
    default: jest.fn().mockResolvedValue()
  })
);
// Prompt manager returns fixed system/user prompts.
jest.unstable_mockModule(
  '../../../../../scripts/modules/prompt-manager.js',
  () => ({
    getPromptManager: jest.fn().mockReturnValue({
      loadPrompt: jest.fn().mockReturnValue({
        systemPrompt: 'Mocked system prompt',
        userPrompt: 'Mocked user prompt'
      })
    })
  })
);
// Mock external UI libraries
// chalk: every color/style helper is an identity function so output stays plain.
jest.unstable_mockModule('chalk', () => ({
  default: {
    white: { bold: jest.fn((text) => text) },
    cyan: Object.assign(
      jest.fn((text) => text),
      {
        bold: jest.fn((text) => text)
      }
    ),
    green: jest.fn((text) => text),
    yellow: jest.fn((text) => text),
    red: jest.fn((text) => text),
    blue: jest.fn((text) => text),
    magenta: jest.fn((text) => text),
    gray: jest.fn((text) => text),
    bold: jest.fn((text) => text),
    dim: jest.fn((text) => text)
  }
}));
// boxen: identity, no box drawing.
jest.unstable_mockModule('boxen', () => ({
  default: jest.fn((text) => text)
}));
// cli-table3: minimal table stub.
jest.unstable_mockModule('cli-table3', () => ({
  default: jest.fn().mockImplementation(() => ({
    push: jest.fn(),
    toString: jest.fn(() => 'mocked table')
  }))
}));
// Mock @tm/bridge module (remote expansion disabled: resolves null)
jest.unstable_mockModule('@tm/bridge', () => ({
  tryExpandViaRemote: jest.fn().mockResolvedValue(null)
}));
// Mock bridge-utils module
jest.unstable_mockModule(
  '../../../../../scripts/modules/bridge-utils.js',
  () => ({
    createBridgeLogger: jest.fn(() => ({
      logger: {
        info: jest.fn(),
        warn: jest.fn(),
        error: jest.fn(),
        debug: jest.fn()
      },
      report: jest.fn(),
      isMCP: false
    }))
  })
);
// Mock process.exit to prevent Jest worker crashes
// (throws instead of exiting so tests can assert on the failure).
const mockExit = jest.spyOn(process, 'exit').mockImplementation((code) => {
  throw new Error(`process.exit called with "${code}"`);
});
// Import the mocked modules
const {
readJSON,
writeJSON,
log,
findTaskById,
ensureTagMetadata,
readComplexityReport,
findProjectRoot
} = await import('../../../../../scripts/modules/utils.js');
const { generateObjectService } = await import(
'../../../../../scripts/modules/ai-services-unified.js'
);
const generateTaskFiles = (
await import(
'../../../../../scripts/modules/task-manager/generate-task-files.js'
)
).default;
const { getDefaultSubtasks } = await import(
'../../../../../scripts/modules/config-manager.js'
);
const { tryExpandViaRemote } = await import('@tm/bridge');
// Import the module under test
const { default: expandTask } = await import(
'../../../../../scripts/modules/task-manager/expand-task.js'
);
describe('expandTask', () => {
const sampleTasks = {
master: {
tasks: [
{
id: 1,
title: 'Task 1',
description: 'First task',
status: 'done',
dependencies: [],
details: 'Already completed task',
subtasks: []
},
{
id: 2,
title: 'Task 2',
description: 'Second task',
status: 'pending',
dependencies: [],
details: 'Task ready for expansion',
subtasks: []
},
{
id: 3,
title: 'Complex Task',
description: 'A complex task that needs breakdown',
status: 'pending',
dependencies: [1],
details: 'This task involves multiple steps',
subtasks: []
},
{
id: 4,
title: 'Task with existing subtasks',
description: 'Task that already has subtasks',
status: 'pending',
dependencies: [],
details: 'Has existing subtasks',
subtasks: [
{
id: 1,
title: 'Existing subtask',
description: 'Already exists',
status: 'pending',
dependencies: []
}
]
}
]
},
'feature-branch': {
tasks: [
{
id: 1,
title: 'Feature Task 1',
description: 'Task in feature branch',
status: 'pending',
dependencies: [],
details: 'Feature-specific task',
subtasks: []
}
]
}
};
// Create a helper function for consistent mcpLog mock
const createMcpLogMock = () => ({
info: jest.fn(),
warn: jest.fn(),
error: jest.fn(),
debug: jest.fn(),
success: jest.fn()
});
beforeEach(() => {
jest.clearAllMocks();
mockExit.mockClear();
// Default readJSON implementation - returns tagged structure
readJSON.mockImplementation((tasksPath, projectRoot, tag) => {
const sampleTasksCopy = JSON.parse(JSON.stringify(sampleTasks));
const selectedTag = tag || 'master';
return {
...sampleTasksCopy[selectedTag],
tag: selectedTag,
_rawTaggedData: sampleTasksCopy
};
});
// Default findTaskById implementation
findTaskById.mockImplementation((tasks, taskId) => {
const id = parseInt(taskId, 10);
return tasks.find((t) => t.id === id);
});
// Default complexity report (no report available)
readComplexityReport.mockReturnValue(null);
// Mock findProjectRoot to return consistent path for complexity report
findProjectRoot.mockReturnValue('/mock/project/root');
writeJSON.mockResolvedValue();
generateTaskFiles.mockResolvedValue();
log.mockImplementation(() => {});
// Mock console.log to avoid output during tests
jest.spyOn(console, 'log').mockImplementation(() => {});
});
afterEach(() => {
console.log.mockRestore();
});
describe('Basic Functionality', () => {
test('should expand a task with AI-generated subtasks', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const numSubtasks = 3;
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
// Act
const result = await expandTask(
tasksPath,
taskId,
numSubtasks,
false,
'',
context,
false
);
// Assert
expect(readJSON).toHaveBeenCalledWith(
tasksPath,
'/mock/project/root',
undefined
);
expect(generateObjectService).toHaveBeenCalledWith(expect.any(Object));
expect(writeJSON).toHaveBeenCalledWith(
tasksPath,
expect.objectContaining({
tasks: expect.arrayContaining([
expect.objectContaining({
id: 2,
subtasks: expect.arrayContaining([
expect.objectContaining({
id: 1,
title: 'Set up project structure',
status: 'pending'
}),
expect.objectContaining({
id: 2,
title: 'Implement core functionality',
status: 'pending'
}),
expect.objectContaining({
id: 3,
title: 'Add user interface',
status: 'pending'
})
])
})
]),
tag: 'master',
_rawTaggedData: expect.objectContaining({
master: expect.objectContaining({
tasks: expect.any(Array)
})
})
}),
'/mock/project/root',
undefined
);
expect(result).toEqual(
expect.objectContaining({
task: expect.objectContaining({
id: 2,
subtasks: expect.arrayContaining([
expect.objectContaining({
id: 1,
title: 'Set up project structure',
status: 'pending'
}),
expect.objectContaining({
id: 2,
title: 'Implement core functionality',
status: 'pending'
}),
expect.objectContaining({
id: 3,
title: 'Add user interface',
status: 'pending'
})
])
}),
telemetryData: expect.any(Object)
})
);
});
test('should handle research flag correctly', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const numSubtasks = 3;
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
// Act
await expandTask(
tasksPath,
taskId,
numSubtasks,
true, // useResearch = true
'Additional context for research',
context,
false
);
// Assert
expect(generateObjectService).toHaveBeenCalledWith(
expect.objectContaining({
role: 'research',
commandName: expect.any(String)
})
);
});
test('should handle complexity report integration without errors', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
// Act & Assert - Should complete without errors
const result = await expandTask(
tasksPath,
taskId,
undefined, // numSubtasks not specified
false,
'',
context,
false
);
// Assert - Should successfully expand and return expected structure
expect(result).toEqual(
expect.objectContaining({
task: expect.objectContaining({
id: 2,
subtasks: expect.any(Array)
}),
telemetryData: expect.any(Object)
})
);
expect(generateObjectService).toHaveBeenCalled();
});
});
describe('Tag Handling (The Critical Bug Fix)', () => {
test('should preserve tagged structure when expanding with default tag', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root',
tag: 'master' // Explicit tag context
};
// Act
await expandTask(tasksPath, taskId, 3, false, '', context, false);
// Assert - CRITICAL: Check tag is passed to readJSON and writeJSON
expect(readJSON).toHaveBeenCalledWith(
tasksPath,
'/mock/project/root',
'master'
);
expect(writeJSON).toHaveBeenCalledWith(
tasksPath,
expect.objectContaining({
tag: 'master',
_rawTaggedData: expect.objectContaining({
master: expect.any(Object),
'feature-branch': expect.any(Object)
})
}),
'/mock/project/root',
'master' // CRITICAL: Tag must be passed to writeJSON
);
});
test('should preserve tagged structure when expanding with non-default tag', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '1'; // Task in feature-branch
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root',
tag: 'feature-branch' // Different tag context
};
// Configure readJSON to return feature-branch data
readJSON.mockImplementation((tasksPath, projectRoot, tag) => {
const sampleTasksCopy = JSON.parse(JSON.stringify(sampleTasks));
return {
...sampleTasksCopy['feature-branch'],
tag: 'feature-branch',
_rawTaggedData: sampleTasksCopy
};
});
// Act
await expandTask(tasksPath, taskId, 3, false, '', context, false);
// Assert - CRITICAL: Check tag preservation for non-default tag
expect(readJSON).toHaveBeenCalledWith(
tasksPath,
'/mock/project/root',
'feature-branch'
);
expect(writeJSON).toHaveBeenCalledWith(
tasksPath,
expect.objectContaining({
tag: 'feature-branch',
_rawTaggedData: expect.objectContaining({
master: expect.any(Object),
'feature-branch': expect.any(Object)
})
}),
'/mock/project/root',
'feature-branch' // CRITICAL: Correct tag passed to writeJSON
);
});
test('should NOT corrupt tagged structure when tag is undefined', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
// No tag specified - should default gracefully
};
// Act
await expandTask(tasksPath, taskId, 3, false, '', context, false);
// Assert - Should still preserve structure with undefined tag
expect(readJSON).toHaveBeenCalledWith(
tasksPath,
'/mock/project/root',
undefined
);
expect(writeJSON).toHaveBeenCalledWith(
tasksPath,
expect.objectContaining({
_rawTaggedData: expect.objectContaining({
master: expect.any(Object)
})
}),
'/mock/project/root',
undefined
);
// CRITICAL: Verify structure is NOT flattened to old format
const writeCallArgs = writeJSON.mock.calls[0][1];
expect(writeCallArgs).toHaveProperty('tasks'); // Should have tasks property from readJSON mock
expect(writeCallArgs).toHaveProperty('_rawTaggedData'); // Should preserve tagged structure
});
});
describe('Force Flag Handling', () => {
test('should replace existing subtasks when force=true', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '4'; // Task with existing subtasks
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
// Act
await expandTask(tasksPath, taskId, 3, false, '', context, true);
// Assert - Should replace existing subtasks
expect(writeJSON).toHaveBeenCalledWith(
tasksPath,
expect.objectContaining({
tasks: expect.arrayContaining([
expect.objectContaining({
id: 4,
subtasks: expect.arrayContaining([
expect.objectContaining({
id: 1,
title: 'Set up project structure'
})
])
})
])
}),
'/mock/project/root',
undefined
);
});
test('should append to existing subtasks when force=false', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '4'; // Task with existing subtasks
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
// Act
await expandTask(tasksPath, taskId, 3, false, '', context, false);
// Assert - Verify generateObjectService was called correctly
expect(generateObjectService).toHaveBeenCalledWith(
expect.objectContaining({
role: 'main',
commandName: 'expand-task',
objectName: 'subtasks'
})
);
// Assert - Verify data was written with appended subtasks
expect(writeJSON).toHaveBeenCalled();
const writeCall = writeJSON.mock.calls[0];
const savedData = writeCall[1]; // Second argument is the data
const task4 = savedData.tasks.find((t) => t.id === 4);
// Should have 4 subtasks total (1 existing + 3 new)
expect(task4.subtasks).toHaveLength(4);
// Verify existing subtask is preserved at index 0
expect(task4.subtasks[0]).toEqual(
expect.objectContaining({
id: 1,
title: 'Existing subtask'
})
);
// Verify new subtasks were appended (they start with id=1 from AI)
expect(task4.subtasks[1]).toEqual(
expect.objectContaining({
id: 1,
title: 'Set up project structure'
})
);
});
});
describe('Complexity Report Integration (Tag-Specific)', () => {
test('should use tag-specific complexity report when available', async () => {
// Arrange
const { getPromptManager } = await import(
'../../../../../scripts/modules/prompt-manager.js'
);
const mockLoadPrompt = jest.fn().mockReturnValue({
systemPrompt: 'Generate exactly 5 subtasks for complexity report',
userPrompt:
'Please break this task into 5 parts\n\nUser provided context'
});
getPromptManager.mockReturnValue({
loadPrompt: mockLoadPrompt
});
const tasksPath = 'tasks/tasks.json';
const taskId = '1'; // Task in feature-branch
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root',
tag: 'feature-branch',
complexityReportPath:
'/mock/project/root/task-complexity-report_feature-branch.json'
};
// Stub fs.existsSync to simulate complexity report exists for this tag
const existsSpy = jest
.spyOn(fs, 'existsSync')
.mockImplementation((filepath) =>
filepath.endsWith('task-complexity-report_feature-branch.json')
);
// Stub readJSON to return complexity report when reading the report path
readJSON.mockImplementation((filepath, projectRootParam, tagParam) => {
if (filepath.includes('task-complexity-report_feature-branch.json')) {
return {
complexityAnalysis: [
{
taskId: 1,
complexityScore: 8,
recommendedSubtasks: 5,
reasoning: 'Needs five detailed steps',
expansionPrompt: 'Please break this task into 5 parts'
}
]
};
}
// Default tasks data for tasks.json
const sampleTasksCopy = JSON.parse(JSON.stringify(sampleTasks));
const selectedTag = tagParam || 'master';
return {
...sampleTasksCopy[selectedTag],
tag: selectedTag,
_rawTaggedData: sampleTasksCopy
};
});
// Act
await expandTask(tasksPath, taskId, undefined, false, '', context, false);
// Assert - generateObjectService called with systemPrompt for 5 subtasks
const callArg = generateObjectService.mock.calls[0][0];
expect(callArg.systemPrompt).toContain('Generate exactly 5 subtasks');
// Assert - Should use complexity-report variant with expansion prompt
expect(mockLoadPrompt).toHaveBeenCalledWith(
'expand-task',
expect.objectContaining({
subtaskCount: 5,
expansionPrompt: 'Please break this task into 5 parts'
}),
'complexity-report'
);
// Clean up stub
existsSpy.mockRestore();
});
});
describe('Error Handling', () => {
test('should handle non-existent task ID', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '999'; // Non-existent task
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
findTaskById.mockReturnValue(null);
// Act & Assert
await expect(
expandTask(tasksPath, taskId, 3, false, '', context, false)
).rejects.toThrow('Task 999 not found');
expect(writeJSON).not.toHaveBeenCalled();
});
test('should expand tasks regardless of status (including done tasks)', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '1'; // Task with 'done' status
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
// Act
const result = await expandTask(
tasksPath,
taskId,
3,
false,
'',
context,
false
);
// Assert - Should successfully expand even 'done' tasks
expect(writeJSON).toHaveBeenCalled();
expect(result).toEqual(
expect.objectContaining({
task: expect.objectContaining({
id: 1,
status: 'done', // Status unchanged
subtasks: expect.arrayContaining([
expect.objectContaining({
id: 1,
title: 'Set up project structure',
status: 'pending'
})
])
}),
telemetryData: expect.any(Object)
})
);
});
test('should handle AI service failures', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
generateObjectService.mockRejectedValueOnce(
new Error('AI service error')
);
// Act & Assert
await expect(
expandTask(tasksPath, taskId, 3, false, '', context, false)
).rejects.toThrow('AI service error');
expect(writeJSON).not.toHaveBeenCalled();
});
test('should handle missing mainResult from AI response', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
// Mock AI service returning response without mainResult
generateObjectService.mockResolvedValueOnce({
telemetryData: { inputTokens: 100, outputTokens: 50 }
// Missing mainResult
});
// Act & Assert
await expect(
expandTask(tasksPath, taskId, 3, false, '', context, false)
).rejects.toThrow('AI response did not include a valid subtasks array.');
expect(writeJSON).not.toHaveBeenCalled();
});
test('should handle invalid subtasks array from AI response', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
// Mock AI service returning response with invalid subtasks
generateObjectService.mockResolvedValueOnce({
mainResult: {
subtasks: 'not-an-array' // Invalid: should be an array
},
telemetryData: { inputTokens: 100, outputTokens: 50 }
});
// Act & Assert
await expect(
expandTask(tasksPath, taskId, 3, false, '', context, false)
).rejects.toThrow('AI response did not include a valid subtasks array.');
expect(writeJSON).not.toHaveBeenCalled();
});
test('should handle file read errors', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
readJSON.mockImplementation(() => {
throw new Error('File read failed');
});
// Act & Assert
await expect(
expandTask(tasksPath, taskId, 3, false, '', context, false)
).rejects.toThrow('File read failed');
expect(writeJSON).not.toHaveBeenCalled();
});
test('should handle invalid tasks data', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
readJSON.mockReturnValue(null);
// Act & Assert
await expect(
expandTask(tasksPath, taskId, 3, false, '', context, false)
).rejects.toThrow();
});
});
describe('Output Format Handling', () => {
	// Fixtures shared by both output-format tests.
	const tasksPath = 'tasks/tasks.json';
	const taskId = '2';

	test('should display telemetry for CLI output format', async () => {
		const { displayAiUsageSummary } = await import(
			'../../../../../scripts/modules/ui.js'
		);
		// Omitting mcpLog from the context puts expandTask in CLI mode.
		const cliContext = { projectRoot: '/mock/project/root' };

		await expandTask(tasksPath, taskId, 3, false, '', cliContext, false);

		// CLI users get the AI usage summary printed directly.
		expect(displayAiUsageSummary).toHaveBeenCalledWith(
			expect.objectContaining({
				commandName: 'expand-task',
				modelUsed: 'claude-3-5-sonnet',
				totalCost: 0.012414
			}),
			'cli'
		);
	});

	test('should not display telemetry for MCP output format', async () => {
		const { displayAiUsageSummary } = await import(
			'../../../../../scripts/modules/ui.js'
		);
		// Presence of mcpLog switches expandTask into MCP mode.
		const mcpContext = {
			mcpLog: createMcpLogMock(),
			projectRoot: '/mock/project/root'
		};

		await expandTask(tasksPath, taskId, 3, false, '', mcpContext, false);

		// In MCP mode, telemetry is reported at a higher level, not printed here.
		expect(displayAiUsageSummary).not.toHaveBeenCalled();
	});
});
describe('Edge Cases', () => {
	// Fixtures shared by the edge-case tests.
	const tasksPath = 'tasks/tasks.json';
	const taskId = '2';

	test('should handle empty additional context', async () => {
		const mcpContext = {
			mcpLog: createMcpLogMock(),
			projectRoot: '/mock/project/root'
		};

		await expandTask(tasksPath, taskId, 3, false, '', mcpContext, false);

		// Works with empty additional context (project context may still be gathered).
		expect(generateObjectService).toHaveBeenCalledWith(
			expect.objectContaining({
				prompt: expect.stringMatching(/.*/) // Just ensure prompt exists
			})
		);
	});

	test('should handle additional context correctly', async () => {
		const { getPromptManager } = await import(
			'../../../../../scripts/modules/prompt-manager.js'
		);
		const loadPromptStub = jest.fn().mockReturnValue({
			systemPrompt: 'Mocked system prompt',
			userPrompt: 'Mocked user prompt with context'
		});
		getPromptManager.mockReturnValue({ loadPrompt: loadPromptStub });

		const extraContext = 'Use React hooks and TypeScript';
		const mcpContext = {
			mcpLog: createMcpLogMock(),
			projectRoot: '/mock/project/root'
		};

		await expandTask(
			tasksPath,
			taskId,
			3,
			false,
			extraContext,
			mcpContext,
			false
		);

		// The prompt manager must receive the caller-supplied context and the
		// gathered project context as separate parameters.
		expect(loadPromptStub).toHaveBeenCalledWith(
			'expand-task',
			expect.objectContaining({
				additionalContext: expect.stringContaining(
					'Use React hooks and TypeScript'
				),
				gatheredContext: expect.stringContaining(
					'Mock project context from files'
				)
			}),
			expect.any(String)
		);

		// Double-check the raw parameters object handed to loadPrompt.
		const [, promptParams] = loadPromptStub.mock.calls[0];
		expect(promptParams.additionalContext).toContain(
			'Use React hooks and TypeScript'
		);
		expect(promptParams.gatheredContext).toContain(
			'Mock project context from files'
		);
	});

	test('should handle missing project root in context', async () => {
		// No projectRoot supplied - it must be derived from tasksPath.
		const mcpContext = { mcpLog: createMcpLogMock() };

		await expandTask(tasksPath, taskId, 3, false, '', mcpContext, false);

		expect(findProjectRoot).toHaveBeenCalledWith(tasksPath);
		expect(readJSON).toHaveBeenCalledWith(
			tasksPath,
			'/mock/project/root',
			undefined
		);
	});
});
describe('Dynamic Subtask Generation', () => {
	const tasksPath = 'tasks/tasks.json';
	const taskId = 1;
	const context = { session: null, mcpLog: null };

	/**
	 * Stubs the prompt manager so loadPrompt returns a realistic prompt pair.
	 * The system prompt embeds `systemCountPhrase` (e.g. '5' or
	 * 'an appropriate number of') and the user prompt embeds `userCountPhrase`
	 * (e.g. 'exactly 5'). Extracted here because this setup was previously
	 * duplicated verbatim across five tests.
	 */
	async function stubPromptManager(systemCountPhrase, userCountPhrase) {
		const { getPromptManager } = await import(
			'../../../../../scripts/modules/prompt-manager.js'
		);
		const mockLoadPrompt = jest.fn().mockReturnValue({
			systemPrompt: `You are an AI assistant helping with task breakdown for software development. You need to break down a high-level task into ${systemCountPhrase} specific subtasks that can be implemented one by one.`,
			userPrompt: `Break down this task into ${userCountPhrase} specific subtasks`
		});
		getPromptManager.mockReturnValue({
			loadPrompt: mockLoadPrompt
		});
	}

	beforeEach(() => {
		// Reset call history, then restore the task fixtures these tests rely on.
		// (clearAllMocks clears calls, not implementations, so the default
		// generateObjectService behavior set up by the outer suite survives.)
		jest.clearAllMocks();
		readJSON.mockReturnValue({
			tasks: [
				{
					id: 1,
					title: 'Test Task',
					description: 'A test task',
					status: 'pending',
					subtasks: []
				}
			]
		});
		findTaskById.mockReturnValue({
			id: 1,
			title: 'Test Task',
			description: 'A test task',
			status: 'pending',
			subtasks: []
		});
		findProjectRoot.mockReturnValue('/mock/project/root');
	});

	test('should accept 0 as valid numSubtasks value for dynamic generation', async () => {
		// Act - Call with numSubtasks=0 (should not throw error)
		const result = await expandTask(
			tasksPath,
			taskId,
			0,
			false,
			'',
			context,
			false
		);
		// Assert - Should complete successfully
		expect(result).toBeDefined();
		expect(generateObjectService).toHaveBeenCalled();
	});

	test('should use dynamic prompting when numSubtasks is 0', async () => {
		await stubPromptManager(
			'an appropriate number of',
			'an appropriate number of'
		);
		// Act
		await expandTask(tasksPath, taskId, 0, false, '', context, false);
		// Assert - numSubtasks=0 triggers the open-ended ("appropriate number") prompt
		expect(generateObjectService).toHaveBeenCalled();
		const callArgs = generateObjectService.mock.calls[0][0];
		expect(callArgs.systemPrompt).toContain(
			'an appropriate number of specific subtasks'
		);
	});

	test('should use specific count prompting when numSubtasks is positive', async () => {
		await stubPromptManager('5', 'exactly 5');
		// Act
		await expandTask(tasksPath, taskId, 5, false, '', context, false);
		// Assert - a positive count is embedded verbatim in the system prompt
		expect(generateObjectService).toHaveBeenCalled();
		const callArgs = generateObjectService.mock.calls[0][0];
		expect(callArgs.systemPrompt).toContain('5 specific subtasks');
	});

	test('should reject negative numSubtasks values and fallback to default', async () => {
		getDefaultSubtasks.mockReturnValue(4);
		await stubPromptManager('4', 'exactly 4');
		// Act
		await expandTask(tasksPath, taskId, -3, false, '', context, false);
		// Assert - negative counts fall back to the configured default
		expect(generateObjectService).toHaveBeenCalled();
		const callArgs = generateObjectService.mock.calls[0][0];
		expect(callArgs.systemPrompt).toContain('4 specific subtasks');
	});

	test('should use getDefaultSubtasks when numSubtasks is undefined', async () => {
		getDefaultSubtasks.mockReturnValue(6);
		await stubPromptManager('6', 'exactly 6');
		// Act - Call without specifying numSubtasks (undefined)
		await expandTask(tasksPath, taskId, undefined, false, '', context, false);
		// Assert - Should use default value
		expect(generateObjectService).toHaveBeenCalled();
		const callArgs = generateObjectService.mock.calls[0][0];
		expect(callArgs.systemPrompt).toContain('6 specific subtasks');
	});

	test('should use getDefaultSubtasks when numSubtasks is null', async () => {
		getDefaultSubtasks.mockReturnValue(7);
		await stubPromptManager('7', 'exactly 7');
		// Act - Call with null numSubtasks
		await expandTask(tasksPath, taskId, null, false, '', context, false);
		// Assert - Should use default value
		expect(generateObjectService).toHaveBeenCalled();
		const callArgs = generateObjectService.mock.calls[0][0];
		expect(callArgs.systemPrompt).toContain('7 specific subtasks');
	});
});
describe('Remote Expansion via Bridge', () => {
	const tasksPath = '/fake/path/tasks.json';
	const taskId = '2';
	const context = { tag: 'master' };

	test('should use remote expansion result when tryExpandViaRemote succeeds', async () => {
		// Arrange - Mock successful remote expansion
		const remoteResult = {
			success: true,
			message: 'Task expanded successfully via remote',
			data: {
				subtasks: [
					{
						id: 1,
						title: 'Remote Subtask 1',
						description: 'First remote subtask',
						status: 'pending',
						dependencies: []
					},
					{
						id: 2,
						title: 'Remote Subtask 2',
						description: 'Second remote subtask',
						status: 'pending',
						dependencies: [1]
					}
				]
			}
		};
		tryExpandViaRemote.mockResolvedValue(remoteResult);
		// Act
		const result = await expandTask(
			tasksPath,
			taskId,
			2,
			false,
			'',
			context,
			false
		);
		// Assert - Should use remote result and NOT call local AI service
		expect(tryExpandViaRemote).toHaveBeenCalled();
		expect(generateObjectService).not.toHaveBeenCalled();
		expect(result).toEqual(remoteResult);
	});

	test('should fallback to local expansion when tryExpandViaRemote returns null', async () => {
		// Arrange - Mock remote returning null (no remote available)
		tryExpandViaRemote.mockResolvedValue(null);
		// Act
		await expandTask(tasksPath, taskId, 3, false, '', context, false);
		// Assert - Should fallback to local expansion
		expect(tryExpandViaRemote).toHaveBeenCalled();
		expect(generateObjectService).toHaveBeenCalled();
		expect(writeJSON).toHaveBeenCalled();
	});

	test('should propagate error when tryExpandViaRemote throws error', async () => {
		// Arrange - the bridge re-throws failures rather than returning null.
		// Use the idiomatic jest helper instead of a manual Promise.reject wrapper.
		tryExpandViaRemote.mockRejectedValue(
			new Error('Remote expansion service unavailable')
		);
		// Act & Assert - Should propagate the error (not fallback to local)
		await expect(
			expandTask(tasksPath, taskId, 3, false, '', context, false)
		).rejects.toThrow('Remote expansion service unavailable');
		expect(tryExpandViaRemote).toHaveBeenCalled();
		// Local expansion should NOT be called when remote throws
		expect(generateObjectService).not.toHaveBeenCalled();
	});

	test('should pass correct parameters to tryExpandViaRemote', async () => {
		// Arrange
		const taskIdStr = '2'; // Use task 2 which exists in master tag
		const numSubtasks = 5;
		const additionalContext = 'Extra context for expansion';
		const useResearch = false; // Note: useResearch is the 4th param, not 7th
		const force = true; // Note: force is the 7th param
		const contextObj = {
			tag: 'master', // Use master tag where task 2 exists
			projectRoot: '/mock/project'
		};
		tryExpandViaRemote.mockResolvedValue(null);
		// Act
		await expandTask(
			tasksPath,
			taskIdStr,
			numSubtasks,
			useResearch, // 4th param
			additionalContext, // 5th param
			contextObj, // 6th param
			force // 7th param
		);
		// Assert - Verify tryExpandViaRemote was called with correct params
		// Note: The actual call has a flat structure, not nested context
		expect(tryExpandViaRemote).toHaveBeenCalledWith(
			expect.objectContaining({
				taskId: taskIdStr,
				numSubtasks,
				additionalContext,
				useResearch,
				force,
				projectRoot: '/mock/project',
				tag: 'master',
				isMCP: expect.any(Boolean),
				outputFormat: expect.any(String),
				report: expect.any(Function)
			})
		);
	});
});
});
```