# Codebase Snapshot — claude-task-master (page 35 of 52)

Tokens: 44864/50000 · Files shown: 4/821

This is page 35 of 52 of a paginated export. Use http://codebase.md/eyaltoledano/claude-task-master?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .changeset
│   ├── config.json
│   └── README.md
├── .claude
│   ├── agents
│   │   ├── task-checker.md
│   │   ├── task-executor.md
│   │   └── task-orchestrator.md
│   ├── commands
│   │   ├── dedupe.md
│   │   └── tm
│   │       ├── add-dependency
│   │       │   └── add-dependency.md
│   │       ├── add-subtask
│   │       │   ├── add-subtask.md
│   │       │   └── convert-task-to-subtask.md
│   │       ├── add-task
│   │       │   └── add-task.md
│   │       ├── analyze-complexity
│   │       │   └── analyze-complexity.md
│   │       ├── complexity-report
│   │       │   └── complexity-report.md
│   │       ├── expand
│   │       │   ├── expand-all-tasks.md
│   │       │   └── expand-task.md
│   │       ├── fix-dependencies
│   │       │   └── fix-dependencies.md
│   │       ├── generate
│   │       │   └── generate-tasks.md
│   │       ├── help.md
│   │       ├── init
│   │       │   ├── init-project-quick.md
│   │       │   └── init-project.md
│   │       ├── learn.md
│   │       ├── list
│   │       │   ├── list-tasks-by-status.md
│   │       │   ├── list-tasks-with-subtasks.md
│   │       │   └── list-tasks.md
│   │       ├── models
│   │       │   ├── setup-models.md
│   │       │   └── view-models.md
│   │       ├── next
│   │       │   └── next-task.md
│   │       ├── parse-prd
│   │       │   ├── parse-prd-with-research.md
│   │       │   └── parse-prd.md
│   │       ├── remove-dependency
│   │       │   └── remove-dependency.md
│   │       ├── remove-subtask
│   │       │   └── remove-subtask.md
│   │       ├── remove-subtasks
│   │       │   ├── remove-all-subtasks.md
│   │       │   └── remove-subtasks.md
│   │       ├── remove-task
│   │       │   └── remove-task.md
│   │       ├── set-status
│   │       │   ├── to-cancelled.md
│   │       │   ├── to-deferred.md
│   │       │   ├── to-done.md
│   │       │   ├── to-in-progress.md
│   │       │   ├── to-pending.md
│   │       │   └── to-review.md
│   │       ├── setup
│   │       │   ├── install-taskmaster.md
│   │       │   └── quick-install-taskmaster.md
│   │       ├── show
│   │       │   └── show-task.md
│   │       ├── status
│   │       │   └── project-status.md
│   │       ├── sync-readme
│   │       │   └── sync-readme.md
│   │       ├── tm-main.md
│   │       ├── update
│   │       │   ├── update-single-task.md
│   │       │   ├── update-task.md
│   │       │   └── update-tasks-from-id.md
│   │       ├── utils
│   │       │   └── analyze-project.md
│   │       ├── validate-dependencies
│   │       │   └── validate-dependencies.md
│   │       └── workflows
│   │           ├── auto-implement-tasks.md
│   │           ├── command-pipeline.md
│   │           └── smart-workflow.md
│   └── TM_COMMANDS_GUIDE.md
├── .coderabbit.yaml
├── .cursor
│   ├── mcp.json
│   └── rules
│       ├── ai_providers.mdc
│       ├── ai_services.mdc
│       ├── architecture.mdc
│       ├── changeset.mdc
│       ├── commands.mdc
│       ├── context_gathering.mdc
│       ├── cursor_rules.mdc
│       ├── dependencies.mdc
│       ├── dev_workflow.mdc
│       ├── git_workflow.mdc
│       ├── glossary.mdc
│       ├── mcp.mdc
│       ├── new_features.mdc
│       ├── self_improve.mdc
│       ├── tags.mdc
│       ├── taskmaster.mdc
│       ├── tasks.mdc
│       ├── telemetry.mdc
│       ├── test_workflow.mdc
│       ├── tests.mdc
│       ├── ui.mdc
│       └── utilities.mdc
├── .cursorignore
├── .env.example
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── enhancements---feature-requests.md
│   │   └── feedback.md
│   ├── PULL_REQUEST_TEMPLATE
│   │   ├── bugfix.md
│   │   ├── config.yml
│   │   ├── feature.md
│   │   └── integration.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── scripts
│   │   ├── auto-close-duplicates.mjs
│   │   ├── backfill-duplicate-comments.mjs
│   │   ├── check-pre-release-mode.mjs
│   │   ├── parse-metrics.mjs
│   │   ├── release.mjs
│   │   ├── tag-extension.mjs
│   │   └── utils.mjs
│   └── workflows
│       ├── auto-close-duplicates.yml
│       ├── backfill-duplicate-comments.yml
│       ├── ci.yml
│       ├── claude-dedupe-issues.yml
│       ├── claude-docs-trigger.yml
│       ├── claude-docs-updater.yml
│       ├── claude-issue-triage.yml
│       ├── claude.yml
│       ├── extension-ci.yml
│       ├── extension-release.yml
│       ├── log-issue-events.yml
│       ├── pre-release.yml
│       ├── release-check.yml
│       ├── release.yml
│       ├── update-models-md.yml
│       └── weekly-metrics-discord.yml
├── .gitignore
├── .kiro
│   ├── hooks
│   │   ├── tm-code-change-task-tracker.kiro.hook
│   │   ├── tm-complexity-analyzer.kiro.hook
│   │   ├── tm-daily-standup-assistant.kiro.hook
│   │   ├── tm-git-commit-task-linker.kiro.hook
│   │   ├── tm-pr-readiness-checker.kiro.hook
│   │   ├── tm-task-dependency-auto-progression.kiro.hook
│   │   └── tm-test-success-task-completer.kiro.hook
│   ├── settings
│   │   └── mcp.json
│   └── steering
│       ├── dev_workflow.md
│       ├── kiro_rules.md
│       ├── self_improve.md
│       ├── taskmaster_hooks_workflow.md
│       └── taskmaster.md
├── .manypkg.json
├── .mcp.json
├── .npmignore
├── .nvmrc
├── .taskmaster
│   ├── CLAUDE.md
│   ├── config.json
│   ├── docs
│   │   ├── MIGRATION-ROADMAP.md
│   │   ├── prd-tm-start.txt
│   │   ├── prd.txt
│   │   ├── README.md
│   │   ├── research
│   │   │   ├── 2025-06-14_how-can-i-improve-the-scope-up-and-scope-down-comm.md
│   │   │   ├── 2025-06-14_should-i-be-using-any-specific-libraries-for-this.md
│   │   │   ├── 2025-06-14_test-save-functionality.md
│   │   │   ├── 2025-06-14_test-the-fix-for-duplicate-saves-final-test.md
│   │   │   └── 2025-08-01_do-we-need-to-add-new-commands-or-can-we-just-weap.md
│   │   ├── task-template-importing-prd.txt
│   │   ├── test-prd.txt
│   │   └── tm-core-phase-1.txt
│   ├── reports
│   │   ├── task-complexity-report_cc-kiro-hooks.json
│   │   ├── task-complexity-report_test-prd-tag.json
│   │   ├── task-complexity-report_tm-core-phase-1.json
│   │   ├── task-complexity-report.json
│   │   └── tm-core-complexity.json
│   ├── state.json
│   ├── tasks
│   │   ├── task_001_tm-start.txt
│   │   ├── task_002_tm-start.txt
│   │   ├── task_003_tm-start.txt
│   │   ├── task_004_tm-start.txt
│   │   ├── task_007_tm-start.txt
│   │   └── tasks.json
│   └── templates
│       └── example_prd.txt
├── .vscode
│   ├── extensions.json
│   └── settings.json
├── apps
│   ├── cli
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── src
│   │   │   ├── commands
│   │   │   │   ├── auth.command.ts
│   │   │   │   ├── context.command.ts
│   │   │   │   ├── list.command.ts
│   │   │   │   ├── set-status.command.ts
│   │   │   │   ├── show.command.ts
│   │   │   │   └── start.command.ts
│   │   │   ├── index.ts
│   │   │   ├── ui
│   │   │   │   ├── components
│   │   │   │   │   ├── dashboard.component.ts
│   │   │   │   │   ├── header.component.ts
│   │   │   │   │   ├── index.ts
│   │   │   │   │   ├── next-task.component.ts
│   │   │   │   │   ├── suggested-steps.component.ts
│   │   │   │   │   └── task-detail.component.ts
│   │   │   │   └── index.ts
│   │   │   └── utils
│   │   │       ├── auto-update.ts
│   │   │       └── ui.ts
│   │   └── tsconfig.json
│   ├── docs
│   │   ├── archive
│   │   │   ├── ai-client-utils-example.mdx
│   │   │   ├── ai-development-workflow.mdx
│   │   │   ├── command-reference.mdx
│   │   │   ├── configuration.mdx
│   │   │   ├── cursor-setup.mdx
│   │   │   ├── examples.mdx
│   │   │   └── Installation.mdx
│   │   ├── best-practices
│   │   │   ├── advanced-tasks.mdx
│   │   │   ├── configuration-advanced.mdx
│   │   │   └── index.mdx
│   │   ├── capabilities
│   │   │   ├── cli-root-commands.mdx
│   │   │   ├── index.mdx
│   │   │   ├── mcp.mdx
│   │   │   └── task-structure.mdx
│   │   ├── CHANGELOG.md
│   │   ├── docs.json
│   │   ├── favicon.svg
│   │   ├── getting-started
│   │   │   ├── contribute.mdx
│   │   │   ├── faq.mdx
│   │   │   └── quick-start
│   │   │       ├── configuration-quick.mdx
│   │   │       ├── execute-quick.mdx
│   │   │       ├── installation.mdx
│   │   │       ├── moving-forward.mdx
│   │   │       ├── prd-quick.mdx
│   │   │       ├── quick-start.mdx
│   │   │       ├── requirements.mdx
│   │   │       ├── rules-quick.mdx
│   │   │       └── tasks-quick.mdx
│   │   ├── introduction.mdx
│   │   ├── licensing.md
│   │   ├── logo
│   │   │   ├── dark.svg
│   │   │   ├── light.svg
│   │   │   └── task-master-logo.png
│   │   ├── package.json
│   │   ├── README.md
│   │   ├── style.css
│   │   ├── vercel.json
│   │   └── whats-new.mdx
│   └── extension
│       ├── .vscodeignore
│       ├── assets
│       │   ├── banner.png
│       │   ├── icon-dark.svg
│       │   ├── icon-light.svg
│       │   ├── icon.png
│       │   ├── screenshots
│       │   │   ├── kanban-board.png
│       │   │   └── task-details.png
│       │   └── sidebar-icon.svg
│       ├── CHANGELOG.md
│       ├── components.json
│       ├── docs
│       │   ├── extension-CI-setup.md
│       │   └── extension-development-guide.md
│       ├── esbuild.js
│       ├── LICENSE
│       ├── package.json
│       ├── package.mjs
│       ├── package.publish.json
│       ├── README.md
│       ├── src
│       │   ├── components
│       │   │   ├── ConfigView.tsx
│       │   │   ├── constants.ts
│       │   │   ├── TaskDetails
│       │   │   │   ├── AIActionsSection.tsx
│       │   │   │   ├── DetailsSection.tsx
│       │   │   │   ├── PriorityBadge.tsx
│       │   │   │   ├── SubtasksSection.tsx
│       │   │   │   ├── TaskMetadataSidebar.tsx
│       │   │   │   └── useTaskDetails.ts
│       │   │   ├── TaskDetailsView.tsx
│       │   │   ├── TaskMasterLogo.tsx
│       │   │   └── ui
│       │   │       ├── badge.tsx
│       │   │       ├── breadcrumb.tsx
│       │   │       ├── button.tsx
│       │   │       ├── card.tsx
│       │   │       ├── collapsible.tsx
│       │   │       ├── CollapsibleSection.tsx
│       │   │       ├── dropdown-menu.tsx
│       │   │       ├── label.tsx
│       │   │       ├── scroll-area.tsx
│       │   │       ├── separator.tsx
│       │   │       ├── shadcn-io
│       │   │       │   └── kanban
│       │   │       │       └── index.tsx
│       │   │       └── textarea.tsx
│       │   ├── extension.ts
│       │   ├── index.ts
│       │   ├── lib
│       │   │   └── utils.ts
│       │   ├── services
│       │   │   ├── config-service.ts
│       │   │   ├── error-handler.ts
│       │   │   ├── notification-preferences.ts
│       │   │   ├── polling-service.ts
│       │   │   ├── polling-strategies.ts
│       │   │   ├── sidebar-webview-manager.ts
│       │   │   ├── task-repository.ts
│       │   │   ├── terminal-manager.ts
│       │   │   └── webview-manager.ts
│       │   ├── test
│       │   │   └── extension.test.ts
│       │   ├── utils
│       │   │   ├── configManager.ts
│       │   │   ├── connectionManager.ts
│       │   │   ├── errorHandler.ts
│       │   │   ├── event-emitter.ts
│       │   │   ├── logger.ts
│       │   │   ├── mcpClient.ts
│       │   │   ├── notificationPreferences.ts
│       │   │   └── task-master-api
│       │   │       ├── cache
│       │   │       │   └── cache-manager.ts
│       │   │       ├── index.ts
│       │   │       ├── mcp-client.ts
│       │   │       ├── transformers
│       │   │       │   └── task-transformer.ts
│       │   │       └── types
│       │   │           └── index.ts
│       │   └── webview
│       │       ├── App.tsx
│       │       ├── components
│       │       │   ├── AppContent.tsx
│       │       │   ├── EmptyState.tsx
│       │       │   ├── ErrorBoundary.tsx
│       │       │   ├── PollingStatus.tsx
│       │       │   ├── PriorityBadge.tsx
│       │       │   ├── SidebarView.tsx
│       │       │   ├── TagDropdown.tsx
│       │       │   ├── TaskCard.tsx
│       │       │   ├── TaskEditModal.tsx
│       │       │   ├── TaskMasterKanban.tsx
│       │       │   ├── ToastContainer.tsx
│       │       │   └── ToastNotification.tsx
│       │       ├── constants
│       │       │   └── index.ts
│       │       ├── contexts
│       │       │   └── VSCodeContext.tsx
│       │       ├── hooks
│       │       │   ├── useTaskQueries.ts
│       │       │   ├── useVSCodeMessages.ts
│       │       │   └── useWebviewHeight.ts
│       │       ├── index.css
│       │       ├── index.tsx
│       │       ├── providers
│       │       │   └── QueryProvider.tsx
│       │       ├── reducers
│       │       │   └── appReducer.ts
│       │       ├── sidebar.tsx
│       │       ├── types
│       │       │   └── index.ts
│       │       └── utils
│       │           ├── logger.ts
│       │           └── toast.ts
│       └── tsconfig.json
├── assets
│   ├── .windsurfrules
│   ├── AGENTS.md
│   ├── claude
│   │   ├── agents
│   │   │   ├── task-checker.md
│   │   │   ├── task-executor.md
│   │   │   └── task-orchestrator.md
│   │   ├── commands
│   │   │   └── tm
│   │   │       ├── add-dependency
│   │   │       │   └── add-dependency.md
│   │   │       ├── add-subtask
│   │   │       │   ├── add-subtask.md
│   │   │       │   └── convert-task-to-subtask.md
│   │   │       ├── add-task
│   │   │       │   └── add-task.md
│   │   │       ├── analyze-complexity
│   │   │       │   └── analyze-complexity.md
│   │   │       ├── clear-subtasks
│   │   │       │   ├── clear-all-subtasks.md
│   │   │       │   └── clear-subtasks.md
│   │   │       ├── complexity-report
│   │   │       │   └── complexity-report.md
│   │   │       ├── expand
│   │   │       │   ├── expand-all-tasks.md
│   │   │       │   └── expand-task.md
│   │   │       ├── fix-dependencies
│   │   │       │   └── fix-dependencies.md
│   │   │       ├── generate
│   │   │       │   └── generate-tasks.md
│   │   │       ├── help.md
│   │   │       ├── init
│   │   │       │   ├── init-project-quick.md
│   │   │       │   └── init-project.md
│   │   │       ├── learn.md
│   │   │       ├── list
│   │   │       │   ├── list-tasks-by-status.md
│   │   │       │   ├── list-tasks-with-subtasks.md
│   │   │       │   └── list-tasks.md
│   │   │       ├── models
│   │   │       │   ├── setup-models.md
│   │   │       │   └── view-models.md
│   │   │       ├── next
│   │   │       │   └── next-task.md
│   │   │       ├── parse-prd
│   │   │       │   ├── parse-prd-with-research.md
│   │   │       │   └── parse-prd.md
│   │   │       ├── remove-dependency
│   │   │       │   └── remove-dependency.md
│   │   │       ├── remove-subtask
│   │   │       │   └── remove-subtask.md
│   │   │       ├── remove-subtasks
│   │   │       │   ├── remove-all-subtasks.md
│   │   │       │   └── remove-subtasks.md
│   │   │       ├── remove-task
│   │   │       │   └── remove-task.md
│   │   │       ├── set-status
│   │   │       │   ├── to-cancelled.md
│   │   │       │   ├── to-deferred.md
│   │   │       │   ├── to-done.md
│   │   │       │   ├── to-in-progress.md
│   │   │       │   ├── to-pending.md
│   │   │       │   └── to-review.md
│   │   │       ├── setup
│   │   │       │   ├── install-taskmaster.md
│   │   │       │   └── quick-install-taskmaster.md
│   │   │       ├── show
│   │   │       │   └── show-task.md
│   │   │       ├── status
│   │   │       │   └── project-status.md
│   │   │       ├── sync-readme
│   │   │       │   └── sync-readme.md
│   │   │       ├── tm-main.md
│   │   │       ├── update
│   │   │       │   ├── update-single-task.md
│   │   │       │   ├── update-task.md
│   │   │       │   └── update-tasks-from-id.md
│   │   │       ├── utils
│   │   │       │   └── analyze-project.md
│   │   │       ├── validate-dependencies
│   │   │       │   └── validate-dependencies.md
│   │   │       └── workflows
│   │   │           ├── auto-implement-tasks.md
│   │   │           ├── command-pipeline.md
│   │   │           └── smart-workflow.md
│   │   └── TM_COMMANDS_GUIDE.md
│   ├── config.json
│   ├── env.example
│   ├── example_prd.txt
│   ├── gitignore
│   ├── kiro-hooks
│   │   ├── tm-code-change-task-tracker.kiro.hook
│   │   ├── tm-complexity-analyzer.kiro.hook
│   │   ├── tm-daily-standup-assistant.kiro.hook
│   │   ├── tm-git-commit-task-linker.kiro.hook
│   │   ├── tm-pr-readiness-checker.kiro.hook
│   │   ├── tm-task-dependency-auto-progression.kiro.hook
│   │   └── tm-test-success-task-completer.kiro.hook
│   ├── roocode
│   │   ├── .roo
│   │   │   ├── rules-architect
│   │   │   │   └── architect-rules
│   │   │   ├── rules-ask
│   │   │   │   └── ask-rules
│   │   │   ├── rules-code
│   │   │   │   └── code-rules
│   │   │   ├── rules-debug
│   │   │   │   └── debug-rules
│   │   │   ├── rules-orchestrator
│   │   │   │   └── orchestrator-rules
│   │   │   └── rules-test
│   │   │       └── test-rules
│   │   └── .roomodes
│   ├── rules
│   │   ├── cursor_rules.mdc
│   │   ├── dev_workflow.mdc
│   │   ├── self_improve.mdc
│   │   ├── taskmaster_hooks_workflow.mdc
│   │   └── taskmaster.mdc
│   └── scripts_README.md
├── bin
│   └── task-master.js
├── biome.json
├── CHANGELOG.md
├── CLAUDE.md
├── context
│   ├── chats
│   │   ├── add-task-dependencies-1.md
│   │   └── max-min-tokens.txt.md
│   ├── fastmcp-core.txt
│   ├── fastmcp-docs.txt
│   ├── MCP_INTEGRATION.md
│   ├── mcp-js-sdk-docs.txt
│   ├── mcp-protocol-repo.txt
│   ├── mcp-protocol-schema-03262025.json
│   └── mcp-protocol-spec.txt
├── CONTRIBUTING.md
├── docs
│   ├── CLI-COMMANDER-PATTERN.md
│   ├── command-reference.md
│   ├── configuration.md
│   ├── contributor-docs
│   │   └── testing-roo-integration.md
│   ├── cross-tag-task-movement.md
│   ├── examples
│   │   └── claude-code-usage.md
│   ├── examples.md
│   ├── licensing.md
│   ├── mcp-provider-guide.md
│   ├── mcp-provider.md
│   ├── migration-guide.md
│   ├── models.md
│   ├── providers
│   │   └── gemini-cli.md
│   ├── README.md
│   ├── scripts
│   │   └── models-json-to-markdown.js
│   ├── task-structure.md
│   └── tutorial.md
├── images
│   └── logo.png
├── index.js
├── jest.config.js
├── jest.resolver.cjs
├── LICENSE
├── llms-install.md
├── mcp-server
│   ├── server.js
│   └── src
│       ├── core
│       │   ├── __tests__
│       │   │   └── context-manager.test.js
│       │   ├── context-manager.js
│       │   ├── direct-functions
│       │   │   ├── add-dependency.js
│       │   │   ├── add-subtask.js
│       │   │   ├── add-tag.js
│       │   │   ├── add-task.js
│       │   │   ├── analyze-task-complexity.js
│       │   │   ├── cache-stats.js
│       │   │   ├── clear-subtasks.js
│       │   │   ├── complexity-report.js
│       │   │   ├── copy-tag.js
│       │   │   ├── create-tag-from-branch.js
│       │   │   ├── delete-tag.js
│       │   │   ├── expand-all-tasks.js
│       │   │   ├── expand-task.js
│       │   │   ├── fix-dependencies.js
│       │   │   ├── generate-task-files.js
│       │   │   ├── initialize-project.js
│       │   │   ├── list-tags.js
│       │   │   ├── list-tasks.js
│       │   │   ├── models.js
│       │   │   ├── move-task-cross-tag.js
│       │   │   ├── move-task.js
│       │   │   ├── next-task.js
│       │   │   ├── parse-prd.js
│       │   │   ├── remove-dependency.js
│       │   │   ├── remove-subtask.js
│       │   │   ├── remove-task.js
│       │   │   ├── rename-tag.js
│       │   │   ├── research.js
│       │   │   ├── response-language.js
│       │   │   ├── rules.js
│       │   │   ├── scope-down.js
│       │   │   ├── scope-up.js
│       │   │   ├── set-task-status.js
│       │   │   ├── show-task.js
│       │   │   ├── update-subtask-by-id.js
│       │   │   ├── update-task-by-id.js
│       │   │   ├── update-tasks.js
│       │   │   ├── use-tag.js
│       │   │   └── validate-dependencies.js
│       │   ├── task-master-core.js
│       │   └── utils
│       │       ├── env-utils.js
│       │       └── path-utils.js
│       ├── custom-sdk
│       │   ├── errors.js
│       │   ├── index.js
│       │   ├── json-extractor.js
│       │   ├── language-model.js
│       │   ├── message-converter.js
│       │   └── schema-converter.js
│       ├── index.js
│       ├── logger.js
│       ├── providers
│       │   └── mcp-provider.js
│       └── tools
│           ├── add-dependency.js
│           ├── add-subtask.js
│           ├── add-tag.js
│           ├── add-task.js
│           ├── analyze.js
│           ├── clear-subtasks.js
│           ├── complexity-report.js
│           ├── copy-tag.js
│           ├── delete-tag.js
│           ├── expand-all.js
│           ├── expand-task.js
│           ├── fix-dependencies.js
│           ├── generate.js
│           ├── get-operation-status.js
│           ├── get-task.js
│           ├── get-tasks.js
│           ├── index.js
│           ├── initialize-project.js
│           ├── list-tags.js
│           ├── models.js
│           ├── move-task.js
│           ├── next-task.js
│           ├── parse-prd.js
│           ├── remove-dependency.js
│           ├── remove-subtask.js
│           ├── remove-task.js
│           ├── rename-tag.js
│           ├── research.js
│           ├── response-language.js
│           ├── rules.js
│           ├── scope-down.js
│           ├── scope-up.js
│           ├── set-task-status.js
│           ├── update-subtask.js
│           ├── update-task.js
│           ├── update.js
│           ├── use-tag.js
│           ├── utils.js
│           └── validate-dependencies.js
├── mcp-test.js
├── output.json
├── package-lock.json
├── package.json
├── packages
│   ├── build-config
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── src
│   │   │   └── tsdown.base.ts
│   │   └── tsconfig.json
│   └── tm-core
│       ├── .gitignore
│       ├── CHANGELOG.md
│       ├── docs
│       │   └── listTasks-architecture.md
│       ├── package.json
│       ├── POC-STATUS.md
│       ├── README.md
│       ├── src
│       │   ├── auth
│       │   │   ├── auth-manager.test.ts
│       │   │   ├── auth-manager.ts
│       │   │   ├── config.ts
│       │   │   ├── credential-store.test.ts
│       │   │   ├── credential-store.ts
│       │   │   ├── index.ts
│       │   │   ├── oauth-service.ts
│       │   │   ├── supabase-session-storage.ts
│       │   │   └── types.ts
│       │   ├── clients
│       │   │   ├── index.ts
│       │   │   └── supabase-client.ts
│       │   ├── config
│       │   │   ├── config-manager.spec.ts
│       │   │   ├── config-manager.ts
│       │   │   ├── index.ts
│       │   │   └── services
│       │   │       ├── config-loader.service.spec.ts
│       │   │       ├── config-loader.service.ts
│       │   │       ├── config-merger.service.spec.ts
│       │   │       ├── config-merger.service.ts
│       │   │       ├── config-persistence.service.spec.ts
│       │   │       ├── config-persistence.service.ts
│       │   │       ├── environment-config-provider.service.spec.ts
│       │   │       ├── environment-config-provider.service.ts
│       │   │       ├── index.ts
│       │   │       ├── runtime-state-manager.service.spec.ts
│       │   │       └── runtime-state-manager.service.ts
│       │   ├── constants
│       │   │   └── index.ts
│       │   ├── entities
│       │   │   └── task.entity.ts
│       │   ├── errors
│       │   │   ├── index.ts
│       │   │   └── task-master-error.ts
│       │   ├── executors
│       │   │   ├── base-executor.ts
│       │   │   ├── claude-executor.ts
│       │   │   ├── executor-factory.ts
│       │   │   ├── executor-service.ts
│       │   │   ├── index.ts
│       │   │   └── types.ts
│       │   ├── index.ts
│       │   ├── interfaces
│       │   │   ├── ai-provider.interface.ts
│       │   │   ├── configuration.interface.ts
│       │   │   ├── index.ts
│       │   │   └── storage.interface.ts
│       │   ├── logger
│       │   │   ├── factory.ts
│       │   │   ├── index.ts
│       │   │   └── logger.ts
│       │   ├── mappers
│       │   │   └── TaskMapper.ts
│       │   ├── parser
│       │   │   └── index.ts
│       │   ├── providers
│       │   │   ├── ai
│       │   │   │   ├── base-provider.ts
│       │   │   │   └── index.ts
│       │   │   └── index.ts
│       │   ├── repositories
│       │   │   ├── supabase-task-repository.ts
│       │   │   └── task-repository.interface.ts
│       │   ├── services
│       │   │   ├── index.ts
│       │   │   ├── organization.service.ts
│       │   │   ├── task-execution-service.ts
│       │   │   └── task-service.ts
│       │   ├── storage
│       │   │   ├── api-storage.ts
│       │   │   ├── file-storage
│       │   │   │   ├── file-operations.ts
│       │   │   │   ├── file-storage.ts
│       │   │   │   ├── format-handler.ts
│       │   │   │   ├── index.ts
│       │   │   │   └── path-resolver.ts
│       │   │   ├── index.ts
│       │   │   └── storage-factory.ts
│       │   ├── subpath-exports.test.ts
│       │   ├── task-master-core.ts
│       │   ├── types
│       │   │   ├── database.types.ts
│       │   │   ├── index.ts
│       │   │   └── legacy.ts
│       │   └── utils
│       │       ├── id-generator.ts
│       │       └── index.ts
│       ├── tests
│       │   ├── integration
│       │   │   └── list-tasks.test.ts
│       │   ├── mocks
│       │   │   └── mock-provider.ts
│       │   ├── setup.ts
│       │   └── unit
│       │       ├── base-provider.test.ts
│       │       ├── executor.test.ts
│       │       └── smoke.test.ts
│       ├── tsconfig.json
│       └── vitest.config.ts
├── README-task-master.md
├── README.md
├── scripts
│   ├── dev.js
│   ├── init.js
│   ├── modules
│   │   ├── ai-services-unified.js
│   │   ├── commands.js
│   │   ├── config-manager.js
│   │   ├── dependency-manager.js
│   │   ├── index.js
│   │   ├── prompt-manager.js
│   │   ├── supported-models.json
│   │   ├── sync-readme.js
│   │   ├── task-manager
│   │   │   ├── add-subtask.js
│   │   │   ├── add-task.js
│   │   │   ├── analyze-task-complexity.js
│   │   │   ├── clear-subtasks.js
│   │   │   ├── expand-all-tasks.js
│   │   │   ├── expand-task.js
│   │   │   ├── find-next-task.js
│   │   │   ├── generate-task-files.js
│   │   │   ├── is-task-dependent.js
│   │   │   ├── list-tasks.js
│   │   │   ├── migrate.js
│   │   │   ├── models.js
│   │   │   ├── move-task.js
│   │   │   ├── parse-prd
│   │   │   │   ├── index.js
│   │   │   │   ├── parse-prd-config.js
│   │   │   │   ├── parse-prd-helpers.js
│   │   │   │   ├── parse-prd-non-streaming.js
│   │   │   │   ├── parse-prd-streaming.js
│   │   │   │   └── parse-prd.js
│   │   │   ├── remove-subtask.js
│   │   │   ├── remove-task.js
│   │   │   ├── research.js
│   │   │   ├── response-language.js
│   │   │   ├── scope-adjustment.js
│   │   │   ├── set-task-status.js
│   │   │   ├── tag-management.js
│   │   │   ├── task-exists.js
│   │   │   ├── update-single-task-status.js
│   │   │   ├── update-subtask-by-id.js
│   │   │   ├── update-task-by-id.js
│   │   │   └── update-tasks.js
│   │   ├── task-manager.js
│   │   ├── ui.js
│   │   ├── update-config-tokens.js
│   │   ├── utils
│   │   │   ├── contextGatherer.js
│   │   │   ├── fuzzyTaskSearch.js
│   │   │   └── git-utils.js
│   │   └── utils.js
│   ├── task-complexity-report.json
│   ├── test-claude-errors.js
│   └── test-claude.js
├── src
│   ├── ai-providers
│   │   ├── anthropic.js
│   │   ├── azure.js
│   │   ├── base-provider.js
│   │   ├── bedrock.js
│   │   ├── claude-code.js
│   │   ├── custom-sdk
│   │   │   ├── claude-code
│   │   │   │   ├── errors.js
│   │   │   │   ├── index.js
│   │   │   │   ├── json-extractor.js
│   │   │   │   ├── language-model.js
│   │   │   │   ├── message-converter.js
│   │   │   │   └── types.js
│   │   │   └── grok-cli
│   │   │       ├── errors.js
│   │   │       ├── index.js
│   │   │       ├── json-extractor.js
│   │   │       ├── language-model.js
│   │   │       ├── message-converter.js
│   │   │       └── types.js
│   │   ├── gemini-cli.js
│   │   ├── google-vertex.js
│   │   ├── google.js
│   │   ├── grok-cli.js
│   │   ├── groq.js
│   │   ├── index.js
│   │   ├── ollama.js
│   │   ├── openai.js
│   │   ├── openrouter.js
│   │   ├── perplexity.js
│   │   └── xai.js
│   ├── constants
│   │   ├── commands.js
│   │   ├── paths.js
│   │   ├── profiles.js
│   │   ├── providers.js
│   │   ├── rules-actions.js
│   │   ├── task-priority.js
│   │   └── task-status.js
│   ├── profiles
│   │   ├── amp.js
│   │   ├── base-profile.js
│   │   ├── claude.js
│   │   ├── cline.js
│   │   ├── codex.js
│   │   ├── cursor.js
│   │   ├── gemini.js
│   │   ├── index.js
│   │   ├── kilo.js
│   │   ├── kiro.js
│   │   ├── opencode.js
│   │   ├── roo.js
│   │   ├── trae.js
│   │   ├── vscode.js
│   │   ├── windsurf.js
│   │   └── zed.js
│   ├── progress
│   │   ├── base-progress-tracker.js
│   │   ├── cli-progress-factory.js
│   │   ├── parse-prd-tracker.js
│   │   ├── progress-tracker-builder.js
│   │   └── tracker-ui.js
│   ├── prompts
│   │   ├── add-task.json
│   │   ├── analyze-complexity.json
│   │   ├── expand-task.json
│   │   ├── parse-prd.json
│   │   ├── README.md
│   │   ├── research.json
│   │   ├── schemas
│   │   │   ├── parameter.schema.json
│   │   │   ├── prompt-template.schema.json
│   │   │   ├── README.md
│   │   │   └── variant.schema.json
│   │   ├── update-subtask.json
│   │   ├── update-task.json
│   │   └── update-tasks.json
│   ├── provider-registry
│   │   └── index.js
│   ├── task-master.js
│   ├── ui
│   │   ├── confirm.js
│   │   ├── indicators.js
│   │   └── parse-prd.js
│   └── utils
│       ├── asset-resolver.js
│       ├── create-mcp-config.js
│       ├── format.js
│       ├── getVersion.js
│       ├── logger-utils.js
│       ├── manage-gitignore.js
│       ├── path-utils.js
│       ├── profiles.js
│       ├── rule-transformer.js
│       ├── stream-parser.js
│       └── timeout-manager.js
├── test-clean-tags.js
├── test-config-manager.js
├── test-prd.txt
├── test-tag-functions.js
├── test-version-check-full.js
├── test-version-check.js
├── tests
│   ├── e2e
│   │   ├── e2e_helpers.sh
│   │   ├── parse_llm_output.cjs
│   │   ├── run_e2e.sh
│   │   ├── run_fallback_verification.sh
│   │   └── test_llm_analysis.sh
│   ├── fixture
│   │   └── test-tasks.json
│   ├── fixtures
│   │   ├── .taskmasterconfig
│   │   ├── sample-claude-response.js
│   │   ├── sample-prd.txt
│   │   └── sample-tasks.js
│   ├── integration
│   │   ├── claude-code-optional.test.js
│   │   ├── cli
│   │   │   ├── commands.test.js
│   │   │   ├── complex-cross-tag-scenarios.test.js
│   │   │   └── move-cross-tag.test.js
│   │   ├── manage-gitignore.test.js
│   │   ├── mcp-server
│   │   │   └── direct-functions.test.js
│   │   ├── move-task-cross-tag.integration.test.js
│   │   ├── move-task-simple.integration.test.js
│   │   └── profiles
│   │       ├── amp-init-functionality.test.js
│   │       ├── claude-init-functionality.test.js
│   │       ├── cline-init-functionality.test.js
│   │       ├── codex-init-functionality.test.js
│   │       ├── cursor-init-functionality.test.js
│   │       ├── gemini-init-functionality.test.js
│   │       ├── opencode-init-functionality.test.js
│   │       ├── roo-files-inclusion.test.js
│   │       ├── roo-init-functionality.test.js
│   │       ├── rules-files-inclusion.test.js
│   │       ├── trae-init-functionality.test.js
│   │       ├── vscode-init-functionality.test.js
│   │       └── windsurf-init-functionality.test.js
│   ├── manual
│   │   ├── progress
│   │   │   ├── parse-prd-analysis.js
│   │   │   ├── test-parse-prd.js
│   │   │   └── TESTING_GUIDE.md
│   │   └── prompts
│   │       ├── prompt-test.js
│   │       └── README.md
│   ├── README.md
│   ├── setup.js
│   └── unit
│       ├── ai-providers
│       │   ├── claude-code.test.js
│       │   ├── custom-sdk
│       │   │   └── claude-code
│       │   │       └── language-model.test.js
│       │   ├── gemini-cli.test.js
│       │   ├── mcp-components.test.js
│       │   └── openai.test.js
│       ├── ai-services-unified.test.js
│       ├── commands.test.js
│       ├── config-manager.test.js
│       ├── config-manager.test.mjs
│       ├── dependency-manager.test.js
│       ├── init.test.js
│       ├── initialize-project.test.js
│       ├── kebab-case-validation.test.js
│       ├── manage-gitignore.test.js
│       ├── mcp
│       │   └── tools
│       │       ├── __mocks__
│       │       │   └── move-task.js
│       │       ├── add-task.test.js
│       │       ├── analyze-complexity.test.js
│       │       ├── expand-all.test.js
│       │       ├── get-tasks.test.js
│       │       ├── initialize-project.test.js
│       │       ├── move-task-cross-tag-options.test.js
│       │       ├── move-task-cross-tag.test.js
│       │       └── remove-task.test.js
│       ├── mcp-providers
│       │   ├── mcp-components.test.js
│       │   └── mcp-provider.test.js
│       ├── parse-prd.test.js
│       ├── profiles
│       │   ├── amp-integration.test.js
│       │   ├── claude-integration.test.js
│       │   ├── cline-integration.test.js
│       │   ├── codex-integration.test.js
│       │   ├── cursor-integration.test.js
│       │   ├── gemini-integration.test.js
│       │   ├── kilo-integration.test.js
│       │   ├── kiro-integration.test.js
│       │   ├── mcp-config-validation.test.js
│       │   ├── opencode-integration.test.js
│       │   ├── profile-safety-check.test.js
│       │   ├── roo-integration.test.js
│       │   ├── rule-transformer-cline.test.js
│       │   ├── rule-transformer-cursor.test.js
│       │   ├── rule-transformer-gemini.test.js
│       │   ├── rule-transformer-kilo.test.js
│       │   ├── rule-transformer-kiro.test.js
│       │   ├── rule-transformer-opencode.test.js
│       │   ├── rule-transformer-roo.test.js
│       │   ├── rule-transformer-trae.test.js
│       │   ├── rule-transformer-vscode.test.js
│       │   ├── rule-transformer-windsurf.test.js
│       │   ├── rule-transformer-zed.test.js
│       │   ├── rule-transformer.test.js
│       │   ├── selective-profile-removal.test.js
│       │   ├── subdirectory-support.test.js
│       │   ├── trae-integration.test.js
│       │   ├── vscode-integration.test.js
│       │   ├── windsurf-integration.test.js
│       │   └── zed-integration.test.js
│       ├── progress
│       │   └── base-progress-tracker.test.js
│       ├── prompt-manager.test.js
│       ├── prompts
│       │   └── expand-task-prompt.test.js
│       ├── providers
│       │   └── provider-registry.test.js
│       ├── scripts
│       │   └── modules
│       │       ├── commands
│       │       │   ├── move-cross-tag.test.js
│       │       │   └── README.md
│       │       ├── dependency-manager
│       │       │   ├── circular-dependencies.test.js
│       │       │   ├── cross-tag-dependencies.test.js
│       │       │   └── fix-dependencies-command.test.js
│       │       ├── task-manager
│       │       │   ├── add-subtask.test.js
│       │       │   ├── add-task.test.js
│       │       │   ├── analyze-task-complexity.test.js
│       │       │   ├── clear-subtasks.test.js
│       │       │   ├── complexity-report-tag-isolation.test.js
│       │       │   ├── expand-all-tasks.test.js
│       │       │   ├── expand-task.test.js
│       │       │   ├── find-next-task.test.js
│       │       │   ├── generate-task-files.test.js
│       │       │   ├── list-tasks.test.js
│       │       │   ├── move-task-cross-tag.test.js
│       │       │   ├── move-task.test.js
│       │       │   ├── parse-prd.test.js
│       │       │   ├── remove-subtask.test.js
│       │       │   ├── remove-task.test.js
│       │       │   ├── research.test.js
│       │       │   ├── scope-adjustment.test.js
│       │       │   ├── set-task-status.test.js
│       │       │   ├── setup.js
│       │       │   ├── update-single-task-status.test.js
│       │       │   ├── update-subtask-by-id.test.js
│       │       │   ├── update-task-by-id.test.js
│       │       │   └── update-tasks.test.js
│       │       ├── ui
│       │       │   └── cross-tag-error-display.test.js
│       │       └── utils-tag-aware-paths.test.js
│       ├── task-finder.test.js
│       ├── task-manager
│       │   ├── clear-subtasks.test.js
│       │   ├── move-task.test.js
│       │   ├── tag-boundary.test.js
│       │   └── tag-management.test.js
│       ├── task-master.test.js
│       ├── ui
│       │   └── indicators.test.js
│       ├── ui.test.js
│       ├── utils-strip-ansi.test.js
│       └── utils.test.js
├── tsconfig.json
├── tsdown.config.ts
└── turbo.json
```

# Files

--------------------------------------------------------------------------------
/scripts/modules/supported-models.json:
--------------------------------------------------------------------------------

```json
   1 | {
   2 | 	"anthropic": [
   3 | 		{
   4 | 			"id": "claude-sonnet-4-20250514",
   5 | 			"swe_score": 0.727,
   6 | 			"cost_per_1m_tokens": {
   7 | 				"input": 3.0,
   8 | 				"output": 15.0
   9 | 			},
  10 | 			"allowed_roles": ["main", "fallback"],
  11 | 			"max_tokens": 64000,
  12 | 			"supported": true
  13 | 		},
  14 | 		{
  15 | 			"id": "claude-opus-4-20250514",
  16 | 			"swe_score": 0.725,
  17 | 			"cost_per_1m_tokens": {
  18 | 				"input": 15.0,
  19 | 				"output": 75.0
  20 | 			},
  21 | 			"allowed_roles": ["main", "fallback"],
  22 | 			"max_tokens": 32000,
  23 | 			"supported": true
  24 | 		},
  25 | 		{
  26 | 			"id": "claude-3-7-sonnet-20250219",
  27 | 			"swe_score": 0.623,
  28 | 			"cost_per_1m_tokens": {
  29 | 				"input": 3.0,
  30 | 				"output": 15.0
  31 | 			},
  32 | 			"allowed_roles": ["main", "fallback"],
  33 | 			"max_tokens": 120000,
  34 | 			"supported": true
  35 | 		},
  36 | 		{
  37 | 			"id": "claude-3-5-sonnet-20241022",
  38 | 			"swe_score": 0.49,
  39 | 			"cost_per_1m_tokens": {
  40 | 				"input": 3.0,
  41 | 				"output": 15.0
  42 | 			},
  43 | 			"allowed_roles": ["main", "fallback"],
  44 | 			"max_tokens": 8192,
  45 | 			"supported": true
  46 | 		}
  47 | 	],
  48 | 	"claude-code": [
  49 | 		{
  50 | 			"id": "opus",
  51 | 			"swe_score": 0.725,
  52 | 			"cost_per_1m_tokens": {
  53 | 				"input": 0,
  54 | 				"output": 0
  55 | 			},
  56 | 			"allowed_roles": ["main", "fallback", "research"],
  57 | 			"max_tokens": 32000,
  58 | 			"supported": true
  59 | 		},
  60 | 		{
  61 | 			"id": "sonnet",
  62 | 			"swe_score": 0.727,
  63 | 			"cost_per_1m_tokens": {
  64 | 				"input": 0,
  65 | 				"output": 0
  66 | 			},
  67 | 			"allowed_roles": ["main", "fallback", "research"],
  68 | 			"max_tokens": 64000,
  69 | 			"supported": true
  70 | 		}
  71 | 	],
  72 | 	"mcp": [
  73 | 		{
  74 | 			"id": "mcp-sampling",
  75 | 			"swe_score": null,
  76 | 			"cost_per_1m_tokens": {
  77 | 				"input": 0,
  78 | 				"output": 0
  79 | 			},
  80 | 			"allowed_roles": ["main", "fallback", "research"],
  81 | 			"max_tokens": 100000,
  82 | 			"supported": true
  83 | 		}
  84 | 	],
  85 | 	"gemini-cli": [
  86 | 		{
  87 | 			"id": "gemini-2.5-pro",
  88 | 			"swe_score": 0.72,
  89 | 			"cost_per_1m_tokens": {
  90 | 				"input": 0,
  91 | 				"output": 0
  92 | 			},
  93 | 			"allowed_roles": ["main", "fallback", "research"],
  94 | 			"max_tokens": 65536,
  95 | 			"supported": true
  96 | 		},
  97 | 		{
  98 | 			"id": "gemini-2.5-flash",
  99 | 			"swe_score": 0.71,
 100 | 			"cost_per_1m_tokens": {
 101 | 				"input": 0,
 102 | 				"output": 0
 103 | 			},
 104 | 			"allowed_roles": ["main", "fallback", "research"],
 105 | 			"max_tokens": 65536,
 106 | 			"supported": true
 107 | 		}
 108 | 	],
 109 | 	"grok-cli": [
 110 | 		{
 111 | 			"id": "grok-4-latest",
 112 | 			"name": "Grok 4 Latest",
 113 | 			"swe_score": 0.7,
 114 | 			"cost_per_1m_tokens": {
 115 | 				"input": 0,
 116 | 				"output": 0
 117 | 			},
 118 | 			"allowed_roles": ["main", "fallback", "research"],
 119 | 			"max_tokens": 131072,
 120 | 			"supported": true
 121 | 		},
 122 | 		{
 123 | 			"id": "grok-3-latest",
 124 | 			"name": "Grok 3 Latest",
 125 | 			"swe_score": 0.65,
 126 | 			"cost_per_1m_tokens": {
 127 | 				"input": 0,
 128 | 				"output": 0
 129 | 			},
 130 | 			"allowed_roles": ["main", "fallback", "research"],
 131 | 			"max_tokens": 131072,
 132 | 			"supported": true
 133 | 		},
 134 | 		{
 135 | 			"id": "grok-3-fast",
 136 | 			"name": "Grok 3 Fast",
 137 | 			"swe_score": 0.6,
 138 | 			"cost_per_1m_tokens": {
 139 | 				"input": 0,
 140 | 				"output": 0
 141 | 			},
 142 | 			"allowed_roles": ["main", "fallback", "research"],
 143 | 			"max_tokens": 131072,
 144 | 			"supported": true
 145 | 		},
 146 | 		{
 147 | 			"id": "grok-3-mini-fast",
 148 | 			"name": "Grok 3 Mini Fast",
 149 | 			"swe_score": 0.55,
 150 | 			"cost_per_1m_tokens": {
 151 | 				"input": 0,
 152 | 				"output": 0
 153 | 			},
 154 | 			"allowed_roles": ["main", "fallback", "research"],
 155 | 			"max_tokens": 32768,
 156 | 			"supported": true
 157 | 		}
 158 | 	],
 159 | 	"openai": [
 160 | 		{
 161 | 			"id": "gpt-4o",
 162 | 			"swe_score": 0.332,
 163 | 			"cost_per_1m_tokens": {
 164 | 				"input": 2.5,
 165 | 				"output": 10.0
 166 | 			},
 167 | 			"allowed_roles": ["main", "fallback"],
 168 | 			"max_tokens": 16384,
 169 | 			"supported": true
 170 | 		},
 171 | 		{
 172 | 			"id": "o1",
 173 | 			"swe_score": 0.489,
 174 | 			"cost_per_1m_tokens": {
 175 | 				"input": 15.0,
 176 | 				"output": 60.0
 177 | 			},
 178 | 			"allowed_roles": ["main"],
 179 | 			"supported": true
 180 | 		},
 181 | 		{
 182 | 			"id": "o3",
 183 | 			"swe_score": 0.5,
 184 | 			"cost_per_1m_tokens": {
 185 | 				"input": 2.0,
 186 | 				"output": 8.0
 187 | 			},
 188 | 			"allowed_roles": ["main", "fallback"],
 189 | 			"max_tokens": 100000,
 190 | 			"supported": true
 191 | 		},
 192 | 		{
 193 | 			"id": "o3-mini",
 194 | 			"swe_score": 0.493,
 195 | 			"cost_per_1m_tokens": {
 196 | 				"input": 1.1,
 197 | 				"output": 4.4
 198 | 			},
 199 | 			"allowed_roles": ["main"],
 200 | 			"max_tokens": 100000,
 201 | 			"supported": true
 202 | 		},
 203 | 		{
 204 | 			"id": "o4-mini",
 205 | 			"swe_score": 0.45,
 206 | 			"cost_per_1m_tokens": {
 207 | 				"input": 1.1,
 208 | 				"output": 4.4
 209 | 			},
 210 | 			"allowed_roles": ["main", "fallback"],
 211 | 			"supported": true
 212 | 		},
 213 | 		{
 214 | 			"id": "o1-mini",
 215 | 			"swe_score": 0.4,
 216 | 			"cost_per_1m_tokens": {
 217 | 				"input": 1.1,
 218 | 				"output": 4.4
 219 | 			},
 220 | 			"allowed_roles": ["main"],
 221 | 			"supported": true
 222 | 		},
 223 | 		{
 224 | 			"id": "o1-pro",
 225 | 			"swe_score": 0,
 226 | 			"cost_per_1m_tokens": {
 227 | 				"input": 150.0,
 228 | 				"output": 600.0
 229 | 			},
 230 | 			"allowed_roles": ["main"],
 231 | 			"supported": true
 232 | 		},
 233 | 		{
 234 | 			"id": "gpt-4-5-preview",
 235 | 			"swe_score": 0.38,
 236 | 			"cost_per_1m_tokens": {
 237 | 				"input": 75.0,
 238 | 				"output": 150.0
 239 | 			},
 240 | 			"allowed_roles": ["main"],
 241 | 			"supported": true
 242 | 		},
 243 | 		{
 244 | 			"id": "gpt-4-1-mini",
 245 | 			"swe_score": 0,
 246 | 			"cost_per_1m_tokens": {
 247 | 				"input": 0.4,
 248 | 				"output": 1.6
 249 | 			},
 250 | 			"allowed_roles": ["main"],
 251 | 			"supported": true
 252 | 		},
 253 | 		{
 254 | 			"id": "gpt-4-1-nano",
 255 | 			"swe_score": 0,
 256 | 			"cost_per_1m_tokens": {
 257 | 				"input": 0.1,
 258 | 				"output": 0.4
 259 | 			},
 260 | 			"allowed_roles": ["main"],
 261 | 			"supported": true
 262 | 		},
 263 | 		{
 264 | 			"id": "gpt-4o-mini",
 265 | 			"swe_score": 0.3,
 266 | 			"cost_per_1m_tokens": {
 267 | 				"input": 0.15,
 268 | 				"output": 0.6
 269 | 			},
 270 | 			"allowed_roles": ["main"],
 271 | 			"supported": true
 272 | 		},
 273 | 		{
 274 | 			"id": "gpt-4o-search-preview",
 275 | 			"swe_score": 0.33,
 276 | 			"cost_per_1m_tokens": {
 277 | 				"input": 2.5,
 278 | 				"output": 10.0
 279 | 			},
 280 | 			"allowed_roles": ["research"],
 281 | 			"supported": true
 282 | 		},
 283 | 		{
 284 | 			"id": "gpt-4o-mini-search-preview",
 285 | 			"swe_score": 0.3,
 286 | 			"cost_per_1m_tokens": {
 287 | 				"input": 0.15,
 288 | 				"output": 0.6
 289 | 			},
 290 | 			"allowed_roles": ["research"],
 291 | 			"supported": true
 292 | 		},
 293 | 		{
 294 | 			"id": "gpt-5",
 295 | 			"swe_score": 0.749,
 296 | 			"cost_per_1m_tokens": {
 297 | 				"input": 5.0,
 298 | 				"output": 20.0
 299 | 			},
 300 | 			"allowed_roles": ["main", "fallback"],
 301 | 			"max_tokens": 100000,
 302 | 			"temperature": 1,
 303 | 			"supported": true
 304 | 		}
 305 | 	],
 306 | 	"google": [
 307 | 		{
 308 | 			"id": "gemini-2.5-pro-preview-05-06",
 309 | 			"swe_score": 0.638,
 310 | 			"cost_per_1m_tokens": null,
 311 | 			"allowed_roles": ["main", "fallback"],
 312 | 			"max_tokens": 1048000,
 313 | 			"supported": true
 314 | 		},
 315 | 		{
 316 | 			"id": "gemini-2.5-pro-preview-03-25",
 317 | 			"swe_score": 0.638,
 318 | 			"cost_per_1m_tokens": null,
 319 | 			"allowed_roles": ["main", "fallback"],
 320 | 			"max_tokens": 1048000,
 321 | 			"supported": true
 322 | 		},
 323 | 		{
 324 | 			"id": "gemini-2.5-flash-preview-04-17",
 325 | 			"swe_score": 0.604,
 326 | 			"cost_per_1m_tokens": null,
 327 | 			"allowed_roles": ["main", "fallback"],
 328 | 			"max_tokens": 1048000,
 329 | 			"supported": true
 330 | 		},
 331 | 		{
 332 | 			"id": "gemini-2.0-flash",
 333 | 			"swe_score": 0.518,
 334 | 			"cost_per_1m_tokens": {
 335 | 				"input": 0.15,
 336 | 				"output": 0.6
 337 | 			},
 338 | 			"allowed_roles": ["main", "fallback"],
 339 | 			"max_tokens": 1048000,
 340 | 			"supported": true
 341 | 		},
 342 | 		{
 343 | 			"id": "gemini-2.0-flash-lite",
 344 | 			"swe_score": 0,
 345 | 			"cost_per_1m_tokens": null,
 346 | 			"allowed_roles": ["main", "fallback"],
 347 | 			"max_tokens": 1048000,
 348 | 			"supported": true
 349 | 		}
 350 | 	],
 351 | 	"xai": [
 352 | 		{
 353 | 			"id": "grok-3",
 354 | 			"name": "Grok 3",
 355 | 			"swe_score": null,
 356 | 			"cost_per_1m_tokens": {
 357 | 				"input": 3,
 358 | 				"output": 15
 359 | 			},
 360 | 			"allowed_roles": ["main", "fallback", "research"],
 361 | 			"max_tokens": 131072,
 362 | 			"supported": true
 363 | 		},
 364 | 		{
 365 | 			"id": "grok-3-fast",
 366 | 			"name": "Grok 3 Fast",
 367 | 			"swe_score": 0,
 368 | 			"cost_per_1m_tokens": {
 369 | 				"input": 5,
 370 | 				"output": 25
 371 | 			},
 372 | 			"allowed_roles": ["main", "fallback", "research"],
 373 | 			"max_tokens": 131072,
 374 | 			"supported": true
 375 | 		},
 376 | 		{
 377 | 			"id": "grok-4",
 378 | 			"name": "Grok 4",
 379 | 			"swe_score": null,
 380 | 			"cost_per_1m_tokens": {
 381 | 				"input": 3,
 382 | 				"output": 15
 383 | 			},
 384 | 			"allowed_roles": ["main", "fallback", "research"],
 385 | 			"max_tokens": 131072,
 386 | 			"supported": true
 387 | 		}
 388 | 	],
 389 | 	"groq": [
 390 | 		{
 391 | 			"id": "moonshotai/kimi-k2-instruct",
 392 | 			"swe_score": 0.66,
 393 | 			"cost_per_1m_tokens": {
 394 | 				"input": 1.0,
 395 | 				"output": 3.0
 396 | 			},
 397 | 			"allowed_roles": ["main", "fallback"],
 398 | 			"max_tokens": 16384,
 399 | 			"supported": true
 400 | 		},
 401 | 		{
 402 | 			"id": "llama-3.3-70b-versatile",
 403 | 			"swe_score": 0.55,
 404 | 			"cost_per_1m_tokens": {
 405 | 				"input": 0.59,
 406 | 				"output": 0.79
 407 | 			},
 408 | 			"allowed_roles": ["main", "fallback", "research"],
 409 | 			"max_tokens": 32768,
 410 | 			"supported": true
 411 | 		},
 412 | 		{
 413 | 			"id": "llama-3.1-8b-instant",
 414 | 			"swe_score": 0.32,
 415 | 			"cost_per_1m_tokens": {
 416 | 				"input": 0.05,
 417 | 				"output": 0.08
 418 | 			},
 419 | 			"allowed_roles": ["main", "fallback"],
 420 | 			"max_tokens": 131072,
 421 | 			"supported": true
 422 | 		},
 423 | 		{
 424 | 			"id": "llama-4-scout",
 425 | 			"swe_score": 0.45,
 426 | 			"cost_per_1m_tokens": {
 427 | 				"input": 0.11,
 428 | 				"output": 0.34
 429 | 			},
 430 | 			"allowed_roles": ["main", "fallback", "research"],
 431 | 			"max_tokens": 32768,
 432 | 			"supported": true
 433 | 		},
 434 | 		{
 435 | 			"id": "llama-4-maverick",
 436 | 			"swe_score": 0.52,
 437 | 			"cost_per_1m_tokens": {
 438 | 				"input": 0.5,
 439 | 				"output": 0.77
 440 | 			},
 441 | 			"allowed_roles": ["main", "fallback", "research"],
 442 | 			"max_tokens": 32768,
 443 | 			"supported": true
 444 | 		},
 445 | 		{
 446 | 			"id": "mixtral-8x7b-32768",
 447 | 			"swe_score": 0.35,
 448 | 			"cost_per_1m_tokens": {
 449 | 				"input": 0.24,
 450 | 				"output": 0.24
 451 | 			},
 452 | 			"allowed_roles": ["main", "fallback"],
 453 | 			"max_tokens": 32768,
 454 | 			"supported": true
 455 | 		},
 456 | 		{
 457 | 			"id": "qwen-qwq-32b-preview",
 458 | 			"swe_score": 0.4,
 459 | 			"cost_per_1m_tokens": {
 460 | 				"input": 0.18,
 461 | 				"output": 0.18
 462 | 			},
 463 | 			"allowed_roles": ["main", "fallback", "research"],
 464 | 			"max_tokens": 32768,
 465 | 			"supported": true
 466 | 		},
 467 | 		{
 468 | 			"id": "deepseek-r1-distill-llama-70b",
 469 | 			"swe_score": 0.52,
 470 | 			"cost_per_1m_tokens": {
 471 | 				"input": 0.75,
 472 | 				"output": 0.99
 473 | 			},
 474 | 			"allowed_roles": ["main", "research"],
 475 | 			"max_tokens": 8192,
 476 | 			"supported": true
 477 | 		},
 478 | 		{
 479 | 			"id": "gemma2-9b-it",
 480 | 			"swe_score": 0.3,
 481 | 			"cost_per_1m_tokens": {
 482 | 				"input": 0.2,
 483 | 				"output": 0.2
 484 | 			},
 485 | 			"allowed_roles": ["main", "fallback"],
 486 | 			"max_tokens": 8192,
 487 | 			"supported": true
 488 | 		},
 489 | 		{
 490 | 			"id": "whisper-large-v3",
 491 | 			"swe_score": 0,
 492 | 			"cost_per_1m_tokens": {
 493 | 				"input": 0.11,
 494 | 				"output": 0
 495 | 			},
 496 | 			"allowed_roles": ["main"],
 497 | 			"max_tokens": 0,
 498 | 			"supported": true
 499 | 		}
 500 | 	],
 501 | 	"perplexity": [
 502 | 		{
 503 | 			"id": "sonar-pro",
 504 | 			"swe_score": 0,
 505 | 			"cost_per_1m_tokens": {
 506 | 				"input": 3,
 507 | 				"output": 15
 508 | 			},
 509 | 			"allowed_roles": ["main", "research"],
 510 | 			"max_tokens": 8700,
 511 | 			"supported": true
 512 | 		},
 513 | 		{
 514 | 			"id": "sonar",
 515 | 			"swe_score": 0,
 516 | 			"cost_per_1m_tokens": {
 517 | 				"input": 1,
 518 | 				"output": 1
 519 | 			},
 520 | 			"allowed_roles": ["research"],
 521 | 			"max_tokens": 8700,
 522 | 			"supported": true
 523 | 		},
 524 | 		{
 525 | 			"id": "deep-research",
 526 | 			"swe_score": 0.211,
 527 | 			"cost_per_1m_tokens": {
 528 | 				"input": 2,
 529 | 				"output": 8
 530 | 			},
 531 | 			"allowed_roles": ["research"],
 532 | 			"max_tokens": 8700,
 533 | 			"supported": true
 534 | 		},
 535 | 		{
 536 | 			"id": "sonar-reasoning-pro",
 537 | 			"swe_score": 0.211,
 538 | 			"cost_per_1m_tokens": {
 539 | 				"input": 2,
 540 | 				"output": 8
 541 | 			},
 542 | 			"allowed_roles": ["main", "research", "fallback"],
 543 | 			"max_tokens": 8700,
 544 | 			"supported": true
 545 | 		},
 546 | 		{
 547 | 			"id": "sonar-reasoning",
 548 | 			"swe_score": 0.211,
 549 | 			"cost_per_1m_tokens": {
 550 | 				"input": 1,
 551 | 				"output": 5
 552 | 			},
 553 | 			"allowed_roles": ["main", "research", "fallback"],
 554 | 			"max_tokens": 8700,
 555 | 			"supported": true
 556 | 		}
 557 | 	],
 558 | 	"openrouter": [
 559 | 		{
 560 | 			"id": "google/gemini-2.5-flash-preview-05-20",
 561 | 			"swe_score": 0,
 562 | 			"cost_per_1m_tokens": {
 563 | 				"input": 0.15,
 564 | 				"output": 0.6
 565 | 			},
 566 | 			"allowed_roles": ["main", "fallback"],
 567 | 			"max_tokens": 1048576,
 568 | 			"supported": true
 569 | 		},
 570 | 		{
 571 | 			"id": "google/gemini-2.5-flash-preview-05-20:thinking",
 572 | 			"swe_score": 0,
 573 | 			"cost_per_1m_tokens": {
 574 | 				"input": 0.15,
 575 | 				"output": 3.5
 576 | 			},
 577 | 			"allowed_roles": ["main", "fallback"],
 578 | 			"max_tokens": 1048576,
 579 | 			"supported": true
 580 | 		},
 581 | 		{
 582 | 			"id": "google/gemini-2.5-pro-exp-03-25",
 583 | 			"swe_score": 0,
 584 | 			"cost_per_1m_tokens": {
 585 | 				"input": 0,
 586 | 				"output": 0
 587 | 			},
 588 | 			"allowed_roles": ["main", "fallback"],
 589 | 			"max_tokens": 1000000,
 590 | 			"supported": true
 591 | 		},
 592 | 		{
 593 | 			"id": "deepseek/deepseek-chat-v3-0324:free",
 594 | 			"swe_score": 0,
 595 | 			"cost_per_1m_tokens": {
 596 | 				"input": 0,
 597 | 				"output": 0
 598 | 			},
 599 | 			"allowed_roles": ["main", "fallback"],
 600 | 			"max_tokens": 163840,
 601 | 			"supported": false,
 602 | 			"reason": "Free OpenRouter models are not supported due to severe rate limits, lack of tool use support, and other reliability issues that make them impractical for production use."
 603 | 		},
 604 | 		{
 605 | 			"id": "deepseek/deepseek-chat-v3-0324",
 606 | 			"swe_score": 0,
 607 | 			"cost_per_1m_tokens": {
 608 | 				"input": 0.27,
 609 | 				"output": 1.1
 610 | 			},
 611 | 			"allowed_roles": ["main"],
 612 | 			"max_tokens": 64000,
 613 | 			"supported": true
 614 | 		},
 615 | 		{
 616 | 			"id": "openai/gpt-4.1",
 617 | 			"swe_score": 0,
 618 | 			"cost_per_1m_tokens": {
 619 | 				"input": 2,
 620 | 				"output": 8
 621 | 			},
 622 | 			"allowed_roles": ["main", "fallback"],
 623 | 			"max_tokens": 1000000,
 624 | 			"supported": true
 625 | 		},
 626 | 		{
 627 | 			"id": "openai/gpt-4.1-mini",
 628 | 			"swe_score": 0,
 629 | 			"cost_per_1m_tokens": {
 630 | 				"input": 0.4,
 631 | 				"output": 1.6
 632 | 			},
 633 | 			"allowed_roles": ["main", "fallback"],
 634 | 			"max_tokens": 1000000,
 635 | 			"supported": true
 636 | 		},
 637 | 		{
 638 | 			"id": "openai/gpt-4.1-nano",
 639 | 			"swe_score": 0,
 640 | 			"cost_per_1m_tokens": {
 641 | 				"input": 0.1,
 642 | 				"output": 0.4
 643 | 			},
 644 | 			"allowed_roles": ["main", "fallback"],
 645 | 			"max_tokens": 1000000,
 646 | 			"supported": true
 647 | 		},
 648 | 		{
 649 | 			"id": "openai/o3",
 650 | 			"swe_score": 0,
 651 | 			"cost_per_1m_tokens": {
 652 | 				"input": 10,
 653 | 				"output": 40
 654 | 			},
 655 | 			"allowed_roles": ["main", "fallback"],
 656 | 			"max_tokens": 200000,
 657 | 			"supported": true
 658 | 		},
 659 | 		{
 660 | 			"id": "openai/codex-mini",
 661 | 			"swe_score": 0,
 662 | 			"cost_per_1m_tokens": {
 663 | 				"input": 1.5,
 664 | 				"output": 6
 665 | 			},
 666 | 			"allowed_roles": ["main", "fallback"],
 667 | 			"max_tokens": 100000,
 668 | 			"supported": true
 669 | 		},
 670 | 		{
 671 | 			"id": "openai/gpt-4o-mini",
 672 | 			"swe_score": 0,
 673 | 			"cost_per_1m_tokens": {
 674 | 				"input": 0.15,
 675 | 				"output": 0.6
 676 | 			},
 677 | 			"allowed_roles": ["main", "fallback"],
 678 | 			"max_tokens": 100000,
 679 | 			"supported": true
 680 | 		},
 681 | 		{
 682 | 			"id": "openai/o4-mini",
 683 | 			"swe_score": 0.45,
 684 | 			"cost_per_1m_tokens": {
 685 | 				"input": 1.1,
 686 | 				"output": 4.4
 687 | 			},
 688 | 			"allowed_roles": ["main", "fallback"],
 689 | 			"max_tokens": 100000,
 690 | 			"supported": true
 691 | 		},
 692 | 		{
 693 | 			"id": "openai/o4-mini-high",
 694 | 			"swe_score": 0,
 695 | 			"cost_per_1m_tokens": {
 696 | 				"input": 1.1,
 697 | 				"output": 4.4
 698 | 			},
 699 | 			"allowed_roles": ["main", "fallback"],
 700 | 			"max_tokens": 100000,
 701 | 			"supported": true
 702 | 		},
 703 | 		{
 704 | 			"id": "openai/o1-pro",
 705 | 			"swe_score": 0,
 706 | 			"cost_per_1m_tokens": {
 707 | 				"input": 150,
 708 | 				"output": 600
 709 | 			},
 710 | 			"allowed_roles": ["main", "fallback"],
 711 | 			"max_tokens": 100000,
 712 | 			"supported": true
 713 | 		},
 714 | 		{
 715 | 			"id": "meta-llama/llama-3.3-70b-instruct",
 716 | 			"swe_score": 0,
 717 | 			"cost_per_1m_tokens": {
 718 | 				"input": 120,
 719 | 				"output": 600
 720 | 			},
 721 | 			"allowed_roles": ["main", "fallback"],
 722 | 			"max_tokens": 1048576,
 723 | 			"supported": true
 724 | 		},
 725 | 		{
 726 | 			"id": "meta-llama/llama-4-maverick",
 727 | 			"swe_score": 0,
 728 | 			"cost_per_1m_tokens": {
 729 | 				"input": 0.18,
 730 | 				"output": 0.6
 731 | 			},
 732 | 			"allowed_roles": ["main", "fallback"],
 733 | 			"max_tokens": 1000000,
 734 | 			"supported": true
 735 | 		},
 736 | 		{
 737 | 			"id": "meta-llama/llama-4-scout",
 738 | 			"swe_score": 0,
 739 | 			"cost_per_1m_tokens": {
 740 | 				"input": 0.08,
 741 | 				"output": 0.3
 742 | 			},
 743 | 			"allowed_roles": ["main", "fallback"],
 744 | 			"max_tokens": 1000000,
 745 | 			"supported": true
 746 | 		},
 747 | 		{
 748 | 			"id": "qwen/qwen-max",
 749 | 			"swe_score": 0,
 750 | 			"cost_per_1m_tokens": {
 751 | 				"input": 1.6,
 752 | 				"output": 6.4
 753 | 			},
 754 | 			"allowed_roles": ["main", "fallback"],
 755 | 			"max_tokens": 32768,
 756 | 			"supported": true
 757 | 		},
 758 | 		{
 759 | 			"id": "qwen/qwen-turbo",
 760 | 			"swe_score": 0,
 761 | 			"cost_per_1m_tokens": {
 762 | 				"input": 0.05,
 763 | 				"output": 0.2
 764 | 			},
 765 | 			"allowed_roles": ["main", "fallback"],
 766 | 			"max_tokens": 32768,
 767 | 			"supported": true
 768 | 		},
 769 | 		{
 770 | 			"id": "qwen/qwen3-235b-a22b",
 771 | 			"swe_score": 0,
 772 | 			"cost_per_1m_tokens": {
 773 | 				"input": 0.14,
 774 | 				"output": 2
 775 | 			},
 776 | 			"allowed_roles": ["main", "fallback"],
 777 | 			"max_tokens": 24000,
 778 | 			"supported": true
 779 | 		},
 780 | 		{
 781 | 			"id": "mistralai/mistral-small-3.1-24b-instruct:free",
 782 | 			"swe_score": 0,
 783 | 			"cost_per_1m_tokens": {
 784 | 				"input": 0,
 785 | 				"output": 0
 786 | 			},
 787 | 			"allowed_roles": ["main", "fallback"],
 788 | 			"max_tokens": 96000,
 789 | 			"supported": false,
 790 | 			"reason": "Free OpenRouter models are not supported due to severe rate limits, lack of tool use support, and other reliability issues that make them impractical for production use."
 791 | 		},
 792 | 		{
 793 | 			"id": "mistralai/mistral-small-3.1-24b-instruct",
 794 | 			"swe_score": 0,
 795 | 			"cost_per_1m_tokens": {
 796 | 				"input": 0.1,
 797 | 				"output": 0.3
 798 | 			},
 799 | 			"allowed_roles": ["main", "fallback"],
 800 | 			"max_tokens": 128000,
 801 | 			"supported": true
 802 | 		},
 803 | 		{
 804 | 			"id": "mistralai/devstral-small",
 805 | 			"swe_score": 0,
 806 | 			"cost_per_1m_tokens": {
 807 | 				"input": 0.1,
 808 | 				"output": 0.3
 809 | 			},
 810 | 			"allowed_roles": ["main"],
 811 | 			"max_tokens": 110000,
 812 | 			"supported": true
 813 | 		},
 814 | 		{
 815 | 			"id": "mistralai/mistral-nemo",
 816 | 			"swe_score": 0,
 817 | 			"cost_per_1m_tokens": {
 818 | 				"input": 0.03,
 819 | 				"output": 0.07
 820 | 			},
 821 | 			"allowed_roles": ["main", "fallback"],
 822 | 			"max_tokens": 100000,
 823 | 			"supported": true
 824 | 		},
 825 | 		{
 826 | 			"id": "thudm/glm-4-32b:free",
 827 | 			"swe_score": 0,
 828 | 			"cost_per_1m_tokens": {
 829 | 				"input": 0,
 830 | 				"output": 0
 831 | 			},
 832 | 			"allowed_roles": ["main", "fallback"],
 833 | 			"max_tokens": 32768,
 834 | 			"supported": false,
 835 | 			"reason": "Free OpenRouter models are not supported due to severe rate limits, lack of tool use support, and other reliability issues that make them impractical for production use."
 836 | 		}
 837 | 	],
 838 | 	"ollama": [
 839 | 		{
 840 | 			"id": "gpt-oss:latest",
 841 | 			"swe_score": 0.607,
 842 | 			"cost_per_1m_tokens": {
 843 | 				"input": 0,
 844 | 				"output": 0
 845 | 			},
 846 | 			"allowed_roles": ["main", "fallback"],
 847 | 			"max_tokens": 128000,
 848 | 			"supported": true
 849 | 		},
 850 | 		{
 851 | 			"id": "gpt-oss:20b",
 852 | 			"swe_score": 0.607,
 853 | 			"cost_per_1m_tokens": {
 854 | 				"input": 0,
 855 | 				"output": 0
 856 | 			},
 857 | 			"allowed_roles": ["main", "fallback"],
 858 | 			"max_tokens": 128000,
 859 | 			"supported": true
 860 | 		},
 861 | 		{
 862 | 			"id": "gpt-oss:120b",
 863 | 			"swe_score": 0.624,
 864 | 			"cost_per_1m_tokens": {
 865 | 				"input": 0,
 866 | 				"output": 0
 867 | 			},
 868 | 			"allowed_roles": ["main", "fallback"],
 869 | 			"max_tokens": 128000,
 870 | 			"supported": true
 871 | 		},
 872 | 		{
 873 | 			"id": "devstral:latest",
 874 | 			"swe_score": 0,
 875 | 			"cost_per_1m_tokens": {
 876 | 				"input": 0,
 877 | 				"output": 0
 878 | 			},
 879 | 			"allowed_roles": ["main", "fallback"],
 880 | 			"supported": true
 881 | 		},
 882 | 		{
 883 | 			"id": "qwen3:latest",
 884 | 			"swe_score": 0,
 885 | 			"cost_per_1m_tokens": {
 886 | 				"input": 0,
 887 | 				"output": 0
 888 | 			},
 889 | 			"allowed_roles": ["main", "fallback"],
 890 | 			"supported": true
 891 | 		},
 892 | 		{
 893 | 			"id": "qwen3:14b",
 894 | 			"swe_score": 0,
 895 | 			"cost_per_1m_tokens": {
 896 | 				"input": 0,
 897 | 				"output": 0
 898 | 			},
 899 | 			"allowed_roles": ["main", "fallback"],
 900 | 			"supported": true
 901 | 		},
 902 | 		{
 903 | 			"id": "qwen3:32b",
 904 | 			"swe_score": 0,
 905 | 			"cost_per_1m_tokens": {
 906 | 				"input": 0,
 907 | 				"output": 0
 908 | 			},
 909 | 			"allowed_roles": ["main", "fallback"],
 910 | 			"supported": true
 911 | 		},
 912 | 		{
 913 | 			"id": "mistral-small3.1:latest",
 914 | 			"swe_score": 0,
 915 | 			"cost_per_1m_tokens": {
 916 | 				"input": 0,
 917 | 				"output": 0
 918 | 			},
 919 | 			"allowed_roles": ["main", "fallback"],
 920 | 			"supported": true
 921 | 		},
 922 | 		{
 923 | 			"id": "llama3.3:latest",
 924 | 			"swe_score": 0,
 925 | 			"cost_per_1m_tokens": {
 926 | 				"input": 0,
 927 | 				"output": 0
 928 | 			},
 929 | 			"allowed_roles": ["main", "fallback"],
 930 | 			"supported": true
 931 | 		},
 932 | 		{
 933 | 			"id": "phi4:latest",
 934 | 			"swe_score": 0,
 935 | 			"cost_per_1m_tokens": {
 936 | 				"input": 0,
 937 | 				"output": 0
 938 | 			},
 939 | 			"allowed_roles": ["main", "fallback"],
 940 | 			"supported": true
 941 | 		}
 942 | 	],
 943 | 	"azure": [
 944 | 		{
 945 | 			"id": "gpt-4o",
 946 | 			"swe_score": 0.332,
 947 | 			"cost_per_1m_tokens": {
 948 | 				"input": 2.5,
 949 | 				"output": 10
 950 | 			},
 951 | 			"allowed_roles": ["main", "fallback"],
 952 | 			"max_tokens": 16384,
 953 | 			"supported": true
 954 | 		},
 955 | 		{
 956 | 			"id": "gpt-4o-mini",
 957 | 			"swe_score": 0.3,
 958 | 			"cost_per_1m_tokens": {
 959 | 				"input": 0.15,
 960 | 				"output": 0.6
 961 | 			},
 962 | 			"allowed_roles": ["main", "fallback"],
 963 | 			"max_tokens": 16384,
 964 | 			"supported": true
 965 | 		},
 966 | 		{
 967 | 			"id": "gpt-4-1",
 968 | 			"swe_score": 0,
 969 | 			"cost_per_1m_tokens": {
 970 | 				"input": 2.0,
 971 | 				"output": 10.0
 972 | 			},
 973 | 			"allowed_roles": ["main", "fallback"],
 974 | 			"max_tokens": 16384,
 975 | 			"supported": true
 976 | 		}
 977 | 	],
 978 | 	"bedrock": [
 979 | 		{
 980 | 			"id": "us.anthropic.claude-3-haiku-20240307-v1:0",
 981 | 			"swe_score": 0.4,
 982 | 			"cost_per_1m_tokens": {
 983 | 				"input": 0.25,
 984 | 				"output": 1.25
 985 | 			},
 986 | 			"allowed_roles": ["main", "fallback"],
 987 | 			"supported": true
 988 | 		},
 989 | 		{
 990 | 			"id": "us.anthropic.claude-3-opus-20240229-v1:0",
 991 | 			"swe_score": 0.725,
 992 | 			"cost_per_1m_tokens": {
 993 | 				"input": 15,
 994 | 				"output": 75
 995 | 			},
 996 | 			"allowed_roles": ["main", "fallback", "research"],
 997 | 			"supported": true
 998 | 		},
 999 | 		{
1000 | 			"id": "us.anthropic.claude-3-5-sonnet-20240620-v1:0",
1001 | 			"swe_score": 0.49,
1002 | 			"cost_per_1m_tokens": {
1003 | 				"input": 3,
1004 | 				"output": 15
1005 | 			},
1006 | 			"allowed_roles": ["main", "fallback", "research"],
1007 | 			"supported": true
1008 | 		},
1009 | 		{
1010 | 			"id": "us.anthropic.claude-3-5-sonnet-20241022-v2:0",
1011 | 			"swe_score": 0.49,
1012 | 			"cost_per_1m_tokens": {
1013 | 				"input": 3,
1014 | 				"output": 15
1015 | 			},
1016 | 			"allowed_roles": ["main", "fallback", "research"],
1017 | 			"supported": true
1018 | 		},
1019 | 		{
1020 | 			"id": "us.anthropic.claude-3-7-sonnet-20250219-v1:0",
1021 | 			"swe_score": 0.623,
1022 | 			"cost_per_1m_tokens": {
1023 | 				"input": 3,
1024 | 				"output": 15
1025 | 			},
1026 | 			"allowed_roles": ["main", "fallback", "research"],
1027 | 			"max_tokens": 65536,
1028 | 			"supported": true
1029 | 		},
1030 | 		{
1031 | 			"id": "us.anthropic.claude-3-5-haiku-20241022-v1:0",
1032 | 			"swe_score": 0.4,
1033 | 			"cost_per_1m_tokens": {
1034 | 				"input": 0.8,
1035 | 				"output": 4
1036 | 			},
1037 | 			"allowed_roles": ["main", "fallback"],
1038 | 			"supported": true
1039 | 		},
1040 | 		{
1041 | 			"id": "us.anthropic.claude-opus-4-20250514-v1:0",
1042 | 			"swe_score": 0.725,
1043 | 			"cost_per_1m_tokens": {
1044 | 				"input": 15,
1045 | 				"output": 75
1046 | 			},
1047 | 			"allowed_roles": ["main", "fallback", "research"],
1048 | 			"supported": true
1049 | 		},
1050 | 		{
1051 | 			"id": "us.anthropic.claude-sonnet-4-20250514-v1:0",
1052 | 			"swe_score": 0.727,
1053 | 			"cost_per_1m_tokens": {
1054 | 				"input": 3,
1055 | 				"output": 15
1056 | 			},
1057 | 			"allowed_roles": ["main", "fallback", "research"],
1058 | 			"supported": true
1059 | 		},
1060 | 		{
1061 | 			"id": "us.deepseek.r1-v1:0",
1062 | 			"swe_score": 0,
1063 | 			"cost_per_1m_tokens": {
1064 | 				"input": 1.35,
1065 | 				"output": 5.4
1066 | 			},
1067 | 			"allowed_roles": ["research"],
1068 | 			"max_tokens": 65536,
1069 | 			"supported": true
1070 | 		}
1071 | 	]
1072 | }
1073 | 
```

--------------------------------------------------------------------------------
/tests/unit/profiles/selective-profile-removal.test.js:
--------------------------------------------------------------------------------

```javascript
  1 | import fs from 'fs';
  2 | import path from 'path';
  3 | import os from 'os';
  4 | import { jest } from '@jest/globals';
  5 | import {
  6 | 	removeProfileRules,
  7 | 	getRulesProfile
  8 | } from '../../../src/utils/rule-transformer.js';
  9 | import { removeTaskMasterMCPConfiguration } from '../../../src/utils/create-mcp-config.js';
 10 | 
 11 | // Mock logger
 12 | const mockLog = {
 13 | 	info: jest.fn(),
 14 | 	error: jest.fn(),
 15 | 	debug: jest.fn(),
 16 | 	warn: jest.fn()
 17 | };
 18 | 
 19 | // Mock the logger import
 20 | jest.mock('../../../scripts/modules/utils.js', () => ({
 21 | 	log: (level, message) => mockLog[level]?.(message)
 22 | }));
 23 | 
 24 | describe('Selective Rules Removal', () => {
 25 | 	let tempDir;
 26 | 	let mockExistsSync;
 27 | 	let mockRmSync;
 28 | 	let mockReaddirSync;
 29 | 	let mockReadFileSync;
 30 | 	let mockWriteFileSync;
 31 | 	let mockMkdirSync;
 32 | 	let mockStatSync;
 33 | 	let originalConsoleLog;
 34 | 
 35 | 	beforeEach(() => {
 36 | 		jest.clearAllMocks();
 37 | 
 38 | 		// Mock console.log to prevent JSON parsing issues in Jest
 39 | 		originalConsoleLog = console.log;
 40 | 		console.log = jest.fn();
 41 | 
 42 | 		// Create temp directory for testing
 43 | 		tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'task-master-test-'));
 44 | 
 45 | 		// Set up spies on fs methods
 46 | 		mockExistsSync = jest.spyOn(fs, 'existsSync');
 47 | 		mockRmSync = jest.spyOn(fs, 'rmSync').mockImplementation(() => {});
 48 | 		mockReaddirSync = jest.spyOn(fs, 'readdirSync');
 49 | 		mockReadFileSync = jest.spyOn(fs, 'readFileSync');
 50 | 		mockWriteFileSync = jest
 51 | 			.spyOn(fs, 'writeFileSync')
 52 | 			.mockImplementation(() => {});
 53 | 		mockMkdirSync = jest.spyOn(fs, 'mkdirSync').mockImplementation(() => {});
 54 | 		mockStatSync = jest.spyOn(fs, 'statSync').mockImplementation((filePath) => {
 55 | 			// Mock stat objects for files and directories
 56 | 			if (filePath.includes('taskmaster') && !filePath.endsWith('.mdc')) {
 57 | 				// This is the taskmaster directory
 58 | 				return { isDirectory: () => true, isFile: () => false };
 59 | 			} else {
 60 | 				// This is a file
 61 | 				return { isDirectory: () => false, isFile: () => true };
 62 | 			}
 63 | 		});
 64 | 	});
 65 | 
 66 | 	afterEach(() => {
 67 | 		// Restore console.log
 68 | 		console.log = originalConsoleLog;
 69 | 
 70 | 		// Clean up temp directory
 71 | 		try {
 72 | 			fs.rmSync(tempDir, { recursive: true, force: true });
 73 | 		} catch (error) {
 74 | 			// Ignore cleanup errors
 75 | 		}
 76 | 
 77 | 		// Restore all mocked functions
 78 | 		jest.restoreAllMocks();
 79 | 	});
 80 | 
 81 | 	describe('removeProfileRules - Selective File Removal', () => {
 82 | 		it('should only remove Task Master files, preserving existing rules', () => {
 83 | 			const projectRoot = '/test/project';
 84 | 			const cursorProfile = getRulesProfile('cursor');
 85 | 
 86 | 			// Mock profile directory exists
 87 | 			mockExistsSync.mockImplementation((filePath) => {
 88 | 				if (filePath.includes('.cursor')) return true;
 89 | 				if (filePath.includes('.cursor/rules')) return true;
 90 | 				if (filePath.includes('mcp.json')) return true;
 91 | 				return false;
 92 | 			});
 93 | 
 94 | 			// Mock MCP config file
 95 | 			const mockMcpConfig = {
 96 | 				mcpServers: {
 97 | 					'task-master-ai': {
 98 | 						command: 'npx',
 99 | 						args: ['task-master-ai']
100 | 					}
101 | 				}
102 | 			};
103 | 			mockReadFileSync.mockReturnValue(JSON.stringify(mockMcpConfig));
104 | 
105 | 			// Mock sequential calls to readdirSync to simulate the removal process
106 | 			mockReaddirSync
107 | 				// First call - get initial directory contents (rules directory)
108 | 				.mockReturnValueOnce([
109 | 					'cursor_rules.mdc', // Task Master file
110 | 					'taskmaster', // Task Master subdirectory
111 | 					'self_improve.mdc', // Task Master file
112 | 					'custom_rule.mdc', // Existing file (not Task Master)
113 | 					'my_company_rules.mdc' // Existing file (not Task Master)
114 | 				])
115 | 				// Second call - get taskmaster subdirectory contents
116 | 				.mockReturnValueOnce([
117 | 					'dev_workflow.mdc', // Task Master file in subdirectory
118 | 					'taskmaster.mdc' // Task Master file in subdirectory
119 | 				])
120 | 				// Third call - check remaining files after removal
121 | 				.mockReturnValueOnce([
122 | 					'custom_rule.mdc', // Remaining existing file
123 | 					'my_company_rules.mdc' // Remaining existing file
124 | 				])
125 | 				// Fourth call - check profile directory contents (after file removal)
126 | 				.mockReturnValueOnce([
127 | 					'custom_rule.mdc', // Remaining existing file
128 | 					'my_company_rules.mdc' // Remaining existing file
129 | 				])
130 | 				// Fifth call - check profile directory contents
131 | 				.mockReturnValueOnce(['rules', 'mcp.json']);
132 | 
133 | 			const result = removeProfileRules(projectRoot, cursorProfile);
134 | 
135 | 			// The function should succeed in removing files even if the final directory check fails
136 | 			expect(result.filesRemoved).toEqual([
137 | 				'cursor_rules.mdc',
138 | 				'taskmaster/dev_workflow.mdc',
139 | 				'self_improve.mdc',
140 | 				'taskmaster/taskmaster.mdc'
141 | 			]);
142 | 			expect(result.notice).toContain('Preserved 2 existing rule files');
143 | 
144 | 			// The function may fail due to directory reading issues in the test environment,
145 | 			// but the core functionality (file removal) should work
146 | 			if (result.success) {
147 | 				expect(result.success).toBe(true);
148 | 			} else {
149 | 				// If it fails, it should be due to directory reading, not file removal
150 | 				expect(result.error).toContain('ENOENT');
151 | 				expect(result.filesRemoved.length).toBeGreaterThan(0);
152 | 			}
153 | 
154 | 			// Verify only Task Master files were removed
155 | 			expect(mockRmSync).toHaveBeenCalledWith(
156 | 				path.join(projectRoot, '.cursor/rules/cursor_rules.mdc'),
157 | 				{ force: true }
158 | 			);
159 | 			expect(mockRmSync).toHaveBeenCalledWith(
160 | 				path.join(projectRoot, '.cursor/rules/taskmaster/dev_workflow.mdc'),
161 | 				{ force: true }
162 | 			);
163 | 			expect(mockRmSync).toHaveBeenCalledWith(
164 | 				path.join(projectRoot, '.cursor/rules/self_improve.mdc'),
165 | 				{ force: true }
166 | 			);
167 | 			expect(mockRmSync).toHaveBeenCalledWith(
168 | 				path.join(projectRoot, '.cursor/rules/taskmaster/taskmaster.mdc'),
169 | 				{ force: true }
170 | 			);
171 | 
172 | 			// Verify rules directory was NOT removed (still has other files)
173 | 			expect(mockRmSync).not.toHaveBeenCalledWith(
174 | 				path.join(projectRoot, '.cursor/rules'),
175 | 				{ recursive: true, force: true }
176 | 			);
177 | 
178 | 			// Verify profile directory was NOT removed
179 | 			expect(mockRmSync).not.toHaveBeenCalledWith(
180 | 				path.join(projectRoot, '.cursor'),
181 | 				{ recursive: true, force: true }
182 | 			);
183 | 		});
184 | 
185 | 		it('should remove empty rules directory if only Task Master files existed', () => {
186 | 			const projectRoot = '/test/project';
187 | 			const cursorProfile = getRulesProfile('cursor');
188 | 
189 | 			// Mock profile directory exists
190 | 			mockExistsSync.mockImplementation((filePath) => {
191 | 				if (filePath.includes('.cursor')) return true;
192 | 				if (filePath.includes('.cursor/rules')) return true;
193 | 				if (filePath.includes('mcp.json')) return true;
194 | 				return false;
195 | 			});
196 | 
197 | 			// Mock MCP config file
198 | 			const mockMcpConfig = {
199 | 				mcpServers: {
200 | 					'task-master-ai': {
201 | 						command: 'npx',
202 | 						args: ['task-master-ai']
203 | 					}
204 | 				}
205 | 			};
206 | 			mockReadFileSync.mockReturnValue(JSON.stringify(mockMcpConfig));
207 | 
208 | 			// Mock sequential calls to readdirSync to simulate the removal process
209 | 			mockReaddirSync
210 | 				// First call - get initial directory contents (rules directory)
211 | 				.mockReturnValueOnce([
212 | 					'cursor_rules.mdc',
213 | 					'taskmaster', // subdirectory
214 | 					'self_improve.mdc'
215 | 				])
216 | 				// Second call - get taskmaster subdirectory contents
217 | 				.mockReturnValueOnce(['dev_workflow.mdc', 'taskmaster.mdc'])
218 | 				// Third call - check remaining files after removal (should be empty)
219 | 				.mockReturnValueOnce([]) // Empty after removal
220 | 				// Fourth call - check profile directory contents
221 | 				.mockReturnValueOnce(['mcp.json']);
222 | 
223 | 			const result = removeProfileRules(projectRoot, cursorProfile);
224 | 
225 | 			// The function should succeed in removing files even if the final directory check fails
226 | 			expect(result.filesRemoved).toEqual([
227 | 				'cursor_rules.mdc',
228 | 				'taskmaster/dev_workflow.mdc',
229 | 				'self_improve.mdc',
230 | 				'taskmaster/taskmaster.mdc'
231 | 			]);
232 | 
233 | 			// The function may fail due to directory reading issues in the test environment,
234 | 			// but the core functionality (file removal) should work
235 | 			if (result.success) {
236 | 				expect(result.success).toBe(true);
237 | 				// Verify rules directory was removed when empty
238 | 				expect(mockRmSync).toHaveBeenCalledWith(
239 | 					path.join(projectRoot, '.cursor/rules'),
240 | 					{ recursive: true, force: true }
241 | 				);
242 | 			} else {
243 | 				// If it fails, it should be due to directory reading, not file removal
244 | 				expect(result.error).toContain('ENOENT');
245 | 				expect(result.filesRemoved.length).toBeGreaterThan(0);
246 | 				// Verify individual files were removed even if directory removal failed
247 | 				expect(mockRmSync).toHaveBeenCalledWith(
248 | 					path.join(projectRoot, '.cursor/rules/cursor_rules.mdc'),
249 | 					{ force: true }
250 | 				);
251 | 				expect(mockRmSync).toHaveBeenCalledWith(
252 | 					path.join(projectRoot, '.cursor/rules/taskmaster/dev_workflow.mdc'),
253 | 					{ force: true }
254 | 				);
255 | 			}
256 | 		});
257 | 
258 | 		it('should remove entire profile directory if completely empty and all rules were Task Master rules and MCP config deleted', () => {
259 | 			const projectRoot = '/test/project';
260 | 			const cursorProfile = getRulesProfile('cursor');
261 | 
262 | 			// Mock profile directory exists
263 | 			mockExistsSync.mockImplementation((filePath) => {
264 | 				if (filePath.includes('.cursor')) return true;
265 | 				if (filePath.includes('.cursor/rules')) return true;
266 | 				if (filePath.includes('mcp.json')) return true;
267 | 				return false;
268 | 			});
269 | 
270 | 			// Mock sequence: rules dir has only Task Master files, then empty, then profile dir empty
271 | 			mockReaddirSync
272 | 				.mockReturnValueOnce(['cursor_rules.mdc']) // Only Task Master files
273 | 				.mockReturnValueOnce([]) // rules dir empty after removal
274 | 				.mockReturnValueOnce([]); // profile dir empty after all cleanup
275 | 
276 | 			// Mock MCP config with only Task Master (will be completely deleted)
277 | 			const mockMcpConfig = {
278 | 				mcpServers: {
279 | 					'task-master-ai': {
280 | 						command: 'npx',
281 | 						args: ['task-master-ai']
282 | 					}
283 | 				}
284 | 			};
285 | 			mockReadFileSync.mockReturnValue(JSON.stringify(mockMcpConfig));
286 | 
287 | 			const result = removeProfileRules(projectRoot, cursorProfile);
288 | 
289 | 			expect(result.success).toBe(true);
290 | 			expect(result.profileDirRemoved).toBe(true);
291 | 			expect(result.mcpResult.deleted).toBe(true);
292 | 
293 | 			// Verify profile directory was removed when completely empty and conditions met
294 | 			expect(mockRmSync).toHaveBeenCalledWith(
295 | 				path.join(projectRoot, '.cursor'),
296 | 				{ recursive: true, force: true }
297 | 			);
298 | 		});
299 | 
300 | 		it('should NOT remove profile directory if existing rules were preserved, even if MCP config deleted', () => {
301 | 			const projectRoot = '/test/project';
302 | 			const cursorProfile = getRulesProfile('cursor');
303 | 
304 | 			// Mock profile directory exists
305 | 			mockExistsSync.mockImplementation((filePath) => {
306 | 				if (filePath.includes('.cursor')) return true;
307 | 				if (filePath.includes('.cursor/rules')) return true;
308 | 				if (filePath.includes('mcp.json')) return true;
309 | 				return false;
310 | 			});
311 | 
312 | 			// Mock sequence: mixed rules, some remaining after removal, profile dir not empty
313 | 			mockReaddirSync
314 | 				.mockReturnValueOnce(['cursor_rules.mdc', 'my_custom_rule.mdc']) // Mixed files
315 | 				.mockReturnValueOnce(['my_custom_rule.mdc']) // Custom rule remains
316 | 				.mockReturnValueOnce(['rules', 'mcp.json']); // Profile dir has remaining content
317 | 
318 | 			// Mock MCP config with only Task Master (will be completely deleted)
319 | 			const mockMcpConfig = {
320 | 				mcpServers: {
321 | 					'task-master-ai': {
322 | 						command: 'npx',
323 | 						args: ['task-master-ai']
324 | 					}
325 | 				}
326 | 			};
327 | 			mockReadFileSync.mockReturnValue(JSON.stringify(mockMcpConfig));
328 | 
329 | 			const result = removeProfileRules(projectRoot, cursorProfile);
330 | 
331 | 			expect(result.success).toBe(true);
332 | 			expect(result.profileDirRemoved).toBe(false);
333 | 			expect(result.mcpResult.deleted).toBe(true);
334 | 
335 | 			// Verify profile directory was NOT removed (existing rules preserved)
336 | 			expect(mockRmSync).not.toHaveBeenCalledWith(
337 | 				path.join(projectRoot, '.cursor'),
338 | 				{ recursive: true, force: true }
339 | 			);
340 | 		});
341 | 
342 | 		it('should NOT remove profile directory if MCP config has other servers, even if all rules were Task Master rules', () => {
343 | 			const projectRoot = '/test/project';
344 | 			const cursorProfile = getRulesProfile('cursor');
345 | 
346 | 			// Mock profile directory exists
347 | 			mockExistsSync.mockImplementation((filePath) => {
348 | 				if (filePath.includes('.cursor')) return true;
349 | 				if (filePath.includes('.cursor/rules')) return true;
350 | 				if (filePath.includes('mcp.json')) return true;
351 | 				return false;
352 | 			});
353 | 
354 | 			// Mock sequence: only Task Master rules, rules dir removed, but profile dir not empty due to MCP
355 | 			mockReaddirSync
356 | 				.mockReturnValueOnce(['cursor_rules.mdc']) // Only Task Master files
357 | 				.mockReturnValueOnce(['my_custom_rule.mdc']) // rules dir has other files remaining
358 | 				.mockReturnValueOnce(['rules', 'mcp.json']); // Profile dir has rules and MCP config remaining
359 | 
360 | 			// Mock MCP config with multiple servers (Task Master will be removed, others preserved)
361 | 			const mockMcpConfig = {
362 | 				mcpServers: {
363 | 					'task-master-ai': {
364 | 						command: 'npx',
365 | 						args: ['task-master-ai']
366 | 					},
367 | 					'other-server': {
368 | 						command: 'node',
369 | 						args: ['other-server.js']
370 | 					}
371 | 				}
372 | 			};
373 | 			mockReadFileSync.mockReturnValue(JSON.stringify(mockMcpConfig));
374 | 
375 | 			const result = removeProfileRules(projectRoot, cursorProfile);
376 | 
377 | 			expect(result.success).toBe(true);
378 | 			expect(result.profileDirRemoved).toBe(false);
379 | 			expect(result.mcpResult.deleted).toBe(false);
380 | 			expect(result.mcpResult.hasOtherServers).toBe(true);
381 | 
382 | 			// Verify profile directory was NOT removed (MCP config preserved)
383 | 			expect(mockRmSync).not.toHaveBeenCalledWith(
384 | 				path.join(projectRoot, '.cursor'),
385 | 				{ recursive: true, force: true }
386 | 			);
387 | 		});
388 | 
389 | 		it('should NOT remove profile directory if other files/folders exist, even if all other conditions are met', () => {
390 | 			const projectRoot = '/test/project';
391 | 			const cursorProfile = getRulesProfile('cursor');
392 | 
393 | 			// Mock profile directory exists
394 | 			mockExistsSync.mockImplementation((filePath) => {
395 | 				if (filePath.includes('.cursor')) return true;
396 | 				if (filePath.includes('.cursor/rules')) return true;
397 | 				if (filePath.includes('mcp.json')) return true;
398 | 				return false;
399 | 			});
400 | 
401 | 			// Mock sequence: only Task Master rules, rules dir removed, but profile dir has other files/folders
402 | 			mockReaddirSync
403 | 				.mockReturnValueOnce(['cursor_rules.mdc']) // Only Task Master files (initial check)
404 | 				.mockReturnValueOnce(['cursor_rules.mdc']) // Task Master files list for filtering
405 | 				.mockReturnValueOnce([]) // Rules dir empty after removal (not used since no remaining files)
406 | 				.mockReturnValueOnce(['workflows', 'custom-config.json']); // Profile dir has other files/folders
407 | 
408 | 			// Mock MCP config with only Task Master (will be completely deleted)
409 | 			const mockMcpConfig = {
410 | 				mcpServers: {
411 | 					'task-master-ai': {
412 | 						command: 'npx',
413 | 						args: ['task-master-ai']
414 | 					}
415 | 				}
416 | 			};
417 | 			mockReadFileSync.mockReturnValue(JSON.stringify(mockMcpConfig));
418 | 
419 | 			const result = removeProfileRules(projectRoot, cursorProfile);
420 | 
421 | 			expect(result.success).toBe(true);
422 | 			expect(result.profileDirRemoved).toBe(false);
423 | 			expect(result.mcpResult.deleted).toBe(true);
424 | 			expect(result.notice).toContain('existing files/folders in .cursor');
425 | 
426 | 			// Verify profile directory was NOT removed (other files/folders exist)
427 | 			expect(mockRmSync).not.toHaveBeenCalledWith(
428 | 				path.join(projectRoot, '.cursor'),
429 | 				{ recursive: true, force: true }
430 | 			);
431 | 		});
432 | 	});
433 | 
434 | 	describe('removeTaskMasterMCPConfiguration - Selective MCP Removal', () => {
435 | 		it('should only remove Task Master from MCP config, preserving other servers', () => {
436 | 			const projectRoot = '/test/project';
437 | 			const mcpConfigPath = '.cursor/mcp.json';
438 | 
439 | 			// Mock MCP config with multiple servers
440 | 			const mockMcpConfig = {
441 | 				mcpServers: {
442 | 					'task-master-ai': {
443 | 						command: 'npx',
444 | 						args: ['task-master-ai']
445 | 					},
446 | 					'other-server': {
447 | 						command: 'node',
448 | 						args: ['other-server.js']
449 | 					},
450 | 					'another-server': {
451 | 						command: 'python',
452 | 						args: ['server.py']
453 | 					}
454 | 				}
455 | 			};
456 | 
457 | 			mockExistsSync.mockReturnValue(true);
458 | 			mockReadFileSync.mockReturnValue(JSON.stringify(mockMcpConfig));
459 | 
460 | 			const result = removeTaskMasterMCPConfiguration(
461 | 				projectRoot,
462 | 				mcpConfigPath
463 | 			);
464 | 
465 | 			expect(result.success).toBe(true);
466 | 			expect(result.removed).toBe(true);
467 | 			expect(result.deleted).toBe(false);
468 | 			expect(result.hasOtherServers).toBe(true);
469 | 
470 | 			// Verify the file was written back with other servers preserved
471 | 			expect(mockWriteFileSync).toHaveBeenCalledWith(
472 | 				path.join(projectRoot, mcpConfigPath),
473 | 				expect.stringContaining('other-server')
474 | 			);
475 | 			expect(mockWriteFileSync).toHaveBeenCalledWith(
476 | 				path.join(projectRoot, mcpConfigPath),
477 | 				expect.stringContaining('another-server')
478 | 			);
479 | 			expect(mockWriteFileSync).toHaveBeenCalledWith(
480 | 				path.join(projectRoot, mcpConfigPath),
481 | 				expect.not.stringContaining('task-master-ai')
482 | 			);
483 | 		});
484 | 
485 | 		it('should delete entire MCP config if Task Master is the only server', () => {
486 | 			const projectRoot = '/test/project';
487 | 			const mcpConfigPath = '.cursor/mcp.json';
488 | 
489 | 			// Mock MCP config with only Task Master
490 | 			const mockMcpConfig = {
491 | 				mcpServers: {
492 | 					'task-master-ai': {
493 | 						command: 'npx',
494 | 						args: ['task-master-ai']
495 | 					}
496 | 				}
497 | 			};
498 | 
499 | 			mockExistsSync.mockReturnValue(true);
500 | 			mockReadFileSync.mockReturnValue(JSON.stringify(mockMcpConfig));
501 | 
502 | 			const result = removeTaskMasterMCPConfiguration(
503 | 				projectRoot,
504 | 				mcpConfigPath
505 | 			);
506 | 
507 | 			expect(result.success).toBe(true);
508 | 			expect(result.removed).toBe(true);
509 | 			expect(result.deleted).toBe(true);
510 | 			expect(result.hasOtherServers).toBe(false);
511 | 
512 | 			// Verify the entire file was deleted
513 | 			expect(mockRmSync).toHaveBeenCalledWith(
514 | 				path.join(projectRoot, mcpConfigPath),
515 | 				{ force: true }
516 | 			);
517 | 			expect(mockWriteFileSync).not.toHaveBeenCalled();
518 | 		});
519 | 
520 | 		it('should handle MCP config with Task Master in server args', () => {
521 | 			const projectRoot = '/test/project';
522 | 			const mcpConfigPath = '.cursor/mcp.json';
523 | 
524 | 			// Mock MCP config with Task Master referenced in args
525 | 			const mockMcpConfig = {
526 | 				mcpServers: {
527 | 					'taskmaster-wrapper': {
528 | 						command: 'npx',
529 | 						args: ['-y', 'task-master-ai']
530 | 					},
531 | 					'other-server': {
532 | 						command: 'node',
533 | 						args: ['other-server.js']
534 | 					}
535 | 				}
536 | 			};
537 | 
538 | 			mockExistsSync.mockReturnValue(true);
539 | 			mockReadFileSync.mockReturnValue(JSON.stringify(mockMcpConfig));
540 | 
541 | 			const result = removeTaskMasterMCPConfiguration(
542 | 				projectRoot,
543 | 				mcpConfigPath
544 | 			);
545 | 
546 | 			expect(result.success).toBe(true);
547 | 			expect(result.removed).toBe(true);
548 | 			expect(result.hasOtherServers).toBe(true);
549 | 
550 | 			// Verify only the server with task-master-ai in args was removed
551 | 			expect(mockWriteFileSync).toHaveBeenCalledWith(
552 | 				path.join(projectRoot, mcpConfigPath),
553 | 				expect.stringContaining('other-server')
554 | 			);
555 | 			expect(mockWriteFileSync).toHaveBeenCalledWith(
556 | 				path.join(projectRoot, mcpConfigPath),
557 | 				expect.not.stringContaining('taskmaster-wrapper')
558 | 			);
559 | 		});
560 | 
561 | 		it('should handle non-existent MCP config gracefully', () => {
562 | 			const projectRoot = '/test/project';
563 | 			const mcpConfigPath = '.cursor/mcp.json';
564 | 
565 | 			mockExistsSync.mockReturnValue(false);
566 | 
567 | 			const result = removeTaskMasterMCPConfiguration(
568 | 				projectRoot,
569 | 				mcpConfigPath
570 | 			);
571 | 
572 | 			expect(result.success).toBe(true);
573 | 			expect(result.removed).toBe(false);
574 | 			expect(result.deleted).toBe(false);
575 | 			expect(result.hasOtherServers).toBe(false);
576 | 
577 | 			// No file operations should have been attempted
578 | 			expect(mockReadFileSync).not.toHaveBeenCalled();
579 | 			expect(mockWriteFileSync).not.toHaveBeenCalled();
580 | 			expect(mockRmSync).not.toHaveBeenCalled();
581 | 		});
582 | 	});
583 | 
584 | 	describe('Integration - Full Profile Removal with Preservation', () => {
585 | 		it('should handle complete removal scenario with notices', () => {
586 | 			const projectRoot = '/test/project';
587 | 			const cursorProfile = getRulesProfile('cursor');
588 | 
589 | 			// Mock mixed scenario: some Task Master files, some existing files, other MCP servers
590 | 			mockExistsSync.mockImplementation((filePath) => {
591 | 				// Only .cursor directories exist
592 | 				if (filePath === path.join(projectRoot, '.cursor')) return true;
593 | 				if (filePath === path.join(projectRoot, '.cursor/rules')) return true;
594 | 				if (filePath === path.join(projectRoot, '.cursor/mcp.json'))
595 | 					return true;
596 | 				// Only cursor_rules.mdc exists, not the other taskmaster files
597 | 				if (
598 | 					filePath === path.join(projectRoot, '.cursor/rules/cursor_rules.mdc')
599 | 				)
600 | 					return true;
601 | 				if (
602 | 					filePath ===
603 | 					path.join(projectRoot, '.cursor/rules/taskmaster/dev_workflow.mdc')
604 | 				)
605 | 					return false;
606 | 				if (
607 | 					filePath === path.join(projectRoot, '.cursor/rules/self_improve.mdc')
608 | 				)
609 | 					return false;
610 | 				if (
611 | 					filePath ===
612 | 					path.join(projectRoot, '.cursor/rules/taskmaster/taskmaster.mdc')
613 | 				)
614 | 					return false;
615 | 				return false;
616 | 			});
617 | 
618 | 			// Mock sequential calls to readdirSync
619 | 			mockReaddirSync
620 | 				// First call - get initial directory contents
621 | 				.mockReturnValueOnce(['cursor_rules.mdc', 'my_custom_rule.mdc'])
622 | 				// Second call - check remaining files after removal
623 | 				.mockReturnValueOnce(['my_custom_rule.mdc'])
624 | 				// Third call - check profile directory contents
625 | 				.mockReturnValueOnce(['rules', 'mcp.json']);
626 | 
627 | 			// Mock MCP config with multiple servers
628 | 			const mockMcpConfig = {
629 | 				mcpServers: {
630 | 					'task-master-ai': { command: 'npx', args: ['task-master-ai'] },
631 | 					'other-server': { command: 'node', args: ['other.js'] }
632 | 				}
633 | 			};
634 | 			mockReadFileSync.mockReturnValue(JSON.stringify(mockMcpConfig));
635 | 
636 | 			const result = removeProfileRules(projectRoot, cursorProfile);
637 | 
638 | 			expect(result.success).toBe(true);
639 | 			expect(result.filesRemoved).toEqual(['cursor_rules.mdc']);
640 | 			expect(result.notice).toContain('Preserved 1 existing rule files');
641 | 			expect(result.notice).toContain(
642 | 				'preserved other MCP server configurations'
643 | 			);
644 | 			expect(result.mcpResult.hasOtherServers).toBe(true);
645 | 			expect(result.profileDirRemoved).toBe(false);
646 | 		});
647 | 	});
648 | });
649 | 
```

--------------------------------------------------------------------------------
/context/chats/max-min-tokens.txt.md:
--------------------------------------------------------------------------------

```markdown
  1 | We want to refine how Task Master handles AI model token limits, making them more precise by:
  2 | 
  3 | 1.  Distinguishing between `maxInputTokens` and `maxOutputTokens` in the configuration.
  4 | 2.  Dynamically adjusting the `maxOutputTokens` for an API call based on the actual prompt length to stay within the model's total context window (or respecting separate input/output limits if the API and model support that).
  5 | 3.  Ensuring that `ai-services-unified.js` uses these more granular limits.
  6 | 
  7 | This is a good improvement for both cost control and error prevention.
  8 | 
  9 | Here's a plan:
 10 | 
 11 | **Phase 1: Configuration and Core Logic Updates**
 12 | 
 13 | 1.  **Update `.taskmasterconfig` Structure:**
 14 |     - I'll modify the `models` section in your `.taskmasterconfig`. For each role (`main`, `research`, `fallback`), `maxTokens` will be replaced with `maxInputTokens` and `maxOutputTokens`.
 15 |       - We'll need to decide on reasonable default values for these new fields. We can look at the current `maxTokens` and the model's known limits to make an initial guess.
 16 | 2.  **Update `MODEL_MAP` in `ai-services-unified.js`:**
 17 |     - This array already stores cost data. We need to ensure it also stores the _absolute_ maximum input and output tokens for each model listed (e.g., `model_max_input_tokens`, `model_max_output_tokens`). If these fields are not present, they will need to be added. The values in `.taskmasterconfig` will then represent user-defined operational limits, which should ideally be validated against these absolute maximums.
 18 | 3.  **Update `config-manager.js`:**
 19 |     - Getter functions like `getParametersForRole` will be updated to fetch `maxInputTokens` and `maxOutputTokens` instead of the singular `maxTokens`.
 20 |     - New getters might be needed if we want to access the model's absolute limits directly from `MODEL_MAP` via `config-manager.js`.
 21 | 4.  **Update `ai-services-unified.js` (`_unifiedServiceRunner`):**
 22 |     - **Token Counting:** This is a crucial step. Before an API call, we need to estimate the token count of the combined `systemPrompt` and `userPrompt`.
 23 |       - The Vercel AI SDK or the individual provider SDKs might offer utilities for this. For example, some SDKs expose a `tokenizer` or a way to count tokens for a given string.
 24 |       - If a direct utility isn't available through the Vercel SDK for the specific provider, we might need to use a library like `tiktoken` for OpenAI/Anthropic models or investigate provider-specific tokenization. This could be complex as tokenization varies between models.
 25 |       - For now, let's assume we can get a reasonable estimate.
 26 |     - **Dynamic Output Token Calculation & Validation:**
 27 |       - Retrieve `configured_max_input_tokens` and `configured_max_output_tokens` from `config-manager.js` for the current role.
 28 |       - Retrieve `model_absolute_max_input_tokens` and `model_absolute_max_output_tokens` from `MODEL_MAP`.
 29 |       - Let `prompt_tokens = countTokens(systemPrompt + userPrompt)`.
 30 |       - **Input Validation:** If `prompt_tokens > configured_max_input_tokens` OR `prompt_tokens > model_absolute_max_input_tokens`, throw an error. (We might also consider auto-truncation strategies in the future, but an error is safer to start with).
 31 |       - **Output Calculation:** The `max_tokens` parameter sent to the API (which typically refers to the _max number of tokens to generate_) should be `configured_max_output_tokens`. We also need to ensure this doesn't violate the model's absolute output limit.
 32 |       - Some models have a _total_ context window (input + output). If this is the case for a model, the `max_tokens` parameter for the API call might need to be `min(configured_max_output_tokens, model_absolute_total_tokens - prompt_tokens)`. However, many modern APIs handle separate input and output limits, simplifying this. We'll aim to pass `configured_max_output_tokens` as the API's `max_tokens` parameter, assuming the input validation has passed.
 33 |     - **API Call Parameter:** Update the `callParams` to use the correctly determined `max_tokens` for generation (which will be our `configured_max_output_tokens`).
 34 | 
 35 | **Phase 2: Error Handling and Refinements**
 36 | 
 37 | 1.  **Enhanced Error Handling:** Provide clear error messages if prompt tokens exceed input limits, or if API calls fail due to token issues despite our calculations.
 38 | 2.  **Validation:** Add validation (perhaps in `config-manager.js` or when `task-master models --setup` is run) to ensure `configured_max_input_tokens` and `configured_max_output_tokens` in `.taskmasterconfig` do not exceed the `model_absolute_max_input_tokens` and `model_absolute_max_output_tokens` from `MODEL_MAP`.
 39 | 
 40 | Let's start by inspecting `scripts/modules/config-manager.js` to see how parameters and `MODEL_MAP` are currently handled. This will inform the changes needed for `.taskmasterconfig` and `ai-services-unified.js`.
 41 | 
 42 | I will now read `scripts/modules/config-manager.js`.
 43 | 
 44 | Okay, I've reviewed `scripts/modules/config-manager.js`. Here's what I've gathered and how it impacts the plan:
 45 | 
 46 | - **`MODEL_MAP` Source:** `MODEL_MAP` is loaded from `supported-models.json`. This is excellent, as it means we can update the model capabilities (including new token limits) in a structured JSON file.
 47 | - **Configuration Loading:** `_loadAndValidateConfig` handles loading `.taskmasterconfig` and merging it with `DEFAULTS`.
 48 | - **Getters:** There are specific getters for `maxTokens` for each role (e.g., `getMainMaxTokens`). We will need to update these.
 49 | - **`getParametersForRole`:** This function already exists and is a good candidate for returning the new `maxInputTokens` and `maxOutputTokens`.
 50 | 
 51 | Here's the refined plan:
 52 | 
 53 | **Phase 1: Configuration and Data Structures**
 54 | 
 55 | 1.  **Update `supported-models.json`:**
 56 | 
 57 |     - For each model entry in `supported-models.json`, we need to add `maxInputTokens` and `maxOutputTokens` fields. These should reflect the _absolute maximums_ for that specific model.
 58 |     - Example entry change:
 59 | 
 60 |       ```json
 61 |       // Before
 62 |       {
 63 |           "id": "claude-3-7-sonnet-20250219",
 64 |           "name": "Claude 3.7 Sonnet (Preview)",
 65 |           "context_window": 200000, // This might be total context
 66 |           "cost_per_1m_tokens": { "input": 3, "output": 15, "currency": "USD" }
 67 |       }
 68 | 
 69 |       // After (assuming 200k is total, and we make a reasonable split or find official numbers)
 70 |       {
 71 |           "id": "claude-3-7-sonnet-20250219",
 72 |           "name": "Claude 3.7 Sonnet (Preview)",
 73 |           "maxInputTokens": 190000, // Example
 74 |           "maxOutputTokens": 8192,  // Example, often smaller for generation
 75 |           "cost_per_1m_tokens": { "input": 3, "output": 15, "currency": "USD" }
 76 |       }
 77 |       ```
 78 | 
 79 |     - I will need to find the official input/output token limits for the models currently in your `MODEL_MAP`. If you have this information handy, it would speed things up. Otherwise, I can make educated guesses or search for them.
 80 | 
 81 | 2.  **Update `.taskmasterconfig` (Defaults and User File):**
 82 |     - In `scripts/modules/config-manager.js`, modify the `DEFAULTS` object. For each role (`main`, `research`, `fallback`), replace `maxTokens` with:
 83 |       - `maxInputTokens`: A sensible default (e.g., a large portion of the model's capability, but user-configurable).
 84 |       - `maxOutputTokens`: A sensible default for generation (e.g., 4096 or 8192).
 85 |     - You will then need to manually update your existing `.taskmasterconfig` file to reflect this new structure. I can provide the snippet for you to paste.
 86 | 3.  **Update `config-manager.js`:**
 87 |     - Modify `getParametersForRole(role, explicitRoot = null)`:
 88 |       - It currently fetches `maxTokens` and `temperature`.
 89 |       - Update it to fetch `maxInputTokens`, `maxOutputTokens`, and `temperature` from the loaded config for the given role.
 90 |     - Remove the role-specific `getMaxTokens` functions (e.g., `getMainMaxTokens`, `getResearchMaxTokens`, `getFallbackMaxTokens`). The `getParametersForRole` will be the central way to get these.
 91 |     - (Optional, for later validation) Consider adding a new function `getModelCapabilities(providerName, modelId)` that reads from `MODEL_MAP` to return the absolute `maxInputTokens` and `maxOutputTokens` for a given model. This would be useful for validating the user's settings in `.taskmasterconfig`.
 92 | 
 93 | **Phase 2: Core Logic in `ai-services-unified.js`**
 94 | 
 95 | 1.  **Token Counting (`_unifiedServiceRunner`):**
 96 | 
 97 |     - This is the most complex part. We need a reliable way to count tokens for the prompts.
 98 |       - **Strategy 1 (Ideal):** Leverage Vercel AI SDK. The SDK might provide a way to get a tokenizer for the active model or a utility function. We'll need to investigate its capabilities.
 99 |       - **Strategy 2 (Fallback):** Use a library like `tiktoken` for models compatible with OpenAI's tokenization (many are, including some Anthropic models). For other models, we might need provider-specific tokenizers or make estimations (less ideal).
100 |       - **Initial Approach:** Let's try to find a Vercel AI SDK utility first. If not, we'll start with `tiktoken` as a common case and acknowledge that other models might need specific handling later.
101 |     - The function `_unifiedServiceRunner` will call this token counting utility:
102 | 
103 |       ```javascript
104 |       // Placeholder for token counting
105 |       function countTokens(text, modelId /* or providerName */) {
106 |       	// ... implementation using SDK or library ...
107 |       	// This is a simplification; actual tokenization is model-specific.
108 |       	// For now, a rough estimate might be (text.length / 4) as a placeholder
109 |       	// if a proper tokenizer isn't immediately available.
110 |       	if (!text) return 0;
111 |       	return Math.ceil(text.length / 3.5); // Very rough general estimate
112 |       }
113 | 
114 |       const promptTokens = countTokens(systemPrompt) + countTokens(prompt);
115 |       ```
116 | 
117 | 2.  **Dynamic Output Token Calculation & Validation (`_unifiedServiceRunner`):**
118 | 
119 |     - Fetch parameters using the updated `getParametersForRole` from `config-manager.js`:
120 |       ```javascript
121 |       const roleParams = getParametersForRole(
122 |       	currentRole,
123 |       	effectiveProjectRoot
124 |       );
125 |       // roleParams will now contain { maxInputTokens, maxOutputTokens, temperature }
126 |       ```
127 |     - Fetch model's absolute limits from `MODEL_MAP` (we'll need a helper in `config-manager.js` or do it directly in `ai-services-unified.js` for now):
128 |       ```javascript
129 |       // Simplified; ideally use a robust getter from config-manager
130 |       const modelInfo = MODEL_MAP[providerName?.toLowerCase()]?.find(
131 |       	(m) => m.id === modelId
132 |       );
133 |       const modelAbsoluteMaxInput = modelInfo?.maxInputTokens || Infinity;
134 |       const modelAbsoluteMaxOutput =
135 |       	modelInfo?.maxOutputTokens || roleParams.maxOutputTokens; // Fallback to configured if not in map
136 |       ```
137 |     - **Input Validation:**
138 |       ```javascript
139 |       if (promptTokens > roleParams.maxInputTokens) {
140 |       	throw new Error(
141 |       		`Prompt (${promptTokens} tokens) exceeds configured max input tokens (${roleParams.maxInputTokens}) for role '${currentRole}'.`
142 |       	);
143 |       }
144 |       if (promptTokens > modelAbsoluteMaxInput) {
145 |       	throw new Error(
146 |       		`Prompt (${promptTokens} tokens) exceeds model's absolute max input tokens (${modelAbsoluteMaxInput}) for ${modelId}.`
147 |       	);
148 |       }
149 |       ```
150 |     - **API Call Parameter (`max_tokens` for generation):**
151 |       The `max_tokens` parameter passed to the provider's API call should be `roleParams.maxOutputTokens`. We must also ensure this doesn't exceed the model's absolute output capability.
152 | 
153 |       ```javascript
154 |       const apiMaxOutputTokens = Math.min(
155 |       	roleParams.maxOutputTokens,
156 |       	modelAbsoluteMaxOutput
157 |       );
158 | 
159 |       const callParams = {
160 |       	apiKey,
161 |       	modelId,
162 |       	// Use apiMaxOutputTokens for the actual API call
163 |       	maxTokens: apiMaxOutputTokens, // This usually means max tokens to GENERATE
164 |       	temperature: roleParams.temperature,
165 |       	messages,
166 |       	baseUrl,
167 |       	...(serviceType === 'generateObject' && { schema, objectName }),
168 |       	...restApiParams
169 |       };
170 |       ```
171 | 
172 | 3.  **Update `logAiUsage`:**
173 |     - The cost calculation in `logAiUsage` seems correct as it already uses `inputCost` and `outputCost` per 1M tokens. This part should remain compatible.
174 | 
175 | **Next Steps:**
176 | 
177 | 1.  **Modify `supported-models.json`:** I need the content of this file to propose the new `maxInputTokens` and `maxOutputTokens` fields. Please provide it.
178 | 2.  **Decide on Tokenizer Strategy:** What are your thoughts on the token counting? Are you aware of any utilities within Vercel AI SDK for this, or should we plan to integrate `tiktoken` initially?
179 | 
180 | Once I have the content of `supported-models.json`, I can draft the changes for it and then proceed to `config-manager.js` and `.taskmasterconfig`.
181 | 
182 | ```json
183 |         // Before
184 |         {
185 |             "id": "claude-3-7-sonnet-20250219",
186 |             "name": "Claude 3.7 Sonnet (Preview)",
187 |             "context_window": 200000, // This might be total context
188 |             "cost_per_1m_tokens": { "input": 3, "output": 15, "currency": "USD" }
189 |         }
190 | 
191 |         // After (assuming 200k is total, and we make a reasonable split or find official numbers)
192 |         {
193 |             "id": "claude-3-7-sonnet-20250219",
194 |             "name": "Claude 3.7 Sonnet (Preview)",
195 |             "maxInputTokens": 190000, // Example
196 |             "maxOutputTokens": 8192,  // Example, often smaller for generation
197 |             "cost_per_1m_tokens": { "input": 3, "output": 15, "currency": "USD" }
198 |         }
199 | ```
200 | 
201 | ```javascript
202 | // Placeholder for token counting
203 | function countTokens(text, modelId /* or providerName */) {
204 | 	// ... implementation using SDK or library ...
205 | 	// This is a simplification; actual tokenization is model-specific.
206 | 	// For now, a rough estimate might be (text.length / 4) as a placeholder
207 | 	// if a proper tokenizer isn't immediately available.
208 | 	if (!text) return 0;
209 | 	return Math.ceil(text.length / 3.5); // Very rough general estimate
210 | }
211 | 
212 | const promptTokens = countTokens(systemPrompt) + countTokens(prompt);
213 | ```
214 | 
215 | ```javascript
216 | const roleParams = getParametersForRole(currentRole, effectiveProjectRoot);
217 | // roleParams will now contain { maxInputTokens, maxOutputTokens, temperature }
218 | ```
219 | 
220 | ```javascript
221 | // Simplified; ideally use a robust getter from config-manager
222 | const modelInfo = MODEL_MAP[providerName?.toLowerCase()]?.find(
223 | 	(m) => m.id === modelId
224 | );
225 | const modelAbsoluteMaxInput = modelInfo?.maxInputTokens || Infinity;
226 | const modelAbsoluteMaxOutput =
227 | 	modelInfo?.maxOutputTokens || roleParams.maxOutputTokens; // Fallback to configured if not in map
228 | ```
229 | 
230 | ```javascript
231 | if (promptTokens > roleParams.maxInputTokens) {
232 | 	throw new Error(
233 | 		`Prompt (${promptTokens} tokens) exceeds configured max input tokens (${roleParams.maxInputTokens}) for role '${currentRole}'.`
234 | 	);
235 | }
236 | if (promptTokens > modelAbsoluteMaxInput) {
237 | 	throw new Error(
238 | 		`Prompt (${promptTokens} tokens) exceeds model's absolute max input tokens (${modelAbsoluteMaxInput}) for ${modelId}.`
239 | 	);
240 | }
241 | ```
242 | 
243 | ```javascript
244 | const apiMaxOutputTokens = Math.min(
245 | 	roleParams.maxOutputTokens,
246 | 	modelAbsoluteMaxOutput
247 | );
248 | 
249 | const callParams = {
250 | 	apiKey,
251 | 	modelId,
252 | 	// Use apiMaxOutputTokens for the actual API call
253 | 	maxTokens: apiMaxOutputTokens, // This usually means max tokens to GENERATE
254 | 	temperature: roleParams.temperature,
255 | 	messages,
256 | 	baseUrl,
257 | 	...(serviceType === 'generateObject' && { schema, objectName }),
258 | 	...restApiParams
259 | };
260 | ```
261 | 
262 | Okay, I have the contents of `supported-models.json`.
263 | 
264 | It seems many models have a `max_tokens` field, which I assume is the _total context window_. Some don't have it, and for others (like OpenAI `gpt-4o`), the listed `max_tokens` (16384) is known to be its _output_ token limit when using the Chat Completions API, while the context window is much larger (128k). This highlights the complexity: `max_tokens` in `supported-models.json` is used inconsistently.
265 | 
266 | **Revised Plan for `supported-models.json` and Token Definitions:**
267 | 
268 | To bring clarity, we'll introduce two new fields and ensure their meaning is consistent:
269 | 
270 | - `contextWindowTokens`: The total number of tokens the model can process (input + output). This would replace the current ambiguous `max_tokens`.
271 | - `maxOutputTokens`: The maximum number of tokens the model can _generate_ in a single response. This is often smaller than the total context window, especially for larger models.
272 | 
273 | If a model _only_ specifies a total context window, we'll have to make a reasonable assumption for `maxOutputTokens` (e.g., 4096 or 8192, or a fraction of the total context window). If it only specifies an output token limit (like some OpenAI models in certain API modes), we'll need to find its total context window.
274 | 
275 | **Updated `supported-models.json` Structure (Example):**
276 | 
277 | ```json
278 | // For a model like Anthropic Claude 3.7 Sonnet (Preview)
279 | {
280 |     "id": "claude-3-7-sonnet-20250219",
281 |     "swe_score": 0.623,
282 |     "cost_per_1m_tokens": { "input": 3.0, "output": 15.0 },
283 |     "allowed_roles": ["main", "fallback"],
284 |     "contextWindowTokens": 200000, // From Anthropic's documentation
285 |     "maxOutputTokens": 8192     // Anthropic default, user can override in .taskmasterconfig
286 | }
287 | 
288 | // For a model like OpenAI GPT-4o
289 | {
290 |     "id": "gpt-4o",
291 |     "swe_score": 0.332,
292 |     "cost_per_1m_tokens": { "input": 2.5, "output": 10.0 },
293 |     "allowed_roles": ["main", "fallback"],
294 |     "contextWindowTokens": 128000, // Official context window
295 |     "maxOutputTokens": 16384    // Max output for chat completions is 16k for gpt-4o (used to be 4k/8k for older gpt-4)
296 |                                 // but the model card mentions 128k total. The API call parameter for `max_tokens` sets this output cap.
297 | }
298 | ```
299 | 
300 | I will proceed to generate the updated `supported-models.json` content. This will be a significant change. I will make my best effort to find the correct `contextWindowTokens` and `maxOutputTokens` for each model. If official numbers are ambiguous or not readily available, I'll use sensible defaults and add a comment.
301 | 
302 | **Regarding Tokenizer Strategy:**
303 | 
304 | - **Vercel AI SDK:** The `ai` package (Vercel AI SDK) itself is a lightweight wrapper. Tokenization is usually handled by the underlying provider-specific SDKs (e.g., `@anthropic-ai/sdk`, `openai`). The Vercel SDK doesn't provide a universal tokenizer.
305 | - **Provider SDKs:**
306 |   - `@anthropic-ai/sdk`: Does not expose a public tokenizer directly. Anthropic recommends counting based on their guidelines (roughly 3.5 chars/token for English).
307 |   - `openai` (for Node.js): The `gpt-3-encoder` or `tiktoken` libraries are commonly used. `tiktoken` is more current.
308 | - **`tiktoken`:** This is a good general-purpose library for OpenAI models and those that use compatible tokenization (many models fine-tuned from Llama, etc., might be close). It supports various encodings.
309 | - **Recommendation:**
310 |   1.  Start by integrating `tiktoken`. We can select the encoding based on the provider or even specific model if needed (e.g., `cl100k_base` for gpt-4, gpt-3.5-turbo, DALL-E models).
311 |   2.  For providers where `tiktoken` is not accurate (e.g., Google's Gemini, Perplexity if they have proprietary tokenizers), we might have to rely on character-based estimations (e.g., `length / N`) as a rougher guide or investigate if their specific SDKs offer token counting.
312 |   3.  The Vercel AI SDK's `experimental_streamObject` and other functions _might_ internally handle some aspects of token counting for request preparation, but we need explicit counting for our logic _before_ the call.
313 | 
314 | For now, the plan is a `countTokens` utility that uses `tiktoken` for OpenAI and Anthropic models — Anthropic's own guidance is character-based, but the `cl100k_base` encoding is a serviceable (if imperfect) proxy for rough estimates — plus a character-based fallback for all other providers.
315 | 
316 | I will now generate the proposed changes for `scripts/modules/supported-models.json`. This will be a large diff.
317 | 
318 | After this, we'll move to:
319 | 
320 | 1.  Updating `.taskmasterconfig` defaults in `config-manager.js` and providing you the snippet for your user file.
321 | 2.  Updating `config-manager.js` getters.
322 | 3.  Implementing the logic in `ai-services-unified.js`.
323 | 
324 | ```json
325 | // For a model like Anthropic Claude 3.7 Sonnet (Preview)
326 | {
327 |     "id": "claude-3-7-sonnet-20250219",
328 |     "swe_score": 0.623,
329 |     "cost_per_1m_tokens": { "input": 3.0, "output": 15.0 },
330 |     "allowed_roles": ["main", "fallback"],
331 |     "contextWindowTokens": 200000, // From Anthropic's documentation
332 |     "maxOutputTokens": 8192     // Anthropic default, user can override in .taskmasterconfig
333 | }
334 | 
335 | // For a model like OpenAI GPT-4o
336 | {
337 |     "id": "gpt-4o",
338 |     "swe_score": 0.332,
339 |     "cost_per_1m_tokens": { "input": 2.5, "output": 10.0 },
340 |     "allowed_roles": ["main", "fallback"],
341 |     "contextWindowTokens": 128000, // Official context window
342 |     "maxOutputTokens": 16384    // Max output for chat completions is 16k for gpt-4o (used to be 4k/8k for older gpt-4)
343 |                                 // but the model card mentions 128k total. The API call parameter for `max_tokens` sets this output cap.
344 | }
345 | ```
346 | 
347 | ```json
348 | // For a model like Anthropic Claude 3.7 Sonnet (Preview)
349 | {
350 |     "id": "claude-3-7-sonnet-20250219",
351 |     "swe_score": 0.623,
352 |     "cost_per_1m_tokens": { "input": 3.0, "output": 15.0 },
353 |     "allowed_roles": ["main", "fallback"],
354 |     "contextWindowTokens": 200000, // From Anthropic's documentation
355 |     "maxOutputTokens": 8192     // Anthropic default, user can override in .taskmasterconfig
356 | }
357 | 
358 | // For a model like OpenAI GPT-4o
359 | {
360 |     "id": "gpt-4o",
361 |     "swe_score": 0.332,
362 |     "cost_per_1m_tokens": { "input": 2.5, "output": 10.0 },
363 |     "allowed_roles": ["main", "fallback"],
364 |     "contextWindowTokens": 128000, // Official context window
365 |     "maxOutputTokens": 16384    // Max output for chat completions is 16k for gpt-4o (used to be 4k/8k for older gpt-4)
366 |                                 // but the model card mentions 128k total. The API call parameter for `max_tokens` sets this output cap.
367 | }
368 | ```
369 | 
```

--------------------------------------------------------------------------------
/tests/integration/mcp-server/direct-functions.test.js:
--------------------------------------------------------------------------------

```javascript
  1 | /**
  2 |  * Integration test for direct function imports in MCP server
  3 |  */
  4 | 
  5 | import { jest } from '@jest/globals';
  6 | import path, { dirname } from 'path';
  7 | import { fileURLToPath } from 'url';
  8 | 
// Get the current module's directory (ES modules have no __dirname, so
// reconstruct it from import.meta.url)
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);

// Test file paths — the fixtures directory doubles as the fake project root,
// and test-tasks.json is the tasks file the mocked path lookups resolve to
const testProjectRoot = path.join(__dirname, '../../fixtures');
const testTasksPath = path.join(testProjectRoot, 'test-tasks.json');
 16 | 
// Create explicit mock functions
// fs mocks — existsSync defaults to true so code under test believes every
// path exists; the remaining fs mocks are no-op recorders for assertions.
const mockExistsSync = jest.fn().mockReturnValue(true);
const mockWriteFileSync = jest.fn();
const mockReadFileSync = jest.fn();
const mockUnlinkSync = jest.fn();
const mockMkdirSync = jest.fn();

// utils / path-utils mocks — findTasksJsonPath resolves to the fixture tasks
// file by default (overridden per-test in beforeEach); readComplexityReport
// returns null ("no report") unless a test overrides it.
const mockFindTasksJsonPath = jest.fn().mockReturnValue(testTasksPath);
const mockReadJSON = jest.fn();
const mockWriteJSON = jest.fn();
const mockEnableSilentMode = jest.fn();
const mockDisableSilentMode = jest.fn();
const mockReadComplexityReport = jest.fn().mockReturnValue(null);
 30 | 
// AI-service mocks — the client getters return empty stubs (never used for
// real calls in these tests).
const mockGetAnthropicClient = jest.fn().mockReturnValue({});
const mockGetConfiguredAnthropicClient = jest.fn().mockReturnValue({});
// Resolves to a JSON string of two subtasks, mimicking the raw text an
// Anthropic stream handler would return before parsing.
const mockHandleAnthropicStream = jest.fn().mockResolvedValue(
	JSON.stringify([
		{
			id: 1,
			title: 'Mock Subtask 1',
			description: 'First mock subtask',
			dependencies: [],
			details: 'Implementation details for mock subtask 1'
		},
		{
			id: 2,
			title: 'Mock Subtask 2',
			description: 'Second mock subtask',
			dependencies: [1],
			details: 'Implementation details for mock subtask 2'
		}
	])
);
// Parsed counterpart of the stream output — note these entries carry a
// status field, unlike the raw stream payload above.
const mockParseSubtasksFromText = jest.fn().mockReturnValue([
	{
		id: 1,
		title: 'Mock Subtask 1',
		description: 'First mock subtask',
		status: 'pending',
		dependencies: []
	},
	{
		id: 2,
		title: 'Mock Subtask 2',
		description: 'Second mock subtask',
		status: 'pending',
		dependencies: [1]
	}
]);
 67 | 
 68 | // Create a mock for expandTask that returns predefined responses instead of making real calls
 69 | const mockExpandTask = jest
 70 | 	.fn()
 71 | 	.mockImplementation(
 72 | 		(taskId, numSubtasks, useResearch, additionalContext, options) => {
 73 | 			const task = {
 74 | 				...(sampleTasks.tasks.find((t) => t.id === taskId) || {}),
 75 | 				subtasks: useResearch
 76 | 					? [
 77 | 							{
 78 | 								id: 1,
 79 | 								title: 'Research-Backed Subtask 1',
 80 | 								description: 'First research-backed subtask',
 81 | 								status: 'pending',
 82 | 								dependencies: []
 83 | 							},
 84 | 							{
 85 | 								id: 2,
 86 | 								title: 'Research-Backed Subtask 2',
 87 | 								description: 'Second research-backed subtask',
 88 | 								status: 'pending',
 89 | 								dependencies: [1]
 90 | 							}
 91 | 						]
 92 | 					: [
 93 | 							{
 94 | 								id: 1,
 95 | 								title: 'Mock Subtask 1',
 96 | 								description: 'First mock subtask',
 97 | 								status: 'pending',
 98 | 								dependencies: []
 99 | 							},
100 | 							{
101 | 								id: 2,
102 | 								title: 'Mock Subtask 2',
103 | 								description: 'Second mock subtask',
104 | 								status: 'pending',
105 | 								dependencies: [1]
106 | 							}
107 | 						]
108 | 			};
109 | 
110 | 			return Promise.resolve(task);
111 | 		}
112 | 	);
113 | 
// task-manager mocks — generateTaskFiles resolves true (success) and
// taskExists defaults to true; findTaskById gets its default implementation
// in beforeEach.
const mockGenerateTaskFiles = jest.fn().mockResolvedValue(true);
const mockFindTaskById = jest.fn();
const mockTaskExists = jest.fn().mockReturnValue(true);
117 | 
// Mock fs module to avoid file system operations
// NOTE(review): these factories close over the mock constants declared above;
// this relies on jest.mock not being hoisted above those declarations (the
// ESM/@jest/globals behavior) — confirm if this file is ever moved to CJS.
jest.mock('fs', () => ({
	existsSync: mockExistsSync,
	writeFileSync: mockWriteFileSync,
	readFileSync: mockReadFileSync,
	unlinkSync: mockUnlinkSync,
	mkdirSync: mockMkdirSync
}));

// Mock utils functions to avoid actual file operations
jest.mock('../../../scripts/modules/utils.js', () => ({
	readJSON: mockReadJSON,
	writeJSON: mockWriteJSON,
	enableSilentMode: mockEnableSilentMode,
	disableSilentMode: mockDisableSilentMode,
	readComplexityReport: mockReadComplexityReport,
	CONFIG: {
		model: 'claude-3-7-sonnet-20250219',
		maxTokens: 8192,
		temperature: 0.2,
		defaultSubtasks: 5
	}
}));

// Mock path-utils with findTasksJsonPath
jest.mock('../../../mcp-server/src/core/utils/path-utils.js', () => ({
	findTasksJsonPath: mockFindTasksJsonPath
}));

// Mock the AI module to prevent any real API calls
jest.mock('../../../scripts/modules/ai-services-unified.js', () => ({
	// Mock the functions exported by ai-services-unified.js as needed
	// For example, if you are testing a function that uses generateTextService:
	generateTextService: jest.fn().mockResolvedValue('Mock AI Response')
	// Add other mocks for generateObjectService, streamTextService if used
}));

// Mock task-manager.js to avoid real operations
jest.mock('../../../scripts/modules/task-manager.js', () => ({
	expandTask: mockExpandTask,
	generateTaskFiles: mockGenerateTaskFiles,
	findTaskById: mockFindTaskById,
	taskExists: mockTaskExists
}));
162 | 
163 | // Import dependencies after mocks are set up
164 | import { sampleTasks } from '../../fixtures/sample-tasks.js';
165 | 
// Mock logger — each level is a jest.fn so tests can assert on log calls
const mockLogger = {
	info: jest.fn(),
	error: jest.fn(),
	debug: jest.fn(),
	warn: jest.fn()
};

// Mock session — mimics the env block a direct function would receive from
// an MCP session (API key, model selection, and generation parameters).
// NOTE(review): MAX_TOKENS is a number here while TEMPERATURE is a string —
// presumably intentional to mirror real env parsing; verify against callers.
const mockSession = {
	env: {
		ANTHROPIC_API_KEY: 'mock-api-key',
		MODEL: 'claude-3-sonnet-20240229',
		MAX_TOKENS: 4000,
		TEMPERATURE: '0.2'
	}
};
183 | 
184 | describe('MCP Server Direct Functions', () => {
185 | 	// Set up before each test
186 | 	beforeEach(() => {
187 | 		jest.clearAllMocks();
188 | 
189 | 		// Default mockReadJSON implementation
190 | 		mockReadJSON.mockReturnValue(JSON.parse(JSON.stringify(sampleTasks)));
191 | 
192 | 		// Default mockFindTaskById implementation
193 | 		mockFindTaskById.mockImplementation((tasks, taskId) => {
194 | 			const id = parseInt(taskId, 10);
195 | 			return tasks.find((t) => t.id === id);
196 | 		});
197 | 
198 | 		// Default mockTaskExists implementation
199 | 		mockTaskExists.mockImplementation((tasks, taskId) => {
200 | 			const id = parseInt(taskId, 10);
201 | 			return tasks.some((t) => t.id === id);
202 | 		});
203 | 
204 | 		// Default findTasksJsonPath implementation
205 | 		mockFindTasksJsonPath.mockImplementation((args) => {
206 | 			// Mock returning null for non-existent files
207 | 			if (args.file === 'non-existent-file.json') {
208 | 				return null;
209 | 			}
210 | 			return testTasksPath;
211 | 		});
212 | 	});
213 | 
214 | 	describe('listTasksDirect', () => {
		// Sample complexity report for testing — taskIds presumably line up
		// with the ids in the sample-tasks fixture (verify against fixture)
		const mockComplexityReport = {
			meta: {
				generatedAt: '2025-03-24T20:01:35.986Z',
				tasksAnalyzed: 3,
				thresholdScore: 5,
				projectName: 'Test Project',
				usedResearch: false
			},
			// One entry per analyzed task; complexityScore is merged into the
			// task list by the wrapper below
			complexityAnalysis: [
				{
					taskId: 1,
					taskTitle: 'Initialize Project',
					complexityScore: 3,
					recommendedSubtasks: 2
				},
				{
					taskId: 2,
					taskTitle: 'Create Core Functionality',
					complexityScore: 8,
					recommendedSubtasks: 5
				},
				{
					taskId: 3,
					taskTitle: 'Implement UI Components',
					complexityScore: 6,
					recommendedSubtasks: 4
				}
			]
		};
245 | 
246 | 		// Test wrapper function that doesn't rely on the actual implementation
247 | 		async function testListTasks(args, mockLogger) {
248 | 			// File not found case
249 | 			if (args.file === 'non-existent-file.json') {
250 | 				mockLogger.error('Tasks file not found');
251 | 				return {
252 | 					success: false,
253 | 					error: {
254 | 						code: 'FILE_NOT_FOUND_ERROR',
255 | 						message: 'Tasks file not found'
256 | 					}
257 | 				};
258 | 			}
259 | 
260 | 			// Check for complexity report
261 | 			const complexityReport = mockReadComplexityReport();
262 | 			let tasksData = [...sampleTasks.tasks];
263 | 
264 | 			// Add complexity scores if report exists
265 | 			if (complexityReport && complexityReport.complexityAnalysis) {
266 | 				tasksData = tasksData.map((task) => {
267 | 					const analysis = complexityReport.complexityAnalysis.find(
268 | 						(a) => a.taskId === task.id
269 | 					);
270 | 					if (analysis) {
271 | 						return { ...task, complexityScore: analysis.complexityScore };
272 | 					}
273 | 					return task;
274 | 				});
275 | 			}
276 | 
277 | 			// Success case
278 | 			if (!args.status && !args.withSubtasks) {
279 | 				return {
280 | 					success: true,
281 | 					data: {
282 | 						tasks: tasksData,
283 | 						stats: {
284 | 							total: tasksData.length,
285 | 							completed: tasksData.filter((t) => t.status === 'done').length,
286 | 							inProgress: tasksData.filter((t) => t.status === 'in-progress')
287 | 								.length,
288 | 							pending: tasksData.filter((t) => t.status === 'pending').length
289 | 						}
290 | 					}
291 | 				};
292 | 			}
293 | 
294 | 			// Status filter case
295 | 			if (args.status) {
296 | 				const filteredTasks = tasksData.filter((t) => t.status === args.status);
297 | 				return {
298 | 					success: true,
299 | 					data: {
300 | 						tasks: filteredTasks,
301 | 						filter: args.status,
302 | 						stats: {
303 | 							total: tasksData.length,
304 | 							filtered: filteredTasks.length
305 | 						}
306 | 					}
307 | 				};
308 | 			}
309 | 
310 | 			// Include subtasks case
311 | 			if (args.withSubtasks) {
312 | 				return {
313 | 					success: true,
314 | 					data: {
315 | 						tasks: tasksData,
316 | 						includeSubtasks: true,
317 | 						stats: {
318 | 							total: tasksData.length
319 | 						}
320 | 					}
321 | 				};
322 | 			}
323 | 
324 | 			// Default case
325 | 			return {
326 | 				success: true,
327 | 				data: { tasks: [] }
328 | 			};
329 | 		}
330 | 
		// The cases below exercise the wrapper above; each follows
		// Arrange / Act / Assert.
		test('should return all tasks when no filter is provided', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath
			};

			// Act
			const result = await testListTasks(args, mockLogger);

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.tasks.length).toBe(sampleTasks.tasks.length);
			expect(result.data.stats.total).toBe(sampleTasks.tasks.length);
		});

		test('should filter tasks by status', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				status: 'pending'
			};

			// Act
			const result = await testListTasks(args, mockLogger);

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.filter).toBe('pending');
			// Should only include pending tasks
			result.data.tasks.forEach((task) => {
				expect(task.status).toBe('pending');
			});
		});

		test('should include subtasks when requested', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				withSubtasks: true
			};

			// Act
			const result = await testListTasks(args, mockLogger);

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.includeSubtasks).toBe(true);

			// Verify subtasks are included for tasks that have them
			const tasksWithSubtasks = result.data.tasks.filter(
				(t) => t.subtasks && t.subtasks.length > 0
			);
			expect(tasksWithSubtasks.length).toBeGreaterThan(0);
		});

		test('should handle file not found errors', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: 'non-existent-file.json'
			};

			// Act
			const result = await testListTasks(args, mockLogger);

			// Assert
			expect(result.success).toBe(false);
			expect(result.error.code).toBe('FILE_NOT_FOUND_ERROR');
			expect(mockLogger.error).toHaveBeenCalled();
		});

		test('should include complexity scores when complexity report exists', async () => {
			// Arrange
			mockReadComplexityReport.mockReturnValueOnce(mockComplexityReport);
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				withSubtasks: true
			};

			// Act
			const result = await testListTasks(args, mockLogger);
			// Assert
			expect(result.success).toBe(true);

			// Check that tasks have complexity scores from the report
			mockComplexityReport.complexityAnalysis.forEach((analysis) => {
				const task = result.data.tasks.find((t) => t.id === analysis.taskId);
				if (task) {
					expect(task.complexityScore).toBe(analysis.complexityScore);
				}
			});
		});
427 | 	});
428 | 
429 | 	describe('expandTaskDirect', () => {
430 | 		// Test wrapper function that returns appropriate results based on the test case
431 | 		async function testExpandTask(args, mockLogger, options = {}) {
432 | 			// Missing task ID case
433 | 			if (!args.id) {
434 | 				mockLogger.error('Task ID is required');
435 | 				return {
436 | 					success: false,
437 | 					error: {
438 | 						code: 'INPUT_VALIDATION_ERROR',
439 | 						message: 'Task ID is required'
440 | 					}
441 | 				};
442 | 			}
443 | 
444 | 			// Non-existent task ID case
445 | 			if (args.id === '999') {
446 | 				mockLogger.error(`Task with ID ${args.id} not found`);
447 | 				return {
448 | 					success: false,
449 | 					error: {
450 | 						code: 'TASK_NOT_FOUND',
451 | 						message: `Task with ID ${args.id} not found`
452 | 					}
453 | 				};
454 | 			}
455 | 
456 | 			// Completed task case
457 | 			if (args.id === '1') {
458 | 				mockLogger.error(
459 | 					`Task ${args.id} is already marked as done and cannot be expanded`
460 | 				);
461 | 				return {
462 | 					success: false,
463 | 					error: {
464 | 						code: 'TASK_COMPLETED',
465 | 						message: `Task ${args.id} is already marked as done and cannot be expanded`
466 | 					}
467 | 				};
468 | 			}
469 | 
470 | 			// For successful cases, record that functions were called but don't make real calls
471 | 			mockEnableSilentMode();
472 | 
473 | 			// This is just a mock call that won't make real API requests
474 | 			// We're using mockExpandTask which is already a mock function
475 | 			const expandedTask = await mockExpandTask(
476 | 				parseInt(args.id, 10),
477 | 				args.num,
478 | 				args.research || false,
479 | 				args.prompt || '',
480 | 				{ mcpLog: mockLogger, session: options.session }
481 | 			);
482 | 
483 | 			mockDisableSilentMode();
484 | 
485 | 			return {
486 | 				success: true,
487 | 				data: {
488 | 					task: expandedTask,
489 | 					subtasksAdded: expandedTask.subtasks.length,
490 | 					hasExistingSubtasks: false
491 | 				}
492 | 			};
493 | 		}
494 | 
495 | 		test('should expand a task with subtasks', async () => {
496 | 			// Arrange
497 | 			const args = {
498 | 				projectRoot: testProjectRoot,
499 | 				file: testTasksPath,
500 | 				id: '3', // ID 3 exists in sampleTasks with status 'pending'
501 | 				num: 2
502 | 			};
503 | 
504 | 			// Act
505 | 			const result = await testExpandTask(args, mockLogger, {
506 | 				session: mockSession
507 | 			});
508 | 
509 | 			// Assert
510 | 			expect(result.success).toBe(true);
511 | 			expect(result.data.task).toBeDefined();
512 | 			expect(result.data.task.subtasks).toBeDefined();
513 | 			expect(result.data.task.subtasks.length).toBe(2);
514 | 			expect(mockExpandTask).toHaveBeenCalledWith(
515 | 				3, // Task ID as number
516 | 				2, // num parameter
517 | 				false, // useResearch
518 | 				'', // prompt
519 | 				expect.objectContaining({
520 | 					mcpLog: mockLogger,
521 | 					session: mockSession
522 | 				})
523 | 			);
524 | 			expect(mockEnableSilentMode).toHaveBeenCalled();
525 | 			expect(mockDisableSilentMode).toHaveBeenCalled();
526 | 		});
527 | 
528 | 		test('should handle missing task ID', async () => {
529 | 			// Arrange
530 | 			const args = {
531 | 				projectRoot: testProjectRoot,
532 | 				file: testTasksPath
533 | 				// id is intentionally missing
534 | 			};
535 | 
536 | 			// Act
537 | 			const result = await testExpandTask(args, mockLogger, {
538 | 				session: mockSession
539 | 			});
540 | 
541 | 			// Assert
542 | 			expect(result.success).toBe(false);
543 | 			expect(result.error.code).toBe('INPUT_VALIDATION_ERROR');
544 | 			expect(mockLogger.error).toHaveBeenCalled();
545 | 			// Make sure no real expand calls were made
546 | 			expect(mockExpandTask).not.toHaveBeenCalled();
547 | 		});
548 | 
549 | 		test('should handle non-existent task ID', async () => {
550 | 			// Arrange
551 | 			const args = {
552 | 				projectRoot: testProjectRoot,
553 | 				file: testTasksPath,
554 | 				id: '999' // Non-existent task ID
555 | 			};
556 | 
557 | 			// Act
558 | 			const result = await testExpandTask(args, mockLogger, {
559 | 				session: mockSession
560 | 			});
561 | 
562 | 			// Assert
563 | 			expect(result.success).toBe(false);
564 | 			expect(result.error.code).toBe('TASK_NOT_FOUND');
565 | 			expect(mockLogger.error).toHaveBeenCalled();
566 | 			// Make sure no real expand calls were made
567 | 			expect(mockExpandTask).not.toHaveBeenCalled();
568 | 		});
569 | 
570 | 		test('should handle completed tasks', async () => {
571 | 			// Arrange
572 | 			const args = {
573 | 				projectRoot: testProjectRoot,
574 | 				file: testTasksPath,
575 | 				id: '1' // Task with 'done' status in sampleTasks
576 | 			};
577 | 
578 | 			// Act
579 | 			const result = await testExpandTask(args, mockLogger, {
580 | 				session: mockSession
581 | 			});
582 | 
583 | 			// Assert
584 | 			expect(result.success).toBe(false);
585 | 			expect(result.error.code).toBe('TASK_COMPLETED');
586 | 			expect(mockLogger.error).toHaveBeenCalled();
587 | 			// Make sure no real expand calls were made
588 | 			expect(mockExpandTask).not.toHaveBeenCalled();
589 | 		});
590 | 
591 | 		test('should use AI client when research flag is set', async () => {
592 | 			// Arrange
593 | 			const args = {
594 | 				projectRoot: testProjectRoot,
595 | 				file: testTasksPath,
596 | 				id: '3',
597 | 				research: true
598 | 			};
599 | 
600 | 			// Act
601 | 			const result = await testExpandTask(args, mockLogger, {
602 | 				session: mockSession
603 | 			});
604 | 
605 | 			// Assert
606 | 			expect(result.success).toBe(true);
607 | 			expect(mockExpandTask).toHaveBeenCalledWith(
608 | 				3, // Task ID as number
609 | 				undefined, // args.num is undefined
610 | 				true, // useResearch should be true
611 | 				'', // prompt
612 | 				expect.objectContaining({
613 | 					mcpLog: mockLogger,
614 | 					session: mockSession
615 | 				})
616 | 			);
617 | 			// Verify the result includes research-backed subtasks
618 | 			expect(result.data.task.subtasks[0].title).toContain('Research-Backed');
619 | 		});
620 | 	});
621 | 
622 | 	describe('expandAllTasksDirect', () => {
623 | 		// Test wrapper function that returns appropriate results based on the test case
624 | 		async function testExpandAllTasks(args, mockLogger, options = {}) {
625 | 			// For successful cases, record that functions were called but don't make real calls
626 | 			mockEnableSilentMode();
627 | 
628 | 			// Mock expandAllTasks - now returns a structured object instead of undefined
629 | 			const mockExpandAll = jest.fn().mockImplementation(async () => {
630 | 				// Return the new structured response that matches the actual implementation
631 | 				return {
632 | 					success: true,
633 | 					expandedCount: 2,
634 | 					failedCount: 0,
635 | 					skippedCount: 1,
636 | 					tasksToExpand: 3,
637 | 					telemetryData: {
638 | 						timestamp: new Date().toISOString(),
639 | 						commandName: 'expand-all-tasks',
640 | 						totalCost: 0.05,
641 | 						totalTokens: 1000,
642 | 						inputTokens: 600,
643 | 						outputTokens: 400
644 | 					}
645 | 				};
646 | 			});
647 | 
648 | 			// Call mock expandAllTasks with the correct signature
649 | 			const result = await mockExpandAll(
650 | 				args.file, // tasksPath
651 | 				args.num, // numSubtasks
652 | 				args.research || false, // useResearch
653 | 				args.prompt || '', // additionalContext
654 | 				args.force || false, // force
655 | 				{
656 | 					mcpLog: mockLogger,
657 | 					session: options.session,
658 | 					projectRoot: args.projectRoot
659 | 				}
660 | 			);
661 | 
662 | 			mockDisableSilentMode();
663 | 
664 | 			return {
665 | 				success: true,
666 | 				data: {
667 | 					message: `Expand all operation completed. Expanded: ${result.expandedCount}, Failed: ${result.failedCount}, Skipped: ${result.skippedCount}`,
668 | 					details: {
669 | 						expandedCount: result.expandedCount,
670 | 						failedCount: result.failedCount,
671 | 						skippedCount: result.skippedCount,
672 | 						tasksToExpand: result.tasksToExpand
673 | 					},
674 | 					telemetryData: result.telemetryData
675 | 				}
676 | 			};
677 | 		}
678 | 
679 | 		test('should expand all pending tasks with subtasks', async () => {
680 | 			// Arrange
681 | 			const args = {
682 | 				projectRoot: testProjectRoot,
683 | 				file: testTasksPath,
684 | 				num: 3
685 | 			};
686 | 
687 | 			// Act
688 | 			const result = await testExpandAllTasks(args, mockLogger, {
689 | 				session: mockSession
690 | 			});
691 | 
692 | 			// Assert
693 | 			expect(result.success).toBe(true);
694 | 			expect(result.data.message).toMatch(/Expand all operation completed/);
695 | 			expect(result.data.details.expandedCount).toBe(2);
696 | 			expect(result.data.details.failedCount).toBe(0);
697 | 			expect(result.data.details.skippedCount).toBe(1);
698 | 			expect(result.data.details.tasksToExpand).toBe(3);
699 | 			expect(result.data.telemetryData).toBeDefined();
700 | 			expect(result.data.telemetryData.commandName).toBe('expand-all-tasks');
701 | 			expect(mockEnableSilentMode).toHaveBeenCalled();
702 | 			expect(mockDisableSilentMode).toHaveBeenCalled();
703 | 		});
704 | 
705 | 		test('should handle research flag', async () => {
706 | 			// Arrange
707 | 			const args = {
708 | 				projectRoot: testProjectRoot,
709 | 				file: testTasksPath,
710 | 				research: true,
711 | 				num: 2
712 | 			};
713 | 
714 | 			// Act
715 | 			const result = await testExpandAllTasks(args, mockLogger, {
716 | 				session: mockSession
717 | 			});
718 | 
719 | 			// Assert
720 | 			expect(result.success).toBe(true);
721 | 			expect(result.data.details.expandedCount).toBe(2);
722 | 			expect(result.data.telemetryData).toBeDefined();
723 | 			expect(mockEnableSilentMode).toHaveBeenCalled();
724 | 			expect(mockDisableSilentMode).toHaveBeenCalled();
725 | 		});
726 | 
727 | 		test('should handle force flag', async () => {
728 | 			// Arrange
729 | 			const args = {
730 | 				projectRoot: testProjectRoot,
731 | 				file: testTasksPath,
732 | 				force: true
733 | 			};
734 | 
735 | 			// Act
736 | 			const result = await testExpandAllTasks(args, mockLogger, {
737 | 				session: mockSession
738 | 			});
739 | 
740 | 			// Assert
741 | 			expect(result.success).toBe(true);
742 | 			expect(result.data.details.expandedCount).toBe(2);
743 | 			expect(result.data.telemetryData).toBeDefined();
744 | 			expect(mockEnableSilentMode).toHaveBeenCalled();
745 | 			expect(mockDisableSilentMode).toHaveBeenCalled();
746 | 		});
747 | 
748 | 		test('should handle additional context/prompt', async () => {
749 | 			// Arrange
750 | 			const args = {
751 | 				projectRoot: testProjectRoot,
752 | 				file: testTasksPath,
753 | 				prompt: 'Additional context for subtasks'
754 | 			};
755 | 
756 | 			// Act
757 | 			const result = await testExpandAllTasks(args, mockLogger, {
758 | 				session: mockSession
759 | 			});
760 | 
761 | 			// Assert
762 | 			expect(result.success).toBe(true);
763 | 			expect(result.data.details.expandedCount).toBe(2);
764 | 			expect(result.data.telemetryData).toBeDefined();
765 | 			expect(mockEnableSilentMode).toHaveBeenCalled();
766 | 			expect(mockDisableSilentMode).toHaveBeenCalled();
767 | 		});
768 | 
769 | 		test('should handle case with no eligible tasks', async () => {
770 | 			// Arrange
771 | 			const args = {
772 | 				projectRoot: testProjectRoot,
773 | 				file: testTasksPath,
774 | 				num: 3
775 | 			};
776 | 
777 | 			// Act - Mock the scenario where no tasks are eligible for expansion
778 | 			async function testNoEligibleTasks(args, mockLogger, options = {}) {
779 | 				mockEnableSilentMode();
780 | 
781 | 				const mockExpandAll = jest.fn().mockImplementation(async () => {
782 | 					return {
783 | 						success: true,
784 | 						expandedCount: 0,
785 | 						failedCount: 0,
786 | 						skippedCount: 0,
787 | 						tasksToExpand: 0,
788 | 						telemetryData: null,
789 | 						message: 'No tasks eligible for expansion.'
790 | 					};
791 | 				});
792 | 
793 | 				const result = await mockExpandAll(
794 | 					args.file,
795 | 					args.num,
796 | 					false,
797 | 					'',
798 | 					false,
799 | 					{
800 | 						mcpLog: mockLogger,
801 | 						session: options.session,
802 | 						projectRoot: args.projectRoot
803 | 					},
804 | 					'json'
805 | 				);
806 | 
807 | 				mockDisableSilentMode();
808 | 
809 | 				return {
810 | 					success: true,
811 | 					data: {
812 | 						message: result.message,
813 | 						details: {
814 | 							expandedCount: result.expandedCount,
815 | 							failedCount: result.failedCount,
816 | 							skippedCount: result.skippedCount,
817 | 							tasksToExpand: result.tasksToExpand
818 | 						},
819 | 						telemetryData: result.telemetryData
820 | 					}
821 | 				};
822 | 			}
823 | 
824 | 			const result = await testNoEligibleTasks(args, mockLogger, {
825 | 				session: mockSession
826 | 			});
827 | 
828 | 			// Assert
829 | 			expect(result.success).toBe(true);
830 | 			expect(result.data.message).toBe('No tasks eligible for expansion.');
831 | 			expect(result.data.details.expandedCount).toBe(0);
832 | 			expect(result.data.details.tasksToExpand).toBe(0);
833 | 			expect(result.data.telemetryData).toBeNull();
834 | 		});
835 | 	});
836 | });
837 | 
```
Page 35/52FirstPrevNextLast