This is page 26 of 38. Use http://codebase.md/eyaltoledano/claude-task-master?page={x} to view the full context.

# Directory Structure

```
├── .changeset
│   ├── config.json
│   └── README.md
├── .claude
│   ├── agents
│   │   ├── task-checker.md
│   │   ├── task-executor.md
│   │   └── task-orchestrator.md
│   ├── commands
│   │   ├── dedupe.md
│   │   └── tm
│   │       ├── add-dependency
│   │       │   └── add-dependency.md
│   │       ├── add-subtask
│   │       │   ├── add-subtask.md
│   │       │   └── convert-task-to-subtask.md
│   │       ├── add-task
│   │       │   └── add-task.md
│   │       ├── analyze-complexity
│   │       │   └── analyze-complexity.md
│   │       ├── complexity-report
│   │       │   └── complexity-report.md
│   │       ├── expand
│   │       │   ├── expand-all-tasks.md
│   │       │   └── expand-task.md
│   │       ├── fix-dependencies
│   │       │   └── fix-dependencies.md
│   │       ├── generate
│   │       │   └── generate-tasks.md
│   │       ├── help.md
│   │       ├── init
│   │       │   ├── init-project-quick.md
│   │       │   └── init-project.md
│   │       ├── learn.md
│   │       ├── list
│   │       │   ├── list-tasks-by-status.md
│   │       │   ├── list-tasks-with-subtasks.md
│   │       │   └── list-tasks.md
│   │       ├── models
│   │       │   ├── setup-models.md
│   │       │   └── view-models.md
│   │       ├── next
│   │       │   └── next-task.md
│   │       ├── parse-prd
│   │       │   ├── parse-prd-with-research.md
│   │       │   └── parse-prd.md
│   │       ├── remove-dependency
│   │       │   └── remove-dependency.md
│   │       ├── remove-subtask
│   │       │   └── remove-subtask.md
│   │       ├── remove-subtasks
│   │       │   ├── remove-all-subtasks.md
│   │       │   └── remove-subtasks.md
│   │       ├── remove-task
│   │       │   └── remove-task.md
│   │       ├── set-status
│   │       │   ├── to-cancelled.md
│   │       │   ├── to-deferred.md
│   │       │   ├── to-done.md
│   │       │   ├── to-in-progress.md
│   │       │   ├── to-pending.md
│   │       │   └── to-review.md
│   │       ├── setup
│   │       │   ├── install-taskmaster.md
│   │       │   └── quick-install-taskmaster.md
│   │       ├── show
│   │       │   └── show-task.md
│   │       ├── status
│   │       │   └── project-status.md
│   │       ├── sync-readme
│   │       │   └── sync-readme.md
│   │       ├── tm-main.md
│   │       ├── update
│   │       │   ├── update-single-task.md
│   │       │   ├── update-task.md
│   │       │   └── update-tasks-from-id.md
│   │       ├── utils
│   │       │   └── analyze-project.md
│   │       ├── validate-dependencies
│   │       │   └── validate-dependencies.md
│   │       └── workflows
│   │           ├── auto-implement-tasks.md
│   │           ├── command-pipeline.md
│   │           └── smart-workflow.md
│   └── TM_COMMANDS_GUIDE.md
├── .coderabbit.yaml
├── .cursor
│   ├── mcp.json
│   └── rules
│       ├── ai_providers.mdc
│       ├── ai_services.mdc
│       ├── architecture.mdc
│       ├── changeset.mdc
│       ├── commands.mdc
│       ├── context_gathering.mdc
│       ├── cursor_rules.mdc
│       ├── dependencies.mdc
│       ├── dev_workflow.mdc
│       ├── git_workflow.mdc
│       ├── glossary.mdc
│       ├── mcp.mdc
│       ├── new_features.mdc
│       ├── self_improve.mdc
│       ├── tags.mdc
│       ├── taskmaster.mdc
│       ├── tasks.mdc
│       ├── telemetry.mdc
│       ├── test_workflow.mdc
│       ├── tests.mdc
│       ├── ui.mdc
│       └── utilities.mdc
├── .cursorignore
├── .env.example
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── enhancements---feature-requests.md
│   │   └── feedback.md
│   ├── PULL_REQUEST_TEMPLATE
│   │   ├── bugfix.md
│   │   ├── config.yml
│   │   ├── feature.md
│   │   └── integration.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── scripts
│   │   ├── auto-close-duplicates.mjs
│   │   ├── backfill-duplicate-comments.mjs
│   │   ├── check-pre-release-mode.mjs
│   │   ├── parse-metrics.mjs
│   │   ├── release.mjs
│   │   ├── tag-extension.mjs
│   │   └── utils.mjs
│   └── workflows
│       ├── auto-close-duplicates.yml
│       ├── backfill-duplicate-comments.yml
│       ├── ci.yml
│       ├── claude-dedupe-issues.yml
│       ├── claude-docs-trigger.yml
│       ├── claude-docs-updater.yml
│       ├── claude-issue-triage.yml
│       ├── claude.yml
│       ├── extension-ci.yml
│       ├── extension-release.yml
│       ├── log-issue-events.yml
│       ├── pre-release.yml
│       ├── release-check.yml
│       ├── release.yml
│       ├── update-models-md.yml
│       └── weekly-metrics-discord.yml
├── .gitignore
├── .kiro
│   ├── hooks
│   │   ├── tm-code-change-task-tracker.kiro.hook
│   │   ├── tm-complexity-analyzer.kiro.hook
│   │   ├── tm-daily-standup-assistant.kiro.hook
│   │   ├── tm-git-commit-task-linker.kiro.hook
│   │   ├── tm-pr-readiness-checker.kiro.hook
│   │   ├── tm-task-dependency-auto-progression.kiro.hook
│   │   └── tm-test-success-task-completer.kiro.hook
│   ├── settings
│   │   └── mcp.json
│   └── steering
│       ├── dev_workflow.md
│       ├── kiro_rules.md
│       ├── self_improve.md
│       ├── taskmaster_hooks_workflow.md
│       └── taskmaster.md
├── .manypkg.json
├── .mcp.json
├── .npmignore
├── .nvmrc
├── .taskmaster
│   ├── CLAUDE.md
│   ├── config.json
│   ├── docs
│   │   ├── MIGRATION-ROADMAP.md
│   │   ├── prd-tm-start.txt
│   │   ├── prd.txt
│   │   ├── README.md
│   │   ├── research
│   │   │   ├── 2025-06-14_how-can-i-improve-the-scope-up-and-scope-down-comm.md
│   │   │   ├── 2025-06-14_should-i-be-using-any-specific-libraries-for-this.md
│   │   │   ├── 2025-06-14_test-save-functionality.md
│   │   │   ├── 2025-06-14_test-the-fix-for-duplicate-saves-final-test.md
│   │   │   └── 2025-08-01_do-we-need-to-add-new-commands-or-can-we-just-weap.md
│   │   ├── task-template-importing-prd.txt
│   │   ├── test-prd.txt
│   │   └── tm-core-phase-1.txt
│   ├── reports
│   │   ├── task-complexity-report_cc-kiro-hooks.json
│   │   ├── task-complexity-report_test-prd-tag.json
│   │   ├── task-complexity-report_tm-core-phase-1.json
│   │   ├── task-complexity-report.json
│   │   └── tm-core-complexity.json
│   ├── state.json
│   ├── tasks
│   │   ├── task_001_tm-start.txt
│   │   ├── task_002_tm-start.txt
│   │   ├── task_003_tm-start.txt
│   │   ├── task_004_tm-start.txt
│   │   ├── task_007_tm-start.txt
│   │   └── tasks.json
│   └── templates
│       └── example_prd.txt
├── .vscode
│   ├── extensions.json
│   └── settings.json
├── apps
│   ├── cli
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── src
│   │   │   ├── commands
│   │   │   │   ├── auth.command.ts
│   │   │   │   ├── context.command.ts
│   │   │   │   ├── list.command.ts
│   │   │   │   ├── set-status.command.ts
│   │   │   │   ├── show.command.ts
│   │   │   │   └── start.command.ts
│   │   │   ├── index.ts
│   │   │   ├── ui
│   │   │   │   ├── components
│   │   │   │   │   ├── dashboard.component.ts
│   │   │   │   │   ├── header.component.ts
│   │   │   │   │   ├── index.ts
│   │   │   │   │   ├── next-task.component.ts
│   │   │   │   │   ├── suggested-steps.component.ts
│   │   │   │   │   └── task-detail.component.ts
│   │   │   │   └── index.ts
│   │   │   └── utils
│   │   │       ├── auto-update.ts
│   │   │       └── ui.ts
│   │   └── tsconfig.json
│   ├── docs
│   │   ├── archive
│   │   │   ├── ai-client-utils-example.mdx
│   │   │   ├── ai-development-workflow.mdx
│   │   │   ├── command-reference.mdx
│   │   │   ├── configuration.mdx
│   │   │   ├── cursor-setup.mdx
│   │   │   ├── examples.mdx
│   │   │   └── Installation.mdx
│   │   ├── best-practices
│   │   │   ├── advanced-tasks.mdx
│   │   │   ├── configuration-advanced.mdx
│   │   │   └── index.mdx
│   │   ├── capabilities
│   │   │   ├── cli-root-commands.mdx
│   │   │   ├── index.mdx
│   │   │   ├── mcp.mdx
│   │   │   └── task-structure.mdx
│   │   ├── CHANGELOG.md
│   │   ├── docs.json
│   │   ├── favicon.svg
│   │   ├── getting-started
│   │   │   ├── contribute.mdx
│   │   │   ├── faq.mdx
│   │   │   └── quick-start
│   │   │       ├── configuration-quick.mdx
│   │   │       ├── execute-quick.mdx
│   │   │       ├── installation.mdx
│   │   │       ├── moving-forward.mdx
│   │   │       ├── prd-quick.mdx
│   │   │       ├── quick-start.mdx
│   │   │       ├── requirements.mdx
│   │   │       ├── rules-quick.mdx
│   │   │       └── tasks-quick.mdx
│   │   ├── introduction.mdx
│   │   ├── licensing.md
│   │   ├── logo
│   │   │   ├── dark.svg
│   │   │   ├── light.svg
│   │   │   └── task-master-logo.png
│   │   ├── package.json
│   │   ├── README.md
│   │   ├── style.css
│   │   ├── vercel.json
│   │   └── whats-new.mdx
│   └── extension
│       ├── .vscodeignore
│       ├── assets
│       │   ├── banner.png
│       │   ├── icon-dark.svg
│       │   ├── icon-light.svg
│       │   ├── icon.png
│       │   ├── screenshots
│       │   │   ├── kanban-board.png
│       │   │   └── task-details.png
│       │   └── sidebar-icon.svg
│       ├── CHANGELOG.md
│       ├── components.json
│       ├── docs
│       │   ├── extension-CI-setup.md
│       │   └── extension-development-guide.md
│       ├── esbuild.js
│       ├── LICENSE
│       ├── package.json
│       ├── package.mjs
│       ├── package.publish.json
│       ├── README.md
│       ├── src
│       │   ├── components
│       │   │   ├── ConfigView.tsx
│       │   │   ├── constants.ts
│       │   │   ├── TaskDetails
│       │   │   │   ├── AIActionsSection.tsx
│       │   │   │   ├── DetailsSection.tsx
│       │   │   │   ├── PriorityBadge.tsx
│       │   │   │   ├── SubtasksSection.tsx
│       │   │   │   ├── TaskMetadataSidebar.tsx
│       │   │   │   └── useTaskDetails.ts
│       │   │   ├── TaskDetailsView.tsx
│       │   │   ├── TaskMasterLogo.tsx
│       │   │   └── ui
│       │   │       ├── badge.tsx
│       │   │       ├── breadcrumb.tsx
│       │   │       ├── button.tsx
│       │   │       ├── card.tsx
│       │   │       ├── collapsible.tsx
│       │   │       ├── CollapsibleSection.tsx
│       │   │       ├── dropdown-menu.tsx
│       │   │       ├── label.tsx
│       │   │       ├── scroll-area.tsx
│       │   │       ├── separator.tsx
│       │   │       ├── shadcn-io
│       │   │       │   └── kanban
│       │   │       │       └── index.tsx
│       │   │       └── textarea.tsx
│       │   ├── extension.ts
│       │   ├── index.ts
│       │   ├── lib
│       │   │   └── utils.ts
│       │   ├── services
│       │   │   ├── config-service.ts
│       │   │   ├── error-handler.ts
│       │   │   ├── notification-preferences.ts
│       │   │   ├── polling-service.ts
│       │   │   ├── polling-strategies.ts
│       │   │   ├── sidebar-webview-manager.ts
│       │   │   ├── task-repository.ts
│       │   │   ├── terminal-manager.ts
│       │   │   └── webview-manager.ts
│       │   ├── test
│       │   │   └── extension.test.ts
│       │   ├── utils
│       │   │   ├── configManager.ts
│       │   │   ├── connectionManager.ts
│       │   │   ├── errorHandler.ts
│       │   │   ├── event-emitter.ts
│       │   │   ├── logger.ts
│       │   │   ├── mcpClient.ts
│       │   │   ├── notificationPreferences.ts
│       │   │   └── task-master-api
│       │   │       ├── cache
│       │   │       │   └── cache-manager.ts
│       │   │       ├── index.ts
│       │   │       ├── mcp-client.ts
│       │   │       ├── transformers
│       │   │       │   └── task-transformer.ts
│       │   │       └── types
│       │   │           └── index.ts
│       │   └── webview
│       │       ├── App.tsx
│       │       ├── components
│       │       │   ├── AppContent.tsx
│       │       │   ├── EmptyState.tsx
│       │       │   ├── ErrorBoundary.tsx
│       │       │   ├── PollingStatus.tsx
│       │       │   ├── PriorityBadge.tsx
│       │       │   ├── SidebarView.tsx
│       │       │   ├── TagDropdown.tsx
│       │       │   ├── TaskCard.tsx
│       │       │   ├── TaskEditModal.tsx
│       │       │   ├── TaskMasterKanban.tsx
│       │       │   ├── ToastContainer.tsx
│       │       │   └── ToastNotification.tsx
│       │       ├── constants
│       │       │   └── index.ts
│       │       ├── contexts
│       │       │   └── VSCodeContext.tsx
│       │       ├── hooks
│       │       │   ├── useTaskQueries.ts
│       │       │   ├── useVSCodeMessages.ts
│       │       │   └── useWebviewHeight.ts
│       │       ├── index.css
│       │       ├── index.tsx
│       │       ├── providers
│       │       │   └── QueryProvider.tsx
│       │       ├── reducers
│       │       │   └── appReducer.ts
│       │       ├── sidebar.tsx
│       │       ├── types
│       │       │   └── index.ts
│       │       └── utils
│       │           ├── logger.ts
│       │           └── toast.ts
│       └── tsconfig.json
├── assets
│   ├── .windsurfrules
│   ├── AGENTS.md
│   ├── claude
│   │   ├── agents
│   │   │   ├── task-checker.md
│   │   │   ├── task-executor.md
│   │   │   └── task-orchestrator.md
│   │   ├── commands
│   │   │   └── tm
│   │   │       ├── add-dependency
│   │   │       │   └── add-dependency.md
│   │   │       ├── add-subtask
│   │   │       │   ├── add-subtask.md
│   │   │       │   └── convert-task-to-subtask.md
│   │   │       ├── add-task
│   │   │       │   └── add-task.md
│   │   │       ├── analyze-complexity
│   │   │       │   └── analyze-complexity.md
│   │   │       ├── clear-subtasks
│   │   │       │   ├── clear-all-subtasks.md
│   │   │       │   └── clear-subtasks.md
│   │   │       ├── complexity-report
│   │   │       │   └── complexity-report.md
│   │   │       ├── expand
│   │   │       │   ├── expand-all-tasks.md
│   │   │       │   └── expand-task.md
│   │   │       ├── fix-dependencies
│   │   │       │   └── fix-dependencies.md
│   │   │       ├── generate
│   │   │       │   └── generate-tasks.md
│   │   │       ├── help.md
│   │   │       ├── init
│   │   │       │   ├── init-project-quick.md
│   │   │       │   └── init-project.md
│   │   │       ├── learn.md
│   │   │       ├── list
│   │   │       │   ├── list-tasks-by-status.md
│   │   │       │   ├── list-tasks-with-subtasks.md
│   │   │       │   └── list-tasks.md
│   │   │       ├── models
│   │   │       │   ├── setup-models.md
│   │   │       │   └── view-models.md
│   │   │       ├── next
│   │   │       │   └── next-task.md
│   │   │       ├── parse-prd
│   │   │       │   ├── parse-prd-with-research.md
│   │   │       │   └── parse-prd.md
│   │   │       ├── remove-dependency
│   │   │       │   └── remove-dependency.md
│   │   │       ├── remove-subtask
│   │   │       │   └── remove-subtask.md
│   │   │       ├── remove-subtasks
│   │   │       │   ├── remove-all-subtasks.md
│   │   │       │   └── remove-subtasks.md
│   │   │       ├── remove-task
│   │   │       │   └── remove-task.md
│   │   │       ├── set-status
│   │   │       │   ├── to-cancelled.md
│   │   │       │   ├── to-deferred.md
│   │   │       │   ├── to-done.md
│   │   │       │   ├── to-in-progress.md
│   │   │       │   ├── to-pending.md
│   │   │       │   └── to-review.md
│   │   │       ├── setup
│   │   │       │   ├── install-taskmaster.md
│   │   │       │   └── quick-install-taskmaster.md
│   │   │       ├── show
│   │   │       │   └── show-task.md
│   │   │       ├── status
│   │   │       │   └── project-status.md
│   │   │       ├── sync-readme
│   │   │       │   └── sync-readme.md
│   │   │       ├── tm-main.md
│   │   │       ├── update
│   │   │       │   ├── update-single-task.md
│   │   │       │   ├── update-task.md
│   │   │       │   └── update-tasks-from-id.md
│   │   │       ├── utils
│   │   │       │   └── analyze-project.md
│   │   │       ├── validate-dependencies
│   │   │       │   └── validate-dependencies.md
│   │   │       └── workflows
│   │   │           ├── auto-implement-tasks.md
│   │   │           ├── command-pipeline.md
│   │   │           └── smart-workflow.md
│   │   └── TM_COMMANDS_GUIDE.md
│   ├── config.json
│   ├── env.example
│   ├── example_prd.txt
│   ├── gitignore
│   ├── kiro-hooks
│   │   ├── tm-code-change-task-tracker.kiro.hook
│   │   ├── tm-complexity-analyzer.kiro.hook
│   │   ├── tm-daily-standup-assistant.kiro.hook
│   │   ├── tm-git-commit-task-linker.kiro.hook
│   │   ├── tm-pr-readiness-checker.kiro.hook
│   │   ├── tm-task-dependency-auto-progression.kiro.hook
│   │   └── tm-test-success-task-completer.kiro.hook
│   ├── roocode
│   │   ├── .roo
│   │   │   ├── rules-architect
│   │   │   │   └── architect-rules
│   │   │   ├── rules-ask
│   │   │   │   └── ask-rules
│   │   │   ├── rules-code
│   │   │   │   └── code-rules
│   │   │   ├── rules-debug
│   │   │   │   └── debug-rules
│   │   │   ├── rules-orchestrator
│   │   │   │   └── orchestrator-rules
│   │   │   └── rules-test
│   │   │       └── test-rules
│   │   └── .roomodes
│   ├── rules
│   │   ├── cursor_rules.mdc
│   │   ├── dev_workflow.mdc
│   │   ├── self_improve.mdc
│   │   ├── taskmaster_hooks_workflow.mdc
│   │   └── taskmaster.mdc
│   └── scripts_README.md
├── bin
│   └── task-master.js
├── biome.json
├── CHANGELOG.md
├── CLAUDE.md
├── context
│   ├── chats
│   │   ├── add-task-dependencies-1.md
│   │   └── max-min-tokens.txt.md
│   ├── fastmcp-core.txt
│   ├── fastmcp-docs.txt
│   ├── MCP_INTEGRATION.md
│   ├── mcp-js-sdk-docs.txt
│   ├── mcp-protocol-repo.txt
│   ├── mcp-protocol-schema-03262025.json
│   └── mcp-protocol-spec.txt
├── CONTRIBUTING.md
├── docs
│   ├── CLI-COMMANDER-PATTERN.md
│   ├── command-reference.md
│   ├── configuration.md
│   ├── contributor-docs
│   │   └── testing-roo-integration.md
│   ├── cross-tag-task-movement.md
│   ├── examples
│   │   └── claude-code-usage.md
│   ├── examples.md
│   ├── licensing.md
│   ├── mcp-provider-guide.md
│   ├── mcp-provider.md
│   ├── migration-guide.md
│   ├── models.md
│   ├── providers
│   │   └── gemini-cli.md
│   ├── README.md
│   ├── scripts
│   │   └── models-json-to-markdown.js
│   ├── task-structure.md
│   └── tutorial.md
├── images
│   └── logo.png
├── index.js
├── jest.config.js
├── jest.resolver.cjs
├── LICENSE
├── llms-install.md
├── mcp-server
│   ├── server.js
│   └── src
│       ├── core
│       │   ├── __tests__
│       │   │   └── context-manager.test.js
│       │   ├── context-manager.js
│       │   ├── direct-functions
│       │   │   ├── add-dependency.js
│       │   │   ├── add-subtask.js
│       │   │   ├── add-tag.js
│       │   │   ├── add-task.js
│       │   │   ├── analyze-task-complexity.js
│       │   │   ├── cache-stats.js
│       │   │   ├── clear-subtasks.js
│       │   │   ├── complexity-report.js
│       │   │   ├── copy-tag.js
│       │   │   ├── create-tag-from-branch.js
│       │   │   ├── delete-tag.js
│       │   │   ├── expand-all-tasks.js
│       │   │   ├── expand-task.js
│       │   │   ├── fix-dependencies.js
│       │   │   ├── generate-task-files.js
│       │   │   ├── initialize-project.js
│       │   │   ├── list-tags.js
│       │   │   ├── list-tasks.js
│       │   │   ├── models.js
│       │   │   ├── move-task-cross-tag.js
│       │   │   ├── move-task.js
│       │   │   ├── next-task.js
│       │   │   ├── parse-prd.js
│       │   │   ├── remove-dependency.js
│       │   │   ├── remove-subtask.js
│       │   │   ├── remove-task.js
│       │   │   ├── rename-tag.js
│       │   │   ├── research.js
│       │   │   ├── response-language.js
│       │   │   ├── rules.js
│       │   │   ├── scope-down.js
│       │   │   ├── scope-up.js
│       │   │   ├── set-task-status.js
│       │   │   ├── show-task.js
│       │   │   ├── update-subtask-by-id.js
│       │   │   ├── update-task-by-id.js
│       │   │   ├── update-tasks.js
│       │   │   ├── use-tag.js
│       │   │   └── validate-dependencies.js
│       │   ├── task-master-core.js
│       │   └── utils
│       │       ├── env-utils.js
│       │       └── path-utils.js
│       ├── custom-sdk
│       │   ├── errors.js
│       │   ├── index.js
│       │   ├── json-extractor.js
│       │   ├── language-model.js
│       │   ├── message-converter.js
│       │   └── schema-converter.js
│       ├── index.js
│       ├── logger.js
│       ├── providers
│       │   └── mcp-provider.js
│       └── tools
│           ├── add-dependency.js
│           ├── add-subtask.js
│           ├── add-tag.js
│           ├── add-task.js
│           ├── analyze.js
│           ├── clear-subtasks.js
│           ├── complexity-report.js
│           ├── copy-tag.js
│           ├── delete-tag.js
│           ├── expand-all.js
│           ├── expand-task.js
│           ├── fix-dependencies.js
│           ├── generate.js
│           ├── get-operation-status.js
│           ├── get-task.js
│           ├── get-tasks.js
│           ├── index.js
│           ├── initialize-project.js
│           ├── list-tags.js
│           ├── models.js
│           ├── move-task.js
│           ├── next-task.js
│           ├── parse-prd.js
│           ├── remove-dependency.js
│           ├── remove-subtask.js
│           ├── remove-task.js
│           ├── rename-tag.js
│           ├── research.js
│           ├── response-language.js
│           ├── rules.js
│           ├── scope-down.js
│           ├── scope-up.js
│           ├── set-task-status.js
│           ├── update-subtask.js
│           ├── update-task.js
│           ├── update.js
│           ├── use-tag.js
│           ├── utils.js
│           └── validate-dependencies.js
├── mcp-test.js
├── output.json
├── package-lock.json
├── package.json
├── packages
│   ├── build-config
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── src
│   │   │   └── tsdown.base.ts
│   │   └── tsconfig.json
│   └── tm-core
│       ├── .gitignore
│       ├── CHANGELOG.md
│       ├── docs
│       │   └── listTasks-architecture.md
│       ├── package.json
│       ├── POC-STATUS.md
│       ├── README.md
│       ├── src
│       │   ├── auth
│       │   │   ├── auth-manager.test.ts
│       │   │   ├── auth-manager.ts
│       │   │   ├── config.ts
│       │   │   ├── credential-store.test.ts
│       │   │   ├── credential-store.ts
│       │   │   ├── index.ts
│       │   │   ├── oauth-service.ts
│       │   │   ├── supabase-session-storage.ts
│       │   │   └── types.ts
│       │   ├── clients
│       │   │   ├── index.ts
│       │   │   └── supabase-client.ts
│       │   ├── config
│       │   │   ├── config-manager.spec.ts
│       │   │   ├── config-manager.ts
│       │   │   ├── index.ts
│       │   │   └── services
│       │   │       ├── config-loader.service.spec.ts
│       │   │       ├── config-loader.service.ts
│       │   │       ├── config-merger.service.spec.ts
│       │   │       ├── config-merger.service.ts
│       │   │       ├── config-persistence.service.spec.ts
│       │   │       ├── config-persistence.service.ts
│       │   │       ├── environment-config-provider.service.spec.ts
│       │   │       ├── environment-config-provider.service.ts
│       │   │       ├── index.ts
│       │   │       ├── runtime-state-manager.service.spec.ts
│       │   │       └── runtime-state-manager.service.ts
│       │   ├── constants
│       │   │   └── index.ts
│       │   ├── entities
│       │   │   └── task.entity.ts
│       │   ├── errors
│       │   │   ├── index.ts
│       │   │   └── task-master-error.ts
│       │   ├── executors
│       │   │   ├── base-executor.ts
│       │   │   ├── claude-executor.ts
│       │   │   ├── executor-factory.ts
│       │   │   ├── executor-service.ts
│       │   │   ├── index.ts
│       │   │   └── types.ts
│       │   ├── index.ts
│       │   ├── interfaces
│       │   │   ├── ai-provider.interface.ts
│       │   │   ├── configuration.interface.ts
│       │   │   ├── index.ts
│       │   │   └── storage.interface.ts
│       │   ├── logger
│       │   │   ├── factory.ts
│       │   │   ├── index.ts
│       │   │   └── logger.ts
│       │   ├── mappers
│       │   │   └── TaskMapper.ts
│       │   ├── parser
│       │   │   └── index.ts
│       │   ├── providers
│       │   │   ├── ai
│       │   │   │   ├── base-provider.ts
│       │   │   │   └── index.ts
│       │   │   └── index.ts
│       │   ├── repositories
│       │   │   ├── supabase-task-repository.ts
│       │   │   └── task-repository.interface.ts
│       │   ├── services
│       │   │   ├── index.ts
│       │   │   ├── organization.service.ts
│       │   │   ├── task-execution-service.ts
│       │   │   └── task-service.ts
│       │   ├── storage
│       │   │   ├── api-storage.ts
│       │   │   ├── file-storage
│       │   │   │   ├── file-operations.ts
│       │   │   │   ├── file-storage.ts
│       │   │   │   ├── format-handler.ts
│       │   │   │   ├── index.ts
│       │   │   │   └── path-resolver.ts
│       │   │   ├── index.ts
│       │   │   └── storage-factory.ts
│       │   ├── subpath-exports.test.ts
│       │   ├── task-master-core.ts
│       │   ├── types
│       │   │   ├── database.types.ts
│       │   │   ├── index.ts
│       │   │   └── legacy.ts
│       │   └── utils
│       │       ├── id-generator.ts
│       │       └── index.ts
│       ├── tests
│       │   ├── integration
│       │   │   └── list-tasks.test.ts
│       │   ├── mocks
│       │   │   └── mock-provider.ts
│       │   ├── setup.ts
│       │   └── unit
│       │       ├── base-provider.test.ts
│       │       ├── executor.test.ts
│       │       └── smoke.test.ts
│       ├── tsconfig.json
│       └── vitest.config.ts
├── README-task-master.md
├── README.md
├── scripts
│   ├── dev.js
│   ├── init.js
│   ├── modules
│   │   ├── ai-services-unified.js
│   │   ├── commands.js
│   │   ├── config-manager.js
│   │   ├── dependency-manager.js
│   │   ├── index.js
│   │   ├── prompt-manager.js
│   │   ├── supported-models.json
│   │   ├── sync-readme.js
│   │   ├── task-manager
│   │   │   ├── add-subtask.js
│   │   │   ├── add-task.js
│   │   │   ├── analyze-task-complexity.js
│   │   │   ├── clear-subtasks.js
│   │   │   ├── expand-all-tasks.js
│   │   │   ├── expand-task.js
│   │   │   ├── find-next-task.js
│   │   │   ├── generate-task-files.js
│   │   │   ├── is-task-dependent.js
│   │   │   ├── list-tasks.js
│   │   │   ├── migrate.js
│   │   │   ├── models.js
│   │   │   ├── move-task.js
│   │   │   ├── parse-prd
│   │   │   │   ├── index.js
│   │   │   │   ├── parse-prd-config.js
│   │   │   │   ├── parse-prd-helpers.js
│   │   │   │   ├── parse-prd-non-streaming.js
│   │   │   │   ├── parse-prd-streaming.js
│   │   │   │   └── parse-prd.js
│   │   │   ├── remove-subtask.js
│   │   │   ├── remove-task.js
│   │   │   ├── research.js
│   │   │   ├── response-language.js
│   │   │   ├── scope-adjustment.js
│   │   │   ├── set-task-status.js
│   │   │   ├── tag-management.js
│   │   │   ├── task-exists.js
│   │   │   ├── update-single-task-status.js
│   │   │   ├── update-subtask-by-id.js
│   │   │   ├── update-task-by-id.js
│   │   │   └── update-tasks.js
│   │   ├── task-manager.js
│   │   ├── ui.js
│   │   ├── update-config-tokens.js
│   │   ├── utils
│   │   │   ├── contextGatherer.js
│   │   │   ├── fuzzyTaskSearch.js
│   │   │   └── git-utils.js
│   │   └── utils.js
│   ├── task-complexity-report.json
│   ├── test-claude-errors.js
│   └── test-claude.js
├── src
│   ├── ai-providers
│   │   ├── anthropic.js
│   │   ├── azure.js
│   │   ├── base-provider.js
│   │   ├── bedrock.js
│   │   ├── claude-code.js
│   │   ├── custom-sdk
│   │   │   ├── claude-code
│   │   │   │   ├── errors.js
│   │   │   │   ├── index.js
│   │   │   │   ├── json-extractor.js
│   │   │   │   ├── language-model.js
│   │   │   │   ├── message-converter.js
│   │   │   │   └── types.js
│   │   │   └── grok-cli
│   │   │       ├── errors.js
│   │   │       ├── index.js
│   │   │       ├── json-extractor.js
│   │   │       ├── language-model.js
│   │   │       ├── message-converter.js
│   │   │       └── types.js
│   │   ├── gemini-cli.js
│   │   ├── google-vertex.js
│   │   ├── google.js
│   │   ├── grok-cli.js
│   │   ├── groq.js
│   │   ├── index.js
│   │   ├── ollama.js
│   │   ├── openai.js
│   │   ├── openrouter.js
│   │   ├── perplexity.js
│   │   └── xai.js
│   ├── constants
│   │   ├── commands.js
│   │   ├── paths.js
│   │   ├── profiles.js
│   │   ├── providers.js
│   │   ├── rules-actions.js
│   │   ├── task-priority.js
│   │   └── task-status.js
│   ├── profiles
│   │   ├── amp.js
│   │   ├── base-profile.js
│   │   ├── claude.js
│   │   ├── cline.js
│   │   ├── codex.js
│   │   ├── cursor.js
│   │   ├── gemini.js
│   │   ├── index.js
│   │   ├── kilo.js
│   │   ├── kiro.js
│   │   ├── opencode.js
│   │   ├── roo.js
│   │   ├── trae.js
│   │   ├── vscode.js
│   │   ├── windsurf.js
│   │   └── zed.js
│   ├── progress
│   │   ├── base-progress-tracker.js
│   │   ├── cli-progress-factory.js
│   │   ├── parse-prd-tracker.js
│   │   ├── progress-tracker-builder.js
│   │   └── tracker-ui.js
│   ├── prompts
│   │   ├── add-task.json
│   │   ├── analyze-complexity.json
│   │   ├── expand-task.json
│   │   ├── parse-prd.json
│   │   ├── README.md
│   │   ├── research.json
│   │   ├── schemas
│   │   │   ├── parameter.schema.json
│   │   │   ├── prompt-template.schema.json
│   │   │   ├── README.md
│   │   │   └── variant.schema.json
│   │   ├── update-subtask.json
│   │   ├── update-task.json
│   │   └── update-tasks.json
│   ├── provider-registry
│   │   └── index.js
│   ├── task-master.js
│   ├── ui
│   │   ├── confirm.js
│   │   ├── indicators.js
│   │   └── parse-prd.js
│   └── utils
│       ├── asset-resolver.js
│       ├── create-mcp-config.js
│       ├── format.js
│       ├── getVersion.js
│       ├── logger-utils.js
│       ├── manage-gitignore.js
│       ├── path-utils.js
│       ├── profiles.js
│       ├── rule-transformer.js
│       ├── stream-parser.js
│       └── timeout-manager.js
├── test-clean-tags.js
├── test-config-manager.js
├── test-prd.txt
├── test-tag-functions.js
├── test-version-check-full.js
├── test-version-check.js
├── tests
│   ├── e2e
│   │   ├── e2e_helpers.sh
│   │   ├── parse_llm_output.cjs
│   │   ├── run_e2e.sh
│   │   ├── run_fallback_verification.sh
│   │   └── test_llm_analysis.sh
│   ├── fixture
│   │   └── test-tasks.json
│   ├── fixtures
│   │   ├── .taskmasterconfig
│   │   ├── sample-claude-response.js
│   │   ├── sample-prd.txt
│   │   └── sample-tasks.js
│   ├── integration
│   │   ├── claude-code-optional.test.js
│   │   ├── cli
│   │   │   ├── commands.test.js
│   │   │   ├── complex-cross-tag-scenarios.test.js
│   │   │   └── move-cross-tag.test.js
│   │   ├── manage-gitignore.test.js
│   │   ├── mcp-server
│   │   │   └── direct-functions.test.js
│   │   ├── move-task-cross-tag.integration.test.js
│   │   ├── move-task-simple.integration.test.js
│   │   └── profiles
│   │       ├── amp-init-functionality.test.js
│   │       ├── claude-init-functionality.test.js
│   │       ├── cline-init-functionality.test.js
│   │       ├── codex-init-functionality.test.js
│   │       ├── cursor-init-functionality.test.js
│   │       ├── gemini-init-functionality.test.js
│   │       ├── opencode-init-functionality.test.js
│   │       ├── roo-files-inclusion.test.js
│   │       ├── roo-init-functionality.test.js
│   │       ├── rules-files-inclusion.test.js
│   │       ├── trae-init-functionality.test.js
│   │       ├── vscode-init-functionality.test.js
│   │       └── windsurf-init-functionality.test.js
│   ├── manual
│   │   ├── progress
│   │   │   ├── parse-prd-analysis.js
│   │   │   ├── test-parse-prd.js
│   │   │   └── TESTING_GUIDE.md
│   │   └── prompts
│   │       ├── prompt-test.js
│   │       └── README.md
│   ├── README.md
│   ├── setup.js
│   └── unit
│       ├── ai-providers
│       │   ├── claude-code.test.js
│       │   ├── custom-sdk
│       │   │   └── claude-code
│       │   │       └── language-model.test.js
│       │   ├── gemini-cli.test.js
│       │   ├── mcp-components.test.js
│       │   └── openai.test.js
│       ├── ai-services-unified.test.js
│       ├── commands.test.js
│       ├── config-manager.test.js
│       ├── config-manager.test.mjs
│       ├── dependency-manager.test.js
│       ├── init.test.js
│       ├── initialize-project.test.js
│       ├── kebab-case-validation.test.js
│       ├── manage-gitignore.test.js
│       ├── mcp
│       │   └── tools
│       │       ├── __mocks__
│       │       │   └── move-task.js
│       │       ├── add-task.test.js
│       │       ├── analyze-complexity.test.js
│       │       ├── expand-all.test.js
│       │       ├── get-tasks.test.js
│       │       ├── initialize-project.test.js
│       │       ├── move-task-cross-tag-options.test.js
│       │       ├── move-task-cross-tag.test.js
│       │       └── remove-task.test.js
│       ├── mcp-providers
│       │   ├── mcp-components.test.js
│       │   └── mcp-provider.test.js
│       ├── parse-prd.test.js
│       ├── profiles
│       │   ├── amp-integration.test.js
│       │   ├── claude-integration.test.js
│       │   ├── cline-integration.test.js
│       │   ├── codex-integration.test.js
│       │   ├── cursor-integration.test.js
│       │   ├── gemini-integration.test.js
│       │   ├── kilo-integration.test.js
│       │   ├── kiro-integration.test.js
│       │   ├── mcp-config-validation.test.js
│       │   ├── opencode-integration.test.js
│       │   ├── profile-safety-check.test.js
│       │   ├── roo-integration.test.js
│       │   ├── rule-transformer-cline.test.js
│       │   ├── rule-transformer-cursor.test.js
│       │   ├── rule-transformer-gemini.test.js
│       │   ├── rule-transformer-kilo.test.js
│       │   ├── rule-transformer-kiro.test.js
│       │   ├── rule-transformer-opencode.test.js
│       │   ├── rule-transformer-roo.test.js
│       │   ├── rule-transformer-trae.test.js
│       │   ├── rule-transformer-vscode.test.js
│       │   ├── rule-transformer-windsurf.test.js
│       │   ├── rule-transformer-zed.test.js
│       │   ├── rule-transformer.test.js
│       │   ├── selective-profile-removal.test.js
│       │   ├── subdirectory-support.test.js
│       │   ├── trae-integration.test.js
│       │   ├── vscode-integration.test.js
│       │   ├── windsurf-integration.test.js
│       │   └── zed-integration.test.js
│       ├── progress
│       │   └── base-progress-tracker.test.js
│       ├── prompt-manager.test.js
│       ├── prompts
│       │   └── expand-task-prompt.test.js
│       ├── providers
│       │   └── provider-registry.test.js
│       ├── scripts
│       │   └── modules
│       │       ├── commands
│       │       │   ├── move-cross-tag.test.js
│       │       │   └── README.md
│       │       ├── dependency-manager
│       │       │   ├── circular-dependencies.test.js
│       │       │   ├── cross-tag-dependencies.test.js
│       │       │   └── fix-dependencies-command.test.js
│       │       ├── task-manager
│       │       │   ├── add-subtask.test.js
│       │       │   ├── add-task.test.js
│       │       │   ├── analyze-task-complexity.test.js
│       │       │   ├── clear-subtasks.test.js
│       │       │   ├── complexity-report-tag-isolation.test.js
│       │       │   ├── expand-all-tasks.test.js
│       │       │   ├── expand-task.test.js
│       │       │   ├── find-next-task.test.js
│       │       │   ├── generate-task-files.test.js
│       │       │   ├── list-tasks.test.js
│       │       │   ├── move-task-cross-tag.test.js
│       │       │   ├── move-task.test.js
│       │       │   ├── parse-prd.test.js
│       │       │   ├── remove-subtask.test.js
│       │       │   ├── remove-task.test.js
│       │       │   ├── research.test.js
│       │       │   ├── scope-adjustment.test.js
│       │       │   ├── set-task-status.test.js
│       │       │   ├── setup.js
│       │       │   ├── update-single-task-status.test.js
│       │       │   ├── update-subtask-by-id.test.js
│       │       │   ├── update-task-by-id.test.js
│       │       │   └── update-tasks.test.js
│       │       ├── ui
│       │       │   └── cross-tag-error-display.test.js
│       │       └── utils-tag-aware-paths.test.js
│       ├── task-finder.test.js
│       ├── task-manager
│       │   ├── clear-subtasks.test.js
│       │   ├── move-task.test.js
│       │   ├── tag-boundary.test.js
│       │   └── tag-management.test.js
│       ├── task-master.test.js
│       ├── ui
│       │   └── indicators.test.js
│       ├── ui.test.js
│       ├── utils-strip-ansi.test.js
│       └── utils.test.js
├── tsconfig.json
├── tsdown.config.ts
└── turbo.json
```

# Files

--------------------------------------------------------------------------------
/tests/integration/mcp-server/direct-functions.test.js:
--------------------------------------------------------------------------------

```javascript
/**
 * Integration test for direct function imports in MCP server
 */
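// Editorial note (not part of the original file): this suite creates explicit
// jest.fn() instances first and hands them to the jest.mock() factories
// further down, so each factory closes over a stable reference that
// individual tests can re-program via mockReturnValueOnce() /
// mockImplementation() in beforeEach().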
import { jest } from '@jest/globals';
import path, { dirname } from 'path';
import { fileURLToPath } from 'url';

// Get the current module's directory
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);

// Test file paths
const testProjectRoot = path.join(__dirname, '../../fixtures');
const testTasksPath = path.join(testProjectRoot, 'test-tasks.json');

// Create explicit mock functions
const mockExistsSync = jest.fn().mockReturnValue(true);
const mockWriteFileSync = jest.fn();
const mockReadFileSync = jest.fn();
const mockUnlinkSync = jest.fn();
const mockMkdirSync = jest.fn();
const mockFindTasksJsonPath = jest.fn().mockReturnValue(testTasksPath);
const mockReadJSON = jest.fn();
const mockWriteJSON = jest.fn();
const mockEnableSilentMode = jest.fn();
const mockDisableSilentMode = jest.fn();
const mockReadComplexityReport = jest.fn().mockReturnValue(null);
const mockGetAnthropicClient = jest.fn().mockReturnValue({});
const mockGetConfiguredAnthropicClient = jest.fn().mockReturnValue({});
const mockHandleAnthropicStream = jest.fn().mockResolvedValue(
	JSON.stringify([
		{
			id: 1,
			title: 'Mock Subtask 1',
			description: 'First mock subtask',
			dependencies: [],
			details: 'Implementation details for mock subtask 1'
		},
		{
			id: 2,
			title: 'Mock Subtask 2',
			description: 'Second mock subtask',
			dependencies: [1],
			details: 'Implementation details for mock subtask 2'
		}
	])
);
const mockParseSubtasksFromText = jest.fn().mockReturnValue([
	{
		id: 1,
		title: 'Mock Subtask 1',
		description: 'First mock subtask',
		status: 'pending',
		dependencies: []
	},
	{
		id: 2,
		title: 'Mock Subtask 2',
		description: 'Second mock subtask',
		status: 'pending',
		dependencies: [1]
	}
]);
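// Editorial note (assumption about intent, not part of the original file):
// these two mocks stand in for a stream-then-parse pipeline --
// mockHandleAnthropicStream resolves raw JSON text, and
// mockParseSubtasksFromText returns the subtask objects that parsing that
// text would produce.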
// Create a mock for expandTask that returns predefined responses instead of making real calls
const mockExpandTask = jest
	.fn()
	.mockImplementation(
		(taskId, numSubtasks, useResearch, additionalContext, options) => {
			const task = {
				...(sampleTasks.tasks.find((t) => t.id === taskId) || {}),
				subtasks: useResearch
					? [
							{
								id: 1,
								title: 'Research-Backed Subtask 1',
								description: 'First research-backed subtask',
								status: 'pending',
								dependencies: []
							},
							{
								id: 2,
								title: 'Research-Backed Subtask 2',
								description: 'Second research-backed subtask',
								status: 'pending',
								dependencies: [1]
							}
						]
					: [
							{
								id: 1,
								title: 'Mock Subtask 1',
								description: 'First mock subtask',
								status: 'pending',
								dependencies: []
							},
							{
								id: 2,
								title: 'Mock Subtask 2',
								description: 'Second mock subtask',
								status: 'pending',
								dependencies: [1]
							}
						]
			};
			return Promise.resolve(task);
		}
	);

const mockGenerateTaskFiles = jest.fn().mockResolvedValue(true);
const mockFindTaskById = jest.fn();
const mockTaskExists = jest.fn().mockReturnValue(true);

// Mock fs module to avoid file system operations
jest.mock('fs', () => ({
	existsSync: mockExistsSync,
	writeFileSync: mockWriteFileSync,
	readFileSync: mockReadFileSync,
	unlinkSync: mockUnlinkSync,
	mkdirSync: mockMkdirSync
}));

// Mock utils functions to avoid actual file operations
jest.mock('../../../scripts/modules/utils.js', () => ({
	readJSON: mockReadJSON,
	writeJSON: mockWriteJSON,
	enableSilentMode: mockEnableSilentMode,
	disableSilentMode: mockDisableSilentMode,
	readComplexityReport: mockReadComplexityReport,
	CONFIG: {
		model: 'claude-3-7-sonnet-20250219',
		maxTokens: 8192,
		temperature: 0.2,
		defaultSubtasks: 5
	}
}));

// Mock path-utils with findTasksJsonPath
jest.mock('../../../mcp-server/src/core/utils/path-utils.js', () => ({
	findTasksJsonPath: mockFindTasksJsonPath
}));

// Mock the AI module to prevent any real API calls
jest.mock('../../../scripts/modules/ai-services-unified.js', () => ({
	// Mock the functions exported by ai-services-unified.js as needed
	// For example, if you are testing a function that uses generateTextService:
	generateTextService: jest.fn().mockResolvedValue('Mock AI Response')
	// Add other mocks for generateObjectService, streamTextService if used
}));

// Mock task-manager.js to avoid real operations
jest.mock('../../../scripts/modules/task-manager.js', () => ({
	expandTask: mockExpandTask,
	generateTaskFiles: mockGenerateTaskFiles,
	findTaskById: mockFindTaskById,
	taskExists: mockTaskExists
}));

// Import dependencies after mocks are set up
import { sampleTasks } from '../../fixtures/sample-tasks.js';

// Mock logger
const mockLogger = {
	info: jest.fn(),
	error: jest.fn(),
	debug: jest.fn(),
	warn: jest.fn()
};

// Mock session
const mockSession = {
	env: {
		ANTHROPIC_API_KEY: 'mock-api-key',
		MODEL: 'claude-3-sonnet-20240229',
		MAX_TOKENS: 4000,
		TEMPERATURE: '0.2'
	}
};

describe('MCP Server Direct Functions', () => {
	// Set up before each test
	beforeEach(() => {
		jest.clearAllMocks();

		// Default mockReadJSON implementation
		mockReadJSON.mockReturnValue(JSON.parse(JSON.stringify(sampleTasks)));

		// Default mockFindTaskById implementation
		mockFindTaskById.mockImplementation((tasks, taskId) => {
			const id = parseInt(taskId, 10);
			return tasks.find((t) => t.id === id);
		});

		// Default mockTaskExists implementation
		mockTaskExists.mockImplementation((tasks, taskId) => {
			const id = parseInt(taskId, 10);
			return tasks.some((t) => t.id === id);
		});

		// Default findTasksJsonPath implementation
		mockFindTasksJsonPath.mockImplementation((args) => {
			// Mock returning null for non-existent files
			if (args.file === 'non-existent-file.json') {
				return null;
			}
			return testTasksPath;
		});
	});

	describe('listTasksDirect', () => {
		// Sample complexity report for testing
		const mockComplexityReport = {
			meta: {
				generatedAt: '2025-03-24T20:01:35.986Z',
				tasksAnalyzed: 3,
				thresholdScore: 5,
				projectName: 'Test Project',
				usedResearch: false
			},
			complexityAnalysis: [
				{
					taskId: 1,
					taskTitle: 'Initialize Project',
					complexityScore: 3,
					recommendedSubtasks: 2
				},
				{
					taskId: 2,
					taskTitle: 'Create Core Functionality',
					complexityScore: 8,
					recommendedSubtasks: 5
				},
				{
					taskId: 3,
					taskTitle: 'Implement UI Components',
					complexityScore: 6,
					recommendedSubtasks: 4
				}
			]
		};

		// Test wrapper function that doesn't rely on the actual implementation
		async function testListTasks(args, mockLogger) {
			// File not found case
			if (args.file === 'non-existent-file.json') {
				mockLogger.error('Tasks file not found');
				return {
					success: false,
					error: {
						code: 'FILE_NOT_FOUND_ERROR',
						message: 'Tasks file not found'
					}
				};
			}

			// Check for complexity report
			const complexityReport = mockReadComplexityReport();
			let tasksData = [...sampleTasks.tasks];

			// Add complexity scores if report exists
			if (complexityReport && complexityReport.complexityAnalysis) {
				tasksData = tasksData.map((task) => {
					const analysis = complexityReport.complexityAnalysis.find(
						(a) => a.taskId === task.id
					);
					if (analysis) {
						return { ...task, complexityScore: analysis.complexityScore };
					}
					return task;
				});
			}

			// Success case
			if (!args.status && !args.withSubtasks) {
				return {
					success: true,
					data: {
						tasks: tasksData,
						stats: {
							total: tasksData.length,
							completed: tasksData.filter((t) => t.status === 'done').length,
							inProgress: tasksData.filter((t) => t.status === 'in-progress')
								.length,
							pending: tasksData.filter((t) => t.status === 'pending').length
						}
					}
				};
			}

			// Status filter case
			if (args.status) {
				const filteredTasks = tasksData.filter(
					(t) => t.status === args.status
				);
				return {
					success: true,
					data: {
						tasks: filteredTasks,
						filter: args.status,
						stats: {
							total: tasksData.length,
							filtered: filteredTasks.length
						}
					}
				};
			}

			// Include subtasks case
			if (args.withSubtasks) {
				return {
					success: true,
					data: {
						tasks: tasksData,
						includeSubtasks: true,
						stats: {
							total: tasksData.length
						}
					}
				};
			}

			// Default case
			return { success: true, data: { tasks: [] } };
		}
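		// Worked example (editorial sketch, not part of the original file): with
		// mockComplexityReport active, task 2 ('Create Core Functionality') comes
		// back as { ...task, complexityScore: 8 } because the wrapper above joins
		// complexityAnalysis entries onto tasks by taskId.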
		test('should return all tasks when no filter is provided', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath
			};

			// Act
			const result = await testListTasks(args, mockLogger);

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.tasks.length).toBe(sampleTasks.tasks.length);
			expect(result.data.stats.total).toBe(sampleTasks.tasks.length);
		});

		test('should filter tasks by status', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				status: 'pending'
			};

			// Act
			const result = await testListTasks(args, mockLogger);

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.filter).toBe('pending');
			// Should only include pending tasks
			result.data.tasks.forEach((task) => {
				expect(task.status).toBe('pending');
			});
		});

		test('should include subtasks when requested', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				withSubtasks: true
			};

			// Act
			const result = await testListTasks(args, mockLogger);

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.includeSubtasks).toBe(true);

			// Verify subtasks are included for tasks that have them
			const tasksWithSubtasks = result.data.tasks.filter(
				(t) => t.subtasks && t.subtasks.length > 0
			);
			expect(tasksWithSubtasks.length).toBeGreaterThan(0);
		});

		test('should handle file not found errors', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: 'non-existent-file.json'
			};

			// Act
			const result = await testListTasks(args, mockLogger);

			// Assert
			expect(result.success).toBe(false);
			expect(result.error.code).toBe('FILE_NOT_FOUND_ERROR');
			expect(mockLogger.error).toHaveBeenCalled();
		});

		test('should include complexity scores when complexity report exists', async () => {
			// Arrange
			mockReadComplexityReport.mockReturnValueOnce(mockComplexityReport);
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				withSubtasks: true
			};

			// Act
			const result = await testListTasks(args, mockLogger);

			// Assert
			expect(result.success).toBe(true);
			// Check that tasks have complexity scores from the report
			mockComplexityReport.complexityAnalysis.forEach((analysis) => {
				const task = result.data.tasks.find((t) => t.id === analysis.taskId);
				if (task) {
					expect(task.complexityScore).toBe(analysis.complexityScore);
				}
			});
		});
	});

	describe('expandTaskDirect', () => {
		// Test wrapper function that returns appropriate results based on the test case
		async function testExpandTask(args, mockLogger, options = {}) {
			// Missing task ID case
			if (!args.id) {
				mockLogger.error('Task ID is required');
				return {
					success: false,
					error: {
						code: 'INPUT_VALIDATION_ERROR',
						message: 'Task ID is required'
					}
				};
			}

			// Non-existent task ID case
			if (args.id === '999') {
				mockLogger.error(`Task with ID ${args.id} not found`);
				return {
					success: false,
					error: {
						code: 'TASK_NOT_FOUND',
						message: `Task with ID ${args.id} not found`
					}
				};
			}

			// Completed task case
			if (args.id === '1') {
				mockLogger.error(
					`Task ${args.id} is already marked as done and cannot be expanded`
				);
				return {
					success: false,
					error: {
						code: 'TASK_COMPLETED',
						message: `Task ${args.id} is already marked as done and cannot be expanded`
					}
				};
			}

			// For successful cases, record that functions were called but don't make real calls
			mockEnableSilentMode();

			// This is just a mock call that won't make real API requests
			// We're using mockExpandTask which is already a mock function
			const expandedTask = await mockExpandTask(
				parseInt(args.id, 10),
				args.num,
				args.research || false,
				args.prompt || '',
				{
					mcpLog: mockLogger,
					session: options.session
				}
			);

			mockDisableSilentMode();

			return {
				success: true,
				data: {
					task: expandedTask,
					subtasksAdded: expandedTask.subtasks.length,
					hasExistingSubtasks: false
				}
			};
		}

		test('should expand a task with subtasks', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				id: '3', // ID 3 exists in sampleTasks with status 'pending'
				num: 2
			};

			// Act
			const result = await testExpandTask(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.task).toBeDefined();
			expect(result.data.task.subtasks).toBeDefined();
			expect(result.data.task.subtasks.length).toBe(2);
			expect(mockExpandTask).toHaveBeenCalledWith(
				3, // Task ID as number
				2, // num parameter
				false, // useResearch
				'', // prompt
				expect.objectContaining({
					mcpLog: mockLogger,
					session: mockSession
				})
			);
			expect(mockEnableSilentMode).toHaveBeenCalled();
			expect(mockDisableSilentMode).toHaveBeenCalled();
		});

		test('should handle missing task ID', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath
				// id is intentionally missing
			};

			// Act
			const result = await testExpandTask(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(false);
			expect(result.error.code).toBe('INPUT_VALIDATION_ERROR');
			expect(mockLogger.error).toHaveBeenCalled();

			// Make sure no real expand calls were made
			expect(mockExpandTask).not.toHaveBeenCalled();
		});
		test('should handle non-existent task ID', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				id: '999' // Non-existent task ID
			};

			// Act
			const result = await testExpandTask(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(false);
			expect(result.error.code).toBe('TASK_NOT_FOUND');
			expect(mockLogger.error).toHaveBeenCalled();

			// Make sure no real expand calls were made
			expect(mockExpandTask).not.toHaveBeenCalled();
		});

		test('should handle completed tasks', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				id: '1' // Task with 'done' status in sampleTasks
			};

			// Act
			const result = await testExpandTask(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(false);
			expect(result.error.code).toBe('TASK_COMPLETED');
			expect(mockLogger.error).toHaveBeenCalled();

			// Make sure no real expand calls were made
			expect(mockExpandTask).not.toHaveBeenCalled();
		});

		test('should use AI client when research flag is set', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				id: '3',
				research: true
			};

			// Act
			const result = await testExpandTask(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(true);
			expect(mockExpandTask).toHaveBeenCalledWith(
				3, // Task ID as number
				undefined, // args.num is undefined
				true, // useResearch should be true
				'', // prompt
				expect.objectContaining({
					mcpLog: mockLogger,
					session: mockSession
				})
			);

			// Verify the result includes research-backed subtasks
			expect(result.data.task.subtasks[0].title).toContain('Research-Backed');
		});
	});

	describe('expandAllTasksDirect', () => {
		// Test wrapper function that returns appropriate results based on the test case
		async function testExpandAllTasks(args, mockLogger, options = {}) {
			// For successful cases, record that functions were called but don't make real calls
			mockEnableSilentMode();

			// Mock expandAllTasks - now returns a structured object instead of undefined
			const mockExpandAll = jest.fn().mockImplementation(async () => {
				// Return the new structured response that matches the actual implementation
				return {
					success: true,
					expandedCount: 2,
					failedCount: 0,
					skippedCount: 1,
					tasksToExpand: 3,
					telemetryData: {
						timestamp: new Date().toISOString(),
						commandName: 'expand-all-tasks',
						totalCost: 0.05,
						totalTokens: 1000,
						inputTokens: 600,
						outputTokens: 400
					}
				};
			});

			// Call mock expandAllTasks with the correct signature
			const result = await mockExpandAll(
				args.file, // tasksPath
				args.num, // numSubtasks
				args.research || false, // useResearch
				args.prompt || '', // additionalContext
				args.force || false, // force
				{
					mcpLog: mockLogger,
					session: options.session,
					projectRoot: args.projectRoot
				}
			);

			mockDisableSilentMode();
			return {
				success: true,
				data: {
					message: `Expand all operation completed. Expanded: ${result.expandedCount}, Failed: ${result.failedCount}, Skipped: ${result.skippedCount}`,
					details: {
						expandedCount: result.expandedCount,
						failedCount: result.failedCount,
						skippedCount: result.skippedCount,
						tasksToExpand: result.tasksToExpand
					},
					telemetryData: result.telemetryData
				}
			};
		}

		test('should expand all pending tasks with subtasks', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				num: 3
			};

			// Act
			const result = await testExpandAllTasks(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.message).toMatch(/Expand all operation completed/);
			expect(result.data.details.expandedCount).toBe(2);
			expect(result.data.details.failedCount).toBe(0);
			expect(result.data.details.skippedCount).toBe(1);
			expect(result.data.details.tasksToExpand).toBe(3);
			expect(result.data.telemetryData).toBeDefined();
			expect(result.data.telemetryData.commandName).toBe('expand-all-tasks');
			expect(mockEnableSilentMode).toHaveBeenCalled();
			expect(mockDisableSilentMode).toHaveBeenCalled();
		});

		test('should handle research flag', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				research: true,
				num: 2
			};

			// Act
			const result = await testExpandAllTasks(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.details.expandedCount).toBe(2);
			expect(result.data.telemetryData).toBeDefined();
			expect(mockEnableSilentMode).toHaveBeenCalled();
			expect(mockDisableSilentMode).toHaveBeenCalled();
		});

		test('should handle force flag', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				force: true
			};

			// Act
			const result = await testExpandAllTasks(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.details.expandedCount).toBe(2);
			expect(result.data.telemetryData).toBeDefined();
			expect(mockEnableSilentMode).toHaveBeenCalled();
			expect(mockDisableSilentMode).toHaveBeenCalled();
		});

		test('should handle additional context/prompt', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				prompt: 'Additional context for subtasks'
			};

			// Act
			const result = await testExpandAllTasks(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.details.expandedCount).toBe(2);
			expect(result.data.telemetryData).toBeDefined();
			expect(mockEnableSilentMode).toHaveBeenCalled();
			expect(mockDisableSilentMode).toHaveBeenCalled();
		});

		test('should handle case with no eligible tasks', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				num: 3
			};

			// Act - Mock the scenario where no tasks are eligible for expansion
			async function testNoEligibleTasks(args, mockLogger, options = {}) {
				mockEnableSilentMode();

				const mockExpandAll = jest.fn().mockImplementation(async () => {
					return {
						success: true,
						expandedCount: 0,
						failedCount: 0,
						skippedCount: 0,
						tasksToExpand: 0,
						telemetryData: null,
						message: 'No tasks eligible for expansion.'
					};
				});

				const result = await mockExpandAll(
					args.file,
					args.num,
					false,
					'',
					false,
					{
						mcpLog: mockLogger,
						session: options.session,
						projectRoot: args.projectRoot
					},
					'json'
				);

				mockDisableSilentMode();

				return {
					success: true,
					data: {
						message: result.message,
						details: {
							expandedCount: result.expandedCount,
							failedCount: result.failedCount,
							skippedCount: result.skippedCount,
							tasksToExpand: result.tasksToExpand
						},
						telemetryData: result.telemetryData
					}
				};
			}

			const result = await testNoEligibleTasks(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.message).toBe('No tasks eligible for expansion.');
			expect(result.data.details.expandedCount).toBe(0);
			expect(result.data.details.tasksToExpand).toBe(0);
			expect(result.data.telemetryData).toBeNull();
		});
	});
});
```

--------------------------------------------------------------------------------
/docs/models.md:
--------------------------------------------------------------------------------

```markdown
# Available Models as of September 19, 2025

## Main Models

| Provider | Model Name | SWE Score | Input Cost | Output Cost |
| --- | --- | --- | --- | --- |
| anthropic | claude-sonnet-4-20250514 | 0.727 | 3 | 15 |
| anthropic | claude-opus-4-20250514 | 0.725 | 15 | 75 |
| anthropic | claude-3-7-sonnet-20250219 | 0.623 | 3 | 15 |
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
| claude-code | opus | 0.725 | 0 | 0 |
| claude-code | sonnet | 0.727 | 0 | 0 |
| mcp | mcp-sampling | — | 0 | 0 |
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
| grok-cli | grok-4-latest | 0.7 | 0 | 0 |
| grok-cli | grok-3-latest | 0.65 | 0 | 0 |
| grok-cli | grok-3-fast | 0.6 | 0 | 0 |
| grok-cli | grok-3-mini-fast | 0.55 | 0 | 0 |
| openai | gpt-4o | 0.332 | 2.5 | 10 |
| openai | o1 | 0.489 | 15 | 60 |
| openai | o3 | 0.5 | 2 | 8 |
| openai | o3-mini | 0.493 | 1.1 | 4.4 |
| openai | o4-mini | 0.45 | 1.1 | 4.4 |
| openai | o1-mini | 0.4 | 1.1 | 4.4 |
| openai | o1-pro | — | 150 | 600 |
| openai | gpt-4-5-preview | 0.38 | 75 | 150 |
| openai | gpt-4-1-mini | — | 0.4 | 1.6 |
| openai | gpt-4-1-nano | — | 0.1 | 0.4 |
| openai | gpt-4o-mini | 0.3 | 0.15 | 0.6 |
| openai | gpt-5 | 0.749 | 5 | 20 |
| google | gemini-2.5-pro-preview-05-06 | 0.638 | — | — |
| google | gemini-2.5-pro-preview-03-25 | 0.638 | — | — |
| google | gemini-2.5-flash-preview-04-17 | 0.604 | — | — |
| google | gemini-2.0-flash | 0.518 | 0.15 | 0.6 |
| google | gemini-2.0-flash-lite | — | — | — |
| xai | grok-3 | — | 3 | 15 |
| xai | grok-3-fast | — | 5 | 25 |
| xai | grok-4 | — | 3 | 15 |
| groq | moonshotai/kimi-k2-instruct | 0.66 | 1 | 3 |
| groq | llama-3.3-70b-versatile | 0.55 | 0.59 | 0.79 |
| groq | llama-3.1-8b-instant | 0.32 | 0.05 | 0.08 |
| groq | llama-4-scout | 0.45 | 0.11 | 0.34 |
| groq | llama-4-maverick | 0.52 | 0.5 | 0.77 |
| groq | mixtral-8x7b-32768 | 0.35 | 0.24 | 0.24 |
| groq | qwen-qwq-32b-preview | 0.4 | 0.18 | 0.18 |
| groq | deepseek-r1-distill-llama-70b | 0.52 | 0.75 | 0.99 |
| groq | gemma2-9b-it | 0.3 | 0.2 | 0.2 |
| groq | whisper-large-v3 | — | 0.11 | 0 |
| perplexity | sonar-pro | — | 3 | 15 |
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
| openrouter | google/gemini-2.5-flash-preview-05-20 | — | 0.15 | 0.6 |
| openrouter | google/gemini-2.5-flash-preview-05-20:thinking | — | 0.15 | 3.5 |
| openrouter | google/gemini-2.5-pro-exp-03-25 | — | 0 | 0 |
| openrouter | deepseek/deepseek-chat-v3-0324 | — | 0.27 | 1.1 |
| openrouter | openai/gpt-4.1 | — | 2 | 8 |
| openrouter | openai/gpt-4.1-mini | — | 0.4 | 1.6 |
| openrouter | openai/gpt-4.1-nano | — | 0.1 | 0.4 |
| openrouter | openai/o3 | — | 10 | 40 |
| openrouter | openai/codex-mini | — | 1.5 | 6 |
| openrouter | openai/gpt-4o-mini | — | 0.15 | 0.6 |
| openrouter | openai/o4-mini | 0.45 | 1.1 | 4.4 |
| openrouter | openai/o4-mini-high | — | 1.1 | 4.4 |
| openrouter | openai/o1-pro | — | 150 | 600 |
| openrouter | meta-llama/llama-3.3-70b-instruct | — | 120 | 600 |
| openrouter | meta-llama/llama-4-maverick | — | 0.18 | 0.6 |
| openrouter | meta-llama/llama-4-scout | — | 0.08 | 0.3 |
| openrouter | qwen/qwen-max | — | 1.6 | 6.4 |
| openrouter | qwen/qwen-turbo | — | 0.05 | 0.2 |
| openrouter | qwen/qwen3-235b-a22b | — | 0.14 | 2 |
| openrouter | mistralai/mistral-small-3.1-24b-instruct | — | 0.1 | 0.3 |
| openrouter | mistralai/devstral-small | — | 0.1 | 0.3 |
| openrouter | mistralai/mistral-nemo | — | 0.03 | 0.07 |
| ollama | gpt-oss:latest | 0.607 | 0 | 0 |
| ollama | gpt-oss:20b | 0.607 | 0 | 0 |
| ollama | gpt-oss:120b | 0.624 | 0 | 0 |
| ollama | devstral:latest | — | 0 | 0 |
| ollama | qwen3:latest | — | 0 | 0 |
| ollama | qwen3:14b | — | 0 | 0 |
| ollama | qwen3:32b | — | 0 | 0 |
| ollama | mistral-small3.1:latest | — | 0 | 0 |
| ollama | llama3.3:latest | — | 0 | 0 |
| ollama | phi4:latest | — | 0 | 0 |
| azure | gpt-4o | 0.332 | 2.5 | 10 |
| azure | gpt-4o-mini | 0.3 | 0.15 | 0.6 |
| azure | gpt-4-1 | — | 2 | 10 |
| bedrock | us.anthropic.claude-3-haiku-20240307-v1:0 | 0.4 | 0.25 | 1.25 |
| bedrock | us.anthropic.claude-3-opus-20240229-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-3-5-sonnet-20240620-v1:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-5-sonnet-20241022-v2:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-7-sonnet-20250219-v1:0 | 0.623 | 3 | 15 |
| bedrock | us.anthropic.claude-3-5-haiku-20241022-v1:0 | 0.4 | 0.8 | 4 |
| bedrock | us.anthropic.claude-opus-4-20250514-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-sonnet-4-20250514-v1:0 | 0.727 | 3 | 15 |

## Research Models

| Provider | Model Name | SWE Score | Input Cost | Output Cost |
| --- | --- | --- | --- | --- |
| claude-code | opus | 0.725 | 0 | 0 |
| claude-code | sonnet | 0.727 | 0 | 0 |
| mcp | mcp-sampling | — | 0 | 0 |
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
| grok-cli | grok-4-latest | 0.7 | 0 | 0 |
| grok-cli | grok-3-latest | 0.65 | 0 | 0 |
| grok-cli | grok-3-fast | 0.6 | 0 | 0 |
| grok-cli | grok-3-mini-fast | 0.55 | 0 | 0 |
| openai | gpt-4o-search-preview | 0.33 | 2.5 | 10 |
| openai | gpt-4o-mini-search-preview | 0.3 | 0.15 | 0.6 |
| xai | grok-3 | — | 3 | 15 |
| xai | grok-3-fast | — | 5 | 25 |
| xai | grok-4 | — | 3 | 15 |
| groq | llama-3.3-70b-versatile | 0.55 | 0.59 | 0.79 |
| groq | llama-4-scout | 0.45 | 0.11 | 0.34 |
| groq | llama-4-maverick | 0.52 | 0.5 | 0.77 |
| groq | qwen-qwq-32b-preview | 0.4 | 0.18 | 0.18 |
| groq | deepseek-r1-distill-llama-70b | 0.52 | 0.75 | 0.99 |
| perplexity | sonar-pro | — | 3 | 15 |
| perplexity | sonar | — | 1 | 1 |
| perplexity | deep-research | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
| bedrock | us.anthropic.claude-3-opus-20240229-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-3-5-sonnet-20240620-v1:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-5-sonnet-20241022-v2:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-7-sonnet-20250219-v1:0 | 0.623 | 3 | 15 |
| bedrock | us.anthropic.claude-opus-4-20250514-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-sonnet-4-20250514-v1:0 | 0.727 | 3 | 15 |
| bedrock | us.deepseek.r1-v1:0 | — | 1.35 | 5.4 |

## Fallback Models

| Provider | Model Name | SWE Score | Input Cost | Output Cost |
| --- | --- | --- | --- | --- |
| anthropic | claude-sonnet-4-20250514 | 0.727 | 3 | 15 |
| anthropic | claude-opus-4-20250514 | 0.725 | 15 | 75 |
| anthropic | claude-3-7-sonnet-20250219 | 0.623 | 3 | 15 |
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
| claude-code | opus | 0.725 | 0 | 0 |
| claude-code | sonnet | 0.727 | 0 | 0 |
| mcp | mcp-sampling | — | 0 | 0 |
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
| grok-cli | grok-4-latest | 0.7 | 0 | 0 |
| grok-cli | grok-3-latest | 0.65 | 0 | 0 |
| grok-cli | grok-3-fast | 0.6 | 0 | 0 |
| grok-cli | grok-3-mini-fast | 0.55 | 0 | 0 |
| openai | gpt-4o | 0.332 | 2.5 | 10 |
| openai | o3 | 0.5 | 2 | 8 |
| openai | o4-mini | 0.45 | 1.1 | 4.4 |
| openai | gpt-5 | 0.749 | 5 | 20 |
| google | gemini-2.5-pro-preview-05-06 | 0.638 | — | — |
| google | gemini-2.5-pro-preview-03-25 | 0.638 | — | — |
| google | gemini-2.5-flash-preview-04-17 | 0.604 | — | — |
| google | gemini-2.0-flash | 0.518 | 0.15 | 0.6 |
| google | gemini-2.0-flash-lite | — | — | — |
| xai | grok-3 | — | 3 | 15 |
| xai | grok-3-fast | — | 5 | 25 |
| xai | grok-4 | — | 3 | 15 |
| groq | moonshotai/kimi-k2-instruct | 0.66 | 1 | 3 |
| groq | llama-3.3-70b-versatile | 0.55 | 0.59 | 0.79 |
| groq | llama-3.1-8b-instant | 0.32 | 0.05 | 0.08 |
| groq | llama-4-scout | 0.45 | 0.11 | 0.34 |
| groq | llama-4-maverick | 0.52 | 0.5 | 0.77 |
| groq | mixtral-8x7b-32768 | 0.35 | 0.24 | 0.24 |
| groq | qwen-qwq-32b-preview | 0.4 | 0.18 | 0.18 |
| groq | gemma2-9b-it | 0.3 | 0.2 | 0.2 |
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
| openrouter | google/gemini-2.5-flash-preview-05-20 | — | 0.15 | 0.6 |
| openrouter | google/gemini-2.5-flash-preview-05-20:thinking | — | 0.15 | 3.5 |
| openrouter | google/gemini-2.5-pro-exp-03-25 | — | 0 | 0 |
| openrouter | openai/gpt-4.1 | — | 2 | 8 |
| openrouter | openai/gpt-4.1-mini | — | 0.4 | 1.6 |
| openrouter | openai/gpt-4.1-nano | — | 0.1 | 0.4 |
| openrouter | openai/o3 | — | 10 | 40 |
| openrouter | openai/codex-mini | — | 1.5 | 6 |
| openrouter | openai/gpt-4o-mini | — | 0.15 | 0.6 |
| openrouter | openai/o4-mini | 0.45 | 1.1 | 4.4 |
| openrouter | openai/o4-mini-high | — | 1.1 | 4.4 |
| openrouter | openai/o1-pro | — | 150 | 600 |
| openrouter | meta-llama/llama-3.3-70b-instruct | — | 120 | 600 |
| openrouter | meta-llama/llama-4-maverick | — | 0.18 | 0.6 |
| openrouter | meta-llama/llama-4-scout | — | 0.08 | 0.3 |
| openrouter | qwen/qwen-max | — | 1.6 | 6.4 |
| openrouter | qwen/qwen-turbo | — | 0.05 | 0.2 |
| openrouter | qwen/qwen3-235b-a22b | — | 0.14 | 2 |
| openrouter | mistralai/mistral-small-3.1-24b-instruct | — | 0.1 | 0.3 |
| openrouter | mistralai/mistral-nemo | — | 0.03 | 0.07 |
| ollama | gpt-oss:latest | 0.607 | 0 | 0 |
| ollama | gpt-oss:20b | 0.607 | 0 | 0 |
| ollama | gpt-oss:120b | 0.624 | 0 | 0 |
| ollama | devstral:latest | — | 0 | 0 |
| ollama | qwen3:latest | — | 0 | 0 |
| 0 | | ollama | qwen3:32b | — | 0 | 0 | | ollama | mistral-small3.1:latest | — | 0 | 0 | | ollama | llama3.3:latest | — | 0 | 0 | | ollama | phi4:latest | — | 0 | 0 | | azure | gpt-4o | 0.332 | 2.5 | 10 | | azure | gpt-4o-mini | 0.3 | 0.15 | 0.6 | | azure | gpt-4-1 | — | 2 | 10 | | bedrock | us.anthropic.claude-3-haiku-20240307-v1:0 | 0.4 | 0.25 | 1.25 | | bedrock | us.anthropic.claude-3-opus-20240229-v1:0 | 0.725 | 15 | 75 | | bedrock | us.anthropic.claude-3-5-sonnet-20240620-v1:0 | 0.49 | 3 | 15 | | bedrock | us.anthropic.claude-3-5-sonnet-20241022-v2:0 | 0.49 | 3 | 15 | | bedrock | us.anthropic.claude-3-7-sonnet-20250219-v1:0 | 0.623 | 3 | 15 | | bedrock | us.anthropic.claude-3-5-haiku-20241022-v1:0 | 0.4 | 0.8 | 4 | | bedrock | us.anthropic.claude-opus-4-20250514-v1:0 | 0.725 | 15 | 75 | | bedrock | us.anthropic.claude-sonnet-4-20250514-v1:0 | 0.727 | 3 | 15 | ## Unsupported Models | Provider | Model Name | Reason | | ---------- | --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | openrouter | deepseek/deepseek-chat-v3-0324:free | Free OpenRouter models are not supported due to severe rate limits, lack of tool use support, and other reliability issues that make them impractical for production use. | | openrouter | mistralai/mistral-small-3.1-24b-instruct:free | Free OpenRouter models are not supported due to severe rate limits, lack of tool use support, and other reliability issues that make them impractical for production use. | | openrouter | thudm/glm-4-32b:free | Free OpenRouter models are not supported due to severe rate limits, lack of tool use support, and other reliability issues that make them impractical for production use. | ``` -------------------------------------------------------------------------------- /scripts/modules/task-manager/models.js: -------------------------------------------------------------------------------- ```javascript /** * models.js * Core functionality for managing AI model configurations */ import https from 'https'; import http from 'http'; import { getMainModelId, getResearchModelId, getFallbackModelId, getAvailableModels, getMainProvider, getResearchProvider, getFallbackProvider, isApiKeySet, getMcpApiKeyStatus, getConfig, writeConfig, isConfigFilePresent, getAllProviders, getBaseUrlForRole } from '../config-manager.js'; import { findConfigPath } from '../../../src/utils/path-utils.js'; import { log } from '../utils.js'; import { CUSTOM_PROVIDERS } from '../../../src/constants/providers.js'; // Constants const CONFIG_MISSING_ERROR = 'The configuration file is missing. Run "task-master init" to create it.'; /** * Fetches the list of models from OpenRouter API. * @returns {Promise<Array|null>} A promise that resolves with the list of model IDs or null if fetch fails. 
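 * @example
 * // Illustrative usage sketch only (not part of this module's exports):
 * // the promise resolves with null on any network or parse failure —
 * // it never rejects — so callers use a null check instead of try/catch.
 * const models = await fetchOpenRouterModels();
 * if (models === null) {
 *   console.error('Could not reach OpenRouter.');
 * } else {
 *   console.log(models.map((m) => m.id));
 * }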
*/ function fetchOpenRouterModels() { return new Promise((resolve) => { const options = { hostname: 'openrouter.ai', path: '/api/v1/models', method: 'GET', headers: { Accept: 'application/json' } }; const req = https.request(options, (res) => { let data = ''; res.on('data', (chunk) => { data += chunk; }); res.on('end', () => { if (res.statusCode === 200) { try { const parsedData = JSON.parse(data); resolve(parsedData.data || []); // Return the array of models } catch (e) { console.error('Error parsing OpenRouter response:', e); resolve(null); // Indicate failure } } else { console.error( `OpenRouter API request failed with status code: ${res.statusCode}` ); resolve(null); // Indicate failure } }); }); req.on('error', (e) => { console.error('Error fetching OpenRouter models:', e); resolve(null); // Indicate failure }); req.end(); }); } /** * Fetches the list of models from Ollama instance. * @param {string} baseURL - The base URL for the Ollama API (e.g., "http://localhost:11434/api") * @returns {Promise<Array|null>} A promise that resolves with the list of model objects or null if fetch fails. */ function fetchOllamaModels(baseURL = 'http://localhost:11434/api') { return new Promise((resolve) => { try { // Parse the base URL to extract hostname, port, and base path const url = new URL(baseURL); const isHttps = url.protocol === 'https:'; const port = url.port || (isHttps ? 443 : 80); const basePath = url.pathname.endsWith('/') ? url.pathname.slice(0, -1) : url.pathname; const options = { hostname: url.hostname, port: parseInt(port, 10), path: `${basePath}/tags`, method: 'GET', headers: { Accept: 'application/json' } }; const requestLib = isHttps ? https : http; const req = requestLib.request(options, (res) => { let data = ''; res.on('data', (chunk) => { data += chunk; }); res.on('end', () => { if (res.statusCode === 200) { try { const parsedData = JSON.parse(data); resolve(parsedData.models || []); // Return the array of models } catch (e) { console.error('Error parsing Ollama response:', e); resolve(null); // Indicate failure } } else { console.error( `Ollama API request failed with status code: ${res.statusCode}` ); resolve(null); // Indicate failure } }); }); req.on('error', (e) => { console.error('Error fetching Ollama models:', e); resolve(null); // Indicate failure }); req.end(); } catch (e) { console.error('Error parsing Ollama base URL:', e); resolve(null); // Indicate failure } }); } /** * Get the current model configuration * @param {Object} [options] - Options for the operation * @param {Object} [options.session] - Session object containing environment variables (for MCP) * @param {Function} [options.mcpLog] - MCP logger object (for MCP) * @param {string} [options.projectRoot] - Project root directory * @returns {Object} RESTful response with current model configuration */ async function getModelConfiguration(options = {}) { const { mcpLog, projectRoot, session } = options; const report = (level, ...args) => { if (mcpLog && typeof mcpLog[level] === 'function') { mcpLog[level](...args); } }; if (!projectRoot) { throw new Error('Project root is required but not found.'); } // Use centralized config path finding instead of hardcoded path const configPath = findConfigPath(null, { projectRoot }); const configExists = isConfigFilePresent(projectRoot); log( 'debug', `Checking for config file using findConfigPath, found: ${configPath}` ); log( 'debug', `Checking config file using isConfigFilePresent(), exists: ${configExists}` ); if (!configExists) { throw new 
Error(CONFIG_MISSING_ERROR); } try { // Get current settings - these should use the config from the found path automatically const mainProvider = getMainProvider(projectRoot); const mainModelId = getMainModelId(projectRoot); const researchProvider = getResearchProvider(projectRoot); const researchModelId = getResearchModelId(projectRoot); const fallbackProvider = getFallbackProvider(projectRoot); const fallbackModelId = getFallbackModelId(projectRoot); // Check API keys const mainCliKeyOk = isApiKeySet(mainProvider, session, projectRoot); const mainMcpKeyOk = getMcpApiKeyStatus(mainProvider, projectRoot); const researchCliKeyOk = isApiKeySet( researchProvider, session, projectRoot ); const researchMcpKeyOk = getMcpApiKeyStatus(researchProvider, projectRoot); const fallbackCliKeyOk = fallbackProvider ? isApiKeySet(fallbackProvider, session, projectRoot) : true; const fallbackMcpKeyOk = fallbackProvider ? getMcpApiKeyStatus(fallbackProvider, projectRoot) : true; // Get available models to find detailed info const availableModels = getAvailableModels(projectRoot); // Find model details const mainModelData = availableModels.find((m) => m.id === mainModelId); const researchModelData = availableModels.find( (m) => m.id === researchModelId ); const fallbackModelData = fallbackModelId ? availableModels.find((m) => m.id === fallbackModelId) : null; // Return structured configuration data return { success: true, data: { activeModels: { main: { provider: mainProvider, modelId: mainModelId, sweScore: mainModelData?.swe_score || null, cost: mainModelData?.cost_per_1m_tokens || null, keyStatus: { cli: mainCliKeyOk, mcp: mainMcpKeyOk } }, research: { provider: researchProvider, modelId: researchModelId, sweScore: researchModelData?.swe_score || null, cost: researchModelData?.cost_per_1m_tokens || null, keyStatus: { cli: researchCliKeyOk, mcp: researchMcpKeyOk } }, fallback: fallbackProvider ? 
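// A hedged consumer sketch for the envelope assembled here; `result` is a
// hypothetical local, not part of this module:
//   const result = await getModelConfiguration({ projectRoot: '/repo' });
//   if (result.success) {
//     const { provider, modelId } = result.data.activeModels.main;
//   } else {
//     console.error(result.error.code, result.error.message);
//   }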
{ provider: fallbackProvider, modelId: fallbackModelId, sweScore: fallbackModelData?.swe_score || null, cost: fallbackModelData?.cost_per_1m_tokens || null, keyStatus: { cli: fallbackCliKeyOk, mcp: fallbackMcpKeyOk } } : null }, message: 'Successfully retrieved current model configuration' } }; } catch (error) { report('error', `Error getting model configuration: ${error.message}`); return { success: false, error: { code: 'CONFIG_ERROR', message: error.message } }; } } /** * Get all available models not currently in use * @param {Object} [options] - Options for the operation * @param {Object} [options.session] - Session object containing environment variables (for MCP) * @param {Function} [options.mcpLog] - MCP logger object (for MCP) * @param {string} [options.projectRoot] - Project root directory * @returns {Object} RESTful response with available models */ async function getAvailableModelsList(options = {}) { const { mcpLog, projectRoot } = options; const report = (level, ...args) => { if (mcpLog && typeof mcpLog[level] === 'function') { mcpLog[level](...args); } }; if (!projectRoot) { throw new Error('Project root is required but not found.'); } // Use centralized config path finding instead of hardcoded path const configPath = findConfigPath(null, { projectRoot }); const configExists = isConfigFilePresent(projectRoot); log( 'debug', `Checking for config file using findConfigPath, found: ${configPath}` ); log( 'debug', `Checking config file using isConfigFilePresent(), exists: ${configExists}` ); if (!configExists) { throw new Error(CONFIG_MISSING_ERROR); } try { // Get all available models const allAvailableModels = getAvailableModels(projectRoot); if (!allAvailableModels || allAvailableModels.length === 0) { return { success: true, data: { models: [], message: 'No available models found' } }; } // Get currently used model IDs const mainModelId = getMainModelId(projectRoot); const researchModelId = getResearchModelId(projectRoot); const fallbackModelId = getFallbackModelId(projectRoot); // Filter out placeholder models and active models const activeIds = [mainModelId, researchModelId, fallbackModelId].filter( Boolean ); const otherAvailableModels = allAvailableModels.map((model) => ({ provider: model.provider || 'N/A', modelId: model.id, sweScore: model.swe_score || null, cost: model.cost_per_1m_tokens || null, allowedRoles: model.allowed_roles || [] })); return { success: true, data: { models: otherAvailableModels, message: `Successfully retrieved ${otherAvailableModels.length} available models` } }; } catch (error) { report('error', `Error getting available models: ${error.message}`); return { success: false, error: { code: 'MODELS_LIST_ERROR', message: error.message } }; } } /** * Update a specific model in the configuration * @param {string} role - The model role to update ('main', 'research', 'fallback') * @param {string} modelId - The model ID to set for the role * @param {Object} [options] - Options for the operation * @param {string} [options.providerHint] - Provider hint if already determined ('openrouter' or 'ollama') * @param {Object} [options.session] - Session object containing environment variables (for MCP) * @param {Function} [options.mcpLog] - MCP logger object (for MCP) * @param {string} [options.projectRoot] - Project root directory * @returns {Object} RESTful response with result of update operation */ async function setModel(role, modelId, options = {}) { const { mcpLog, projectRoot, providerHint } = options; const report = (level, ...args) => { if (mcpLog && 
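// Illustrative call shape for setModel (hypothetical values; assumes
// CUSTOM_PROVIDERS.OLLAMA === 'ollama'):
//   await setModel('main', 'llama3.3:latest', {
//     projectRoot: '/repo',
//     providerHint: 'ollama' // normally derived from the CLI's --ollama flag
//   });
// The hint short-circuits the internal model lookup, as the logic below shows.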
typeof mcpLog[level] === 'function') { mcpLog[level](...args); } }; if (!projectRoot) { throw new Error('Project root is required but not found.'); } // Use centralized config path finding instead of hardcoded path const configPath = findConfigPath(null, { projectRoot }); const configExists = isConfigFilePresent(projectRoot); log( 'debug', `Checking for config file using findConfigPath, found: ${configPath}` ); log( 'debug', `Checking config file using isConfigFilePresent(), exists: ${configExists}` ); if (!configExists) { throw new Error(CONFIG_MISSING_ERROR); } // Validate role if (!['main', 'research', 'fallback'].includes(role)) { return { success: false, error: { code: 'INVALID_ROLE', message: `Invalid role: ${role}. Must be one of: main, research, fallback.` } }; } // Validate model ID if (typeof modelId !== 'string' || modelId.trim() === '') { return { success: false, error: { code: 'INVALID_MODEL_ID', message: `Invalid model ID: ${modelId}. Must be a non-empty string.` } }; } try { const availableModels = getAvailableModels(projectRoot); const currentConfig = getConfig(projectRoot); let determinedProvider = null; // Initialize provider let warningMessage = null; // Find the model data in internal list initially to see if it exists at all let modelData = availableModels.find((m) => m.id === modelId); // --- Revised Logic: Prioritize providerHint --- // if (providerHint) { // Hint provided (--ollama or --openrouter flag used) if (modelData && modelData.provider === providerHint) { // Found internally AND provider matches the hint determinedProvider = providerHint; report( 'info', `Model ${modelId} found internally with matching provider hint ${determinedProvider}.` ); } else { // Either not found internally, OR found but under a DIFFERENT provider than hinted. // Proceed with custom logic based ONLY on the hint. if (providerHint === CUSTOM_PROVIDERS.OPENROUTER) { // Check OpenRouter ONLY because hint was openrouter report('info', `Checking OpenRouter for ${modelId} (as hinted)...`); const openRouterModels = await fetchOpenRouterModels(); if ( openRouterModels && openRouterModels.some((m) => m.id === modelId) ) { determinedProvider = CUSTOM_PROVIDERS.OPENROUTER; // Check if this is a free model (ends with :free) if (modelId.endsWith(':free')) { warningMessage = `Warning: OpenRouter free model '${modelId}' selected. Free models have significant limitations including lower context windows, reduced rate limits, and may not support advanced features like tool_use. Consider using the paid version '${modelId.replace(':free', '')}' for full functionality.`; } else { warningMessage = `Warning: Custom OpenRouter model '${modelId}' set. This model is not officially validated by Taskmaster and may not function as expected.`; } report('warn', warningMessage); } else { // Hinted as OpenRouter but not found in live check throw new Error( `Model ID "${modelId}" not found in the live OpenRouter model list. Please verify the ID and ensure it's available on OpenRouter.` ); } } else if (providerHint === CUSTOM_PROVIDERS.OLLAMA) { // Check Ollama ONLY because hint was ollama report('info', `Checking Ollama for ${modelId} (as hinted)...`); // Get the Ollama base URL from config const ollamaBaseURL = getBaseUrlForRole(role, projectRoot); const ollamaModels = await fetchOllamaModels(ollamaBaseURL); if (ollamaModels === null) { // Connection failed - server probably not running throw new Error( `Unable to connect to Ollama server at ${ollamaBaseURL}. 
Please ensure Ollama is running and try again.` ); } else if (ollamaModels.some((m) => m.model === modelId)) { determinedProvider = CUSTOM_PROVIDERS.OLLAMA; warningMessage = `Warning: Custom Ollama model '${modelId}' set. Ensure your Ollama server is running and has pulled this model. Taskmaster cannot guarantee compatibility.`; report('warn', warningMessage); } else { // Server is running but model not found const tagsUrl = `${ollamaBaseURL}/tags`; throw new Error( `Model ID "${modelId}" not found in the Ollama instance. Please verify the model is pulled and available. You can check available models with: curl ${tagsUrl}` ); } } else if (providerHint === CUSTOM_PROVIDERS.BEDROCK) { // Set provider without model validation since Bedrock models are managed by AWS determinedProvider = CUSTOM_PROVIDERS.BEDROCK; warningMessage = `Warning: Custom Bedrock model '${modelId}' set. Please ensure the model ID is valid and accessible in your AWS account.`; report('warn', warningMessage); } else if (providerHint === CUSTOM_PROVIDERS.CLAUDE_CODE) { // Claude Code provider - check if model exists in our list determinedProvider = CUSTOM_PROVIDERS.CLAUDE_CODE; // Re-find modelData specifically for claude-code provider const claudeCodeModels = availableModels.filter( (m) => m.provider === 'claude-code' ); const claudeCodeModelData = claudeCodeModels.find( (m) => m.id === modelId ); if (claudeCodeModelData) { // Update modelData to the found claude-code model modelData = claudeCodeModelData; report('info', `Setting Claude Code model '${modelId}'.`); } else { warningMessage = `Warning: Claude Code model '${modelId}' not found in supported models. Setting without validation.`; report('warn', warningMessage); } } else if (providerHint === CUSTOM_PROVIDERS.AZURE) { // Set provider without model validation since Azure models are managed by Azure determinedProvider = CUSTOM_PROVIDERS.AZURE; warningMessage = `Warning: Custom Azure model '${modelId}' set. Please ensure the model deployment is valid and accessible in your Azure account.`; report('warn', warningMessage); } else if (providerHint === CUSTOM_PROVIDERS.VERTEX) { // Set provider without model validation since Vertex models are managed by Google Cloud determinedProvider = CUSTOM_PROVIDERS.VERTEX; warningMessage = `Warning: Custom Vertex AI model '${modelId}' set. Please ensure the model is valid and accessible in your Google Cloud project.`; report('warn', warningMessage); } else if (providerHint === CUSTOM_PROVIDERS.GEMINI_CLI) { // Gemini CLI provider - check if model exists in our list determinedProvider = CUSTOM_PROVIDERS.GEMINI_CLI; // Re-find modelData specifically for gemini-cli provider const geminiCliModels = availableModels.filter( (m) => m.provider === 'gemini-cli' ); const geminiCliModelData = geminiCliModels.find( (m) => m.id === modelId ); if (geminiCliModelData) { // Update modelData to the found gemini-cli model modelData = geminiCliModelData; report('info', `Setting Gemini CLI model '${modelId}'.`); } else { warningMessage = `Warning: Gemini CLI model '${modelId}' not found in supported models. 
Setting without validation.`; report('warn', warningMessage); } } else { // Invalid provider hint - should not happen with our constants throw new Error(`Invalid provider hint received: ${providerHint}`); } } } else { // No hint provided (flags not used) if (modelData) { // Found internally, use the provider from the internal list determinedProvider = modelData.provider; report( 'info', `Model ${modelId} found internally with provider ${determinedProvider}.` ); } else { // Model not found and no provider hint was given return { success: false, error: { code: 'MODEL_NOT_FOUND_NO_HINT', message: `Model ID "${modelId}" not found in Taskmaster's supported models. If this is a custom model, please specify the provider using --openrouter, --ollama, --bedrock, --azure, or --vertex.` } }; } } // --- End of Revised Logic --- // // At this point, we should have a determinedProvider if the model is valid (internally or custom) if (!determinedProvider) { // This case acts as a safeguard return { success: false, error: { code: 'PROVIDER_UNDETERMINED', message: `Could not determine the provider for model ID "${modelId}".` } }; } // Update configuration currentConfig.models[role] = { ...currentConfig.models[role], // Keep existing params like temperature provider: determinedProvider, modelId: modelId }; // If model data is available, update maxTokens from supported-models.json if (modelData && modelData.max_tokens) { currentConfig.models[role].maxTokens = modelData.max_tokens; } // Write updated configuration const writeResult = writeConfig(currentConfig, projectRoot); if (!writeResult) { return { success: false, error: { code: 'CONFIG_WRITE_ERROR', message: 'Error writing updated configuration to configuration file' } }; } const successMessage = `Successfully set ${role} model to ${modelId} (Provider: ${determinedProvider})`; report('info', successMessage); return { success: true, data: { role, provider: determinedProvider, modelId, message: successMessage, warning: warningMessage // Include warning in the response data } }; } catch (error) { report('error', `Error setting ${role} model: ${error.message}`); return { success: false, error: { code: 'SET_MODEL_ERROR', message: error.message } }; } } /** * Get API key status for all known providers. * @param {Object} [options] - Options for the operation * @param {Object} [options.session] - Session object containing environment variables (for MCP) * @param {Function} [options.mcpLog] - MCP logger object (for MCP) * @param {string} [options.projectRoot] - Project root directory * @returns {Object} RESTful response with API key status report */ async function getApiKeyStatusReport(options = {}) { const { mcpLog, projectRoot, session } = options; const report = (level, ...args) => { if (mcpLog && typeof mcpLog[level] === 'function') { mcpLog[level](...args); } }; try { const providers = getAllProviders(); const providersToCheck = providers.filter( (p) => p.toLowerCase() !== 'ollama' ); // Ollama is not a provider, it's a service, doesn't need an api key usually const statusReport = providersToCheck.map((provider) => { // Use provided projectRoot for MCP status check const cliOk = isApiKeySet(provider, session, projectRoot); // Pass session and projectRoot for CLI check const mcpOk = getMcpApiKeyStatus(provider, projectRoot); return { provider, cli: cliOk, mcp: mcpOk }; }); report('info', 'Successfully generated API key status report.'); return { success: true, data: { report: statusReport, message: 'API key status report generated.' 
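// Each entry in `statusReport` (mapped above) has the shape
// { provider, cli, mcp }, e.g. (hypothetical values):
//   { provider: 'anthropic', cli: true, mcp: false }
// where `cli` comes from isApiKeySet() and `mcp` from getMcpApiKeyStatus().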
} }; } catch (error) { report('error', `Error generating API key status report: ${error.message}`); return { success: false, error: { code: 'API_KEY_STATUS_ERROR', message: error.message } }; } } export { getModelConfiguration, getAvailableModelsList, setModel, getApiKeyStatusReport }; ``` -------------------------------------------------------------------------------- /scripts/modules/task-manager/add-task.js: -------------------------------------------------------------------------------- ```javascript import path from 'path'; import chalk from 'chalk'; import boxen from 'boxen'; import Table from 'cli-table3'; import { z } from 'zod'; import Fuse from 'fuse.js'; // Import Fuse.js for advanced fuzzy search import { displayBanner, getStatusWithColor, startLoadingIndicator, stopLoadingIndicator, succeedLoadingIndicator, failLoadingIndicator, displayAiUsageSummary, displayContextAnalysis } from '../ui.js'; import { readJSON, writeJSON, log as consoleLog, truncate, ensureTagMetadata, performCompleteTagMigration, markMigrationForNotice } from '../utils.js'; import { generateObjectService } from '../ai-services-unified.js'; import { getDefaultPriority, hasCodebaseAnalysis } from '../config-manager.js'; import { getPromptManager } from '../prompt-manager.js'; import ContextGatherer from '../utils/contextGatherer.js'; import generateTaskFiles from './generate-task-files.js'; import { TASK_PRIORITY_OPTIONS, DEFAULT_TASK_PRIORITY, isValidTaskPriority, normalizeTaskPriority } from '../../../src/constants/task-priority.js'; // Define Zod schema for the expected AI output object const AiTaskDataSchema = z.object({ title: z.string().describe('Clear, concise title for the task'), description: z .string() .describe('A one or two sentence description of the task'), details: z .string() .describe('In-depth implementation details, considerations, and guidance'), testStrategy: z .string() .describe('Detailed approach for verifying task completion'), dependencies: z .array(z.number()) .nullable() .describe( 'Array of task IDs that this task depends on (must be completed before this task can start)' ) }); /** * Get all tasks from all tags * @param {Object} rawData - The raw tagged data object * @returns {Array} A flat array of all task objects */ function getAllTasks(rawData) { let allTasks = []; for (const tagName in rawData) { if ( Object.prototype.hasOwnProperty.call(rawData, tagName) && rawData[tagName] && Array.isArray(rawData[tagName].tasks) ) { allTasks = allTasks.concat(rawData[tagName].tasks); } } return allTasks; } /** * Add a new task using AI * @param {string} tasksPath - Path to the tasks.json file * @param {string} prompt - Description of the task to add (required for AI-driven creation) * @param {Array} dependencies - Task dependencies * @param {string} priority - Task priority * @param {function} reportProgress - Function to report progress to MCP server (optional) * @param {Object} mcpLog - MCP logger object (optional) * @param {Object} session - Session object from MCP server (optional) * @param {string} outputFormat - Output format (text or json) * @param {Object} customEnv - Custom environment variables (optional) - Note: AI params override deprecated * @param {Object} manualTaskData - Manual task data (optional, for direct task creation without AI) * @param {boolean} useResearch - Whether to use the research model (passed to unified service) * @param {Object} context - Context object containing session and potentially projectRoot * @param {string} [context.projectRoot] - Project root path 
(for MCP/env fallback) * @param {string} [context.commandName] - The name of the command being executed (for telemetry) * @param {string} [context.outputType] - The output type ('cli' or 'mcp', for telemetry) * @param {string} [context.tag] - Tag for the task (optional) * @returns {Promise<object>} An object containing newTaskId and telemetryData */ async function addTask( tasksPath, prompt, dependencies = [], priority = null, context = {}, outputFormat = 'text', // Default to text for CLI manualTaskData = null, useResearch = false ) { const { session, mcpLog, projectRoot, commandName, outputType, tag } = context; const isMCP = !!mcpLog; // Create a consistent logFn object regardless of context const logFn = isMCP ? mcpLog // Use MCP logger if provided : { // Create a wrapper around consoleLog for CLI info: (...args) => consoleLog('info', ...args), warn: (...args) => consoleLog('warn', ...args), error: (...args) => consoleLog('error', ...args), debug: (...args) => consoleLog('debug', ...args), success: (...args) => consoleLog('success', ...args) }; // Validate priority - only accept high, medium, or low let effectivePriority = priority || getDefaultPriority(projectRoot) || DEFAULT_TASK_PRIORITY; // If priority is provided, validate and normalize it if (priority) { const normalizedPriority = normalizeTaskPriority(priority); if (normalizedPriority) { effectivePriority = normalizedPriority; } else { if (outputFormat === 'text') { consoleLog( 'warn', `Invalid priority "${priority}". Using default priority "${DEFAULT_TASK_PRIORITY}".` ); } effectivePriority = DEFAULT_TASK_PRIORITY; } } logFn.info( `Adding new task with prompt: "${prompt}", Priority: ${effectivePriority}, Dependencies: ${dependencies.join(', ') || 'None'}, Research: ${useResearch}, ProjectRoot: ${projectRoot}` ); if (tag) { logFn.info(`Using tag context: ${tag}`); } let loadingIndicator = null; let aiServiceResponse = null; // To store the full response from AI service // Create custom reporter that checks for MCP log const report = (message, level = 'info') => { if (mcpLog) { mcpLog[level](message); } else if (outputFormat === 'text') { consoleLog(level, message); } }; /** * Recursively builds a dependency graph for a given task * @param {Array} tasks - All tasks from tasks.json * @param {number} taskId - ID of the task to analyze * @param {Set} visited - Set of already visited task IDs * @param {Map} depthMap - Map of task ID to its depth in the graph * @param {number} depth - Current depth in the recursion * @return {Object} Dependency graph data */ function buildDependencyGraph( tasks, taskId, visited = new Set(), depthMap = new Map(), depth = 0 ) { // Skip if we've already visited this task or it doesn't exist if (visited.has(taskId)) { return null; } // Find the task const task = tasks.find((t) => t.id === taskId); if (!task) { return null; } // Mark as visited visited.add(taskId); // Update depth if this is a deeper path to this task if (!depthMap.has(taskId) || depth < depthMap.get(taskId)) { depthMap.set(taskId, depth); } // Process dependencies const dependencyData = []; if (task.dependencies && task.dependencies.length > 0) { for (const depId of task.dependencies) { const depData = buildDependencyGraph( tasks, depId, visited, depthMap, depth + 1 ); if (depData) { dependencyData.push(depData); } } } return { id: task.id, title: task.title, description: task.description, status: task.status, dependencies: dependencyData }; } try { // Read the existing tasks - IMPORTANT: Read the raw data without tag resolution let 
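// Hedged sketch of what buildDependencyGraph (defined above) returns for a
// task with a single dependency (hypothetical data):
//   { id: 3, title: 'Build API', description: '...', status: 'pending',
//     dependencies: [{ id: 1, title: 'Setup repo', description: '...',
//       status: 'done', dependencies: [] }] }
// The visited set breaks cycles; depthMap records the shallowest depth seen.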
rawData = readJSON(tasksPath, projectRoot, tag); // No tag parameter // Handle the case where readJSON returns resolved data with _rawTaggedData if (rawData && rawData._rawTaggedData) { // Use the raw tagged data and discard the resolved view rawData = rawData._rawTaggedData; } // If file doesn't exist or is invalid, create a new structure in memory if (!rawData) { report( 'tasks.json not found or invalid. Initializing new structure.', 'info' ); rawData = { master: { tasks: [], metadata: { created: new Date().toISOString(), description: 'Default tasks context' } } }; // Do not write the file here; it will be written later with the new task. } // Handle legacy format migration using utilities if (rawData && Array.isArray(rawData.tasks) && !rawData._rawTaggedData) { report('Legacy format detected. Migrating to tagged format...', 'info'); // This is legacy format - migrate it to tagged format rawData = { master: { tasks: rawData.tasks, metadata: rawData.metadata || { created: new Date().toISOString(), updated: new Date().toISOString(), description: 'Tasks for master context' } } }; // Ensure proper metadata using utility ensureTagMetadata(rawData.master, { description: 'Tasks for master context' }); // Do not write the file here; it will be written later with the new task. // Perform complete migration (config.json, state.json) performCompleteTagMigration(tasksPath); markMigrationForNotice(tasksPath); report('Successfully migrated to tagged format.', 'success'); } // Use the provided tag, or the current active tag, or default to 'master' const targetTag = tag; // Ensure the target tag exists if (!rawData[targetTag]) { report( `Tag "${targetTag}" does not exist. Please create it first using the 'add-tag' command.`, 'error' ); throw new Error(`Tag "${targetTag}" not found.`); } // Ensure the target tag has a tasks array and metadata object if (!rawData[targetTag].tasks) { rawData[targetTag].tasks = []; } if (!rawData[targetTag].metadata) { rawData[targetTag].metadata = { created: new Date().toISOString(), updated: new Date().toISOString(), description: `` }; } // Get a flat list of ALL tasks across ALL tags to validate dependencies const allTasks = getAllTasks(rawData); // Find the highest task ID *within the target tag* to determine the next ID const tasksInTargetTag = rawData[targetTag].tasks; const highestId = tasksInTargetTag.length > 0 ? 
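// For reference, the tagged structure read and migrated above looks like
// (hypothetical minimal example):
//   { "master": { "tasks": [{ "id": 1, "title": "..." }],
//                 "metadata": { "created": "...", "description": "..." } } }
// which is why the next ID below is computed per tag, not across tags.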
Math.max(...tasksInTargetTag.map((t) => t.id)) : 0; const newTaskId = highestId + 1; // Only show UI box for CLI mode if (outputFormat === 'text') { console.log( boxen(chalk.white.bold(`Creating New Task #${newTaskId}`), { padding: 1, borderColor: 'blue', borderStyle: 'round', margin: { top: 1, bottom: 1 } }) ); } // Validate dependencies before proceeding const invalidDeps = dependencies.filter((depId) => { // Ensure depId is parsed as a number for comparison const numDepId = parseInt(depId, 10); return Number.isNaN(numDepId) || !allTasks.some((t) => t.id === numDepId); }); if (invalidDeps.length > 0) { report( `The following dependencies do not exist or are invalid: ${invalidDeps.join(', ')}`, 'warn' ); report('Removing invalid dependencies...', 'info'); dependencies = dependencies.filter( (depId) => !invalidDeps.includes(depId) ); } // Ensure dependencies are numbers const numericDependencies = dependencies.map((dep) => parseInt(dep, 10)); // Build dependency graphs for explicitly specified dependencies const dependencyGraphs = []; const allRelatedTaskIds = new Set(); const depthMap = new Map(); // First pass: build a complete dependency graph for each specified dependency for (const depId of numericDependencies) { const graph = buildDependencyGraph(allTasks, depId, new Set(), depthMap); if (graph) { dependencyGraphs.push(graph); } } // Second pass: build a set of all related task IDs for flat analysis for (const [taskId, depth] of depthMap.entries()) { allRelatedTaskIds.add(taskId); } let taskData; // Check if manual task data is provided if (manualTaskData) { report('Using manually provided task data', 'info'); taskData = manualTaskData; report('DEBUG: Taking MANUAL task data path.', 'debug'); // Basic validation for manual data if ( !taskData.title || typeof taskData.title !== 'string' || !taskData.description || typeof taskData.description !== 'string' ) { throw new Error( 'Manual task data must include at least a title and description.' 
); } } else { report('DEBUG: Taking AI task generation path.', 'debug'); // --- Refactored AI Interaction --- report(`Generating task data with AI with prompt:\n${prompt}`, 'info'); // --- Use the new ContextGatherer --- const contextGatherer = new ContextGatherer(projectRoot, tag); const gatherResult = await contextGatherer.gather({ semanticQuery: prompt, dependencyTasks: numericDependencies, format: 'research' }); const gatheredContext = gatherResult.context; const analysisData = gatherResult.analysisData; // Display context analysis if not in silent mode if (outputFormat === 'text' && analysisData) { displayContextAnalysis(analysisData, prompt, gatheredContext.length); } // Add any manually provided details to the prompt for context let contextFromArgs = ''; if (manualTaskData?.title) contextFromArgs += `\n- Suggested Title: "${manualTaskData.title}"`; if (manualTaskData?.description) contextFromArgs += `\n- Suggested Description: "${manualTaskData.description}"`; if (manualTaskData?.details) contextFromArgs += `\n- Additional Details Context: "${manualTaskData.details}"`; if (manualTaskData?.testStrategy) contextFromArgs += `\n- Additional Test Strategy Context: "${manualTaskData.testStrategy}"`; // Load prompts using PromptManager const promptManager = getPromptManager(); const { systemPrompt, userPrompt } = await promptManager.loadPrompt( 'add-task', { prompt, newTaskId, existingTasks: allTasks, gatheredContext, contextFromArgs, useResearch, priority: effectivePriority, dependencies: numericDependencies, hasCodebaseAnalysis: hasCodebaseAnalysis( useResearch, projectRoot, session ), projectRoot: projectRoot } ); // Start the loading indicator - only for text mode if (outputFormat === 'text') { loadingIndicator = startLoadingIndicator( `Generating new task with ${useResearch ? 'Research' : 'Main'} AI... \n` ); } try { const serviceRole = useResearch ? 'research' : 'main'; report('DEBUG: Calling generateObjectService...', 'debug'); aiServiceResponse = await generateObjectService({ // Capture the full response role: serviceRole, session: session, projectRoot: projectRoot, schema: AiTaskDataSchema, objectName: 'newTaskData', systemPrompt: systemPrompt, prompt: userPrompt, commandName: commandName || 'add-task', // Use passed commandName or default outputType: outputType || (isMCP ? 'mcp' : 'cli') // Use passed outputType or derive }); report('DEBUG: generateObjectService returned successfully.', 'debug'); if (!aiServiceResponse || !aiServiceResponse.mainResult) { throw new Error( 'AI service did not return the expected object structure.' ); } // Prefer mainResult if it looks like a valid task object, otherwise try mainResult.object if ( aiServiceResponse.mainResult.title && aiServiceResponse.mainResult.description ) { taskData = aiServiceResponse.mainResult; } else if ( aiServiceResponse.mainResult.object && aiServiceResponse.mainResult.object.title && aiServiceResponse.mainResult.object.description ) { taskData = aiServiceResponse.mainResult.object; } else { throw new Error('AI service did not return a valid task object.'); } report('Successfully generated task data from AI.', 'success'); // Success! Show checkmark if (loadingIndicator) { succeedLoadingIndicator( loadingIndicator, 'Task generated successfully' ); loadingIndicator = null; // Clear it } } catch (error) { // Failure! 
Show X if (loadingIndicator) { failLoadingIndicator(loadingIndicator, 'AI generation failed'); loadingIndicator = null; } report( `DEBUG: generateObjectService caught error: ${error.message}`, 'debug' ); report(`Error generating task with AI: ${error.message}`, 'error'); throw error; // Re-throw error after logging } finally { report('DEBUG: generateObjectService finally block reached.', 'debug'); // Clean up if somehow still running if (loadingIndicator) { stopLoadingIndicator(loadingIndicator); } } // --- End Refactored AI Interaction --- } // Create the new task object const newTask = { id: newTaskId, title: taskData.title, description: taskData.description, details: taskData.details || '', testStrategy: taskData.testStrategy || '', status: 'pending', dependencies: taskData.dependencies?.length ? taskData.dependencies : numericDependencies, // Use AI-suggested dependencies if available, fallback to manually specified priority: effectivePriority, subtasks: [] // Initialize with empty subtasks array }; // Additional check: validate all dependencies in the AI response if (taskData.dependencies?.length) { const allValidDeps = taskData.dependencies.every((depId) => { const numDepId = parseInt(depId, 10); return ( !Number.isNaN(numDepId) && allTasks.some((t) => t.id === numDepId) ); }); if (!allValidDeps) { report( 'AI suggested invalid dependencies. Filtering them out...', 'warn' ); newTask.dependencies = taskData.dependencies.filter((depId) => { const numDepId = parseInt(depId, 10); return ( !Number.isNaN(numDepId) && allTasks.some((t) => t.id === numDepId) ); }); } } // Add the task to the tasks array OF THE CORRECT TAG rawData[targetTag].tasks.push(newTask); // Update the tag's metadata ensureTagMetadata(rawData[targetTag], { description: `Tasks for ${targetTag} context` }); report('DEBUG: Writing tasks.json...', 'debug'); // Write the updated raw data back to the file // The writeJSON function will automatically filter out _rawTaggedData writeJSON(tasksPath, rawData, projectRoot, targetTag); report('DEBUG: tasks.json written.', 'debug'); // Show success message - only for text output (CLI) if (outputFormat === 'text') { const table = new Table({ head: [ chalk.cyan.bold('ID'), chalk.cyan.bold('Title'), chalk.cyan.bold('Description') ], colWidths: [5, 30, 50] // Adjust widths as needed }); table.push([ newTask.id, truncate(newTask.title, 27), truncate(newTask.description, 47) ]); console.log(chalk.green('✓ New task created successfully:')); console.log(table.toString()); // Helper to get priority color const getPriorityColor = (p) => { switch (p?.toLowerCase()) { case 'high': return 'red'; case 'low': return 'gray'; default: return 'yellow'; } }; // Check if AI added new dependencies that weren't explicitly provided const aiAddedDeps = newTask.dependencies.filter( (dep) => !numericDependencies.includes(dep) ); // Check if AI removed any dependencies that were explicitly provided const aiRemovedDeps = numericDependencies.filter( (dep) => !newTask.dependencies.includes(dep) ); // Get task titles for dependencies to display const depTitles = {}; newTask.dependencies.forEach((dep) => { const depTask = allTasks.find((t) => t.id === dep); if (depTask) { depTitles[dep] = truncate(depTask.title, 30); } }); // Prepare dependency display string let dependencyDisplay = ''; if (newTask.dependencies.length > 0) { dependencyDisplay = chalk.white('Dependencies:') + '\n'; newTask.dependencies.forEach((dep) => { const isAiAdded = aiAddedDeps.includes(dep); const depType = isAiAdded ? 
chalk.yellow(' (AI suggested)') : ''; dependencyDisplay += chalk.white( ` - ${dep}: ${depTitles[dep] || 'Unknown task'}${depType}` ) + '\n'; }); } else { dependencyDisplay = chalk.white('Dependencies: None') + '\n'; } // Add info about removed dependencies if any if (aiRemovedDeps.length > 0) { dependencyDisplay += chalk.gray('\nUser-specified dependencies that were not used:') + '\n'; aiRemovedDeps.forEach((dep) => { const depTask = allTasks.find((t) => t.id === dep); const title = depTask ? truncate(depTask.title, 30) : 'Unknown task'; dependencyDisplay += chalk.gray(` - ${dep}: ${title}`) + '\n'; }); } // Add dependency analysis summary let dependencyAnalysis = ''; if (aiAddedDeps.length > 0 || aiRemovedDeps.length > 0) { dependencyAnalysis = '\n' + chalk.white.bold('Dependency Analysis:') + '\n'; if (aiAddedDeps.length > 0) { dependencyAnalysis += chalk.green( `AI identified ${aiAddedDeps.length} additional dependencies` ) + '\n'; } if (aiRemovedDeps.length > 0) { dependencyAnalysis += chalk.yellow( `AI excluded ${aiRemovedDeps.length} user-provided dependencies` ) + '\n'; } } // Show success message box console.log( boxen( chalk.white.bold(`Task ${newTaskId} Created Successfully`) + '\n\n' + chalk.white(`Title: ${newTask.title}`) + '\n' + chalk.white(`Status: ${getStatusWithColor(newTask.status)}`) + '\n' + chalk.white( `Priority: ${chalk[getPriorityColor(newTask.priority)](newTask.priority)}` ) + '\n\n' + dependencyDisplay + dependencyAnalysis + '\n' + chalk.white.bold('Next Steps:') + '\n' + chalk.cyan( `1. Run ${chalk.yellow(`task-master show ${newTaskId}`)} to see complete task details` ) + '\n' + chalk.cyan( `2. Run ${chalk.yellow(`task-master set-status --id=${newTaskId} --status=in-progress`)} to start working on it` ) + '\n' + chalk.cyan( `3. Run ${chalk.yellow(`task-master expand --id=${newTaskId}`)} to break it down into subtasks` ), { padding: 1, borderColor: 'green', borderStyle: 'round' } ) ); // Display AI Usage Summary if telemetryData is available if ( aiServiceResponse && aiServiceResponse.telemetryData && (outputType === 'cli' || outputType === 'text') ) { displayAiUsageSummary(aiServiceResponse.telemetryData, 'cli'); } } report( `DEBUG: Returning new task ID: ${newTaskId} and telemetry.`, 'debug' ); return { newTaskId: newTaskId, telemetryData: aiServiceResponse ? aiServiceResponse.telemetryData : null, tagInfo: aiServiceResponse ? 
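// Hedged sketch of the resolved value callers receive from addTask
// (hypothetical values; both nullable fields depend on the AI path):
//   { newTaskId: 12, telemetryData: { /* usage stats */ }, tagInfo: null }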
aiServiceResponse.tagInfo : null }; } catch (error) { // Stop any loading indicator on error if (loadingIndicator) { stopLoadingIndicator(loadingIndicator); } report(`Error adding task: ${error.message}`, 'error'); if (outputFormat === 'text') { console.error(chalk.red(`Error: ${error.message}`)); } // In MCP mode, we let the direct function handler catch and format throw error; } } export default addTask; ``` -------------------------------------------------------------------------------- /tests/integration/cli/move-cross-tag.test.js: -------------------------------------------------------------------------------- ```javascript import { jest } from '@jest/globals'; import fs from 'fs'; import path from 'path'; // --- Define mock functions --- const mockMoveTasksBetweenTags = jest.fn(); const mockMoveTask = jest.fn(); const mockGenerateTaskFiles = jest.fn(); const mockLog = jest.fn(); // --- Setup mocks using unstable_mockModule --- jest.unstable_mockModule( '../../../scripts/modules/task-manager/move-task.js', () => ({ default: mockMoveTask, moveTasksBetweenTags: mockMoveTasksBetweenTags }) ); jest.unstable_mockModule( '../../../scripts/modules/task-manager/generate-task-files.js', () => ({ default: mockGenerateTaskFiles }) ); jest.unstable_mockModule('../../../scripts/modules/utils.js', () => ({ log: mockLog, readJSON: jest.fn(), writeJSON: jest.fn(), findProjectRoot: jest.fn(() => '/test/project/root'), getCurrentTag: jest.fn(() => 'master') })); // --- Mock chalk for consistent output formatting --- const mockChalk = { red: jest.fn((text) => text), yellow: jest.fn((text) => text), blue: jest.fn((text) => text), green: jest.fn((text) => text), gray: jest.fn((text) => text), dim: jest.fn((text) => text), bold: { cyan: jest.fn((text) => text), white: jest.fn((text) => text), red: jest.fn((text) => text) }, cyan: { bold: jest.fn((text) => text) }, white: { bold: jest.fn((text) => text) } }; jest.unstable_mockModule('chalk', () => ({ default: mockChalk })); // --- Import modules (AFTER mock setup) --- let moveTaskModule, generateTaskFilesModule, utilsModule, chalk; describe('Cross-Tag Move CLI Integration', () => { // Setup dynamic imports before tests run beforeAll(async () => { moveTaskModule = await import( '../../../scripts/modules/task-manager/move-task.js' ); generateTaskFilesModule = await import( '../../../scripts/modules/task-manager/generate-task-files.js' ); utilsModule = await import('../../../scripts/modules/utils.js'); chalk = (await import('chalk')).default; }); beforeEach(() => { jest.clearAllMocks(); }); // Helper function to capture console output and process.exit calls function captureConsoleAndExit() { const originalConsoleError = console.error; const originalConsoleLog = console.log; const originalProcessExit = process.exit; const errorMessages = []; const logMessages = []; const exitCodes = []; console.error = jest.fn((...args) => { errorMessages.push(args.join(' ')); }); console.log = jest.fn((...args) => { logMessages.push(args.join(' ')); }); process.exit = jest.fn((code) => { exitCodes.push(code); }); return { errorMessages, logMessages, exitCodes, restore: () => { console.error = originalConsoleError; console.log = originalConsoleLog; process.exit = originalProcessExit; } }; } // --- Replicate the move command action handler logic from commands.js --- async function moveAction(options) { const sourceId = options.from; const destinationId = options.to; const fromTag = options.fromTag; const toTag = options.toTag; const withDependencies = options.withDependencies; 
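// For context, this helper mirrors the commands.js action handler, so a CLI
// invocation such as:
//   task-master move --from=1,2 --from-tag=backlog --to-tag=in-progress
// arrives here as options { from: '1,2', fromTag: 'backlog', toTag: 'in-progress' }.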
const ignoreDependencies = options.ignoreDependencies; const force = options.force; // Get the source tag - fallback to current tag if not provided const sourceTag = fromTag || utilsModule.getCurrentTag(); // Check if this is a cross-tag move (different tags) const isCrossTagMove = sourceTag && toTag && sourceTag !== toTag; if (isCrossTagMove) { // Cross-tag move logic if (!sourceId) { const error = new Error( '--from parameter is required for cross-tag moves' ); console.error(chalk.red(`Error: ${error.message}`)); throw error; } const taskIds = sourceId.split(',').map((id) => parseInt(id.trim(), 10)); // Validate parsed task IDs for (let i = 0; i < taskIds.length; i++) { if (isNaN(taskIds[i])) { const error = new Error( `Invalid task ID at position ${i + 1}: "${sourceId.split(',')[i].trim()}" is not a valid number` ); console.error(chalk.red(`Error: ${error.message}`)); throw error; } } const tasksPath = path.join( utilsModule.findProjectRoot(), '.taskmaster', 'tasks', 'tasks.json' ); try { const result = await moveTaskModule.moveTasksBetweenTags( tasksPath, taskIds, sourceTag, toTag, { withDependencies, ignoreDependencies } ); console.log(chalk.green('Successfully moved task(s) between tags')); // Print advisory tips when present if (result && Array.isArray(result.tips) && result.tips.length > 0) { console.log('Next Steps:'); result.tips.forEach((t) => console.log(` • ${t}`)); } // Generate task files for both tags await generateTaskFilesModule.default( tasksPath, path.dirname(tasksPath), { tag: sourceTag } ); await generateTaskFilesModule.default( tasksPath, path.dirname(tasksPath), { tag: toTag } ); } catch (error) { console.error(chalk.red(`Error: ${error.message}`)); // Print ID collision guidance similar to CLI help block if ( typeof error?.message === 'string' && error.message.includes('already exists in target tag') ) { console.log(''); console.log('Conflict: ID already exists in target tag'); console.log( ' • Choose a different target tag without conflicting IDs' ); console.log(' • Move a different set of IDs (avoid existing ones)'); console.log( ' • If needed, move within-tag to a new ID first, then cross-tag move' ); } throw error; } } else { // Handle case where both tags are provided but are the same if (sourceTag && toTag && sourceTag === toTag) { // If both tags are the same and we have destinationId, treat as within-tag move if (destinationId) { if (!sourceId) { const error = new Error( 'Both --from and --to parameters are required for within-tag moves' ); console.error(chalk.red(`Error: ${error.message}`)); throw error; } // Call the existing moveTask function for within-tag moves try { await moveTaskModule.default(sourceId, destinationId); console.log(chalk.green('Successfully moved task')); } catch (error) { console.error(chalk.red(`Error: ${error.message}`)); throw error; } } else { // Same tags but no destinationId - this is an error const error = new Error( `Source and target tags are the same ("${sourceTag}") but no destination specified` ); console.error(chalk.red(`Error: ${error.message}`)); console.log( chalk.yellow( 'For within-tag moves, use: task-master move --from=<sourceId> --to=<destinationId>' ) ); console.log( chalk.yellow( 'For cross-tag moves, use different tags: task-master move --from=<sourceId> --from-tag=<sourceTag> --to-tag=<targetTag>' ) ); throw error; } } else { // Within-tag move logic (existing functionality) if (!sourceId || !destinationId) { const error = new Error( 'Both --from and --to parameters are required for within-tag moves' ); 
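// Note: this same-tag-plus-destination branch delegates to the within-tag
// moveTask path, e.g. (hypothetical):
//   { from: '1', to: '2', fromTag: 'master', toTag: 'master' }
// ends up calling moveTask('1', '2'), as the tests further down verify.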
console.error(chalk.red(`Error: ${error.message}`)); throw error; } // Call the existing moveTask function for within-tag moves try { await moveTaskModule.default(sourceId, destinationId); console.log(chalk.green('Successfully moved task')); } catch (error) { console.error(chalk.red(`Error: ${error.message}`)); throw error; } } } } it('should move task without dependencies successfully', async () => { // Mock successful cross-tag move mockMoveTasksBetweenTags.mockResolvedValue(undefined); mockGenerateTaskFiles.mockResolvedValue(undefined); const options = { from: '2', fromTag: 'backlog', toTag: 'in-progress' }; await moveAction(options); expect(mockMoveTasksBetweenTags).toHaveBeenCalledWith( expect.stringContaining('tasks.json'), [2], 'backlog', 'in-progress', { withDependencies: undefined, ignoreDependencies: undefined } ); }); it('should fail to move task with cross-tag dependencies', async () => { // Mock dependency conflict error mockMoveTasksBetweenTags.mockRejectedValue( new Error('Cannot move task due to cross-tag dependency conflicts') ); const options = { from: '1', fromTag: 'backlog', toTag: 'in-progress' }; const { errorMessages, restore } = captureConsoleAndExit(); await expect(moveAction(options)).rejects.toThrow( 'Cannot move task due to cross-tag dependency conflicts' ); expect(mockMoveTasksBetweenTags).toHaveBeenCalled(); expect( errorMessages.some((msg) => msg.includes('cross-tag dependency conflicts') ) ).toBe(true); restore(); }); it('should move task with dependencies when --with-dependencies is used', async () => { // Mock successful cross-tag move with dependencies mockMoveTasksBetweenTags.mockResolvedValue(undefined); mockGenerateTaskFiles.mockResolvedValue(undefined); const options = { from: '1', fromTag: 'backlog', toTag: 'in-progress', withDependencies: true }; await moveAction(options); expect(mockMoveTasksBetweenTags).toHaveBeenCalledWith( expect.stringContaining('tasks.json'), [1], 'backlog', 'in-progress', { withDependencies: true, ignoreDependencies: undefined } ); }); it('should break dependencies when --ignore-dependencies is used', async () => { // Mock successful cross-tag move with dependency breaking mockMoveTasksBetweenTags.mockResolvedValue(undefined); mockGenerateTaskFiles.mockResolvedValue(undefined); const options = { from: '1', fromTag: 'backlog', toTag: 'in-progress', ignoreDependencies: true }; await moveAction(options); expect(mockMoveTasksBetweenTags).toHaveBeenCalledWith( expect.stringContaining('tasks.json'), [1], 'backlog', 'in-progress', { withDependencies: undefined, ignoreDependencies: true } ); }); it('should create target tag if it does not exist', async () => { // Mock successful cross-tag move to new tag mockMoveTasksBetweenTags.mockResolvedValue(undefined); mockGenerateTaskFiles.mockResolvedValue(undefined); const options = { from: '2', fromTag: 'backlog', toTag: 'new-tag' }; await moveAction(options); expect(mockMoveTasksBetweenTags).toHaveBeenCalledWith( expect.stringContaining('tasks.json'), [2], 'backlog', 'new-tag', { withDependencies: undefined, ignoreDependencies: undefined } ); }); it('should fail to move a subtask directly', async () => { // Mock subtask movement error mockMoveTasksBetweenTags.mockRejectedValue( new Error( 'Cannot move subtasks directly between tags. Please promote the subtask to a full task first.' 
) ); const options = { from: '1.2', fromTag: 'backlog', toTag: 'in-progress' }; const { errorMessages, restore } = captureConsoleAndExit(); await expect(moveAction(options)).rejects.toThrow( 'Cannot move subtasks directly between tags. Please promote the subtask to a full task first.' ); expect(mockMoveTasksBetweenTags).toHaveBeenCalled(); expect(errorMessages.some((msg) => msg.includes('subtasks directly'))).toBe( true ); restore(); }); it('should provide helpful error messages for dependency conflicts', async () => { // Mock dependency conflict with detailed error mockMoveTasksBetweenTags.mockRejectedValue( new Error( 'Cross-tag dependency conflicts detected. Task 1 depends on Task 2 which is in a different tag.' ) ); const options = { from: '1', fromTag: 'backlog', toTag: 'in-progress' }; const { errorMessages, restore } = captureConsoleAndExit(); await expect(moveAction(options)).rejects.toThrow( 'Cross-tag dependency conflicts detected. Task 1 depends on Task 2 which is in a different tag.' ); expect(mockMoveTasksBetweenTags).toHaveBeenCalled(); expect( errorMessages.some((msg) => msg.includes('Cross-tag dependency conflicts detected') ) ).toBe(true); restore(); }); it('should print advisory tips when result.tips are returned (ignore-dependencies)', async () => { const { errorMessages, logMessages, restore } = captureConsoleAndExit(); try { // Arrange: mock move to return tips mockMoveTasksBetweenTags.mockResolvedValue({ message: 'ok', tips: [ 'Run "task-master validate-dependencies" to check for dependency issues.', 'Run "task-master fix-dependencies" to automatically repair dangling dependencies.' ] }); await moveAction({ from: '2', fromTag: 'backlog', toTag: 'in-progress', ignoreDependencies: true }); const joined = logMessages.join('\n'); expect(joined).toContain('Next Steps'); expect(joined).toContain('validate-dependencies'); expect(joined).toContain('fix-dependencies'); } finally { restore(); } }); it('should print ID collision suggestions when target already has the ID', async () => { const { errorMessages, logMessages, restore } = captureConsoleAndExit(); try { // Arrange: mock move to throw collision const err = new Error( 'Task 1 already exists in target tag "in-progress"' ); mockMoveTasksBetweenTags.mockRejectedValue(err); await expect( moveAction({ from: '1', fromTag: 'backlog', toTag: 'in-progress' }) ).rejects.toThrow('already exists in target tag'); const joined = logMessages.join('\n'); expect(joined).toContain('Conflict: ID already exists in target tag'); expect(joined).toContain('different target tag'); expect(joined).toContain('different set of IDs'); expect(joined).toContain('within-tag'); } finally { restore(); } }); it('should handle same tag error correctly', async () => { const options = { from: '1', fromTag: 'backlog', toTag: 'backlog' // Same tag but no destination }; const { errorMessages, logMessages, restore } = captureConsoleAndExit(); await expect(moveAction(options)).rejects.toThrow( 'Source and target tags are the same ("backlog") but no destination specified' ); expect( errorMessages.some((msg) => msg.includes( 'Source and target tags are the same ("backlog") but no destination specified' ) ) ).toBe(true); expect( logMessages.some((msg) => msg.includes('For within-tag moves')) ).toBe(true); expect(logMessages.some((msg) => msg.includes('For cross-tag moves'))).toBe( true ); restore(); }); it('should use current tag when --from-tag is not provided', async () => { // Mock successful move with current tag fallback 
mockMoveTasksBetweenTags.mockResolvedValue({ message: 'Successfully moved task(s) between tags' }); // Mock getCurrentTag to return 'master' utilsModule.getCurrentTag.mockReturnValue('master'); // Simulate command: task-master move --from=1 --to-tag=in-progress // (no --from-tag provided, should use current tag 'master') await moveAction({ from: '1', toTag: 'in-progress', withDependencies: false, ignoreDependencies: false // fromTag is intentionally not provided to test fallback }); // Verify that moveTasksBetweenTags was called with 'master' as source tag expect(mockMoveTasksBetweenTags).toHaveBeenCalledWith( expect.stringContaining('.taskmaster/tasks/tasks.json'), [1], // parseInt converts string to number 'master', // Should use current tag as fallback 'in-progress', { withDependencies: false, ignoreDependencies: false } ); // Verify that generateTaskFiles was called for both tags expect(generateTaskFilesModule.default).toHaveBeenCalledWith( expect.stringContaining('.taskmaster/tasks/tasks.json'), expect.stringContaining('.taskmaster/tasks'), { tag: 'master' } ); expect(generateTaskFilesModule.default).toHaveBeenCalledWith( expect.stringContaining('.taskmaster/tasks/tasks.json'), expect.stringContaining('.taskmaster/tasks'), { tag: 'in-progress' } ); }); it('should move multiple tasks with comma-separated IDs successfully', async () => { // Mock successful cross-tag move for multiple tasks mockMoveTasksBetweenTags.mockResolvedValue(undefined); mockGenerateTaskFiles.mockResolvedValue(undefined); const options = { from: '1,2,3', fromTag: 'backlog', toTag: 'in-progress' }; await moveAction(options); expect(mockMoveTasksBetweenTags).toHaveBeenCalledWith( expect.stringContaining('tasks.json'), [1, 2, 3], // Should parse comma-separated string to array of integers 'backlog', 'in-progress', { withDependencies: undefined, ignoreDependencies: undefined } ); // Verify task files are generated for both tags expect(mockGenerateTaskFiles).toHaveBeenCalledTimes(2); expect(mockGenerateTaskFiles).toHaveBeenCalledWith( expect.stringContaining('tasks.json'), expect.stringContaining('.taskmaster/tasks'), { tag: 'backlog' } ); expect(mockGenerateTaskFiles).toHaveBeenCalledWith( expect.stringContaining('tasks.json'), expect.stringContaining('.taskmaster/tasks'), { tag: 'in-progress' } ); }); // Note: --force flag is no longer supported for cross-tag moves it('should fail when invalid task ID is provided', async () => { const options = { from: '1,abc,3', // Invalid ID in middle fromTag: 'backlog', toTag: 'in-progress' }; const { errorMessages, restore } = captureConsoleAndExit(); await expect(moveAction(options)).rejects.toThrow( 'Invalid task ID at position 2: "abc" is not a valid number' ); expect( errorMessages.some((msg) => msg.includes('Invalid task ID at position 2')) ).toBe(true); restore(); }); it('should fail when first task ID is invalid', async () => { const options = { from: 'abc,2,3', // Invalid ID at start fromTag: 'backlog', toTag: 'in-progress' }; const { errorMessages, restore } = captureConsoleAndExit(); await expect(moveAction(options)).rejects.toThrow( 'Invalid task ID at position 1: "abc" is not a valid number' ); expect( errorMessages.some((msg) => msg.includes('Invalid task ID at position 1')) ).toBe(true); restore(); }); it('should fail when last task ID is invalid', async () => { const options = { from: '1,2,xyz', // Invalid ID at end fromTag: 'backlog', toTag: 'in-progress' }; const { errorMessages, restore } = captureConsoleAndExit(); await 
	it('should fail when last task ID is invalid', async () => {
		const options = {
			from: '1,2,xyz', // Invalid ID at end
			fromTag: 'backlog',
			toTag: 'in-progress'
		};

		const { errorMessages, restore } = captureConsoleAndExit();

		await expect(moveAction(options)).rejects.toThrow(
			'Invalid task ID at position 3: "xyz" is not a valid number'
		);

		expect(
			errorMessages.some((msg) => msg.includes('Invalid task ID at position 3'))
		).toBe(true);

		restore();
	});

	it('should fail when single invalid task ID is provided', async () => {
		const options = {
			from: 'invalid',
			fromTag: 'backlog',
			toTag: 'in-progress'
		};

		const { errorMessages, restore } = captureConsoleAndExit();

		await expect(moveAction(options)).rejects.toThrow(
			'Invalid task ID at position 1: "invalid" is not a valid number'
		);

		expect(
			errorMessages.some((msg) => msg.includes('Invalid task ID at position 1'))
		).toBe(true);

		restore();
	});

	// Note: --force combinations removed

	it('should handle whitespace in comma-separated task IDs', async () => {
		// Mock successful cross-tag move with whitespace
		mockMoveTasksBetweenTags.mockResolvedValue(undefined);
		mockGenerateTaskFiles.mockResolvedValue(undefined);

		const options = {
			from: ' 1 , 2 , 3 ', // Whitespace around IDs and commas
			fromTag: 'backlog',
			toTag: 'in-progress'
		};

		await moveAction(options);

		expect(mockMoveTasksBetweenTags).toHaveBeenCalledWith(
			expect.stringContaining('tasks.json'),
			[1, 2, 3], // Should trim whitespace and parse as integers
			'backlog',
			'in-progress',
			{
				withDependencies: undefined,
				ignoreDependencies: undefined,
				force: undefined
			}
		);
	});

	it('should fail when --from parameter is missing for cross-tag move', async () => {
		const options = {
			fromTag: 'backlog',
			toTag: 'in-progress'
			// from is intentionally missing
		};

		const { errorMessages, restore } = captureConsoleAndExit();

		await expect(moveAction(options)).rejects.toThrow(
			'--from parameter is required for cross-tag moves'
		);

		expect(
			errorMessages.some((msg) =>
				msg.includes('--from parameter is required for cross-tag moves')
			)
		).toBe(true);

		restore();
	});

	it('should fail when both --from and --to are missing for within-tag move', async () => {
		const options = {
			// Both from and to are missing for within-tag move
		};

		const { errorMessages, restore } = captureConsoleAndExit();

		await expect(moveAction(options)).rejects.toThrow(
			'Both --from and --to parameters are required for within-tag moves'
		);

		expect(
			errorMessages.some((msg) =>
				msg.includes(
					'Both --from and --to parameters are required for within-tag moves'
				)
			)
		).toBe(true);

		restore();
	});

	it('should handle within-tag move when only --from is provided', async () => {
		// Mock successful within-tag move
		mockMoveTask.mockResolvedValue(undefined);

		const options = {
			from: '1',
			to: '2'
			// No tags specified, should use within-tag logic
		};

		await moveAction(options);

		expect(mockMoveTask).toHaveBeenCalledWith('1', '2');
		expect(mockMoveTasksBetweenTags).not.toHaveBeenCalled();
	});

	it('should handle within-tag move when both tags are the same', async () => {
		// Mock successful within-tag move
		mockMoveTask.mockResolvedValue(undefined);

		const options = {
			from: '1',
			to: '2',
			fromTag: 'master',
			toTag: 'master' // Same tag, should use within-tag logic
		};

		await moveAction(options);

		expect(mockMoveTask).toHaveBeenCalledWith('1', '2');
		expect(mockMoveTasksBetweenTags).not.toHaveBeenCalled();
	});
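
	// Routing rule under test: identical fromTag/toTag plus a --to destination
	// is treated as a within-tag move (moveTask); the same tags WITHOUT --to
	// is an error, which the final test below asserts along with its hints.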
	it('should fail when both tags are the same but no destination is provided', async () => {
		const options = {
			from: '1',
			fromTag: 'master',
			toTag: 'master' // Same tag but no destination
		};

		const { errorMessages, logMessages, restore } = captureConsoleAndExit();

		await expect(moveAction(options)).rejects.toThrow(
			'Source and target tags are the same ("master") but no destination specified'
		);

		expect(
			errorMessages.some((msg) =>
				msg.includes(
					'Source and target tags are the same ("master") but no destination specified'
				)
			)
		).toBe(true);
		expect(
			logMessages.some((msg) => msg.includes('For within-tag moves'))
		).toBe(true);
		expect(logMessages.some((msg) => msg.includes('For cross-tag moves'))).toBe(
			true
		);

		restore();
	});
});
```

--------------------------------------------------------------------------------
/scripts/modules/task-manager/analyze-task-complexity.js:
--------------------------------------------------------------------------------

```javascript
import chalk from 'chalk';
import boxen from 'boxen';
import readline from 'readline';
import fs from 'fs';

import { log, readJSON, isSilentMode } from '../utils.js';
import {
	startLoadingIndicator,
	stopLoadingIndicator,
	displayAiUsageSummary
} from '../ui.js';
import { generateTextService } from '../ai-services-unified.js';
import {
	getDebugFlag,
	getProjectName,
	hasCodebaseAnalysis
} from '../config-manager.js';
import { getPromptManager } from '../prompt-manager.js';
import {
	COMPLEXITY_REPORT_FILE,
	LEGACY_TASKS_FILE
} from '../../../src/constants/paths.js';
import { CUSTOM_PROVIDERS } from '../../../src/constants/providers.js';
import { resolveComplexityReportOutputPath } from '../../../src/utils/path-utils.js';
import { ContextGatherer } from '../utils/contextGatherer.js';
import { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';
import { flattenTasksWithSubtasks } from '../utils.js';

/**
 * Generates the prompt for complexity analysis.
 * (Moved from ai-services.js and simplified)
 * @param {Object} tasksData - The tasks data object.
 * @param {string} [gatheredContext] - The gathered context for the analysis.
 * @returns {string} The generated prompt.
 */
function generateInternalComplexityAnalysisPrompt(
	tasksData,
	gatheredContext = ''
) {
	const tasksString = JSON.stringify(tasksData.tasks, null, 2);
	let prompt = `Analyze the following tasks to determine their complexity (1-10 scale) and recommend the number of subtasks for expansion. Provide a brief reasoning and an initial expansion prompt for each.

Tasks:
${tasksString}`;

	if (gatheredContext) {
		prompt += `\n\n# Project Context\n\n${gatheredContext}`;
	}

	prompt += `

Respond ONLY with a valid JSON array matching the schema:
[
  {
    "taskId": <number>,
    "taskTitle": "<string>",
    "complexityScore": <number 1-10>,
    "recommendedSubtasks": <number>,
    "expansionPrompt": "<string>",
    "reasoning": "<string>"
  },
  ...
]

Do not include any explanatory text, markdown formatting, or code block markers before or after the JSON array.`;

	return prompt;
}
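
// For reference, a response satisfying the schema demanded above would look
// like the following (values are illustrative only, not produced by this file):
//
// [
//   {
//     "taskId": 4,
//     "taskTitle": "Implement authentication middleware",
//     "complexityScore": 7,
//     "recommendedSubtasks": 4,
//     "expansionPrompt": "Break the middleware into token parsing, ...",
//     "reasoning": "Cross-cutting concern touching several modules."
//   }
// ]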
/**
 * Analyzes task complexity and generates expansion recommendations
 * @param {Object} options Command options
 * @param {string} options.file - Path to tasks file
 * @param {string} options.output - Path to report output file
 * @param {string|number} [options.threshold] - Complexity threshold
 * @param {boolean} [options.research] - Use research role
 * @param {string} [options.projectRoot] - Project root path (for MCP/env fallback).
 * @param {string} [options.tag] - Tag for the task
 * @param {string} [options.id] - Comma-separated list of task IDs to analyze specifically
 * @param {number} [options.from] - Starting task ID in a range to analyze
 * @param {number} [options.to] - Ending task ID in a range to analyze
 * @param {Object} [options._filteredTasksData] - Pre-filtered task data (internal use)
 * @param {number} [options._originalTaskCount] - Original task count (internal use)
 * @param {Object} context - Context object, potentially containing session and mcpLog
 * @param {Object} [context.session] - Session object from MCP server (optional)
 * @param {Object} [context.mcpLog] - MCP logger object (optional)
 * @param {function} [context.reportProgress] - Deprecated: Function to report progress (ignored)
 */
async function analyzeTaskComplexity(options, context = {}) {
	const { session, mcpLog } = context;

	const tasksPath = options.file || LEGACY_TASKS_FILE;
	const thresholdScore = parseFloat(options.threshold || '5');
	const useResearch = options.research || false;
	const projectRoot = options.projectRoot;
	const tag = options.tag;

	// New parameters for task ID filtering
	const specificIds = options.id
		? options.id
				.split(',')
				.map((id) => parseInt(id.trim(), 10))
				.filter((id) => !Number.isNaN(id))
		: null;
	const fromId = options.from !== undefined ? parseInt(options.from, 10) : null;
	const toId = options.to !== undefined ? parseInt(options.to, 10) : null;

	const outputFormat = mcpLog ? 'json' : 'text';

	const reportLog = (message, level = 'info') => {
		if (mcpLog) {
			mcpLog[level](message);
		} else if (!isSilentMode() && outputFormat === 'text') {
			log(level, message);
		}
	};
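
	// Note: reportLog routes messages to the MCP logger when one is supplied
	// (MCP callers get structured 'json' output), and otherwise to the CLI
	// logger unless silent mode is active; all chalk/boxen rendering below is
	// gated on outputFormat === 'text' for the same reason.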
	// Resolve output path using tag-aware resolution
	const outputPath = resolveComplexityReportOutputPath(
		options.output,
		{ projectRoot, tag },
		reportLog
	);

	if (outputFormat === 'text') {
		console.log(
			chalk.blue(
				'Analyzing task complexity and generating expansion recommendations...'
			)
		);
	}

	try {
		reportLog(`Reading tasks from ${tasksPath}...`, 'info');

		let tasksData;
		let originalTaskCount = 0;
		let originalData = null;

		if (options._filteredTasksData) {
			tasksData = options._filteredTasksData;
			originalTaskCount = options._originalTaskCount || tasksData.tasks.length;

			if (!options._originalTaskCount) {
				try {
					originalData = readJSON(tasksPath, projectRoot, tag);
					if (originalData && originalData.tasks) {
						originalTaskCount = originalData.tasks.length;
					}
				} catch (e) {
					log('warn', `Could not read original tasks file: ${e.message}`);
				}
			}
		} else {
			originalData = readJSON(tasksPath, projectRoot, tag);
			if (
				!originalData ||
				!originalData.tasks ||
				!Array.isArray(originalData.tasks) ||
				originalData.tasks.length === 0
			) {
				throw new Error('No tasks found in the tasks file');
			}
			originalTaskCount = originalData.tasks.length;

			// Filter tasks based on active status
			const activeStatuses = ['pending', 'blocked', 'in-progress'];
			let filteredTasks = originalData.tasks.filter((task) =>
				activeStatuses.includes(task.status?.toLowerCase() || 'pending')
			);

			// Apply ID filtering if specified
			if (specificIds && specificIds.length > 0) {
				reportLog(
					`Filtering tasks by specific IDs: ${specificIds.join(', ')}`,
					'info'
				);
				filteredTasks = filteredTasks.filter((task) =>
					specificIds.includes(task.id)
				);

				if (outputFormat === 'text') {
					if (filteredTasks.length === 0 && specificIds.length > 0) {
						console.log(
							chalk.yellow(
								`Warning: No active tasks found with IDs: ${specificIds.join(', ')}`
							)
						);
					} else if (filteredTasks.length < specificIds.length) {
						const foundIds = filteredTasks.map((t) => t.id);
						const missingIds = specificIds.filter(
							(id) => !foundIds.includes(id)
						);
						console.log(
							chalk.yellow(
								`Warning: Some requested task IDs were not found or are not active: ${missingIds.join(', ')}`
							)
						);
					}
				}
			}
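			// Range filtering (below) is only consulted when no explicit ID list
			// was given; a missing --from defaults to 1 and a missing --to
			// defaults to the highest task ID present in the file.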
			// Apply range filtering if specified
			else if (fromId !== null || toId !== null) {
				const effectiveFromId = fromId !== null ? fromId : 1;
				const effectiveToId =
					toId !== null
						? toId
						: Math.max(...originalData.tasks.map((t) => t.id));

				reportLog(
					`Filtering tasks by ID range: ${effectiveFromId} to ${effectiveToId}`,
					'info'
				);
				filteredTasks = filteredTasks.filter(
					(task) => task.id >= effectiveFromId && task.id <= effectiveToId
				);

				if (outputFormat === 'text' && filteredTasks.length === 0) {
					console.log(
						chalk.yellow(
							`Warning: No active tasks found in range: ${effectiveFromId}-${effectiveToId}`
						)
					);
				}
			}

			tasksData = {
				...originalData,
				tasks: filteredTasks,
				_originalTaskCount: originalTaskCount
			};
		}

		// --- Context Gathering ---
		let gatheredContext = '';
		if (originalData && originalData.tasks.length > 0) {
			try {
				const contextGatherer = new ContextGatherer(projectRoot, tag);
				const allTasksFlat = flattenTasksWithSubtasks(originalData.tasks);
				const fuzzySearch = new FuzzyTaskSearch(
					allTasksFlat,
					'analyze-complexity'
				);

				// Create a query from the tasks being analyzed
				const searchQuery = tasksData.tasks
					.map((t) => `${t.title} ${t.description}`)
					.join(' ');

				const searchResults = fuzzySearch.findRelevantTasks(searchQuery, {
					maxResults: 10
				});
				const relevantTaskIds = fuzzySearch.getTaskIds(searchResults);

				if (relevantTaskIds.length > 0) {
					const contextResult = await contextGatherer.gather({
						tasks: relevantTaskIds,
						format: 'research'
					});
					gatheredContext = contextResult.context || '';
				}
			} catch (contextError) {
				reportLog(
					`Could not gather additional context: ${contextError.message}`,
					'warn'
				);
			}
		}
		// --- End Context Gathering ---
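
		// Reports are incremental: an existing report at outputPath is loaded so
		// that a filtered run only refreshes the entries it re-analyzed, while
		// untouched entries for the current tag are carried forward in the merge
		// step further below.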
		const skippedCount = originalTaskCount - tasksData.tasks.length;
		reportLog(
			`Found ${originalTaskCount} total tasks in the task file.`,
			'info'
		);

		// Updated messaging to reflect filtering logic
		if (specificIds || fromId !== null || toId !== null) {
			const filterMsg = specificIds
				? `Analyzing ${tasksData.tasks.length} tasks with specific IDs: ${specificIds.join(', ')}`
				: `Analyzing ${tasksData.tasks.length} tasks in range: ${fromId || 1} to ${toId || 'end'}`;
			reportLog(filterMsg, 'info');
			if (outputFormat === 'text') {
				console.log(chalk.blue(filterMsg));
			}
		} else if (skippedCount > 0) {
			const skipMessage = `Skipping ${skippedCount} tasks marked as done/cancelled/deferred. Analyzing ${tasksData.tasks.length} active tasks.`;
			reportLog(skipMessage, 'info');
			if (outputFormat === 'text') {
				console.log(chalk.yellow(skipMessage));
			}
		}

		// Check for existing report before doing analysis
		let existingReport = null;
		const existingAnalysisMap = new Map(); // For quick lookups by task ID
		try {
			if (fs.existsSync(outputPath)) {
				existingReport = JSON.parse(fs.readFileSync(outputPath, 'utf8'));
				reportLog(`Found existing complexity report at ${outputPath}`, 'info');

				if (
					existingReport &&
					existingReport.complexityAnalysis &&
					Array.isArray(existingReport.complexityAnalysis)
				) {
					// Create lookup map of existing analysis entries
					existingReport.complexityAnalysis.forEach((item) => {
						existingAnalysisMap.set(item.taskId, item);
					});
					reportLog(
						`Existing report contains ${existingReport.complexityAnalysis.length} task analyses`,
						'info'
					);
				}
			}
		} catch (readError) {
			reportLog(
				`Warning: Could not read existing report: ${readError.message}`,
				'warn'
			);
			existingReport = null;
			existingAnalysisMap.clear();
		}

		if (tasksData.tasks.length === 0) {
			// If using ID filtering but no matching tasks, return existing report or empty
			if (existingReport && (specificIds || fromId !== null || toId !== null)) {
				reportLog(
					'No matching tasks found for analysis. Keeping existing report.',
					'info'
				);
				if (outputFormat === 'text') {
					console.log(
						chalk.yellow(
							'No matching tasks found for analysis. Keeping existing report.'
						)
					);
				}
				return { report: existingReport, telemetryData: null };
			}

			// Otherwise create empty report
			const emptyReport = {
				meta: {
					generatedAt: new Date().toISOString(),
					tasksAnalyzed: 0,
					thresholdScore: thresholdScore,
					projectName: getProjectName(session),
					usedResearch: useResearch
				},
				complexityAnalysis: existingReport?.complexityAnalysis || []
			};
			reportLog(`Writing complexity report to ${outputPath}...`, 'info');
			fs.writeFileSync(
				outputPath,
				JSON.stringify(emptyReport, null, '\t'),
				'utf8'
			);
			reportLog(
				`Task complexity analysis complete. Report written to ${outputPath}`,
				'success'
			);

			if (outputFormat === 'text') {
				console.log(
					chalk.green(
						`Task complexity analysis complete. Report written to ${outputPath}`
					)
				);

				const highComplexity = 0;
				const mediumComplexity = 0;
				const lowComplexity = 0;
				const totalAnalyzed = 0;

				console.log('\nComplexity Analysis Summary:');
				console.log('----------------------------');
				console.log(`Tasks in input file: ${originalTaskCount}`);
				console.log(`Tasks successfully analyzed: ${totalAnalyzed}`);
				console.log(`High complexity tasks: ${highComplexity}`);
				console.log(`Medium complexity tasks: ${mediumComplexity}`);
				console.log(`Low complexity tasks: ${lowComplexity}`);
				console.log(
					`Sum verification: ${highComplexity + mediumComplexity + lowComplexity} (should equal ${totalAnalyzed})`
				);
				console.log(`Research-backed analysis: ${useResearch ? 'Yes' : 'No'}`);
				console.log(
					`\nSee ${outputPath} for the full report and expansion commands.`
				);
				console.log(
					boxen(
						chalk.white.bold('Suggested Next Steps:') +
							'\n\n' +
							`${chalk.cyan('1.')} Run ${chalk.yellow('task-master complexity-report')} to review detailed findings\n` +
							`${chalk.cyan('2.')} Run ${chalk.yellow('task-master expand --id=<id>')} to break down complex tasks\n` +
							`${chalk.cyan('3.')} Run ${chalk.yellow('task-master expand --all')} to expand all pending tasks based on complexity`,
						{
							padding: 1,
							borderColor: 'cyan',
							borderStyle: 'round',
							margin: { top: 1 }
						}
					)
				);
			}

			return { report: emptyReport, telemetryData: null };
		}
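
		// The prompt pair comes from the 'analyze-complexity' template via
		// PromptManager; the AI role is 'research' or 'main' depending on the
		// --research flag, and telemetry from the call is surfaced at the end.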
		// Continue with regular analysis path
		// Load prompts using PromptManager
		const promptManager = getPromptManager();

		// Check if Claude Code is being used as the provider
		const promptParams = {
			tasks: tasksData.tasks,
			gatheredContext: gatheredContext || '',
			useResearch: useResearch,
			hasCodebaseAnalysis: hasCodebaseAnalysis(
				useResearch,
				projectRoot,
				session
			),
			projectRoot: projectRoot || ''
		};

		const { systemPrompt, userPrompt: prompt } = await promptManager.loadPrompt(
			'analyze-complexity',
			promptParams,
			'default'
		);

		let loadingIndicator = null;
		if (outputFormat === 'text') {
			loadingIndicator = startLoadingIndicator(
				`${useResearch ? 'Researching' : 'Analyzing'} the complexity of your tasks with AI...\n`
			);
		}

		let aiServiceResponse = null;
		let complexityAnalysis = null;

		try {
			const role = useResearch ? 'research' : 'main';

			aiServiceResponse = await generateTextService({
				prompt,
				systemPrompt,
				role,
				session,
				projectRoot,
				commandName: 'analyze-complexity',
				outputType: mcpLog ? 'mcp' : 'cli'
			});

			if (loadingIndicator) {
				stopLoadingIndicator(loadingIndicator);
				loadingIndicator = null;
			}
			if (outputFormat === 'text') {
				readline.clearLine(process.stdout, 0);
				readline.cursorTo(process.stdout, 0);
				console.log(
					chalk.green('AI service call complete. Parsing response...')
				);
			}

			reportLog('Parsing complexity analysis from text response...', 'info');
			try {
				let cleanedResponse = aiServiceResponse.mainResult;
				cleanedResponse = cleanedResponse.trim();

				const codeBlockMatch = cleanedResponse.match(
					/```(?:json)?\s*([\s\S]*?)\s*```/
				);
				if (codeBlockMatch) {
					cleanedResponse = codeBlockMatch[1].trim();
				} else {
					const firstBracket = cleanedResponse.indexOf('[');
					const lastBracket = cleanedResponse.lastIndexOf(']');
					if (firstBracket !== -1 && lastBracket > firstBracket) {
						cleanedResponse = cleanedResponse.substring(
							firstBracket,
							lastBracket + 1
						);
					} else {
						reportLog(
							'Warning: Response does not appear to be a JSON array.',
							'warn'
						);
					}
				}

				if (outputFormat === 'text' && getDebugFlag(session)) {
					console.log(chalk.gray('Attempting to parse cleaned JSON...'));
					console.log(chalk.gray('Cleaned response (first 100 chars):'));
					console.log(chalk.gray(cleanedResponse.substring(0, 100)));
					console.log(chalk.gray('Last 100 chars:'));
					console.log(
						chalk.gray(cleanedResponse.substring(cleanedResponse.length - 100))
					);
				}

				complexityAnalysis = JSON.parse(cleanedResponse);
			} catch (parseError) {
				if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
				reportLog(
					`Error parsing complexity analysis JSON: ${parseError.message}`,
					'error'
				);
				if (outputFormat === 'text') {
					console.error(
						chalk.red(
							`Error parsing complexity analysis JSON: ${parseError.message}`
						)
					);
				}
				throw parseError;
			}

			const taskIds = tasksData.tasks.map((t) => t.id);
			const analysisTaskIds = complexityAnalysis.map((a) => a.taskId);
			const missingTaskIds = taskIds.filter(
				(id) => !analysisTaskIds.includes(id)
			);

			if (missingTaskIds.length > 0) {
				reportLog(
					`Missing analysis for ${missingTaskIds.length} tasks: ${missingTaskIds.join(', ')}`,
					'warn'
				);
				if (outputFormat === 'text') {
					console.log(
						chalk.yellow(
							`Missing analysis for ${missingTaskIds.length} tasks: ${missingTaskIds.join(', ')}`
						)
					);
				}

				for (const missingId of missingTaskIds) {
					const missingTask = tasksData.tasks.find((t) => t.id === missingId);
					if (missingTask) {
						reportLog(`Adding default analysis for task ${missingId}`, 'info');
						complexityAnalysis.push({
							taskId: missingId,
							taskTitle: missingTask.title,
							complexityScore: 5,
							recommendedSubtasks: 3,
							expansionPrompt: `Break down this task with a focus on ${missingTask.title.toLowerCase()}.`,
							reasoning:
								'Automatically added due to missing analysis in AI response.'
						});
					}
				}
			}
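
			// Any task the model skipped has been backfilled above with a neutral
			// default (score 5, 3 subtasks) so the report covers every analyzed
			// task; the merge below then reconciles with prior runs.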
			// Merge with existing report - only keep entries from the current tag
			let finalComplexityAnalysis = [];

			if (existingReport && Array.isArray(existingReport.complexityAnalysis)) {
				// Create a map of task IDs that we just analyzed
				const analyzedTaskIds = new Set(
					complexityAnalysis.map((item) => item.taskId)
				);

				// Keep existing entries that weren't in this analysis run AND belong to the current tag
				// We determine tag membership by checking if the task ID exists in the current tag's tasks
				const currentTagTaskIds = new Set(tasksData.tasks.map((t) => t.id));
				const existingEntriesNotAnalyzed =
					existingReport.complexityAnalysis.filter(
						(item) =>
							!analyzedTaskIds.has(item.taskId) &&
							currentTagTaskIds.has(item.taskId) // Only keep entries for tasks in current tag
					);

				// Combine with new analysis
				finalComplexityAnalysis = [
					...existingEntriesNotAnalyzed,
					...complexityAnalysis
				];

				reportLog(
					`Merged ${complexityAnalysis.length} new analyses with ${existingEntriesNotAnalyzed.length} existing entries from current tag`,
					'info'
				);
			} else {
				// No existing report or invalid format, just use the new analysis
				finalComplexityAnalysis = complexityAnalysis;
			}

			const report = {
				meta: {
					generatedAt: new Date().toISOString(),
					tasksAnalyzed: tasksData.tasks.length,
					totalTasks: originalTaskCount,
					analysisCount: finalComplexityAnalysis.length,
					thresholdScore: thresholdScore,
					projectName: getProjectName(session),
					usedResearch: useResearch
				},
				complexityAnalysis: finalComplexityAnalysis
			};

			reportLog(`Writing complexity report to ${outputPath}...`, 'info');
			fs.writeFileSync(outputPath, JSON.stringify(report, null, '\t'), 'utf8');

			reportLog(
				`Task complexity analysis complete. Report written to ${outputPath}`,
				'success'
			);

			if (outputFormat === 'text') {
				console.log(
					chalk.green(
						`Task complexity analysis complete. Report written to ${outputPath}`
					)
				);

				// Calculate statistics specifically for this analysis run
				const highComplexity = complexityAnalysis.filter(
					(t) => t.complexityScore >= 8
				).length;
				const mediumComplexity = complexityAnalysis.filter(
					(t) => t.complexityScore >= 5 && t.complexityScore < 8
				).length;
				const lowComplexity = complexityAnalysis.filter(
					(t) => t.complexityScore < 5
				).length;
				const totalAnalyzed = complexityAnalysis.length;

				console.log('\nCurrent Analysis Summary:');
				console.log('----------------------------');
				console.log(`Tasks analyzed in this run: ${totalAnalyzed}`);
				console.log(`High complexity tasks: ${highComplexity}`);
				console.log(`Medium complexity tasks: ${mediumComplexity}`);
				console.log(`Low complexity tasks: ${lowComplexity}`);

				if (existingReport) {
					console.log('\nUpdated Report Summary:');
					console.log('----------------------------');
					console.log(
						`Total analyses in report: ${finalComplexityAnalysis.length}`
					);
					console.log(
						`Analyses from previous runs: ${finalComplexityAnalysis.length - totalAnalyzed}`
					);
					console.log(`New/updated analyses: ${totalAnalyzed}`);
				}
				console.log(`Research-backed analysis: ${useResearch ? 'Yes' : 'No'}`);
				console.log(
					`\nSee ${outputPath} for the full report and expansion commands.`
				);
				console.log(
					boxen(
						chalk.white.bold('Suggested Next Steps:') +
							'\n\n' +
							`${chalk.cyan('1.')} Run ${chalk.yellow('task-master complexity-report')} to review detailed findings\n` +
							`${chalk.cyan('2.')} Run ${chalk.yellow('task-master expand --id=<id>')} to break down complex tasks\n` +
							`${chalk.cyan('3.')} Run ${chalk.yellow('task-master expand --all')} to expand all pending tasks based on complexity`,
						{
							padding: 1,
							borderColor: 'cyan',
							borderStyle: 'round',
							margin: { top: 1 }
						}
					)
				);

				if (getDebugFlag(session)) {
					console.debug(
						chalk.gray(
							`Final analysis object: ${JSON.stringify(report, null, 2)}`
						)
					);
				}

				if (aiServiceResponse.telemetryData) {
					displayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');
				}
			}

			return {
				report: report,
				telemetryData: aiServiceResponse?.telemetryData,
				tagInfo: aiServiceResponse?.tagInfo
			};
		} catch (aiError) {
			if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
			reportLog(`Error during AI service call: ${aiError.message}`, 'error');
			if (outputFormat === 'text') {
				console.error(
					chalk.red(`Error during AI service call: ${aiError.message}`)
				);
				if (aiError.message.includes('API key')) {
					console.log(
						chalk.yellow(
							'\nPlease ensure your API keys are correctly configured in .env or ~/.taskmaster/.env'
						)
					);
					console.log(
						chalk.yellow("Run 'task-master models --setup' if needed.")
					);
				}
			}
			throw aiError;
		}
	} catch (error) {
		reportLog(`Error analyzing task complexity: ${error.message}`, 'error');
		if (outputFormat === 'text') {
			console.error(
				chalk.red(`Error analyzing task complexity: ${error.message}`)
			);
			if (getDebugFlag(session)) {
				console.error(error);
			}
			process.exit(1);
		} else {
			throw error;
		}
	}
}

export default analyzeTaskComplexity;
```