This is page 29 of 38. Use http://codebase.md/eyaltoledano/claude-task-master?page={x} to view the full context. # Directory Structure ``` ├── .changeset │ ├── config.json │ └── README.md ├── .claude │ ├── agents │ │ ├── task-checker.md │ │ ├── task-executor.md │ │ └── task-orchestrator.md │ ├── commands │ │ ├── dedupe.md │ │ └── tm │ │ ├── add-dependency │ │ │ └── add-dependency.md │ │ ├── add-subtask │ │ │ ├── add-subtask.md │ │ │ └── convert-task-to-subtask.md │ │ ├── add-task │ │ │ └── add-task.md │ │ ├── analyze-complexity │ │ │ └── analyze-complexity.md │ │ ├── complexity-report │ │ │ └── complexity-report.md │ │ ├── expand │ │ │ ├── expand-all-tasks.md │ │ │ └── expand-task.md │ │ ├── fix-dependencies │ │ │ └── fix-dependencies.md │ │ ├── generate │ │ │ └── generate-tasks.md │ │ ├── help.md │ │ ├── init │ │ │ ├── init-project-quick.md │ │ │ └── init-project.md │ │ ├── learn.md │ │ ├── list │ │ │ ├── list-tasks-by-status.md │ │ │ ├── list-tasks-with-subtasks.md │ │ │ └── list-tasks.md │ │ ├── models │ │ │ ├── setup-models.md │ │ │ └── view-models.md │ │ ├── next │ │ │ └── next-task.md │ │ ├── parse-prd │ │ │ ├── parse-prd-with-research.md │ │ │ └── parse-prd.md │ │ ├── remove-dependency │ │ │ └── remove-dependency.md │ │ ├── remove-subtask │ │ │ └── remove-subtask.md │ │ ├── remove-subtasks │ │ │ ├── remove-all-subtasks.md │ │ │ └── remove-subtasks.md │ │ ├── remove-task │ │ │ └── remove-task.md │ │ ├── set-status │ │ │ ├── to-cancelled.md │ │ │ ├── to-deferred.md │ │ │ ├── to-done.md │ │ │ ├── to-in-progress.md │ │ │ ├── to-pending.md │ │ │ └── to-review.md │ │ ├── setup │ │ │ ├── install-taskmaster.md │ │ │ └── quick-install-taskmaster.md │ │ ├── show │ │ │ └── show-task.md │ │ ├── status │ │ │ └── project-status.md │ │ ├── sync-readme │ │ │ └── sync-readme.md │ │ ├── tm-main.md │ │ ├── update │ │ │ ├── update-single-task.md │ │ │ ├── update-task.md │ │ │ └── update-tasks-from-id.md │ │ ├── utils │ │ │ └── analyze-project.md │ │ ├── validate-dependencies │ 
│ │ └── validate-dependencies.md │ │ └── workflows │ │ ├── auto-implement-tasks.md │ │ ├── command-pipeline.md │ │ └── smart-workflow.md │ └── TM_COMMANDS_GUIDE.md ├── .coderabbit.yaml ├── .cursor │ ├── mcp.json │ └── rules │ ├── ai_providers.mdc │ ├── ai_services.mdc │ ├── architecture.mdc │ ├── changeset.mdc │ ├── commands.mdc │ ├── context_gathering.mdc │ ├── cursor_rules.mdc │ ├── dependencies.mdc │ ├── dev_workflow.mdc │ ├── git_workflow.mdc │ ├── glossary.mdc │ ├── mcp.mdc │ ├── new_features.mdc │ ├── self_improve.mdc │ ├── tags.mdc │ ├── taskmaster.mdc │ ├── tasks.mdc │ ├── telemetry.mdc │ ├── test_workflow.mdc │ ├── tests.mdc │ ├── ui.mdc │ └── utilities.mdc ├── .cursorignore ├── .env.example ├── .github │ ├── ISSUE_TEMPLATE │ │ ├── bug_report.md │ │ ├── enhancements---feature-requests.md │ │ └── feedback.md │ ├── PULL_REQUEST_TEMPLATE │ │ ├── bugfix.md │ │ ├── config.yml │ │ ├── feature.md │ │ └── integration.md │ ├── PULL_REQUEST_TEMPLATE.md │ ├── scripts │ │ ├── auto-close-duplicates.mjs │ │ ├── backfill-duplicate-comments.mjs │ │ ├── check-pre-release-mode.mjs │ │ ├── parse-metrics.mjs │ │ ├── release.mjs │ │ ├── tag-extension.mjs │ │ └── utils.mjs │ └── workflows │ ├── auto-close-duplicates.yml │ ├── backfill-duplicate-comments.yml │ ├── ci.yml │ ├── claude-dedupe-issues.yml │ ├── claude-docs-trigger.yml │ ├── claude-docs-updater.yml │ ├── claude-issue-triage.yml │ ├── claude.yml │ ├── extension-ci.yml │ ├── extension-release.yml │ ├── log-issue-events.yml │ ├── pre-release.yml │ ├── release-check.yml │ ├── release.yml │ ├── update-models-md.yml │ └── weekly-metrics-discord.yml ├── .gitignore ├── .kiro │ ├── hooks │ │ ├── tm-code-change-task-tracker.kiro.hook │ │ ├── tm-complexity-analyzer.kiro.hook │ │ ├── tm-daily-standup-assistant.kiro.hook │ │ ├── tm-git-commit-task-linker.kiro.hook │ │ ├── tm-pr-readiness-checker.kiro.hook │ │ ├── tm-task-dependency-auto-progression.kiro.hook │ │ └── tm-test-success-task-completer.kiro.hook │ ├── settings │ │ └── 
mcp.json │ └── steering │ ├── dev_workflow.md │ ├── kiro_rules.md │ ├── self_improve.md │ ├── taskmaster_hooks_workflow.md │ └── taskmaster.md ├── .manypkg.json ├── .mcp.json ├── .npmignore ├── .nvmrc ├── .taskmaster │ ├── CLAUDE.md │ ├── config.json │ ├── docs │ │ ├── MIGRATION-ROADMAP.md │ │ ├── prd-tm-start.txt │ │ ├── prd.txt │ │ ├── README.md │ │ ├── research │ │ │ ├── 2025-06-14_how-can-i-improve-the-scope-up-and-scope-down-comm.md │ │ │ ├── 2025-06-14_should-i-be-using-any-specific-libraries-for-this.md │ │ │ ├── 2025-06-14_test-save-functionality.md │ │ │ ├── 2025-06-14_test-the-fix-for-duplicate-saves-final-test.md │ │ │ └── 2025-08-01_do-we-need-to-add-new-commands-or-can-we-just-weap.md │ │ ├── task-template-importing-prd.txt │ │ ├── test-prd.txt │ │ └── tm-core-phase-1.txt │ ├── reports │ │ ├── task-complexity-report_cc-kiro-hooks.json │ │ ├── task-complexity-report_test-prd-tag.json │ │ ├── task-complexity-report_tm-core-phase-1.json │ │ ├── task-complexity-report.json │ │ └── tm-core-complexity.json │ ├── state.json │ ├── tasks │ │ ├── task_001_tm-start.txt │ │ ├── task_002_tm-start.txt │ │ ├── task_003_tm-start.txt │ │ ├── task_004_tm-start.txt │ │ ├── task_007_tm-start.txt │ │ └── tasks.json │ └── templates │ └── example_prd.txt ├── .vscode │ ├── extensions.json │ └── settings.json ├── apps │ ├── cli │ │ ├── CHANGELOG.md │ │ ├── package.json │ │ ├── src │ │ │ ├── commands │ │ │ │ ├── auth.command.ts │ │ │ │ ├── context.command.ts │ │ │ │ ├── list.command.ts │ │ │ │ ├── set-status.command.ts │ │ │ │ ├── show.command.ts │ │ │ │ └── start.command.ts │ │ │ ├── index.ts │ │ │ ├── ui │ │ │ │ ├── components │ │ │ │ │ ├── dashboard.component.ts │ │ │ │ │ ├── header.component.ts │ │ │ │ │ ├── index.ts │ │ │ │ │ ├── next-task.component.ts │ │ │ │ │ ├── suggested-steps.component.ts │ │ │ │ │ └── task-detail.component.ts │ │ │ │ └── index.ts │ │ │ └── utils │ │ │ ├── auto-update.ts │ │ │ └── ui.ts │ │ └── tsconfig.json │ ├── docs │ │ ├── archive │ │ │ ├── 
ai-client-utils-example.mdx │ │ │ ├── ai-development-workflow.mdx │ │ │ ├── command-reference.mdx │ │ │ ├── configuration.mdx │ │ │ ├── cursor-setup.mdx │ │ │ ├── examples.mdx │ │ │ └── Installation.mdx │ │ ├── best-practices │ │ │ ├── advanced-tasks.mdx │ │ │ ├── configuration-advanced.mdx │ │ │ └── index.mdx │ │ ├── capabilities │ │ │ ├── cli-root-commands.mdx │ │ │ ├── index.mdx │ │ │ ├── mcp.mdx │ │ │ └── task-structure.mdx │ │ ├── CHANGELOG.md │ │ ├── docs.json │ │ ├── favicon.svg │ │ ├── getting-started │ │ │ ├── contribute.mdx │ │ │ ├── faq.mdx │ │ │ └── quick-start │ │ │ ├── configuration-quick.mdx │ │ │ ├── execute-quick.mdx │ │ │ ├── installation.mdx │ │ │ ├── moving-forward.mdx │ │ │ ├── prd-quick.mdx │ │ │ ├── quick-start.mdx │ │ │ ├── requirements.mdx │ │ │ ├── rules-quick.mdx │ │ │ └── tasks-quick.mdx │ │ ├── introduction.mdx │ │ ├── licensing.md │ │ ├── logo │ │ │ ├── dark.svg │ │ │ ├── light.svg │ │ │ └── task-master-logo.png │ │ ├── package.json │ │ ├── README.md │ │ ├── style.css │ │ ├── vercel.json │ │ └── whats-new.mdx │ └── extension │ ├── .vscodeignore │ ├── assets │ │ ├── banner.png │ │ ├── icon-dark.svg │ │ ├── icon-light.svg │ │ ├── icon.png │ │ ├── screenshots │ │ │ ├── kanban-board.png │ │ │ └── task-details.png │ │ └── sidebar-icon.svg │ ├── CHANGELOG.md │ ├── components.json │ ├── docs │ │ ├── extension-CI-setup.md │ │ └── extension-development-guide.md │ ├── esbuild.js │ ├── LICENSE │ ├── package.json │ ├── package.mjs │ ├── package.publish.json │ ├── README.md │ ├── src │ │ ├── components │ │ │ ├── ConfigView.tsx │ │ │ ├── constants.ts │ │ │ ├── TaskDetails │ │ │ │ ├── AIActionsSection.tsx │ │ │ │ ├── DetailsSection.tsx │ │ │ │ ├── PriorityBadge.tsx │ │ │ │ ├── SubtasksSection.tsx │ │ │ │ ├── TaskMetadataSidebar.tsx │ │ │ │ └── useTaskDetails.ts │ │ │ ├── TaskDetailsView.tsx │ │ │ ├── TaskMasterLogo.tsx │ │ │ └── ui │ │ │ ├── badge.tsx │ │ │ ├── breadcrumb.tsx │ │ │ ├── button.tsx │ │ │ ├── card.tsx │ │ │ ├── collapsible.tsx │ │ │ ├── 
CollapsibleSection.tsx │ │ │ ├── dropdown-menu.tsx │ │ │ ├── label.tsx │ │ │ ├── scroll-area.tsx │ │ │ ├── separator.tsx │ │ │ ├── shadcn-io │ │ │ │ └── kanban │ │ │ │ └── index.tsx │ │ │ └── textarea.tsx │ │ ├── extension.ts │ │ ├── index.ts │ │ ├── lib │ │ │ └── utils.ts │ │ ├── services │ │ │ ├── config-service.ts │ │ │ ├── error-handler.ts │ │ │ ├── notification-preferences.ts │ │ │ ├── polling-service.ts │ │ │ ├── polling-strategies.ts │ │ │ ├── sidebar-webview-manager.ts │ │ │ ├── task-repository.ts │ │ │ ├── terminal-manager.ts │ │ │ └── webview-manager.ts │ │ ├── test │ │ │ └── extension.test.ts │ │ ├── utils │ │ │ ├── configManager.ts │ │ │ ├── connectionManager.ts │ │ │ ├── errorHandler.ts │ │ │ ├── event-emitter.ts │ │ │ ├── logger.ts │ │ │ ├── mcpClient.ts │ │ │ ├── notificationPreferences.ts │ │ │ └── task-master-api │ │ │ ├── cache │ │ │ │ └── cache-manager.ts │ │ │ ├── index.ts │ │ │ ├── mcp-client.ts │ │ │ ├── transformers │ │ │ │ └── task-transformer.ts │ │ │ └── types │ │ │ └── index.ts │ │ └── webview │ │ ├── App.tsx │ │ ├── components │ │ │ ├── AppContent.tsx │ │ │ ├── EmptyState.tsx │ │ │ ├── ErrorBoundary.tsx │ │ │ ├── PollingStatus.tsx │ │ │ ├── PriorityBadge.tsx │ │ │ ├── SidebarView.tsx │ │ │ ├── TagDropdown.tsx │ │ │ ├── TaskCard.tsx │ │ │ ├── TaskEditModal.tsx │ │ │ ├── TaskMasterKanban.tsx │ │ │ ├── ToastContainer.tsx │ │ │ └── ToastNotification.tsx │ │ ├── constants │ │ │ └── index.ts │ │ ├── contexts │ │ │ └── VSCodeContext.tsx │ │ ├── hooks │ │ │ ├── useTaskQueries.ts │ │ │ ├── useVSCodeMessages.ts │ │ │ └── useWebviewHeight.ts │ │ ├── index.css │ │ ├── index.tsx │ │ ├── providers │ │ │ └── QueryProvider.tsx │ │ ├── reducers │ │ │ └── appReducer.ts │ │ ├── sidebar.tsx │ │ ├── types │ │ │ └── index.ts │ │ └── utils │ │ ├── logger.ts │ │ └── toast.ts │ └── tsconfig.json ├── assets │ ├── .windsurfrules │ ├── AGENTS.md │ ├── claude │ │ ├── agents │ │ │ ├── task-checker.md │ │ │ ├── task-executor.md │ │ │ └── task-orchestrator.md │ │ ├── 
commands │ │ │ └── tm │ │ │ ├── add-dependency │ │ │ │ └── add-dependency.md │ │ │ ├── add-subtask │ │ │ │ ├── add-subtask.md │ │ │ │ └── convert-task-to-subtask.md │ │ │ ├── add-task │ │ │ │ └── add-task.md │ │ │ ├── analyze-complexity │ │ │ │ └── analyze-complexity.md │ │ │ ├── clear-subtasks │ │ │ │ ├── clear-all-subtasks.md │ │ │ │ └── clear-subtasks.md │ │ │ ├── complexity-report │ │ │ │ └── complexity-report.md │ │ │ ├── expand │ │ │ │ ├── expand-all-tasks.md │ │ │ │ └── expand-task.md │ │ │ ├── fix-dependencies │ │ │ │ └── fix-dependencies.md │ │ │ ├── generate │ │ │ │ └── generate-tasks.md │ │ │ ├── help.md │ │ │ ├── init │ │ │ │ ├── init-project-quick.md │ │ │ │ └── init-project.md │ │ │ ├── learn.md │ │ │ ├── list │ │ │ │ ├── list-tasks-by-status.md │ │ │ │ ├── list-tasks-with-subtasks.md │ │ │ │ └── list-tasks.md │ │ │ ├── models │ │ │ │ ├── setup-models.md │ │ │ │ └── view-models.md │ │ │ ├── next │ │ │ │ └── next-task.md │ │ │ ├── parse-prd │ │ │ │ ├── parse-prd-with-research.md │ │ │ │ └── parse-prd.md │ │ │ ├── remove-dependency │ │ │ │ └── remove-dependency.md │ │ │ ├── remove-subtask │ │ │ │ └── remove-subtask.md │ │ │ ├── remove-subtasks │ │ │ │ ├── remove-all-subtasks.md │ │ │ │ └── remove-subtasks.md │ │ │ ├── remove-task │ │ │ │ └── remove-task.md │ │ │ ├── set-status │ │ │ │ ├── to-cancelled.md │ │ │ │ ├── to-deferred.md │ │ │ │ ├── to-done.md │ │ │ │ ├── to-in-progress.md │ │ │ │ ├── to-pending.md │ │ │ │ └── to-review.md │ │ │ ├── setup │ │ │ │ ├── install-taskmaster.md │ │ │ │ └── quick-install-taskmaster.md │ │ │ ├── show │ │ │ │ └── show-task.md │ │ │ ├── status │ │ │ │ └── project-status.md │ │ │ ├── sync-readme │ │ │ │ └── sync-readme.md │ │ │ ├── tm-main.md │ │ │ ├── update │ │ │ │ ├── update-single-task.md │ │ │ │ ├── update-task.md │ │ │ │ └── update-tasks-from-id.md │ │ │ ├── utils │ │ │ │ └── analyze-project.md │ │ │ ├── validate-dependencies │ │ │ │ └── validate-dependencies.md │ │ │ └── workflows │ │ │ ├── auto-implement-tasks.md 
│ │ │ ├── command-pipeline.md │ │ │ └── smart-workflow.md │ │ └── TM_COMMANDS_GUIDE.md │ ├── config.json │ ├── env.example │ ├── example_prd.txt │ ├── gitignore │ ├── kiro-hooks │ │ ├── tm-code-change-task-tracker.kiro.hook │ │ ├── tm-complexity-analyzer.kiro.hook │ │ ├── tm-daily-standup-assistant.kiro.hook │ │ ├── tm-git-commit-task-linker.kiro.hook │ │ ├── tm-pr-readiness-checker.kiro.hook │ │ ├── tm-task-dependency-auto-progression.kiro.hook │ │ └── tm-test-success-task-completer.kiro.hook │ ├── roocode │ │ ├── .roo │ │ │ ├── rules-architect │ │ │ │ └── architect-rules │ │ │ ├── rules-ask │ │ │ │ └── ask-rules │ │ │ ├── rules-code │ │ │ │ └── code-rules │ │ │ ├── rules-debug │ │ │ │ └── debug-rules │ │ │ ├── rules-orchestrator │ │ │ │ └── orchestrator-rules │ │ │ └── rules-test │ │ │ └── test-rules │ │ └── .roomodes │ ├── rules │ │ ├── cursor_rules.mdc │ │ ├── dev_workflow.mdc │ │ ├── self_improve.mdc │ │ ├── taskmaster_hooks_workflow.mdc │ │ └── taskmaster.mdc │ └── scripts_README.md ├── bin │ └── task-master.js ├── biome.json ├── CHANGELOG.md ├── CLAUDE.md ├── context │ ├── chats │ │ ├── add-task-dependencies-1.md │ │ └── max-min-tokens.txt.md │ ├── fastmcp-core.txt │ ├── fastmcp-docs.txt │ ├── MCP_INTEGRATION.md │ ├── mcp-js-sdk-docs.txt │ ├── mcp-protocol-repo.txt │ ├── mcp-protocol-schema-03262025.json │ └── mcp-protocol-spec.txt ├── CONTRIBUTING.md ├── docs │ ├── CLI-COMMANDER-PATTERN.md │ ├── command-reference.md │ ├── configuration.md │ ├── contributor-docs │ │ └── testing-roo-integration.md │ ├── cross-tag-task-movement.md │ ├── examples │ │ └── claude-code-usage.md │ ├── examples.md │ ├── licensing.md │ ├── mcp-provider-guide.md │ ├── mcp-provider.md │ ├── migration-guide.md │ ├── models.md │ ├── providers │ │ └── gemini-cli.md │ ├── README.md │ ├── scripts │ │ └── models-json-to-markdown.js │ ├── task-structure.md │ └── tutorial.md ├── images │ └── logo.png ├── index.js ├── jest.config.js ├── jest.resolver.cjs ├── LICENSE ├── llms-install.md ├── 
mcp-server │ ├── server.js │ └── src │ ├── core │ │ ├── __tests__ │ │ │ └── context-manager.test.js │ │ ├── context-manager.js │ │ ├── direct-functions │ │ │ ├── add-dependency.js │ │ │ ├── add-subtask.js │ │ │ ├── add-tag.js │ │ │ ├── add-task.js │ │ │ ├── analyze-task-complexity.js │ │ │ ├── cache-stats.js │ │ │ ├── clear-subtasks.js │ │ │ ├── complexity-report.js │ │ │ ├── copy-tag.js │ │ │ ├── create-tag-from-branch.js │ │ │ ├── delete-tag.js │ │ │ ├── expand-all-tasks.js │ │ │ ├── expand-task.js │ │ │ ├── fix-dependencies.js │ │ │ ├── generate-task-files.js │ │ │ ├── initialize-project.js │ │ │ ├── list-tags.js │ │ │ ├── list-tasks.js │ │ │ ├── models.js │ │ │ ├── move-task-cross-tag.js │ │ │ ├── move-task.js │ │ │ ├── next-task.js │ │ │ ├── parse-prd.js │ │ │ ├── remove-dependency.js │ │ │ ├── remove-subtask.js │ │ │ ├── remove-task.js │ │ │ ├── rename-tag.js │ │ │ ├── research.js │ │ │ ├── response-language.js │ │ │ ├── rules.js │ │ │ ├── scope-down.js │ │ │ ├── scope-up.js │ │ │ ├── set-task-status.js │ │ │ ├── show-task.js │ │ │ ├── update-subtask-by-id.js │ │ │ ├── update-task-by-id.js │ │ │ ├── update-tasks.js │ │ │ ├── use-tag.js │ │ │ └── validate-dependencies.js │ │ ├── task-master-core.js │ │ └── utils │ │ ├── env-utils.js │ │ └── path-utils.js │ ├── custom-sdk │ │ ├── errors.js │ │ ├── index.js │ │ ├── json-extractor.js │ │ ├── language-model.js │ │ ├── message-converter.js │ │ └── schema-converter.js │ ├── index.js │ ├── logger.js │ ├── providers │ │ └── mcp-provider.js │ └── tools │ ├── add-dependency.js │ ├── add-subtask.js │ ├── add-tag.js │ ├── add-task.js │ ├── analyze.js │ ├── clear-subtasks.js │ ├── complexity-report.js │ ├── copy-tag.js │ ├── delete-tag.js │ ├── expand-all.js │ ├── expand-task.js │ ├── fix-dependencies.js │ ├── generate.js │ ├── get-operation-status.js │ ├── get-task.js │ ├── get-tasks.js │ ├── index.js │ ├── initialize-project.js │ ├── list-tags.js │ ├── models.js │ ├── move-task.js │ ├── next-task.js │ ├── parse-prd.js │ 
├── remove-dependency.js │ ├── remove-subtask.js │ ├── remove-task.js │ ├── rename-tag.js │ ├── research.js │ ├── response-language.js │ ├── rules.js │ ├── scope-down.js │ ├── scope-up.js │ ├── set-task-status.js │ ├── update-subtask.js │ ├── update-task.js │ ├── update.js │ ├── use-tag.js │ ├── utils.js │ └── validate-dependencies.js ├── mcp-test.js ├── output.json ├── package-lock.json ├── package.json ├── packages │ ├── build-config │ │ ├── CHANGELOG.md │ │ ├── package.json │ │ ├── src │ │ │ └── tsdown.base.ts │ │ └── tsconfig.json │ └── tm-core │ ├── .gitignore │ ├── CHANGELOG.md │ ├── docs │ │ └── listTasks-architecture.md │ ├── package.json │ ├── POC-STATUS.md │ ├── README.md │ ├── src │ │ ├── auth │ │ │ ├── auth-manager.test.ts │ │ │ ├── auth-manager.ts │ │ │ ├── config.ts │ │ │ ├── credential-store.test.ts │ │ │ ├── credential-store.ts │ │ │ ├── index.ts │ │ │ ├── oauth-service.ts │ │ │ ├── supabase-session-storage.ts │ │ │ └── types.ts │ │ ├── clients │ │ │ ├── index.ts │ │ │ └── supabase-client.ts │ │ ├── config │ │ │ ├── config-manager.spec.ts │ │ │ ├── config-manager.ts │ │ │ ├── index.ts │ │ │ └── services │ │ │ ├── config-loader.service.spec.ts │ │ │ ├── config-loader.service.ts │ │ │ ├── config-merger.service.spec.ts │ │ │ ├── config-merger.service.ts │ │ │ ├── config-persistence.service.spec.ts │ │ │ ├── config-persistence.service.ts │ │ │ ├── environment-config-provider.service.spec.ts │ │ │ ├── environment-config-provider.service.ts │ │ │ ├── index.ts │ │ │ ├── runtime-state-manager.service.spec.ts │ │ │ └── runtime-state-manager.service.ts │ │ ├── constants │ │ │ └── index.ts │ │ ├── entities │ │ │ └── task.entity.ts │ │ ├── errors │ │ │ ├── index.ts │ │ │ └── task-master-error.ts │ │ ├── executors │ │ │ ├── base-executor.ts │ │ │ ├── claude-executor.ts │ │ │ ├── executor-factory.ts │ │ │ ├── executor-service.ts │ │ │ ├── index.ts │ │ │ └── types.ts │ │ ├── index.ts │ │ ├── interfaces │ │ │ ├── ai-provider.interface.ts │ │ │ ├── 
configuration.interface.ts │ │ │ ├── index.ts │ │ │ └── storage.interface.ts │ │ ├── logger │ │ │ ├── factory.ts │ │ │ ├── index.ts │ │ │ └── logger.ts │ │ ├── mappers │ │ │ └── TaskMapper.ts │ │ ├── parser │ │ │ └── index.ts │ │ ├── providers │ │ │ ├── ai │ │ │ │ ├── base-provider.ts │ │ │ │ └── index.ts │ │ │ └── index.ts │ │ ├── repositories │ │ │ ├── supabase-task-repository.ts │ │ │ └── task-repository.interface.ts │ │ ├── services │ │ │ ├── index.ts │ │ │ ├── organization.service.ts │ │ │ ├── task-execution-service.ts │ │ │ └── task-service.ts │ │ ├── storage │ │ │ ├── api-storage.ts │ │ │ ├── file-storage │ │ │ │ ├── file-operations.ts │ │ │ │ ├── file-storage.ts │ │ │ │ ├── format-handler.ts │ │ │ │ ├── index.ts │ │ │ │ └── path-resolver.ts │ │ │ ├── index.ts │ │ │ └── storage-factory.ts │ │ ├── subpath-exports.test.ts │ │ ├── task-master-core.ts │ │ ├── types │ │ │ ├── database.types.ts │ │ │ ├── index.ts │ │ │ └── legacy.ts │ │ └── utils │ │ ├── id-generator.ts │ │ └── index.ts │ ├── tests │ │ ├── integration │ │ │ └── list-tasks.test.ts │ │ ├── mocks │ │ │ └── mock-provider.ts │ │ ├── setup.ts │ │ └── unit │ │ ├── base-provider.test.ts │ │ ├── executor.test.ts │ │ └── smoke.test.ts │ ├── tsconfig.json │ └── vitest.config.ts ├── README-task-master.md ├── README.md ├── scripts │ ├── dev.js │ ├── init.js │ ├── modules │ │ ├── ai-services-unified.js │ │ ├── commands.js │ │ ├── config-manager.js │ │ ├── dependency-manager.js │ │ ├── index.js │ │ ├── prompt-manager.js │ │ ├── supported-models.json │ │ ├── sync-readme.js │ │ ├── task-manager │ │ │ ├── add-subtask.js │ │ │ ├── add-task.js │ │ │ ├── analyze-task-complexity.js │ │ │ ├── clear-subtasks.js │ │ │ ├── expand-all-tasks.js │ │ │ ├── expand-task.js │ │ │ ├── find-next-task.js │ │ │ ├── generate-task-files.js │ │ │ ├── is-task-dependent.js │ │ │ ├── list-tasks.js │ │ │ ├── migrate.js │ │ │ ├── models.js │ │ │ ├── move-task.js │ │ │ ├── parse-prd │ │ │ │ ├── index.js │ │ │ │ ├── parse-prd-config.js │ │ │ │ 
├── parse-prd-helpers.js │ │ │ │ ├── parse-prd-non-streaming.js │ │ │ │ ├── parse-prd-streaming.js │ │ │ │ └── parse-prd.js │ │ │ ├── remove-subtask.js │ │ │ ├── remove-task.js │ │ │ ├── research.js │ │ │ ├── response-language.js │ │ │ ├── scope-adjustment.js │ │ │ ├── set-task-status.js │ │ │ ├── tag-management.js │ │ │ ├── task-exists.js │ │ │ ├── update-single-task-status.js │ │ │ ├── update-subtask-by-id.js │ │ │ ├── update-task-by-id.js │ │ │ └── update-tasks.js │ │ ├── task-manager.js │ │ ├── ui.js │ │ ├── update-config-tokens.js │ │ ├── utils │ │ │ ├── contextGatherer.js │ │ │ ├── fuzzyTaskSearch.js │ │ │ └── git-utils.js │ │ └── utils.js │ ├── task-complexity-report.json │ ├── test-claude-errors.js │ └── test-claude.js ├── src │ ├── ai-providers │ │ ├── anthropic.js │ │ ├── azure.js │ │ ├── base-provider.js │ │ ├── bedrock.js │ │ ├── claude-code.js │ │ ├── custom-sdk │ │ │ ├── claude-code │ │ │ │ ├── errors.js │ │ │ │ ├── index.js │ │ │ │ ├── json-extractor.js │ │ │ │ ├── language-model.js │ │ │ │ ├── message-converter.js │ │ │ │ └── types.js │ │ │ └── grok-cli │ │ │ ├── errors.js │ │ │ ├── index.js │ │ │ ├── json-extractor.js │ │ │ ├── language-model.js │ │ │ ├── message-converter.js │ │ │ └── types.js │ │ ├── gemini-cli.js │ │ ├── google-vertex.js │ │ ├── google.js │ │ ├── grok-cli.js │ │ ├── groq.js │ │ ├── index.js │ │ ├── ollama.js │ │ ├── openai.js │ │ ├── openrouter.js │ │ ├── perplexity.js │ │ └── xai.js │ ├── constants │ │ ├── commands.js │ │ ├── paths.js │ │ ├── profiles.js │ │ ├── providers.js │ │ ├── rules-actions.js │ │ ├── task-priority.js │ │ └── task-status.js │ ├── profiles │ │ ├── amp.js │ │ ├── base-profile.js │ │ ├── claude.js │ │ ├── cline.js │ │ ├── codex.js │ │ ├── cursor.js │ │ ├── gemini.js │ │ ├── index.js │ │ ├── kilo.js │ │ ├── kiro.js │ │ ├── opencode.js │ │ ├── roo.js │ │ ├── trae.js │ │ ├── vscode.js │ │ ├── windsurf.js │ │ └── zed.js │ ├── progress │ │ ├── base-progress-tracker.js │ │ ├── cli-progress-factory.js │ │ ├── 
parse-prd-tracker.js │ │ ├── progress-tracker-builder.js │ │ └── tracker-ui.js │ ├── prompts │ │ ├── add-task.json │ │ ├── analyze-complexity.json │ │ ├── expand-task.json │ │ ├── parse-prd.json │ │ ├── README.md │ │ ├── research.json │ │ ├── schemas │ │ │ ├── parameter.schema.json │ │ │ ├── prompt-template.schema.json │ │ │ ├── README.md │ │ │ └── variant.schema.json │ │ ├── update-subtask.json │ │ ├── update-task.json │ │ └── update-tasks.json │ ├── provider-registry │ │ └── index.js │ ├── task-master.js │ ├── ui │ │ ├── confirm.js │ │ ├── indicators.js │ │ └── parse-prd.js │ └── utils │ ├── asset-resolver.js │ ├── create-mcp-config.js │ ├── format.js │ ├── getVersion.js │ ├── logger-utils.js │ ├── manage-gitignore.js │ ├── path-utils.js │ ├── profiles.js │ ├── rule-transformer.js │ ├── stream-parser.js │ └── timeout-manager.js ├── test-clean-tags.js ├── test-config-manager.js ├── test-prd.txt ├── test-tag-functions.js ├── test-version-check-full.js ├── test-version-check.js ├── tests │ ├── e2e │ │ ├── e2e_helpers.sh │ │ ├── parse_llm_output.cjs │ │ ├── run_e2e.sh │ │ ├── run_fallback_verification.sh │ │ └── test_llm_analysis.sh │ ├── fixture │ │ └── test-tasks.json │ ├── fixtures │ │ ├── .taskmasterconfig │ │ ├── sample-claude-response.js │ │ ├── sample-prd.txt │ │ └── sample-tasks.js │ ├── integration │ │ ├── claude-code-optional.test.js │ │ ├── cli │ │ │ ├── commands.test.js │ │ │ ├── complex-cross-tag-scenarios.test.js │ │ │ └── move-cross-tag.test.js │ │ ├── manage-gitignore.test.js │ │ ├── mcp-server │ │ │ └── direct-functions.test.js │ │ ├── move-task-cross-tag.integration.test.js │ │ ├── move-task-simple.integration.test.js │ │ └── profiles │ │ ├── amp-init-functionality.test.js │ │ ├── claude-init-functionality.test.js │ │ ├── cline-init-functionality.test.js │ │ ├── codex-init-functionality.test.js │ │ ├── cursor-init-functionality.test.js │ │ ├── gemini-init-functionality.test.js │ │ ├── opencode-init-functionality.test.js │ │ ├── 
roo-files-inclusion.test.js │ │ ├── roo-init-functionality.test.js │ │ ├── rules-files-inclusion.test.js │ │ ├── trae-init-functionality.test.js │ │ ├── vscode-init-functionality.test.js │ │ └── windsurf-init-functionality.test.js │ ├── manual │ │ ├── progress │ │ │ ├── parse-prd-analysis.js │ │ │ ├── test-parse-prd.js │ │ │ └── TESTING_GUIDE.md │ │ └── prompts │ │ ├── prompt-test.js │ │ └── README.md │ ├── README.md │ ├── setup.js │ └── unit │ ├── ai-providers │ │ ├── claude-code.test.js │ │ ├── custom-sdk │ │ │ └── claude-code │ │ │ └── language-model.test.js │ │ ├── gemini-cli.test.js │ │ ├── mcp-components.test.js │ │ └── openai.test.js │ ├── ai-services-unified.test.js │ ├── commands.test.js │ ├── config-manager.test.js │ ├── config-manager.test.mjs │ ├── dependency-manager.test.js │ ├── init.test.js │ ├── initialize-project.test.js │ ├── kebab-case-validation.test.js │ ├── manage-gitignore.test.js │ ├── mcp │ │ └── tools │ │ ├── __mocks__ │ │ │ └── move-task.js │ │ ├── add-task.test.js │ │ ├── analyze-complexity.test.js │ │ ├── expand-all.test.js │ │ ├── get-tasks.test.js │ │ ├── initialize-project.test.js │ │ ├── move-task-cross-tag-options.test.js │ │ ├── move-task-cross-tag.test.js │ │ └── remove-task.test.js │ ├── mcp-providers │ │ ├── mcp-components.test.js │ │ └── mcp-provider.test.js │ ├── parse-prd.test.js │ ├── profiles │ │ ├── amp-integration.test.js │ │ ├── claude-integration.test.js │ │ ├── cline-integration.test.js │ │ ├── codex-integration.test.js │ │ ├── cursor-integration.test.js │ │ ├── gemini-integration.test.js │ │ ├── kilo-integration.test.js │ │ ├── kiro-integration.test.js │ │ ├── mcp-config-validation.test.js │ │ ├── opencode-integration.test.js │ │ ├── profile-safety-check.test.js │ │ ├── roo-integration.test.js │ │ ├── rule-transformer-cline.test.js │ │ ├── rule-transformer-cursor.test.js │ │ ├── rule-transformer-gemini.test.js │ │ ├── rule-transformer-kilo.test.js │ │ ├── rule-transformer-kiro.test.js │ │ ├── 
rule-transformer-opencode.test.js │ │ ├── rule-transformer-roo.test.js │ │ ├── rule-transformer-trae.test.js │ │ ├── rule-transformer-vscode.test.js │ │ ├── rule-transformer-windsurf.test.js │ │ ├── rule-transformer-zed.test.js │ │ ├── rule-transformer.test.js │ │ ├── selective-profile-removal.test.js │ │ ├── subdirectory-support.test.js │ │ ├── trae-integration.test.js │ │ ├── vscode-integration.test.js │ │ ├── windsurf-integration.test.js │ │ └── zed-integration.test.js │ ├── progress │ │ └── base-progress-tracker.test.js │ ├── prompt-manager.test.js │ ├── prompts │ │ └── expand-task-prompt.test.js │ ├── providers │ │ └── provider-registry.test.js │ ├── scripts │ │ └── modules │ │ ├── commands │ │ │ ├── move-cross-tag.test.js │ │ │ └── README.md │ │ ├── dependency-manager │ │ │ ├── circular-dependencies.test.js │ │ │ ├── cross-tag-dependencies.test.js │ │ │ └── fix-dependencies-command.test.js │ │ ├── task-manager │ │ │ ├── add-subtask.test.js │ │ │ ├── add-task.test.js │ │ │ ├── analyze-task-complexity.test.js │ │ │ ├── clear-subtasks.test.js │ │ │ ├── complexity-report-tag-isolation.test.js │ │ │ ├── expand-all-tasks.test.js │ │ │ ├── expand-task.test.js │ │ │ ├── find-next-task.test.js │ │ │ ├── generate-task-files.test.js │ │ │ ├── list-tasks.test.js │ │ │ ├── move-task-cross-tag.test.js │ │ │ ├── move-task.test.js │ │ │ ├── parse-prd.test.js │ │ │ ├── remove-subtask.test.js │ │ │ ├── remove-task.test.js │ │ │ ├── research.test.js │ │ │ ├── scope-adjustment.test.js │ │ │ ├── set-task-status.test.js │ │ │ ├── setup.js │ │ │ ├── update-single-task-status.test.js │ │ │ ├── update-subtask-by-id.test.js │ │ │ ├── update-task-by-id.test.js │ │ │ └── update-tasks.test.js │ │ ├── ui │ │ │ └── cross-tag-error-display.test.js │ │ └── utils-tag-aware-paths.test.js │ ├── task-finder.test.js │ ├── task-manager │ │ ├── clear-subtasks.test.js │ │ ├── move-task.test.js │ │ ├── tag-boundary.test.js │ │ └── tag-management.test.js │ ├── task-master.test.js │ ├── ui │ │ └── 
indicators.test.js │ ├── ui.test.js │ ├── utils-strip-ansi.test.js │ └── utils.test.js ├── tsconfig.json ├── tsdown.config.ts └── turbo.json ``` # Files -------------------------------------------------------------------------------- /tests/unit/scripts/modules/task-manager/complexity-report-tag-isolation.test.js: -------------------------------------------------------------------------------- ```javascript /** * Tests for complexity report tag isolation functionality * Verifies that different tags maintain separate complexity reports */ import { jest } from '@jest/globals'; import fs from 'fs'; import path from 'path'; // Mock the dependencies jest.unstable_mockModule('../../../../../src/utils/path-utils.js', () => ({ resolveComplexityReportOutputPath: jest.fn(), findComplexityReportPath: jest.fn(), findConfigPath: jest.fn(), findPRDPath: jest.fn(() => '/mock/project/root/.taskmaster/docs/PRD.md'), findTasksPath: jest.fn( () => '/mock/project/root/.taskmaster/tasks/tasks.json' ), findProjectRoot: jest.fn(() => '/mock/project/root'), normalizeProjectRoot: jest.fn((root) => root) })); jest.unstable_mockModule('../../../../../scripts/modules/utils.js', () => ({ readJSON: jest.fn(), writeJSON: jest.fn(), log: jest.fn(), isSilentMode: jest.fn(() => false), enableSilentMode: jest.fn(), disableSilentMode: jest.fn(), flattenTasksWithSubtasks: jest.fn((tasks) => tasks), getTagAwareFilePath: jest.fn((basePath, tag, projectRoot) => { if (tag && tag !== 'master') { const dir = path.dirname(basePath); const ext = path.extname(basePath); const name = path.basename(basePath, ext); return path.join(projectRoot || '.', dir, `${name}_${tag}${ext}`); } return path.join(projectRoot || '.', basePath); }), findTaskById: jest.fn((tasks, taskId) => { if (!tasks || !Array.isArray(tasks)) { return { task: null, originalSubtaskCount: null, originalSubtasks: null }; } const id = parseInt(taskId, 10); const task = tasks.find((t) => t.id === id); return task ? 
{ task, originalSubtaskCount: null, originalSubtasks: null } : { task: null, originalSubtaskCount: null, originalSubtasks: null }; }), taskExists: jest.fn((tasks, taskId) => { if (!tasks || !Array.isArray(tasks)) return false; const id = parseInt(taskId, 10); return tasks.some((t) => t.id === id); }), formatTaskId: jest.fn((id) => `Task ${id}`), findCycles: jest.fn(() => []), truncate: jest.fn((text) => text), addComplexityToTask: jest.fn((task, complexity) => ({ ...task, complexity })), aggregateTelemetry: jest.fn((telemetryArray) => telemetryArray[0] || {}), ensureTagMetadata: jest.fn((tagObj) => tagObj), getCurrentTag: jest.fn(() => 'master'), markMigrationForNotice: jest.fn(), performCompleteTagMigration: jest.fn(), setTasksForTag: jest.fn(), getTasksForTag: jest.fn((data, tag) => data[tag]?.tasks || []), findProjectRoot: jest.fn(() => '/mock/project/root'), readComplexityReport: jest.fn(), findTaskInComplexityReport: jest.fn(), resolveEnvVariable: jest.fn((varName) => `mock_${varName}`), isEmpty: jest.fn(() => false), normalizeProjectRoot: jest.fn((root) => root), slugifyTagForFilePath: jest.fn((tagName) => { if (!tagName || typeof tagName !== 'string') { return 'unknown-tag'; } return tagName.replace(/[^a-zA-Z0-9_-]/g, '-').toLowerCase(); }), createTagAwareFilePath: jest.fn((basePath, tag, projectRoot) => { if (tag && tag !== 'master') { const dir = path.dirname(basePath); const ext = path.extname(basePath); const name = path.basename(basePath, ext); // Use the slugified tag const slugifiedTag = tag.replace(/[^a-zA-Z0-9_-]/g, '-').toLowerCase(); return path.join( projectRoot || '.', dir, `${name}_${slugifiedTag}${ext}` ); } return path.join(projectRoot || '.', basePath); }), traverseDependencies: jest.fn((sourceTasks, allTasks, options = {}) => []), CONFIG: { defaultSubtasks: 3 } })); jest.unstable_mockModule( '../../../../../scripts/modules/ai-services-unified.js', () => ({ generateTextService: jest.fn().mockImplementation((params) => { const commandName = 
params?.commandName || 'default'; if (commandName === 'analyze-complexity') { // Check if this is for a specific tag test by looking at the prompt const isFeatureTag = params?.prompt?.includes('feature') || params?.role === 'feature'; const isMasterTag = params?.prompt?.includes('master') || params?.role === 'master'; let taskTitle = 'Test Task'; if (isFeatureTag) { taskTitle = 'Feature Task 1'; } else if (isMasterTag) { taskTitle = 'Master Task 1'; } return Promise.resolve({ mainResult: JSON.stringify([ { taskId: 1, taskTitle: taskTitle, complexityScore: 7, recommendedSubtasks: 4, expansionPrompt: 'Break down this task', reasoning: 'This task is moderately complex' }, { taskId: 2, taskTitle: 'Task 2', complexityScore: 5, recommendedSubtasks: 3, expansionPrompt: 'Break down this task with a focus on task 2.', reasoning: 'Automatically added due to missing analysis in AI response.' } ]), telemetryData: { timestamp: new Date().toISOString(), commandName: 'analyze-complexity', modelUsed: 'claude-3-5-sonnet', providerName: 'anthropic', inputTokens: 1000, outputTokens: 500, totalTokens: 1500, totalCost: 0.012414, currency: 'USD' } }); } else { // Default for expand-task and others return Promise.resolve({ mainResult: JSON.stringify({ subtasks: [ { id: 1, title: 'Subtask 1', description: 'First subtask', dependencies: [], details: 'Implementation details', status: 'pending', testStrategy: 'Test strategy' } ] }), telemetryData: { timestamp: new Date().toISOString(), commandName: commandName || 'expand-task', modelUsed: 'claude-3-5-sonnet', providerName: 'anthropic', inputTokens: 1000, outputTokens: 500, totalTokens: 1500, totalCost: 0.012414, currency: 'USD' } }); } }), streamTextService: jest.fn().mockResolvedValue({ mainResult: async function* () { yield '{"tasks":['; yield '{"id":1,"title":"Test Task","priority":"high"}'; yield ']}'; }, telemetryData: { timestamp: new Date().toISOString(), commandName: 'analyze-complexity', modelUsed: 'claude-3-5-sonnet', providerName: 
// Tail of the ai-services-unified mock: the streamTextService telemetry stub
// (object literal opened on the previous source line) continues here.
'anthropic',
			inputTokens: 1000,
			outputTokens: 500,
			totalTokens: 1500,
			totalCost: 0.012414,
			currency: 'USD'
		}
	}),
	// generateObjectService resolves with a structured subtask payload plus the
	// same fixed telemetry shape used by the other service stubs.
	generateObjectService: jest.fn().mockResolvedValue({
		mainResult: {
			object: {
				subtasks: [
					{
						id: 1,
						title: 'Subtask 1',
						description: 'First subtask',
						dependencies: [],
						details: 'Implementation details',
						status: 'pending',
						testStrategy: 'Test strategy'
					}
				]
			}
		},
		telemetryData: {
			timestamp: new Date().toISOString(),
			commandName: 'expand-task',
			modelUsed: 'claude-3-5-sonnet',
			providerName: 'anthropic',
			inputTokens: 1000,
			outputTokens: 500,
			totalTokens: 1500,
			totalCost: 0.012414,
			currency: 'USD'
		}
	})
})
);

// Mock config-manager so the tests never read a real config file; every getter
// returns a fixed test value.
jest.unstable_mockModule(
	'../../../../../scripts/modules/config-manager.js',
	() => ({
		// Core config access
		getConfig: jest.fn(() => ({
			models: { main: { provider: 'anthropic', modelId: 'claude-3-5-sonnet' } },
			global: { projectName: 'Test Project' }
		})),
		writeConfig: jest.fn(() => true),
		ConfigurationError: class extends Error {},
		isConfigFilePresent: jest.fn(() => true),
		// Validation
		validateProvider: jest.fn(() => true),
		validateProviderModelCombination: jest.fn(() => true),
		VALIDATED_PROVIDERS: ['anthropic', 'openai', 'perplexity'],
		CUSTOM_PROVIDERS: { OLLAMA: 'ollama', BEDROCK: 'bedrock' },
		ALL_PROVIDERS: ['anthropic', 'openai', 'perplexity', 'ollama', 'bedrock'],
		MODEL_MAP: {
			anthropic: [
				{ id: 'claude-3-5-sonnet', cost_per_1m_tokens: { input: 3, output: 15 } }
			],
			openai: [{ id: 'gpt-4', cost_per_1m_tokens: { input: 30, output: 60 } }]
		},
		getAvailableModels: jest.fn(() => [
			{ id: 'claude-3-5-sonnet', name: 'Claude 3.5 Sonnet', provider: 'anthropic' },
			{ id: 'gpt-4', name: 'GPT-4', provider: 'openai' }
		]),
		// Role-specific getters
		getMainProvider: jest.fn(() => 'anthropic'),
		getMainModelId: jest.fn(() => 'claude-3-5-sonnet'),
		getMainMaxTokens: jest.fn(() => 4000),
		getMainTemperature: jest.fn(() => 0.7),
		getResearchProvider: jest.fn(() => 'perplexity'),
		getResearchModelId: jest.fn(() => 'sonar-pro'),
		getResearchMaxTokens: jest.fn(() => 8700),
		getResearchTemperature: jest.fn(() => 0.1),
		getFallbackProvider: jest.fn(() => 'anthropic'),
		getFallbackModelId: jest.fn(() => 'claude-3-5-sonnet'),
		getFallbackMaxTokens: jest.fn(() => 4000),
		getFallbackTemperature: jest.fn(() => 0.7),
		getBaseUrlForRole: jest.fn(() => undefined),
		// Global setting getters
		getLogLevel: jest.fn(() => 'info'),
		getDebugFlag: jest.fn(() => false),
		getDefaultNumTasks: jest.fn(() => 10),
		getDefaultSubtasks: jest.fn(() => 5),
		getDefaultPriority: jest.fn(() => 'medium'),
		getProjectName: jest.fn(() => 'Test Project'),
		getOllamaBaseURL: jest.fn(() => 'http://localhost:11434/api'),
		getAzureBaseURL: jest.fn(() => undefined),
		getBedrockBaseURL: jest.fn(() => undefined),
		getParametersForRole: jest.fn(() => ({ maxTokens: 4000, temperature: 0.7 })),
		getUserId: jest.fn(() => '1234567890'),
		// API Key Checkers
		isApiKeySet: jest.fn(() => true),
		getMcpApiKeyStatus: jest.fn(() => true),
		// Additional functions
		getAllProviders: jest.fn(() => ['anthropic', 'openai', 'perplexity']),
		getVertexProjectId: jest.fn(() => undefined),
		getVertexLocation: jest.fn(() => undefined),
		hasCodebaseAnalysis: jest.fn(() => false)
	})
);

// Mock the prompt manager so prompt templates never touch the filesystem.
jest.unstable_mockModule(
	'../../../../../scripts/modules/prompt-manager.js',
	() => ({
		getPromptManager: jest.fn().mockReturnValue({
			loadPrompt: jest.fn().mockResolvedValue({
				systemPrompt: 'Mocked system prompt',
				userPrompt: 'Mocked user prompt'
			})
		})
	})
);

// Mock the context gatherer with a minimal stand-in class; gather() returns a
// fixed payload so the tests never scan real project files.
jest.unstable_mockModule(
	'../../../../../scripts/modules/utils/contextGatherer.js',
	() => {
		class MockContextGatherer {
			constructor(projectRoot, tag) {
				this.projectRoot = projectRoot;
				this.tag = tag;
				this.allTasks = [];
			}
			async gather(options = {}) {
				return {
					context: 'Mock context gathered',
					analysisData: null,
					contextSections: 1,
					finalTaskIds: options.tasks || []
				};
			}
		}
		return {
			default: MockContextGatherer,
			ContextGatherer: MockContextGatherer,
			createContextGatherer: jest.fn(
				(projectRoot, tag) => new MockContextGatherer(projectRoot, tag)
			)
		};
	}
);
jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({ startLoadingIndicator: jest.fn(() => ({ stop: jest.fn() })), stopLoadingIndicator: jest.fn(), displayAiUsageSummary: jest.fn(), displayBanner: jest.fn(), getStatusWithColor: jest.fn((status) => status), succeedLoadingIndicator: jest.fn(), failLoadingIndicator: jest.fn(), warnLoadingIndicator: jest.fn(), infoLoadingIndicator: jest.fn(), displayContextAnalysis: jest.fn(), createProgressBar: jest.fn(() => ({ start: jest.fn(), stop: jest.fn(), update: jest.fn() })), displayTable: jest.fn(), displayBox: jest.fn(), displaySuccess: jest.fn(), displayError: jest.fn(), displayWarning: jest.fn(), displayInfo: jest.fn(), displayTaskDetails: jest.fn(), displayTaskList: jest.fn(), displayComplexityReport: jest.fn(), displayNextTask: jest.fn(), displayDependencyStatus: jest.fn(), displayMigrationNotice: jest.fn(), formatDependenciesWithStatus: jest.fn((deps) => deps), formatTaskId: jest.fn((id) => `Task ${id}`), formatPriority: jest.fn((priority) => priority), formatDuration: jest.fn((duration) => duration), formatDate: jest.fn((date) => date), formatComplexityScore: jest.fn((score) => score), formatTelemetryData: jest.fn((data) => data), formatContextSummary: jest.fn((context) => context), formatTagName: jest.fn((tag) => tag), formatFilePath: jest.fn((path) => path), getComplexityWithColor: jest.fn((complexity) => complexity), getPriorityWithColor: jest.fn((priority) => priority), getTagWithColor: jest.fn((tag) => tag), getDependencyWithColor: jest.fn((dep) => dep), getTelemetryWithColor: jest.fn((data) => data), getContextWithColor: jest.fn((context) => context) })); // Mock fs module const mockWriteFileSync = jest.fn(); const mockExistsSync = jest.fn(); const mockReadFileSync = jest.fn(); const mockMkdirSync = jest.fn(); jest.unstable_mockModule('fs', () => ({ default: { existsSync: mockExistsSync, readFileSync: mockReadFileSync, writeFileSync: mockWriteFileSync, mkdirSync: mockMkdirSync }, existsSync: 
mockExistsSync, readFileSync: mockReadFileSync, writeFileSync: mockWriteFileSync, mkdirSync: mockMkdirSync })); // Import the mocked modules const { resolveComplexityReportOutputPath, findComplexityReportPath } = await import('../../../../../src/utils/path-utils.js'); const { readJSON, writeJSON, getTagAwareFilePath } = await import( '../../../../../scripts/modules/utils.js' ); const { generateTextService, streamTextService } = await import( '../../../../../scripts/modules/ai-services-unified.js' ); // Import the modules under test const { default: analyzeTaskComplexity } = await import( '../../../../../scripts/modules/task-manager/analyze-task-complexity.js' ); const { default: expandTask } = await import( '../../../../../scripts/modules/task-manager/expand-task.js' ); describe('Complexity Report Tag Isolation', () => { const projectRoot = '/mock/project/root'; const sampleTasks = { tasks: [ { id: 1, title: 'Task 1', description: 'First task', status: 'pending' }, { id: 2, title: 'Task 2', description: 'Second task', status: 'pending' } ] }; const sampleComplexityReport = { meta: { generatedAt: new Date().toISOString(), tasksAnalyzed: 2, totalTasks: 2, analysisCount: 2, thresholdScore: 5, projectName: 'Test Project', usedResearch: false }, complexityAnalysis: [ { taskId: 1, taskTitle: 'Task 1', complexityScore: 7, recommendedSubtasks: 4, expansionPrompt: 'Break down this task', reasoning: 'This task is moderately complex' }, { taskId: 2, taskTitle: 'Task 2', complexityScore: 5, recommendedSubtasks: 3, expansionPrompt: 'Break down this task', reasoning: 'This task is moderately complex' } ] }; beforeEach(() => { jest.clearAllMocks(); // Default mock implementations readJSON.mockReturnValue(sampleTasks); mockExistsSync.mockReturnValue(false); mockMkdirSync.mockImplementation(() => {}); // Mock resolveComplexityReportOutputPath to return tag-aware paths resolveComplexityReportOutputPath.mockImplementation( (explicitPath, args) => { const tag = args?.tag; if 
(explicitPath) { return explicitPath; } let filename = 'task-complexity-report.json'; if (tag && tag !== 'master') { // Use slugified tag for cross-platform compatibility const slugifiedTag = tag .replace(/[^a-zA-Z0-9_-]/g, '-') .toLowerCase(); filename = `task-complexity-report_${slugifiedTag}.json`; } return path.join(projectRoot, '.taskmaster/reports', filename); } ); // Mock findComplexityReportPath to return tag-aware paths findComplexityReportPath.mockImplementation((explicitPath, args) => { const tag = args?.tag; if (explicitPath) { return explicitPath; } let filename = 'task-complexity-report.json'; if (tag && tag !== 'master') { filename = `task-complexity-report_${tag}.json`; } return path.join(projectRoot, '.taskmaster/reports', filename); }); }); describe('Path Resolution Tag Isolation', () => { test('should resolve master tag to default filename', () => { const result = resolveComplexityReportOutputPath(null, { tag: 'master', projectRoot }); expect(result).toBe( path.join( projectRoot, '.taskmaster/reports', 'task-complexity-report.json' ) ); }); test('should resolve non-master tag to tag-specific filename', () => { const result = resolveComplexityReportOutputPath(null, { tag: 'feature-auth', projectRoot }); expect(result).toBe( path.join( projectRoot, '.taskmaster/reports', 'task-complexity-report_feature-auth.json' ) ); }); test('should resolve undefined tag to default filename', () => { const result = resolveComplexityReportOutputPath(null, { projectRoot }); expect(result).toBe( path.join( projectRoot, '.taskmaster/reports', 'task-complexity-report.json' ) ); }); test('should respect explicit path over tag-aware resolution', () => { const explicitPath = '/custom/path/report.json'; const result = resolveComplexityReportOutputPath(explicitPath, { tag: 'feature-auth', projectRoot }); expect(result).toBe(explicitPath); }); }); describe('Analysis Generation Tag Isolation', () => { test('should generate master tag report to default location', async () => 
{ const options = { file: 'tasks/tasks.json', threshold: '5', projectRoot, tag: 'master' }; await analyzeTaskComplexity(options, { projectRoot, mcpLog: { info: jest.fn(), warn: jest.fn(), error: jest.fn(), debug: jest.fn(), success: jest.fn() } }); expect(resolveComplexityReportOutputPath).toHaveBeenCalledWith( undefined, expect.objectContaining({ tag: 'master', projectRoot }), expect.any(Function) ); expect(mockWriteFileSync).toHaveBeenCalledWith( path.join( projectRoot, '.taskmaster/reports', 'task-complexity-report.json' ), expect.any(String), 'utf8' ); }); test('should generate feature tag report to tag-specific location', async () => { const options = { file: 'tasks/tasks.json', threshold: '5', projectRoot, tag: 'feature-auth' }; await analyzeTaskComplexity(options, { projectRoot, mcpLog: { info: jest.fn(), warn: jest.fn(), error: jest.fn(), debug: jest.fn(), success: jest.fn() } }); expect(resolveComplexityReportOutputPath).toHaveBeenCalledWith( undefined, expect.objectContaining({ tag: 'feature-auth', projectRoot }), expect.any(Function) ); expect(mockWriteFileSync).toHaveBeenCalledWith( path.join( projectRoot, '.taskmaster/reports', 'task-complexity-report_feature-auth.json' ), expect.any(String), 'utf8' ); }); test('should not overwrite master report when analyzing feature tag', async () => { // First, analyze master tag const masterOptions = { file: 'tasks/tasks.json', threshold: '5', projectRoot, tag: 'master' }; await analyzeTaskComplexity(masterOptions, { projectRoot, mcpLog: { info: jest.fn(), warn: jest.fn(), error: jest.fn(), debug: jest.fn(), success: jest.fn() } }); // Clear mocks to verify separate calls jest.clearAllMocks(); readJSON.mockReturnValue(sampleTasks); // Then, analyze feature tag const featureOptions = { file: 'tasks/tasks.json', threshold: '5', projectRoot, tag: 'feature-auth' }; await analyzeTaskComplexity(featureOptions, { projectRoot, mcpLog: { info: jest.fn(), warn: jest.fn(), error: jest.fn(), debug: jest.fn(), success: 
jest.fn() } }); // Verify that the feature tag analysis wrote to its own file expect(mockWriteFileSync).toHaveBeenCalledWith( path.join( projectRoot, '.taskmaster/reports', 'task-complexity-report_feature-auth.json' ), expect.any(String), 'utf8' ); // Verify that it did NOT write to the master file expect(mockWriteFileSync).not.toHaveBeenCalledWith( path.join( projectRoot, '.taskmaster/reports', 'task-complexity-report.json' ), expect.any(String), 'utf8' ); }); }); describe('Report Reading Tag Isolation', () => { test('should read master tag report from default location', async () => { // Mock existing master report mockExistsSync.mockImplementation((filepath) => { return filepath.endsWith('task-complexity-report.json'); }); mockReadFileSync.mockReturnValue(JSON.stringify(sampleComplexityReport)); const options = { file: 'tasks/tasks.json', threshold: '5', projectRoot, tag: 'master' }; await analyzeTaskComplexity(options, { projectRoot, mcpLog: { info: jest.fn(), warn: jest.fn(), error: jest.fn(), debug: jest.fn(), success: jest.fn() } }); expect(mockExistsSync).toHaveBeenCalledWith( path.join( projectRoot, '.taskmaster/reports', 'task-complexity-report.json' ) ); }); test('should read feature tag report from tag-specific location', async () => { // Mock existing feature tag report mockExistsSync.mockImplementation((filepath) => { return filepath.endsWith('task-complexity-report_feature-auth.json'); }); mockReadFileSync.mockReturnValue(JSON.stringify(sampleComplexityReport)); const options = { file: 'tasks/tasks.json', threshold: '5', projectRoot, tag: 'feature-auth' }; await analyzeTaskComplexity(options, { projectRoot, mcpLog: { info: jest.fn(), warn: jest.fn(), error: jest.fn(), debug: jest.fn(), success: jest.fn() } }); expect(mockExistsSync).toHaveBeenCalledWith( path.join( projectRoot, '.taskmaster/reports', 'task-complexity-report_feature-auth.json' ) ); }); test('should not read master report when working with feature tag', async () => { // Mock that 
feature tag report exists but master doesn't mockExistsSync.mockImplementation((filepath) => { return filepath.endsWith('task-complexity-report_feature-auth.json'); }); mockReadFileSync.mockReturnValue(JSON.stringify(sampleComplexityReport)); const options = { file: 'tasks/tasks.json', threshold: '5', projectRoot, tag: 'feature-auth' }; await analyzeTaskComplexity(options, { projectRoot, mcpLog: { info: jest.fn(), warn: jest.fn(), error: jest.fn(), debug: jest.fn(), success: jest.fn() } }); // Should check for feature tag report expect(mockExistsSync).toHaveBeenCalledWith( path.join( projectRoot, '.taskmaster/reports', 'task-complexity-report_feature-auth.json' ) ); // Should NOT check for master report expect(mockExistsSync).not.toHaveBeenCalledWith( path.join( projectRoot, '.taskmaster/reports', 'task-complexity-report.json' ) ); }); }); describe('Expand Task Tag Isolation', () => { test('should use tag-specific complexity report for expansion', async () => { // Mock existing feature tag report mockExistsSync.mockImplementation((filepath) => { return filepath.endsWith('task-complexity-report_feature-auth.json'); }); mockReadFileSync.mockReturnValue(JSON.stringify(sampleComplexityReport)); const tasksPath = path.join(projectRoot, 'tasks/tasks.json'); const taskId = 1; const numSubtasks = 3; await expandTask( tasksPath, taskId, numSubtasks, false, // useResearch '', // additionalContext { projectRoot, tag: 'feature-auth', complexityReportPath: path.join( projectRoot, '.taskmaster/reports', 'task-complexity-report_feature-auth.json' ), mcpLog: { info: jest.fn(), warn: jest.fn(), error: jest.fn(), debug: jest.fn(), success: jest.fn() } }, false // force ); // Should read from feature tag report expect(readJSON).toHaveBeenCalledWith( path.join( projectRoot, '.taskmaster/reports', 'task-complexity-report_feature-auth.json' ) ); }); test('should use master complexity report for master tag expansion', async () => { // Mock existing master report 
mockExistsSync.mockImplementation((filepath) => { return filepath.endsWith('task-complexity-report.json'); }); mockReadFileSync.mockReturnValue(JSON.stringify(sampleComplexityReport)); const tasksPath = path.join(projectRoot, 'tasks/tasks.json'); const taskId = 1; const numSubtasks = 3; await expandTask( tasksPath, taskId, numSubtasks, false, // useResearch '', // additionalContext { projectRoot, tag: 'master', complexityReportPath: path.join( projectRoot, '.taskmaster/reports', 'task-complexity-report.json' ), mcpLog: { info: jest.fn(), warn: jest.fn(), error: jest.fn(), debug: jest.fn(), success: jest.fn() } }, false // force ); // Should read from master report expect(readJSON).toHaveBeenCalledWith( path.join( projectRoot, '.taskmaster/reports', 'task-complexity-report.json' ) ); }); }); describe('Cross-Tag Contamination Prevention', () => { test('should maintain separate reports for different tags', async () => { // Create different complexity reports for different tags const masterReport = { ...sampleComplexityReport, complexityAnalysis: [ { taskId: 1, taskTitle: 'Master Task 1', complexityScore: 8, recommendedSubtasks: 5, expansionPrompt: 'Master expansion', reasoning: 'Master task reasoning' } ] }; const featureReport = { ...sampleComplexityReport, complexityAnalysis: [ { taskId: 1, taskTitle: 'Feature Task 1', complexityScore: 6, recommendedSubtasks: 3, expansionPrompt: 'Feature expansion', reasoning: 'Feature task reasoning' } ] }; // Mock file system to return different reports for different paths mockExistsSync.mockImplementation((filepath) => { return filepath.includes('task-complexity-report'); }); mockReadFileSync.mockImplementation((filepath) => { if (filepath.includes('task-complexity-report_feature-auth.json')) { return JSON.stringify(featureReport); } else if (filepath.includes('task-complexity-report.json')) { return JSON.stringify(masterReport); } return '{}'; }); // Analyze master tag const masterOptions = { file: 'tasks/tasks.json', threshold: 
'5', projectRoot, tag: 'master' }; await analyzeTaskComplexity(masterOptions, { projectRoot, mcpLog: { info: jest.fn(), warn: jest.fn(), error: jest.fn(), debug: jest.fn(), success: jest.fn() } }); // Verify that master report was written to master location expect(mockWriteFileSync).toHaveBeenCalledWith( path.join( projectRoot, '.taskmaster/reports', 'task-complexity-report.json' ), expect.stringContaining('"taskTitle": "Test Task"'), 'utf8' ); // Clear mocks jest.clearAllMocks(); readJSON.mockReturnValue(sampleTasks); // Analyze feature tag const featureOptions = { file: 'tasks/tasks.json', threshold: '5', projectRoot, tag: 'feature-auth' }; await analyzeTaskComplexity(featureOptions, { projectRoot, mcpLog: { info: jest.fn(), warn: jest.fn(), error: jest.fn(), debug: jest.fn(), success: jest.fn() } }); // Verify that feature report was written to feature location expect(mockWriteFileSync).toHaveBeenCalledWith( path.join( projectRoot, '.taskmaster/reports', 'task-complexity-report_feature-auth.json' ), expect.stringContaining('"taskTitle": "Test Task"'), 'utf8' ); }); }); describe('Edge Cases', () => { test('should handle empty tag gracefully', async () => { const options = { file: 'tasks/tasks.json', threshold: '5', projectRoot, tag: '' }; await analyzeTaskComplexity(options, { projectRoot, mcpLog: { info: jest.fn(), warn: jest.fn(), error: jest.fn(), debug: jest.fn(), success: jest.fn() } }); expect(resolveComplexityReportOutputPath).toHaveBeenCalledWith( undefined, expect.objectContaining({ tag: '', projectRoot }), expect.any(Function) ); }); test('should handle null tag gracefully', async () => { const options = { file: 'tasks/tasks.json', threshold: '5', projectRoot, tag: null }; await analyzeTaskComplexity(options, { projectRoot, mcpLog: { info: jest.fn(), warn: jest.fn(), error: jest.fn(), debug: jest.fn(), success: jest.fn() } }); expect(resolveComplexityReportOutputPath).toHaveBeenCalledWith( undefined, expect.objectContaining({ tag: null, projectRoot }), 
expect.any(Function) ); }); test('should handle special characters in tag names', async () => { const options = { file: 'tasks/tasks.json', threshold: '5', projectRoot, tag: 'feature/user-auth-v2' }; await analyzeTaskComplexity(options, { projectRoot, mcpLog: { info: jest.fn(), warn: jest.fn(), error: jest.fn(), debug: jest.fn(), success: jest.fn() } }); expect(resolveComplexityReportOutputPath).toHaveBeenCalledWith( undefined, expect.objectContaining({ tag: 'feature/user-auth-v2', projectRoot }), expect.any(Function) ); expect(mockWriteFileSync).toHaveBeenCalledWith( path.join( projectRoot, '.taskmaster/reports', 'task-complexity-report_feature-user-auth-v2.json' ), expect.any(String), 'utf8' ); }); }); }); ``` -------------------------------------------------------------------------------- /context/fastmcp-core.txt: -------------------------------------------------------------------------------- ``` import { Server } from "@modelcontextprotocol/sdk/server/index.js"; import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; import { CallToolRequestSchema, ClientCapabilities, CompleteRequestSchema, CreateMessageRequestSchema, ErrorCode, GetPromptRequestSchema, ListPromptsRequestSchema, ListResourcesRequestSchema, ListResourceTemplatesRequestSchema, ListToolsRequestSchema, McpError, ReadResourceRequestSchema, Root, RootsListChangedNotificationSchema, ServerCapabilities, SetLevelRequestSchema, } from "@modelcontextprotocol/sdk/types.js"; import { zodToJsonSchema } from "zod-to-json-schema"; import { z } from "zod"; import { setTimeout as delay } from "timers/promises"; import { readFile } from "fs/promises"; import { fileTypeFromBuffer } from "file-type"; import { StrictEventEmitter } from "strict-event-emitter-types"; import { EventEmitter } from "events"; import Fuse from "fuse.js"; import { startSSEServer } from "mcp-proxy"; import { Transport } from "@modelcontextprotocol/sdk/shared/transport.js"; import parseURITemplate from 
"uri-templates";
import http from "http";
import { fetch } from "undici";

// Handle returned when the SSE transport is started; close() shuts it down.
export type SSEServer = {
  close: () => Promise<void>;
};

// Lifecycle events emitted by the FastMCP server for client sessions.
type FastMCPEvents<T extends FastMCPSessionAuth> = {
  connect: (event: { session: FastMCPSession<T> }) => void;
  disconnect: (event: { session: FastMCPSession<T> }) => void;
};

// Events emitted by an individual session.
type FastMCPSessionEvents = {
  rootsChanged: (event: { roots: Root[] }) => void;
  error: (event: { error: Error }) => void;
};

/**
 * Generates an image content object from a URL, file path, or buffer.
 *
 * Exactly one of `url`, `path`, or `buffer` must be supplied; any other shape
 * throws. The MIME type is sniffed from the raw bytes (falling back to
 * "image/png" when detection fails) and the data is returned base64-encoded.
 */
export const imageContent = async (
  input: { url: string } | { path: string } | { buffer: Buffer },
): Promise<ImageContent> => {
  let rawData: Buffer;

  if ("url" in input) {
    const response = await fetch(input.url);

    if (!response.ok) {
      throw new Error(`Failed to fetch image from URL: ${response.statusText}`);
    }

    rawData = Buffer.from(await response.arrayBuffer());
  } else if ("path" in input) {
    rawData = await readFile(input.path);
  } else if ("buffer" in input) {
    rawData = input.buffer;
  } else {
    throw new Error(
      "Invalid input: Provide a valid 'url', 'path', or 'buffer'",
    );
  }

  // Sniff MIME type from the bytes; result may be undefined for unknown formats.
  const mimeType = await fileTypeFromBuffer(rawData);

  const base64Data = rawData.toString("base64");

  return {
    type: "image",
    data: base64Data,
    mimeType: mimeType?.mime ?? "image/png",
  } as const;
};

// Base class for all FastMCP errors; sets `name` to the concrete subclass name.
abstract class FastMCPError extends Error {
  public constructor(message?: string) {
    super(message);
    this.name = new.target.name;
  }
}

type Extra = unknown;

type Extras = Record<string, Extra>;

// Internal invariant violation; `extras` carries arbitrary debugging context.
export class UnexpectedStateError extends FastMCPError {
  public extras?: Extras;

  public constructor(message: string, extras?: Extras) {
    super(message);
    this.name = new.target.name;
    this.extras = extras;
  }
}

/**
 * An error that is meant to be surfaced to the user.
*/ export class UserError extends UnexpectedStateError {} type ToolParameters = z.ZodTypeAny; type Literal = boolean | null | number | string | undefined; type SerializableValue = | Literal | SerializableValue[] | { [key: string]: SerializableValue }; type Progress = { /** * The progress thus far. This should increase every time progress is made, even if the total is unknown. */ progress: number; /** * Total number of items to process (or total progress required), if known. */ total?: number; }; type Context<T extends FastMCPSessionAuth> = { session: T | undefined; reportProgress: (progress: Progress) => Promise<void>; log: { debug: (message: string, data?: SerializableValue) => void; error: (message: string, data?: SerializableValue) => void; info: (message: string, data?: SerializableValue) => void; warn: (message: string, data?: SerializableValue) => void; }; }; type TextContent = { type: "text"; text: string; }; const TextContentZodSchema = z .object({ type: z.literal("text"), /** * The text content of the message. */ text: z.string(), }) .strict() satisfies z.ZodType<TextContent>; type ImageContent = { type: "image"; data: string; mimeType: string; }; const ImageContentZodSchema = z .object({ type: z.literal("image"), /** * The base64-encoded image data. */ data: z.string().base64(), /** * The MIME type of the image. Different providers may support different image types. 
*/ mimeType: z.string(), }) .strict() satisfies z.ZodType<ImageContent>; type Content = TextContent | ImageContent; const ContentZodSchema = z.discriminatedUnion("type", [ TextContentZodSchema, ImageContentZodSchema, ]) satisfies z.ZodType<Content>; type ContentResult = { content: Content[]; isError?: boolean; }; const ContentResultZodSchema = z .object({ content: ContentZodSchema.array(), isError: z.boolean().optional(), }) .strict() satisfies z.ZodType<ContentResult>; type Completion = { values: string[]; total?: number; hasMore?: boolean; }; /** * https://github.com/modelcontextprotocol/typescript-sdk/blob/3164da64d085ec4e022ae881329eee7b72f208d4/src/types.ts#L983-L1003 */ const CompletionZodSchema = z.object({ /** * An array of completion values. Must not exceed 100 items. */ values: z.array(z.string()).max(100), /** * The total number of completion options available. This can exceed the number of values actually sent in the response. */ total: z.optional(z.number().int()), /** * Indicates whether there are additional completion options beyond those provided in the current response, even if the exact total is unknown. 
*/ hasMore: z.optional(z.boolean()), }) satisfies z.ZodType<Completion>; type Tool<T extends FastMCPSessionAuth, Params extends ToolParameters = ToolParameters> = { name: string; description?: string; parameters?: Params; execute: ( args: z.infer<Params>, context: Context<T>, ) => Promise<string | ContentResult | TextContent | ImageContent>; }; type ResourceResult = | { text: string; } | { blob: string; }; type InputResourceTemplateArgument = Readonly<{ name: string; description?: string; complete?: ArgumentValueCompleter; }>; type ResourceTemplateArgument = Readonly<{ name: string; description?: string; complete?: ArgumentValueCompleter; }>; type ResourceTemplate< Arguments extends ResourceTemplateArgument[] = ResourceTemplateArgument[], > = { uriTemplate: string; name: string; description?: string; mimeType?: string; arguments: Arguments; complete?: (name: string, value: string) => Promise<Completion>; load: ( args: ResourceTemplateArgumentsToObject<Arguments>, ) => Promise<ResourceResult>; }; type ResourceTemplateArgumentsToObject<T extends { name: string }[]> = { [K in T[number]["name"]]: string; }; type InputResourceTemplate< Arguments extends ResourceTemplateArgument[] = ResourceTemplateArgument[], > = { uriTemplate: string; name: string; description?: string; mimeType?: string; arguments: Arguments; load: ( args: ResourceTemplateArgumentsToObject<Arguments>, ) => Promise<ResourceResult>; }; type Resource = { uri: string; name: string; description?: string; mimeType?: string; load: () => Promise<ResourceResult | ResourceResult[]>; complete?: (name: string, value: string) => Promise<Completion>; }; type ArgumentValueCompleter = (value: string) => Promise<Completion>; type InputPromptArgument = Readonly<{ name: string; description?: string; required?: boolean; complete?: ArgumentValueCompleter; enum?: string[]; }>; type PromptArgumentsToObject<T extends { name: string; required?: boolean }[]> = { [K in T[number]["name"]]: Extract< T[number], { name: K } 
>["required"] extends true ? string : string | undefined; }; type InputPrompt< Arguments extends InputPromptArgument[] = InputPromptArgument[], Args = PromptArgumentsToObject<Arguments>, > = { name: string; description?: string; arguments?: InputPromptArgument[]; load: (args: Args) => Promise<string>; }; type PromptArgument = Readonly<{ name: string; description?: string; required?: boolean; complete?: ArgumentValueCompleter; enum?: string[]; }>; type Prompt< Arguments extends PromptArgument[] = PromptArgument[], Args = PromptArgumentsToObject<Arguments>, > = { arguments?: PromptArgument[]; complete?: (name: string, value: string) => Promise<Completion>; description?: string; load: (args: Args) => Promise<string>; name: string; }; type ServerOptions<T extends FastMCPSessionAuth> = { name: string; version: `${number}.${number}.${number}`; authenticate?: Authenticate<T>; }; type LoggingLevel = | "debug" | "info" | "notice" | "warning" | "error" | "critical" | "alert" | "emergency"; const FastMCPSessionEventEmitterBase: { new (): StrictEventEmitter<EventEmitter, FastMCPSessionEvents>; } = EventEmitter; class FastMCPSessionEventEmitter extends FastMCPSessionEventEmitterBase {} type SamplingResponse = { model: string; stopReason?: "endTurn" | "stopSequence" | "maxTokens" | string; role: "user" | "assistant"; content: TextContent | ImageContent; }; type FastMCPSessionAuth = Record<string, unknown> | undefined; export class FastMCPSession<T extends FastMCPSessionAuth = FastMCPSessionAuth> extends FastMCPSessionEventEmitter { #capabilities: ServerCapabilities = {}; #clientCapabilities?: ClientCapabilities; #loggingLevel: LoggingLevel = "info"; #prompts: Prompt[] = []; #resources: Resource[] = []; #resourceTemplates: ResourceTemplate[] = []; #roots: Root[] = []; #server: Server; #auth: T | undefined; constructor({ auth, name, version, tools, resources, resourcesTemplates, prompts, }: { auth?: T; name: string; version: string; tools: Tool<T>[]; resources: Resource[]; 
resourcesTemplates: InputResourceTemplate[]; prompts: Prompt[]; }) { super(); this.#auth = auth; if (tools.length) { this.#capabilities.tools = {}; } if (resources.length || resourcesTemplates.length) { this.#capabilities.resources = {}; } if (prompts.length) { for (const prompt of prompts) { this.addPrompt(prompt); } this.#capabilities.prompts = {}; } this.#capabilities.logging = {}; this.#server = new Server( { name: name, version: version }, { capabilities: this.#capabilities }, ); this.setupErrorHandling(); this.setupLoggingHandlers(); this.setupRootsHandlers(); this.setupCompleteHandlers(); if (tools.length) { this.setupToolHandlers(tools); } if (resources.length || resourcesTemplates.length) { for (const resource of resources) { this.addResource(resource); } this.setupResourceHandlers(resources); if (resourcesTemplates.length) { for (const resourceTemplate of resourcesTemplates) { this.addResourceTemplate(resourceTemplate); } this.setupResourceTemplateHandlers(resourcesTemplates); } } if (prompts.length) { this.setupPromptHandlers(prompts); } } private addResource(inputResource: Resource) { this.#resources.push(inputResource); } private addResourceTemplate(inputResourceTemplate: InputResourceTemplate) { const completers: Record<string, ArgumentValueCompleter> = {}; for (const argument of inputResourceTemplate.arguments ?? []) { if (argument.complete) { completers[argument.name] = argument.complete; } } const resourceTemplate = { ...inputResourceTemplate, complete: async (name: string, value: string) => { if (completers[name]) { return await completers[name](value); } return { values: [], }; }, }; this.#resourceTemplates.push(resourceTemplate); } private addPrompt(inputPrompt: InputPrompt) { const completers: Record<string, ArgumentValueCompleter> = {}; const enums: Record<string, string[]> = {}; for (const argument of inputPrompt.arguments ?? 
[]) { if (argument.complete) { completers[argument.name] = argument.complete; } if (argument.enum) { enums[argument.name] = argument.enum; } } const prompt = { ...inputPrompt, complete: async (name: string, value: string) => { if (completers[name]) { return await completers[name](value); } if (enums[name]) { const fuse = new Fuse(enums[name], { keys: ["value"], }); const result = fuse.search(value); return { values: result.map((item) => item.item), total: result.length, }; } return { values: [], }; }, }; this.#prompts.push(prompt); } public get clientCapabilities(): ClientCapabilities | null { return this.#clientCapabilities ?? null; } public get server(): Server { return this.#server; } #pingInterval: ReturnType<typeof setInterval> | null = null; public async requestSampling( message: z.infer<typeof CreateMessageRequestSchema>["params"], ): Promise<SamplingResponse> { return this.#server.createMessage(message); } public async connect(transport: Transport) { if (this.#server.transport) { throw new UnexpectedStateError("Server is already connected"); } await this.#server.connect(transport); let attempt = 0; while (attempt++ < 10) { const capabilities = await this.#server.getClientCapabilities(); if (capabilities) { this.#clientCapabilities = capabilities; break; } await delay(100); } if (!this.#clientCapabilities) { console.warn('[warning] FastMCP could not infer client capabilities') } if (this.#clientCapabilities?.roots?.listChanged) { try { const roots = await this.#server.listRoots(); this.#roots = roots.roots; } catch(e) { console.error(`[error] FastMCP received error listing roots.\n\n${e instanceof Error ? 
e.stack : JSON.stringify(e)}`) } } this.#pingInterval = setInterval(async () => { try { await this.#server.ping(); } catch (error) { this.emit("error", { error: error as Error, }); } }, 1000); } public get roots(): Root[] { return this.#roots; } public async close() { if (this.#pingInterval) { clearInterval(this.#pingInterval); } try { await this.#server.close(); } catch (error) { console.error("[MCP Error]", "could not close server", error); } } private setupErrorHandling() { this.#server.onerror = (error) => { console.error("[MCP Error]", error); }; } public get loggingLevel(): LoggingLevel { return this.#loggingLevel; } private setupCompleteHandlers() { this.#server.setRequestHandler(CompleteRequestSchema, async (request) => { if (request.params.ref.type === "ref/prompt") { const prompt = this.#prompts.find( (prompt) => prompt.name === request.params.ref.name, ); if (!prompt) { throw new UnexpectedStateError("Unknown prompt", { request, }); } if (!prompt.complete) { throw new UnexpectedStateError("Prompt does not support completion", { request, }); } const completion = CompletionZodSchema.parse( await prompt.complete( request.params.argument.name, request.params.argument.value, ), ); return { completion, }; } if (request.params.ref.type === "ref/resource") { const resource = this.#resourceTemplates.find( (resource) => resource.uriTemplate === request.params.ref.uri, ); if (!resource) { throw new UnexpectedStateError("Unknown resource", { request, }); } if (!("uriTemplate" in resource)) { throw new UnexpectedStateError("Unexpected resource"); } if (!resource.complete) { throw new UnexpectedStateError( "Resource does not support completion", { request, }, ); } const completion = CompletionZodSchema.parse( await resource.complete( request.params.argument.name, request.params.argument.value, ), ); return { completion, }; } throw new UnexpectedStateError("Unexpected completion request", { request, }); }); } private setupRootsHandlers() { 
this.#server.setNotificationHandler( RootsListChangedNotificationSchema, () => { this.#server.listRoots().then((roots) => { this.#roots = roots.roots; this.emit("rootsChanged", { roots: roots.roots, }); }); }, ); } private setupLoggingHandlers() { this.#server.setRequestHandler(SetLevelRequestSchema, (request) => { this.#loggingLevel = request.params.level; return {}; }); } private setupToolHandlers(tools: Tool<T>[]) { this.#server.setRequestHandler(ListToolsRequestSchema, async () => { return { tools: tools.map((tool) => { return { name: tool.name, description: tool.description, inputSchema: tool.parameters ? zodToJsonSchema(tool.parameters) : undefined, }; }), }; }); this.#server.setRequestHandler(CallToolRequestSchema, async (request) => { const tool = tools.find((tool) => tool.name === request.params.name); if (!tool) { throw new McpError( ErrorCode.MethodNotFound, `Unknown tool: ${request.params.name}`, ); } let args: any = undefined; if (tool.parameters) { const parsed = tool.parameters.safeParse(request.params.arguments); if (!parsed.success) { throw new McpError( ErrorCode.InvalidParams, `Invalid ${request.params.name} parameters`, ); } args = parsed.data; } const progressToken = request.params?._meta?.progressToken; let result: ContentResult; try { const reportProgress = async (progress: Progress) => { await this.#server.notification({ method: "notifications/progress", params: { ...progress, progressToken, }, }); }; const log = { debug: (message: string, context?: SerializableValue) => { this.#server.sendLoggingMessage({ level: "debug", data: { message, context, }, }); }, error: (message: string, context?: SerializableValue) => { this.#server.sendLoggingMessage({ level: "error", data: { message, context, }, }); }, info: (message: string, context?: SerializableValue) => { this.#server.sendLoggingMessage({ level: "info", data: { message, context, }, }); }, warn: (message: string, context?: SerializableValue) => { this.#server.sendLoggingMessage({ level: 
"warning", data: { message, context, }, }); }, }; const maybeStringResult = await tool.execute(args, { reportProgress, log, session: this.#auth, }); if (typeof maybeStringResult === "string") { result = ContentResultZodSchema.parse({ content: [{ type: "text", text: maybeStringResult }], }); } else if ("type" in maybeStringResult) { result = ContentResultZodSchema.parse({ content: [maybeStringResult], }); } else { result = ContentResultZodSchema.parse(maybeStringResult); } } catch (error) { if (error instanceof UserError) { return { content: [{ type: "text", text: error.message }], isError: true, }; } return { content: [{ type: "text", text: `Error: ${error}` }], isError: true, }; } return result; }); } private setupResourceHandlers(resources: Resource[]) { this.#server.setRequestHandler(ListResourcesRequestSchema, async () => { return { resources: resources.map((resource) => { return { uri: resource.uri, name: resource.name, mimeType: resource.mimeType, }; }), }; }); this.#server.setRequestHandler( ReadResourceRequestSchema, async (request) => { if ("uri" in request.params) { const resource = resources.find( (resource) => "uri" in resource && resource.uri === request.params.uri, ); if (!resource) { for (const resourceTemplate of this.#resourceTemplates) { const uriTemplate = parseURITemplate( resourceTemplate.uriTemplate, ); const match = uriTemplate.fromUri(request.params.uri); if (!match) { continue; } const uri = uriTemplate.fill(match); const result = await resourceTemplate.load(match); return { contents: [ { uri: uri, mimeType: resourceTemplate.mimeType, name: resourceTemplate.name, ...result, }, ], }; } throw new McpError( ErrorCode.MethodNotFound, `Unknown resource: ${request.params.uri}`, ); } if (!("uri" in resource)) { throw new UnexpectedStateError("Resource does not support reading"); } let maybeArrayResult: Awaited<ReturnType<Resource["load"]>>; try { maybeArrayResult = await resource.load(); } catch (error) { throw new McpError( 
ErrorCode.InternalError, `Error reading resource: ${error}`, { uri: resource.uri, }, ); } if (Array.isArray(maybeArrayResult)) { return { contents: maybeArrayResult.map((result) => ({ uri: resource.uri, mimeType: resource.mimeType, name: resource.name, ...result, })), }; } else { return { contents: [ { uri: resource.uri, mimeType: resource.mimeType, name: resource.name, ...maybeArrayResult, }, ], }; } } throw new UnexpectedStateError("Unknown resource request", { request, }); }, ); } private setupResourceTemplateHandlers(resourceTemplates: ResourceTemplate[]) { this.#server.setRequestHandler( ListResourceTemplatesRequestSchema, async () => { return { resourceTemplates: resourceTemplates.map((resourceTemplate) => { return { name: resourceTemplate.name, uriTemplate: resourceTemplate.uriTemplate, }; }), }; }, ); } private setupPromptHandlers(prompts: Prompt[]) { this.#server.setRequestHandler(ListPromptsRequestSchema, async () => { return { prompts: prompts.map((prompt) => { return { name: prompt.name, description: prompt.description, arguments: prompt.arguments, complete: prompt.complete, }; }), }; }); this.#server.setRequestHandler(GetPromptRequestSchema, async (request) => { const prompt = prompts.find( (prompt) => prompt.name === request.params.name, ); if (!prompt) { throw new McpError( ErrorCode.MethodNotFound, `Unknown prompt: ${request.params.name}`, ); } const args = request.params.arguments; for (const arg of prompt.arguments ?? 
[]) { if (arg.required && !(args && arg.name in args)) { throw new McpError( ErrorCode.InvalidRequest, `Missing required argument: ${arg.name}`, ); } } let result: Awaited<ReturnType<Prompt["load"]>>; try { result = await prompt.load(args as Record<string, string | undefined>); } catch (error) { throw new McpError( ErrorCode.InternalError, `Error loading prompt: ${error}`, ); } return { description: prompt.description, messages: [ { role: "user", content: { type: "text", text: result }, }, ], }; }); } } const FastMCPEventEmitterBase: { new (): StrictEventEmitter<EventEmitter, FastMCPEvents<FastMCPSessionAuth>>; } = EventEmitter; class FastMCPEventEmitter extends FastMCPEventEmitterBase {} type Authenticate<T> = (request: http.IncomingMessage) => Promise<T>; export class FastMCP<T extends Record<string, unknown> | undefined = undefined> extends FastMCPEventEmitter { #options: ServerOptions<T>; #prompts: InputPrompt[] = []; #resources: Resource[] = []; #resourcesTemplates: InputResourceTemplate[] = []; #sessions: FastMCPSession<T>[] = []; #sseServer: SSEServer | null = null; #tools: Tool<T>[] = []; #authenticate: Authenticate<T> | undefined; constructor(public options: ServerOptions<T>) { super(); this.#options = options; this.#authenticate = options.authenticate; } public get sessions(): FastMCPSession<T>[] { return this.#sessions; } /** * Adds a tool to the server. */ public addTool<Params extends ToolParameters>(tool: Tool<T, Params>) { this.#tools.push(tool as unknown as Tool<T>); } /** * Adds a resource to the server. */ public addResource(resource: Resource) { this.#resources.push(resource); } /** * Adds a resource template to the server. */ public addResourceTemplate< const Args extends InputResourceTemplateArgument[], >(resource: InputResourceTemplate<Args>) { this.#resourcesTemplates.push(resource); } /** * Adds a prompt to the server. 
*/ public addPrompt<const Args extends InputPromptArgument[]>( prompt: InputPrompt<Args>, ) { this.#prompts.push(prompt); } /** * Starts the server. */ public async start( options: | { transportType: "stdio" } | { transportType: "sse"; sse: { endpoint: `/${string}`; port: number }; } = { transportType: "stdio", }, ) { if (options.transportType === "stdio") { const transport = new StdioServerTransport(); const session = new FastMCPSession<T>({ name: this.#options.name, version: this.#options.version, tools: this.#tools, resources: this.#resources, resourcesTemplates: this.#resourcesTemplates, prompts: this.#prompts, }); await session.connect(transport); this.#sessions.push(session); this.emit("connect", { session, }); } else if (options.transportType === "sse") { this.#sseServer = await startSSEServer<FastMCPSession<T>>({ endpoint: options.sse.endpoint as `/${string}`, port: options.sse.port, createServer: async (request) => { let auth: T | undefined; if (this.#authenticate) { auth = await this.#authenticate(request); } return new FastMCPSession<T>({ auth, name: this.#options.name, version: this.#options.version, tools: this.#tools, resources: this.#resources, resourcesTemplates: this.#resourcesTemplates, prompts: this.#prompts, }); }, onClose: (session) => { this.emit("disconnect", { session, }); }, onConnect: async (session) => { this.#sessions.push(session); this.emit("connect", { session, }); }, }); console.info( `server is running on SSE at http://localhost:${options.sse.port}${options.sse.endpoint}`, ); } else { throw new Error("Invalid transport type"); } } /** * Stops the server. 
*/ public async stop() { if (this.#sseServer) { this.#sseServer.close(); } } } export type { Context }; export type { Tool, ToolParameters }; export type { Content, TextContent, ImageContent, ContentResult }; export type { Progress, SerializableValue }; export type { Resource, ResourceResult }; export type { ResourceTemplate, ResourceTemplateArgument }; export type { Prompt, PromptArgument }; export type { InputPrompt, InputPromptArgument }; export type { ServerOptions, LoggingLevel }; export type { FastMCPEvents, FastMCPSessionEvents }; ``` -------------------------------------------------------------------------------- /.kiro/steering/dev_workflow.md: -------------------------------------------------------------------------------- ```markdown --- inclusion: always --- # Taskmaster Development Workflow This guide outlines the standard process for using Taskmaster to manage software development projects. It is written as a set of instructions for you, the AI agent. - **Your Default Stance**: For most projects, the user can work directly within the `master` task context. Your initial actions should operate on this default context unless a clear pattern for multi-context work emerges. - **Your Goal**: Your role is to elevate the user's workflow by intelligently introducing advanced features like **Tagged Task Lists** when you detect the appropriate context. Do not force tags on the user; suggest them as a helpful solution to a specific need. ## The Basic Loop The fundamental development cycle you will facilitate is: 1. **`list`**: Show the user what needs to be done. 2. **`next`**: Help the user decide what to work on. 3. **`show <id>`**: Provide details for a specific task. 4. **`expand <id>`**: Break down a complex task into smaller, manageable subtasks. 5. **Implement**: The user writes the code and tests. 6. **`update-subtask`**: Log progress and findings on behalf of the user. 7. **`set-status`**: Mark tasks and subtasks as `done` as work is completed. 8. 
**Repeat**. All your standard command executions should operate on the user's current task context, which defaults to `master`. --- ## Standard Development Workflow Process ### Simple Workflow (Default Starting Point) For new projects or when users are getting started, operate within the `master` tag context: - Start new projects by running `initialize_project` tool / `task-master init` or `parse_prd` / `task-master parse-prd --input='<prd-file.txt>'` (see @`taskmaster.md`) to generate initial tasks.json with tagged structure - Configure rule sets during initialization with `--rules` flag (e.g., `task-master init --rules kiro,windsurf`) or manage them later with `task-master rules add/remove` commands - Begin coding sessions with `get_tasks` / `task-master list` (see @`taskmaster.md`) to see current tasks, status, and IDs - Determine the next task to work on using `next_task` / `task-master next` (see @`taskmaster.md`) - Analyze task complexity with `analyze_project_complexity` / `task-master analyze-complexity --research` (see @`taskmaster.md`) before breaking down tasks - Review complexity report using `complexity_report` / `task-master complexity-report` (see @`taskmaster.md`) - Select tasks based on dependencies (all marked 'done'), priority level, and ID order - View specific task details using `get_task` / `task-master show <id>` (see @`taskmaster.md`) to understand implementation requirements - Break down complex tasks using `expand_task` / `task-master expand --id=<id> --force --research` (see @`taskmaster.md`) with appropriate flags like `--force` (to replace existing subtasks) and `--research` - Implement code following task details, dependencies, and project standards - Mark completed tasks with `set_task_status` / `task-master set-status --id=<id> --status=done` (see @`taskmaster.md`) - Update dependent tasks when implementation differs from original plan using `update` / `task-master update --from=<id> --prompt="..."` or `update_task` / `task-master 
update-task --id=<id> --prompt="..."` (see @`taskmaster.md`) --- ## Leveling Up: Agent-Led Multi-Context Workflows While the basic workflow is powerful, your primary opportunity to add value is by identifying when to introduce **Tagged Task Lists**. These patterns are your tools for creating a more organized and efficient development environment for the user, especially if you detect agentic or parallel development happening across the same session. **Critical Principle**: Most users should never see a difference in their experience. Only introduce advanced workflows when you detect clear indicators that the project has evolved beyond simple task management. ### When to Introduce Tags: Your Decision Patterns Here are the patterns to look for. When you detect one, you should propose the corresponding workflow to the user. #### Pattern 1: Simple Git Feature Branching This is the most common and direct use case for tags. - **Trigger**: The user creates a new git branch (e.g., `git checkout -b feature/user-auth`). - **Your Action**: Propose creating a new tag that mirrors the branch name to isolate the feature's tasks from `master`. - **Your Suggested Prompt**: *"I see you've created a new branch named 'feature/user-auth'. To keep all related tasks neatly organized and separate from your main list, I can create a corresponding task tag for you. This helps prevent merge conflicts in your `tasks.json` file later. Shall I create the 'feature-user-auth' tag?"* - **Tool to Use**: `task-master add-tag --from-branch` #### Pattern 2: Team Collaboration - **Trigger**: The user mentions working with teammates (e.g., "My teammate Alice is handling the database schema," or "I need to review Bob's work on the API."). - **Your Action**: Suggest creating a separate tag for the user's work to prevent conflicts with shared master context. - **Your Suggested Prompt**: *"Since you're working with Alice, I can create a separate task context for your work to avoid conflicts. 
This way, Alice can continue working with the master list while you have your own isolated context. When you're ready to merge your work, we can coordinate the tasks back to master. Shall I create a tag for your current work?"* - **Tool to Use**: `task-master add-tag my-work --copy-from-current --description="My tasks while collaborating with Alice"` #### Pattern 3: Experiments or Risky Refactors - **Trigger**: The user wants to try something that might not be kept (e.g., "I want to experiment with switching our state management library," or "Let's refactor the old API module, but I want to keep the current tasks as a reference."). - **Your Action**: Propose creating a sandboxed tag for the experimental work. - **Your Suggested Prompt**: *"This sounds like a great experiment. To keep these new tasks separate from our main plan, I can create a temporary 'experiment-zustand' tag for this work. If we decide not to proceed, we can simply delete the tag without affecting the main task list. Sound good?"* - **Tool to Use**: `task-master add-tag experiment-zustand --description="Exploring Zustand migration"` #### Pattern 4: Large Feature Initiatives (PRD-Driven) This is a more structured approach for significant new features or epics. - **Trigger**: The user describes a large, multi-step feature that would benefit from a formal plan. - **Your Action**: Propose a comprehensive, PRD-driven workflow. - **Your Suggested Prompt**: *"This sounds like a significant new feature. To manage this effectively, I suggest we create a dedicated task context for it. Here's the plan: I'll create a new tag called 'feature-xyz', then we can draft a Product Requirements Document (PRD) together to scope the work. Once the PRD is ready, I'll automatically generate all the necessary tasks within that new tag. How does that sound?"* - **Your Implementation Flow**: 1. **Create an empty tag**: `task-master add-tag feature-xyz --description "Tasks for the new XYZ feature"`. 
You can also start by creating a git branch if applicable, and then create the tag from that branch. 2. **Collaborate & Create PRD**: Work with the user to create a detailed PRD file (e.g., `.taskmaster/docs/feature-xyz-prd.txt`). 3. **Parse PRD into the new tag**: `task-master parse-prd .taskmaster/docs/feature-xyz-prd.txt --tag feature-xyz` 4. **Prepare the new task list**: Follow up by suggesting `analyze-complexity` and `expand-all` for the newly created tasks within the `feature-xyz` tag. #### Pattern 5: Version-Based Development Tailor your approach based on the project maturity indicated by tag names. - **Prototype/MVP Tags** (`prototype`, `mvp`, `poc`, `v0.x`): - **Your Approach**: Focus on speed and functionality over perfection - **Task Generation**: Create tasks that emphasize "get it working" over "get it perfect" - **Complexity Level**: Lower complexity, fewer subtasks, more direct implementation paths - **Research Prompts**: Include context like "This is a prototype - prioritize speed and basic functionality over optimization" - **Example Prompt Addition**: *"Since this is for the MVP, I'll focus on tasks that get core functionality working quickly rather than over-engineering."* - **Production/Mature Tags** (`v1.0+`, `production`, `stable`): - **Your Approach**: Emphasize robustness, testing, and maintainability - **Task Generation**: Include comprehensive error handling, testing, documentation, and optimization - **Complexity Level**: Higher complexity, more detailed subtasks, thorough implementation paths - **Research Prompts**: Include context like "This is for production - prioritize reliability, performance, and maintainability" - **Example Prompt Addition**: *"Since this is for production, I'll ensure tasks include proper error handling, testing, and documentation."* ### Advanced Workflow (Tag-Based & PRD-Driven) **When to Transition**: Recognize when the project has evolved (or the user has initialized Taskmaster on a project with existing code) beyond simple task
management. Look for these indicators: - User mentions teammates or collaboration needs - Project has grown to 15+ tasks with mixed priorities - User creates feature branches or mentions major initiatives - User initializes Taskmaster on an existing, complex codebase - User describes large features that would benefit from dedicated planning **Your Role in Transition**: Guide the user to a more sophisticated workflow that leverages tags for organization and PRDs for comprehensive planning. #### Master List Strategy (High-Value Focus) Once you transition to tag-based workflows, the `master` tag should ideally contain only: - **High-level deliverables** that provide significant business value - **Major milestones** and epic-level features - **Critical infrastructure** work that affects the entire project - **Release-blocking** items **What NOT to put in master**: - Detailed implementation subtasks (these go in feature-specific tags' parent tasks) - Refactoring work (create dedicated tags like `refactor-auth`) - Experimental features (use `experiment-*` tags) - Team member-specific tasks (use person-specific tags) #### PRD-Driven Feature Development **For New Major Features**: 1. **Identify the Initiative**: When user describes a significant feature 2. **Create Dedicated Tag**: `add_tag feature-[name] --description="[Feature description]"` 3. **Collaborative PRD Creation**: Work with user to create comprehensive PRD in `.taskmaster/docs/feature-[name]-prd.txt` 4. **Parse & Prepare**: - `parse_prd .taskmaster/docs/feature-[name]-prd.txt --tag=feature-[name]` - `analyze_project_complexity --tag=feature-[name] --research` - `expand_all --tag=feature-[name] --research` 5. **Add Master Reference**: Create a high-level task in `master` that references the feature tag **For Existing Codebase Analysis**: When users initialize Taskmaster on existing projects: 1. **Codebase Discovery**: Use your native tools for producing deep context about the code base. 
You may use `research` tool with `--tree` and `--files` to collect up to date information using the existing architecture as context. 2. **Collaborative Assessment**: Work with user to identify improvement areas, technical debt, or new features 3. **Strategic PRD Creation**: Co-author PRDs that include: - Current state analysis (based on your codebase research) - Proposed improvements or new features - Implementation strategy considering existing code 4. **Tag-Based Organization**: Parse PRDs into appropriate tags (`refactor-api`, `feature-dashboard`, `tech-debt`, etc.) 5. **Master List Curation**: Keep only the most valuable initiatives in master The parse-prd's `--append` flag enables the user to parse multiple PRDs within tags or across tags. PRDs should be focused and the number of tasks they are parsed into should be strategically chosen relative to the PRD's complexity and level of detail. ### Workflow Transition Examples **Example 1: Simple → Team-Based** ``` User: "Alice is going to help with the API work" Your Response: "Great! To avoid conflicts, I'll create a separate task context for your work. Alice can continue with the master list while you work in your own context. When you're ready to merge, we can coordinate the tasks back together." Action: add_tag my-api-work --copy-from-current --description="My API tasks while collaborating with Alice" ``` **Example 2: Simple → PRD-Driven** ``` User: "I want to add a complete user dashboard with analytics, user management, and reporting" Your Response: "This sounds like a major feature that would benefit from detailed planning. Let me create a dedicated context for this work and we can draft a PRD together to ensure we capture all requirements." Actions: 1. add_tag feature-dashboard --description="User dashboard with analytics and management" 2. Collaborate on PRD creation 3. parse_prd dashboard-prd.txt --tag=feature-dashboard 4. 
Add high-level "User Dashboard" task to master ``` **Example 3: Existing Project → Strategic Planning** ``` User: "I just initialized Taskmaster on my existing React app. It's getting messy and I want to improve it." Your Response: "Let me research your codebase to understand the current architecture, then we can create a strategic plan for improvements." Actions: 1. research "Current React app architecture and improvement opportunities" --tree --files=src/ 2. Collaborate on improvement PRD based on findings 3. Create tags for different improvement areas (refactor-components, improve-state-management, etc.) 4. Keep only major improvement initiatives in master ``` --- ## Primary Interaction: MCP Server vs. CLI Taskmaster offers two primary ways to interact: 1. **MCP Server (Recommended for Integrated Tools)**: - For AI agents and integrated development environments (like Kiro), interacting via the **MCP server is the preferred method**. - The MCP server exposes Taskmaster functionality through a set of tools (e.g., `get_tasks`, `add_subtask`). - This method offers better performance, structured data exchange, and richer error handling compared to CLI parsing. - Refer to @`mcp.md` for details on the MCP architecture and available tools. - A comprehensive list and description of MCP tools and their corresponding CLI commands can be found in @`taskmaster.md`. - **Restart the MCP server** if core logic in `scripts/modules` or MCP tool/direct function definitions change. - **Note**: MCP tools fully support tagged task lists with complete tag management capabilities. 2. **`task-master` CLI (For Users & Fallback)**: - The global `task-master` command provides a user-friendly interface for direct terminal interaction. - It can also serve as a fallback if the MCP server is inaccessible or a specific function isn't exposed via MCP. - Install globally with `npm install -g task-master-ai` or use locally via `npx task-master-ai ...`. 
- The CLI commands often mirror the MCP tools (e.g., `task-master list` corresponds to `get_tasks`). - Refer to @`taskmaster.md` for a detailed command reference. - **Tagged Task Lists**: CLI fully supports the new tagged system with seamless migration. ## How the Tag System Works (For Your Reference) - **Data Structure**: Tasks are organized into separate contexts (tags) like "master", "feature-branch", or "v2.0". - **Silent Migration**: Existing projects automatically migrate to use a "master" tag with zero disruption. - **Context Isolation**: Tasks in different tags are completely separate. Changes in one tag do not affect any other tag. - **Manual Control**: The user is always in control. There is no automatic switching. You facilitate switching by using `use-tag <name>`. - **Full CLI & MCP Support**: All tag management commands are available through both the CLI and MCP tools for you to use. Refer to @`taskmaster.md` for a full command list. --- ## Task Complexity Analysis - Run `analyze_project_complexity` / `task-master analyze-complexity --research` (see @`taskmaster.md`) for comprehensive analysis - Review complexity report via `complexity_report` / `task-master complexity-report` (see @`taskmaster.md`) for a formatted, readable version. - Focus on tasks with highest complexity scores (8-10) for detailed breakdown - Use analysis results to determine appropriate subtask allocation - Note that reports are automatically used by the `expand_task` tool/command ## Task Breakdown Process - Use `expand_task` / `task-master expand --id=<id>`. It automatically uses the complexity report if found, otherwise generates default number of subtasks. - Use `--num=<number>` to specify an explicit number of subtasks, overriding defaults or complexity report recommendations. - Add `--research` flag to leverage Perplexity AI for research-backed expansion. - Add `--force` flag to clear existing subtasks before generating new ones (default is to append). 
- Use `--prompt="<context>"` to provide additional context when needed. - Review and adjust generated subtasks as necessary. - Use `expand_all` tool or `task-master expand --all` to expand multiple pending tasks at once, respecting flags like `--force` and `--research`. - If subtasks need complete replacement (regardless of the `--force` flag on `expand`), clear them first with `clear_subtasks` / `task-master clear-subtasks --id=<id>`. ## Implementation Drift Handling - When implementation differs significantly from planned approach - When future tasks need modification due to current implementation choices - When new dependencies or requirements emerge - Use `update` / `task-master update --from=<futureTaskId> --prompt='<explanation>\nUpdate context...' --research` to update multiple future tasks. - Use `update_task` / `task-master update-task --id=<taskId> --prompt='<explanation>\nUpdate context...' --research` to update a single specific task. ## Task Status Management - Use 'pending' for tasks ready to be worked on - Use 'done' for completed and verified tasks - Use 'deferred' for postponed tasks - Add custom status values as needed for project-specific workflows ## Task Structure Fields - **id**: Unique identifier for the task (Example: `1`, `1.1`) - **title**: Brief, descriptive title (Example: `"Initialize Repo"`) - **description**: Concise summary of what the task involves (Example: `"Create a new repository, set up initial structure."`) - **status**: Current state of the task (Example: `"pending"`, `"done"`, `"deferred"`) - **dependencies**: IDs of prerequisite tasks (Example: `[1, 2.1]`) - Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending) - This helps quickly identify which prerequisite tasks are blocking work - **priority**: Importance level (Example: `"high"`, `"medium"`, `"low"`) - **details**: In-depth implementation instructions (Example: `"Use GitHub client ID/secret, handle callback, set session token."`) - 
**testStrategy**: Verification approach (Example: `"Deploy and call endpoint to confirm 'Hello World' response."`) - **subtasks**: List of smaller, more specific tasks (Example: `[{"id": 1, "title": "Configure OAuth", ...}]`) - Refer to task structure details (previously linked to `tasks.md`). ## Configuration Management (Updated) Taskmaster configuration is managed through two main mechanisms: 1. **`.taskmaster/config.json` File (Primary):** * Located in the project root directory. * Stores most configuration settings: AI model selections (main, research, fallback), parameters (max tokens, temperature), logging level, default subtasks/priority, project name, etc. * **Tagged System Settings**: Includes `global.defaultTag` (defaults to "master") and `tags` section for tag management configuration. * **Managed via `task-master models --setup` command.** Do not edit manually unless you know what you are doing. * **View/Set specific models via `task-master models` command or `models` MCP tool.** * Created automatically when you run `task-master models --setup` for the first time or during tagged system migration. 2. **Environment Variables (`.env` / `mcp.json`):** * Used **only** for sensitive API keys and specific endpoint URLs. * Place API keys (one per provider) in a `.env` file in the project root for CLI usage. * For MCP/Kiro integration, configure these keys in the `env` section of `.kiro/mcp.json`. * Available keys/variables: See `assets/env.example` or the Configuration section in the command reference (previously linked to `taskmaster.md`). 3. **`.taskmaster/state.json` File (Tagged System State):** * Tracks current tag context and migration status. * Automatically created during tagged system migration. * Contains: `currentTag`, `lastSwitched`, `migrationNoticeShown`. **Important:** Non-API key settings (like model selections, `MAX_TOKENS`, `TASKMASTER_LOG_LEVEL`) are **no longer configured via environment variables**. 
Use the `task-master models` command (or `--setup` for interactive configuration) or the `models` MCP tool. **If AI commands FAIL in MCP** verify that the API key for the selected provider is present in the `env` section of `.kiro/mcp.json`. **If AI commands FAIL in CLI** verify that the API key for the selected provider is present in the `.env` file in the root of the project. ## Rules Management Taskmaster supports multiple AI coding assistant rule sets that can be configured during project initialization or managed afterward: - **Available Profiles**: Claude Code, Cline, Codex, Kiro, Roo Code, Trae, Windsurf (claude, cline, codex, kiro, roo, trae, windsurf) - **During Initialization**: Use `task-master init --rules kiro,windsurf` to specify which rule sets to include - **After Initialization**: Use `task-master rules add <profiles>` or `task-master rules remove <profiles>` to manage rule sets - **Interactive Setup**: Use `task-master rules setup` to launch an interactive prompt for selecting rule profiles - **Default Behavior**: If no `--rules` flag is specified during initialization, all available rule profiles are included - **Rule Structure**: Each profile creates its own directory (e.g., `.kiro/steering`, `.roo/rules`) with appropriate configuration files ## Determining the Next Task - Run `next_task` / `task-master next` to show the next task to work on. 
- The command identifies tasks with all dependencies satisfied - Tasks are prioritized by priority level, dependency count, and ID - The command shows comprehensive task information including: - Basic task details and description - Implementation details - Subtasks (if they exist) - Contextual suggested actions - Recommended before starting any new development work - Respects your project's dependency structure - Ensures tasks are completed in the appropriate sequence - Provides ready-to-use commands for common task actions ## Viewing Specific Task Details - Run `get_task` / `task-master show <id>` to view a specific task. - Use dot notation for subtasks: `task-master show 1.2` (shows subtask 2 of task 1) - Displays comprehensive information similar to the next command, but for a specific task - For parent tasks, shows all subtasks and their current status - For subtasks, shows parent task information and relationship - Provides contextual suggested actions appropriate for the specific task - Useful for examining task details before implementation or checking status ## Managing Task Dependencies - Use `add_dependency` / `task-master add-dependency --id=<id> --depends-on=<id>` to add a dependency. - Use `remove_dependency` / `task-master remove-dependency --id=<id> --depends-on=<id>` to remove a dependency. 
- The system prevents circular dependencies and duplicate dependency entries - Dependencies are checked for existence before being added or removed - Task files are automatically regenerated after dependency changes - Dependencies are visualized with status indicators in task listings and files ## Task Reorganization - Use `move_task` / `task-master move --from=<id> --to=<id>` to move tasks or subtasks within the hierarchy - This command supports several use cases: - Moving a standalone task to become a subtask (e.g., `--from=5 --to=7`) - Moving a subtask to become a standalone task (e.g., `--from=5.2 --to=7`) - Moving a subtask to a different parent (e.g., `--from=5.2 --to=7.3`) - Reordering subtasks within the same parent (e.g., `--from=5.2 --to=5.4`) - Moving a task to a new, non-existent ID position (e.g., `--from=5 --to=25`) - Moving multiple tasks at once using comma-separated IDs (e.g., `--from=10,11,12 --to=16,17,18`) - The system includes validation to prevent data loss: - Allows moving to non-existent IDs by creating placeholder tasks - Prevents moving to existing task IDs that have content (to avoid overwriting) - Validates source tasks exist before attempting to move them - The system maintains proper parent-child relationships and dependency integrity - Task files are automatically regenerated after the move operation - This provides greater flexibility in organizing and refining your task structure as project understanding evolves - This is especially useful when dealing with potential merge conflicts arising from teams creating tasks on separate branches. Solve these conflicts very easily by moving your tasks and keeping theirs. ## Iterative Subtask Implementation Once a task has been broken down into subtasks using `expand_task` or similar methods, follow this iterative process for implementation: 1. 
**Understand the Goal (Preparation):** * Use `get_task` / `task-master show <subtaskId>` (see @`taskmaster.md`) to thoroughly understand the specific goals and requirements of the subtask. 2. **Initial Exploration & Planning (Iteration 1):** * This is the first attempt at creating a concrete implementation plan. * Explore the codebase to identify the precise files, functions, and even specific lines of code that will need modification. * Determine the intended code changes (diffs) and their locations. * Gather *all* relevant details from this exploration phase. 3. **Log the Plan:** * Run `update_subtask` / `task-master update-subtask --id=<subtaskId> --prompt='<detailed plan>'`. * Provide the *complete and detailed* findings from the exploration phase in the prompt. Include file paths, line numbers, proposed diffs, reasoning, and any potential challenges identified. Do not omit details. The goal is to create a rich, timestamped log within the subtask's `details`. 4. **Verify the Plan:** * Run `get_task` / `task-master show <subtaskId>` again to confirm that the detailed implementation plan has been successfully appended to the subtask's details. 5. **Begin Implementation:** * Set the subtask status using `set_task_status` / `task-master set-status --id=<subtaskId> --status=in-progress`. * Start coding based on the logged plan. 6. **Refine and Log Progress (Iteration 2+):** * As implementation progresses, you will encounter challenges, discover nuances, or confirm successful approaches. * **Before appending new information**: Briefly review the *existing* details logged in the subtask (using `get_task` or recalling from context) to ensure the update adds fresh insights and avoids redundancy. * **Regularly** use `update_subtask` / `task-master update-subtask --id=<subtaskId> --prompt='<update details>\n- What worked...\n- What didn't work...'` to append new findings. * **Crucially, log:** * What worked ("fundamental truths" discovered). 
* What didn't work and why (to avoid repeating mistakes). * Specific code snippets or configurations that were successful. * Decisions made, especially if confirmed with user input. * Any deviations from the initial plan and the reasoning. * The objective is to continuously enrich the subtask's details, creating a log of the implementation journey that helps the AI (and human developers) learn, adapt, and avoid repeating errors. 7. **Review & Update Rules (Post-Implementation):** * Once the implementation for the subtask is functionally complete, review all code changes and the relevant chat history. * Identify any new or modified code patterns, conventions, or best practices established during the implementation. * Create new or update existing rules following internal guidelines (previously linked to `cursor_rules.md` and `self_improve.md`). 8. **Mark Task Complete:** * After verifying the implementation and updating any necessary rules, mark the subtask as completed: `set_task_status` / `task-master set-status --id=<subtaskId> --status=done`. 9. **Commit Changes (If using Git):** * Stage the relevant code changes and any updated/new rule files (`git add .`). * Craft a comprehensive Git commit message summarizing the work done for the subtask, including both code implementation and any rule adjustments. * Execute the commit command directly in the terminal (e.g., `git commit -m 'feat(module): Implement feature X for subtask <subtaskId>\n\n- Details about changes...\n- Updated rule Y for pattern Z'`). * Consider if a Changeset is needed according to internal versioning guidelines (previously linked to `changeset.md`). If so, run `npm run changeset`, stage the generated file, and amend the commit or create a new one. 10. **Proceed to Next Subtask:** * Identify the next subtask (e.g., using `next_task` / `task-master next`). ## Code Analysis & Refactoring Techniques - **Top-Level Function Search**: - Useful for understanding module structure or planning refactors. 
- Use grep/ripgrep to find exported functions/constants: `rg "export (async function|function|const) \w+"` or similar patterns. - Can help compare functions between files during migrations or identify potential naming conflicts. --- *This workflow provides a general guideline. Adapt it based on your specific project needs and team practices.* ``` -------------------------------------------------------------------------------- /scripts/modules/task-manager/move-task.js: -------------------------------------------------------------------------------- ```javascript import path from 'path'; import { log, readJSON, writeJSON, setTasksForTag, traverseDependencies } from '../utils.js'; import generateTaskFiles from './generate-task-files.js'; import { findCrossTagDependencies, getDependentTaskIds, validateSubtaskMove } from '../dependency-manager.js'; /** * Find all dependencies recursively for a set of source tasks with depth limiting * @param {Array} sourceTasks - The source tasks to find dependencies for * @param {Array} allTasks - All available tasks from all tags * @param {Object} options - Options object * @param {number} options.maxDepth - Maximum recursion depth (default: 50) * @param {boolean} options.includeSelf - Whether to include self-references (default: false) * @returns {Array} Array of all dependency task IDs */ function findAllDependenciesRecursively(sourceTasks, allTasks, options = {}) { return traverseDependencies(sourceTasks, allTasks, { ...options, direction: 'forward', logger: { warn: console.warn } }); } /** * Structured error class for move operations */ class MoveTaskError extends Error { constructor(code, message, data = {}) { super(message); this.name = 'MoveTaskError'; this.code = code; this.data = data; } } /** * Error codes for move operations */ const MOVE_ERROR_CODES = { CROSS_TAG_DEPENDENCY_CONFLICTS: 'CROSS_TAG_DEPENDENCY_CONFLICTS', CANNOT_MOVE_SUBTASK: 'CANNOT_MOVE_SUBTASK', SOURCE_TARGET_TAGS_SAME: 'SOURCE_TARGET_TAGS_SAME', 
TASK_NOT_FOUND: 'TASK_NOT_FOUND', SUBTASK_NOT_FOUND: 'SUBTASK_NOT_FOUND', PARENT_TASK_NOT_FOUND: 'PARENT_TASK_NOT_FOUND', PARENT_TASK_NO_SUBTASKS: 'PARENT_TASK_NO_SUBTASKS', DESTINATION_TASK_NOT_FOUND: 'DESTINATION_TASK_NOT_FOUND', TASK_ALREADY_EXISTS: 'TASK_ALREADY_EXISTS', INVALID_TASKS_FILE: 'INVALID_TASKS_FILE', ID_COUNT_MISMATCH: 'ID_COUNT_MISMATCH', INVALID_SOURCE_TAG: 'INVALID_SOURCE_TAG', INVALID_TARGET_TAG: 'INVALID_TARGET_TAG' }; /** * Normalize a dependency value to its numeric parent task ID. * - Numbers are returned as-is (if finite) * - Numeric strings are parsed ("5" -> 5) * - Dotted strings return the parent portion ("5.2" -> 5) * - Empty/invalid values return null * - null/undefined are preserved * @param {number|string|null|undefined} dep * @returns {number|null|undefined} */ function normalizeDependency(dep) { if (dep === null || dep === undefined) return dep; if (typeof dep === 'number') return Number.isFinite(dep) ? dep : null; if (typeof dep === 'string') { const trimmed = dep.trim(); if (trimmed === '') return null; const parentPart = trimmed.includes('.') ? trimmed.split('.')[0] : trimmed; const parsed = parseInt(parentPart, 10); return Number.isFinite(parsed) ? parsed : null; } return null; } /** * Normalize an array of dependency values to numeric IDs. * Preserves null/undefined input (returns as-is) and filters out invalid entries. 
* @param {Array<any>|null|undefined} deps * @returns {Array<number>|null|undefined} */ function normalizeDependencies(deps) { if (deps === null || deps === undefined) return deps; if (!Array.isArray(deps)) return deps; return deps .map((d) => normalizeDependency(d)) .filter((n) => Number.isFinite(n)); } /** * Move one or more tasks/subtasks to new positions * @param {string} tasksPath - Path to tasks.json file * @param {string} sourceId - ID(s) of the task/subtask to move (e.g., '5' or '5.2' or '5,6,7') * @param {string} destinationId - ID(s) of the destination (e.g., '7' or '7.3' or '7,8,9') * @param {boolean} generateFiles - Whether to regenerate task files after moving * @param {Object} options - Additional options * @param {string} options.projectRoot - Project root directory for tag resolution * @param {string} options.tag - Explicit tag to use (optional) * @returns {Object} Result object with moved task details */ async function moveTask( tasksPath, sourceId, destinationId, generateFiles = false, options = {} ) { const { projectRoot, tag } = options; // Check if we have comma-separated IDs (batch move) const sourceIds = sourceId.split(',').map((id) => id.trim()); const destinationIds = destinationId.split(',').map((id) => id.trim()); if (sourceIds.length !== destinationIds.length) { throw new MoveTaskError( MOVE_ERROR_CODES.ID_COUNT_MISMATCH, `Number of source IDs (${sourceIds.length}) must match number of destination IDs (${destinationIds.length})` ); } // For batch moves, process each pair sequentially if (sourceIds.length > 1) { const results = []; for (let i = 0; i < sourceIds.length; i++) { const result = await moveTask( tasksPath, sourceIds[i], destinationIds[i], false, // Don't generate files for each individual move options ); results.push(result); } // Generate files once at the end if requested if (generateFiles) { await generateTaskFiles(tasksPath, path.dirname(tasksPath), { tag: tag, projectRoot: projectRoot }); } return { message: `Successfully 
moved ${sourceIds.length} tasks/subtasks`, moves: results }; } // Single move logic // Read the raw data without tag resolution to preserve tagged structure let rawData = readJSON(tasksPath, projectRoot, tag); // Handle the case where readJSON returns resolved data with _rawTaggedData if (rawData && rawData._rawTaggedData) { // Use the raw tagged data and discard the resolved view rawData = rawData._rawTaggedData; } // Ensure the tag exists in the raw data if (!rawData || !rawData[tag] || !Array.isArray(rawData[tag].tasks)) { throw new MoveTaskError( MOVE_ERROR_CODES.INVALID_TASKS_FILE, `Invalid tasks file or tag "${tag}" not found at ${tasksPath}` ); } // Get the tasks for the current tag const tasks = rawData[tag].tasks; log( 'info', `Moving task/subtask ${sourceId} to ${destinationId} (tag: ${tag})` ); // Parse source and destination IDs const isSourceSubtask = sourceId.includes('.'); const isDestSubtask = destinationId.includes('.'); let result; if (isSourceSubtask && isDestSubtask) { // Subtask to subtask result = moveSubtaskToSubtask(tasks, sourceId, destinationId); } else if (isSourceSubtask && !isDestSubtask) { // Subtask to task result = moveSubtaskToTask(tasks, sourceId, destinationId); } else if (!isSourceSubtask && isDestSubtask) { // Task to subtask result = moveTaskToSubtask(tasks, sourceId, destinationId); } else { // Task to task result = moveTaskToTask(tasks, sourceId, destinationId); } // Update the data structure with the modified tasks rawData[tag].tasks = tasks; // Always write the data object, never the _rawTaggedData directly // The writeJSON function will filter out _rawTaggedData automatically writeJSON(tasksPath, rawData, options.projectRoot, tag); if (generateFiles) { await generateTaskFiles(tasksPath, path.dirname(tasksPath), { tag: tag, projectRoot: projectRoot }); } return result; } // Helper functions for different move scenarios function moveSubtaskToSubtask(tasks, sourceId, destinationId) { // Parse IDs const [sourceParentId, 
sourceSubtaskId] = sourceId .split('.') .map((id) => parseInt(id, 10)); const [destParentId, destSubtaskId] = destinationId .split('.') .map((id) => parseInt(id, 10)); // Find source and destination parent tasks const sourceParentTask = tasks.find((t) => t.id === sourceParentId); const destParentTask = tasks.find((t) => t.id === destParentId); if (!sourceParentTask) { throw new MoveTaskError( MOVE_ERROR_CODES.PARENT_TASK_NOT_FOUND, `Source parent task with ID ${sourceParentId} not found` ); } if (!destParentTask) { throw new MoveTaskError( MOVE_ERROR_CODES.PARENT_TASK_NOT_FOUND, `Destination parent task with ID ${destParentId} not found` ); } // Initialize subtasks arrays if they don't exist (based on commit fixes) if (!sourceParentTask.subtasks) { sourceParentTask.subtasks = []; } if (!destParentTask.subtasks) { destParentTask.subtasks = []; } // Find source subtask const sourceSubtaskIndex = sourceParentTask.subtasks.findIndex( (st) => st.id === sourceSubtaskId ); if (sourceSubtaskIndex === -1) { throw new MoveTaskError( MOVE_ERROR_CODES.SUBTASK_NOT_FOUND, `Source subtask ${sourceId} not found` ); } const sourceSubtask = sourceParentTask.subtasks[sourceSubtaskIndex]; if (sourceParentId === destParentId) { // Moving within the same parent if (destParentTask.subtasks.length > 0) { const destSubtaskIndex = destParentTask.subtasks.findIndex( (st) => st.id === destSubtaskId ); if (destSubtaskIndex !== -1) { // Remove from old position sourceParentTask.subtasks.splice(sourceSubtaskIndex, 1); // Insert at new position (adjust index if moving within same array) const adjustedIndex = sourceSubtaskIndex < destSubtaskIndex ? 
destSubtaskIndex - 1 : destSubtaskIndex; destParentTask.subtasks.splice(adjustedIndex + 1, 0, sourceSubtask); } else { // Destination subtask doesn't exist, insert at end sourceParentTask.subtasks.splice(sourceSubtaskIndex, 1); destParentTask.subtasks.push(sourceSubtask); } } else { // No existing subtasks, this will be the first one sourceParentTask.subtasks.splice(sourceSubtaskIndex, 1); destParentTask.subtasks.push(sourceSubtask); } } else { // Moving between different parents moveSubtaskToAnotherParent( sourceSubtask, sourceParentTask, sourceSubtaskIndex, destParentTask, destSubtaskId ); } return { message: `Moved subtask ${sourceId} to ${destinationId}`, movedItem: sourceSubtask }; } function moveSubtaskToTask(tasks, sourceId, destinationId) { // Parse source ID const [sourceParentId, sourceSubtaskId] = sourceId .split('.') .map((id) => parseInt(id, 10)); const destTaskId = parseInt(destinationId, 10); // Find source parent and destination task const sourceParentTask = tasks.find((t) => t.id === sourceParentId); if (!sourceParentTask) { throw new MoveTaskError( MOVE_ERROR_CODES.PARENT_TASK_NOT_FOUND, `Source parent task with ID ${sourceParentId} not found` ); } if (!sourceParentTask.subtasks) { throw new MoveTaskError( MOVE_ERROR_CODES.PARENT_TASK_NO_SUBTASKS, `Source parent task ${sourceParentId} has no subtasks` ); } // Find source subtask const sourceSubtaskIndex = sourceParentTask.subtasks.findIndex( (st) => st.id === sourceSubtaskId ); if (sourceSubtaskIndex === -1) { throw new MoveTaskError( MOVE_ERROR_CODES.SUBTASK_NOT_FOUND, `Source subtask ${sourceId} not found` ); } const sourceSubtask = sourceParentTask.subtasks[sourceSubtaskIndex]; // Check if destination task exists const existingDestTask = tasks.find((t) => t.id === destTaskId); if (existingDestTask) { throw new MoveTaskError( MOVE_ERROR_CODES.TASK_ALREADY_EXISTS, `Cannot move to existing task ID ${destTaskId}. 
Choose a different ID or use subtask destination.` ); } // Create new task from subtask const newTask = { id: destTaskId, title: sourceSubtask.title, description: sourceSubtask.description, status: sourceSubtask.status || 'pending', dependencies: sourceSubtask.dependencies || [], priority: sourceSubtask.priority || 'medium', details: sourceSubtask.details || '', testStrategy: sourceSubtask.testStrategy || '', subtasks: [] }; // Remove subtask from source parent sourceParentTask.subtasks.splice(sourceSubtaskIndex, 1); // Insert new task in correct position const insertIndex = tasks.findIndex((t) => t.id > destTaskId); if (insertIndex === -1) { tasks.push(newTask); } else { tasks.splice(insertIndex, 0, newTask); } return { message: `Converted subtask ${sourceId} to task ${destinationId}`, movedItem: newTask }; } function moveTaskToSubtask(tasks, sourceId, destinationId) { // Parse IDs const sourceTaskId = parseInt(sourceId, 10); const [destParentId, destSubtaskId] = destinationId .split('.') .map((id) => parseInt(id, 10)); // Find source task and destination parent const sourceTaskIndex = tasks.findIndex((t) => t.id === sourceTaskId); const destParentTask = tasks.find((t) => t.id === destParentId); if (sourceTaskIndex === -1) { throw new MoveTaskError( MOVE_ERROR_CODES.TASK_NOT_FOUND, `Source task with ID ${sourceTaskId} not found` ); } if (!destParentTask) { throw new MoveTaskError( MOVE_ERROR_CODES.PARENT_TASK_NOT_FOUND, `Destination parent task with ID ${destParentId} not found` ); } const sourceTask = tasks[sourceTaskIndex]; // Initialize subtasks array if it doesn't exist (based on commit fixes) if (!destParentTask.subtasks) { destParentTask.subtasks = []; } // Create new subtask from task const newSubtask = { id: destSubtaskId, title: sourceTask.title, description: sourceTask.description, status: sourceTask.status || 'pending', dependencies: sourceTask.dependencies || [], details: sourceTask.details || '', testStrategy: sourceTask.testStrategy || '' }; // Find 
insertion position (based on commit fixes) let destSubtaskIndex = -1; if (destParentTask.subtasks.length > 0) { destSubtaskIndex = destParentTask.subtasks.findIndex( (st) => st.id === destSubtaskId ); if (destSubtaskIndex === -1) { // Subtask doesn't exist, we'll insert at the end destSubtaskIndex = destParentTask.subtasks.length - 1; } } // Insert at specific position (based on commit fixes) const insertPosition = destSubtaskIndex === -1 ? 0 : destSubtaskIndex + 1; destParentTask.subtasks.splice(insertPosition, 0, newSubtask); // Remove the original task from the tasks array tasks.splice(sourceTaskIndex, 1); return { message: `Converted task ${sourceId} to subtask ${destinationId}`, movedItem: newSubtask }; } function moveTaskToTask(tasks, sourceId, destinationId) { const sourceTaskId = parseInt(sourceId, 10); const destTaskId = parseInt(destinationId, 10); // Find source task const sourceTaskIndex = tasks.findIndex((t) => t.id === sourceTaskId); if (sourceTaskIndex === -1) { throw new MoveTaskError( MOVE_ERROR_CODES.TASK_NOT_FOUND, `Source task with ID ${sourceTaskId} not found` ); } const sourceTask = tasks[sourceTaskIndex]; // Check if destination exists const destTaskIndex = tasks.findIndex((t) => t.id === destTaskId); if (destTaskIndex !== -1) { // Destination exists - this could be overwriting or swapping const destTask = tasks[destTaskIndex]; // For now, throw an error to avoid accidental overwrites throw new MoveTaskError( MOVE_ERROR_CODES.TASK_ALREADY_EXISTS, `Task with ID ${destTaskId} already exists. 
Use a different destination ID.` ); } else { // Destination doesn't exist - create new task ID return moveTaskToNewId(tasks, sourceTaskIndex, sourceTask, destTaskId); } } function moveSubtaskToAnotherParent( sourceSubtask, sourceParentTask, sourceSubtaskIndex, destParentTask, destSubtaskId ) { const destSubtaskId_num = parseInt(destSubtaskId, 10); // Create new subtask with destination ID const newSubtask = { ...sourceSubtask, id: destSubtaskId_num }; // Initialize subtasks array if it doesn't exist (based on commit fixes) if (!destParentTask.subtasks) { destParentTask.subtasks = []; } // Find insertion position let destSubtaskIndex = -1; if (destParentTask.subtasks.length > 0) { destSubtaskIndex = destParentTask.subtasks.findIndex( (st) => st.id === destSubtaskId_num ); if (destSubtaskIndex === -1) { // Subtask doesn't exist, we'll insert at the end destSubtaskIndex = destParentTask.subtasks.length - 1; } } // Insert at the destination position (based on commit fixes) const insertPosition = destSubtaskIndex === -1 ? 
0 : destSubtaskIndex + 1; destParentTask.subtasks.splice(insertPosition, 0, newSubtask); // Remove the subtask from the original parent sourceParentTask.subtasks.splice(sourceSubtaskIndex, 1); return newSubtask; } function moveTaskToNewId(tasks, sourceTaskIndex, sourceTask, destTaskId) { const destTaskIndex = tasks.findIndex((t) => t.id === destTaskId); // Create moved task with new ID const movedTask = { ...sourceTask, id: destTaskId }; // Update any dependencies that reference the old task ID tasks.forEach((task) => { if (task.dependencies && task.dependencies.includes(sourceTask.id)) { const depIndex = task.dependencies.indexOf(sourceTask.id); task.dependencies[depIndex] = destTaskId; } if (task.subtasks) { task.subtasks.forEach((subtask) => { if ( subtask.dependencies && subtask.dependencies.includes(sourceTask.id) ) { const depIndex = subtask.dependencies.indexOf(sourceTask.id); subtask.dependencies[depIndex] = destTaskId; } }); } }); // Update dependencies within movedTask's subtasks that reference sibling subtasks if (Array.isArray(movedTask.subtasks)) { movedTask.subtasks.forEach((subtask) => { if (Array.isArray(subtask.dependencies)) { subtask.dependencies = subtask.dependencies.map((dep) => { // If dependency is a string like "oldParent.subId", update to "newParent.subId" if (typeof dep === 'string' && dep.includes('.')) { const [depParent, depSub] = dep.split('.'); if (parseInt(depParent, 10) === sourceTask.id) { return `${destTaskId}.${depSub}`; } } // If dependency is a number, and matches a subtask ID in the moved task, leave as is (context is implied) return dep; }); } }); } // Strategy based on commit fixes: remove source first, then replace destination // This avoids index shifting problems // Remove the source task first tasks.splice(sourceTaskIndex, 1); // Adjust the destination index if the source was before the destination // Since we removed the source, indices after it shift down by 1 const adjustedDestIndex = sourceTaskIndex < destTaskIndex 
? destTaskIndex - 1 : destTaskIndex; // Replace the placeholder destination task with the moved task (based on commit fixes) if (adjustedDestIndex >= 0 && adjustedDestIndex < tasks.length) { tasks[adjustedDestIndex] = movedTask; } else { // Insert at the end if index is out of bounds tasks.push(movedTask); } log('info', `Moved task ${sourceTask.id} to new ID ${destTaskId}`); return { message: `Moved task ${sourceTask.id} to new ID ${destTaskId}`, movedItem: movedTask }; } /** * Get all tasks from all tags with tag information * @param {Object} rawData - The raw tagged data object * @returns {Array} A flat array of all task objects with tag property */ function getAllTasksWithTags(rawData) { let allTasks = []; for (const tagName in rawData) { if ( Object.prototype.hasOwnProperty.call(rawData, tagName) && rawData[tagName] && Array.isArray(rawData[tagName].tasks) ) { const tasksWithTag = rawData[tagName].tasks.map((task) => ({ ...task, tag: tagName })); allTasks = allTasks.concat(tasksWithTag); } } return allTasks; } /** * Validate move operation parameters and data * @param {string} tasksPath - Path to tasks.json file * @param {Array} taskIds - Array of task IDs to move * @param {string} sourceTag - Source tag name * @param {string} targetTag - Target tag name * @param {Object} context - Context object * @returns {Object} Validation result with rawData and sourceTasks */ async function validateMove(tasksPath, taskIds, sourceTag, targetTag, context) { const { projectRoot } = context; // Read the raw data without tag resolution to preserve tagged structure let rawData = readJSON(tasksPath, projectRoot, sourceTag); // Handle the case where readJSON returns resolved data with _rawTaggedData if (rawData && rawData._rawTaggedData) { rawData = rawData._rawTaggedData; } // Validate source tag exists if ( !rawData || !rawData[sourceTag] || !Array.isArray(rawData[sourceTag].tasks) ) { throw new MoveTaskError( MOVE_ERROR_CODES.INVALID_SOURCE_TAG, `Source tag "${sourceTag}" not 
found or invalid` ); } // Create target tag if it doesn't exist if (!rawData[targetTag]) { rawData[targetTag] = { tasks: [] }; log('info', `Created new tag "${targetTag}"`); } // Normalize all IDs to strings once for consistent comparison const normalizedSearchIds = taskIds.map((id) => String(id)); const sourceTasks = rawData[sourceTag].tasks.filter((t) => { const normalizedTaskId = String(t.id); return normalizedSearchIds.includes(normalizedTaskId); }); // Validate subtask movement taskIds.forEach((taskId) => { validateSubtaskMove(taskId, sourceTag, targetTag); }); return { rawData, sourceTasks }; } /** * Load and prepare task data for move operation * @param {Object} validation - Validation result from validateMove * @returns {Object} Prepared data with rawData, sourceTasks, and allTasks */ async function prepareTaskData(validation) { const { rawData, sourceTasks } = validation; // Get all tasks for validation const allTasks = getAllTasksWithTags(rawData); return { rawData, sourceTasks, allTasks }; } /** * Resolve dependencies and determine tasks to move * @param {Array} sourceTasks - Source tasks to move * @param {Array} allTasks - All available tasks from all tags * @param {Object} options - Move options * @param {Array} taskIds - Original task IDs * @param {string} sourceTag - Source tag name * @param {string} targetTag - Target tag name * @returns {Object} Tasks to move and dependency resolution info */ async function resolveDependencies( sourceTasks, allTasks, options, taskIds, sourceTag, targetTag ) { const { withDependencies = false, ignoreDependencies = false } = options; // Scope allTasks to the source tag to avoid cross-tag contamination when // computing dependency chains for --with-dependencies const tasksInSourceTag = Array.isArray(allTasks) ? 
allTasks.filter((t) => t && t.tag === sourceTag) : []; // Handle --with-dependencies flag first (regardless of cross-tag dependencies) if (withDependencies) { // Move dependent tasks along with main tasks // Find ALL dependencies recursively, but only using tasks from the source tag const allDependentTaskIdsRaw = findAllDependenciesRecursively( sourceTasks, tasksInSourceTag, { maxDepth: 100, includeSelf: false } ); // Filter dependent IDs to those that actually exist in the source tag const sourceTagIds = new Set( tasksInSourceTag.map((t) => typeof t.id === 'string' ? parseInt(t.id, 10) : t.id ) ); const allDependentTaskIds = allDependentTaskIdsRaw.filter((depId) => { // Only numeric task IDs are eligible to be moved (subtasks cannot be moved cross-tag) const normalizedId = normalizeDependency(depId); return Number.isFinite(normalizedId) && sourceTagIds.has(normalizedId); }); const allTaskIdsToMove = [...new Set([...taskIds, ...allDependentTaskIds])]; log( 'info', `Moving ${allTaskIdsToMove.length} tasks (including dependencies): ${allTaskIdsToMove.join(', ')}` ); return { tasksToMove: allTaskIdsToMove, dependencyResolution: { type: 'with-dependencies', dependentTasks: allDependentTaskIds } }; } // Find cross-tag dependencies (these shouldn't exist since dependencies are only within tags) const crossTagDependencies = findCrossTagDependencies( sourceTasks, sourceTag, targetTag, allTasks ); if (crossTagDependencies.length > 0) { if (ignoreDependencies) { // Break cross-tag dependencies (edge case - shouldn't normally happen) sourceTasks.forEach((task) => { const sourceTagTasks = tasksInSourceTag; const targetTagTasks = Array.isArray(allTasks) ? 
allTasks.filter((t) => t && t.tag === targetTag) : []; task.dependencies = task.dependencies.filter((depId) => { const parentTaskId = normalizeDependency(depId); // If dependency resolves to a task in the source tag, drop it (would be cross-tag after move) if ( Number.isFinite(parentTaskId) && sourceTagTasks.some((t) => t.id === parentTaskId) ) { return false; } // If dependency resolves to a task in the target tag, keep it if ( Number.isFinite(parentTaskId) && targetTagTasks.some((t) => t.id === parentTaskId) ) { return true; } // Otherwise, keep as-is (unknown/unresolved dependency) return true; }); }); log( 'warn', `Removed ${crossTagDependencies.length} cross-tag dependencies` ); return { tasksToMove: taskIds, dependencyResolution: { type: 'ignored-dependencies', conflicts: crossTagDependencies } }; } else { // Block move and show error throw new MoveTaskError( MOVE_ERROR_CODES.CROSS_TAG_DEPENDENCY_CONFLICTS, `Cannot move tasks: ${crossTagDependencies.length} cross-tag dependency conflicts found`, { conflicts: crossTagDependencies, sourceTag, targetTag, taskIds } ); } } return { tasksToMove: taskIds, dependencyResolution: { type: 'no-conflicts' } }; } /** * Execute the actual move operation * @param {Array} tasksToMove - Array of task IDs to move * @param {string} sourceTag - Source tag name * @param {string} targetTag - Target tag name * @param {Object} rawData - Raw data object * @param {Object} context - Context object * @param {string} tasksPath - Path to tasks.json file * @returns {Object} Move operation result */ async function executeMoveOperation( tasksToMove, sourceTag, targetTag, rawData, context, tasksPath ) { const { projectRoot } = context; const movedTasks = []; // Move each task from source to target tag for (const taskId of tasksToMove) { // Normalize taskId to number for comparison const normalizedTaskId = typeof taskId === 'string' ? 
parseInt(taskId, 10) : taskId;
		const sourceTaskIndex = rawData[sourceTag].tasks.findIndex(
			(t) => t.id === normalizedTaskId
		);
		if (sourceTaskIndex === -1) {
			throw new MoveTaskError(
				MOVE_ERROR_CODES.TASK_NOT_FOUND,
				`Task ${taskId} not found in source tag "${sourceTag}"`
			);
		}

		const taskToMove = rawData[sourceTag].tasks[sourceTaskIndex];

		// Check for ID conflicts in target tag
		const existingTaskIndex = rawData[targetTag].tasks.findIndex(
			(t) => t.id === normalizedTaskId
		);
		if (existingTaskIndex !== -1) {
			// Fail fast rather than overwriting or renumbering the existing task
			throw new MoveTaskError(
				MOVE_ERROR_CODES.TASK_ALREADY_EXISTS,
				`Task ${taskId} already exists in target tag "${targetTag}"`,
				{
					conflictingId: normalizedTaskId,
					targetTag,
					suggestions: [
						'Choose a different target tag without conflicting IDs',
						'Move a different set of IDs (avoid existing ones)',
						'If needed, move within-tag to a new ID first, then cross-tag move'
					]
				}
			);
		}

		// Remove from source tag
		rawData[sourceTag].tasks.splice(sourceTaskIndex, 1);

		// Preserve task metadata and add to target tag
		const taskWithPreservedMetadata = preserveTaskMetadata(
			taskToMove,
			sourceTag,
			targetTag
		);
		rawData[targetTag].tasks.push(taskWithPreservedMetadata);

		movedTasks.push({
			id: taskId,
			fromTag: sourceTag,
			toTag: targetTag
		});

		log('info', `Moved task ${taskId} from "${sourceTag}" to "${targetTag}"`);
	}

	return { rawData, movedTasks };
}

/**
 * Finalize the move operation by saving data and returning result
 * @param {Object} moveResult - Result from executeMoveOperation
 * @param {string} tasksPath - Path to tasks.json file
 * @param {Object} context - Context object
 * @param {string} sourceTag - Source tag name
 * @param {string} targetTag - Target tag name
 * @param {Object} [dependencyResolution] - Dependency resolution info from resolveDependencies
 * @returns {Object} Final result object
 */
async function finalizeMove(
	moveResult,
	tasksPath,
	context,
	sourceTag,
	targetTag,
	dependencyResolution
) {
	const { projectRoot } = context;
	const { rawData, movedTasks } = moveResult;

	// Write the updated data
	writeJSON(tasksPath, rawData, projectRoot, null);

	const response = {
		message: `Successfully moved ${movedTasks.length} tasks from "${sourceTag}" to "${targetTag}"`,
		movedTasks
	};

	// If we intentionally broke cross-tag dependencies, provide tips to validate & fix
	if (
		dependencyResolution &&
		dependencyResolution.type === 'ignored-dependencies'
	) {
		response.tips = [
			'Run "task-master validate-dependencies" to check for dependency issues.',
			'Run "task-master fix-dependencies" to automatically repair dangling dependencies.'
		];
	}

	return response;
}

/**
 * Move tasks between different tags with dependency handling
 * @param {string} tasksPath - Path to tasks.json file
 * @param {Array} taskIds - Array of task IDs to move
 * @param {string} sourceTag - Source tag name
 * @param {string} targetTag - Target tag name
 * @param {Object} options - Move options
 * @param {boolean} options.withDependencies - Move dependent tasks along with main task
 * @param {boolean} options.ignoreDependencies - Break cross-tag dependencies during move
 * @param {Object} context - Context object containing projectRoot and tag information
 * @returns {Object} Result object with moved task details
 */
async function moveTasksBetweenTags(
	tasksPath,
	taskIds,
	sourceTag,
	targetTag,
	options = {},
	context = {}
) {
	// 1. Validation phase
	const validation = await validateMove(
		tasksPath,
		taskIds,
		sourceTag,
		targetTag,
		context
	);

	// 2. Load and prepare data
	const { rawData, sourceTasks, allTasks } = await prepareTaskData(validation);

	// 3. Handle dependencies
	const { tasksToMove, dependencyResolution } = await resolveDependencies(
		sourceTasks,
		allTasks,
		options,
		taskIds,
		sourceTag,
		targetTag
	);

	// 4. Execute move
	const moveResult = await executeMoveOperation(
		tasksToMove,
		sourceTag,
		targetTag,
		rawData,
		context,
		tasksPath
	);

	// 5.
// Save and return
	return await finalizeMove(
		moveResult,
		tasksPath,
		context,
		sourceTag,
		targetTag,
		dependencyResolution
	);
}

/**
 * Detect ID conflicts in target tag
 * @param {Array} taskIds - Array of task IDs to check
 * @param {string} targetTag - Target tag name
 * @param {Object} rawData - Raw data object
 * @returns {Array} Array of conflicting task IDs
 */
function detectIdConflicts(taskIds, targetTag, rawData) {
	const conflicts = [];

	// Missing or malformed target tag means nothing can conflict
	if (!rawData[targetTag] || !Array.isArray(rawData[targetTag].tasks)) {
		return conflicts;
	}

	taskIds.forEach((taskId) => {
		// Normalize taskId to number for comparison
		const normalizedTaskId =
			typeof taskId === 'string' ? parseInt(taskId, 10) : taskId;
		const existingTask = rawData[targetTag].tasks.find(
			(t) => t.id === normalizedTaskId
		);
		if (existingTask) {
			conflicts.push(taskId);
		}
	});

	return conflicts;
}

/**
 * Preserve task metadata during cross-tag moves.
 * NOTE: mutates the task object in place (tag property and metadata.moveHistory).
 * @param {Object} task - Task object
 * @param {string} sourceTag - Source tag name
 * @param {string} targetTag - Target tag name
 * @returns {Object} Task object with preserved metadata
 */
function preserveTaskMetadata(task, sourceTag, targetTag) {
	// Update the tag property to reflect the new location
	task.tag = targetTag;

	// Add move history to task metadata
	if (!task.metadata) {
		task.metadata = {};
	}
	if (!task.metadata.moveHistory) {
		task.metadata.moveHistory = [];
	}

	task.metadata.moveHistory.push({
		fromTag: sourceTag,
		toTag: targetTag,
		timestamp: new Date().toISOString()
	});

	return task;
}

export default moveTask;
export {
	moveTasksBetweenTags,
	getAllTasksWithTags,
	detectIdConflicts,
	preserveTaskMetadata,
	MoveTaskError,
	MOVE_ERROR_CODES
};
```

--------------------------------------------------------------------------------
/tests/unit/scripts/modules/task-manager/expand-task.test.js:
--------------------------------------------------------------------------------

```javascript
/**
 * Tests for the expand-task.js module
 */
import { jest } from '@jest/globals';
import fs from 'fs';
import {
	createGetTagAwareFilePathMock,
	createSlugifyTagForFilePathMock
} from './setup.js';

// Mock the dependencies before importing the module under test
jest.unstable_mockModule('../../../../../scripts/modules/utils.js', () => ({
	readJSON: jest.fn(),
	writeJSON: jest.fn(),
	log: jest.fn(),
	CONFIG: {
		model: 'mock-claude-model',
		maxTokens: 4000,
		temperature: 0.7,
		debug: false
	},
	sanitizePrompt: jest.fn((prompt) => prompt),
	truncate: jest.fn((text) => text),
	isSilentMode: jest.fn(() => false),
	findTaskById: jest.fn(),
	findProjectRoot: jest.fn((tasksPath) => '/mock/project/root'),
	getCurrentTag: jest.fn(() => 'master'),
	ensureTagMetadata: jest.fn((tagObj) => tagObj),
	flattenTasksWithSubtasks: jest.fn((tasks) => {
		// Breadth-first flatten; subtasks get dotted ids like "1.2"
		const allTasks = [];
		const queue = [...(tasks || [])];
		while (queue.length > 0) {
			const task = queue.shift();
			allTasks.push(task);
			if (task.subtasks) {
				for (const subtask of task.subtasks) {
					queue.push({ ...subtask, id: `${task.id}.${subtask.id}` });
				}
			}
		}
		return allTasks;
	}),
	getTagAwareFilePath: createGetTagAwareFilePathMock(),
	slugifyTagForFilePath: createSlugifyTagForFilePathMock(),
	readComplexityReport: jest.fn(),
	markMigrationForNotice: jest.fn(),
	performCompleteTagMigration: jest.fn(),
	setTasksForTag: jest.fn(),
	getTasksForTag: jest.fn((data, tag) => data[tag]?.tasks || [])
}));

jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({
	displayBanner: jest.fn(),
	getStatusWithColor: jest.fn((status) => status),
	startLoadingIndicator: jest.fn(),
	stopLoadingIndicator: jest.fn(),
	succeedLoadingIndicator: jest.fn(),
	failLoadingIndicator: jest.fn(),
	warnLoadingIndicator: jest.fn(),
	infoLoadingIndicator: jest.fn(),
	displayAiUsageSummary: jest.fn(),
	displayContextAnalysis: jest.fn()
}));

// AI service mock: returns a canned 3-subtask expansion plus telemetry
jest.unstable_mockModule(
	'../../../../../scripts/modules/ai-services-unified.js',
	() => ({
		generateTextService: jest.fn().mockResolvedValue({
			mainResult: JSON.stringify({
				subtasks: [
					{
						id: 1,
						title: 'Set up project structure',
						description:
							'Create the basic project directory structure and configuration files',
						dependencies: [],
						details:
							'Initialize package.json, create src/ and test/ directories, set up linting configuration',
						status: 'pending',
						testStrategy: 'Verify all expected files and directories are created'
					},
					{
						id: 2,
						title: 'Implement core functionality',
						description: 'Develop the main application logic and core features',
						dependencies: [1],
						details:
							'Create main classes, implement business logic, set up data models',
						status: 'pending',
						testStrategy: 'Unit tests for all core functions and classes'
					},
					{
						id: 3,
						title: 'Add user interface',
						description: 'Create the user interface components and layouts',
						dependencies: [2],
						details:
							'Design UI components, implement responsive layouts, add user interactions',
						status: 'pending',
						testStrategy: 'UI tests and visual regression testing'
					}
				]
			}),
			telemetryData: {
				timestamp: new Date().toISOString(),
				userId: '1234567890',
				commandName: 'expand-task',
				modelUsed: 'claude-3-5-sonnet',
				providerName: 'anthropic',
				inputTokens: 1000,
				outputTokens: 500,
				totalTokens: 1500,
				totalCost: 0.012414,
				currency: 'USD'
			}
		})
	})
);

jest.unstable_mockModule(
	'../../../../../scripts/modules/config-manager.js',
	() => ({
		getDefaultSubtasks: jest.fn(() => 3),
		getDebugFlag: jest.fn(() => false),
		getDefaultNumTasks: jest.fn(() => 10),
		getMainProvider: jest.fn(() => 'openai'),
		getResearchProvider: jest.fn(() => 'perplexity'),
		hasCodebaseAnalysis: jest.fn(() => false)
	})
);

jest.unstable_mockModule(
	'../../../../../scripts/modules/utils/contextGatherer.js',
	() => ({
		ContextGatherer: jest.fn().mockImplementation(() => ({
			gather: jest.fn().mockResolvedValue({
				context: 'Mock project context from files'
			})
		}))
	})
);

jest.unstable_mockModule(
	'../../../../../scripts/modules/utils/fuzzyTaskSearch.js',
	() => ({
		FuzzyTaskSearch: jest.fn().mockImplementation(() => ({
			findRelevantTasks: jest.fn().mockReturnValue([]),
			getTaskIds: jest.fn().mockReturnValue([])
		}))
	})
);

jest.unstable_mockModule(
	'../../../../../scripts/modules/task-manager/generate-task-files.js',
	() => ({ default: jest.fn().mockResolvedValue() })
);

jest.unstable_mockModule(
	'../../../../../scripts/modules/prompt-manager.js',
	() => ({
		getPromptManager: jest.fn().mockReturnValue({
			loadPrompt: jest.fn().mockResolvedValue({
				systemPrompt: 'Mocked system prompt',
				userPrompt: 'Mocked user prompt'
			})
		})
	})
);

// Mock external UI libraries
jest.unstable_mockModule('chalk', () => ({
	default: {
		white: { bold: jest.fn((text) => text) },
		cyan: Object.assign(
			jest.fn((text) => text),
			{ bold: jest.fn((text) => text) }
		),
		green: jest.fn((text) => text),
		yellow: jest.fn((text) => text),
		bold: jest.fn((text) => text)
	}
}));
jest.unstable_mockModule('boxen', () => ({ default: jest.fn((text) => text) }));
jest.unstable_mockModule('cli-table3', () => ({
	default: jest.fn().mockImplementation(() => ({
		push: jest.fn(),
		toString: jest.fn(() => 'mocked table')
	}))
}));

// Mock process.exit to prevent Jest worker crashes
const mockExit = jest.spyOn(process, 'exit').mockImplementation((code) => {
	throw new Error(`process.exit called with "${code}"`);
});

// Import the mocked modules (must happen after all unstable_mockModule calls)
const {
	readJSON,
	writeJSON,
	log,
	findTaskById,
	ensureTagMetadata,
	readComplexityReport,
	findProjectRoot
} = await import('../../../../../scripts/modules/utils.js');

const { generateTextService } = await import(
	'../../../../../scripts/modules/ai-services-unified.js'
);

const generateTaskFiles = (
	await import(
		'../../../../../scripts/modules/task-manager/generate-task-files.js'
	)
).default;

const { getDefaultSubtasks } = await import(
	'../../../../../scripts/modules/config-manager.js'
);

// Import the module under test
const { default: expandTask } = await import(
	'../../../../../scripts/modules/task-manager/expand-task.js'
);

describe('expandTask', () => {
	const sampleTasks = {
		master: {
			tasks: [
				{
					id: 1,
					title: 'Task 1',
					description: 'First task',
					status: 'done',
					dependencies: [],
					details: 'Already completed task',
					subtasks: []
				},
				{
id: 2, title: 'Task 2', description: 'Second task', status: 'pending', dependencies: [], details: 'Task ready for expansion', subtasks: [] }, { id: 3, title: 'Complex Task', description: 'A complex task that needs breakdown', status: 'pending', dependencies: [1], details: 'This task involves multiple steps', subtasks: [] }, { id: 4, title: 'Task with existing subtasks', description: 'Task that already has subtasks', status: 'pending', dependencies: [], details: 'Has existing subtasks', subtasks: [ { id: 1, title: 'Existing subtask', description: 'Already exists', status: 'pending', dependencies: [] } ] } ] }, 'feature-branch': { tasks: [ { id: 1, title: 'Feature Task 1', description: 'Task in feature branch', status: 'pending', dependencies: [], details: 'Feature-specific task', subtasks: [] } ] } }; // Create a helper function for consistent mcpLog mock const createMcpLogMock = () => ({ info: jest.fn(), warn: jest.fn(), error: jest.fn(), debug: jest.fn(), success: jest.fn() }); beforeEach(() => { jest.clearAllMocks(); mockExit.mockClear(); // Default readJSON implementation - returns tagged structure readJSON.mockImplementation((tasksPath, projectRoot, tag) => { const sampleTasksCopy = JSON.parse(JSON.stringify(sampleTasks)); const selectedTag = tag || 'master'; return { ...sampleTasksCopy[selectedTag], tag: selectedTag, _rawTaggedData: sampleTasksCopy }; }); // Default findTaskById implementation findTaskById.mockImplementation((tasks, taskId) => { const id = parseInt(taskId, 10); return tasks.find((t) => t.id === id); }); // Default complexity report (no report available) readComplexityReport.mockReturnValue(null); // Mock findProjectRoot to return consistent path for complexity report findProjectRoot.mockReturnValue('/mock/project/root'); writeJSON.mockResolvedValue(); generateTaskFiles.mockResolvedValue(); log.mockImplementation(() => {}); // Mock console.log to avoid output during tests jest.spyOn(console, 'log').mockImplementation(() => {}); }); afterEach(() 
=> { console.log.mockRestore(); }); describe('Basic Functionality', () => { test('should expand a task with AI-generated subtasks', async () => { // Arrange const tasksPath = 'tasks/tasks.json'; const taskId = '2'; const numSubtasks = 3; const context = { mcpLog: createMcpLogMock(), projectRoot: '/mock/project/root' }; // Act const result = await expandTask( tasksPath, taskId, numSubtasks, false, '', context, false ); // Assert expect(readJSON).toHaveBeenCalledWith( tasksPath, '/mock/project/root', undefined ); expect(generateTextService).toHaveBeenCalledWith(expect.any(Object)); expect(writeJSON).toHaveBeenCalledWith( tasksPath, expect.objectContaining({ tasks: expect.arrayContaining([ expect.objectContaining({ id: 2, subtasks: expect.arrayContaining([ expect.objectContaining({ id: 1, title: 'Set up project structure', status: 'pending' }), expect.objectContaining({ id: 2, title: 'Implement core functionality', status: 'pending' }), expect.objectContaining({ id: 3, title: 'Add user interface', status: 'pending' }) ]) }) ]), tag: 'master', _rawTaggedData: expect.objectContaining({ master: expect.objectContaining({ tasks: expect.any(Array) }) }) }), '/mock/project/root', undefined ); expect(result).toEqual( expect.objectContaining({ task: expect.objectContaining({ id: 2, subtasks: expect.arrayContaining([ expect.objectContaining({ id: 1, title: 'Set up project structure', status: 'pending' }), expect.objectContaining({ id: 2, title: 'Implement core functionality', status: 'pending' }), expect.objectContaining({ id: 3, title: 'Add user interface', status: 'pending' }) ]) }), telemetryData: expect.any(Object) }) ); }); test('should handle research flag correctly', async () => { // Arrange const tasksPath = 'tasks/tasks.json'; const taskId = '2'; const numSubtasks = 3; const context = { mcpLog: createMcpLogMock(), projectRoot: '/mock/project/root' }; // Act await expandTask( tasksPath, taskId, numSubtasks, true, // useResearch = true 'Additional context for research', 
context, false ); // Assert expect(generateTextService).toHaveBeenCalledWith( expect.objectContaining({ role: 'research', commandName: expect.any(String) }) ); }); test('should handle complexity report integration without errors', async () => { // Arrange const tasksPath = 'tasks/tasks.json'; const taskId = '2'; const context = { mcpLog: createMcpLogMock(), projectRoot: '/mock/project/root' }; // Act & Assert - Should complete without errors const result = await expandTask( tasksPath, taskId, undefined, // numSubtasks not specified false, '', context, false ); // Assert - Should successfully expand and return expected structure expect(result).toEqual( expect.objectContaining({ task: expect.objectContaining({ id: 2, subtasks: expect.any(Array) }), telemetryData: expect.any(Object) }) ); expect(generateTextService).toHaveBeenCalled(); }); }); describe('Tag Handling (The Critical Bug Fix)', () => { test('should preserve tagged structure when expanding with default tag', async () => { // Arrange const tasksPath = 'tasks/tasks.json'; const taskId = '2'; const context = { mcpLog: createMcpLogMock(), projectRoot: '/mock/project/root', tag: 'master' // Explicit tag context }; // Act await expandTask(tasksPath, taskId, 3, false, '', context, false); // Assert - CRITICAL: Check tag is passed to readJSON and writeJSON expect(readJSON).toHaveBeenCalledWith( tasksPath, '/mock/project/root', 'master' ); expect(writeJSON).toHaveBeenCalledWith( tasksPath, expect.objectContaining({ tag: 'master', _rawTaggedData: expect.objectContaining({ master: expect.any(Object), 'feature-branch': expect.any(Object) }) }), '/mock/project/root', 'master' // CRITICAL: Tag must be passed to writeJSON ); }); test('should preserve tagged structure when expanding with non-default tag', async () => { // Arrange const tasksPath = 'tasks/tasks.json'; const taskId = '1'; // Task in feature-branch const context = { mcpLog: createMcpLogMock(), projectRoot: '/mock/project/root', tag: 'feature-branch' // 
Different tag context }; // Configure readJSON to return feature-branch data readJSON.mockImplementation((tasksPath, projectRoot, tag) => { const sampleTasksCopy = JSON.parse(JSON.stringify(sampleTasks)); return { ...sampleTasksCopy['feature-branch'], tag: 'feature-branch', _rawTaggedData: sampleTasksCopy }; }); // Act await expandTask(tasksPath, taskId, 3, false, '', context, false); // Assert - CRITICAL: Check tag preservation for non-default tag expect(readJSON).toHaveBeenCalledWith( tasksPath, '/mock/project/root', 'feature-branch' ); expect(writeJSON).toHaveBeenCalledWith( tasksPath, expect.objectContaining({ tag: 'feature-branch', _rawTaggedData: expect.objectContaining({ master: expect.any(Object), 'feature-branch': expect.any(Object) }) }), '/mock/project/root', 'feature-branch' // CRITICAL: Correct tag passed to writeJSON ); }); test('should NOT corrupt tagged structure when tag is undefined', async () => { // Arrange const tasksPath = 'tasks/tasks.json'; const taskId = '2'; const context = { mcpLog: createMcpLogMock(), projectRoot: '/mock/project/root' // No tag specified - should default gracefully }; // Act await expandTask(tasksPath, taskId, 3, false, '', context, false); // Assert - Should still preserve structure with undefined tag expect(readJSON).toHaveBeenCalledWith( tasksPath, '/mock/project/root', undefined ); expect(writeJSON).toHaveBeenCalledWith( tasksPath, expect.objectContaining({ _rawTaggedData: expect.objectContaining({ master: expect.any(Object) }) }), '/mock/project/root', undefined ); // CRITICAL: Verify structure is NOT flattened to old format const writeCallArgs = writeJSON.mock.calls[0][1]; expect(writeCallArgs).toHaveProperty('tasks'); // Should have tasks property from readJSON mock expect(writeCallArgs).toHaveProperty('_rawTaggedData'); // Should preserve tagged structure }); }); describe('Force Flag Handling', () => { test('should replace existing subtasks when force=true', async () => { // Arrange const tasksPath = 
'tasks/tasks.json'; const taskId = '4'; // Task with existing subtasks const context = { mcpLog: createMcpLogMock(), projectRoot: '/mock/project/root' }; // Act await expandTask(tasksPath, taskId, 3, false, '', context, true); // Assert - Should replace existing subtasks expect(writeJSON).toHaveBeenCalledWith( tasksPath, expect.objectContaining({ tasks: expect.arrayContaining([ expect.objectContaining({ id: 4, subtasks: expect.arrayContaining([ expect.objectContaining({ id: 1, title: 'Set up project structure' }) ]) }) ]) }), '/mock/project/root', undefined ); }); test('should append to existing subtasks when force=false', async () => { // Arrange const tasksPath = 'tasks/tasks.json'; const taskId = '4'; // Task with existing subtasks const context = { mcpLog: createMcpLogMock(), projectRoot: '/mock/project/root' }; // Act await expandTask(tasksPath, taskId, 3, false, '', context, false); // Assert - Should append to existing subtasks with proper ID increments expect(writeJSON).toHaveBeenCalledWith( tasksPath, expect.objectContaining({ tasks: expect.arrayContaining([ expect.objectContaining({ id: 4, subtasks: expect.arrayContaining([ // Should contain both existing and new subtasks expect.any(Object), expect.any(Object), expect.any(Object), expect.any(Object) // 1 existing + 3 new = 4 total ]) }) ]) }), '/mock/project/root', undefined ); }); }); describe('Complexity Report Integration (Tag-Specific)', () => { test('should use tag-specific complexity report when available', async () => { // Arrange const { getPromptManager } = await import( '../../../../../scripts/modules/prompt-manager.js' ); const mockLoadPrompt = jest.fn().mockResolvedValue({ systemPrompt: 'Generate exactly 5 subtasks for complexity report', userPrompt: 'Please break this task into 5 parts\n\nUser provided context' }); getPromptManager.mockReturnValue({ loadPrompt: mockLoadPrompt }); const tasksPath = 'tasks/tasks.json'; const taskId = '1'; // Task in feature-branch const context = { mcpLog: 
createMcpLogMock(), projectRoot: '/mock/project/root', tag: 'feature-branch', complexityReportPath: '/mock/project/root/task-complexity-report_feature-branch.json' }; // Stub fs.existsSync to simulate complexity report exists for this tag const existsSpy = jest .spyOn(fs, 'existsSync') .mockImplementation((filepath) => filepath.endsWith('task-complexity-report_feature-branch.json') ); // Stub readJSON to return complexity report when reading the report path readJSON.mockImplementation((filepath, projectRootParam, tagParam) => { if (filepath.includes('task-complexity-report_feature-branch.json')) { return { complexityAnalysis: [ { taskId: 1, complexityScore: 8, recommendedSubtasks: 5, reasoning: 'Needs five detailed steps', expansionPrompt: 'Please break this task into 5 parts' } ] }; } // Default tasks data for tasks.json const sampleTasksCopy = JSON.parse(JSON.stringify(sampleTasks)); const selectedTag = tagParam || 'master'; return { ...sampleTasksCopy[selectedTag], tag: selectedTag, _rawTaggedData: sampleTasksCopy }; }); // Act await expandTask(tasksPath, taskId, undefined, false, '', context, false); // Assert - generateTextService called with systemPrompt for 5 subtasks const callArg = generateTextService.mock.calls[0][0]; expect(callArg.systemPrompt).toContain('Generate exactly 5 subtasks'); // Assert - Should use complexity-report variant with expansion prompt expect(mockLoadPrompt).toHaveBeenCalledWith( 'expand-task', expect.objectContaining({ subtaskCount: 5, expansionPrompt: 'Please break this task into 5 parts' }), 'complexity-report' ); // Clean up stub existsSpy.mockRestore(); }); }); describe('Error Handling', () => { test('should handle non-existent task ID', async () => { // Arrange const tasksPath = 'tasks/tasks.json'; const taskId = '999'; // Non-existent task const context = { mcpLog: createMcpLogMock(), projectRoot: '/mock/project/root' }; findTaskById.mockReturnValue(null); // Act & Assert await expect( expandTask(tasksPath, taskId, 3, false, 
'', context, false) ).rejects.toThrow('Task 999 not found'); expect(writeJSON).not.toHaveBeenCalled(); }); test('should expand tasks regardless of status (including done tasks)', async () => { // Arrange const tasksPath = 'tasks/tasks.json'; const taskId = '1'; // Task with 'done' status const context = { mcpLog: createMcpLogMock(), projectRoot: '/mock/project/root' }; // Act const result = await expandTask( tasksPath, taskId, 3, false, '', context, false ); // Assert - Should successfully expand even 'done' tasks expect(writeJSON).toHaveBeenCalled(); expect(result).toEqual( expect.objectContaining({ task: expect.objectContaining({ id: 1, status: 'done', // Status unchanged subtasks: expect.arrayContaining([ expect.objectContaining({ id: 1, title: 'Set up project structure', status: 'pending' }) ]) }), telemetryData: expect.any(Object) }) ); }); test('should handle AI service failures', async () => { // Arrange const tasksPath = 'tasks/tasks.json'; const taskId = '2'; const context = { mcpLog: createMcpLogMock(), projectRoot: '/mock/project/root' }; generateTextService.mockRejectedValueOnce(new Error('AI service error')); // Act & Assert await expect( expandTask(tasksPath, taskId, 3, false, '', context, false) ).rejects.toThrow('AI service error'); expect(writeJSON).not.toHaveBeenCalled(); }); test('should handle file read errors', async () => { // Arrange const tasksPath = 'tasks/tasks.json'; const taskId = '2'; const context = { mcpLog: createMcpLogMock(), projectRoot: '/mock/project/root' }; readJSON.mockImplementation(() => { throw new Error('File read failed'); }); // Act & Assert await expect( expandTask(tasksPath, taskId, 3, false, '', context, false) ).rejects.toThrow('File read failed'); expect(writeJSON).not.toHaveBeenCalled(); }); test('should handle invalid tasks data', async () => { // Arrange const tasksPath = 'tasks/tasks.json'; const taskId = '2'; const context = { mcpLog: createMcpLogMock(), projectRoot: '/mock/project/root' }; 
readJSON.mockReturnValue(null); // Act & Assert await expect( expandTask(tasksPath, taskId, 3, false, '', context, false) ).rejects.toThrow(); }); }); describe('Output Format Handling', () => { test('should display telemetry for CLI output format', async () => { // Arrange const { displayAiUsageSummary } = await import( '../../../../../scripts/modules/ui.js' ); const tasksPath = 'tasks/tasks.json'; const taskId = '2'; const context = { projectRoot: '/mock/project/root' // No mcpLog - should trigger CLI mode }; // Act await expandTask(tasksPath, taskId, 3, false, '', context, false); // Assert - Should display telemetry for CLI users expect(displayAiUsageSummary).toHaveBeenCalledWith( expect.objectContaining({ commandName: 'expand-task', modelUsed: 'claude-3-5-sonnet', totalCost: 0.012414 }), 'cli' ); }); test('should not display telemetry for MCP output format', async () => { // Arrange const { displayAiUsageSummary } = await import( '../../../../../scripts/modules/ui.js' ); const tasksPath = 'tasks/tasks.json'; const taskId = '2'; const context = { mcpLog: createMcpLogMock(), projectRoot: '/mock/project/root' }; // Act await expandTask(tasksPath, taskId, 3, false, '', context, false); // Assert - Should NOT display telemetry for MCP (handled at higher level) expect(displayAiUsageSummary).not.toHaveBeenCalled(); }); }); describe('Edge Cases', () => { test('should handle empty additional context', async () => { // Arrange const tasksPath = 'tasks/tasks.json'; const taskId = '2'; const context = { mcpLog: createMcpLogMock(), projectRoot: '/mock/project/root' }; // Act await expandTask(tasksPath, taskId, 3, false, '', context, false); // Assert - Should work with empty context (but may include project context) expect(generateTextService).toHaveBeenCalledWith( expect.objectContaining({ prompt: expect.stringMatching(/.*/) // Just ensure prompt exists }) ); }); test('should handle additional context correctly', async () => { // Arrange const { getPromptManager } = await 
import( '../../../../../scripts/modules/prompt-manager.js' ); const mockLoadPrompt = jest.fn().mockResolvedValue({ systemPrompt: 'Mocked system prompt', userPrompt: 'Mocked user prompt with context' }); getPromptManager.mockReturnValue({ loadPrompt: mockLoadPrompt }); const tasksPath = 'tasks/tasks.json'; const taskId = '2'; const additionalContext = 'Use React hooks and TypeScript'; const context = { mcpLog: createMcpLogMock(), projectRoot: '/mock/project/root' }; // Act await expandTask( tasksPath, taskId, 3, false, additionalContext, context, false ); // Assert - Should pass separate context parameters to prompt manager expect(mockLoadPrompt).toHaveBeenCalledWith( 'expand-task', expect.objectContaining({ additionalContext: expect.stringContaining( 'Use React hooks and TypeScript' ), gatheredContext: expect.stringContaining( 'Mock project context from files' ) }), expect.any(String) ); // Additional assertion to verify the context parameters are passed separately const call = mockLoadPrompt.mock.calls[0]; const parameters = call[1]; expect(parameters.additionalContext).toContain( 'Use React hooks and TypeScript' ); expect(parameters.gatheredContext).toContain( 'Mock project context from files' ); }); test('should handle missing project root in context', async () => { // Arrange const tasksPath = 'tasks/tasks.json'; const taskId = '2'; const context = { mcpLog: createMcpLogMock() // No projectRoot in context }; // Act await expandTask(tasksPath, taskId, 3, false, '', context, false); // Assert - Should derive project root from tasksPath expect(findProjectRoot).toHaveBeenCalledWith(tasksPath); expect(readJSON).toHaveBeenCalledWith( tasksPath, '/mock/project/root', undefined ); }); }); describe('Dynamic Subtask Generation', () => { const tasksPath = 'tasks/tasks.json'; const taskId = 1; const context = { session: null, mcpLog: null }; beforeEach(() => { // Reset all mocks jest.clearAllMocks(); // Setup default mocks readJSON.mockReturnValue({ tasks: [ { id: 1, 
title: 'Test Task', description: 'A test task', status: 'pending', subtasks: [] } ] }); findTaskById.mockReturnValue({ id: 1, title: 'Test Task', description: 'A test task', status: 'pending', subtasks: [] }); findProjectRoot.mockReturnValue('/mock/project/root'); }); test('should accept 0 as valid numSubtasks value for dynamic generation', async () => { // Act - Call with numSubtasks=0 (should not throw error) const result = await expandTask( tasksPath, taskId, 0, false, '', context, false ); // Assert - Should complete successfully expect(result).toBeDefined(); expect(generateTextService).toHaveBeenCalled(); }); test('should use dynamic prompting when numSubtasks is 0', async () => { // Mock getPromptManager to return realistic prompt with dynamic content const { getPromptManager } = await import( '../../../../../scripts/modules/prompt-manager.js' ); const mockLoadPrompt = jest.fn().mockResolvedValue({ systemPrompt: 'You are an AI assistant helping with task breakdown for software development. 
You need to break down a high-level task into an appropriate number of specific subtasks that can be implemented one by one.', userPrompt: 'Break down this task into an appropriate number of specific subtasks' }); getPromptManager.mockReturnValue({ loadPrompt: mockLoadPrompt }); // Act await expandTask(tasksPath, taskId, 0, false, '', context, false); // Assert - Verify generateTextService was called expect(generateTextService).toHaveBeenCalled(); // Get the call arguments to verify the system prompt const callArgs = generateTextService.mock.calls[0][0]; expect(callArgs.systemPrompt).toContain( 'an appropriate number of specific subtasks' ); }); test('should use specific count prompting when numSubtasks is positive', async () => { // Mock getPromptManager to return realistic prompt with specific count const { getPromptManager } = await import( '../../../../../scripts/modules/prompt-manager.js' ); const mockLoadPrompt = jest.fn().mockResolvedValue({ systemPrompt: 'You are an AI assistant helping with task breakdown for software development. 
You need to break down a high-level task into 5 specific subtasks that can be implemented one by one.', userPrompt: 'Break down this task into exactly 5 specific subtasks' }); getPromptManager.mockReturnValue({ loadPrompt: mockLoadPrompt }); // Act await expandTask(tasksPath, taskId, 5, false, '', context, false); // Assert - Verify generateTextService was called expect(generateTextService).toHaveBeenCalled(); // Get the call arguments to verify the system prompt const callArgs = generateTextService.mock.calls[0][0]; expect(callArgs.systemPrompt).toContain('5 specific subtasks'); }); test('should reject negative numSubtasks values and fallback to default', async () => { // Mock getDefaultSubtasks to return a specific value getDefaultSubtasks.mockReturnValue(4); // Mock getPromptManager to return realistic prompt with default count const { getPromptManager } = await import( '../../../../../scripts/modules/prompt-manager.js' ); const mockLoadPrompt = jest.fn().mockResolvedValue({ systemPrompt: 'You are an AI assistant helping with task breakdown for software development. 
You need to break down a high-level task into 4 specific subtasks that can be implemented one by one.', userPrompt: 'Break down this task into exactly 4 specific subtasks' }); getPromptManager.mockReturnValue({ loadPrompt: mockLoadPrompt }); // Act await expandTask(tasksPath, taskId, -3, false, '', context, false); // Assert - Should use default value instead of negative expect(generateTextService).toHaveBeenCalled(); const callArgs = generateTextService.mock.calls[0][0]; expect(callArgs.systemPrompt).toContain('4 specific subtasks'); }); test('should use getDefaultSubtasks when numSubtasks is undefined', async () => { // Mock getDefaultSubtasks to return a specific value getDefaultSubtasks.mockReturnValue(6); // Mock getPromptManager to return realistic prompt with default count const { getPromptManager } = await import( '../../../../../scripts/modules/prompt-manager.js' ); const mockLoadPrompt = jest.fn().mockResolvedValue({ systemPrompt: 'You are an AI assistant helping with task breakdown for software development. 
You need to break down a high-level task into 6 specific subtasks that can be implemented one by one.', userPrompt: 'Break down this task into exactly 6 specific subtasks' }); getPromptManager.mockReturnValue({ loadPrompt: mockLoadPrompt }); // Act - Call without specifying numSubtasks (undefined) await expandTask(tasksPath, taskId, undefined, false, '', context, false); // Assert - Should use default value expect(generateTextService).toHaveBeenCalled(); const callArgs = generateTextService.mock.calls[0][0]; expect(callArgs.systemPrompt).toContain('6 specific subtasks'); }); test('should use getDefaultSubtasks when numSubtasks is null', async () => { // Mock getDefaultSubtasks to return a specific value getDefaultSubtasks.mockReturnValue(7); // Mock getPromptManager to return realistic prompt with default count const { getPromptManager } = await import( '../../../../../scripts/modules/prompt-manager.js' ); const mockLoadPrompt = jest.fn().mockResolvedValue({ systemPrompt: 'You are an AI assistant helping with task breakdown for software development. You need to break down a high-level task into 7 specific subtasks that can be implemented one by one.', userPrompt: 'Break down this task into exactly 7 specific subtasks' }); getPromptManager.mockReturnValue({ loadPrompt: mockLoadPrompt }); // Act - Call with null numSubtasks await expandTask(tasksPath, taskId, null, false, '', context, false); // Assert - Should use default value expect(generateTextService).toHaveBeenCalled(); const callArgs = generateTextService.mock.calls[0][0]; expect(callArgs.systemPrompt).toContain('7 specific subtasks'); }); }); }); ```