This is page 10 of 38. Use http://codebase.md/eyaltoledano/claude-task-master?page={x} to view the full context. # Directory Structure ``` ├── .changeset │ ├── config.json │ └── README.md ├── .claude │ ├── agents │ │ ├── task-checker.md │ │ ├── task-executor.md │ │ └── task-orchestrator.md │ ├── commands │ │ ├── dedupe.md │ │ └── tm │ │ ├── add-dependency │ │ │ └── add-dependency.md │ │ ├── add-subtask │ │ │ ├── add-subtask.md │ │ │ └── convert-task-to-subtask.md │ │ ├── add-task │ │ │ └── add-task.md │ │ ├── analyze-complexity │ │ │ └── analyze-complexity.md │ │ ├── complexity-report │ │ │ └── complexity-report.md │ │ ├── expand │ │ │ ├── expand-all-tasks.md │ │ │ └── expand-task.md │ │ ├── fix-dependencies │ │ │ └── fix-dependencies.md │ │ ├── generate │ │ │ └── generate-tasks.md │ │ ├── help.md │ │ ├── init │ │ │ ├── init-project-quick.md │ │ │ └── init-project.md │ │ ├── learn.md │ │ ├── list │ │ │ ├── list-tasks-by-status.md │ │ │ ├── list-tasks-with-subtasks.md │ │ │ └── list-tasks.md │ │ ├── models │ │ │ ├── setup-models.md │ │ │ └── view-models.md │ │ ├── next │ │ │ └── next-task.md │ │ ├── parse-prd │ │ │ ├── parse-prd-with-research.md │ │ │ └── parse-prd.md │ │ ├── remove-dependency │ │ │ └── remove-dependency.md │ │ ├── remove-subtask │ │ │ └── remove-subtask.md │ │ ├── remove-subtasks │ │ │ ├── remove-all-subtasks.md │ │ │ └── remove-subtasks.md │ │ ├── remove-task │ │ │ └── remove-task.md │ │ ├── set-status │ │ │ ├── to-cancelled.md │ │ │ ├── to-deferred.md │ │ │ ├── to-done.md │ │ │ ├── to-in-progress.md │ │ │ ├── to-pending.md │ │ │ └── to-review.md │ │ ├── setup │ │ │ ├── install-taskmaster.md │ │ │ └── quick-install-taskmaster.md │ │ ├── show │ │ │ └── show-task.md │ │ ├── status │ │ │ └── project-status.md │ │ ├── sync-readme │ │ │ └── sync-readme.md │ │ ├── tm-main.md │ │ ├── update │ │ │ ├── update-single-task.md │ │ │ ├── update-task.md │ │ │ └── update-tasks-from-id.md │ │ ├── utils │ │ │ └── analyze-project.md │ │ ├── validate-dependencies │ 
│ │ └── validate-dependencies.md │ │ └── workflows │ │ ├── auto-implement-tasks.md │ │ ├── command-pipeline.md │ │ └── smart-workflow.md │ └── TM_COMMANDS_GUIDE.md ├── .coderabbit.yaml ├── .cursor │ ├── mcp.json │ └── rules │ ├── ai_providers.mdc │ ├── ai_services.mdc │ ├── architecture.mdc │ ├── changeset.mdc │ ├── commands.mdc │ ├── context_gathering.mdc │ ├── cursor_rules.mdc │ ├── dependencies.mdc │ ├── dev_workflow.mdc │ ├── git_workflow.mdc │ ├── glossary.mdc │ ├── mcp.mdc │ ├── new_features.mdc │ ├── self_improve.mdc │ ├── tags.mdc │ ├── taskmaster.mdc │ ├── tasks.mdc │ ├── telemetry.mdc │ ├── test_workflow.mdc │ ├── tests.mdc │ ├── ui.mdc │ └── utilities.mdc ├── .cursorignore ├── .env.example ├── .github │ ├── ISSUE_TEMPLATE │ │ ├── bug_report.md │ │ ├── enhancements---feature-requests.md │ │ └── feedback.md │ ├── PULL_REQUEST_TEMPLATE │ │ ├── bugfix.md │ │ ├── config.yml │ │ ├── feature.md │ │ └── integration.md │ ├── PULL_REQUEST_TEMPLATE.md │ ├── scripts │ │ ├── auto-close-duplicates.mjs │ │ ├── backfill-duplicate-comments.mjs │ │ ├── check-pre-release-mode.mjs │ │ ├── parse-metrics.mjs │ │ ├── release.mjs │ │ ├── tag-extension.mjs │ │ └── utils.mjs │ └── workflows │ ├── auto-close-duplicates.yml │ ├── backfill-duplicate-comments.yml │ ├── ci.yml │ ├── claude-dedupe-issues.yml │ ├── claude-docs-trigger.yml │ ├── claude-docs-updater.yml │ ├── claude-issue-triage.yml │ ├── claude.yml │ ├── extension-ci.yml │ ├── extension-release.yml │ ├── log-issue-events.yml │ ├── pre-release.yml │ ├── release-check.yml │ ├── release.yml │ ├── update-models-md.yml │ └── weekly-metrics-discord.yml ├── .gitignore ├── .kiro │ ├── hooks │ │ ├── tm-code-change-task-tracker.kiro.hook │ │ ├── tm-complexity-analyzer.kiro.hook │ │ ├── tm-daily-standup-assistant.kiro.hook │ │ ├── tm-git-commit-task-linker.kiro.hook │ │ ├── tm-pr-readiness-checker.kiro.hook │ │ ├── tm-task-dependency-auto-progression.kiro.hook │ │ └── tm-test-success-task-completer.kiro.hook │ ├── settings │ │ └── 
mcp.json │ └── steering │ ├── dev_workflow.md │ ├── kiro_rules.md │ ├── self_improve.md │ ├── taskmaster_hooks_workflow.md │ └── taskmaster.md ├── .manypkg.json ├── .mcp.json ├── .npmignore ├── .nvmrc ├── .taskmaster │ ├── CLAUDE.md │ ├── config.json │ ├── docs │ │ ├── MIGRATION-ROADMAP.md │ │ ├── prd-tm-start.txt │ │ ├── prd.txt │ │ ├── README.md │ │ ├── research │ │ │ ├── 2025-06-14_how-can-i-improve-the-scope-up-and-scope-down-comm.md │ │ │ ├── 2025-06-14_should-i-be-using-any-specific-libraries-for-this.md │ │ │ ├── 2025-06-14_test-save-functionality.md │ │ │ ├── 2025-06-14_test-the-fix-for-duplicate-saves-final-test.md │ │ │ └── 2025-08-01_do-we-need-to-add-new-commands-or-can-we-just-weap.md │ │ ├── task-template-importing-prd.txt │ │ ├── test-prd.txt │ │ └── tm-core-phase-1.txt │ ├── reports │ │ ├── task-complexity-report_cc-kiro-hooks.json │ │ ├── task-complexity-report_test-prd-tag.json │ │ ├── task-complexity-report_tm-core-phase-1.json │ │ ├── task-complexity-report.json │ │ └── tm-core-complexity.json │ ├── state.json │ ├── tasks │ │ ├── task_001_tm-start.txt │ │ ├── task_002_tm-start.txt │ │ ├── task_003_tm-start.txt │ │ ├── task_004_tm-start.txt │ │ ├── task_007_tm-start.txt │ │ └── tasks.json │ └── templates │ └── example_prd.txt ├── .vscode │ ├── extensions.json │ └── settings.json ├── apps │ ├── cli │ │ ├── CHANGELOG.md │ │ ├── package.json │ │ ├── src │ │ │ ├── commands │ │ │ │ ├── auth.command.ts │ │ │ │ ├── context.command.ts │ │ │ │ ├── list.command.ts │ │ │ │ ├── set-status.command.ts │ │ │ │ ├── show.command.ts │ │ │ │ └── start.command.ts │ │ │ ├── index.ts │ │ │ ├── ui │ │ │ │ ├── components │ │ │ │ │ ├── dashboard.component.ts │ │ │ │ │ ├── header.component.ts │ │ │ │ │ ├── index.ts │ │ │ │ │ ├── next-task.component.ts │ │ │ │ │ ├── suggested-steps.component.ts │ │ │ │ │ └── task-detail.component.ts │ │ │ │ └── index.ts │ │ │ └── utils │ │ │ ├── auto-update.ts │ │ │ └── ui.ts │ │ └── tsconfig.json │ ├── docs │ │ ├── archive │ │ │ ├── 
ai-client-utils-example.mdx │ │ │ ├── ai-development-workflow.mdx │ │ │ ├── command-reference.mdx │ │ │ ├── configuration.mdx │ │ │ ├── cursor-setup.mdx │ │ │ ├── examples.mdx │ │ │ └── Installation.mdx │ │ ├── best-practices │ │ │ ├── advanced-tasks.mdx │ │ │ ├── configuration-advanced.mdx │ │ │ └── index.mdx │ │ ├── capabilities │ │ │ ├── cli-root-commands.mdx │ │ │ ├── index.mdx │ │ │ ├── mcp.mdx │ │ │ └── task-structure.mdx │ │ ├── CHANGELOG.md │ │ ├── docs.json │ │ ├── favicon.svg │ │ ├── getting-started │ │ │ ├── contribute.mdx │ │ │ ├── faq.mdx │ │ │ └── quick-start │ │ │ ├── configuration-quick.mdx │ │ │ ├── execute-quick.mdx │ │ │ ├── installation.mdx │ │ │ ├── moving-forward.mdx │ │ │ ├── prd-quick.mdx │ │ │ ├── quick-start.mdx │ │ │ ├── requirements.mdx │ │ │ ├── rules-quick.mdx │ │ │ └── tasks-quick.mdx │ │ ├── introduction.mdx │ │ ├── licensing.md │ │ ├── logo │ │ │ ├── dark.svg │ │ │ ├── light.svg │ │ │ └── task-master-logo.png │ │ ├── package.json │ │ ├── README.md │ │ ├── style.css │ │ ├── vercel.json │ │ └── whats-new.mdx │ └── extension │ ├── .vscodeignore │ ├── assets │ │ ├── banner.png │ │ ├── icon-dark.svg │ │ ├── icon-light.svg │ │ ├── icon.png │ │ ├── screenshots │ │ │ ├── kanban-board.png │ │ │ └── task-details.png │ │ └── sidebar-icon.svg │ ├── CHANGELOG.md │ ├── components.json │ ├── docs │ │ ├── extension-CI-setup.md │ │ └── extension-development-guide.md │ ├── esbuild.js │ ├── LICENSE │ ├── package.json │ ├── package.mjs │ ├── package.publish.json │ ├── README.md │ ├── src │ │ ├── components │ │ │ ├── ConfigView.tsx │ │ │ ├── constants.ts │ │ │ ├── TaskDetails │ │ │ │ ├── AIActionsSection.tsx │ │ │ │ ├── DetailsSection.tsx │ │ │ │ ├── PriorityBadge.tsx │ │ │ │ ├── SubtasksSection.tsx │ │ │ │ ├── TaskMetadataSidebar.tsx │ │ │ │ └── useTaskDetails.ts │ │ │ ├── TaskDetailsView.tsx │ │ │ ├── TaskMasterLogo.tsx │ │ │ └── ui │ │ │ ├── badge.tsx │ │ │ ├── breadcrumb.tsx │ │ │ ├── button.tsx │ │ │ ├── card.tsx │ │ │ ├── collapsible.tsx │ │ │ ├── 
CollapsibleSection.tsx │ │ │ ├── dropdown-menu.tsx │ │ │ ├── label.tsx │ │ │ ├── scroll-area.tsx │ │ │ ├── separator.tsx │ │ │ ├── shadcn-io │ │ │ │ └── kanban │ │ │ │ └── index.tsx │ │ │ └── textarea.tsx │ │ ├── extension.ts │ │ ├── index.ts │ │ ├── lib │ │ │ └── utils.ts │ │ ├── services │ │ │ ├── config-service.ts │ │ │ ├── error-handler.ts │ │ │ ├── notification-preferences.ts │ │ │ ├── polling-service.ts │ │ │ ├── polling-strategies.ts │ │ │ ├── sidebar-webview-manager.ts │ │ │ ├── task-repository.ts │ │ │ ├── terminal-manager.ts │ │ │ └── webview-manager.ts │ │ ├── test │ │ │ └── extension.test.ts │ │ ├── utils │ │ │ ├── configManager.ts │ │ │ ├── connectionManager.ts │ │ │ ├── errorHandler.ts │ │ │ ├── event-emitter.ts │ │ │ ├── logger.ts │ │ │ ├── mcpClient.ts │ │ │ ├── notificationPreferences.ts │ │ │ └── task-master-api │ │ │ ├── cache │ │ │ │ └── cache-manager.ts │ │ │ ├── index.ts │ │ │ ├── mcp-client.ts │ │ │ ├── transformers │ │ │ │ └── task-transformer.ts │ │ │ └── types │ │ │ └── index.ts │ │ └── webview │ │ ├── App.tsx │ │ ├── components │ │ │ ├── AppContent.tsx │ │ │ ├── EmptyState.tsx │ │ │ ├── ErrorBoundary.tsx │ │ │ ├── PollingStatus.tsx │ │ │ ├── PriorityBadge.tsx │ │ │ ├── SidebarView.tsx │ │ │ ├── TagDropdown.tsx │ │ │ ├── TaskCard.tsx │ │ │ ├── TaskEditModal.tsx │ │ │ ├── TaskMasterKanban.tsx │ │ │ ├── ToastContainer.tsx │ │ │ └── ToastNotification.tsx │ │ ├── constants │ │ │ └── index.ts │ │ ├── contexts │ │ │ └── VSCodeContext.tsx │ │ ├── hooks │ │ │ ├── useTaskQueries.ts │ │ │ ├── useVSCodeMessages.ts │ │ │ └── useWebviewHeight.ts │ │ ├── index.css │ │ ├── index.tsx │ │ ├── providers │ │ │ └── QueryProvider.tsx │ │ ├── reducers │ │ │ └── appReducer.ts │ │ ├── sidebar.tsx │ │ ├── types │ │ │ └── index.ts │ │ └── utils │ │ ├── logger.ts │ │ └── toast.ts │ └── tsconfig.json ├── assets │ ├── .windsurfrules │ ├── AGENTS.md │ ├── claude │ │ ├── agents │ │ │ ├── task-checker.md │ │ │ ├── task-executor.md │ │ │ └── task-orchestrator.md │ │ ├── 
commands │ │ │ └── tm │ │ │ ├── add-dependency │ │ │ │ └── add-dependency.md │ │ │ ├── add-subtask │ │ │ │ ├── add-subtask.md │ │ │ │ └── convert-task-to-subtask.md │ │ │ ├── add-task │ │ │ │ └── add-task.md │ │ │ ├── analyze-complexity │ │ │ │ └── analyze-complexity.md │ │ │ ├── clear-subtasks │ │ │ │ ├── clear-all-subtasks.md │ │ │ │ └── clear-subtasks.md │ │ │ ├── complexity-report │ │ │ │ └── complexity-report.md │ │ │ ├── expand │ │ │ │ ├── expand-all-tasks.md │ │ │ │ └── expand-task.md │ │ │ ├── fix-dependencies │ │ │ │ └── fix-dependencies.md │ │ │ ├── generate │ │ │ │ └── generate-tasks.md │ │ │ ├── help.md │ │ │ ├── init │ │ │ │ ├── init-project-quick.md │ │ │ │ └── init-project.md │ │ │ ├── learn.md │ │ │ ├── list │ │ │ │ ├── list-tasks-by-status.md │ │ │ │ ├── list-tasks-with-subtasks.md │ │ │ │ └── list-tasks.md │ │ │ ├── models │ │ │ │ ├── setup-models.md │ │ │ │ └── view-models.md │ │ │ ├── next │ │ │ │ └── next-task.md │ │ │ ├── parse-prd │ │ │ │ ├── parse-prd-with-research.md │ │ │ │ └── parse-prd.md │ │ │ ├── remove-dependency │ │ │ │ └── remove-dependency.md │ │ │ ├── remove-subtask │ │ │ │ └── remove-subtask.md │ │ │ ├── remove-subtasks │ │ │ │ ├── remove-all-subtasks.md │ │ │ │ └── remove-subtasks.md │ │ │ ├── remove-task │ │ │ │ └── remove-task.md │ │ │ ├── set-status │ │ │ │ ├── to-cancelled.md │ │ │ │ ├── to-deferred.md │ │ │ │ ├── to-done.md │ │ │ │ ├── to-in-progress.md │ │ │ │ ├── to-pending.md │ │ │ │ └── to-review.md │ │ │ ├── setup │ │ │ │ ├── install-taskmaster.md │ │ │ │ └── quick-install-taskmaster.md │ │ │ ├── show │ │ │ │ └── show-task.md │ │ │ ├── status │ │ │ │ └── project-status.md │ │ │ ├── sync-readme │ │ │ │ └── sync-readme.md │ │ │ ├── tm-main.md │ │ │ ├── update │ │ │ │ ├── update-single-task.md │ │ │ │ ├── update-task.md │ │ │ │ └── update-tasks-from-id.md │ │ │ ├── utils │ │ │ │ └── analyze-project.md │ │ │ ├── validate-dependencies │ │ │ │ └── validate-dependencies.md │ │ │ └── workflows │ │ │ ├── auto-implement-tasks.md 
│ │ │ ├── command-pipeline.md │ │ │ └── smart-workflow.md │ │ └── TM_COMMANDS_GUIDE.md │ ├── config.json │ ├── env.example │ ├── example_prd.txt │ ├── gitignore │ ├── kiro-hooks │ │ ├── tm-code-change-task-tracker.kiro.hook │ │ ├── tm-complexity-analyzer.kiro.hook │ │ ├── tm-daily-standup-assistant.kiro.hook │ │ ├── tm-git-commit-task-linker.kiro.hook │ │ ├── tm-pr-readiness-checker.kiro.hook │ │ ├── tm-task-dependency-auto-progression.kiro.hook │ │ └── tm-test-success-task-completer.kiro.hook │ ├── roocode │ │ ├── .roo │ │ │ ├── rules-architect │ │ │ │ └── architect-rules │ │ │ ├── rules-ask │ │ │ │ └── ask-rules │ │ │ ├── rules-code │ │ │ │ └── code-rules │ │ │ ├── rules-debug │ │ │ │ └── debug-rules │ │ │ ├── rules-orchestrator │ │ │ │ └── orchestrator-rules │ │ │ └── rules-test │ │ │ └── test-rules │ │ └── .roomodes │ ├── rules │ │ ├── cursor_rules.mdc │ │ ├── dev_workflow.mdc │ │ ├── self_improve.mdc │ │ ├── taskmaster_hooks_workflow.mdc │ │ └── taskmaster.mdc │ └── scripts_README.md ├── bin │ └── task-master.js ├── biome.json ├── CHANGELOG.md ├── CLAUDE.md ├── context │ ├── chats │ │ ├── add-task-dependencies-1.md │ │ └── max-min-tokens.txt.md │ ├── fastmcp-core.txt │ ├── fastmcp-docs.txt │ ├── MCP_INTEGRATION.md │ ├── mcp-js-sdk-docs.txt │ ├── mcp-protocol-repo.txt │ ├── mcp-protocol-schema-03262025.json │ └── mcp-protocol-spec.txt ├── CONTRIBUTING.md ├── docs │ ├── CLI-COMMANDER-PATTERN.md │ ├── command-reference.md │ ├── configuration.md │ ├── contributor-docs │ │ └── testing-roo-integration.md │ ├── cross-tag-task-movement.md │ ├── examples │ │ └── claude-code-usage.md │ ├── examples.md │ ├── licensing.md │ ├── mcp-provider-guide.md │ ├── mcp-provider.md │ ├── migration-guide.md │ ├── models.md │ ├── providers │ │ └── gemini-cli.md │ ├── README.md │ ├── scripts │ │ └── models-json-to-markdown.js │ ├── task-structure.md │ └── tutorial.md ├── images │ └── logo.png ├── index.js ├── jest.config.js ├── jest.resolver.cjs ├── LICENSE ├── llms-install.md ├── 
mcp-server │ ├── server.js │ └── src │ ├── core │ │ ├── __tests__ │ │ │ └── context-manager.test.js │ │ ├── context-manager.js │ │ ├── direct-functions │ │ │ ├── add-dependency.js │ │ │ ├── add-subtask.js │ │ │ ├── add-tag.js │ │ │ ├── add-task.js │ │ │ ├── analyze-task-complexity.js │ │ │ ├── cache-stats.js │ │ │ ├── clear-subtasks.js │ │ │ ├── complexity-report.js │ │ │ ├── copy-tag.js │ │ │ ├── create-tag-from-branch.js │ │ │ ├── delete-tag.js │ │ │ ├── expand-all-tasks.js │ │ │ ├── expand-task.js │ │ │ ├── fix-dependencies.js │ │ │ ├── generate-task-files.js │ │ │ ├── initialize-project.js │ │ │ ├── list-tags.js │ │ │ ├── list-tasks.js │ │ │ ├── models.js │ │ │ ├── move-task-cross-tag.js │ │ │ ├── move-task.js │ │ │ ├── next-task.js │ │ │ ├── parse-prd.js │ │ │ ├── remove-dependency.js │ │ │ ├── remove-subtask.js │ │ │ ├── remove-task.js │ │ │ ├── rename-tag.js │ │ │ ├── research.js │ │ │ ├── response-language.js │ │ │ ├── rules.js │ │ │ ├── scope-down.js │ │ │ ├── scope-up.js │ │ │ ├── set-task-status.js │ │ │ ├── show-task.js │ │ │ ├── update-subtask-by-id.js │ │ │ ├── update-task-by-id.js │ │ │ ├── update-tasks.js │ │ │ ├── use-tag.js │ │ │ └── validate-dependencies.js │ │ ├── task-master-core.js │ │ └── utils │ │ ├── env-utils.js │ │ └── path-utils.js │ ├── custom-sdk │ │ ├── errors.js │ │ ├── index.js │ │ ├── json-extractor.js │ │ ├── language-model.js │ │ ├── message-converter.js │ │ └── schema-converter.js │ ├── index.js │ ├── logger.js │ ├── providers │ │ └── mcp-provider.js │ └── tools │ ├── add-dependency.js │ ├── add-subtask.js │ ├── add-tag.js │ ├── add-task.js │ ├── analyze.js │ ├── clear-subtasks.js │ ├── complexity-report.js │ ├── copy-tag.js │ ├── delete-tag.js │ ├── expand-all.js │ ├── expand-task.js │ ├── fix-dependencies.js │ ├── generate.js │ ├── get-operation-status.js │ ├── get-task.js │ ├── get-tasks.js │ ├── index.js │ ├── initialize-project.js │ ├── list-tags.js │ ├── models.js │ ├── move-task.js │ ├── next-task.js │ ├── parse-prd.js │ 
├── remove-dependency.js │ ├── remove-subtask.js │ ├── remove-task.js │ ├── rename-tag.js │ ├── research.js │ ├── response-language.js │ ├── rules.js │ ├── scope-down.js │ ├── scope-up.js │ ├── set-task-status.js │ ├── update-subtask.js │ ├── update-task.js │ ├── update.js │ ├── use-tag.js │ ├── utils.js │ └── validate-dependencies.js ├── mcp-test.js ├── output.json ├── package-lock.json ├── package.json ├── packages │ ├── build-config │ │ ├── CHANGELOG.md │ │ ├── package.json │ │ ├── src │ │ │ └── tsdown.base.ts │ │ └── tsconfig.json │ └── tm-core │ ├── .gitignore │ ├── CHANGELOG.md │ ├── docs │ │ └── listTasks-architecture.md │ ├── package.json │ ├── POC-STATUS.md │ ├── README.md │ ├── src │ │ ├── auth │ │ │ ├── auth-manager.test.ts │ │ │ ├── auth-manager.ts │ │ │ ├── config.ts │ │ │ ├── credential-store.test.ts │ │ │ ├── credential-store.ts │ │ │ ├── index.ts │ │ │ ├── oauth-service.ts │ │ │ ├── supabase-session-storage.ts │ │ │ └── types.ts │ │ ├── clients │ │ │ ├── index.ts │ │ │ └── supabase-client.ts │ │ ├── config │ │ │ ├── config-manager.spec.ts │ │ │ ├── config-manager.ts │ │ │ ├── index.ts │ │ │ └── services │ │ │ ├── config-loader.service.spec.ts │ │ │ ├── config-loader.service.ts │ │ │ ├── config-merger.service.spec.ts │ │ │ ├── config-merger.service.ts │ │ │ ├── config-persistence.service.spec.ts │ │ │ ├── config-persistence.service.ts │ │ │ ├── environment-config-provider.service.spec.ts │ │ │ ├── environment-config-provider.service.ts │ │ │ ├── index.ts │ │ │ ├── runtime-state-manager.service.spec.ts │ │ │ └── runtime-state-manager.service.ts │ │ ├── constants │ │ │ └── index.ts │ │ ├── entities │ │ │ └── task.entity.ts │ │ ├── errors │ │ │ ├── index.ts │ │ │ └── task-master-error.ts │ │ ├── executors │ │ │ ├── base-executor.ts │ │ │ ├── claude-executor.ts │ │ │ ├── executor-factory.ts │ │ │ ├── executor-service.ts │ │ │ ├── index.ts │ │ │ └── types.ts │ │ ├── index.ts │ │ ├── interfaces │ │ │ ├── ai-provider.interface.ts │ │ │ ├── 
configuration.interface.ts │ │ │ ├── index.ts │ │ │ └── storage.interface.ts │ │ ├── logger │ │ │ ├── factory.ts │ │ │ ├── index.ts │ │ │ └── logger.ts │ │ ├── mappers │ │ │ └── TaskMapper.ts │ │ ├── parser │ │ │ └── index.ts │ │ ├── providers │ │ │ ├── ai │ │ │ │ ├── base-provider.ts │ │ │ │ └── index.ts │ │ │ └── index.ts │ │ ├── repositories │ │ │ ├── supabase-task-repository.ts │ │ │ └── task-repository.interface.ts │ │ ├── services │ │ │ ├── index.ts │ │ │ ├── organization.service.ts │ │ │ ├── task-execution-service.ts │ │ │ └── task-service.ts │ │ ├── storage │ │ │ ├── api-storage.ts │ │ │ ├── file-storage │ │ │ │ ├── file-operations.ts │ │ │ │ ├── file-storage.ts │ │ │ │ ├── format-handler.ts │ │ │ │ ├── index.ts │ │ │ │ └── path-resolver.ts │ │ │ ├── index.ts │ │ │ └── storage-factory.ts │ │ ├── subpath-exports.test.ts │ │ ├── task-master-core.ts │ │ ├── types │ │ │ ├── database.types.ts │ │ │ ├── index.ts │ │ │ └── legacy.ts │ │ └── utils │ │ ├── id-generator.ts │ │ └── index.ts │ ├── tests │ │ ├── integration │ │ │ └── list-tasks.test.ts │ │ ├── mocks │ │ │ └── mock-provider.ts │ │ ├── setup.ts │ │ └── unit │ │ ├── base-provider.test.ts │ │ ├── executor.test.ts │ │ └── smoke.test.ts │ ├── tsconfig.json │ └── vitest.config.ts ├── README-task-master.md ├── README.md ├── scripts │ ├── dev.js │ ├── init.js │ ├── modules │ │ ├── ai-services-unified.js │ │ ├── commands.js │ │ ├── config-manager.js │ │ ├── dependency-manager.js │ │ ├── index.js │ │ ├── prompt-manager.js │ │ ├── supported-models.json │ │ ├── sync-readme.js │ │ ├── task-manager │ │ │ ├── add-subtask.js │ │ │ ├── add-task.js │ │ │ ├── analyze-task-complexity.js │ │ │ ├── clear-subtasks.js │ │ │ ├── expand-all-tasks.js │ │ │ ├── expand-task.js │ │ │ ├── find-next-task.js │ │ │ ├── generate-task-files.js │ │ │ ├── is-task-dependent.js │ │ │ ├── list-tasks.js │ │ │ ├── migrate.js │ │ │ ├── models.js │ │ │ ├── move-task.js │ │ │ ├── parse-prd │ │ │ │ ├── index.js │ │ │ │ ├── parse-prd-config.js │ │ │ │ 
├── parse-prd-helpers.js │ │ │ │ ├── parse-prd-non-streaming.js │ │ │ │ ├── parse-prd-streaming.js │ │ │ │ └── parse-prd.js │ │ │ ├── remove-subtask.js │ │ │ ├── remove-task.js │ │ │ ├── research.js │ │ │ ├── response-language.js │ │ │ ├── scope-adjustment.js │ │ │ ├── set-task-status.js │ │ │ ├── tag-management.js │ │ │ ├── task-exists.js │ │ │ ├── update-single-task-status.js │ │ │ ├── update-subtask-by-id.js │ │ │ ├── update-task-by-id.js │ │ │ └── update-tasks.js │ │ ├── task-manager.js │ │ ├── ui.js │ │ ├── update-config-tokens.js │ │ ├── utils │ │ │ ├── contextGatherer.js │ │ │ ├── fuzzyTaskSearch.js │ │ │ └── git-utils.js │ │ └── utils.js │ ├── task-complexity-report.json │ ├── test-claude-errors.js │ └── test-claude.js ├── src │ ├── ai-providers │ │ ├── anthropic.js │ │ ├── azure.js │ │ ├── base-provider.js │ │ ├── bedrock.js │ │ ├── claude-code.js │ │ ├── custom-sdk │ │ │ ├── claude-code │ │ │ │ ├── errors.js │ │ │ │ ├── index.js │ │ │ │ ├── json-extractor.js │ │ │ │ ├── language-model.js │ │ │ │ ├── message-converter.js │ │ │ │ └── types.js │ │ │ └── grok-cli │ │ │ ├── errors.js │ │ │ ├── index.js │ │ │ ├── json-extractor.js │ │ │ ├── language-model.js │ │ │ ├── message-converter.js │ │ │ └── types.js │ │ ├── gemini-cli.js │ │ ├── google-vertex.js │ │ ├── google.js │ │ ├── grok-cli.js │ │ ├── groq.js │ │ ├── index.js │ │ ├── ollama.js │ │ ├── openai.js │ │ ├── openrouter.js │ │ ├── perplexity.js │ │ └── xai.js │ ├── constants │ │ ├── commands.js │ │ ├── paths.js │ │ ├── profiles.js │ │ ├── providers.js │ │ ├── rules-actions.js │ │ ├── task-priority.js │ │ └── task-status.js │ ├── profiles │ │ ├── amp.js │ │ ├── base-profile.js │ │ ├── claude.js │ │ ├── cline.js │ │ ├── codex.js │ │ ├── cursor.js │ │ ├── gemini.js │ │ ├── index.js │ │ ├── kilo.js │ │ ├── kiro.js │ │ ├── opencode.js │ │ ├── roo.js │ │ ├── trae.js │ │ ├── vscode.js │ │ ├── windsurf.js │ │ └── zed.js │ ├── progress │ │ ├── base-progress-tracker.js │ │ ├── cli-progress-factory.js │ │ ├── 
parse-prd-tracker.js │ │ ├── progress-tracker-builder.js │ │ └── tracker-ui.js │ ├── prompts │ │ ├── add-task.json │ │ ├── analyze-complexity.json │ │ ├── expand-task.json │ │ ├── parse-prd.json │ │ ├── README.md │ │ ├── research.json │ │ ├── schemas │ │ │ ├── parameter.schema.json │ │ │ ├── prompt-template.schema.json │ │ │ ├── README.md │ │ │ └── variant.schema.json │ │ ├── update-subtask.json │ │ ├── update-task.json │ │ └── update-tasks.json │ ├── provider-registry │ │ └── index.js │ ├── task-master.js │ ├── ui │ │ ├── confirm.js │ │ ├── indicators.js │ │ └── parse-prd.js │ └── utils │ ├── asset-resolver.js │ ├── create-mcp-config.js │ ├── format.js │ ├── getVersion.js │ ├── logger-utils.js │ ├── manage-gitignore.js │ ├── path-utils.js │ ├── profiles.js │ ├── rule-transformer.js │ ├── stream-parser.js │ └── timeout-manager.js ├── test-clean-tags.js ├── test-config-manager.js ├── test-prd.txt ├── test-tag-functions.js ├── test-version-check-full.js ├── test-version-check.js ├── tests │ ├── e2e │ │ ├── e2e_helpers.sh │ │ ├── parse_llm_output.cjs │ │ ├── run_e2e.sh │ │ ├── run_fallback_verification.sh │ │ └── test_llm_analysis.sh │ ├── fixture │ │ └── test-tasks.json │ ├── fixtures │ │ ├── .taskmasterconfig │ │ ├── sample-claude-response.js │ │ ├── sample-prd.txt │ │ └── sample-tasks.js │ ├── integration │ │ ├── claude-code-optional.test.js │ │ ├── cli │ │ │ ├── commands.test.js │ │ │ ├── complex-cross-tag-scenarios.test.js │ │ │ └── move-cross-tag.test.js │ │ ├── manage-gitignore.test.js │ │ ├── mcp-server │ │ │ └── direct-functions.test.js │ │ ├── move-task-cross-tag.integration.test.js │ │ ├── move-task-simple.integration.test.js │ │ └── profiles │ │ ├── amp-init-functionality.test.js │ │ ├── claude-init-functionality.test.js │ │ ├── cline-init-functionality.test.js │ │ ├── codex-init-functionality.test.js │ │ ├── cursor-init-functionality.test.js │ │ ├── gemini-init-functionality.test.js │ │ ├── opencode-init-functionality.test.js │ │ ├── 
roo-files-inclusion.test.js │ │ ├── roo-init-functionality.test.js │ │ ├── rules-files-inclusion.test.js │ │ ├── trae-init-functionality.test.js │ │ ├── vscode-init-functionality.test.js │ │ └── windsurf-init-functionality.test.js │ ├── manual │ │ ├── progress │ │ │ ├── parse-prd-analysis.js │ │ │ ├── test-parse-prd.js │ │ │ └── TESTING_GUIDE.md │ │ └── prompts │ │ ├── prompt-test.js │ │ └── README.md │ ├── README.md │ ├── setup.js │ └── unit │ ├── ai-providers │ │ ├── claude-code.test.js │ │ ├── custom-sdk │ │ │ └── claude-code │ │ │ └── language-model.test.js │ │ ├── gemini-cli.test.js │ │ ├── mcp-components.test.js │ │ └── openai.test.js │ ├── ai-services-unified.test.js │ ├── commands.test.js │ ├── config-manager.test.js │ ├── config-manager.test.mjs │ ├── dependency-manager.test.js │ ├── init.test.js │ ├── initialize-project.test.js │ ├── kebab-case-validation.test.js │ ├── manage-gitignore.test.js │ ├── mcp │ │ └── tools │ │ ├── __mocks__ │ │ │ └── move-task.js │ │ ├── add-task.test.js │ │ ├── analyze-complexity.test.js │ │ ├── expand-all.test.js │ │ ├── get-tasks.test.js │ │ ├── initialize-project.test.js │ │ ├── move-task-cross-tag-options.test.js │ │ ├── move-task-cross-tag.test.js │ │ └── remove-task.test.js │ ├── mcp-providers │ │ ├── mcp-components.test.js │ │ └── mcp-provider.test.js │ ├── parse-prd.test.js │ ├── profiles │ │ ├── amp-integration.test.js │ │ ├── claude-integration.test.js │ │ ├── cline-integration.test.js │ │ ├── codex-integration.test.js │ │ ├── cursor-integration.test.js │ │ ├── gemini-integration.test.js │ │ ├── kilo-integration.test.js │ │ ├── kiro-integration.test.js │ │ ├── mcp-config-validation.test.js │ │ ├── opencode-integration.test.js │ │ ├── profile-safety-check.test.js │ │ ├── roo-integration.test.js │ │ ├── rule-transformer-cline.test.js │ │ ├── rule-transformer-cursor.test.js │ │ ├── rule-transformer-gemini.test.js │ │ ├── rule-transformer-kilo.test.js │ │ ├── rule-transformer-kiro.test.js │ │ ├── 
rule-transformer-opencode.test.js │ │ ├── rule-transformer-roo.test.js │ │ ├── rule-transformer-trae.test.js │ │ ├── rule-transformer-vscode.test.js │ │ ├── rule-transformer-windsurf.test.js │ │ ├── rule-transformer-zed.test.js │ │ ├── rule-transformer.test.js │ │ ├── selective-profile-removal.test.js │ │ ├── subdirectory-support.test.js │ │ ├── trae-integration.test.js │ │ ├── vscode-integration.test.js │ │ ├── windsurf-integration.test.js │ │ └── zed-integration.test.js │ ├── progress │ │ └── base-progress-tracker.test.js │ ├── prompt-manager.test.js │ ├── prompts │ │ └── expand-task-prompt.test.js │ ├── providers │ │ └── provider-registry.test.js │ ├── scripts │ │ └── modules │ │ ├── commands │ │ │ ├── move-cross-tag.test.js │ │ │ └── README.md │ │ ├── dependency-manager │ │ │ ├── circular-dependencies.test.js │ │ │ ├── cross-tag-dependencies.test.js │ │ │ └── fix-dependencies-command.test.js │ │ ├── task-manager │ │ │ ├── add-subtask.test.js │ │ │ ├── add-task.test.js │ │ │ ├── analyze-task-complexity.test.js │ │ │ ├── clear-subtasks.test.js │ │ │ ├── complexity-report-tag-isolation.test.js │ │ │ ├── expand-all-tasks.test.js │ │ │ ├── expand-task.test.js │ │ │ ├── find-next-task.test.js │ │ │ ├── generate-task-files.test.js │ │ │ ├── list-tasks.test.js │ │ │ ├── move-task-cross-tag.test.js │ │ │ ├── move-task.test.js │ │ │ ├── parse-prd.test.js │ │ │ ├── remove-subtask.test.js │ │ │ ├── remove-task.test.js │ │ │ ├── research.test.js │ │ │ ├── scope-adjustment.test.js │ │ │ ├── set-task-status.test.js │ │ │ ├── setup.js │ │ │ ├── update-single-task-status.test.js │ │ │ ├── update-subtask-by-id.test.js │ │ │ ├── update-task-by-id.test.js │ │ │ └── update-tasks.test.js │ │ ├── ui │ │ │ └── cross-tag-error-display.test.js │ │ └── utils-tag-aware-paths.test.js │ ├── task-finder.test.js │ ├── task-manager │ │ ├── clear-subtasks.test.js │ │ ├── move-task.test.js │ │ ├── tag-boundary.test.js │ │ └── tag-management.test.js │ ├── task-master.test.js │ ├── ui │ │ └── 
indicators.test.js │ ├── ui.test.js │ ├── utils-strip-ansi.test.js │ └── utils.test.js ├── tsconfig.json ├── tsdown.config.ts └── turbo.json ```

# Files

--------------------------------------------------------------------------------
/apps/extension/package.mjs:
--------------------------------------------------------------------------------

```
import { execSync } from 'child_process';
import path from 'path';
import { fileURLToPath } from 'url';
import fs from 'fs-extra';

// --- Configuration ---
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const packageDir = path.resolve(__dirname, 'vsix-build');
// --- End Configuration ---

try {
	console.log('🚀 Starting packaging process...');

	// 1. Build Project
	console.log('\nBuilding JavaScript...');
	execSync('npm run build:js', { stdio: 'inherit' });
	console.log('\nBuilding CSS...');
	execSync('npm run build:css', { stdio: 'inherit' });

	// 2. Prepare Clean Directory
	console.log(`\nPreparing clean directory at: ${packageDir}`);
	fs.emptyDirSync(packageDir);

	// 3. Copy Build Artifacts (excluding source maps)
	console.log('Copying build artifacts...');
	const distDir = path.resolve(__dirname, 'dist');
	const targetDistDir = path.resolve(packageDir, 'dist');
	fs.ensureDirSync(targetDistDir);

	// Only copy the files we need (exclude .map files)
	const filesToCopy = ['extension.js', 'index.js', 'index.css', 'sidebar.js'];
	for (const file of filesToCopy) {
		const srcFile = path.resolve(distDir, file);
		const destFile = path.resolve(targetDistDir, file);
		if (fs.existsSync(srcFile)) {
			fs.copySync(srcFile, destFile);
			console.log(` - Copied dist/${file}`);
		}
	}

	// 4. Copy additional files
	const additionalFiles = ['README.md', 'CHANGELOG.md', 'AGENTS.md'];
	for (const file of additionalFiles) {
		if (fs.existsSync(path.resolve(__dirname, file))) {
			fs.copySync(
				path.resolve(__dirname, file),
				path.resolve(packageDir, file)
			);
			console.log(` - Copied ${file}`);
		}
	}

	// 5. Sync versions and prepare the final package.json
	console.log('Syncing versions and preparing the final package.json...');

	// Read current versions
	const devPackagePath = path.resolve(__dirname, 'package.json');
	const publishPackagePath = path.resolve(__dirname, 'package.publish.json');
	const devPackage = JSON.parse(fs.readFileSync(devPackagePath, 'utf8'));
	const publishPackage = JSON.parse(
		fs.readFileSync(publishPackagePath, 'utf8')
	);

	// Handle RC versions for VS Code Marketplace
	let finalVersion = devPackage.version;
	if (finalVersion.includes('-rc.')) {
		console.log(
			' - Detected RC version, transforming for VS Code Marketplace...'
		);

		// Extract base version and RC number
		const baseVersion = finalVersion.replace(/-rc\.\d+$/, '');
		const rcMatch = finalVersion.match(/rc\.(\d+)/);
		const rcNumber = rcMatch ? parseInt(rcMatch[1]) : 0;

		// For each RC iteration, increment the patch version
		// This ensures unique versions in VS Code Marketplace
		// NOTE(review): patch + rcNumber could collide with a later stable
		// patch release of the same minor line — confirm this is acceptable.
		if (rcNumber > 0) {
			const [major, minor, patch] = baseVersion.split('.').map(Number);
			finalVersion = `${major}.${minor}.${patch + rcNumber}`;
			console.log(
				` - RC version mapping: ${devPackage.version} → ${finalVersion}`
			);
		} else {
			finalVersion = baseVersion;
			console.log(
				` - RC version mapping: ${devPackage.version} → ${finalVersion}`
			);
		}
	}

	// Check if versions need updating
	if (publishPackage.version !== finalVersion) {
		console.log(
			` - Version sync needed: ${publishPackage.version} → ${finalVersion}`
		);
		publishPackage.version = finalVersion;

		// Update the source package.publish.json file with the final version
		fs.writeFileSync(
			publishPackagePath,
			JSON.stringify(publishPackage, null, '\t') + '\n'
		);
		console.log(` - Updated package.publish.json version to ${finalVersion}`);
	} else {
		console.log(` - Versions already in sync: ${finalVersion}`);
	}

	// Copy the (now synced) package.publish.json as package.json
	fs.copySync(publishPackagePath, path.resolve(packageDir, 'package.json'));
	console.log(' - Copied package.publish.json as package.json');

	// 6. Copy .vscodeignore if it exists
	if (fs.existsSync(path.resolve(__dirname, '.vscodeignore'))) {
		fs.copySync(
			path.resolve(__dirname, '.vscodeignore'),
			path.resolve(packageDir, '.vscodeignore')
		);
		console.log(' - Copied .vscodeignore');
	}

	// 7. Copy LICENSE if it exists
	if (fs.existsSync(path.resolve(__dirname, 'LICENSE'))) {
		fs.copySync(
			path.resolve(__dirname, 'LICENSE'),
			path.resolve(packageDir, 'LICENSE')
		);
		console.log(' - Copied LICENSE');
	}

	// 7a. Copy assets directory if it exists
	const assetsDir = path.resolve(__dirname, 'assets');
	if (fs.existsSync(assetsDir)) {
		const targetAssetsDir = path.resolve(packageDir, 'assets');
		fs.copySync(assetsDir, targetAssetsDir);
		console.log(' - Copied assets directory');
	}

	// Small delay to ensure file system operations complete
	await new Promise((resolve) => setTimeout(resolve, 100));

	// 8. Final step - manual packaging
	console.log('\n✅ Build preparation complete!');
	console.log('\nTo create the VSIX package, run:');
	console.log(
		'\x1b[36m%s\x1b[0m',
		`cd vsix-build && npx vsce package --no-dependencies`
	);

	// Use the transformed version for output
	console.log(
		`\nYour extension will be packaged to: vsix-build/task-master-${finalVersion}.vsix`
	);
} catch (error) {
	console.error('\n❌ Packaging failed!');
	console.error(error.message);
	process.exit(1);
}
```

--------------------------------------------------------------------------------
/src/profiles/opencode.js:
--------------------------------------------------------------------------------

```javascript
// Opencode profile for rule-transformer
import path from 'path';
import fs from 'fs';
import { log } from '../../scripts/modules/utils.js';
import { createProfile } from './base-profile.js';

/**
 * Transform standard MCP config format to OpenCode format
 * @param {Object} mcpConfig - Standard MCP configuration object
 * @returns {Object} - Transformed OpenCode configuration object
 */
function transformToOpenCodeFormat(mcpConfig) {
	const openCodeConfig = {
$schema: 'https://opencode.ai/config.json' }; // Transform mcpServers to mcp if (mcpConfig.mcpServers) { openCodeConfig.mcp = {}; for (const [serverName, serverConfig] of Object.entries( mcpConfig.mcpServers )) { // Transform server configuration const transformedServer = { type: 'local' }; // Combine command and args into single command array if (serverConfig.command && serverConfig.args) { transformedServer.command = [ serverConfig.command, ...serverConfig.args ]; } else if (serverConfig.command) { transformedServer.command = [serverConfig.command]; } // Add enabled flag transformedServer.enabled = true; // Transform env to environment if (serverConfig.env) { transformedServer.environment = serverConfig.env; } // update with transformed config openCodeConfig.mcp[serverName] = transformedServer; } } return openCodeConfig; } /** * Lifecycle function called after MCP config generation to transform to OpenCode format * @param {string} targetDir - Target project directory * @param {string} assetsDir - Assets directory (unused for OpenCode) */ function onPostConvertRulesProfile(targetDir, assetsDir) { const openCodeConfigPath = path.join(targetDir, 'opencode.json'); if (!fs.existsSync(openCodeConfigPath)) { log('debug', '[OpenCode] No opencode.json found to transform'); return; } try { // Read the generated standard MCP config const mcpConfigContent = fs.readFileSync(openCodeConfigPath, 'utf8'); const mcpConfig = JSON.parse(mcpConfigContent); // Check if it's already in OpenCode format (has $schema) if (mcpConfig.$schema) { log( 'info', '[OpenCode] opencode.json already in OpenCode format, skipping transformation' ); return; } // Transform to OpenCode format const openCodeConfig = transformToOpenCodeFormat(mcpConfig); // Write back the transformed config with proper formatting fs.writeFileSync( openCodeConfigPath, JSON.stringify(openCodeConfig, null, 2) + '\n' ); log('info', '[OpenCode] Transformed opencode.json to OpenCode format'); log( 'debug', `[OpenCode] Added 
schema, renamed mcpServers->mcp, combined command+args, added type/enabled, renamed env->environment` ); } catch (error) { log( 'error', `[OpenCode] Failed to transform opencode.json: ${error.message}` ); } } /** * Lifecycle function called when removing OpenCode profile * @param {string} targetDir - Target project directory */ function onRemoveRulesProfile(targetDir) { const openCodeConfigPath = path.join(targetDir, 'opencode.json'); if (!fs.existsSync(openCodeConfigPath)) { log('debug', '[OpenCode] No opencode.json found to clean up'); return; } try { // Read the current config const configContent = fs.readFileSync(openCodeConfigPath, 'utf8'); const config = JSON.parse(configContent); // Check if it has the mcp section and taskmaster-ai server if (config.mcp && config.mcp['taskmaster-ai']) { // Remove taskmaster-ai server delete config.mcp['taskmaster-ai']; // Check if there are other MCP servers const remainingServers = Object.keys(config.mcp); if (remainingServers.length === 0) { // No other servers, remove entire mcp section delete config.mcp; } // Check if config is now empty (only has $schema) const remainingKeys = Object.keys(config).filter( (key) => key !== '$schema' ); if (remainingKeys.length === 0) { // Config only has schema left, remove entire file fs.rmSync(openCodeConfigPath, { force: true }); log('info', '[OpenCode] Removed empty opencode.json file'); } else { // Write back the modified config fs.writeFileSync( openCodeConfigPath, JSON.stringify(config, null, 2) + '\n' ); log( 'info', '[OpenCode] Removed TaskMaster from opencode.json, preserved other configurations' ); } } else { log('debug', '[OpenCode] TaskMaster not found in opencode.json'); } } catch (error) { log( 'error', `[OpenCode] Failed to clean up opencode.json: ${error.message}` ); } } // Create and export opencode profile using the base factory export const opencodeProfile = createProfile({ name: 'opencode', displayName: 'OpenCode', url: 'opencode.ai', docsUrl: 'opencode.ai/docs/', 
profileDir: '.', // Root directory rulesDir: '.', // Root directory for AGENTS.md mcpConfigName: 'opencode.json', // Override default 'mcp.json' includeDefaultRules: false, fileMap: { 'AGENTS.md': 'AGENTS.md' }, onPostConvert: onPostConvertRulesProfile, onRemove: onRemoveRulesProfile }); // Export lifecycle functions separately to avoid naming conflicts export { onPostConvertRulesProfile, onRemoveRulesProfile }; ``` -------------------------------------------------------------------------------- /mcp-server/src/core/direct-functions/update-subtask-by-id.js: -------------------------------------------------------------------------------- ```javascript /** * update-subtask-by-id.js * Direct function implementation for appending information to a specific subtask */ import { updateSubtaskById } from '../../../../scripts/modules/task-manager.js'; import { enableSilentMode, disableSilentMode, isSilentMode } from '../../../../scripts/modules/utils.js'; import { createLogWrapper } from '../../tools/utils.js'; /** * Direct function wrapper for updateSubtaskById with error handling. * * @param {Object} args - Command arguments containing id, prompt, useResearch, tasksJsonPath, and projectRoot. * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file. * @param {string} args.id - Subtask ID in format "parent.sub". * @param {string} args.prompt - Information to append to the subtask. * @param {boolean} [args.research] - Whether to use research role. * @param {string} [args.projectRoot] - Project root path. * @param {string} [args.tag] - Tag for the task (optional) * @param {Object} log - Logger object. * @param {Object} context - Context object containing session data. * @returns {Promise<Object>} - Result object with success status and data/error information. 
*/ export async function updateSubtaskByIdDirect(args, log, context = {}) { const { session } = context; // Destructure expected args, including projectRoot const { tasksJsonPath, id, prompt, research, projectRoot, tag } = args; const logWrapper = createLogWrapper(log); try { logWrapper.info( `Updating subtask by ID via direct function. ID: ${id}, ProjectRoot: ${projectRoot}` ); // Check if tasksJsonPath was provided if (!tasksJsonPath) { const errorMessage = 'tasksJsonPath is required but was not provided.'; logWrapper.error(errorMessage); return { success: false, error: { code: 'MISSING_ARGUMENT', message: errorMessage } }; } // Basic validation for ID format (e.g., '5.2') if (!id || typeof id !== 'string' || !id.includes('.')) { const errorMessage = 'Invalid subtask ID format. Must be in format "parentId.subtaskId" (e.g., "5.2").'; logWrapper.error(errorMessage); return { success: false, error: { code: 'INVALID_SUBTASK_ID', message: errorMessage } }; } if (!prompt) { const errorMessage = 'No prompt specified. Please provide the information to append.'; logWrapper.error(errorMessage); return { success: false, error: { code: 'MISSING_PROMPT', message: errorMessage } }; } // Validate subtask ID format const subtaskId = id; if (typeof subtaskId !== 'string' && typeof subtaskId !== 'number') { const errorMessage = `Invalid subtask ID type: ${typeof subtaskId}. Subtask ID must be a string or number.`; log.error(errorMessage); return { success: false, error: { code: 'INVALID_SUBTASK_ID_TYPE', message: errorMessage } }; } const subtaskIdStr = String(subtaskId); if (!subtaskIdStr.includes('.')) { const errorMessage = `Invalid subtask ID format: ${subtaskIdStr}. 
Subtask ID must be in format "parentId.subtaskId" (e.g., "5.2").`; log.error(errorMessage); return { success: false, error: { code: 'INVALID_SUBTASK_ID_FORMAT', message: errorMessage } }; } // Use the provided path const tasksPath = tasksJsonPath; const useResearch = research === true; log.info( `Updating subtask with ID ${subtaskIdStr} with prompt "${prompt}" and research: ${useResearch}` ); const wasSilent = isSilentMode(); if (!wasSilent) { enableSilentMode(); } try { // Execute core updateSubtaskById function const coreResult = await updateSubtaskById( tasksPath, subtaskIdStr, prompt, useResearch, { mcpLog: logWrapper, session, projectRoot, tag, commandName: 'update-subtask', outputType: 'mcp' }, 'json' ); if (!coreResult || coreResult.updatedSubtask === null) { const message = `Subtask ${id} or its parent task not found.`; logWrapper.error(message); return { success: false, error: { code: 'SUBTASK_NOT_FOUND', message: message } }; } // Subtask updated successfully const successMessage = `Successfully updated subtask with ID ${subtaskIdStr}`; logWrapper.success(successMessage); return { success: true, data: { message: `Successfully updated subtask with ID ${subtaskIdStr}`, subtaskId: subtaskIdStr, parentId: subtaskIdStr.split('.')[0], subtask: coreResult.updatedSubtask, tasksPath, useResearch, telemetryData: coreResult.telemetryData, tagInfo: coreResult.tagInfo } }; } catch (error) { logWrapper.error(`Error updating subtask by ID: ${error.message}`); return { success: false, error: { code: 'UPDATE_SUBTASK_CORE_ERROR', message: error.message || 'Unknown error updating subtask' } }; } finally { if (!wasSilent && isSilentMode()) { disableSilentMode(); } } } catch (error) { logWrapper.error( `Setup error in updateSubtaskByIdDirect: ${error.message}` ); if (isSilentMode()) disableSilentMode(); return { success: false, error: { code: 'DIRECT_FUNCTION_SETUP_ERROR', message: error.message || 'Unknown setup error' } }; } } ``` 
--------------------------------------------------------------------------------
/src/progress/parse-prd-tracker.js:
--------------------------------------------------------------------------------

```javascript
import chalk from 'chalk';
import { newMultiBar } from './cli-progress-factory.js';
import { BaseProgressTracker } from './base-progress-tracker.js';
import {
	createProgressHeader,
	createProgressRow,
	createBorder
} from './tracker-ui.js';
import {
	getCliPriorityIndicators,
	getPriorityIndicator,
	getStatusBarPriorityIndicators,
	getPriorityColors
} from '../ui/indicators.js';

// Get centralized priority indicators
const PRIORITY_INDICATORS = getCliPriorityIndicators();
const PRIORITY_DOTS = getStatusBarPriorityIndicators();
const PRIORITY_COLORS = getPriorityColors();

// Constants controlling debounce timing and column layout of the task table.
const CONSTANTS = {
	DEBOUNCE_DELAY: 100, // ms to coalesce rapid addTaskLine() calls
	MAX_TITLE_LENGTH: 57, // titles longer than this are truncated
	TRUNCATED_LENGTH: 54, // kept prefix length before appending '...'
	TASK_ID_PAD_START: 3,
	TASK_ID_PAD_END: 4,
	PRIORITY_PAD_END: 3,
	VALID_PRIORITIES: ['high', 'medium', 'low'],
	DEFAULT_PRIORITY: 'medium'
};

/**
 * Helper class to manage update debouncing.
 * Only one callback is ever pending: scheduling a new one cancels the old.
 */
class UpdateDebouncer {
	constructor(delay = CONSTANTS.DEBOUNCE_DELAY) {
		this.delay = delay;
		this.pendingTimeout = null;
	}

	// Schedule callback after `delay` ms, replacing any pending one.
	debounce(callback) {
		this.clear();
		this.pendingTimeout = setTimeout(() => {
			callback();
			this.pendingTimeout = null;
		}, this.delay);
	}

	// Cancel the pending callback, if any. Safe to call repeatedly.
	clear() {
		if (this.pendingTimeout) {
			clearTimeout(this.pendingTimeout);
			this.pendingTimeout = null;
		}
	}

	// True while a scheduled callback has not yet fired or been cleared.
	hasPending() {
		return this.pendingTimeout !== null;
	}
}

/**
 * Helper class to manage priority counts.
 * Unknown or missing priorities are folded into DEFAULT_PRIORITY ('medium').
 */
class PriorityManager {
	constructor() {
		this.priorities = { high: 0, medium: 0, low: 0 };
	}

	// Normalize, count, and return the normalized priority.
	increment(priority) {
		const normalized = this.normalize(priority);
		this.priorities[normalized]++;
		return normalized;
	}

	// Map arbitrary input to one of VALID_PRIORITIES (case-insensitive),
	// falling back to DEFAULT_PRIORITY for anything unrecognized or falsy.
	normalize(priority) {
		const lowercased = priority
			? priority.toLowerCase()
			: CONSTANTS.DEFAULT_PRIORITY;
		return CONSTANTS.VALID_PRIORITIES.includes(lowercased)
			? lowercased
			: CONSTANTS.DEFAULT_PRIORITY;
	}

	// Returns a defensive copy so callers cannot mutate internal counts.
	getCounts() {
		return { ...this.priorities };
	}
}

/**
 * Helper class for formatting task display elements (title, priority, id)
 * into fixed-width strings for the progress table.
 */
class TaskFormatter {
	// Truncate long titles with '...'; fall back to "Task <n>" when empty.
	static formatTitle(title, taskNumber) {
		if (!title) return `Task ${taskNumber}`;
		return title.length > CONSTANTS.MAX_TITLE_LENGTH
			? title.substring(0, CONSTANTS.TRUNCATED_LENGTH) + '...'
			: title;
	}

	// Priority indicator padded to the PRI column width.
	static formatPriority(priority) {
		return getPriorityIndicator(priority, false).padEnd(
			CONSTANTS.PRIORITY_PAD_END,
			' '
		);
	}

	// Task number centered-ish in the TASK column via pad start then end.
	static formatTaskId(taskNumber) {
		return taskNumber
			.toString()
			.padStart(CONSTANTS.TASK_ID_PAD_START, ' ')
			.padEnd(CONSTANTS.TASK_ID_PAD_END, ' ');
	}
}

/**
 * Tracks progress for PRD parsing operations with multibar display.
 * Row rendering is debounced; the time/tokens bar updates immediately so
 * priority counts never lag behind.
 */
class ParsePrdTracker extends BaseProgressTracker {
	// Hook called by the base class; sets up per-parse state.
	_initializeCustomProperties(options) {
		this.append = options.append;
		this.priorityManager = new PriorityManager();
		this.debouncer = new UpdateDebouncer();
		this.headerShown = false;
	}

	// Format string for the time/tokens status bar (placeholders filled by
	// the base tracker plus _getCustomTimeTokensPayload()).
	_getTimeTokensBarFormat() {
		return `{clock} {elapsed} | ${PRIORITY_DOTS.high} {high} ${PRIORITY_DOTS.medium} {medium} ${PRIORITY_DOTS.low} {low} | Tokens (I/O): {in}/{out} | Est: {remaining}`;
	}

	// Format string for the main progress bar.
	_getProgressBarFormat() {
		return 'Tasks {tasks} |{bar}| {percentage}%';
	}

	// Supplies {high, medium, low} counts to the time/tokens bar payload.
	_getCustomTimeTokensPayload() {
		return this.priorityManager.getCounts();
	}

	/**
	 * Record a newly parsed task and (debounced) render its table row.
	 * No-op once the tracker is finished or if the multibar is absent.
	 */
	addTaskLine(taskNumber, title, priority = 'medium') {
		if (!this.multibar || this.isFinished) return;

		this._ensureHeaderShown();
		const normalizedPriority = this._updateTaskCounters(taskNumber, priority);

		// Immediately update the time/tokens bar to show the new priority count
		this._updateTimeTokensBar();

		this.debouncer.debounce(() => {
			this._updateProgressDisplay(taskNumber, title, normalizedPriority);
		});
	}

	// Render the table header exactly once, before the first row.
	_ensureHeaderShown() {
		if (!this.headerShown) {
			this.headerShown = true;
			createProgressHeader(
				this.multibar,
				' TASK | PRI | TITLE',
				'------+-----+----------------------------------------------------------------'
			);
		}
	}

	// Count the priority and advance completion to this task's number.
	// NOTE(review): setting completedUnits = taskNumber assumes task numbers
	// arrive in increasing order — TODO confirm with callers.
	_updateTaskCounters(taskNumber, priority) {
		const normalizedPriority = this.priorityManager.increment(priority);
		this.completedUnits = taskNumber;
		return normalizedPriority;
	}

	// Debounced renderer: updates the progress bar and appends the task row.
	_updateProgressDisplay(taskNumber, title, normalizedPriority) {
		this.progressBar.update(this.completedUnits, {
			tasks: `${this.completedUnits}/${this.numUnits}`
		});

		const displayTitle = TaskFormatter.formatTitle(title, taskNumber);
		const priorityDisplay = TaskFormatter.formatPriority(normalizedPriority);
		const taskIdCentered = TaskFormatter.formatTaskId(taskNumber);

		createProgressRow(
			this.multibar,
			` ${taskIdCentered} | ${priorityDisplay} | {title}`,
			{ title: displayTitle }
		);
		createBorder(
			this.multibar,
			'------+-----+----------------------------------------------------------------'
		);
		this._updateTimeTokensBar();
	}

	finish() {
		// Flush any pending updates before finishing
		if (this.debouncer.hasPending()) {
			this.debouncer.clear();
			this._updateTimeTokensBar();
		}
		this.cleanup();
		super.finish();
	}

	/**
	 * Override cleanup to handle pending updates
	 */
	_performCustomCleanup() {
		this.debouncer.clear();
	}

	// Extends the base summary with priority counts and the verb used in
	// user-facing messaging (append mode vs fresh generation).
	getSummary() {
		return {
			...super.getSummary(),
			taskPriorities: this.priorityManager.getCounts(),
			actionVerb: this.append ? 'appended' : 'generated'
		};
	}
}

/**
 * Factory for ParsePrdTracker instances.
 * @param {Object} [options] - Tracker options (forwarded to the base class).
 * @returns {ParsePrdTracker}
 */
export function createParsePrdTracker(options = {}) {
	return new ParsePrdTracker(options);
}
```

--------------------------------------------------------------------------------
/mcp-server/src/core/direct-functions/initialize-project.js:
--------------------------------------------------------------------------------

```javascript
import { initializeProject } from '../../../../scripts/init.js'; // Import core function and its logger if needed separately
import {
	enableSilentMode,
	disableSilentMode
	// isSilentMode // Not used directly here
} from '../../../../scripts/modules/utils.js';
import os from 'os'; // Import os module for home directory check
import { RULE_PROFILES } from '../../../../src/constants/profiles.js';
import { convertAllRulesToProfileRules } from '../../../../src/utils/rule-transformer.js';

/**
 * Direct function wrapper for initializing a project.
 * Derives target directory from session, sets CWD, and calls core init logic.
 * @param {object} args - Arguments containing initialization options (addAliases, initGit, storeTasksInGit, skipInstall, yes, projectRoot, rules)
 * @param {object} log - The FastMCP logger instance.
 * @param {object} context - The context object, must contain { session }.
 * @returns {Promise<{success: boolean, data?: any, error?: {code: string, message: string}}>} - Standard result object.
*/ export async function initializeProjectDirect(args, log, context = {}) { const { session } = context; // Keep session if core logic needs it const homeDir = os.homedir(); log.info(`Args received in direct function: ${JSON.stringify(args)}`); // --- Determine Target Directory --- // TRUST the projectRoot passed from the tool layer via args // The HOF in the tool layer already normalized and validated it came from a reliable source (args or session) const targetDirectory = args.projectRoot; // --- Validate the targetDirectory (basic sanity checks) --- if ( !targetDirectory || typeof targetDirectory !== 'string' || // Ensure it's a string targetDirectory === '/' || targetDirectory === homeDir ) { log.error( `Invalid target directory received from tool layer: '${targetDirectory}'` ); return { success: false, error: { code: 'INVALID_TARGET_DIRECTORY', message: `Cannot initialize project: Invalid target directory '${targetDirectory}' received. Please ensure a valid workspace/folder is open or specified.`, details: `Received args.projectRoot: ${args.projectRoot}` // Show what was received } }; } // --- Proceed with validated targetDirectory --- log.info(`Validated target directory for initialization: ${targetDirectory}`); const originalCwd = process.cwd(); let resultData; let success = false; let errorResult = null; log.info( `Temporarily changing CWD to ${targetDirectory} for initialization.` ); process.chdir(targetDirectory); // Change CWD to the HOF-provided root enableSilentMode(); try { // Construct options ONLY from the relevant flags in args // The core initializeProject operates in the current CWD, which we just set const options = { addAliases: args.addAliases, initGit: args.initGit, storeTasksInGit: args.storeTasksInGit, skipInstall: args.skipInstall, yes: true // Force yes mode }; // Handle rules option with MCP-specific defaults if (Array.isArray(args.rules) && args.rules.length > 0) { options.rules = args.rules; options.rulesExplicitlyProvided = true; 
log.info(`Including rules: ${args.rules.join(', ')}`); } else { // For MCP initialization, default to Cursor profile only options.rules = ['cursor']; options.rulesExplicitlyProvided = true; log.info(`No rule profiles specified, defaulting to: Cursor`); } log.info(`Initializing project with options: ${JSON.stringify(options)}`); const result = await initializeProject(options); // Call core logic resultData = { message: 'Project initialized successfully.', next_step: 'Now that the project is initialized, the next step is to create the tasks by parsing a PRD. This will create the tasks folder and the initial task files (tasks folder will be created when parse-prd is run). The parse-prd tool will require a prd.txt file as input (typically found in .taskmaster/docs/ directory). You can create a prd.txt file by asking the user about their idea, and then using the .taskmaster/templates/example_prd.txt file as a template to generate a prd.txt file in .taskmaster/docs/. You may skip all of this if the user already has a prd.txt file. You can THEN use the parse-prd tool to create the tasks. So: step 1 after initialization is to create a prd.txt file in .taskmaster/docs/prd.txt or confirm the user already has one. Step 2 is to use the parse-prd tool to create the tasks. Do not bother looking for tasks after initialization, just use the parse-prd tool to create the tasks after creating a prd.txt from which to parse the tasks. 
You do NOT need to reinitialize the project to parse-prd.', ...result }; success = true; log.info( `Project initialization completed successfully in ${targetDirectory}.` ); } catch (error) { log.error(`Core initializeProject failed: ${error.message}`); errorResult = { code: 'INITIALIZATION_FAILED', message: `Core project initialization failed: ${error.message}`, details: error.stack }; success = false; } finally { disableSilentMode(); log.info(`Restoring original CWD: ${originalCwd}`); process.chdir(originalCwd); } if (success) { return { success: true, data: resultData }; } else { return { success: false, error: errorResult }; } } ``` -------------------------------------------------------------------------------- /mcp-server/src/core/task-master-core.js: -------------------------------------------------------------------------------- ```javascript /** * task-master-core.js * Central module that imports and re-exports all direct function implementations * for improved organization and maintainability. 
*/ // Import direct function implementations import { listTasksDirect } from './direct-functions/list-tasks.js'; import { getCacheStatsDirect } from './direct-functions/cache-stats.js'; import { parsePRDDirect } from './direct-functions/parse-prd.js'; import { updateTasksDirect } from './direct-functions/update-tasks.js'; import { updateTaskByIdDirect } from './direct-functions/update-task-by-id.js'; import { updateSubtaskByIdDirect } from './direct-functions/update-subtask-by-id.js'; import { generateTaskFilesDirect } from './direct-functions/generate-task-files.js'; import { setTaskStatusDirect } from './direct-functions/set-task-status.js'; import { showTaskDirect } from './direct-functions/show-task.js'; import { nextTaskDirect } from './direct-functions/next-task.js'; import { expandTaskDirect } from './direct-functions/expand-task.js'; import { addTaskDirect } from './direct-functions/add-task.js'; import { addSubtaskDirect } from './direct-functions/add-subtask.js'; import { removeSubtaskDirect } from './direct-functions/remove-subtask.js'; import { analyzeTaskComplexityDirect } from './direct-functions/analyze-task-complexity.js'; import { clearSubtasksDirect } from './direct-functions/clear-subtasks.js'; import { expandAllTasksDirect } from './direct-functions/expand-all-tasks.js'; import { removeDependencyDirect } from './direct-functions/remove-dependency.js'; import { validateDependenciesDirect } from './direct-functions/validate-dependencies.js'; import { fixDependenciesDirect } from './direct-functions/fix-dependencies.js'; import { complexityReportDirect } from './direct-functions/complexity-report.js'; import { addDependencyDirect } from './direct-functions/add-dependency.js'; import { removeTaskDirect } from './direct-functions/remove-task.js'; import { initializeProjectDirect } from './direct-functions/initialize-project.js'; import { modelsDirect } from './direct-functions/models.js'; import { moveTaskDirect } from 
'./direct-functions/move-task.js'; import { moveTaskCrossTagDirect } from './direct-functions/move-task-cross-tag.js'; import { researchDirect } from './direct-functions/research.js'; import { addTagDirect } from './direct-functions/add-tag.js'; import { deleteTagDirect } from './direct-functions/delete-tag.js'; import { listTagsDirect } from './direct-functions/list-tags.js'; import { useTagDirect } from './direct-functions/use-tag.js'; import { renameTagDirect } from './direct-functions/rename-tag.js'; import { copyTagDirect } from './direct-functions/copy-tag.js'; import { scopeUpDirect } from './direct-functions/scope-up.js'; import { scopeDownDirect } from './direct-functions/scope-down.js'; // Re-export utility functions export { findTasksPath } from './utils/path-utils.js'; // Use Map for potential future enhancements like introspection or dynamic dispatch export const directFunctions = new Map([ ['listTasksDirect', listTasksDirect], ['getCacheStatsDirect', getCacheStatsDirect], ['parsePRDDirect', parsePRDDirect], ['updateTasksDirect', updateTasksDirect], ['updateTaskByIdDirect', updateTaskByIdDirect], ['updateSubtaskByIdDirect', updateSubtaskByIdDirect], ['generateTaskFilesDirect', generateTaskFilesDirect], ['setTaskStatusDirect', setTaskStatusDirect], ['showTaskDirect', showTaskDirect], ['nextTaskDirect', nextTaskDirect], ['expandTaskDirect', expandTaskDirect], ['addTaskDirect', addTaskDirect], ['addSubtaskDirect', addSubtaskDirect], ['removeSubtaskDirect', removeSubtaskDirect], ['analyzeTaskComplexityDirect', analyzeTaskComplexityDirect], ['clearSubtasksDirect', clearSubtasksDirect], ['expandAllTasksDirect', expandAllTasksDirect], ['removeDependencyDirect', removeDependencyDirect], ['validateDependenciesDirect', validateDependenciesDirect], ['fixDependenciesDirect', fixDependenciesDirect], ['complexityReportDirect', complexityReportDirect], ['addDependencyDirect', addDependencyDirect], ['removeTaskDirect', removeTaskDirect], ['initializeProjectDirect', 
initializeProjectDirect], ['modelsDirect', modelsDirect], ['moveTaskDirect', moveTaskDirect], ['moveTaskCrossTagDirect', moveTaskCrossTagDirect], ['researchDirect', researchDirect], ['addTagDirect', addTagDirect], ['deleteTagDirect', deleteTagDirect], ['listTagsDirect', listTagsDirect], ['useTagDirect', useTagDirect], ['renameTagDirect', renameTagDirect], ['copyTagDirect', copyTagDirect], ['scopeUpDirect', scopeUpDirect], ['scopeDownDirect', scopeDownDirect] ]); // Re-export all direct function implementations export { listTasksDirect, getCacheStatsDirect, parsePRDDirect, updateTasksDirect, updateTaskByIdDirect, updateSubtaskByIdDirect, generateTaskFilesDirect, setTaskStatusDirect, showTaskDirect, nextTaskDirect, expandTaskDirect, addTaskDirect, addSubtaskDirect, removeSubtaskDirect, analyzeTaskComplexityDirect, clearSubtasksDirect, expandAllTasksDirect, removeDependencyDirect, validateDependenciesDirect, fixDependenciesDirect, complexityReportDirect, addDependencyDirect, removeTaskDirect, initializeProjectDirect, modelsDirect, moveTaskDirect, moveTaskCrossTagDirect, researchDirect, addTagDirect, deleteTagDirect, listTagsDirect, useTagDirect, renameTagDirect, copyTagDirect, scopeUpDirect, scopeDownDirect }; ``` -------------------------------------------------------------------------------- /docs/providers/gemini-cli.md: -------------------------------------------------------------------------------- ```markdown # Gemini CLI Provider The Gemini CLI provider allows you to use Google's Gemini models through the Gemini CLI tool, leveraging your existing Gemini subscription and OAuth authentication. ## Why Use Gemini CLI? The primary benefit of using the `gemini-cli` provider is to leverage your existing Personal Gemini Code Assist license/usage Google offers for free, or Gemini Code Assist Standard/Enterprise subscription you may already have, via OAuth configured through the Gemini CLI. 
This is ideal for users who:

- Have an active Gemini Code Assist license (including those using the free tier offered by Google)
- Want to use OAuth authentication instead of managing API keys
- Have already configured authentication via `gemini` OAuth login

## Installation

The provider is already included in Task Master. However, you need to install the Gemini CLI tool:

```bash
# Install gemini CLI globally
npm install -g @google/gemini-cli
```

## Authentication

### Primary Method: CLI Authentication (Recommended)

The Gemini CLI provider is designed to use your pre-configured OAuth authentication:

```bash
# Launch Gemini CLI and go through the authentication procedure
gemini
```

For OAuth use, select `Login with Google` - This will open a browser window for OAuth authentication. Once authenticated, Task Master will automatically use these credentials when you select the `gemini-cli` provider and models.

### Alternative Method: API Key

While the primary use case is OAuth authentication, you can also use an API key if needed:

```bash
export GEMINI_API_KEY="your-gemini-api-key"
```

**Note:** If you want to use API keys, consider using the standard `google` provider instead, as `gemini-cli` is specifically designed for OAuth/subscription users.

More details on authentication steps and options can be found in the [gemini-cli GitHub README](https://github.com/google-gemini/gemini-cli).
## Configuration Use the `task-master init` command to run through the guided initialization: ```bash task-master init ``` **OR** Configure `gemini-cli` as a provider using the Task Master models command: ```bash # Set gemini-cli as your main provider with gemini-2.5-pro task-master models --set-main gemini-2.5-pro --gemini-cli # Or use the faster gemini-2.5-flash model task-master models --set-main gemini-2.5-flash --gemini-cli ``` You can also manually edit your `.taskmaster/config.json`: ```json { "models": { "main": { "provider": "gemini-cli", "modelId": "gemini-2.5-pro", "maxTokens": 65536, "temperature": 0.2 }, "research": { "provider": "gemini-cli", "modelId": "gemini-2.5-pro", "maxTokens": 65536, "temperature": 0.1 }, "fallback": { "provider": "gemini-cli", "modelId": "gemini-2.5-flash", "maxTokens": 65536, "temperature": 0.2 } }, "global": { "logLevel": "info", "debug": false, "defaultNumTasks": 10, "defaultSubtasks": 5, "defaultPriority": "medium", "projectName": "Taskmaster", "ollamaBaseURL": "http://localhost:11434/api", "bedrockBaseURL": "https://bedrock.us-east-1.amazonaws.com", "responseLanguage": "English", "defaultTag": "master", "azureOpenaiBaseURL": "https://your-endpoint.openai.azure.com/" }, "claudeCode": {} } ``` ### Available Models The gemini-cli provider supports only two models: - `gemini-2.5-pro` - High performance model (1M token context window, 65,536 max output tokens) - `gemini-2.5-flash` - Fast, efficient model (1M token context window, 65,536 max output tokens) ## Usage Examples ### Basic Usage Once gemini-cli is installed and authenticated, and Task Master simply use Task Master as normal: ```bash # The provider will automatically use your OAuth credentials task-master parse-prd my-prd.txt ``` ## Troubleshooting ### "Authentication failed" Error If you get an authentication error: 1. 
**Primary solution**: Run `gemini` to authenticate with your Google account - use `/auth` slash command in **gemini-cli** to change authentication method if desired. 2. **Check authentication status**: Run `gemini` and use `/about` to verify your Auth Method and GCP Project if applicable. 3. **If using API key** (not recommended): Ensure `GEMINI_API_KEY` env variable is set correctly, see the gemini-cli README.md for more info. ### "Model not found" Error The gemini-cli provider only supports two models: - `gemini-2.5-pro` - `gemini-2.5-flash` If you need other Gemini models, use the standard `google` provider with an API key instead. ### Gemini CLI Not Found If you get a "gemini: command not found" error: ```bash # Install the Gemini CLI globally npm install -g @google/gemini-cli # Verify installation gemini --version ``` ## Important Notes - **OAuth vs API Key**: This provider is specifically designed for users who want to use OAuth authentication via gemini-cli. If you prefer using API keys, consider using the standard `google` provider instead. - **Limited Model Support**: Only `gemini-2.5-pro` and `gemini-2.5-flash` are available through gemini-cli. - **Subscription Benefits**: Using OAuth authentication allows you to leverage any subscription benefits associated with your Google account. - The provider uses the `ai-sdk-provider-gemini-cli` npm package internally. - Supports all standard Task Master features: text generation, streaming, and structured object generation. 
``` -------------------------------------------------------------------------------- /mcp-server/src/core/direct-functions/update-task-by-id.js: -------------------------------------------------------------------------------- ```javascript /** * update-task-by-id.js * Direct function implementation for updating a single task by ID with new information */ import { updateTaskById } from '../../../../scripts/modules/task-manager.js'; import { enableSilentMode, disableSilentMode, isSilentMode } from '../../../../scripts/modules/utils.js'; import { createLogWrapper } from '../../tools/utils.js'; /** * Direct function wrapper for updateTaskById with error handling. * * @param {Object} args - Command arguments containing id, prompt, useResearch, tasksJsonPath, and projectRoot. * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file. * @param {string} args.id - Task ID (or subtask ID like "1.2"). * @param {string} args.prompt - New information/context prompt. * @param {boolean} [args.research] - Whether to use research role. * @param {boolean} [args.append] - Whether to append timestamped information instead of full update. * @param {string} [args.projectRoot] - Project root path. * @param {string} [args.tag] - Tag for the task (optional) * @param {Object} log - Logger object. * @param {Object} context - Context object containing session data. * @returns {Promise<Object>} - Result object with success status and data/error information. */ export async function updateTaskByIdDirect(args, log, context = {}) { const { session } = context; // Destructure expected args, including projectRoot const { tasksJsonPath, id, prompt, research, append, projectRoot, tag } = args; const logWrapper = createLogWrapper(log); try { logWrapper.info( `Updating task by ID via direct function. 
ID: ${id}, ProjectRoot: ${projectRoot}` ); // Check if tasksJsonPath was provided if (!tasksJsonPath) { const errorMessage = 'tasksJsonPath is required but was not provided.'; logWrapper.error(errorMessage); return { success: false, error: { code: 'MISSING_ARGUMENT', message: errorMessage } }; } // Check required parameters (id and prompt) if (!id) { const errorMessage = 'No task ID specified. Please provide a task ID to update.'; logWrapper.error(errorMessage); return { success: false, error: { code: 'MISSING_TASK_ID', message: errorMessage } }; } if (!prompt) { const errorMessage = 'No prompt specified. Please provide a prompt with new information for the task update.'; logWrapper.error(errorMessage); return { success: false, error: { code: 'MISSING_PROMPT', message: errorMessage } }; } // Parse taskId - handle both string and number values let taskId; if (typeof id === 'string') { // Handle subtask IDs (e.g., "5.2") if (id.includes('.')) { taskId = id; // Keep as string for subtask IDs } else { // Parse as integer for main task IDs taskId = parseInt(id, 10); if (Number.isNaN(taskId)) { const errorMessage = `Invalid task ID: ${id}. 
Task ID must be a positive integer or subtask ID (e.g., "5.2").`; logWrapper.error(errorMessage); return { success: false, error: { code: 'INVALID_TASK_ID', message: errorMessage } }; } } } else { taskId = id; } // Use the provided path const tasksPath = tasksJsonPath; // Get research flag const useResearch = research === true; logWrapper.info( `Updating task with ID ${taskId} with prompt "${prompt}" and research: ${useResearch}` ); const wasSilent = isSilentMode(); if (!wasSilent) { enableSilentMode(); } try { // Execute core updateTaskById function with proper parameters const coreResult = await updateTaskById( tasksPath, taskId, prompt, useResearch, { mcpLog: logWrapper, session, projectRoot, tag, commandName: 'update-task', outputType: 'mcp' }, 'json', append || false ); // Check if the core function returned null or an object without success if (!coreResult || coreResult.updatedTask === null) { // Core function logs the reason, just return success with info const message = `Task ${taskId} was not updated (likely already completed).`; logWrapper.info(message); return { success: true, data: { message: message, taskId: taskId, updated: false, telemetryData: coreResult?.telemetryData, tagInfo: coreResult?.tagInfo } }; } // Task was updated successfully const successMessage = `Successfully updated task with ID ${taskId} based on the prompt`; logWrapper.success(successMessage); return { success: true, data: { message: successMessage, taskId: taskId, tasksPath: tasksPath, useResearch: useResearch, updated: true, updatedTask: coreResult.updatedTask, telemetryData: coreResult.telemetryData, tagInfo: coreResult.tagInfo } }; } catch (error) { logWrapper.error(`Error updating task by ID: ${error.message}`); return { success: false, error: { code: 'UPDATE_TASK_CORE_ERROR', message: error.message || 'Unknown error updating task' } }; } finally { if (!wasSilent && isSilentMode()) { disableSilentMode(); } } } catch (error) { logWrapper.error(`Setup error in 
updateTaskByIdDirect: ${error.message}`); if (isSilentMode()) disableSilentMode(); return { success: false, error: { code: 'DIRECT_FUNCTION_SETUP_ERROR', message: error.message || 'Unknown setup error' } }; } } ``` -------------------------------------------------------------------------------- /.github/workflows/claude-docs-updater.yml: -------------------------------------------------------------------------------- ```yaml name: Claude Documentation Updater on: workflow_dispatch: inputs: commit_sha: description: 'The commit SHA that triggered this update' required: true type: string commit_message: description: 'The commit message' required: true type: string changed_files: description: 'List of changed files' required: true type: string commit_diff: description: 'Diff summary of changes' required: true type: string jobs: update-docs: runs-on: ubuntu-latest permissions: contents: write pull-requests: write issues: write steps: - name: Checkout repository uses: actions/checkout@v4 with: ref: next fetch-depth: 0 # Need full history to checkout specific commit - name: Create docs update branch id: create-branch run: | BRANCH_NAME="docs/auto-update-$(date +%Y%m%d-%H%M%S)" git checkout -b $BRANCH_NAME echo "branch_name=$BRANCH_NAME" >> $GITHUB_OUTPUT - name: Run Claude Code to Update Documentation uses: anthropics/claude-code-action@beta with: anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} timeout_minutes: "30" mode: "agent" github_token: ${{ secrets.GITHUB_TOKEN }} experimental_allowed_domains: | .anthropic.com .github.com api.github.com .githubusercontent.com registry.npmjs.org .task-master.dev base_branch: "next" direct_prompt: | You are a documentation specialist. Analyze the recent changes pushed to the 'next' branch and update the documentation accordingly. Recent changes: - Commit: ${{ inputs.commit_message }} - Changed files: ${{ inputs.changed_files }} - Changes summary: ${{ inputs.commit_diff }} Your task: 1. 
Analyze the changes to understand what functionality was added, modified, or removed 2. Check if these changes require documentation updates in apps/docs/ 3. If documentation updates are needed: - Update relevant documentation files in apps/docs/ - Ensure examples are updated if APIs changed - Update any configuration documentation if config options changed - Add new documentation pages if new features were added - Update the changelog or release notes if applicable 4. If no documentation updates are needed, skip creating changes Guidelines: - Focus only on user-facing changes that need documentation - Keep documentation clear, concise, and helpful - Include code examples where appropriate - Maintain consistent documentation style with existing docs - Don't document internal implementation details unless they affect users - Update navigation/menu files if new pages are added Only make changes if the documentation truly needs updating based on the code changes. - name: Check if changes were made id: check-changes run: | if git diff --quiet; then echo "has_changes=false" >> $GITHUB_OUTPUT else echo "has_changes=true" >> $GITHUB_OUTPUT git add -A git config --local user.email "github-actions[bot]@users.noreply.github.com" git config --local user.name "github-actions[bot]" git commit -m "docs: auto-update documentation based on changes in next branch This PR was automatically generated to update documentation based on recent changes. Original commit: ${{ inputs.commit_message }} Co-authored-by: Claude <[email protected]>" fi - name: Push changes and create PR if: steps.check-changes.outputs.has_changes == 'true' env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | git push origin ${{ steps.create-branch.outputs.branch_name }} # Create PR using GitHub CLI gh pr create \ --title "docs: update documentation for recent changes" \ --body "## 📚 Documentation Update This PR automatically updates documentation based on recent changes merged to the \`next\` branch. 
### Original Changes **Commit:** ${{ inputs.commit_sha }} **Message:** ${{ inputs.commit_message }} ### Changed Files in Original Commit \`\`\` ${{ inputs.changed_files }} \`\`\` ### Documentation Updates This PR includes documentation updates to reflect the changes above. Please review to ensure: - [ ] Documentation accurately reflects the changes - [ ] Examples are correct and working - [ ] No important details are missing - [ ] Style is consistent with existing documentation --- *This PR was automatically generated by Claude Code GitHub Action*" \ --base next \ --head ${{ steps.create-branch.outputs.branch_name }} \ --label "documentation" \ --label "automated" ``` -------------------------------------------------------------------------------- /scripts/modules/task-manager/set-task-status.js: -------------------------------------------------------------------------------- ```javascript import path from 'path'; import chalk from 'chalk'; import boxen from 'boxen'; import { log, readJSON, writeJSON, findTaskById, ensureTagMetadata } from '../utils.js'; import { displayBanner } from '../ui.js'; import { validateTaskDependencies } from '../dependency-manager.js'; import { getDebugFlag } from '../config-manager.js'; import updateSingleTaskStatus from './update-single-task-status.js'; import generateTaskFiles from './generate-task-files.js'; import { isValidTaskStatus, TASK_STATUS_OPTIONS } from '../../../src/constants/task-status.js'; /** * Set the status of a task * @param {string} tasksPath - Path to the tasks.json file * @param {string} taskIdInput - Task ID(s) to update * @param {string} newStatus - New status * @param {Object} options - Additional options (mcpLog for MCP mode, projectRoot for tag resolution) * @param {string} [options.projectRoot] - Project root path * @param {string} [options.tag] - Optional tag to override current tag resolution * @param {string} [options.mcpLog] - MCP logger object * @returns {Object|undefined} Result object in MCP mode, 
undefined in CLI mode */ async function setTaskStatus(tasksPath, taskIdInput, newStatus, options = {}) { const { projectRoot, tag } = options; try { if (!isValidTaskStatus(newStatus)) { throw new Error( `Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}` ); } // Determine if we're in MCP mode by checking for mcpLog const isMcpMode = !!options?.mcpLog; // Only display UI elements if not in MCP mode if (!isMcpMode) { console.log( boxen(chalk.white.bold(`Updating Task Status to: ${newStatus}`), { padding: 1, borderColor: 'blue', borderStyle: 'round' }) ); } log('info', `Reading tasks from ${tasksPath}...`); // Read the raw data without tag resolution to preserve tagged structure let rawData = readJSON(tasksPath, projectRoot, tag); // No tag parameter // Handle the case where readJSON returns resolved data with _rawTaggedData if (rawData && rawData._rawTaggedData) { // Use the raw tagged data and discard the resolved view rawData = rawData._rawTaggedData; } // Ensure the tag exists in the raw data if (!rawData || !rawData[tag] || !Array.isArray(rawData[tag].tasks)) { throw new Error( `Invalid tasks file or tag "${tag}" not found at ${tasksPath}` ); } // Get the tasks for the current tag const data = { tasks: rawData[tag].tasks, tag, _rawTaggedData: rawData }; if (!data || !data.tasks) { throw new Error(`No valid tasks found in ${tasksPath}`); } // Handle multiple task IDs (comma-separated) const taskIds = taskIdInput.split(',').map((id) => id.trim()); const updatedTasks = []; // Update each task and capture old status for display for (const id of taskIds) { // Capture old status before updating let oldStatus = 'unknown'; if (id.includes('.')) { // Handle subtask const [parentId, subtaskId] = id .split('.') .map((id) => parseInt(id, 10)); const parentTask = data.tasks.find((t) => t.id === parentId); if (parentTask?.subtasks) { const subtask = parentTask.subtasks.find((st) => st.id === subtaskId); oldStatus = subtask?.status || 
'pending'; } } else { // Handle regular task const taskId = parseInt(id, 10); const task = data.tasks.find((t) => t.id === taskId); oldStatus = task?.status || 'pending'; } await updateSingleTaskStatus(tasksPath, id, newStatus, data, !isMcpMode); updatedTasks.push({ id, oldStatus, newStatus }); } // Update the raw data structure with the modified tasks rawData[tag].tasks = data.tasks; // Ensure the tag has proper metadata ensureTagMetadata(rawData[tag], { description: `Tasks for ${tag} context` }); // Write the updated raw data back to the file // The writeJSON function will automatically filter out _rawTaggedData writeJSON(tasksPath, rawData, projectRoot, tag); // Validate dependencies after status update log('info', 'Validating dependencies after status update...'); validateTaskDependencies(data.tasks); // Generate individual task files // log('info', 'Regenerating task files...'); // await generateTaskFiles(tasksPath, path.dirname(tasksPath), { // mcpLog: options.mcpLog // }); // Display success message - only in CLI mode if (!isMcpMode) { for (const updateInfo of updatedTasks) { const { id, oldStatus, newStatus: updatedStatus } = updateInfo; console.log( boxen( chalk.white.bold(`Successfully updated task ${id} status:`) + '\n' + `From: ${chalk.yellow(oldStatus)}\n` + `To: ${chalk.green(updatedStatus)}`, { padding: 1, borderColor: 'green', borderStyle: 'round' } ) ); } } // Return success value for programmatic use return { success: true, updatedTasks: updatedTasks.map(({ id, oldStatus, newStatus }) => ({ id, oldStatus, newStatus })) }; } catch (error) { log('error', `Error setting task status: ${error.message}`); // Only show error UI in CLI mode if (!options?.mcpLog) { console.error(chalk.red(`Error: ${error.message}`)); // Pass session to getDebugFlag if (getDebugFlag(options?.session)) { // Use getter console.error(error); } process.exit(1); } else { // In MCP mode, throw the error for the caller to handle throw error; } } } export default setTaskStatus; ``` 
-------------------------------------------------------------------------------- /tests/unit/scripts/modules/dependency-manager/fix-dependencies-command.test.js: -------------------------------------------------------------------------------- ```javascript /** * Unit test to ensure fixDependenciesCommand writes JSON with the correct * projectRoot and tag arguments so that tag data is preserved. */ import { jest } from '@jest/globals'; // Mock process.exit to prevent test termination const mockProcessExit = jest.fn(); const originalExit = process.exit; process.exit = mockProcessExit; // Mock utils.js BEFORE importing the module under test jest.unstable_mockModule('../../../../../scripts/modules/utils.js', () => ({ readJSON: jest.fn(), writeJSON: jest.fn(), log: jest.fn(), findProjectRoot: jest.fn(() => '/mock/project/root'), getCurrentTag: jest.fn(() => 'master'), taskExists: jest.fn(() => true), formatTaskId: jest.fn((id) => id), findCycles: jest.fn(() => []), traverseDependencies: jest.fn((sourceTasks, allTasks, options = {}) => []), isSilentMode: jest.fn(() => true), resolveTag: jest.fn(() => 'master'), getTasksForTag: jest.fn(() => []), setTasksForTag: jest.fn(), enableSilentMode: jest.fn(), disableSilentMode: jest.fn(), isEmpty: jest.fn((value) => { if (value === null || value === undefined) return true; if (Array.isArray(value)) return value.length === 0; if (typeof value === 'object' && value !== null) return Object.keys(value).length === 0; return false; // Not an array or object }), resolveEnvVariable: jest.fn() })); // Mock ui.js jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({ displayBanner: jest.fn(), formatDependenciesWithStatus: jest.fn() })); // Mock task-manager.js jest.unstable_mockModule( '../../../../../scripts/modules/task-manager.js', () => ({ generateTaskFiles: jest.fn() }) ); // Mock external libraries jest.unstable_mockModule('chalk', () => ({ default: { green: jest.fn((text) => text), cyan: jest.fn((text) => text), 
bold: jest.fn((text) => text) } })); jest.unstable_mockModule('boxen', () => ({ default: jest.fn((text) => text) })); // Import the mocked modules const { readJSON, writeJSON, log, taskExists } = await import( '../../../../../scripts/modules/utils.js' ); // Import the module under test const { fixDependenciesCommand } = await import( '../../../../../scripts/modules/dependency-manager.js' ); describe('fixDependenciesCommand tag preservation', () => { beforeEach(() => { jest.clearAllMocks(); mockProcessExit.mockClear(); }); afterAll(() => { // Restore original process.exit process.exit = originalExit; }); it('calls writeJSON with projectRoot and tag parameters when changes are made', async () => { const tasksPath = '/mock/tasks.json'; const projectRoot = '/mock/project/root'; const tag = 'master'; // Mock data WITH dependency issues to trigger writeJSON const tasksDataWithIssues = { tasks: [ { id: 1, title: 'Task 1', dependencies: [999] // Non-existent dependency to trigger fix }, { id: 2, title: 'Task 2', dependencies: [] } ], tag: 'master', _rawTaggedData: { master: { tasks: [ { id: 1, title: 'Task 1', dependencies: [999] } ] } } }; readJSON.mockReturnValue(tasksDataWithIssues); taskExists.mockReturnValue(false); // Make dependency invalid to trigger fix await fixDependenciesCommand(tasksPath, { context: { projectRoot, tag } }); // Verify readJSON was called with correct parameters expect(readJSON).toHaveBeenCalledWith(tasksPath, projectRoot, tag); // Verify writeJSON was called (should be triggered by removing invalid dependency) expect(writeJSON).toHaveBeenCalled(); // Check the writeJSON call parameters const writeJSONCalls = writeJSON.mock.calls; const lastWriteCall = writeJSONCalls[writeJSONCalls.length - 1]; const [calledPath, _data, calledProjectRoot, calledTag] = lastWriteCall; expect(calledPath).toBe(tasksPath); expect(calledProjectRoot).toBe(projectRoot); expect(calledTag).toBe(tag); // Verify process.exit was NOT called (meaning the function succeeded) 
expect(mockProcessExit).not.toHaveBeenCalled(); }); it('does not call writeJSON when no changes are needed', async () => { const tasksPath = '/mock/tasks.json'; const projectRoot = '/mock/project/root'; const tag = 'master'; // Mock data WITHOUT dependency issues (no changes needed) const cleanTasksData = { tasks: [ { id: 1, title: 'Task 1', dependencies: [] // Clean, no issues } ], tag: 'master' }; readJSON.mockReturnValue(cleanTasksData); taskExists.mockReturnValue(true); // All dependencies exist await fixDependenciesCommand(tasksPath, { context: { projectRoot, tag } }); // Verify readJSON was called expect(readJSON).toHaveBeenCalledWith(tasksPath, projectRoot, tag); // Verify writeJSON was NOT called (no changes needed) expect(writeJSON).not.toHaveBeenCalled(); // Verify process.exit was NOT called expect(mockProcessExit).not.toHaveBeenCalled(); }); it('handles early exit when no valid tasks found', async () => { const tasksPath = '/mock/tasks.json'; // Mock invalid data to trigger early exit readJSON.mockReturnValue(null); await fixDependenciesCommand(tasksPath, { context: { projectRoot: '/mock', tag: 'master' } }); // Verify readJSON was called expect(readJSON).toHaveBeenCalled(); // Verify writeJSON was NOT called (early exit) expect(writeJSON).not.toHaveBeenCalled(); // Verify process.exit WAS called due to invalid data expect(mockProcessExit).toHaveBeenCalledWith(1); }); }); ``` -------------------------------------------------------------------------------- /src/profiles/zed.js: -------------------------------------------------------------------------------- ```javascript // Zed profile for rule-transformer import path from 'path'; import fs from 'fs'; import { isSilentMode, log } from '../../scripts/modules/utils.js'; import { createProfile } from './base-profile.js'; /** * Transform standard MCP config format to Zed format * @param {Object} mcpConfig - Standard MCP configuration object * @returns {Object} - Transformed Zed configuration object */ 
function transformToZedFormat(mcpConfig) { const zedConfig = {}; // Transform mcpServers to context_servers if (mcpConfig.mcpServers) { zedConfig['context_servers'] = mcpConfig.mcpServers; } // Preserve any other existing settings for (const [key, value] of Object.entries(mcpConfig)) { if (key !== 'mcpServers') { zedConfig[key] = value; } } return zedConfig; } // Lifecycle functions for Zed profile function onAddRulesProfile(targetDir, assetsDir) { // MCP transformation will be handled in onPostConvertRulesProfile // File copying is handled by the base profile via fileMap } function onRemoveRulesProfile(targetDir) { // Clean up .rules (Zed uses .rules directly in root) const userRulesFile = path.join(targetDir, '.rules'); try { // Remove Task Master .rules if (fs.existsSync(userRulesFile)) { fs.rmSync(userRulesFile, { force: true }); log('debug', `[Zed] Removed ${userRulesFile}`); } } catch (err) { log('error', `[Zed] Failed to remove Zed instructions: ${err.message}`); } // MCP Removal: Remove context_servers section const mcpConfigPath = path.join(targetDir, '.zed', 'settings.json'); if (!fs.existsSync(mcpConfigPath)) { log('debug', '[Zed] No .zed/settings.json found to clean up'); return; } try { // Read the current config const configContent = fs.readFileSync(mcpConfigPath, 'utf8'); const config = JSON.parse(configContent); // Check if it has the context_servers section and task-master-ai server if ( config['context_servers'] && config['context_servers']['task-master-ai'] ) { // Remove task-master-ai server delete config['context_servers']['task-master-ai']; // Check if there are other MCP servers in context_servers const remainingServers = Object.keys(config['context_servers']); if (remainingServers.length === 0) { // No other servers, remove entire context_servers section delete config['context_servers']; log('debug', '[Zed] Removed empty context_servers section'); } // Check if config is now empty const remainingKeys = Object.keys(config); if 
(remainingKeys.length === 0) { // Config is empty, remove entire file fs.rmSync(mcpConfigPath, { force: true }); log('info', '[Zed] Removed empty settings.json file'); // Check if .zed directory is empty const zedDirPath = path.join(targetDir, '.zed'); if (fs.existsSync(zedDirPath)) { const remainingContents = fs.readdirSync(zedDirPath); if (remainingContents.length === 0) { fs.rmSync(zedDirPath, { recursive: true, force: true }); log('debug', '[Zed] Removed empty .zed directory'); } } } else { // Write back the modified config fs.writeFileSync( mcpConfigPath, JSON.stringify(config, null, '\t') + '\n' ); log( 'info', '[Zed] Removed TaskMaster from settings.json, preserved other configurations' ); } } else { log('debug', '[Zed] TaskMaster not found in context_servers'); } } catch (error) { log('error', `[Zed] Failed to clean up settings.json: ${error.message}`); } } function onPostConvertRulesProfile(targetDir, assetsDir) { // Handle .rules setup (same as onAddRulesProfile) onAddRulesProfile(targetDir, assetsDir); // Transform MCP config to Zed format const mcpConfigPath = path.join(targetDir, '.zed', 'settings.json'); if (!fs.existsSync(mcpConfigPath)) { log('debug', '[Zed] No .zed/settings.json found to transform'); return; } try { // Read the generated standard MCP config const mcpConfigContent = fs.readFileSync(mcpConfigPath, 'utf8'); const mcpConfig = JSON.parse(mcpConfigContent); // Check if it's already in Zed format (has context_servers) if (mcpConfig['context_servers']) { log( 'info', '[Zed] settings.json already in Zed format, skipping transformation' ); return; } // Transform to Zed format const zedConfig = transformToZedFormat(mcpConfig); // Add "source": "custom" to task-master-ai server for Zed if ( zedConfig['context_servers'] && zedConfig['context_servers']['task-master-ai'] ) { zedConfig['context_servers']['task-master-ai'].source = 'custom'; } // Write back the transformed config with proper formatting fs.writeFileSync( mcpConfigPath, 
JSON.stringify(zedConfig, null, '\t') + '\n' ); log('info', '[Zed] Transformed settings.json to Zed format'); log('debug', '[Zed] Renamed mcpServers to context_servers'); } catch (error) { log('error', `[Zed] Failed to transform settings.json: ${error.message}`); } } // Create and export zed profile using the base factory export const zedProfile = createProfile({ name: 'zed', displayName: 'Zed', url: 'zed.dev', docsUrl: 'zed.dev/docs', profileDir: '.zed', rulesDir: '.', mcpConfig: true, mcpConfigName: 'settings.json', includeDefaultRules: false, fileMap: { 'AGENTS.md': '.rules' }, onAdd: onAddRulesProfile, onRemove: onRemoveRulesProfile, onPostConvert: onPostConvertRulesProfile }); // Export lifecycle functions separately to avoid naming conflicts export { onAddRulesProfile, onRemoveRulesProfile, onPostConvertRulesProfile }; ``` -------------------------------------------------------------------------------- /apps/cli/src/utils/auto-update.ts: -------------------------------------------------------------------------------- ```typescript /** * @fileoverview Auto-update utilities for task-master-ai CLI */ import { spawn } from 'child_process'; import https from 'https'; import chalk from 'chalk'; import ora from 'ora'; import boxen from 'boxen'; export interface UpdateInfo { currentVersion: string; latestVersion: string; needsUpdate: boolean; } /** * Get current version from build-time injected environment variable */ function getCurrentVersion(): string { // Version is injected at build time via TM_PUBLIC_VERSION const version = process.env.TM_PUBLIC_VERSION; if (version && version !== 'unknown') { return version; } // Fallback for development or if injection failed console.warn('Could not read version from TM_PUBLIC_VERSION, using fallback'); return '0.0.0'; } /** * Compare semantic versions with proper pre-release handling * @param v1 - First version * @param v2 - Second version * @returns -1 if v1 < v2, 0 if v1 = v2, 1 if v1 > v2 */ export function 
compareVersions(v1: string, v2: string): number { const toParts = (v: string) => { const [core, pre = ''] = v.split('-', 2); const nums = core.split('.').map((n) => Number.parseInt(n, 10) || 0); return { nums, pre }; }; const a = toParts(v1); const b = toParts(v2); const len = Math.max(a.nums.length, b.nums.length); // Compare numeric parts for (let i = 0; i < len; i++) { const d = (a.nums[i] || 0) - (b.nums[i] || 0); if (d !== 0) return d < 0 ? -1 : 1; } // Handle pre-release comparison if (a.pre && !b.pre) return -1; // prerelease < release if (!a.pre && b.pre) return 1; // release > prerelease if (a.pre === b.pre) return 0; // same or both empty return a.pre < b.pre ? -1 : 1; // basic prerelease tie-break } /** * Check for newer version of task-master-ai */ export async function checkForUpdate( currentVersionOverride?: string ): Promise<UpdateInfo> { const currentVersion = currentVersionOverride || getCurrentVersion(); return new Promise((resolve) => { const options = { hostname: 'registry.npmjs.org', path: '/task-master-ai', method: 'GET', headers: { Accept: 'application/vnd.npm.install-v1+json', 'User-Agent': `task-master-ai/${currentVersion}` } }; const req = https.request(options, (res) => { let data = ''; res.on('data', (chunk) => { data += chunk; }); res.on('end', () => { try { if (res.statusCode !== 200) throw new Error(`npm registry status ${res.statusCode}`); const npmData = JSON.parse(data); const latestVersion = npmData['dist-tags']?.latest || currentVersion; const needsUpdate = compareVersions(currentVersion, latestVersion) < 0; resolve({ currentVersion, latestVersion, needsUpdate }); } catch (error) { resolve({ currentVersion, latestVersion: currentVersion, needsUpdate: false }); } }); }); req.on('error', () => { resolve({ currentVersion, latestVersion: currentVersion, needsUpdate: false }); }); req.setTimeout(3000, () => { req.destroy(); resolve({ currentVersion, latestVersion: currentVersion, needsUpdate: false }); }); req.end(); }); } /** * 
Display upgrade notification message */ export function displayUpgradeNotification( currentVersion: string, latestVersion: string ) { const message = boxen( `${chalk.blue.bold('Update Available!')} ${chalk.dim(currentVersion)} → ${chalk.green(latestVersion)}\n\n` + `Auto-updating to the latest version with new features and bug fixes...`, { padding: 1, margin: { top: 1, bottom: 1 }, borderColor: 'yellow', borderStyle: 'round' } ); console.log(message); } /** * Automatically update task-master-ai to the latest version */ export async function performAutoUpdate( latestVersion: string ): Promise<boolean> { if (process.env.TASKMASTER_SKIP_AUTO_UPDATE === '1' || process.env.CI) { console.log( chalk.dim('Skipping auto-update (TASKMASTER_SKIP_AUTO_UPDATE/CI).') ); return false; } const spinner = ora({ text: chalk.blue( `Updating task-master-ai to version ${chalk.green(latestVersion)}` ), spinner: 'dots', color: 'blue' }).start(); return new Promise((resolve) => { const updateProcess = spawn( 'npm', [ 'install', '-g', `task-master-ai@${latestVersion}`, '--no-fund', '--no-audit', '--loglevel=warn' ], { stdio: ['ignore', 'pipe', 'pipe'] } ); let errorOutput = ''; updateProcess.stdout.on('data', () => { // Update spinner text with progress spinner.text = chalk.blue( `Installing task-master-ai@${latestVersion}...` ); }); updateProcess.stderr.on('data', (data) => { errorOutput += data.toString(); }); updateProcess.on('close', (code) => { if (code === 0) { spinner.succeed( chalk.green( `Successfully updated to version ${chalk.bold(latestVersion)}` ) ); console.log( chalk.dim('Please restart your command to use the new version.') ); resolve(true); } else { spinner.fail(chalk.red('Auto-update failed')); console.log( chalk.cyan( `Please run manually: npm install -g task-master-ai@${latestVersion}` ) ); if (errorOutput) { console.log(chalk.dim(`Error: ${errorOutput.trim()}`)); } resolve(false); } }); updateProcess.on('error', (error) => { spinner.fail(chalk.red('Auto-update 
failed')); console.log(chalk.red('Error:'), error.message); console.log( chalk.cyan( `Please run manually: npm install -g task-master-ai@${latestVersion}` ) ); resolve(false); }); }); } ``` -------------------------------------------------------------------------------- /apps/docs/best-practices/advanced-tasks.mdx: -------------------------------------------------------------------------------- ```markdown --- title: Advanced Tasks sidebarTitle: "Advanced Tasks" --- ## AI-Driven Development Workflow The Cursor agent is pre-configured (via the rules file) to follow this workflow: ### 1. Task Discovery and Selection Ask the agent to list available tasks: ``` What tasks are available to work on next? ``` ``` Can you show me tasks 1, 3, and 5 to understand their current status? ``` The agent will: - Run `task-master list` to see all tasks - Run `task-master next` to determine the next task to work on - Run `task-master show 1,3,5` to display multiple tasks with interactive options - Analyze dependencies to determine which tasks are ready to be worked on - Prioritize tasks based on priority level and ID order - Suggest the next task(s) to implement ### 2. Task Implementation When implementing a task, the agent will: - Reference the task's details section for implementation specifics - Consider dependencies on previous tasks - Follow the project's coding standards - Create appropriate tests based on the task's testStrategy You can ask: ``` Let's implement task 3. What does it involve? ``` ### 2.1. Viewing Multiple Tasks For efficient context gathering and batch operations: ``` Show me tasks 5, 7, and 9 so I can plan my implementation approach. ``` The agent will: - Run `task-master show 5,7,9` to display a compact summary table - Show task status, priority, and progress indicators - Provide an interactive action menu with batch operations - Allow you to perform group actions like marking multiple tasks as in-progress ### 3. 
Task Verification Before marking a task as complete, verify it according to: - The task's specified testStrategy - Any automated tests in the codebase - Manual verification if required ### 4. Task Completion When a task is completed, tell the agent: ``` Task 3 is now complete. Please update its status. ``` The agent will execute: ```bash task-master set-status --id=3 --status=done ``` ### 5. Handling Implementation Drift If during implementation, you discover that: - The current approach differs significantly from what was planned - Future tasks need to be modified due to current implementation choices - New dependencies or requirements have emerged Tell the agent: ``` We've decided to use MongoDB instead of PostgreSQL. Can you update all future tasks (from ID 4) to reflect this change? ``` The agent will execute: ```bash task-master update --from=4 --prompt="Now we are using MongoDB instead of PostgreSQL." # OR, if research is needed to find best practices for MongoDB: task-master update --from=4 --prompt="Update to use MongoDB, researching best practices" --research ``` This will rewrite or re-scope subsequent tasks in tasks.json while preserving completed work. ### 6. Reorganizing Tasks If you need to reorganize your task structure: ``` I think subtask 5.2 would fit better as part of task 7 instead. Can you move it there? 
``` The agent will execute: ```bash task-master move --from=5.2 --to=7.3 ``` You can reorganize tasks in various ways: - Moving a standalone task to become a subtask: `--from=5 --to=7` - Moving a subtask to become a standalone task: `--from=5.2 --to=7` - Moving a subtask to a different parent: `--from=5.2 --to=7.3` - Reordering subtasks within the same parent: `--from=5.2 --to=5.4` - Moving a task to a new ID position: `--from=5 --to=25` (even if task 25 doesn't exist yet) - Moving multiple tasks at once: `--from=10,11,12 --to=16,17,18` (must have same number of IDs, Taskmaster will look through each position) When moving tasks to new IDs: - The system automatically creates placeholder tasks for non-existent destination IDs - This prevents accidental data loss during reorganization - Any tasks that depend on moved tasks will have their dependencies updated - When moving a parent task, all its subtasks are automatically moved with it and renumbered This is particularly useful as your project understanding evolves and you need to refine your task structure. ### 7. Resolving Merge Conflicts with Tasks When working with a team, you might encounter merge conflicts in your tasks.json file if multiple team members create tasks on different branches. The move command makes resolving these conflicts straightforward: ``` I just merged the main branch and there's a conflict with tasks.json. My teammates created tasks 10-15 while I created tasks 10-12 on my branch. Can you help me resolve this? ``` The agent will help you: 1. Keep your teammates' tasks (10-15) 2. Move your tasks to new positions to avoid conflicts: ```bash # Move your tasks to new positions (e.g., 16-18) task-master move --from=10 --to=16 task-master move --from=11 --to=17 task-master move --from=12 --to=18 ``` This approach preserves everyone's work while maintaining a clean task structure, making it much easier to handle task conflicts than trying to manually merge JSON files. ### 8. 
Breaking Down Complex Tasks For complex tasks that need more granularity: ``` Task 5 seems complex. Can you break it down into subtasks? ``` The agent will execute: ```bash task-master expand --id=5 --num=3 ``` You can provide additional context: ``` Please break down task 5 with a focus on security considerations. ``` The agent will execute: ```bash task-master expand --id=5 --prompt="Focus on security aspects" ``` You can also expand all pending tasks: ``` Please break down all pending tasks into subtasks. ``` The agent will execute: ```bash task-master expand --all ``` For research-backed subtask generation using the configured research model: ``` Please break down task 5 using research-backed generation. ``` The agent will execute: ```bash task-master expand --id=5 --research ``` ``` -------------------------------------------------------------------------------- /packages/tm-core/src/storage/file-storage/format-handler.ts: -------------------------------------------------------------------------------- ```typescript /** * @fileoverview Format handler for task storage files */ import type { Task, TaskMetadata } from '../../types/index.js'; export interface FileStorageData { tasks: Task[]; metadata: TaskMetadata; } export type FileFormat = 'legacy' | 'standard'; /** * Handles format detection and conversion between legacy and standard task file formats */ export class FormatHandler { /** * Detect the format of the raw data */ detectFormat(data: any): FileFormat { if (!data || typeof data !== 'object') { return 'standard'; } const keys = Object.keys(data); // Check if this uses the legacy format with tag keys // Legacy format has keys that are not 'tasks' or 'metadata' const hasLegacyFormat = keys.some( (key) => key !== 'tasks' && key !== 'metadata' ); return hasLegacyFormat ? 
'legacy' : 'standard'; } /** * Extract tasks from data for a specific tag */ extractTasks(data: any, tag: string): Task[] { if (!data) { return []; } const format = this.detectFormat(data); if (format === 'legacy') { return this.extractTasksFromLegacy(data, tag); } return this.extractTasksFromStandard(data); } /** * Extract tasks from legacy format */ private extractTasksFromLegacy(data: any, tag: string): Task[] { // First check if the requested tag exists if (tag in data) { const tagData = data[tag]; return tagData?.tasks || []; } // If we're looking for 'master' tag but it doesn't exist, try the first available tag const availableKeys = Object.keys(data).filter( (key) => key !== 'tasks' && key !== 'metadata' ); if (tag === 'master' && availableKeys.length > 0) { const firstTag = availableKeys[0]; const tagData = data[firstTag]; return tagData?.tasks || []; } return []; } /** * Extract tasks from standard format */ private extractTasksFromStandard(data: any): Task[] { return data?.tasks || []; } /** * Extract metadata from data for a specific tag */ extractMetadata(data: any, tag: string): TaskMetadata | null { if (!data) { return null; } const format = this.detectFormat(data); if (format === 'legacy') { return this.extractMetadataFromLegacy(data, tag); } return this.extractMetadataFromStandard(data); } /** * Extract metadata from legacy format */ private extractMetadataFromLegacy( data: any, tag: string ): TaskMetadata | null { if (tag in data) { const tagData = data[tag]; // Generate metadata if not present in legacy format if (!tagData?.metadata && tagData?.tasks) { return this.generateMetadataFromTasks(tagData.tasks, tag); } return tagData?.metadata || null; } // If we're looking for 'master' tag but it doesn't exist, try the first available tag const availableKeys = Object.keys(data).filter( (key) => key !== 'tasks' && key !== 'metadata' ); if (tag === 'master' && availableKeys.length > 0) { const firstTag = availableKeys[0]; const tagData = data[firstTag]; 
if (!tagData?.metadata && tagData?.tasks) { return this.generateMetadataFromTasks(tagData.tasks, firstTag); } return tagData?.metadata || null; } return null; } /** * Extract metadata from standard format */ private extractMetadataFromStandard(data: any): TaskMetadata | null { return data?.metadata || null; } /** * Extract all available tags from the single tasks.json file */ extractTags(data: any): string[] { if (!data) { return []; } const format = this.detectFormat(data); if (format === 'legacy') { // Return all tag keys from legacy format const keys = Object.keys(data); return keys.filter((key) => key !== 'tasks' && key !== 'metadata'); } // Standard format - just has 'master' tag return ['master']; } /** * Convert tasks and metadata to the appropriate format for saving */ convertToSaveFormat( tasks: Task[], metadata: TaskMetadata, existingData: any, tag: string ): any { const resolvedTag = tag || 'master'; // Normalize task IDs to strings const normalizedTasks = this.normalizeTasks(tasks); // Check if existing file uses legacy format if (existingData && this.detectFormat(existingData) === 'legacy') { return this.convertToLegacyFormat(normalizedTasks, metadata, resolvedTag); } // Use standard format for new files return this.convertToStandardFormat(normalizedTasks, metadata, tag); } /** * Convert to legacy format */ private convertToLegacyFormat( tasks: Task[], metadata: TaskMetadata, tag: string ): any { return { [tag]: { tasks, metadata: { ...metadata, tags: [tag] } } }; } /** * Convert to standard format */ private convertToStandardFormat( tasks: Task[], metadata: TaskMetadata, tag?: string ): FileStorageData { return { tasks, metadata: { ...metadata, tags: tag ? 
[tag] : [] } }; } /** * Normalize task IDs - keep Task IDs as strings, Subtask IDs as numbers */ private normalizeTasks(tasks: Task[]): Task[] { return tasks.map((task) => ({ ...task, id: String(task.id), // Task IDs are strings dependencies: task.dependencies?.map((dep) => String(dep)) || [], subtasks: task.subtasks?.map((subtask) => ({ ...subtask, id: Number(subtask.id), // Subtask IDs are numbers parentId: String(subtask.parentId) // Parent ID is string (Task ID) })) || [] })); } /** * Generate metadata from tasks when not present */ private generateMetadataFromTasks(tasks: Task[], tag: string): TaskMetadata { return { version: '1.0.0', lastModified: new Date().toISOString(), taskCount: tasks.length, completedCount: tasks.filter((t: any) => t.status === 'done').length, tags: [tag] }; } } ``` -------------------------------------------------------------------------------- /packages/tm-core/src/config/services/config-merger.service.spec.ts: -------------------------------------------------------------------------------- ```typescript /** * @fileoverview Unit tests for ConfigMerger service */ import { describe, it, expect, beforeEach } from 'vitest'; import { ConfigMerger, CONFIG_PRECEDENCE } from './config-merger.service.js'; describe('ConfigMerger', () => { let merger: ConfigMerger; beforeEach(() => { merger = new ConfigMerger(); }); describe('addSource', () => { it('should add configuration source', () => { const source = { name: 'test', config: { test: true }, precedence: 1 }; merger.addSource(source); const sources = merger.getSources(); expect(sources).toHaveLength(1); expect(sources[0]).toEqual(source); }); it('should add multiple sources', () => { merger.addSource({ name: 'source1', config: {}, precedence: 1 }); merger.addSource({ name: 'source2', config: {}, precedence: 2 }); expect(merger.getSources()).toHaveLength(2); }); }); describe('clearSources', () => { it('should remove all configuration sources', () => { merger.addSource({ name: 'test', config: 
{}, precedence: 1 }); merger.clearSources(); expect(merger.getSources()).toHaveLength(0); }); }); describe('merge', () => { it('should merge configurations based on precedence', () => { merger.addSource({ name: 'low', config: { a: 1, b: 2 }, precedence: 1 }); merger.addSource({ name: 'high', config: { a: 3, c: 4 }, precedence: 2 }); const result = merger.merge(); expect(result).toEqual({ a: 3, // High precedence wins b: 2, // Only in low c: 4 // Only in high }); }); it('should deep merge nested objects', () => { merger.addSource({ name: 'base', config: { models: { main: 'model1', fallback: 'model2' }, storage: { type: 'file' as const } }, precedence: 1 }); merger.addSource({ name: 'override', config: { models: { main: 'model3' }, storage: { encoding: 'utf8' as const } }, precedence: 2 }); const result = merger.merge(); expect(result).toEqual({ models: { main: 'model3', // Overridden fallback: 'model2' // Preserved }, storage: { type: 'file', // Preserved encoding: 'utf8' // Added } }); }); it('should handle arrays by replacement', () => { merger.addSource({ name: 'base', config: { items: [1, 2, 3] }, precedence: 1 }); merger.addSource({ name: 'override', config: { items: [4, 5] }, precedence: 2 }); const result = merger.merge(); expect(result.items).toEqual([4, 5]); // Arrays are replaced, not merged }); it('should ignore null and undefined values', () => { merger.addSource({ name: 'base', config: { a: 1, b: 2 }, precedence: 1 }); merger.addSource({ name: 'override', config: { a: null, b: undefined, c: 3 } as any, precedence: 2 }); const result = merger.merge(); expect(result).toEqual({ a: 1, // null ignored b: 2, // undefined ignored c: 3 // new value added }); }); it('should return empty object when no sources', () => { const result = merger.merge(); expect(result).toEqual({}); }); it('should use CONFIG_PRECEDENCE constants correctly', () => { merger.addSource({ name: 'defaults', config: { level: 'default' }, precedence: CONFIG_PRECEDENCE.DEFAULTS }); 
merger.addSource({ name: 'local', config: { level: 'local' }, precedence: CONFIG_PRECEDENCE.LOCAL }); merger.addSource({ name: 'environment', config: { level: 'env' }, precedence: CONFIG_PRECEDENCE.ENVIRONMENT }); const result = merger.merge(); expect(result.level).toBe('env'); // Highest precedence wins }); }); describe('getSources', () => { it('should return sources sorted by precedence (highest first)', () => { merger.addSource({ name: 'low', config: {}, precedence: 1 }); merger.addSource({ name: 'high', config: {}, precedence: 3 }); merger.addSource({ name: 'medium', config: {}, precedence: 2 }); const sources = merger.getSources(); expect(sources[0].name).toBe('high'); expect(sources[1].name).toBe('medium'); expect(sources[2].name).toBe('low'); }); it('should return a copy of sources array', () => { merger.addSource({ name: 'test', config: {}, precedence: 1 }); const sources1 = merger.getSources(); const sources2 = merger.getSources(); expect(sources1).not.toBe(sources2); // Different array instances expect(sources1).toEqual(sources2); // Same content }); }); describe('hasSource', () => { it('should return true when source exists', () => { merger.addSource({ name: 'test', config: {}, precedence: 1 }); expect(merger.hasSource('test')).toBe(true); }); it('should return false when source does not exist', () => { expect(merger.hasSource('nonexistent')).toBe(false); }); }); describe('removeSource', () => { it('should remove source by name and return true', () => { merger.addSource({ name: 'test', config: {}, precedence: 1 }); merger.addSource({ name: 'keep', config: {}, precedence: 2 }); const removed = merger.removeSource('test'); expect(removed).toBe(true); expect(merger.hasSource('test')).toBe(false); expect(merger.hasSource('keep')).toBe(true); }); it('should return false when source does not exist', () => { const removed = merger.removeSource('nonexistent'); expect(removed).toBe(false); }); it('should handle removing all sources', () => { merger.addSource({ 
name: 'test1', config: {}, precedence: 1 }); merger.addSource({ name: 'test2', config: {}, precedence: 2 }); merger.removeSource('test1'); merger.removeSource('test2'); expect(merger.getSources()).toHaveLength(0); }); }); }); ``` -------------------------------------------------------------------------------- /mcp-server/src/core/direct-functions/add-task.js: -------------------------------------------------------------------------------- ```javascript /** * add-task.js * Direct function implementation for adding a new task */ import { addTask } from '../../../../scripts/modules/task-manager.js'; import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js'; import { createLogWrapper } from '../../tools/utils.js'; /** * Direct function wrapper for adding a new task with error handling. * * @param {Object} args - Command arguments * @param {string} [args.prompt] - Description of the task to add (required if not using manual fields) * @param {string} [args.title] - Task title (for manual task creation) * @param {string} [args.description] - Task description (for manual task creation) * @param {string} [args.details] - Implementation details (for manual task creation) * @param {string} [args.testStrategy] - Test strategy (for manual task creation) * @param {string} [args.dependencies] - Comma-separated list of task IDs this task depends on * @param {string} [args.priority='medium'] - Task priority (high, medium, low) * @param {string} [args.tasksJsonPath] - Path to the tasks.json file (resolved by tool) * @param {boolean} [args.research=false] - Whether to use research capabilities for task creation * @param {string} [args.projectRoot] - Project root path * @param {string} [args.tag] - Tag for the task (optional) * @param {Object} log - Logger object * @param {Object} context - Additional context (session) * @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } } */ export 
async function addTaskDirect(args, log, context = {}) { // Destructure expected args (including research and projectRoot) const { tasksJsonPath, prompt, dependencies, priority, research, projectRoot, tag } = args; const { session } = context; // Destructure session from context // Enable silent mode to prevent console logs from interfering with JSON response enableSilentMode(); // Create logger wrapper using the utility const mcpLog = createLogWrapper(log); try { // Check if tasksJsonPath was provided if (!tasksJsonPath) { log.error('addTaskDirect called without tasksJsonPath'); disableSilentMode(); // Disable before returning return { success: false, error: { code: 'MISSING_ARGUMENT', message: 'tasksJsonPath is required' } }; } // Use provided path const tasksPath = tasksJsonPath; // Check if this is manual task creation or AI-driven task creation const isManualCreation = args.title && args.description; // Check required parameters if (!args.prompt && !isManualCreation) { log.error( 'Missing required parameters: either prompt or title+description must be provided' ); disableSilentMode(); return { success: false, error: { code: 'MISSING_PARAMETER', message: 'Either the prompt parameter or both title and description parameters are required for adding a task' } }; } // Extract and prepare parameters const taskDependencies = Array.isArray(dependencies) ? dependencies // Already an array if passed directly : dependencies // Check if dependencies exist and are a string ? 
String(dependencies) .split(',') .map((id) => parseInt(id.trim(), 10)) // Split, trim, and parse : []; // Default to empty array if null/undefined const taskPriority = priority || 'medium'; // Default priority let manualTaskData = null; let newTaskId; let telemetryData; let tagInfo; if (isManualCreation) { // Create manual task data object manualTaskData = { title: args.title, description: args.description, details: args.details || '', testStrategy: args.testStrategy || '' }; log.info( `Adding new task manually with title: "${args.title}", dependencies: [${taskDependencies.join(', ')}], priority: ${priority}` ); // Call the addTask function with manual task data const result = await addTask( tasksPath, null, // prompt is null for manual creation taskDependencies, taskPriority, { session, mcpLog, projectRoot, commandName: 'add-task', outputType: 'mcp', tag }, 'json', // outputFormat manualTaskData, // Pass the manual task data false // research flag is false for manual creation ); newTaskId = result.newTaskId; telemetryData = result.telemetryData; tagInfo = result.tagInfo; } else { // AI-driven task creation log.info( `Adding new task with prompt: "${prompt}", dependencies: [${taskDependencies.join(', ')}], priority: ${taskPriority}, research: ${research}` ); // Call the addTask function, passing the research flag const result = await addTask( tasksPath, prompt, // Use the prompt for AI creation taskDependencies, taskPriority, { session, mcpLog, projectRoot, commandName: 'add-task', outputType: 'mcp', tag }, 'json', // outputFormat null, // manualTaskData is null for AI creation research // Pass the research flag ); newTaskId = result.newTaskId; telemetryData = result.telemetryData; tagInfo = result.tagInfo; } // Restore normal logging disableSilentMode(); return { success: true, data: { taskId: newTaskId, message: `Successfully added new task #${newTaskId}`, telemetryData: telemetryData, tagInfo: tagInfo } }; } catch (error) { // Make sure to restore normal logging 
even if there's an error disableSilentMode(); log.error(`Error in addTaskDirect: ${error.message}`); // Add specific error code checks if needed return { success: false, error: { code: error.code || 'ADD_TASK_ERROR', // Use error code if available message: error.message } }; } } ``` -------------------------------------------------------------------------------- /scripts/modules/sync-readme.js: -------------------------------------------------------------------------------- ```javascript import fs from 'fs'; import path from 'path'; import chalk from 'chalk'; import { log, findProjectRoot } from './utils.js'; import { getProjectName } from './config-manager.js'; import listTasks from './task-manager/list-tasks.js'; /** * Creates a basic README structure if one doesn't exist * @param {string} projectName - Name of the project * @returns {string} - Basic README content */ function createBasicReadme(projectName) { return `# ${projectName} This project is managed using Task Master. `; } /** * Create UTM tracking URL for task-master.dev * @param {string} projectRoot - The project root path * @returns {string} - UTM tracked URL */ function createTaskMasterUrl(projectRoot) { // Get the actual folder name from the project root path const folderName = path.basename(projectRoot); // Clean folder name for UTM (replace spaces/special chars with hyphens) const cleanFolderName = folderName .toLowerCase() .replace(/[^a-z0-9]/g, '-') .replace(/-+/g, '-') .replace(/^-|-$/g, ''); const utmParams = new URLSearchParams({ utm_source: 'github-readme', utm_medium: 'readme-export', utm_campaign: cleanFolderName || 'task-sync', utm_content: 'task-export-link' }); return `https://task-master.dev?${utmParams.toString()}`; } /** * Create the start marker with metadata * @param {Object} options - Export options * @returns {string} - Formatted start marker */ function createStartMarker(options) { const { timestamp, withSubtasks, status, projectRoot } = options; // Format status filter text 
const statusText = status ? `Status filter: ${status}` : 'Status filter: none'; const subtasksText = withSubtasks ? 'with subtasks' : 'without subtasks'; // Create the export info content const exportInfo = `🎯 **Taskmaster Export** - ${timestamp}\n` + `📋 Export: ${subtasksText} • ${statusText}\n` + `🔗 Powered by [Task Master](${createTaskMasterUrl(projectRoot)})`; // Create a markdown box using code blocks and emojis to mimic our UI style const boxContent = `<!-- TASKMASTER_EXPORT_START -->\n` + `> ${exportInfo.split('\n').join('\n> ')}\n\n`; return boxContent; } /** * Create the end marker * @returns {string} - Formatted end marker */ function createEndMarker() { return ( `\n> 📋 **End of Taskmaster Export** - Tasks are synced from your project using the \`sync-readme\` command.\n` + `<!-- TASKMASTER_EXPORT_END -->\n` ); } /** * Syncs the current task list to README.md at the project root * @param {string} projectRoot - Path to the project root directory * @param {Object} options - Options for syncing * @param {boolean} options.withSubtasks - Include subtasks in the output (default: false) * @param {string} options.status - Filter by status (e.g., 'pending', 'done') * @param {string} options.tasksPath - Custom path to tasks.json * @returns {boolean} - True if sync was successful, false otherwise * TODO: Add tag support - this is not currently supported how we want to handle this - Parthy */ export async function syncTasksToReadme(projectRoot = null, options = {}) { try { const actualProjectRoot = projectRoot || findProjectRoot() || '.'; const { withSubtasks = false, status, tasksPath, tag } = options; // Get current tasks using the list-tasks functionality with markdown-readme format const tasksOutput = await listTasks( tasksPath || path.join(actualProjectRoot, '.taskmaster', 'tasks', 'tasks.json'), status, null, withSubtasks, 'markdown-readme', { projectRoot, tag } ); if (!tasksOutput) { console.log(chalk.red('❌ Failed to generate task output')); return false; } 
// Generate timestamp and metadata const timestamp = new Date().toISOString().replace('T', ' ').substring(0, 19) + ' UTC'; const projectName = getProjectName(actualProjectRoot); // Create the export markers with metadata const startMarker = createStartMarker({ timestamp, withSubtasks, status, projectRoot: actualProjectRoot }); const endMarker = createEndMarker(); // Create the complete task section const taskSection = startMarker + tasksOutput + endMarker; // Read current README content const readmePath = path.join(actualProjectRoot, 'README.md'); let readmeContent = ''; try { readmeContent = fs.readFileSync(readmePath, 'utf8'); } catch (err) { if (err.code === 'ENOENT') { // Create basic README if it doesn't exist readmeContent = createBasicReadme(projectName); } else { throw err; } } // Check if export markers exist and replace content between them const startComment = '<!-- TASKMASTER_EXPORT_START -->'; const endComment = '<!-- TASKMASTER_EXPORT_END -->'; let updatedContent; const startIndex = readmeContent.indexOf(startComment); const endIndex = readmeContent.indexOf(endComment); if (startIndex !== -1 && endIndex !== -1) { // Replace existing task section const beforeTasks = readmeContent.substring(0, startIndex); const afterTasks = readmeContent.substring(endIndex + endComment.length); updatedContent = beforeTasks + taskSection + afterTasks; } else { // Append to end of README updatedContent = readmeContent + '\n' + taskSection; } // Write updated content to README fs.writeFileSync(readmePath, updatedContent, 'utf8'); console.log(chalk.green('✅ Successfully synced tasks to README.md')); console.log( chalk.cyan( `📋 Export details: ${withSubtasks ? 'with' : 'without'} subtasks${status ? 
`, status: ${status}` : ''}` ) ); console.log(chalk.gray(`📍 Location: ${readmePath}`)); return true; } catch (error) { console.log(chalk.red('❌ Failed to sync tasks to README:'), error.message); log('error', `README sync error: ${error.message}`); return false; } } export default syncTasksToReadme; ``` -------------------------------------------------------------------------------- /tests/fixtures/sample-prd.txt: -------------------------------------------------------------------------------- ``` <context> # Overview This document outlines the requirements for a minimal web-based URL Shortener application. The application allows users to input a long URL and receive a shorter, alias URL that redirects to the original destination. This serves as a basic example of a micro-SaaS product. It's intended for anyone needing to create shorter links for sharing. The value is in providing a simple, functional utility accessible via a web browser. # Core Features 1. **URL Input & Shortening:** A user interface with an input field for pasting a long URL and a button to trigger the shortening process. - *Why:* The primary function for the user interaction. - *How:* A React component with a text input and a submit button. Clicking the button sends the long URL to a backend API. 2. **Short URL Display:** After successful shortening, the application displays the newly generated short URL to the user. - *Why:* Provides the result of the core function to the user. - *How:* The React frontend updates to show the short URL returned by the API (e.g., `http://your-domain.com/aB3cD`). Include a "copy to clipboard" button for convenience. 3. **URL Redirection:** Accessing a generated short URL in a browser redirects the user to the original long URL. - *Why:* The fundamental purpose of the shortened link. * *How:* A backend API endpoint handles requests to `/:shortCode`. It looks up the code in a data store and issues an HTTP redirect (301 or 302) to the corresponding long URL. 4. 
**Basic Persistence:** Short URL mappings (short code -> long URL) persist across requests. - *Why:* Short URLs need to remain functional after creation. * *How:* A simple backend data store (e.g., initially an in-memory object for testing, then potentially a JSON file or simple database) holds the mappings. # User Experience - **User Persona:** Anyone wanting to shorten a long web link. - **Key User Flow:** User visits the web app -> Pastes a long URL into the input field -> Clicks "Shorten" -> Sees the generated short URL -> Copies the short URL -> (Later) Uses the short URL in a browser and gets redirected. - **UI/UX Considerations:** Clean, minimal single-page interface. Clear input field, prominent button, easy-to-read display of the short URL, copy button. Basic validation feedback (e.g., "Invalid URL", "Success!"). </context> <PRD> # Technical Architecture - **System Components:** - Frontend: Single Page Application (SPA) built with Vite + React. - Backend: Simple API server (e.g., Node.js with Express). - **Data Model:** A key-value store mapping `shortCode` (string) to `longUrl` (string). - **APIs & Integrations:** - Backend API: - `POST /api/shorten`: Accepts `{ longUrl: string }` in the request body. Generates a unique `shortCode`, stores the mapping, returns `{ shortUrl: string }`. - `GET /:shortCode`: Looks up `shortCode`. If found, performs HTTP redirect to `longUrl`. If not found, returns 404. - **Infrastructure:** Frontend can be hosted on static hosting. Backend needs a simple server environment (Node.js). - **Libraries:** - Frontend: `react`, `react-dom`, `axios` (or `fetch` API) for API calls. Consider a simple state management solution if needed (e.g., `useState`, `useContext`). - Backend: `express`, `nanoid` (or similar for short code generation). # Development Roadmap - **MVP Requirements:** 1. Setup Vite + React project. 2. Create basic React UI components (InputForm, ResultDisplay). 3. Setup basic Node.js/Express backend server. 4. 
Implement backend data storage module (start with in-memory object). 5. Implement unique short code generation logic (e.g., using `nanoid`). 6. Implement backend `POST /api/shorten` endpoint logic. 7. Implement backend `GET /:shortCode` redirect logic. 8. Implement frontend logic to take input, call `POST /api/shorten`, and display the result. 9. Basic frontend input validation (check if likely a URL). - **Future Enhancements:** User accounts, custom short codes, analytics (click tracking), using a persistent database, error handling improvements, UI styling. (Out of scope for MVP). # Logical Dependency Chain 1. Vite + React Project Setup. 2. Basic Backend Server Setup (Express). 3. Backend Storage Module (in-memory first). 4. Short Code Generation Logic. 5. Implement `POST /api/shorten` endpoint (depends on 3 & 4). 6. Implement `GET /:shortCode` endpoint (depends on 3). 7. Frontend UI Components. 8. Frontend logic to call `POST /api/shorten` (depends on 5 & 7). 9. Frontend display logic (depends on 7 & 8). *Goal is to get the backend API working first, then build the frontend to consume it.* # Risks and Mitigations - **Risk:** Short code collisions (generating the same code twice). - **Mitigation (MVP):** Use a library like `nanoid` with sufficient length to make collisions highly improbable for a simple service. Add a retry loop in generation if a collision *is* detected (check if code exists before storing). - **Risk:** Storing invalid or malicious URLs. - **Mitigation (MVP):** Basic URL validation on the frontend (simple regex) and potentially on the backend. Sanitize input. Advanced checks are out of scope. - **Risk:** Scalability of in-memory store. - **Mitigation (MVP):** Acceptable for MVP. Acknowledge need for persistent database (JSON file, Redis, SQL/NoSQL DB) for future enhancement. 
# Appendix - Example Data Store (in-memory object): ```javascript // backend/storage.js const urlMap = { 'aB3cD': 'https://very-long-url-example.com/with/path/and/query?params=true', 'xY7zW': 'https://another-example.org/' }; // ... functions to get/set URLs ... ``` </PRD> ``` -------------------------------------------------------------------------------- /packages/tm-core/src/types/index.ts: -------------------------------------------------------------------------------- ```typescript /** * Core type definitions for Task Master */ /** * Storage type options * - 'file': Local file system storage * - 'api': Remote API storage (Hamster integration) * - 'auto': Automatically detect based on auth status */ export type StorageType = 'file' | 'api' | 'auto'; // ============================================================================ // Type Literals // ============================================================================ /** * Task status values */ export type TaskStatus = | 'pending' | 'in-progress' | 'done' | 'deferred' | 'cancelled' | 'blocked' | 'review' | 'completed'; /** * Task priority levels */ export type TaskPriority = 'low' | 'medium' | 'high' | 'critical'; /** * Task complexity levels */ export type TaskComplexity = 'simple' | 'moderate' | 'complex' | 'very-complex'; // ============================================================================ // Core Interfaces // ============================================================================ /** * Placeholder task interface for temporary/minimal task objects */ export interface PlaceholderTask { id: string; title: string; status: TaskStatus; priority: TaskPriority; } /** * Base task interface */ export interface Task { id: string; title: string; description: string; status: TaskStatus; priority: TaskPriority; dependencies: string[]; details: string; testStrategy: string; subtasks: Subtask[]; // Optional enhanced properties createdAt?: string; updatedAt?: string; effort?: number; actualEffort?: 
number; tags?: string[]; assignee?: string; complexity?: TaskComplexity; } /** * Subtask interface extending Task with numeric ID */ export interface Subtask extends Omit<Task, 'id' | 'subtasks'> { id: number; parentId: string; subtasks?: never; // Subtasks cannot have their own subtasks } /** * Task metadata for tracking overall project state */ export interface TaskMetadata { version: string; lastModified: string; taskCount: number; completedCount: number; projectName?: string; description?: string; tags?: string[]; } /** * Task collection with metadata */ export interface TaskCollection { tasks: Task[]; metadata: TaskMetadata; } /** * Task tag for organizing tasks */ export interface TaskTag { name: string; tasks: string[]; // Task IDs belonging to this tag metadata: Record<string, any>; } // ============================================================================ // Utility Types // ============================================================================ /** * Type for creating a new task (without generated fields) */ export type CreateTask = Omit< Task, 'id' | 'createdAt' | 'updatedAt' | 'subtasks' > & { subtasks?: Omit<Subtask, 'id' | 'parentId' | 'createdAt' | 'updatedAt'>[]; }; /** * Type for updating a task (all fields optional except ID) */ export type UpdateTask = Partial<Omit<Task, 'id'>> & { id: string; }; /** * Type for task filters */ export interface TaskFilter { status?: TaskStatus | TaskStatus[]; priority?: TaskPriority | TaskPriority[]; tags?: string[]; hasSubtasks?: boolean; search?: string; assignee?: string; complexity?: TaskComplexity | TaskComplexity[]; } /** * Type for sort options */ export interface TaskSortOptions { field: keyof Task; direction: 'asc' | 'desc'; } // ============================================================================ // Type Guards // ============================================================================ /** * Type guard to check if a value is a valid TaskStatus */ export function 
isTaskStatus(value: unknown): value is TaskStatus { return ( typeof value === 'string' && [ 'pending', 'in-progress', 'done', 'deferred', 'cancelled', 'blocked', 'review', 'completed' ].includes(value) ); } /** * Type guard to check if a value is a valid TaskPriority */ export function isTaskPriority(value: unknown): value is TaskPriority { return ( typeof value === 'string' && ['low', 'medium', 'high', 'critical'].includes(value) ); } /** * Type guard to check if a value is a valid TaskComplexity */ export function isTaskComplexity(value: unknown): value is TaskComplexity { return ( typeof value === 'string' && ['simple', 'moderate', 'complex', 'very-complex'].includes(value) ); } /** * Type guard to check if an object is a Task */ export function isTask(obj: unknown): obj is Task { if (!obj || typeof obj !== 'object') return false; const task = obj as Record<string, unknown>; return ( typeof task.id === 'string' && typeof task.title === 'string' && typeof task.description === 'string' && isTaskStatus(task.status) && isTaskPriority(task.priority) && Array.isArray(task.dependencies) && typeof task.details === 'string' && typeof task.testStrategy === 'string' && Array.isArray(task.subtasks) ); } /** * Type guard to check if an object is a Subtask */ export function isSubtask(obj: unknown): obj is Subtask { if (!obj || typeof obj !== 'object') return false; const subtask = obj as Record<string, unknown>; return ( typeof subtask.id === 'number' && typeof subtask.parentId === 'string' && typeof subtask.title === 'string' && typeof subtask.description === 'string' && isTaskStatus(subtask.status) && isTaskPriority(subtask.priority) && !('subtasks' in subtask) ); } // ============================================================================ // Deprecated Types (for backwards compatibility) // ============================================================================ /** * @deprecated Use TaskStatus instead */ export type Status = TaskStatus; /** * @deprecated Use TaskPriority
instead */ export type Priority = TaskPriority; /** * @deprecated Use TaskComplexity instead */ export type Complexity = TaskComplexity; ``` -------------------------------------------------------------------------------- /.claude/agents/task-checker.md: -------------------------------------------------------------------------------- ```markdown --- name: task-checker description: Use this agent to verify that tasks marked as 'review' have been properly implemented according to their specifications. This agent performs quality assurance by checking implementations against requirements, running tests, and ensuring best practices are followed. <example>Context: A task has been marked as 'review' after implementation. user: 'Check if task 118 was properly implemented' assistant: 'I'll use the task-checker agent to verify the implementation meets all requirements.' <commentary>Tasks in 'review' status need verification before being marked as 'done'.</commentary></example> <example>Context: Multiple tasks are in review status. user: 'Verify all tasks that are ready for review' assistant: 'I'll deploy the task-checker to verify all tasks in review status.' <commentary>The checker ensures quality before tasks are marked complete.</commentary></example> model: sonnet color: yellow --- You are a Quality Assurance specialist that rigorously verifies task implementations against their specifications. Your role is to ensure that tasks marked as 'review' meet all requirements before they can be marked as 'done'. ## Core Responsibilities 1. **Task Specification Review** - Retrieve task details using MCP tool `mcp__task-master-ai__get_task` - Understand the requirements, test strategy, and success criteria - Review any subtasks and their individual requirements 2. 
**Implementation Verification** - Use `Read` tool to examine all created/modified files - Use `Bash` tool to run compilation and build commands - Use `Grep` tool to search for required patterns and implementations - Verify file structure matches specifications - Check that all required methods/functions are implemented 3. **Test Execution** - Run tests specified in the task's testStrategy - Execute build commands (npm run build, tsc --noEmit, etc.) - Verify no compilation errors or warnings - Check for runtime errors where applicable - Test edge cases mentioned in requirements 4. **Code Quality Assessment** - Verify code follows project conventions - Check for proper error handling - Ensure TypeScript typing is strict (no 'any' unless justified) - Verify documentation/comments where required - Check for security best practices 5. **Dependency Validation** - Verify all task dependencies were actually completed - Check integration points with dependent tasks - Ensure no breaking changes to existing functionality ## Verification Workflow 1. **Retrieve Task Information** ``` Use mcp__task-master-ai__get_task to get full task details Note the implementation requirements and test strategy ``` 2. **Check File Existence** ```bash # Verify all required files exist ls -la [expected directories] # Read key files to verify content ``` 3. **Verify Implementation** - Read each created/modified file - Check against requirements checklist - Verify all subtasks are complete 4. **Run Tests** ```bash # TypeScript compilation cd [project directory] && npx tsc --noEmit # Run specified tests npm test [specific test files] # Build verification npm run build ``` 5. 
**Generate Verification Report** ## Output Format ```yaml verification_report: task_id: [ID] status: PASS | FAIL | PARTIAL score: [1-10] requirements_met: - ✅ [Requirement that was satisfied] - ✅ [Another satisfied requirement] issues_found: - ❌ [Issue description] - ⚠️ [Warning or minor issue] files_verified: - path: [file path] status: [created/modified/verified] issues: [any problems found] tests_run: - command: [test command] result: [pass/fail] output: [relevant output] recommendations: - [Specific fix needed] - [Improvement suggestion] verdict: | [Clear statement on whether task should be marked 'done' or sent back to 'pending'] [If FAIL: Specific list of what must be fixed] [If PASS: Confirmation that all requirements are met] ``` ## Decision Criteria **Mark as PASS (ready for 'done'):** - All required files exist and contain expected content - All tests pass successfully - No compilation or build errors - All subtasks are complete - Core requirements are met - Code quality is acceptable **Mark as PARTIAL (may proceed with warnings):** - Core functionality is implemented - Minor issues that don't block functionality - Missing nice-to-have features - Documentation could be improved - Tests pass but coverage could be better **Mark as FAIL (must return to 'pending'):** - Required files are missing - Compilation or build errors - Tests fail - Core requirements not met - Security vulnerabilities detected - Breaking changes to existing code ## Important Guidelines - **BE THOROUGH**: Check every requirement systematically - **BE SPECIFIC**: Provide exact file paths and line numbers for issues - **BE FAIR**: Distinguish between critical issues and minor improvements - **BE CONSTRUCTIVE**: Provide clear guidance on how to fix issues - **BE EFFICIENT**: Focus on requirements, not perfection ## Tools You MUST Use - `Read`: Examine implementation files (READ-ONLY) - `Bash`: Run tests and verification commands - `Grep`: Search for patterns in code - 
`mcp__task-master-ai__get_task`: Get task details - **NEVER use Write/Edit** - you only verify, not fix ## Integration with Workflow You are the quality gate between 'review' and 'done' status: 1. Task-executor implements and marks as 'review' 2. You verify and report PASS/FAIL 3. Claude either marks as 'done' (PASS) or 'pending' (FAIL) 4. If FAIL, task-executor re-implements based on your report Your verification ensures high quality and prevents accumulation of technical debt. ``` -------------------------------------------------------------------------------- /assets/claude/agents/task-checker.md: -------------------------------------------------------------------------------- ```markdown --- name: task-checker description: Use this agent to verify that tasks marked as 'review' have been properly implemented according to their specifications. This agent performs quality assurance by checking implementations against requirements, running tests, and ensuring best practices are followed. <example>Context: A task has been marked as 'review' after implementation. user: 'Check if task 118 was properly implemented' assistant: 'I'll use the task-checker agent to verify the implementation meets all requirements.' <commentary>Tasks in 'review' status need verification before being marked as 'done'.</commentary></example> <example>Context: Multiple tasks are in review status. user: 'Verify all tasks that are ready for review' assistant: 'I'll deploy the task-checker to verify all tasks in review status.' <commentary>The checker ensures quality before tasks are marked complete.</commentary></example> model: sonnet color: yellow --- You are a Quality Assurance specialist that rigorously verifies task implementations against their specifications. Your role is to ensure that tasks marked as 'review' meet all requirements before they can be marked as 'done'. ## Core Responsibilities 1. 
**Task Specification Review** - Retrieve task details using MCP tool `mcp__task-master-ai__get_task` - Understand the requirements, test strategy, and success criteria - Review any subtasks and their individual requirements 2. **Implementation Verification** - Use `Read` tool to examine all created/modified files - Use `Bash` tool to run compilation and build commands - Use `Grep` tool to search for required patterns and implementations - Verify file structure matches specifications - Check that all required methods/functions are implemented 3. **Test Execution** - Run tests specified in the task's testStrategy - Execute build commands (npm run build, tsc --noEmit, etc.) - Verify no compilation errors or warnings - Check for runtime errors where applicable - Test edge cases mentioned in requirements 4. **Code Quality Assessment** - Verify code follows project conventions - Check for proper error handling - Ensure TypeScript typing is strict (no 'any' unless justified) - Verify documentation/comments where required - Check for security best practices 5. **Dependency Validation** - Verify all task dependencies were actually completed - Check integration points with dependent tasks - Ensure no breaking changes to existing functionality ## Verification Workflow 1. **Retrieve Task Information** ``` Use mcp__task-master-ai__get_task to get full task details Note the implementation requirements and test strategy ``` 2. **Check File Existence** ```bash # Verify all required files exist ls -la [expected directories] # Read key files to verify content ``` 3. **Verify Implementation** - Read each created/modified file - Check against requirements checklist - Verify all subtasks are complete 4. **Run Tests** ```bash # TypeScript compilation cd [project directory] && npx tsc --noEmit # Run specified tests npm test [specific test files] # Build verification npm run build ``` 5. 
**Generate Verification Report** ## Output Format ```yaml verification_report: task_id: [ID] status: PASS | FAIL | PARTIAL score: [1-10] requirements_met: - ✅ [Requirement that was satisfied] - ✅ [Another satisfied requirement] issues_found: - ❌ [Issue description] - ⚠️ [Warning or minor issue] files_verified: - path: [file path] status: [created/modified/verified] issues: [any problems found] tests_run: - command: [test command] result: [pass/fail] output: [relevant output] recommendations: - [Specific fix needed] - [Improvement suggestion] verdict: | [Clear statement on whether task should be marked 'done' or sent back to 'pending'] [If FAIL: Specific list of what must be fixed] [If PASS: Confirmation that all requirements are met] ``` ## Decision Criteria **Mark as PASS (ready for 'done'):** - All required files exist and contain expected content - All tests pass successfully - No compilation or build errors - All subtasks are complete - Core requirements are met - Code quality is acceptable **Mark as PARTIAL (may proceed with warnings):** - Core functionality is implemented - Minor issues that don't block functionality - Missing nice-to-have features - Documentation could be improved - Tests pass but coverage could be better **Mark as FAIL (must return to 'pending'):** - Required files are missing - Compilation or build errors - Tests fail - Core requirements not met - Security vulnerabilities detected - Breaking changes to existing code ## Important Guidelines - **BE THOROUGH**: Check every requirement systematically - **BE SPECIFIC**: Provide exact file paths and line numbers for issues - **BE FAIR**: Distinguish between critical issues and minor improvements - **BE CONSTRUCTIVE**: Provide clear guidance on how to fix issues - **BE EFFICIENT**: Focus on requirements, not perfection ## Tools You MUST Use - `Read`: Examine implementation files (READ-ONLY) - `Bash`: Run tests and verification commands - `Grep`: Search for patterns in code - 
`mcp__task-master-ai__get_task`: Get task details - **NEVER use Write/Edit** - you only verify, not fix ## Integration with Workflow You are the quality gate between 'review' and 'done' status: 1. Task-executor implements and marks as 'review' 2. You verify and report PASS/FAIL 3. Claude either marks as 'done' (PASS) or 'pending' (FAIL) 4. If FAIL, task-executor re-implements based on your report Your verification ensures high quality and prevents accumulation of technical debt. ``` -------------------------------------------------------------------------------- /apps/docs/capabilities/cli-root-commands.mdx: -------------------------------------------------------------------------------- ```markdown --- title: CLI Commands sidebarTitle: "CLI Commands" --- <AccordionGroup> <Accordion title="Parse PRD"> ```bash # Parse a PRD file and generate tasks task-master parse-prd <prd-file.txt> # Limit the number of tasks generated task-master parse-prd <prd-file.txt> --num-tasks=10 ``` </Accordion> <Accordion title="List Tasks"> ```bash # List all tasks task-master list # List tasks with a specific status task-master list --status=<status> # List tasks with subtasks task-master list --with-subtasks # List tasks with a specific status and include subtasks task-master list --status=<status> --with-subtasks ``` </Accordion> <Accordion title="Show Next Task"> ```bash # Show the next task to work on based on dependencies and status task-master next ``` </Accordion> <Accordion title="Show Specific Task"> ```bash # Show details of a specific task task-master show <id> # or task-master show --id=<id> # View a specific subtask (e.g., subtask 2 of task 1) task-master show 1.2 ``` </Accordion> <Accordion title="Update Tasks"> ```bash # Update tasks from a specific ID and provide context task-master update --from=<id> --prompt="<prompt>" ``` </Accordion> <Accordion title="Update a Specific Task"> ```bash # Update a single task by ID with new information task-master update-task --id=<id> 
--prompt="<prompt>" # Use research-backed updates with Perplexity AI task-master update-task --id=<id> --prompt="<prompt>" --research ``` </Accordion> <Accordion title="Update a Subtask"> ```bash # Append additional information to a specific subtask task-master update-subtask --id=<parentId.subtaskId> --prompt="<prompt>" # Example: Add details about API rate limiting to subtask 2 of task 5 task-master update-subtask --id=5.2 --prompt="Add rate limiting of 100 requests per minute" # Use research-backed updates with Perplexity AI task-master update-subtask --id=<parentId.subtaskId> --prompt="<prompt>" --research ``` Unlike the `update-task` command which replaces task information, the `update-subtask` command _appends_ new information to the existing subtask details, marking it with a timestamp. This is useful for iteratively enhancing subtasks while preserving the original content. </Accordion> <Accordion title="Generate Task Files"> ```bash # Generate individual task files from tasks.json task-master generate ``` </Accordion> <Accordion title="Set Task Status"> ```bash # Set status of a single task task-master set-status --id=<id> --status=<status> # Set status for multiple tasks task-master set-status --id=1,2,3 --status=<status> # Set status for subtasks task-master set-status --id=1.1,1.2 --status=<status> ``` When marking a task as "done", all of its subtasks will automatically be marked as "done" as well. 
</Accordion> <Accordion title="Expand Tasks"> ```bash # Expand a specific task with subtasks task-master expand --id=<id> --num=<number> # Expand with additional context task-master expand --id=<id> --prompt="<context>" # Expand all pending tasks task-master expand --all # Force regeneration of subtasks for tasks that already have them task-master expand --all --force # Research-backed subtask generation for a specific task task-master expand --id=<id> --research # Research-backed generation for all tasks task-master expand --all --research ``` </Accordion> <Accordion title="Clear Subtasks"> ```bash # Clear subtasks from a specific task task-master clear-subtasks --id=<id> # Clear subtasks from multiple tasks task-master clear-subtasks --id=1,2,3 # Clear subtasks from all tasks task-master clear-subtasks --all ``` </Accordion> <Accordion title="Analyze Task Complexity"> ```bash # Analyze complexity of all tasks task-master analyze-complexity # Save report to a custom location task-master analyze-complexity --output=my-report.json # Use a specific LLM model task-master analyze-complexity --model=claude-3-opus-20240229 # Set a custom complexity threshold (1-10) task-master analyze-complexity --threshold=6 # Use an alternative tasks file task-master analyze-complexity --file=custom-tasks.json # Use Perplexity AI for research-backed complexity analysis task-master analyze-complexity --research ``` </Accordion> <Accordion title="View Complexity Report"> ```bash # Display the task complexity analysis report task-master complexity-report # View a report at a custom location task-master complexity-report --file=my-report.json ``` </Accordion> <Accordion title="Managing Task Dependencies"> ```bash # Add a dependency to a task task-master add-dependency --id=<id> --depends-on=<id> # Remove a dependency from a task task-master remove-dependency --id=<id> --depends-on=<id> # Validate dependencies without fixing them task-master validate-dependencies # Find and fix invalid 
dependencies automatically task-master fix-dependencies ``` </Accordion> <Accordion title="Add a New Task"> ```bash # Add a new task using AI task-master add-task --prompt="Description of the new task" # Add a task with dependencies task-master add-task --prompt="Description" --dependencies=1,2,3 # Add a task with priority task-master add-task --prompt="Description" --priority=high ``` </Accordion> <Accordion title="Initialize a Project"> ```bash # Initialize a new project with Task Master structure task-master init ``` </Accordion> </AccordionGroup> ``` -------------------------------------------------------------------------------- /tests/unit/ui/indicators.test.js: -------------------------------------------------------------------------------- ```javascript /** * Unit tests for indicators module (priority and complexity indicators) */ import { jest } from '@jest/globals'; // Mock chalk using unstable_mockModule for ESM compatibility jest.unstable_mockModule('chalk', () => ({ default: { red: jest.fn((str) => str), yellow: jest.fn((str) => str), green: jest.fn((str) => str), white: jest.fn((str) => str), hex: jest.fn(() => jest.fn((str) => str)) } })); // Import after mocking const { getMcpPriorityIndicators, getCliPriorityIndicators, getPriorityIndicators, getPriorityIndicator, getStatusBarPriorityIndicators, getPriorityColors, getCliComplexityIndicators, getStatusBarComplexityIndicators, getComplexityColors, getComplexityIndicator } = await import('../../../src/ui/indicators.js'); describe('Priority Indicators', () => { describe('getMcpPriorityIndicators', () => { it('should return emoji indicators for MCP context', () => { const indicators = getMcpPriorityIndicators(); expect(indicators).toEqual({ high: '🔴', medium: '🟠', low: '🟢' }); }); }); describe('getCliPriorityIndicators', () => { it('should return colored dot indicators for CLI context', () => { const indicators = getCliPriorityIndicators(); expect(indicators).toHaveProperty('high'); 
expect(indicators).toHaveProperty('medium'); expect(indicators).toHaveProperty('low'); // Since chalk is mocked, we're just verifying structure expect(indicators.high).toContain('●'); }); }); describe('getPriorityIndicators', () => { it('should return MCP indicators when isMcp is true', () => { const indicators = getPriorityIndicators(true); expect(indicators).toEqual({ high: '🔴', medium: '🟠', low: '🟢' }); }); it('should return CLI indicators when isMcp is false', () => { const indicators = getPriorityIndicators(false); expect(indicators).toHaveProperty('high'); expect(indicators).toHaveProperty('medium'); expect(indicators).toHaveProperty('low'); }); it('should default to CLI indicators when no parameter provided', () => { const indicators = getPriorityIndicators(); expect(indicators).toHaveProperty('high'); expect(indicators.high).toContain('●'); }); }); describe('getPriorityIndicator', () => { it('should return correct MCP indicator for valid priority', () => { expect(getPriorityIndicator('high', true)).toBe('🔴'); expect(getPriorityIndicator('medium', true)).toBe('🟠'); expect(getPriorityIndicator('low', true)).toBe('🟢'); }); it('should return correct CLI indicator for valid priority', () => { const highIndicator = getPriorityIndicator('high', false); const mediumIndicator = getPriorityIndicator('medium', false); const lowIndicator = getPriorityIndicator('low', false); expect(highIndicator).toContain('●'); expect(mediumIndicator).toContain('●'); expect(lowIndicator).toContain('●'); }); it('should return medium indicator for invalid priority', () => { expect(getPriorityIndicator('invalid', true)).toBe('🟠'); expect(getPriorityIndicator(null, true)).toBe('🟠'); expect(getPriorityIndicator(undefined, true)).toBe('🟠'); }); it('should default to CLI context when isMcp not provided', () => { const indicator = getPriorityIndicator('high'); expect(indicator).toContain('●'); }); }); }); describe('Complexity Indicators', () => { describe('getCliComplexityIndicators', () => { 
it('should return colored dot indicators for complexity levels', () => { const indicators = getCliComplexityIndicators(); expect(indicators).toHaveProperty('high'); expect(indicators).toHaveProperty('medium'); expect(indicators).toHaveProperty('low'); expect(indicators.high).toContain('●'); }); }); describe('getStatusBarComplexityIndicators', () => { it('should return single character indicators for status bars', () => { const indicators = getStatusBarComplexityIndicators(); // Since chalk is mocked, we need to check for the actual characters expect(indicators.high).toContain('⋮'); expect(indicators.medium).toContain(':'); expect(indicators.low).toContain('.'); }); }); describe('getComplexityColors', () => { it('should return complexity color functions', () => { const colors = getComplexityColors(); expect(colors).toHaveProperty('high'); expect(colors).toHaveProperty('medium'); expect(colors).toHaveProperty('low'); // Verify they are functions (mocked chalk functions) expect(typeof colors.high).toBe('function'); }); }); describe('getComplexityIndicator', () => { it('should return high indicator for scores >= 7', () => { const cliIndicators = getCliComplexityIndicators(); expect(getComplexityIndicator(7)).toBe(cliIndicators.high); expect(getComplexityIndicator(8)).toBe(cliIndicators.high); expect(getComplexityIndicator(10)).toBe(cliIndicators.high); }); it('should return low indicator for scores <= 3', () => { const cliIndicators = getCliComplexityIndicators(); expect(getComplexityIndicator(1)).toBe(cliIndicators.low); expect(getComplexityIndicator(2)).toBe(cliIndicators.low); expect(getComplexityIndicator(3)).toBe(cliIndicators.low); }); it('should return medium indicator for scores 4-6', () => { const cliIndicators = getCliComplexityIndicators(); expect(getComplexityIndicator(4)).toBe(cliIndicators.medium); expect(getComplexityIndicator(5)).toBe(cliIndicators.medium); expect(getComplexityIndicator(6)).toBe(cliIndicators.medium); }); it('should return status bar 
indicators when statusBar is true', () => { const statusBarIndicators = getStatusBarComplexityIndicators(); expect(getComplexityIndicator(8, true)).toBe(statusBarIndicators.high); expect(getComplexityIndicator(5, true)).toBe(statusBarIndicators.medium); expect(getComplexityIndicator(2, true)).toBe(statusBarIndicators.low); }); }); }); ``` -------------------------------------------------------------------------------- /apps/extension/src/utils/task-master-api/cache/cache-manager.ts: -------------------------------------------------------------------------------- ```typescript /** * Cache Manager * Handles all caching logic with LRU eviction and analytics */ import type { ExtensionLogger } from '../../logger'; import type { CacheAnalytics, CacheConfig, CacheEntry } from '../types'; export class CacheManager { private cache = new Map<string, CacheEntry>(); private analytics: CacheAnalytics = { hits: 0, misses: 0, evictions: 0, refreshes: 0, totalSize: 0, averageAccessTime: 0, hitRate: 0 }; private backgroundRefreshTimer?: NodeJS.Timeout; constructor( private config: CacheConfig & { cacheDuration: number }, private logger: ExtensionLogger ) { if (config.enableBackgroundRefresh) { this.initializeBackgroundRefresh(); } } /** * Get data from cache if not expired */ get(key: string): any { const startTime = Date.now(); const cached = this.cache.get(key); if (cached) { const isExpired = Date.now() - cached.timestamp >= (cached.ttl || this.config.cacheDuration); if (!isExpired) { // Update access statistics cached.accessCount++; cached.lastAccessed = Date.now(); if (this.config.enableAnalytics) { this.analytics.hits++; } const accessTime = Date.now() - startTime; this.logger.debug( `Cache hit for ${key} (${accessTime}ms, ${cached.accessCount} accesses)` ); return cached.data; } else { // Remove expired entry this.cache.delete(key); this.logger.debug(`Cache entry expired and removed: ${key}`); } } if (this.config.enableAnalytics) { this.analytics.misses++; } 
this.logger.debug(`Cache miss for ${key}`); return null; } /** * Set data in cache with LRU eviction */ set( key: string, data: any, options?: { ttl?: number; tags?: string[] } ): void { const now = Date.now(); const dataSize = this.estimateDataSize(data); // Create cache entry const entry: CacheEntry = { data, timestamp: now, accessCount: 1, lastAccessed: now, size: dataSize, ttl: options?.ttl, tags: options?.tags || [key.split('_')[0]] }; // Check if we need to evict entries (LRU strategy) if (this.cache.size >= this.config.maxSize) { this.evictLRUEntries(Math.max(1, Math.floor(this.config.maxSize * 0.1))); } this.cache.set(key, entry); this.logger.debug( `Cached data for ${key} (size: ${dataSize} bytes, TTL: ${entry.ttl || this.config.cacheDuration}ms)` ); // Trigger prefetch if enabled if (this.config.enablePrefetch) { this.scheduleRelatedDataPrefetch(key, data); } } /** * Clear cache entries matching a pattern */ clearPattern(pattern: string): void { let evictedCount = 0; for (const key of this.cache.keys()) { if (key.includes(pattern)) { this.cache.delete(key); evictedCount++; } } if (evictedCount > 0) { this.analytics.evictions += evictedCount; this.logger.debug( `Evicted ${evictedCount} cache entries matching pattern: ${pattern}` ); } } /** * Clear all cached data */ clear(): void { this.cache.clear(); this.resetAnalytics(); } /** * Get cache analytics */ getAnalytics(): CacheAnalytics { this.updateAnalytics(); return { ...this.analytics }; } /** * Get frequently accessed entries for background refresh */ getRefreshCandidates(): Array<[string, CacheEntry]> { return Array.from(this.cache.entries()) .filter(([key, entry]) => { const age = Date.now() - entry.timestamp; const isNearExpiration = age > this.config.cacheDuration * 0.7; const isFrequentlyAccessed = entry.accessCount >= 3; return ( isNearExpiration && isFrequentlyAccessed && key.includes('get_tasks') ); }) .sort((a, b) => b[1].accessCount - a[1].accessCount) .slice(0, 5); } /** * Update refresh 
count for analytics */ incrementRefreshes(): void { this.analytics.refreshes++; } /** * Cleanup resources */ destroy(): void { if (this.backgroundRefreshTimer) { clearInterval(this.backgroundRefreshTimer); this.backgroundRefreshTimer = undefined; } this.clear(); } private initializeBackgroundRefresh(): void { if (this.backgroundRefreshTimer) { clearInterval(this.backgroundRefreshTimer); } const interval = this.config.refreshInterval; this.backgroundRefreshTimer = setInterval(() => { // Background refresh is handled by the main API class // This just maintains the timer }, interval); this.logger.debug( `Cache background refresh initialized with ${interval}ms interval` ); } private evictLRUEntries(count: number): void { const entries = Array.from(this.cache.entries()) .sort((a, b) => a[1].lastAccessed - b[1].lastAccessed) .slice(0, count); for (const [key] of entries) { this.cache.delete(key); this.analytics.evictions++; } if (entries.length > 0) { this.logger.debug(`Evicted ${entries.length} LRU cache entries`); } } private estimateDataSize(data: any): number { try { return JSON.stringify(data).length * 2; // Rough estimate } catch { return 1000; // Default fallback } } private scheduleRelatedDataPrefetch(key: string, data: any): void { if (key.includes('get_tasks') && Array.isArray(data)) { this.logger.debug( `Scheduled prefetch for ${data.length} tasks related to ${key}` ); } } private resetAnalytics(): void { this.analytics = { hits: 0, misses: 0, evictions: 0, refreshes: 0, totalSize: 0, averageAccessTime: 0, hitRate: 0 }; } private updateAnalytics(): void { const total = this.analytics.hits + this.analytics.misses; this.analytics.hitRate = total > 0 ? 
this.analytics.hits / total : 0; this.analytics.totalSize = this.cache.size; if (this.cache.size > 0) { const totalAccessTime = Array.from(this.cache.values()).reduce( (sum, entry) => sum + (entry.lastAccessed - entry.timestamp), 0 ); this.analytics.averageAccessTime = totalAccessTime / this.cache.size; } } } ``` -------------------------------------------------------------------------------- /packages/tm-core/POC-STATUS.md: -------------------------------------------------------------------------------- ```markdown # GetTaskList POC Status ## ✅ What We've Accomplished We've successfully implemented a complete end-to-end proof of concept for the `getTaskList` functionality with improved separation of concerns: ### 1. Clean Architecture Layers with Proper Separation #### Configuration Layer (ConfigManager) - Single source of truth for configuration - Manages active tag and storage settings - Handles config.json persistence - Determines storage type (file vs API) #### Service Layer (TaskService) - Core business logic and operations - `getTaskList()` method that coordinates between ConfigManager and Storage - Handles all filtering and task processing - Manages storage lifecycle #### Facade Layer (TaskMasterCore) - Simplified API for consumers - Delegates to TaskService for operations - Backwards compatible `listTasks()` method - New `getTaskList()` method (preferred naming) #### Domain Layer (Entities) - `TaskEntity` with business logic - Validation and status transitions - Dependency checking (`canComplete()`) #### Infrastructure Layer (Storage) - `IStorage` interface for abstraction - `FileStorage` for local files (handles 'master' tag correctly) - `ApiStorage` for Hamster integration - `StorageFactory` for automatic selection - **NO business logic** - only persistence ### 2. 
Storage Abstraction Benefits ```typescript // Same API works with different backends const fileCore = createTaskMasterCore(path, { storage: { type: 'file' } }); const apiCore = createTaskMasterCore(path, { storage: { type: 'api', apiEndpoint: 'https://hamster.ai', apiAccessToken: 'xxx' } }); // Identical usage (same call works on fileCore or apiCore) const result = await fileCore.listTasks({ filter: { status: 'pending' } }); ``` ### 3. Type Safety Throughout - Full TypeScript implementation - Comprehensive interfaces - Type-safe filters and options - Proper error types ### 4. Testing Coverage - 50 tests passing - Unit tests for core components - Integration tests for listTasks - Mock implementations for testing ## 📊 Architecture Validation ### ✅ Separation of Concerns - **CLI** handles UI/formatting only - **tm-core** handles business logic - **Storage** handles persistence - Each layer is independently testable ### ✅ Extensibility - Easy to add new storage types (database, S3, etc.) - New filters can be added to `TaskFilter` - AI providers follow same pattern (BaseProvider) ### ✅ Error Handling - Consistent `TaskMasterError` with codes - Context preservation - User-friendly messages ### ✅ Performance Considerations - File locking for concurrent access - Atomic writes with temp files - Retry logic with exponential backoff - Request timeout handling ## 🔄 Integration Path ### Current CLI Structure ```javascript // scripts/modules/task-manager/list-tasks.js listTasks(tasksPath, statusFilter, reportPath, withSubtasks, outputFormat, context) // Directly reads files, handles all logic ``` ### New Integration Structure ```javascript // Using tm-core with proper separation of concerns const tmCore = createTaskMasterCore(projectPath, config); const result = await tmCore.getTaskList(options); // CLI only handles formatting result for display // Under the hood: // 1. ConfigManager determines active tag and storage type // 2. TaskService uses storage to fetch tasks for the tag // 3.
TaskService applies business logic and filters // 4. Storage only handles reading/writing - no business logic ``` ## 📈 Metrics ### Code Quality - **Clean Code**: Methods under 40 lines ✅ - **Single Responsibility**: Each class has one purpose ✅ - **DRY**: No code duplication ✅ - **Type Coverage**: 100% TypeScript ✅ ### Test Coverage - **Unit Tests**: BaseProvider, TaskEntity ✅ - **Integration Tests**: Full listTasks flow ✅ - **Storage Tests**: File and API operations ✅ ## 🎯 POC Success Criteria | Criteria | Status | Notes | |----------|--------|-------| | Clean architecture | ✅ | Clear layer separation | | Storage abstraction | ✅ | File + API storage working | | Type safety | ✅ | Full TypeScript | | Error handling | ✅ | Comprehensive error system | | Testing | ✅ | 50 tests passing | | Performance | ✅ | Optimized with caching, batching | | Documentation | ✅ | Architecture docs created | ## 🚀 Next Steps ### Immediate (Complete ListTasks Integration) 1. Create npm script to test integration example 2. Add mock Hamster API for testing 3. Create migration guide for CLI ### Phase 1 Remaining Work Based on this POC success, implement remaining operations: - `addTask()` - Add new tasks - `updateTask()` - Update existing tasks - `deleteTask()` - Remove tasks - `expandTask()` - Break into subtasks - Tag management operations ### Phase 2 (AI Integration) - Complete AI provider implementations - Task generation from PRD - Task complexity analysis - Auto-expansion of tasks ## 💡 Lessons Learned ### What Worked Well 1. **Separation of Concerns** - ConfigManager, TaskService, and Storage have clear responsibilities 2. **Storage Factory Pattern** - Clean abstraction for multiple backends 3. **Entity Pattern** - Business logic encapsulation 4. **Template Method Pattern** - BaseProvider for AI providers 5. **Comprehensive Error Handling** - TaskMasterError with context ### Improvements Made 1. Migrated from Jest to Vitest (faster) 2. 
Replaced ESLint/Prettier with Biome (unified tooling) 3. Fixed conflicting interface definitions 4. Added proper TypeScript exports 5. **Better Architecture** - Separated configuration, business logic, and persistence 6. **Proper Tag Handling** - 'master' tag maps correctly to tasks.json 7. **Clean Storage Layer** - Removed business logic from storage ## ✨ Conclusion The ListTasks POC successfully validates our architecture. The structure is: - **Clean and maintainable** - **Properly abstracted** - **Well-tested** - **Ready for extension** We can confidently proceed with implementing the remaining functionality following this same pattern. ``` -------------------------------------------------------------------------------- /apps/docs/archive/command-reference.mdx: -------------------------------------------------------------------------------- ```markdown --- title: "Task Master Commands" description: "A comprehensive reference of all available Task Master commands" --- <AccordionGroup> <Accordion title="Parse PRD"> ```bash # Parse a PRD file and generate tasks task-master parse-prd <prd-file.txt> # Limit the number of tasks generated task-master parse-prd <prd-file.txt> --num-tasks=10 ``` </Accordion> <Accordion title="List Tasks"> ```bash # List all tasks task-master list # List tasks with a specific status task-master list --status=<status> # List tasks with subtasks task-master list --with-subtasks # List tasks with a specific status and include subtasks task-master list --status=<status> --with-subtasks ``` </Accordion> <Accordion title="Show Next Task"> ```bash # Show the next task to work on based on dependencies and status task-master next ``` </Accordion> <Accordion title="Show Specific Task"> ```bash # Show details of a specific task task-master show <id> # or task-master show --id=<id> # View a specific subtask (e.g., subtask 2 of task 1) task-master show 1.2 ``` </Accordion> <Accordion title="Update Tasks"> ```bash # Update tasks from a specific ID and 
provide context task-master update --from=<id> --prompt="<prompt>" ``` </Accordion> <Accordion title="Update a Specific Task"> ```bash # Update a single task by ID with new information task-master update-task --id=<id> --prompt="<prompt>" # Use research-backed updates with Perplexity AI task-master update-task --id=<id> --prompt="<prompt>" --research ``` </Accordion> <Accordion title="Update a Subtask"> ```bash # Append additional information to a specific subtask task-master update-subtask --id=<parentId.subtaskId> --prompt="<prompt>" # Example: Add details about API rate limiting to subtask 2 of task 5 task-master update-subtask --id=5.2 --prompt="Add rate limiting of 100 requests per minute" # Use research-backed updates with Perplexity AI task-master update-subtask --id=<parentId.subtaskId> --prompt="<prompt>" --research ``` Unlike the `update-task` command which replaces task information, the `update-subtask` command _appends_ new information to the existing subtask details, marking it with a timestamp. This is useful for iteratively enhancing subtasks while preserving the original content. </Accordion> <Accordion title="Generate Task Files"> ```bash # Generate individual task files from tasks.json task-master generate ``` </Accordion> <Accordion title="Set Task Status"> ```bash # Set status of a single task task-master set-status --id=<id> --status=<status> # Set status for multiple tasks task-master set-status --id=1,2,3 --status=<status> # Set status for subtasks task-master set-status --id=1.1,1.2 --status=<status> ``` When marking a task as "done", all of its subtasks will automatically be marked as "done" as well. 
</Accordion> <Accordion title="Expand Tasks"> ```bash # Expand a specific task with subtasks task-master expand --id=<id> --num=<number> # Expand with additional context task-master expand --id=<id> --prompt="<context>" # Expand all pending tasks task-master expand --all # Force regeneration of subtasks for tasks that already have them task-master expand --all --force # Research-backed subtask generation for a specific task task-master expand --id=<id> --research # Research-backed generation for all tasks task-master expand --all --research ``` </Accordion> <Accordion title="Clear Subtasks"> ```bash # Clear subtasks from a specific task task-master clear-subtasks --id=<id> # Clear subtasks from multiple tasks task-master clear-subtasks --id=1,2,3 # Clear subtasks from all tasks task-master clear-subtasks --all ``` </Accordion> <Accordion title="Analyze Task Complexity"> ```bash # Analyze complexity of all tasks task-master analyze-complexity # Save report to a custom location task-master analyze-complexity --output=my-report.json # Use a specific LLM model task-master analyze-complexity --model=claude-3-opus-20240229 # Set a custom complexity threshold (1-10) task-master analyze-complexity --threshold=6 # Use an alternative tasks file task-master analyze-complexity --file=custom-tasks.json # Use Perplexity AI for research-backed complexity analysis task-master analyze-complexity --research ``` </Accordion> <Accordion title="View Complexity Report"> ```bash # Display the task complexity analysis report task-master complexity-report # View a report at a custom location task-master complexity-report --file=my-report.json ``` </Accordion> <Accordion title="Managing Task Dependencies"> ```bash # Add a dependency to a task task-master add-dependency --id=<id> --depends-on=<id> # Remove a dependency from a task task-master remove-dependency --id=<id> --depends-on=<id> # Validate dependencies without fixing them task-master validate-dependencies # Find and fix invalid 
dependencies automatically task-master fix-dependencies ``` </Accordion> <Accordion title="Add a New Task"> ```bash # Add a new task using AI task-master add-task --prompt="Description of the new task" # Add a task with dependencies task-master add-task --prompt="Description" --dependencies=1,2,3 # Add a task with priority task-master add-task --prompt="Description" --priority=high ``` </Accordion> <Accordion title="Initialize a Project"> ```bash # Initialize a new project with Task Master structure task-master init ``` </Accordion> </AccordionGroup> ```