# Files

--------------------------------------------------------------------------------
/mcp-server/src/tools/utils.js:
--------------------------------------------------------------------------------

```javascript
/**
 * tools/utils.js
 * Utility functions for Task Master CLI integration
 */

import { spawnSync } from 'child_process';
import path from 'path';
import fs from 'fs';
import { contextManager } from '../core/context-manager.js'; // Import the singleton
import { fileURLToPath } from 'url';
import packageJson from '../../../package.json' with { type: 'json' };
import { getCurrentTag } from '../../../scripts/modules/utils.js';
// Import path utilities to ensure consistent path resolution
import {
	lastFoundProjectRoot,
	PROJECT_MARKERS
} from '../core/utils/path-utils.js';

const __filename = fileURLToPath(import.meta.url);

// Cache for version info to avoid repeated file reads
let cachedVersionInfo = null;

/**
 * Get version information from package.json
 * @returns {Object} Version information
 */
function getVersionInfo() {
	// Return cached version if available
	if (cachedVersionInfo) {
		return cachedVersionInfo;
	}
	// Use the imported packageJson directly
	cachedVersionInfo = {
		version: packageJson.version || 'unknown',
		name: packageJson.name || 'task-master-ai'
	};
	return cachedVersionInfo;
}

/**
 * Get current tag information for MCP responses
 * @param {string} projectRoot - The project root directory
 * @param {Object} log - Logger object
 * @returns {Object} Tag information object
 */
function getTagInfo(projectRoot, log) {
	try {
		if (!projectRoot) {
			log.warn('No project root provided for tag information');
			return { currentTag: 'master', availableTags: ['master'] };
		}

		const currentTag = getCurrentTag(projectRoot);

		// Read available tags from tasks.json
		let availableTags = ['master']; // Default fallback
		try {
			const tasksJsonPath = path.join(
				projectRoot,
				'.taskmaster',
				'tasks',
				'tasks.json'
			);
			if (fs.existsSync(tasksJsonPath)) {
				const tasksData = JSON.parse(fs.readFileSync(tasksJsonPath, 'utf-8'));
				// If it's the new tagged format, extract tag keys
				if (
					tasksData &&
					typeof tasksData === 'object' &&
					!Array.isArray(tasksData.tasks)
				) {
					const tagKeys = Object.keys(tasksData).filter(
						(key) =>
							tasksData[key] &&
							typeof tasksData[key] === 'object' &&
							Array.isArray(tasksData[key].tasks)
					);
					if (tagKeys.length > 0) {
						availableTags = tagKeys;
					}
				}
			}
		} catch (tagError) {
			log.debug(`Could not read available tags: ${tagError.message}`);
		}

		return {
			currentTag: currentTag || 'master',
			availableTags: availableTags
		};
	} catch (error) {
		log.warn(`Error getting tag information: ${error.message}`);
		return { currentTag: 'master', availableTags: ['master'] };
	}
}
/**
 * Get normalized project root path
 * @param {string|undefined} projectRootRaw - Raw project root from arguments
 * @param {Object} log - Logger object
 * @returns {string} - Normalized absolute path to project root
 */
function getProjectRoot(projectRootRaw, log) {
	// PRECEDENCE ORDER:
	// 1. Environment variable override (TASK_MASTER_PROJECT_ROOT)
	// 2. Explicitly provided projectRoot in args
	// 3. Previously found/cached project root
	// 4. Current directory if it has project markers
	// 5. Current directory with warning

	// 1. Check for environment variable override
	if (process.env.TASK_MASTER_PROJECT_ROOT) {
		const envRoot = process.env.TASK_MASTER_PROJECT_ROOT;
		const absolutePath = path.isAbsolute(envRoot)
			? envRoot
			: path.resolve(process.cwd(), envRoot);
		log.info(
			`Using project root from TASK_MASTER_PROJECT_ROOT environment variable: ${absolutePath}`
		);
		return absolutePath;
	}

	// 2. If project root is explicitly provided, use it
	if (projectRootRaw) {
		const absolutePath = path.isAbsolute(projectRootRaw)
			? projectRootRaw
			: path.resolve(process.cwd(), projectRootRaw);
		log.info(`Using explicitly provided project root: ${absolutePath}`);
		return absolutePath;
	}

	// 3. If we have a last found project root from a tasks.json search, use that for consistency
	if (lastFoundProjectRoot) {
		log.info(
			`Using last known project root where tasks.json was found: ${lastFoundProjectRoot}`
		);
		return lastFoundProjectRoot;
	}

	// 4. Check if the current directory has any indicators of being a task-master project
	const currentDir = process.cwd();
	if (
		PROJECT_MARKERS.some((marker) => {
			const markerPath = path.join(currentDir, marker);
			return fs.existsSync(markerPath);
		})
	) {
		log.info(
			`Using current directory as project root (found project markers): ${currentDir}`
		);
		return currentDir;
	}

	// 5. Default to current working directory but warn the user
	log.warn(
		`No task-master project detected in current directory. Using ${currentDir} as project root.`
	);
	log.warn(
		'Consider using --project-root to specify the correct project location or set TASK_MASTER_PROJECT_ROOT environment variable.'
	);
	return currentDir;
}
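// Illustrative walk-through of the precedence above (values hypothetical):
// with TASK_MASTER_PROJECT_ROOT='./my-app' and cwd '/home/dev', step 1 wins:
//
//   process.env.TASK_MASTER_PROJECT_ROOT = './my-app';
//   getProjectRoot(undefined, log); // -> '/home/dev/my-app'
//
// With the variable unset, a relative projectRootRaw is resolved against the
// cwd in step 2; only when both are absent do the cached-root and
// project-marker fallbacks apply.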
/**
 * Extracts and normalizes the project root path from the MCP session object.
 * @param {Object} session - The MCP session object.
 * @param {Object} log - The MCP logger object.
 * @returns {string|null} - The normalized absolute project root path or null if not found/invalid.
 */
function getProjectRootFromSession(session, log) {
	try {
		// Add detailed logging of session structure
		log.info(
			`Session object: ${JSON.stringify({
				hasSession: !!session,
				hasRoots: !!session?.roots,
				rootsType: typeof session?.roots,
				isRootsArray: Array.isArray(session?.roots),
				rootsLength: session?.roots?.length,
				firstRoot: session?.roots?.[0],
				hasRootsRoots: !!session?.roots?.roots,
				rootsRootsType: typeof session?.roots?.roots,
				isRootsRootsArray: Array.isArray(session?.roots?.roots),
				rootsRootsLength: session?.roots?.roots?.length,
				firstRootsRoot: session?.roots?.roots?.[0]
			})}`
		);

		let rawRootPath = null;
		let decodedPath = null;
		let finalPath = null;

		// Check primary location
		if (session?.roots?.[0]?.uri) {
			rawRootPath = session.roots[0].uri;
			log.info(`Found raw root URI in session.roots[0].uri: ${rawRootPath}`);
		}
		// Check alternate location
		else if (session?.roots?.roots?.[0]?.uri) {
			rawRootPath = session.roots.roots[0].uri;
			log.info(
				`Found raw root URI in session.roots.roots[0].uri: ${rawRootPath}`
			);
		}

		if (rawRootPath) {
			// Decode URI and strip file:// protocol
			decodedPath = rawRootPath.startsWith('file://')
				? decodeURIComponent(rawRootPath.slice(7))
				: rawRootPath;

			// Assume non-file URI is already decoded? Or decode anyway? Let's decode.
			if (!rawRootPath.startsWith('file://')) {
				decodedPath = decodeURIComponent(rawRootPath); // Decode even if no file://
			}

			// Handle potential Windows drive prefix after stripping protocol (e.g., /C:/...)
			if (
				decodedPath.startsWith('/') &&
				/[A-Za-z]:/.test(decodedPath.substring(1, 3))
			) {
				decodedPath = decodedPath.substring(1); // Remove leading slash if it's like /C:/...
			}
			log.info(`Decoded path: ${decodedPath}`);

			// Normalize slashes and resolve
			const normalizedSlashes = decodedPath.replace(/\\/g, '/');
			finalPath = path.resolve(normalizedSlashes); // Resolve to absolute path for current OS

			log.info(`Normalized and resolved session path: ${finalPath}`);
			return finalPath;
		}

		// Fallback Logic (remains the same)
		log.warn('No project root URI found in session. Attempting fallbacks...');
		const cwd = process.cwd();

		// Fallback 1: Use server path deduction (Cursor IDE)
		const serverPath = process.argv[1];
		if (serverPath && serverPath.includes('mcp-server')) {
			const mcpServerIndex = serverPath.indexOf('mcp-server');
			if (mcpServerIndex !== -1) {
				const projectRoot = path.dirname(
					serverPath.substring(0, mcpServerIndex)
				); // Go up one level
				if (
					fs.existsSync(path.join(projectRoot, '.cursor')) ||
					fs.existsSync(path.join(projectRoot, 'mcp-server')) ||
					fs.existsSync(path.join(projectRoot, 'package.json'))
				) {
					log.info(
						`Using project root derived from server path: ${projectRoot}`
					);
					return projectRoot; // Already absolute
				}
			}
		}

		// Fallback 2: Use CWD
		log.info(`Using current working directory as ultimate fallback: ${cwd}`);
		return cwd; // Already absolute
	} catch (e) {
		log.error(`Error in getProjectRootFromSession: ${e.message}`);
		// Attempt final fallback to CWD on error
		const cwd = process.cwd();
		log.warn(
			`Returning CWD (${cwd}) due to error during session root processing.`
		);
		return cwd;
	}
}
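// Illustrative input/output (hypothetical session): a client reporting
// { roots: [{ uri: 'file:///Users/dev/my-project' }] } resolves as
//
//   getProjectRootFromSession(session, log); // -> '/Users/dev/my-project'
//
// while a Windows-style 'file:///C:/dev/my-project' URI is first stripped to
// '/C:/dev/my-project' and then to 'C:/dev/my-project' before path.resolve.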
/**
 * Handle API result with standardized error handling and response formatting
 * @param {Object} result - Result object from API call with success, data, and error properties
 * @param {Object} log - Logger object
 * @param {string} errorPrefix - Prefix for error messages
 * @param {Function} processFunction - Optional function to process successful result data
 * @param {string} [projectRoot] - Optional project root for tag information
 * @returns {Object} - Standardized MCP response object
 */
async function handleApiResult(
	result,
	log,
	errorPrefix = 'API error',
	processFunction = processMCPResponseData,
	projectRoot = null
) {
	// Get version info for every response
	const versionInfo = getVersionInfo();

	// Get tag info if project root is provided
	const tagInfo = projectRoot ? getTagInfo(projectRoot, log) : null;

	if (!result.success) {
		const errorMsg = result.error?.message || `Unknown ${errorPrefix}`;
		log.error(`${errorPrefix}: ${errorMsg}`);
		return createErrorResponse(errorMsg, versionInfo, tagInfo);
	}

	// Process the result data if needed
	const processedData = processFunction
		? processFunction(result.data)
		: result.data;

	log.info('Successfully completed operation');

	// Create the response payload including version info and tag info
	const responsePayload = {
		data: processedData,
		version: versionInfo
	};

	// Add tag information if available
	if (tagInfo) {
		responsePayload.tag = tagInfo;
	}

	return createContentResponse(responsePayload);
}
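// Sketch of typical wiring inside a tool (the direct function name here is
// hypothetical):
//
//   const result = await listTasksDirect({ tasksJsonPath }, log);
//   return handleApiResult(
//     result, log, 'Error listing tasks',
//     processMCPResponseData, args.projectRoot
//   );
//
// On success the serialized payload carries `data`, `version`, and (when
// projectRoot was provided) `tag`.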
/**
 * Executes a task-master CLI command synchronously.
 * @param {string} command - The command to execute (e.g., 'add-task')
 * @param {Object} log - Logger instance
 * @param {Array} args - Arguments for the command
 * @param {string|undefined} projectRootRaw - Optional raw project root path (will be normalized internally)
 * @param {Object|null} customEnv - Optional object containing environment variables to pass to the child process
 * @returns {Object} - The result of the command execution
 */
function executeTaskMasterCommand(
	command,
	log,
	args = [],
	projectRootRaw = null,
	customEnv = null // Changed from session to customEnv
) {
	try {
		// Normalize project root internally using the getProjectRoot utility
		const cwd = getProjectRoot(projectRootRaw, log);

		log.info(
			`Executing task-master ${command} with args: ${JSON.stringify(
				args
			)} in directory: ${cwd}`
		);

		// Prepare full arguments array
		const fullArgs = [command, ...args];

		// Common options for spawn
		const spawnOptions = {
			encoding: 'utf8',
			cwd: cwd,
			// Merge process.env with customEnv, giving precedence to customEnv
			env: { ...process.env, ...(customEnv || {}) }
		};

		// Log the environment being passed (optional, for debugging)
		// log.info(`Spawn options env: ${JSON.stringify(spawnOptions.env)}`);

		// Execute the command using the global task-master CLI or local script
		// Try the global CLI first
		let result = spawnSync('task-master', fullArgs, spawnOptions);

		// If global CLI is not available, try fallback to the local script
		if (result.error && result.error.code === 'ENOENT') {
			log.info('Global task-master not found, falling back to local script');
			// Pass the same spawnOptions (including env) to the fallback
			result = spawnSync('node', ['scripts/dev.js', ...fullArgs], spawnOptions);
		}

		if (result.error) {
			throw new Error(`Command execution error: ${result.error.message}`);
		}

		if (result.status !== 0) {
			// Improve error handling by combining stderr and stdout if stderr is empty
			const errorOutput = result.stderr
				? result.stderr.trim()
				: result.stdout
					? result.stdout.trim()
					: 'Unknown error';
			throw new Error(
				`Command failed with exit code ${result.status}: ${errorOutput}`
			);
		}

		return {
			success: true,
			stdout: result.stdout,
			stderr: result.stderr
		};
	} catch (error) {
		log.error(`Error executing task-master command: ${error.message}`);
		return {
			success: false,
			error: error.message
		};
	}
}
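// Illustrative call (arguments hypothetical): run `task-master add-task`
// with an extra environment variable; the helper falls back to
// `node scripts/dev.js` automatically if the global CLI is missing:
//
//   const res = executeTaskMasterCommand(
//     'add-task', log, ['--prompt', 'Add login page'],
//     '/path/to/project', { MY_EXTRA_VAR: '1' }
//   );
//   if (!res.success) log.error(res.error);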
/**
 * Checks cache for a result using the provided key. If not found, executes the action function,
 * caches the result upon success, and returns the result.
 *
 * @param {Object} options - Configuration options.
 * @param {string} options.cacheKey - The unique key for caching this operation's result.
 * @param {Function} options.actionFn - The async function to execute if the cache misses.
 *   Should return an object like { success: boolean, data?: any, error?: { code: string, message: string } }.
 * @param {Object} options.log - The logger instance.
 * @returns {Promise<Object>} - An object containing the result.
 *   Format: { success: boolean, data?: any, error?: { code: string, message: string } }
 */
async function getCachedOrExecute({ cacheKey, actionFn, log }) {
	// Check cache first
	const cachedResult = contextManager.getCachedData(cacheKey);

	if (cachedResult !== undefined) {
		log.info(`Cache hit for key: ${cacheKey}`);
		return cachedResult;
	}

	log.info(`Cache miss for key: ${cacheKey}. Executing action function.`);

	// Execute the action function if cache missed
	const result = await actionFn();

	// If the action was successful, cache the result
	if (result.success && result.data !== undefined) {
		log.info(`Action successful. Caching result for key: ${cacheKey}`);
		contextManager.setCachedData(cacheKey, result);
	} else if (!result.success) {
		log.warn(
			`Action failed for cache key ${cacheKey}. Result not cached. Error: ${result.error?.message}`
		);
	} else {
		log.warn(
			`Action for cache key ${cacheKey} succeeded but returned no data. Result not cached.`
		);
	}

	return result;
}
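// Usage sketch (cache key shape and action are hypothetical): wrap an
// expensive lookup so repeated tool calls hit the context-manager cache:
//
//   const outcome = await getCachedOrExecute({
//     cacheKey: `listTasks:${projectRoot}:${statusFilter}`,
//     actionFn: async () => ({ success: true, data: await loadTasks() }),
//     log
//   });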
/**
 * Recursively removes specified fields from task objects, whether single or in an array.
 * Handles common data structures returned by task commands.
 * @param {Object|Array} taskOrData - A single task object or a data object containing a 'tasks' array.
 * @param {string[]} fieldsToRemove - An array of field names to remove.
 * @returns {Object|Array} - The processed data with specified fields removed.
 */
function processMCPResponseData(
	taskOrData,
	fieldsToRemove = ['details', 'testStrategy']
) {
	if (!taskOrData) {
		return taskOrData;
	}

	// Helper function to process a single task object
	const processSingleTask = (task) => {
		if (typeof task !== 'object' || task === null) {
			return task;
		}

		const processedTask = { ...task };

		// Remove specified fields from the task
		fieldsToRemove.forEach((field) => {
			delete processedTask[field];
		});

		// Recursively process subtasks if they exist and are an array
		if (processedTask.subtasks && Array.isArray(processedTask.subtasks)) {
			// Use processArrayOfTasks to handle the subtasks array
			processedTask.subtasks = processArrayOfTasks(processedTask.subtasks);
		}

		return processedTask;
	};

	// Helper function to process an array of tasks
	const processArrayOfTasks = (tasks) => {
		return tasks.map(processSingleTask);
	};

	// Check if the input is a data structure containing a 'tasks' array (like from listTasks)
	if (
		typeof taskOrData === 'object' &&
		taskOrData !== null &&
		Array.isArray(taskOrData.tasks)
	) {
		return {
			...taskOrData, // Keep other potential fields like 'stats', 'filter'
			tasks: processArrayOfTasks(taskOrData.tasks)
		};
	}
	// Check if the input is likely a single task object (add more checks if needed)
	else if (
		typeof taskOrData === 'object' &&
		taskOrData !== null &&
		'id' in taskOrData &&
		'title' in taskOrData
	) {
		return processSingleTask(taskOrData);
	}
	// Check if the input is an array of tasks directly (less common but possible)
	else if (Array.isArray(taskOrData)) {
		return processArrayOfTasks(taskOrData);
	}

	// If it doesn't match known task structures, return it as is
	return taskOrData;
}

/**
 * Creates standard content response for tools
 * @param {string|Object} content - Content to include in response
 * @returns {Object} - Content response object in FastMCP format
 */
function createContentResponse(content) {
	// FastMCP requires text type, so we format objects as JSON strings
	return {
		content: [
			{
				type: 'text',
				text:
					typeof content === 'object'
						? // Format JSON nicely with indentation
							JSON.stringify(content, null, 2)
						: // Keep other content types as-is
							String(content)
			}
		]
	};
}

/**
 * Creates error response for tools
 * @param {string} errorMessage - Error message to include in response
 * @param {Object} [versionInfo] - Optional version information object
 * @param {Object} [tagInfo] - Optional tag information object
 * @returns {Object} - Error content response object in FastMCP format
 */
function createErrorResponse(errorMessage, versionInfo, tagInfo) {
	// Provide fallback version info if not provided
	if (!versionInfo) {
		versionInfo = getVersionInfo();
	}

	let responseText = `Error: ${errorMessage}
Version: ${versionInfo.version}
Name: ${versionInfo.name}`;

	// Add tag information if available
	if (tagInfo) {
		responseText += `
Current Tag: ${tagInfo.currentTag}`;
	}

	return {
		content: [
			{
				type: 'text',
				text: responseText
			}
		],
		isError: true
	};
}
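// Worked example: with the default field list, verbose fields are stripped
// from a task and, recursively, from its subtasks:
//
//   processMCPResponseData({
//     id: 1, title: 'Setup', details: 'long text...',
//     subtasks: [{ id: '1.1', title: 'Init', testStrategy: '...' }]
//   });
//   // -> { id: 1, title: 'Setup', subtasks: [{ id: '1.1', title: 'Init' }] }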
/**
 * Creates a logger wrapper object compatible with core function expectations.
 * Adapts the MCP logger to the { info, warn, error, debug, success } structure.
 * @param {Object} log - The MCP logger instance.
 * @returns {Object} - The logger wrapper object.
 */
function createLogWrapper(log) {
	return {
		info: (message, ...args) => log.info(message, ...args),
		warn: (message, ...args) => log.warn(message, ...args),
		error: (message, ...args) => log.error(message, ...args),
		// Handle optional debug method
		debug: (message, ...args) =>
			log.debug ? log.debug(message, ...args) : null,
		// Map success to info as a common fallback
		success: (message, ...args) => log.info(message, ...args)
	};
}

/**
 * Resolves and normalizes a project root path from various formats.
 * Handles URI encoding, Windows paths, and file protocols.
 * @param {string | undefined | null} rawPath - The raw project root path.
 * @param {object} [log] - Optional logger object.
 * @returns {string | null} Normalized absolute path or null if input is invalid/empty.
 */
function normalizeProjectRoot(rawPath, log) {
	if (!rawPath) return null;
	try {
		let pathString = Array.isArray(rawPath) ? rawPath[0] : String(rawPath);
		if (!pathString) return null;

		// 1. Decode URI Encoding
		// Use try-catch for decoding as malformed URIs can throw
		try {
			pathString = decodeURIComponent(pathString);
		} catch (decodeError) {
			if (log)
				log.warn(
					`Could not decode URI component for path "${rawPath}": ${decodeError.message}. Proceeding with raw string.`
				);
			// Proceed with the original string if decoding fails
			pathString = Array.isArray(rawPath) ? rawPath[0] : String(rawPath);
		}

		// 2. Strip file:// prefix (handle 2 or 3 slashes)
		if (pathString.startsWith('file:///')) {
			pathString = pathString.slice(7); // Slice 7 for file:///, may leave leading / on Windows
		} else if (pathString.startsWith('file://')) {
			pathString = pathString.slice(7); // Slice 7 for file://
		}

		// 3. Handle potential Windows leading slash after stripping prefix (e.g., /C:/...)
		// This checks if it starts with / followed by a drive letter C: D: etc.
		if (
			pathString.startsWith('/') &&
			/[A-Za-z]:/.test(pathString.substring(1, 3))
		) {
			pathString = pathString.substring(1); // Remove the leading slash
		}

		// 4. Normalize backslashes to forward slashes
		pathString = pathString.replace(/\\/g, '/');

		// 5. Resolve to absolute path using server's OS convention
		const resolvedPath = path.resolve(pathString);
		return resolvedPath;
	} catch (error) {
		if (log) {
			log.error(
				`Error normalizing project root path "${rawPath}": ${error.message}`
			);
		}
		return null; // Return null on error
	}
}
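// Illustrative normalizations (the final step is path.resolve, so results
// follow the host OS conventions):
//
//   normalizeProjectRoot('file:///home/dev/proj%20x'); // -> '/home/dev/proj x' on POSIX
//   normalizeProjectRoot('file:///C:/dev/proj'); // -> 'C:\dev\proj' on a Windows host
//   normalizeProjectRoot(''); // -> null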
/**
 * Extracts the raw project root path from the session (without normalization).
 * Used as a fallback within the HOF.
 * @param {Object} session - The MCP session object.
 * @param {Object} log - The MCP logger object.
 * @returns {string|null} The raw path string or null.
 */
function getRawProjectRootFromSession(session, log) {
	try {
		// Check primary location
		if (session?.roots?.[0]?.uri) {
			return session.roots[0].uri;
		}
		// Check alternate location
		else if (session?.roots?.roots?.[0]?.uri) {
			return session.roots.roots[0].uri;
		}
		return null; // Not found in expected session locations
	} catch (e) {
		log.error(`Error accessing session roots: ${e.message}`);
		return null;
	}
}

/**
 * Higher-order function to wrap MCP tool execute methods.
 * Ensures args.projectRoot is present and normalized before execution.
 * Uses TASK_MASTER_PROJECT_ROOT environment variable with proper precedence.
 * @param {Function} executeFn - The original async execute(args, context) function.
 * @returns {Function} The wrapped async execute function.
 */
function withNormalizedProjectRoot(executeFn) {
	return async (args, context) => {
		const { log, session } = context;
		let normalizedRoot = null;
		let rootSource = 'unknown';

		try {
			// PRECEDENCE ORDER:
			// 1. TASK_MASTER_PROJECT_ROOT environment variable (from process.env or session)
			// 2. args.projectRoot (explicitly provided)
			// 3. Session-based project root resolution
			// 4. Current directory fallback

			// 1. Check for TASK_MASTER_PROJECT_ROOT environment variable first
			if (process.env.TASK_MASTER_PROJECT_ROOT) {
				const envRoot = process.env.TASK_MASTER_PROJECT_ROOT;
				normalizedRoot = path.isAbsolute(envRoot)
					? envRoot
					: path.resolve(process.cwd(), envRoot);
				rootSource = 'TASK_MASTER_PROJECT_ROOT environment variable';
				log.info(`Using project root from ${rootSource}: ${normalizedRoot}`);
			}
			// Also check session environment variables for TASK_MASTER_PROJECT_ROOT
			else if (session?.env?.TASK_MASTER_PROJECT_ROOT) {
				const envRoot = session.env.TASK_MASTER_PROJECT_ROOT;
				normalizedRoot = path.isAbsolute(envRoot)
					? envRoot
					: path.resolve(process.cwd(), envRoot);
				rootSource = 'TASK_MASTER_PROJECT_ROOT session environment variable';
				log.info(`Using project root from ${rootSource}: ${normalizedRoot}`);
			}
			// 2. If no environment variable, try args.projectRoot
			else if (args.projectRoot) {
				normalizedRoot = normalizeProjectRoot(args.projectRoot, log);
				rootSource = 'args.projectRoot';
				log.info(`Using project root from ${rootSource}: ${normalizedRoot}`);
			}
			// 3. If no args.projectRoot, try session-based resolution
			else {
				const sessionRoot = getProjectRootFromSession(session, log);
				if (sessionRoot) {
					normalizedRoot = sessionRoot; // getProjectRootFromSession already normalizes
					rootSource = 'session';
					log.info(`Using project root from ${rootSource}: ${normalizedRoot}`);
				}
			}

			if (!normalizedRoot) {
				log.error(
					'Could not determine project root from environment, args, or session.'
				);
				return createErrorResponse(
					'Could not determine project root. Please provide projectRoot argument or ensure TASK_MASTER_PROJECT_ROOT environment variable is set.'
				);
			}

			// Inject the normalized root back into args
			const updatedArgs = { ...args, projectRoot: normalizedRoot };

			// Execute the original function with normalized root in args
			return await executeFn(updatedArgs, context);
		} catch (error) {
			log.error(
				`Error within withNormalizedProjectRoot HOF (Normalized Root: ${normalizedRoot}): ${error.message}`
			);
			// Add stack trace if available and debug enabled
			if (error.stack && log.debug) {
				log.debug(error.stack);
			}
			// Return a generic error or re-throw depending on desired behavior
			return createErrorResponse(`Operation failed: ${error.message}`);
		}
	};
}
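// Sketch of wrapping a FastMCP tool's execute method (tool body illustrative,
// `doWork` is a hypothetical helper):
//
//   server.addTool({
//     name: 'get_tasks',
//     execute: withNormalizedProjectRoot(async (args, context) => {
//       // args.projectRoot is now a normalized absolute path
//       const result = await doWork(args);
//       return handleApiResult(result, context.log);
//     })
//   });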
/**
 * Checks progress reporting capability and returns the validated function or undefined.
 *
 * STANDARD PATTERN for AI-powered, long-running operations (parse-prd, expand-task, expand-all, analyze):
 *
 * This helper should be used as the first step in any MCP tool that performs long-running
 * AI operations. It validates the availability of progress reporting and provides consistent
 * logging about the capability status.
 *
 * Operations that should use this pattern:
 * - parse-prd: Parsing PRD documents with AI
 * - expand-task: Expanding tasks into subtasks
 * - expand-all: Expanding all tasks in batch
 * - analyze-complexity: Analyzing task complexity
 * - update-task: Updating tasks with AI assistance
 * - add-task: Creating new tasks with AI
 * - Any operation that makes AI service calls
 *
 * @example Basic usage in a tool's execute function:
 * ```javascript
 * import { checkProgressCapability } from './utils.js';
 *
 * async execute(args, context) {
 *   const { log, reportProgress, session } = context;
 *
 *   // Always validate progress capability first
 *   const progressCapability = checkProgressCapability(reportProgress, log);
 *
 *   // Pass to direct function - it handles undefined gracefully
 *   const result = await expandTask(taskId, numSubtasks, {
 *     session,
 *     reportProgress: progressCapability,
 *     mcpLog: log
 *   });
 * }
 * ```
 *
 * @example With progress reporting available:
 * ```javascript
 * // When reportProgress is available, users see real-time updates:
 * // "Starting PRD analysis (Input: 5432 tokens)..."
 * // "Task 1/10 - Implement user authentication"
 * // "Task 2/10 - Create database schema"
 * // "Task Generation Completed | Tokens: 5432/1234"
 * ```
 *
 * @example Without progress reporting (graceful degradation):
 * ```javascript
 * // When reportProgress is not available:
 * // - Operation runs normally without progress updates
 * // - Debug log: "reportProgress not available - operation will run without progress updates"
 * // - User gets final result after completion
 * ```
 *
 * @param {Function|undefined} reportProgress - The reportProgress function from MCP context.
 *   Expected signature: async (progress: {progress: number, total: number, message: string}) => void
 * @param {Object} log - Logger instance with debug, info, warn, error methods
 * @returns {Function|undefined} The validated reportProgress function or undefined if not available
 */
function checkProgressCapability(reportProgress, log) {
	// Validate that reportProgress is available for long-running operations
	if (typeof reportProgress !== 'function') {
		log.debug(
			'reportProgress not available - operation will run without progress updates'
		);
		return undefined;
	}
	return reportProgress;
}

// Ensure all functions are exported
export {
	getProjectRoot,
	getProjectRootFromSession,
	getTagInfo,
	handleApiResult,
	executeTaskMasterCommand,
	getCachedOrExecute,
	processMCPResponseData,
	createContentResponse,
	createErrorResponse,
	createLogWrapper,
	normalizeProjectRoot,
	getRawProjectRootFromSession,
	withNormalizedProjectRoot,
	checkProgressCapability
};
```

--------------------------------------------------------------------------------
/tests/unit/ai-services-unified.test.js:
--------------------------------------------------------------------------------

```javascript
import { jest } from '@jest/globals';

// Mock config-manager
const mockGetMainProvider = jest.fn();
const mockGetMainModelId = jest.fn();
const mockGetResearchProvider = jest.fn();
const mockGetResearchModelId = jest.fn();
const mockGetFallbackProvider = jest.fn();
const mockGetFallbackModelId = jest.fn();
const mockGetParametersForRole = jest.fn();
const mockGetResponseLanguage = jest.fn();
const mockGetUserId = jest.fn();
const mockGetDebugFlag = jest.fn();
const mockIsApiKeySet = jest.fn();

// --- Mock MODEL_MAP Data ---
// Provide a simplified structure sufficient for cost calculation tests
const mockModelMap = {
	anthropic: [
		{
			id: 'test-main-model',
			cost_per_1m_tokens: { input: 3, output: 15, currency: 'USD' }
		},
		{
			id: 'test-fallback-model',
			cost_per_1m_tokens: { input: 3, output: 15, currency: 'USD' }
		}
	],
	perplexity: [
		{
			id: 'test-research-model',
			cost_per_1m_tokens: { input: 1, output: 1, currency: 'USD' }
		}
	],
	openai: [
		{
			id: 'test-openai-model',
			cost_per_1m_tokens: { input: 2, output: 6, currency: 'USD' }
		}
	]
	// Add other providers/models if needed for specific tests
};

const mockGetBaseUrlForRole = jest.fn();
const mockGetAllProviders = jest.fn();
const mockGetOllamaBaseURL = jest.fn();
const mockGetAzureBaseURL = jest.fn();
const mockGetBedrockBaseURL = jest.fn();
const mockGetVertexProjectId = jest.fn();
const mockGetVertexLocation = jest.fn();
const mockGetAvailableModels = jest.fn();
const mockValidateProvider = jest.fn();
const mockValidateProviderModelCombination = jest.fn();
const mockGetConfig = jest.fn();
const mockWriteConfig = jest.fn();
const mockIsConfigFilePresent = jest.fn();
const mockGetMcpApiKeyStatus = jest.fn();
const mockGetMainMaxTokens = jest.fn();
const mockGetMainTemperature = jest.fn();
const mockGetResearchMaxTokens = jest.fn();
const mockGetResearchTemperature = jest.fn();
const mockGetFallbackMaxTokens = jest.fn();
const mockGetFallbackTemperature = jest.fn();
const mockGetLogLevel = jest.fn();
const mockGetDefaultNumTasks = jest.fn();
const mockGetDefaultSubtasks = jest.fn();
const mockGetDefaultPriority = jest.fn();
const mockGetProjectName = jest.fn();

jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({
	// Core config access
	getConfig: mockGetConfig,
	writeConfig: mockWriteConfig,
	isConfigFilePresent: mockIsConfigFilePresent,
	ConfigurationError: class ConfigurationError extends Error {
		constructor(message) {
			super(message);
			this.name = 'ConfigurationError';
		}
	},

	// Validation
	validateProvider: mockValidateProvider,
	validateProviderModelCombination: mockValidateProviderModelCombination,
	VALID_PROVIDERS: ['anthropic', 'perplexity', 'openai', 'google'],
	MODEL_MAP: mockModelMap,
	getAvailableModels: mockGetAvailableModels,

	// Role-specific getters
	getMainProvider: mockGetMainProvider,
	getMainModelId: mockGetMainModelId,
	getMainMaxTokens: mockGetMainMaxTokens,
	getMainTemperature: mockGetMainTemperature,
	getResearchProvider: mockGetResearchProvider,
	getResearchModelId: mockGetResearchModelId,
	getResearchMaxTokens: mockGetResearchMaxTokens,
	getResearchTemperature: mockGetResearchTemperature,
	getFallbackProvider: mockGetFallbackProvider,
	getFallbackModelId: mockGetFallbackModelId,
	getFallbackMaxTokens: mockGetFallbackMaxTokens,
	getFallbackTemperature: mockGetFallbackTemperature,
	getParametersForRole: mockGetParametersForRole,
	getResponseLanguage: mockGetResponseLanguage,
	getUserId: mockGetUserId,
	getDebugFlag: mockGetDebugFlag,
	getBaseUrlForRole: mockGetBaseUrlForRole,

	// Global settings
	getLogLevel: mockGetLogLevel,
	getDefaultNumTasks: mockGetDefaultNumTasks,
	getDefaultSubtasks: mockGetDefaultSubtasks,
	getDefaultPriority: mockGetDefaultPriority,
	getProjectName: mockGetProjectName,

	// API Key and provider functions
	isApiKeySet: mockIsApiKeySet,
	getAllProviders: mockGetAllProviders,
	getOllamaBaseURL: mockGetOllamaBaseURL,
	getAzureBaseURL: mockGetAzureBaseURL,
	getBedrockBaseURL: mockGetBedrockBaseURL,
	getVertexProjectId: mockGetVertexProjectId,
	getVertexLocation: mockGetVertexLocation,
	getMcpApiKeyStatus: mockGetMcpApiKeyStatus,

	// Providers without API keys
	providersWithoutApiKeys: ['ollama', 'bedrock', 'gemini-cli']
}));
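// Note: jest.unstable_mockModule only affects modules imported *after* the
// mock is registered, so the module under test is pulled in with a dynamic
// import later in the file, e.g. (sketch):
//
//   const { generateTextService } = await import(
//     '../../scripts/modules/ai-services-unified.js'
//   );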
Error { constructor(message) { super(message); this.name = 'ConfigurationError'; } }, // Validation validateProvider: mockValidateProvider, validateProviderModelCombination: mockValidateProviderModelCombination, VALID_PROVIDERS: ['anthropic', 'perplexity', 'openai', 'google'], MODEL_MAP: mockModelMap, getAvailableModels: mockGetAvailableModels, // Role-specific getters getMainProvider: mockGetMainProvider, getMainModelId: mockGetMainModelId, getMainMaxTokens: mockGetMainMaxTokens, getMainTemperature: mockGetMainTemperature, getResearchProvider: mockGetResearchProvider, getResearchModelId: mockGetResearchModelId, getResearchMaxTokens: mockGetResearchMaxTokens, getResearchTemperature: mockGetResearchTemperature, getFallbackProvider: mockGetFallbackProvider, getFallbackModelId: mockGetFallbackModelId, getFallbackMaxTokens: mockGetFallbackMaxTokens, getFallbackTemperature: mockGetFallbackTemperature, getParametersForRole: mockGetParametersForRole, getResponseLanguage: mockGetResponseLanguage, getUserId: mockGetUserId, getDebugFlag: mockGetDebugFlag, getBaseUrlForRole: mockGetBaseUrlForRole, // Global settings getLogLevel: mockGetLogLevel, getDefaultNumTasks: mockGetDefaultNumTasks, getDefaultSubtasks: mockGetDefaultSubtasks, getDefaultPriority: mockGetDefaultPriority, getProjectName: mockGetProjectName, // API Key and provider functions isApiKeySet: mockIsApiKeySet, getAllProviders: mockGetAllProviders, getOllamaBaseURL: mockGetOllamaBaseURL, getAzureBaseURL: mockGetAzureBaseURL, getBedrockBaseURL: mockGetBedrockBaseURL, getVertexProjectId: mockGetVertexProjectId, getVertexLocation: mockGetVertexLocation, getMcpApiKeyStatus: mockGetMcpApiKeyStatus, // Providers without API keys providersWithoutApiKeys: ['ollama', 'bedrock', 'gemini-cli'] })); // Mock AI Provider Classes with proper methods const mockAnthropicProvider = { generateText: jest.fn(), streamText: jest.fn(), generateObject: jest.fn(), getRequiredApiKeyName: jest.fn(() => 'ANTHROPIC_API_KEY'), isRequiredApiKey: jest.fn(() => true) }; const mockPerplexityProvider = { generateText: jest.fn(), streamText: jest.fn(), generateObject: jest.fn(), getRequiredApiKeyName: jest.fn(() => 'PERPLEXITY_API_KEY'), isRequiredApiKey: jest.fn(() => true) }; const mockOpenAIProvider = { generateText: jest.fn(), streamText: jest.fn(), generateObject: jest.fn(), getRequiredApiKeyName: jest.fn(() => 'OPENAI_API_KEY'), isRequiredApiKey: jest.fn(() => true) }; const mockOllamaProvider = { generateText: jest.fn(), streamText: jest.fn(), generateObject: jest.fn(), getRequiredApiKeyName: jest.fn(() => null), isRequiredApiKey: jest.fn(() => false) }; // Mock the provider classes to return our mock instances jest.unstable_mockModule('../../src/ai-providers/index.js', () => ({ AnthropicAIProvider: jest.fn(() => mockAnthropicProvider), PerplexityAIProvider: jest.fn(() => mockPerplexityProvider), GoogleAIProvider: jest.fn(() => ({ generateText: jest.fn(), streamText: jest.fn(), generateObject: jest.fn(), getRequiredApiKeyName: jest.fn(() => 'GOOGLE_GENERATIVE_AI_API_KEY'), isRequiredApiKey: jest.fn(() => true) })), OpenAIProvider: jest.fn(() => mockOpenAIProvider), XAIProvider: jest.fn(() => ({ generateText: jest.fn(), streamText: jest.fn(), generateObject: jest.fn(), getRequiredApiKeyName: jest.fn(() => 'XAI_API_KEY'), isRequiredApiKey: jest.fn(() => true) })), GroqProvider: jest.fn(() => ({ generateText: jest.fn(), streamText: jest.fn(), generateObject: jest.fn(), getRequiredApiKeyName: jest.fn(() => 'GROQ_API_KEY'), isRequiredApiKey: jest.fn(() => true) })), 
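  // NOTE (annotation, not in the original file): every entry in this factory
  // exposes the same minimal provider surface (generateText/streamText/
  // generateObject plus the two API-key metadata methods), so the unified
  // service layer can be exercised without any network calls. A hypothetical
  // helper could DRY these up, e.g.:
  //   const makeProviderMock = (keyName, required = true) => ({
  //     generateText: jest.fn(),
  //     streamText: jest.fn(),
  //     generateObject: jest.fn(),
  //     getRequiredApiKeyName: jest.fn(() => keyName),
  //     isRequiredApiKey: jest.fn(() => required)
  //   });
  // (Sketch only; `makeProviderMock` is not defined in this test file.)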
  OpenRouterAIProvider: jest.fn(() => ({
    generateText: jest.fn(),
    streamText: jest.fn(),
    generateObject: jest.fn(),
    getRequiredApiKeyName: jest.fn(() => 'OPENROUTER_API_KEY'),
    isRequiredApiKey: jest.fn(() => true)
  })),
  OllamaAIProvider: jest.fn(() => mockOllamaProvider),
  BedrockAIProvider: jest.fn(() => ({
    generateText: jest.fn(),
    streamText: jest.fn(),
    generateObject: jest.fn(),
    getRequiredApiKeyName: jest.fn(() => 'AWS_ACCESS_KEY_ID'),
    isRequiredApiKey: jest.fn(() => false)
  })),
  AzureProvider: jest.fn(() => ({
    generateText: jest.fn(),
    streamText: jest.fn(),
    generateObject: jest.fn(),
    getRequiredApiKeyName: jest.fn(() => 'AZURE_API_KEY'),
    isRequiredApiKey: jest.fn(() => true)
  })),
  VertexAIProvider: jest.fn(() => ({
    generateText: jest.fn(),
    streamText: jest.fn(),
    generateObject: jest.fn(),
    getRequiredApiKeyName: jest.fn(() => null),
    isRequiredApiKey: jest.fn(() => false)
  })),
  ClaudeCodeProvider: jest.fn(() => ({
    generateText: jest.fn(),
    streamText: jest.fn(),
    generateObject: jest.fn(),
    getRequiredApiKeyName: jest.fn(() => 'CLAUDE_CODE_API_KEY'),
    isRequiredApiKey: jest.fn(() => false)
  })),
  GeminiCliProvider: jest.fn(() => ({
    generateText: jest.fn(),
    streamText: jest.fn(),
    generateObject: jest.fn(),
    getRequiredApiKeyName: jest.fn(() => 'GEMINI_API_KEY'),
    isRequiredApiKey: jest.fn(() => false)
  })),
  GrokCliProvider: jest.fn(() => ({
    generateText: jest.fn(),
    streamText: jest.fn(),
    generateObject: jest.fn(),
    getRequiredApiKeyName: jest.fn(() => 'XAI_API_KEY'),
    isRequiredApiKey: jest.fn(() => false)
  }))
}));

// Mock utils logger, API key resolver, AND findProjectRoot
const mockLog = jest.fn();
const mockResolveEnvVariable = jest.fn();
const mockFindProjectRoot = jest.fn();
const mockIsSilentMode = jest.fn();
const mockLogAiUsage = jest.fn();
const mockFindCycles = jest.fn();
const mockFormatTaskId = jest.fn();
const mockTaskExists = jest.fn();
const mockFindTaskById = jest.fn();
const mockTruncate = jest.fn();
const mockToKebabCase = jest.fn();
const mockDetectCamelCaseFlags = jest.fn();
const mockDisableSilentMode = jest.fn();
const mockEnableSilentMode = jest.fn();
const mockGetTaskManager = jest.fn();
const mockAddComplexityToTask = jest.fn();
const mockReadJSON = jest.fn();
const mockWriteJSON = jest.fn();
const mockSanitizePrompt = jest.fn();
const mockReadComplexityReport = jest.fn();
const mockFindTaskInComplexityReport = jest.fn();
const mockAggregateTelemetry = jest.fn();
const mockGetCurrentTag = jest.fn(() => 'master');
const mockResolveTag = jest.fn(() => 'master');
const mockGetTasksForTag = jest.fn(() => []);

jest.unstable_mockModule('../../scripts/modules/utils.js', () => ({
  LOG_LEVELS: { error: 0, warn: 1, info: 2, debug: 3 },
  log: mockLog,
  resolveEnvVariable: mockResolveEnvVariable,
  findProjectRoot: mockFindProjectRoot,
  isSilentMode: mockIsSilentMode,
  logAiUsage: mockLogAiUsage,
  findCycles: mockFindCycles,
  formatTaskId: mockFormatTaskId,
  taskExists: mockTaskExists,
  findTaskById: mockFindTaskById,
  truncate: mockTruncate,
  toKebabCase: mockToKebabCase,
  detectCamelCaseFlags: mockDetectCamelCaseFlags,
  disableSilentMode: mockDisableSilentMode,
  enableSilentMode: mockEnableSilentMode,
  getTaskManager: mockGetTaskManager,
  addComplexityToTask: mockAddComplexityToTask,
  readJSON: mockReadJSON,
  writeJSON: mockWriteJSON,
  sanitizePrompt: mockSanitizePrompt,
  readComplexityReport: mockReadComplexityReport,
  findTaskInComplexityReport: mockFindTaskInComplexityReport,
  aggregateTelemetry: mockAggregateTelemetry,
  getCurrentTag: mockGetCurrentTag,
  resolveTag: mockResolveTag,
  getTasksForTag: mockGetTasksForTag
}));

// Import the module to test (AFTER mocks)
const { generateTextService } = await import(
  '../../scripts/modules/ai-services-unified.js'
);

describe('Unified AI Services', () => {
  const fakeProjectRoot = '/fake/project/root'; // Define for reuse

  beforeEach(() => {
    // Clear mocks before each test
    jest.clearAllMocks(); // Clears all mocks

    // Set default mock behaviors
    mockGetMainProvider.mockReturnValue('anthropic');
    mockGetMainModelId.mockReturnValue('test-main-model');
    mockGetResearchProvider.mockReturnValue('perplexity');
    mockGetResearchModelId.mockReturnValue('test-research-model');
    mockGetFallbackProvider.mockReturnValue('anthropic');
    mockGetFallbackModelId.mockReturnValue('test-fallback-model');
    mockGetParametersForRole.mockImplementation((role) => {
      if (role === 'main') return { maxTokens: 100, temperature: 0.5 };
      if (role === 'research') return { maxTokens: 200, temperature: 0.3 };
      if (role === 'fallback') return { maxTokens: 150, temperature: 0.6 };
      return { maxTokens: 100, temperature: 0.5 }; // Default
    });
    mockGetResponseLanguage.mockReturnValue('English');
    mockResolveEnvVariable.mockImplementation((key) => {
      if (key === 'ANTHROPIC_API_KEY') return 'mock-anthropic-key';
      if (key === 'PERPLEXITY_API_KEY') return 'mock-perplexity-key';
      if (key === 'OPENAI_API_KEY') return 'mock-openai-key';
      if (key === 'OLLAMA_API_KEY') return 'mock-ollama-key';
      return null;
    });

    // Set a default behavior for the new mock
    mockFindProjectRoot.mockReturnValue(fakeProjectRoot);
    mockGetDebugFlag.mockReturnValue(false);
    mockGetUserId.mockReturnValue('test-user-id'); // Add default mock for getUserId
    mockIsApiKeySet.mockReturnValue(true); // Default to true for most tests
    mockGetBaseUrlForRole.mockReturnValue(null); // Default to no base URL
  });

  describe('generateTextService', () => {
    test('should use main provider/model and succeed', async () => {
      mockAnthropicProvider.generateText.mockResolvedValue({
        text: 'Main provider response',
        usage: { inputTokens: 10, outputTokens: 20, totalTokens: 30 }
      });

      const params = {
        role: 'main',
        session: { env: {} },
        systemPrompt: 'System',
        prompt: 'Test'
      };
      const result = await generateTextService(params);

      expect(result.mainResult).toBe('Main provider response');
      expect(result).toHaveProperty('telemetryData');
      expect(mockGetMainProvider).toHaveBeenCalledWith(fakeProjectRoot);
      expect(mockGetMainModelId).toHaveBeenCalledWith(fakeProjectRoot);
      expect(mockGetParametersForRole).toHaveBeenCalledWith(
        'main',
        fakeProjectRoot
      );
      expect(mockAnthropicProvider.generateText).toHaveBeenCalledTimes(1);
      expect(mockPerplexityProvider.generateText).not.toHaveBeenCalled();
    });

    test('should fall back to fallback provider if main fails', async () => {
      const mainError = new Error('Main provider failed');
      mockAnthropicProvider.generateText
        .mockRejectedValueOnce(mainError)
        .mockResolvedValueOnce({
          text: 'Fallback provider response',
          usage: { inputTokens: 15, outputTokens: 25, totalTokens: 40 }
        });

      const explicitRoot = '/explicit/test/root';
      const params = {
        role: 'main',
        prompt: 'Fallback test',
        projectRoot: explicitRoot
      };
      const result = await generateTextService(params);

      expect(result.mainResult).toBe('Fallback provider response');
      expect(result).toHaveProperty('telemetryData');
      expect(mockGetMainProvider).toHaveBeenCalledWith(explicitRoot);
      expect(mockGetFallbackProvider).toHaveBeenCalledWith(explicitRoot);
      expect(mockGetParametersForRole).toHaveBeenCalledWith(
        'main',
        explicitRoot
      );
      expect(mockGetParametersForRole).toHaveBeenCalledWith(
        'fallback',
        explicitRoot
      );
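      // NOTE (annotation): the next assertion expects two calls on the same
      // anthropic mock because beforeEach configures both the 'main' and
      // 'fallback' roles to use the anthropic provider: one rejected call
      // for 'main', then one resolved call for 'fallback'.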
      expect(mockAnthropicProvider.generateText).toHaveBeenCalledTimes(2);
      expect(mockPerplexityProvider.generateText).not.toHaveBeenCalled();
      expect(mockLog).toHaveBeenCalledWith(
        'error',
        expect.stringContaining('Service call failed for role main')
      );
      expect(mockLog).toHaveBeenCalledWith(
        'debug',
        expect.stringContaining('New AI service call with role: fallback')
      );
    });

    test('should fall back to research provider if main and fallback fail', async () => {
      const mainError = new Error('Main failed');
      const fallbackError = new Error('Fallback failed');
      mockAnthropicProvider.generateText
        .mockRejectedValueOnce(mainError)
        .mockRejectedValueOnce(fallbackError);
      mockPerplexityProvider.generateText.mockResolvedValue({
        text: 'Research provider response',
        usage: { inputTokens: 20, outputTokens: 30, totalTokens: 50 }
      });

      const params = { role: 'main', prompt: 'Research fallback test' };
      const result = await generateTextService(params);

      expect(result.mainResult).toBe('Research provider response');
      expect(result).toHaveProperty('telemetryData');
      expect(mockGetMainProvider).toHaveBeenCalledWith(fakeProjectRoot);
      expect(mockGetFallbackProvider).toHaveBeenCalledWith(fakeProjectRoot);
      expect(mockGetResearchProvider).toHaveBeenCalledWith(fakeProjectRoot);
      expect(mockGetParametersForRole).toHaveBeenCalledWith(
        'main',
        fakeProjectRoot
      );
      expect(mockGetParametersForRole).toHaveBeenCalledWith(
        'fallback',
        fakeProjectRoot
      );
      expect(mockGetParametersForRole).toHaveBeenCalledWith(
        'research',
        fakeProjectRoot
      );
      expect(mockAnthropicProvider.generateText).toHaveBeenCalledTimes(2);
      expect(mockPerplexityProvider.generateText).toHaveBeenCalledTimes(1);
      expect(mockLog).toHaveBeenCalledWith(
        'error',
        expect.stringContaining('Service call failed for role fallback')
      );
      expect(mockLog).toHaveBeenCalledWith(
        'debug',
        expect.stringContaining('New AI service call with role: research')
      );
    });

    test('should throw error if all providers in sequence fail', async () => {
      mockAnthropicProvider.generateText.mockRejectedValue(
        new Error('Anthropic failed')
      );
      mockPerplexityProvider.generateText.mockRejectedValue(
        new Error('Perplexity failed')
      );

      const params = { role: 'main', prompt: 'All fail test' };

      await expect(generateTextService(params)).rejects.toThrow(
        'Perplexity failed' // Error from the last attempt (research)
      );

      expect(mockAnthropicProvider.generateText).toHaveBeenCalledTimes(2); // main, fallback
      expect(mockPerplexityProvider.generateText).toHaveBeenCalledTimes(1); // research
    });

    test('should handle retryable errors correctly', async () => {
      const retryableError = new Error('Rate limit');
      mockAnthropicProvider.generateText
        .mockRejectedValueOnce(retryableError) // Fails once
        .mockResolvedValueOnce({
          // Succeeds on retry
          text: 'Success after retry',
          usage: { inputTokens: 5, outputTokens: 10, totalTokens: 15 }
        });

      const params = { role: 'main', prompt: 'Retry success test' };
      const result = await generateTextService(params);

      expect(result.mainResult).toBe('Success after retry');
      expect(result).toHaveProperty('telemetryData');
      expect(mockAnthropicProvider.generateText).toHaveBeenCalledTimes(2); // Initial + 1 retry
      expect(mockLog).toHaveBeenCalledWith(
        'info',
        expect.stringContaining(
          'Something went wrong on the provider side. Retrying'
        )
      );
    });

    test('should use default project root or handle null if findProjectRoot returns null', async () => {
      mockFindProjectRoot.mockReturnValue(null); // Simulate not finding root
      mockAnthropicProvider.generateText.mockResolvedValue({
        text: 'Response with no root',
        usage: { inputTokens: 1, outputTokens: 1, totalTokens: 2 }
      });

      const params = { role: 'main', prompt: 'No root test' }; // No explicit root passed
      await generateTextService(params);

      expect(mockGetMainProvider).toHaveBeenCalledWith(null);
      expect(mockGetParametersForRole).toHaveBeenCalledWith('main', null);
      expect(mockAnthropicProvider.generateText).toHaveBeenCalledTimes(1);
    });

    test('should use configured responseLanguage in system prompt', async () => {
      mockGetResponseLanguage.mockReturnValue('中文');
      mockAnthropicProvider.generateText.mockResolvedValue('中文回复');

      const params = {
        role: 'main',
        systemPrompt: 'You are an assistant',
        prompt: 'Hello'
      };
      await generateTextService(params);

      expect(mockAnthropicProvider.generateText).toHaveBeenCalledWith(
        expect.objectContaining({
          messages: [
            {
              role: 'system',
              content: expect.stringContaining('Always respond in 中文')
            },
            { role: 'user', content: 'Hello' }
          ]
        })
      );
      expect(mockGetResponseLanguage).toHaveBeenCalledWith(fakeProjectRoot);
    });

    test('should pass custom projectRoot to getResponseLanguage', async () => {
      const customRoot = '/custom/project/root';
      mockGetResponseLanguage.mockReturnValue('Español');
      mockAnthropicProvider.generateText.mockResolvedValue(
        'Respuesta en Español'
      );

      const params = {
        role: 'main',
        systemPrompt: 'You are an assistant',
        prompt: 'Hello',
        projectRoot: customRoot
      };
      await generateTextService(params);

      expect(mockGetResponseLanguage).toHaveBeenCalledWith(customRoot);
      expect(mockAnthropicProvider.generateText).toHaveBeenCalledWith(
        expect.objectContaining({
          messages: [
            {
              role: 'system',
              content: expect.stringContaining('Always respond in Español')
            },
            { role: 'user', content: 'Hello' }
          ]
        })
      );
    });

    // Add more tests for edge cases:
    // - Missing API keys (should throw from _resolveApiKey)
    // - Unsupported provider configured (should skip and log)
    // - Missing provider/model config for a role (should skip and log)
    // - Missing prompt
    // - Different initial roles (research, fallback)
    // - generateObjectService (mock schema, check object result)
    // - streamTextService (more complex to test, might need stream helpers)

    test('should skip provider with missing API key and try next in fallback sequence', async () => {
      // Setup isApiKeySet to return false for anthropic but true for perplexity
      mockIsApiKeySet.mockImplementation((provider, session, root) => {
        if (provider === 'anthropic') return false; // Main provider has no key
        return true; // Other providers have keys
      });

      // Mock perplexity text response (since we'll skip anthropic)
      mockPerplexityProvider.generateText.mockResolvedValue({
        text: 'Perplexity response (skipped to research)',
        usage: { inputTokens: 20, outputTokens: 30, totalTokens: 50 }
      });

      const params = {
        role: 'main',
        prompt: 'Skip main provider test',
        session: { env: {} }
      };
      const result = await generateTextService(params);

      // Should have gotten the perplexity response
      expect(result.mainResult).toBe(
        'Perplexity response (skipped to research)'
      );

      // Should check API keys
      expect(mockIsApiKeySet).toHaveBeenCalledWith(
        'anthropic',
        params.session,
        fakeProjectRoot
      );
      expect(mockIsApiKeySet).toHaveBeenCalledWith(
        'perplexity',
        params.session,
        fakeProjectRoot
      );

      // Should log a warning
      expect(mockLog).toHaveBeenCalledWith(
        'warn',
        expect.stringContaining(
          `Skipping role 'main' (Provider: anthropic): API key not set or invalid.`
        )
      );

      // Should NOT call anthropic provider
      expect(mockAnthropicProvider.generateText).not.toHaveBeenCalled();

      // Should call perplexity provider
      expect(mockPerplexityProvider.generateText).toHaveBeenCalledTimes(1);
    });

    test('should skip multiple providers with missing API keys and use first available', async () => {
      // Setup: Main and fallback providers have no keys, only research has a key
      mockIsApiKeySet.mockImplementation((provider, session, root) => {
        if (provider === 'anthropic') return false; // Main and fallback are both anthropic
        if (provider === 'perplexity') return true; // Research has a key
        return false;
      });

      // Define different providers for testing multiple skips
      mockGetFallbackProvider.mockReturnValue('openai'); // Different from main
      mockGetFallbackModelId.mockReturnValue('test-openai-model');

      // Mock isApiKeySet to return false for both main and fallback
      mockIsApiKeySet.mockImplementation((provider, session, root) => {
        if (provider === 'anthropic') return false; // Main provider has no key
        if (provider === 'openai') return false; // Fallback provider has no key
        return true; // Research provider has a key
      });

      // Mock perplexity text response (since we'll skip to research)
      mockPerplexityProvider.generateText.mockResolvedValue({
        text: 'Research response after skipping main and fallback',
        usage: { inputTokens: 20, outputTokens: 30, totalTokens: 50 }
      });

      const params = {
        role: 'main',
        prompt: 'Skip multiple providers test',
        session: { env: {} }
      };
      const result = await generateTextService(params);

      // Should have gotten the perplexity (research) response
      expect(result.mainResult).toBe(
        'Research response after skipping main and fallback'
      );

      // Should check API keys for all three roles
      expect(mockIsApiKeySet).toHaveBeenCalledWith(
        'anthropic',
        params.session,
        fakeProjectRoot
      );
      expect(mockIsApiKeySet).toHaveBeenCalledWith(
        'openai',
        params.session,
        fakeProjectRoot
      );
      expect(mockIsApiKeySet).toHaveBeenCalledWith(
        'perplexity',
        params.session,
        fakeProjectRoot
      );

      // Should log warnings for both skipped providers
      expect(mockLog).toHaveBeenCalledWith(
        'warn',
        expect.stringContaining(
          `Skipping role 'main' (Provider: anthropic): API key not set or invalid.`
        )
      );
      expect(mockLog).toHaveBeenCalledWith(
        'warn',
        expect.stringContaining(
          `Skipping role 'fallback' (Provider: openai): API key not set or invalid.`
        )
      );

      // Should NOT call skipped providers
      expect(mockAnthropicProvider.generateText).not.toHaveBeenCalled();
      expect(mockOpenAIProvider.generateText).not.toHaveBeenCalled();

      // Should call perplexity provider
      expect(mockPerplexityProvider.generateText).toHaveBeenCalledTimes(1);
    });

    test('should throw error if all providers in sequence have missing API keys', async () => {
      // Mock all providers to have missing API keys
      mockIsApiKeySet.mockReturnValue(false);

      const params = {
        role: 'main',
        prompt: 'All API keys missing test',
        session: { env: {} }
      };

      // Should throw error since all providers would be skipped
      await expect(generateTextService(params)).rejects.toThrow(
        'AI service call failed for all configured roles'
      );

      // Should log warnings for all skipped providers
      expect(mockLog).toHaveBeenCalledWith(
        'warn',
        expect.stringContaining(
          `Skipping role 'main' (Provider: anthropic): API key not set or invalid.`
        )
      );
      expect(mockLog).toHaveBeenCalledWith(
        'warn',
        expect.stringContaining(
          `Skipping role 'fallback' (Provider: anthropic): API key not set or invalid.`
        )
      );
      expect(mockLog).toHaveBeenCalledWith(
        'warn',
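        // NOTE (annotation): one warning is logged per skipped role; this
        // final expectation covers the 'research' role, after which the
        // service has nothing left to try and throws.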
        expect.stringContaining(
          `Skipping role 'research' (Provider: perplexity): API key not set or invalid.`
        )
      );

      // Should log final error
      expect(mockLog).toHaveBeenCalledWith(
        'error',
        expect.stringContaining(
          'All roles in the sequence [main, fallback, research] failed.'
        )
      );

      // Should NOT call any providers
      expect(mockAnthropicProvider.generateText).not.toHaveBeenCalled();
      expect(mockPerplexityProvider.generateText).not.toHaveBeenCalled();
    });

    test('should not check API key for Ollama provider and try to use it', async () => {
      // Setup: Set main provider to ollama
      mockGetMainProvider.mockReturnValue('ollama');
      mockGetMainModelId.mockReturnValue('llama3');

      // Mock Ollama text generation to succeed
      mockOllamaProvider.generateText.mockResolvedValue({
        text: 'Ollama response (no API key required)',
        usage: { inputTokens: 10, outputTokens: 10, totalTokens: 20 }
      });

      const params = {
        role: 'main',
        prompt: 'Ollama special case test',
        session: { env: {} }
      };
      const result = await generateTextService(params);

      // Should have gotten the Ollama response
      expect(result.mainResult).toBe('Ollama response (no API key required)');

      // isApiKeySet shouldn't be called for Ollama
      // Note: This is indirect - the code just doesn't check isApiKeySet for ollama
      // so we're verifying ollama provider was called despite isApiKeySet being mocked to false
      mockIsApiKeySet.mockReturnValue(false); // Should be ignored for Ollama

      // Should call Ollama provider
      expect(mockOllamaProvider.generateText).toHaveBeenCalledTimes(1);
    });

    test('should correctly use the provided session for API key check', async () => {
      // Mock custom session object with env vars
      const customSession = { env: { ANTHROPIC_API_KEY: 'session-api-key' } };

      // Setup API key check to verify the session is passed correctly
      mockIsApiKeySet.mockImplementation((provider, session, root) => {
        // Only return true if the correct session was provided
        return session === customSession;
      });

      // Mock the anthropic response
      mockAnthropicProvider.generateText.mockResolvedValue({
        text: 'Anthropic response with session key',
        usage: { inputTokens: 10, outputTokens: 10, totalTokens: 20 }
      });

      const params = {
        role: 'main',
        prompt: 'Session API key test',
        session: customSession
      };
      const result = await generateTextService(params);

      // Should check API key with the custom session
      expect(mockIsApiKeySet).toHaveBeenCalledWith(
        'anthropic',
        customSession,
        fakeProjectRoot
      );

      // Should have gotten the anthropic response
      expect(result.mainResult).toBe('Anthropic response with session key');
    });
  });
});
```

--------------------------------------------------------------------------------
/scripts/init.js:
--------------------------------------------------------------------------------

```javascript
/**
 * Task Master
 * Copyright (c) 2025 Eyal Toledano, Ralph Khreish
 *
 * This software is licensed under the MIT License with Commons Clause.
 * You may use this software for any purpose, including commercial applications,
 * and modify and redistribute it freely, subject to the following restrictions:
 *
 * 1. You may not sell this software or offer it as a service.
 * 2. The origin of this software must not be misrepresented.
 * 3. Altered source versions must be plainly marked as such.
 *
 * For the full license text, see the LICENSE file in the root directory.
 */

import fs from 'fs';
import path from 'path';
import readline from 'readline';
import chalk from 'chalk';
import figlet from 'figlet';
import boxen from 'boxen';
import gradient from 'gradient-string';
import { isSilentMode } from './modules/utils.js';
import { insideGitWorkTree } from './modules/utils/git-utils.js';
import { manageGitignoreFile } from '../src/utils/manage-gitignore.js';
import { RULE_PROFILES } from '../src/constants/profiles.js';
import {
  convertAllRulesToProfileRules,
  getRulesProfile
} from '../src/utils/rule-transformer.js';
import { updateConfigMaxTokens } from './modules/update-config-tokens.js';
// Import asset resolver
import { assetExists, readAsset } from '../src/utils/asset-resolver.js';
import { execSync } from 'child_process';
import {
  EXAMPLE_PRD_FILE,
  TASKMASTER_CONFIG_FILE,
  TASKMASTER_TEMPLATES_DIR,
  TASKMASTER_DIR,
  TASKMASTER_TASKS_DIR,
  TASKMASTER_DOCS_DIR,
  TASKMASTER_REPORTS_DIR,
  TASKMASTER_STATE_FILE,
  ENV_EXAMPLE_FILE,
  GITIGNORE_FILE
} from '../src/constants/paths.js';

// Define log levels
const LOG_LEVELS = {
  debug: 0,
  info: 1,
  warn: 2,
  error: 3,
  success: 4
};

// Determine log level from environment variable or default to 'info'
const LOG_LEVEL = process.env.TASKMASTER_LOG_LEVEL
  ? LOG_LEVELS[process.env.TASKMASTER_LOG_LEVEL.toLowerCase()]
  : LOG_LEVELS.info; // Default to info

// Create a color gradient for the banner
const coolGradient = gradient(['#00b4d8', '#0077b6', '#03045e']);
const warmGradient = gradient(['#fb8b24', '#e36414', '#9a031e']);

// Display a fancy banner
function displayBanner() {
  if (isSilentMode()) return;

  console.clear();
  const bannerText = figlet.textSync('Task Master AI', {
    font: 'Standard',
    horizontalLayout: 'default',
    verticalLayout: 'default'
  });

  console.log(coolGradient(bannerText));

  // Add creator credit line below the banner
  console.log(
    chalk.dim('by ') + chalk.cyan.underline('https://x.com/eyaltoledano')
  );

  console.log(
    boxen(chalk.white(`${chalk.bold('Initializing')} your new project`), {
      padding: 1,
      margin: { top: 0, bottom: 1 },
      borderStyle: 'round',
      borderColor: 'cyan'
    })
  );
}

// Logging function with icons and colors
function log(level, ...args) {
  const icons = {
    debug: chalk.gray('🔍'),
    info: chalk.blue('ℹ️'),
    warn: chalk.yellow('⚠️'),
    error: chalk.red('❌'),
    success: chalk.green('✅')
  };

  if (LOG_LEVELS[level] >= LOG_LEVEL) {
    const icon = icons[level] || '';

    // Only output to console if not in silent mode
    if (!isSilentMode()) {
      if (level === 'error') {
        console.error(icon, chalk.red(...args));
      } else if (level === 'warn') {
        console.warn(icon, chalk.yellow(...args));
      } else if (level === 'success') {
        console.log(icon, chalk.green(...args));
      } else if (level === 'info') {
        console.log(icon, chalk.blue(...args));
      } else {
        console.log(icon, ...args);
      }
    }
  }

  // Write to debug log if DEBUG=true
  if (process.env.DEBUG === 'true') {
    const logMessage = `[${level.toUpperCase()}] ${args.join(' ')}\n`;
    fs.appendFileSync('init-debug.log', logMessage);
  }
}

// Function to create directory if it doesn't exist
function ensureDirectoryExists(dirPath) {
  if (!fs.existsSync(dirPath)) {
    fs.mkdirSync(dirPath, { recursive: true });
    log('info', `Created directory: ${dirPath}`);
  }
}

// Function to add shell aliases to the user's shell configuration
function addShellAliases() {
  const homeDir = process.env.HOME || process.env.USERPROFILE;
  let shellConfigFile;

  // Determine which shell config file to use
  if (process.env.SHELL?.includes('zsh')) {
    shellConfigFile = path.join(homeDir, '.zshrc');
  } else if (process.env.SHELL?.includes('bash')) {
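    // NOTE (annotation): this assumes an interactive bash that sources
    // ~/.bashrc; login-shell-only setups (e.g. macOS Terminal defaults)
    // would need to source ~/.bashrc from ~/.bash_profile for the aliases
    // to take effect.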
    shellConfigFile = path.join(homeDir, '.bashrc');
  } else {
    log('warn', 'Could not determine shell type. Aliases not added.');
    return false;
  }

  try {
    // Check if file exists
    if (!fs.existsSync(shellConfigFile)) {
      log(
        'warn',
        `Shell config file ${shellConfigFile} not found. Aliases not added.`
      );
      return false;
    }

    // Check if aliases already exist
    const configContent = fs.readFileSync(shellConfigFile, 'utf8');
    if (configContent.includes("alias tm='task-master'")) {
      log('info', 'Task Master aliases already exist in shell config.');
      return true;
    }

    // Add aliases to the shell config file
    const aliasBlock = `
# Task Master aliases added on ${new Date().toLocaleDateString()}
alias tm='task-master'
alias taskmaster='task-master'
`;
    fs.appendFileSync(shellConfigFile, aliasBlock);
    log('success', `Added Task Master aliases to ${shellConfigFile}`);
    log(
      'info',
      `To use the aliases in your current terminal, run: source ${shellConfigFile}`
    );

    return true;
  } catch (error) {
    log('error', `Failed to add aliases: ${error.message}`);
    return false;
  }
}

// Function to create initial state.json file for tag management
function createInitialStateFile(targetDir) {
  const stateFilePath = path.join(targetDir, TASKMASTER_STATE_FILE);

  // Check if state.json already exists
  if (fs.existsSync(stateFilePath)) {
    log('info', 'State file already exists, preserving current configuration');
    return;
  }

  // Create initial state configuration
  const initialState = {
    currentTag: 'master',
    lastSwitched: new Date().toISOString(),
    branchTagMapping: {},
    migrationNoticeShown: false
  };

  try {
    fs.writeFileSync(stateFilePath, JSON.stringify(initialState, null, 2));
    log('success', `Created initial state file: ${stateFilePath}`);
    log('info', 'Default tag set to "master" for task organization');
  } catch (error) {
    log('error', `Failed to create state file: ${error.message}`);
  }
}

// Function to copy a file from the package to the target directory
function copyTemplateFile(templateName, targetPath, replacements = {}) {
  // Get the file content from the appropriate source directory
  // Check if the asset exists
  if (!assetExists(templateName)) {
    log('error', `Source file not found: ${templateName}`);
    return;
  }

  // Read the asset content using the resolver
  let content = readAsset(templateName, 'utf8');

  // Replace placeholders with actual values
  Object.entries(replacements).forEach(([key, value]) => {
    const regex = new RegExp(`\\{\\{${key}\\}\\}`, 'g');
    content = content.replace(regex, value);
  });

  // Handle special files that should be merged instead of overwritten
  if (fs.existsSync(targetPath)) {
    const filename = path.basename(targetPath);

    // Handle .gitignore - append lines that don't exist
    if (filename === '.gitignore') {
      log('info', `${targetPath} already exists, merging content...`);
      const existingContent = fs.readFileSync(targetPath, 'utf8');
      const existingLines = new Set(
        existingContent.split('\n').map((line) => line.trim())
      );
      const newLines = content
        .split('\n')
        .filter((line) => !existingLines.has(line.trim()));

      if (newLines.length > 0) {
        // Add a comment to separate the original content from our additions
        const updatedContent = `${existingContent.trim()}\n\n# Added by Task Master AI\n${newLines.join('\n')}`;
        fs.writeFileSync(targetPath, updatedContent);
        log('success', `Updated ${targetPath} with additional entries`);
      } else {
        log('info', `No new content to add to ${targetPath}`);
      }
      return;
    }

    // Handle README.md - offer to preserve or create a different file
    if (filename === 'README-task-master.md') {
      log('info', `${targetPath} already exists`);
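      // NOTE (annotation): targetPath already ends in 'README-task-master.md',
      // so the path built below resolves to the same file, meaning this branch
      // effectively rewrites the file in place rather than creating a sibling.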
      // Create a separate README file specifically for this project
      const taskMasterReadmePath = path.join(
        path.dirname(targetPath),
        'README-task-master.md'
      );
      fs.writeFileSync(taskMasterReadmePath, content);
      log(
        'success',
        `Created ${taskMasterReadmePath} (preserved original README-task-master.md)`
      );
      return;
    }

    // For other files, warn and prompt before overwriting
    log('warn', `${targetPath} already exists, skipping.`);
    return;
  }

  // If the file doesn't exist, create it normally
  fs.writeFileSync(targetPath, content);
  log('info', `Created file: ${targetPath}`);
}

// Main function to initialize a new project
async function initializeProject(options = {}) {
  // Receives options as argument
  // Only display banner if not in silent mode
  if (!isSilentMode()) {
    displayBanner();
  }

  // Debug logging only if not in silent mode
  // if (!isSilentMode()) {
  //   console.log('===== DEBUG: INITIALIZE PROJECT OPTIONS RECEIVED =====');
  //   console.log('Full options object:', JSON.stringify(options));
  //   console.log('options.yes:', options.yes);
  //   console.log('==================================================');
  // }

  // Handle boolean aliases flags
  if (options.aliases === true) {
    options.addAliases = true; // --aliases flag provided
  } else if (options.aliases === false) {
    options.addAliases = false; // --no-aliases flag provided
  }
  // If options.aliases and options.noAliases are undefined, we'll prompt for it

  // Handle boolean git flags
  if (options.git === true) {
    options.initGit = true; // --git flag provided
  } else if (options.git === false) {
    options.initGit = false; // --no-git flag provided
  }
  // If options.git and options.noGit are undefined, we'll prompt for it

  // Handle boolean gitTasks flags
  if (options.gitTasks === true) {
    options.storeTasksInGit = true; // --git-tasks flag provided
  } else if (options.gitTasks === false) {
    options.storeTasksInGit = false; // --no-git-tasks flag provided
  }
  // If options.gitTasks and options.noGitTasks are undefined, we'll prompt for it

  const skipPrompts = options.yes || (options.name && options.description);
  // if (!isSilentMode()) {
  //   console.log('Skip prompts determined:', skipPrompts);
  // }

  let selectedRuleProfiles;
  if (options.rulesExplicitlyProvided) {
    // If --rules flag was used, always respect it.
    log(
      'info',
      `Using rule profiles provided via command line: ${options.rules.join(', ')}`
    );
    selectedRuleProfiles = options.rules;
  } else if (skipPrompts) {
    // If non-interactive (e.g., --yes) and no rules specified, default to ALL.
    log(
      'info',
      `No rules specified in non-interactive mode, defaulting to all profiles.`
    );
    selectedRuleProfiles = RULE_PROFILES;
  } else {
    // If interactive and no rules specified, default to NONE.
    // The 'rules --setup' wizard will handle selection.
    log(
      'info',
      'No rules specified; interactive setup will be launched to select profiles.'
    );
    selectedRuleProfiles = [];
  }

  if (skipPrompts) {
    if (!isSilentMode()) {
      console.log('SKIPPING PROMPTS - Using defaults or provided values');
    }

    // Use provided options or defaults
    const projectName = options.name || 'task-master-project';
    const projectDescription =
      options.description || 'A project managed with Task Master AI';
    const projectVersion = options.version || '0.1.0';
    const authorName = options.author || 'Vibe coder';
    const dryRun = options.dryRun || false;
    const addAliases =
      options.addAliases !== undefined ? options.addAliases : true; // Default to true if not specified
    const initGit =
      options.initGit !== undefined ?
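        // NOTE (annotation): an explicit --git / --no-git flag always wins
        // over the default below.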
        options.initGit
      : true; // Default to true if not specified
    const storeTasksInGit =
      options.storeTasksInGit !== undefined ? options.storeTasksInGit : true; // Default to true if not specified

    if (dryRun) {
      log('info', 'DRY RUN MODE: No files will be modified');
      log('info', 'Would initialize Task Master project');
      log('info', 'Would create/update necessary project files');
      // Show flag-specific behavior
      log(
        'info',
        `${addAliases ? 'Would add shell aliases (tm, taskmaster)' : 'Would skip shell aliases'}`
      );
      log(
        'info',
        `${initGit ? 'Would initialize Git repository' : 'Would skip Git initialization'}`
      );
      log(
        'info',
        `${storeTasksInGit ? 'Would store tasks in Git' : 'Would exclude tasks from Git'}`
      );
      return { dryRun: true };
    }

    createProjectStructure(
      addAliases,
      initGit,
      storeTasksInGit,
      dryRun,
      options,
      selectedRuleProfiles
    );
  } else {
    // Interactive logic
    log('info', 'Required options not provided, proceeding with prompts.');

    // Declare outside the try block so the catch handler can close it safely
    // (previously `rl` was scoped to the try block and unreachable in catch)
    let rl;
    try {
      rl = readline.createInterface({
        input: process.stdin,
        output: process.stdout
      });

      // Prompt for shell aliases (skip if --aliases or --no-aliases flag was provided)
      let addAliasesPrompted = true; // Default to true
      if (options.addAliases !== undefined) {
        addAliasesPrompted = options.addAliases; // Use flag value if provided
      } else {
        const addAliasesInput = await promptQuestion(
          rl,
          chalk.cyan(
            'Add shell aliases for task-master? This lets you type "tm" instead of "task-master" (Y/n): '
          )
        );
        addAliasesPrompted = addAliasesInput.trim().toLowerCase() !== 'n';
      }

      // Prompt for Git initialization (skip if --git or --no-git flag was provided)
      let initGitPrompted = true; // Default to true
      if (options.initGit !== undefined) {
        initGitPrompted = options.initGit; // Use flag value if provided
      } else {
        const gitInitInput = await promptQuestion(
          rl,
          chalk.cyan('Initialize a Git repository in project root? (Y/n): ')
        );
        initGitPrompted = gitInitInput.trim().toLowerCase() !== 'n';
      }

      // Prompt for Git tasks storage (skip if --git-tasks or --no-git-tasks flag was provided)
      let storeGitPrompted = true; // Default to true
      if (options.storeTasksInGit !== undefined) {
        storeGitPrompted = options.storeTasksInGit; // Use flag value if provided
      } else {
        const gitTasksInput = await promptQuestion(
          rl,
          chalk.cyan(
            'Store tasks in Git (tasks.json and tasks/ directory)? (Y/n): '
          )
        );
        storeGitPrompted = gitTasksInput.trim().toLowerCase() !== 'n';
      }

      // Confirm settings...
      console.log('\nTask Master Project settings:');
      console.log(
        chalk.blue(
          'Add shell aliases (so you can use "tm" instead of "task-master"):'
        ),
        chalk.white(addAliasesPrompted ? 'Yes' : 'No')
      );
      console.log(
        chalk.blue('Initialize Git repository in project root:'),
        chalk.white(initGitPrompted ? 'Yes' : 'No')
      );
      console.log(
        chalk.blue('Store tasks in Git (tasks.json and tasks/ directory):'),
        chalk.white(storeGitPrompted ? 'Yes' : 'No')
      );

      const confirmInput = await promptQuestion(
        rl,
        chalk.yellow('\nDo you want to continue with these settings? (Y/n): ')
      );
      const shouldContinue = confirmInput.trim().toLowerCase() !== 'n';

      if (!shouldContinue) {
        rl.close();
        log('info', 'Project initialization cancelled by user');
        process.exit(0);
        return;
      }

      // Only run interactive rules if rules flag not provided via command line
      if (options.rulesExplicitlyProvided) {
        log(
          'info',
          `Using rule profiles provided via command line: ${selectedRuleProfiles.join(', ')}`
        );
      }

      const dryRun = options.dryRun || false;

      if (dryRun) {
        log('info', 'DRY RUN MODE: No files will be modified');
        log('info', 'Would initialize Task Master project');
        log('info', 'Would create/update necessary project files');
        // Show flag-specific behavior
        log(
          'info',
          `${addAliasesPrompted ? 'Would add shell aliases (tm, taskmaster)' : 'Would skip shell aliases'}`
        );
        log(
          'info',
          `${initGitPrompted ? 'Would initialize Git repository' : 'Would skip Git initialization'}`
        );
        log(
          'info',
          `${storeGitPrompted ? 'Would store tasks in Git' : 'Would exclude tasks from Git'}`
        );
        return { dryRun: true };
      }

      // Create structure using only necessary values
      createProjectStructure(
        addAliasesPrompted,
        initGitPrompted,
        storeGitPrompted,
        dryRun,
        options,
        selectedRuleProfiles
      );
      rl.close();
    } catch (error) {
      if (rl) {
        rl.close();
      }
      log('error', `Error during initialization process: ${error.message}`);
      process.exit(1);
    }
  }
}

// Helper function to promisify readline question
function promptQuestion(rl, question) {
  return new Promise((resolve) => {
    rl.question(question, (answer) => {
      resolve(answer);
    });
  });
}

// Function to create the project structure
function createProjectStructure(
  addAliases,
  initGit,
  storeTasksInGit,
  dryRun,
  options,
  selectedRuleProfiles = RULE_PROFILES
) {
  const targetDir = process.cwd();
  log('info', `Initializing project in ${targetDir}`);

  // Create NEW .taskmaster directory structure (using constants)
  ensureDirectoryExists(path.join(targetDir, TASKMASTER_DIR));
  ensureDirectoryExists(path.join(targetDir, TASKMASTER_TASKS_DIR));
  ensureDirectoryExists(path.join(targetDir, TASKMASTER_DOCS_DIR));
  ensureDirectoryExists(path.join(targetDir, TASKMASTER_REPORTS_DIR));
  ensureDirectoryExists(path.join(targetDir, TASKMASTER_TEMPLATES_DIR));

  // Create initial state.json file for tag management
  createInitialStateFile(targetDir);

  // Copy template files with replacements
  const replacements = {
    year: new Date().getFullYear()
  };

  // Helper function to create rule profiles
  function _processSingleProfile(profileName) {
    const profile = getRulesProfile(profileName);
    if (profile) {
      convertAllRulesToProfileRules(targetDir, profile); // Also triggers MCP config setup (if applicable)
    } else {
      log('warn', `Unknown rule profile: ${profileName}`);
    }
  }

  // Copy .env.example
  copyTemplateFile(
    'env.example',
    path.join(targetDir, ENV_EXAMPLE_FILE),
    replacements
  );

  // Copy config.json with project name to NEW location
  copyTemplateFile('config.json', path.join(targetDir, TASKMASTER_CONFIG_FILE), {
    ...replacements
  });

  // Update config.json with correct maxTokens values from supported-models.json
  const configPath = path.join(targetDir, TASKMASTER_CONFIG_FILE);
  if (updateConfigMaxTokens(configPath)) {
    log('info', 'Updated config with correct maxTokens values');
  } else {
    log('warn', 'Could not update maxTokens in config');
  }

  // Copy .gitignore with GitTasks preference
  try {
    const templateContent = readAsset('gitignore', 'utf8');
    manageGitignoreFile(
      path.join(targetDir, GITIGNORE_FILE),
      templateContent,
      storeTasksInGit,
      log
    );
  } catch (error) {
    log('error', `Failed to create .gitignore: ${error.message}`);
  }

  // Copy example_prd.txt to NEW location
  copyTemplateFile('example_prd.txt', path.join(targetDir, EXAMPLE_PRD_FILE));

  // Initialize git repository if git is available
  try {
    if (initGit === false) {
      log('info', 'Git initialization skipped due to --no-git flag.');
    } else if (initGit === true) {
      if (insideGitWorkTree()) {
        log(
          'info',
          'Existing Git repository detected – skipping git init despite --git flag.'
        );
      } else {
        log('info', 'Initializing Git repository due to --git flag...');
        execSync('git init', { cwd: targetDir, stdio: 'ignore' });
        log('success', 'Git repository initialized');
      }
    } else {
      // Default behavior when no flag is provided (from interactive prompt)
      if (insideGitWorkTree()) {
        log('info', 'Existing Git repository detected – skipping git init.');
      } else {
        log(
          'info',
          'No Git repository detected. Initializing one in project root...'
        );
        execSync('git init', { cwd: targetDir, stdio: 'ignore' });
        log('success', 'Git repository initialized');
      }
    }
  } catch (error) {
    log('warn', 'Git not available, skipping repository initialization');
  }

  // Only run the manual transformer if rules were provided via flags.
  // The interactive `rules --setup` wizard handles its own installation.
  if (options.rulesExplicitlyProvided || options.yes) {
    log('info', 'Generating profile rules from command-line flags...');
    for (const profileName of selectedRuleProfiles) {
      _processSingleProfile(profileName);
    }
  }

  // Add shell aliases if requested
  if (addAliases) {
    addShellAliases();
  }

  // Run npm install automatically
  const npmInstallOptions = {
    cwd: targetDir,
    // Default to inherit for interactive CLI, change if silent
    stdio: 'inherit'
  };

  if (isSilentMode()) {
    // If silent (MCP mode), suppress npm install output
    npmInstallOptions.stdio = 'ignore';
    log('info', 'Running npm install silently...'); // Log our own message
  } else {
    // Interactive mode, show the boxen message
    console.log(
      boxen(chalk.cyan('Installing dependencies...'), {
        padding: 0.5,
        margin: 0.5,
        borderStyle: 'round',
        borderColor: 'blue'
      })
    );
  }

  // === Add Rule Profiles Setup Step ===
  if (
    !isSilentMode() &&
    !dryRun &&
    !options?.yes &&
    !options.rulesExplicitlyProvided
  ) {
    console.log(
      boxen(chalk.cyan('Configuring Rule Profiles...'), {
        padding: 0.5,
        margin: { top: 1, bottom: 0.5 },
        borderStyle: 'round',
        borderColor: 'blue'
      })
    );
    log(
      'info',
      'Running interactive rules setup. Please select which rule profiles to include.'
    );
    try {
      // Correct command confirmed by you.
      execSync('npx task-master rules --setup', {
        stdio: 'inherit',
        cwd: targetDir
      });
      log('success', 'Rule profiles configured.');
    } catch (error) {
      log('error', 'Failed to configure rule profiles:', error.message);
      log('warn', 'You may need to run "task-master rules --setup" manually.');
    }
  } else if (isSilentMode() || dryRun || options?.yes) {
    // This branch can log why setup was skipped, similar to the model setup logic.
    if (options.rulesExplicitlyProvided) {
      log(
        'info',
        'Skipping interactive rules setup because --rules flag was used.'
      );
    } else {
      log('info', 'Skipping interactive rules setup in non-interactive mode.');
    }
  }
  // =====================================

  // === Add Response Language Step ===
  if (!isSilentMode() && !dryRun && !options?.yes) {
    console.log(
      boxen(chalk.cyan('Configuring Response Language...'), {
        padding: 0.5,
        margin: { top: 1, bottom: 0.5 },
        borderStyle: 'round',
        borderColor: 'blue'
      })
    );
    log(
      'info',
      'Running interactive response language setup. Please input your preferred language.'
    );
    try {
      execSync('npx task-master lang --setup', {
        stdio: 'inherit',
        cwd: targetDir
      });
      log('success', 'Response Language configured.');
    } catch (error) {
      log('error', 'Failed to configure response language:', error.message);
      log('warn', 'You may need to run "task-master lang --setup" manually.');
    }
  } else if (isSilentMode() && !dryRun) {
    log(
      'info',
      'Skipping interactive response language setup in silent (MCP) mode.'
    );
    log(
      'warn',
      'Please configure response language using "task-master models --set-response-language" or the "models" MCP tool.'
    );
  } else if (dryRun) {
    log('info', 'DRY RUN: Skipping interactive response language setup.');
  }
  // =====================================

  // === Add Model Configuration Step ===
  if (!isSilentMode() && !dryRun && !options?.yes) {
    console.log(
      boxen(chalk.cyan('Configuring AI Models...'), {
        padding: 0.5,
        margin: { top: 1, bottom: 0.5 },
        borderStyle: 'round',
        borderColor: 'blue'
      })
    );
    log(
      'info',
      'Running interactive model setup. Please select your preferred AI models.'
    );
    try {
      execSync('npx task-master models --setup', {
        stdio: 'inherit',
        cwd: targetDir
      });
      log('success', 'AI Models configured.');
    } catch (error) {
      log('error', 'Failed to configure AI models:', error.message);
      log('warn', 'You may need to run "task-master models --setup" manually.');
    }
  } else if (isSilentMode() && !dryRun) {
    log('info', 'Skipping interactive model setup in silent (MCP) mode.');
    log(
      'warn',
      'Please configure AI models using "task-master models --set-..." or the "models" MCP tool.'
    );
  } else if (dryRun) {
    log('info', 'DRY RUN: Skipping interactive model setup.');
  } else if (options?.yes) {
    log('info', 'Skipping interactive model setup due to --yes flag.');
    log(
      'info',
      'Default AI models will be used. You can configure different models later using "task-master models --setup" or "task-master models --set-..." commands.'
    );
  }
  // ====================================

  // Add shell aliases if requested
  if (addAliases && !dryRun) {
    log('info', 'Adding shell aliases...');
    const aliasResult = addShellAliases();
    if (aliasResult) {
      log('success', 'Shell aliases added successfully');
    }
  } else if (addAliases && dryRun) {
    log('info', 'DRY RUN: Would add shell aliases (tm, taskmaster)');
  }

  // Display success message
  if (!isSilentMode()) {
    console.log(
      boxen(
        `${warmGradient.multiline(
          figlet.textSync('Success!', { font: 'Standard' })
        )}\n${chalk.green('Project initialized successfully!')}`,
        {
          padding: 1,
          margin: 1,
          borderStyle: 'double',
          borderColor: 'green'
        }
      )
    );
  }

  // Display next steps in a nice box
  if (!isSilentMode()) {
    console.log(
      boxen(
        `${chalk.cyan.bold('Things you should do next:')}\n\n${chalk.white('1. ')}${chalk.yellow(
          'Configure AI models (if needed) and add API keys to `.env`'
        )}\n${chalk.white(' ├─ ')}${chalk.dim('Models: Use `task-master models` commands')}\n${chalk.white(' └─ ')}${chalk.dim(
          'Keys: Add provider API keys to .env (or inside the MCP config file i.e. .cursor/mcp.json)'
        )}\n${chalk.white('2. ')}${chalk.yellow(
          'Discuss your idea with AI and ask for a PRD using example_prd.txt, and save it to scripts/PRD.txt'
        )}\n${chalk.white('3. ')}${chalk.yellow(
          'Ask Cursor Agent (or run CLI) to parse your PRD and generate initial tasks:'
        )}\n${chalk.white(' └─ ')}${chalk.dim('MCP Tool: ')}${chalk.cyan('parse_prd')}${chalk.dim(' | CLI: ')}${chalk.cyan('task-master parse-prd scripts/prd.txt')}\n${chalk.white('4. ')}${chalk.yellow(
          'Ask Cursor to analyze the complexity of the tasks in your PRD using research'
        )}\n${chalk.white(' └─ ')}${chalk.dim('MCP Tool: ')}${chalk.cyan('analyze_project_complexity')}${chalk.dim(' | CLI: ')}${chalk.cyan('task-master analyze-complexity')}\n${chalk.white('5. ')}${chalk.yellow(
          'Ask Cursor to expand all of your tasks using the complexity analysis'
        )}\n${chalk.white('6. ')}${chalk.yellow('Ask Cursor to begin working on the next task')}\n${chalk.white('7. ')}${chalk.yellow(
          'Add new tasks anytime using the add-task command or MCP tool'
        )}\n${chalk.white('8. ')}${chalk.yellow(
          'Ask Cursor to set the status of one or many tasks/subtasks at a time. Use the task id from the task lists.'
        )}\n${chalk.white('9. ')}${chalk.yellow(
          'Ask Cursor to update all tasks from a specific task id based on new learnings or pivots in your project.'
        )}\n${chalk.white('10. ')}${chalk.green.bold('Ship it!')}\n\n${chalk.dim(
          '* Review the README.md file to learn how to use other commands via Cursor Agent.'
        )}\n${chalk.dim(
          '* Use the task-master command without arguments to see all available commands.'
        )}`,
        {
          padding: 1,
          margin: 1,
          borderStyle: 'round',
          borderColor: 'yellow',
          title: 'Getting Started',
          titleAlignment: 'center'
        }
      )
    );
  }
}

// Ensure necessary functions are exported
export { initializeProject, log };
```

--------------------------------------------------------------------------------
/scripts/modules/task-manager/research.js:
--------------------------------------------------------------------------------

```javascript
/**
 * research.js
 * Core research functionality for AI-powered queries with project context
 */

import fs from 'fs';
import path from 'path';
import chalk from 'chalk';
import boxen from 'boxen';
import inquirer from 'inquirer';
import { highlight } from 'cli-highlight';
import { ContextGatherer } from '../utils/contextGatherer.js';
import { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';
import { generateTextService } from '../ai-services-unified.js';
import { getPromptManager } from '../prompt-manager.js';
import {
  log as consoleLog,
  findProjectRoot,
  readJSON,
  flattenTasksWithSubtasks
} from '../utils.js';
import {
  displayAiUsageSummary,
  startLoadingIndicator,
  stopLoadingIndicator
} from '../ui.js';

/**
 * Perform AI-powered research with project context
 * @param {string} query - Research query/prompt
 * @param {Object} options - Research options
 * @param {Array<string>} [options.taskIds] - Task/subtask IDs for context
 * @param {Array<string>} [options.filePaths] - File paths for context
 * @param {string} [options.customContext] - Additional custom context
 * @param {boolean} [options.includeProjectTree] - Include project file tree
 * @param {string} [options.detailLevel] - Detail level: 'low', 'medium', 'high'
 * @param {string} [options.projectRoot] - Project root directory
 * @param {string} [options.tag] - Tag for the task
 * @param {boolean} [options.saveToFile] - Whether to save results to file (MCP mode)
 * @param {Object} [context] - Execution context
 * @param {Object} [context.session] - MCP session object
 * @param {Object} [context.mcpLog] - MCP logger object
 * @param {string} [context.commandName] - Command name for telemetry
 * @param {string} [context.outputType] - Output type ('cli' or 'mcp')
 * @param {string} [outputFormat] - Output format ('text' or 'json')
 * @param {boolean} [allowFollowUp] - Whether to allow follow-up questions (default: true)
 * @returns {Promise<Object>} Research results with telemetry data
 */
async function performResearch(
  query,
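  // NOTE (annotation): the remaining parameters are positional rather than
  // part of `options`: `outputFormat` selects CLI ('text') vs structured
  // output, and `allowFollowUp` only applies to interactive CLI sessions,
  // as documented in the JSDoc above.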
options = {}, context = {}, outputFormat = 'text', allowFollowUp = true ) { const { taskIds = [], filePaths = [], customContext = '', includeProjectTree = false, detailLevel = 'medium', projectRoot: providedProjectRoot, tag, saveToFile = false } = options; const { session, mcpLog, commandName = 'research', outputType = 'cli' } = context; const isMCP = !!mcpLog; // Determine project root const projectRoot = providedProjectRoot || findProjectRoot(); if (!projectRoot) { throw new Error('Could not determine project root directory'); } // Create consistent logger const logFn = isMCP ? mcpLog : { info: (...args) => consoleLog('info', ...args), warn: (...args) => consoleLog('warn', ...args), error: (...args) => consoleLog('error', ...args), debug: (...args) => consoleLog('debug', ...args), success: (...args) => consoleLog('success', ...args) }; // Show UI banner for CLI mode if (outputFormat === 'text') { console.log( boxen(chalk.cyan.bold(`🔍 AI Research Query`), { padding: 1, borderColor: 'cyan', borderStyle: 'round', margin: { top: 1, bottom: 1 } }) ); } try { // Initialize context gatherer const contextGatherer = new ContextGatherer(projectRoot, tag); // Auto-discover relevant tasks using fuzzy search to supplement provided tasks let finalTaskIds = [...taskIds]; // Start with explicitly provided tasks let autoDiscoveredIds = []; try { const tasksPath = path.join( projectRoot, '.taskmaster', 'tasks', 'tasks.json' ); const tasksData = await readJSON(tasksPath, projectRoot, tag); if (tasksData && tasksData.tasks && tasksData.tasks.length > 0) { // Flatten tasks to include subtasks for fuzzy search const flattenedTasks = flattenTasksWithSubtasks(tasksData.tasks); const fuzzySearch = new FuzzyTaskSearch(flattenedTasks, 'research'); const searchResults = fuzzySearch.findRelevantTasks(query, { maxResults: 8, includeRecent: true, includeCategoryMatches: true }); autoDiscoveredIds = fuzzySearch.getTaskIds(searchResults); // Remove any auto-discovered tasks that were already explicitly provided const uniqueAutoDiscovered = autoDiscoveredIds.filter( (id) => !finalTaskIds.includes(id) ); // Add unique auto-discovered tasks to the final list finalTaskIds = [...finalTaskIds, ...uniqueAutoDiscovered]; if (outputFormat === 'text' && finalTaskIds.length > 0) { // Sort task IDs numerically for better display const sortedTaskIds = finalTaskIds .map((id) => parseInt(id)) .sort((a, b) => a - b) .map((id) => id.toString()); // Show different messages based on whether tasks were explicitly provided if (taskIds.length > 0) { const sortedProvidedIds = taskIds .map((id) => parseInt(id)) .sort((a, b) => a - b) .map((id) => id.toString()); console.log( chalk.gray('Provided tasks: ') + chalk.cyan(sortedProvidedIds.join(', ')) ); if (uniqueAutoDiscovered.length > 0) { const sortedAutoIds = uniqueAutoDiscovered .map((id) => parseInt(id)) .sort((a, b) => a - b) .map((id) => id.toString()); console.log( chalk.gray('+ Auto-discovered related tasks: ') + chalk.cyan(sortedAutoIds.join(', ')) ); } } else { console.log( chalk.gray('Auto-discovered relevant tasks: ') + chalk.cyan(sortedTaskIds.join(', ')) ); } } } } catch (error) { // Silently continue without auto-discovered tasks if there's an error logFn.debug(`Could not auto-discover tasks: ${error.message}`); } const contextResult = await contextGatherer.gather({ tasks: finalTaskIds, files: filePaths, customContext, includeProjectTree, format: 'research', // Use research format for AI consumption includeTokenCounts: true }); const gatheredContext = contextResult.context; const 
tokenBreakdown = contextResult.tokenBreakdown; // Load prompts using PromptManager const promptManager = getPromptManager(); const promptParams = { query: query, gatheredContext: gatheredContext || '', detailLevel: detailLevel, projectInfo: { root: projectRoot, taskCount: finalTaskIds.length, fileCount: filePaths.length } }; // Load prompts - the research template handles detail level internally const { systemPrompt, userPrompt } = await promptManager.loadPrompt( 'research', promptParams ); // Count tokens for system and user prompts const systemPromptTokens = contextGatherer.countTokens(systemPrompt); const userPromptTokens = contextGatherer.countTokens(userPrompt); const totalInputTokens = systemPromptTokens + userPromptTokens; if (outputFormat === 'text') { // Display detailed token breakdown in a clean box displayDetailedTokenBreakdown( tokenBreakdown, systemPromptTokens, userPromptTokens ); } // Only log detailed info in debug mode or MCP if (outputFormat !== 'text') { logFn.info( `Calling AI service with research role, context size: ${tokenBreakdown.total} tokens (${gatheredContext.length} characters)` ); } // Start loading indicator for CLI mode let loadingIndicator = null; if (outputFormat === 'text') { loadingIndicator = startLoadingIndicator('Researching with AI...\n'); } let aiResult; try { // Call AI service with research role aiResult = await generateTextService({ role: 'research', // Always use research role for research command session, projectRoot, systemPrompt, prompt: userPrompt, commandName, outputType }); } catch (error) { if (loadingIndicator) { stopLoadingIndicator(loadingIndicator); } throw error; } finally { if (loadingIndicator) { stopLoadingIndicator(loadingIndicator); } } const researchResult = aiResult.mainResult; const telemetryData = aiResult.telemetryData; const tagInfo = aiResult.tagInfo; // Format and display results // Initialize interactive save tracking let interactiveSaveInfo = { interactiveSaveOccurred: false }; if (outputFormat === 'text') { displayResearchResults( researchResult, query, detailLevel, tokenBreakdown ); // Display AI usage telemetry for CLI users if (telemetryData) { displayAiUsageSummary(telemetryData, 'cli'); } // Offer follow-up question option (only for initial CLI queries, not MCP) if (allowFollowUp && !isMCP) { interactiveSaveInfo = await handleFollowUpQuestions( options, context, outputFormat, projectRoot, logFn, query, researchResult ); } } // Handle MCP save-to-file request if (saveToFile && isMCP) { const conversationHistory = [ { question: query, answer: researchResult, type: 'initial', timestamp: new Date().toISOString() } ]; const savedFilePath = await handleSaveToFile( conversationHistory, projectRoot, context, logFn ); // Add saved file path to return data return { query, result: researchResult, contextSize: gatheredContext.length, contextTokens: tokenBreakdown.total, tokenBreakdown, systemPromptTokens, userPromptTokens, totalInputTokens, detailLevel, telemetryData, tagInfo, savedFilePath, interactiveSaveOccurred: false // MCP save-to-file doesn't count as interactive save }; } logFn.success('Research query completed successfully'); return { query, result: researchResult, contextSize: gatheredContext.length, contextTokens: tokenBreakdown.total, tokenBreakdown, systemPromptTokens, userPromptTokens, totalInputTokens, detailLevel, telemetryData, tagInfo, interactiveSaveOccurred: interactiveSaveInfo?.interactiveSaveOccurred || false }; } catch (error) { logFn.error(`Research query failed: ${error.message}`); if (outputFormat 
/**
 * Display detailed token breakdown for context and prompts
 * @param {Object} tokenBreakdown - Token breakdown from context gatherer
 * @param {number} systemPromptTokens - System prompt token count
 * @param {number} userPromptTokens - User prompt token count
 */
function displayDetailedTokenBreakdown(
	tokenBreakdown,
	systemPromptTokens,
	userPromptTokens
) {
	const parts = [];

	// Custom context
	if (tokenBreakdown.customContext) {
		parts.push(
			chalk.cyan('Custom: ') +
				chalk.yellow(tokenBreakdown.customContext.tokens.toLocaleString())
		);
	}

	// Tasks breakdown
	if (tokenBreakdown.tasks && tokenBreakdown.tasks.length > 0) {
		const totalTaskTokens = tokenBreakdown.tasks.reduce(
			(sum, task) => sum + task.tokens,
			0
		);
		const taskDetails = tokenBreakdown.tasks
			.map((task) => {
				const titleDisplay =
					task.title.length > 30
						? task.title.substring(0, 30) + '...'
						: task.title;
				return ` ${chalk.gray(task.id)} ${chalk.white(titleDisplay)} ${chalk.yellow(task.tokens.toLocaleString())} tokens`;
			})
			.join('\n');

		parts.push(
			chalk.cyan('Tasks: ') +
				chalk.yellow(totalTaskTokens.toLocaleString()) +
				chalk.gray(` (${tokenBreakdown.tasks.length} items)`) +
				'\n' +
				taskDetails
		);
	}

	// Files breakdown
	if (tokenBreakdown.files && tokenBreakdown.files.length > 0) {
		const totalFileTokens = tokenBreakdown.files.reduce(
			(sum, file) => sum + file.tokens,
			0
		);
		const fileDetails = tokenBreakdown.files
			.map((file) => {
				const pathDisplay =
					file.path.length > 40
						? '...' + file.path.substring(file.path.length - 37)
						: file.path;
				return ` ${chalk.gray(pathDisplay)} ${chalk.yellow(file.tokens.toLocaleString())} tokens ${chalk.gray(`(${file.sizeKB}KB)`)}`;
			})
			.join('\n');

		parts.push(
			chalk.cyan('Files: ') +
				chalk.yellow(totalFileTokens.toLocaleString()) +
				chalk.gray(` (${tokenBreakdown.files.length} files)`) +
				'\n' +
				fileDetails
		);
	}

	// Project tree
	if (tokenBreakdown.projectTree) {
		parts.push(
			chalk.cyan('Project Tree: ') +
				chalk.yellow(tokenBreakdown.projectTree.tokens.toLocaleString()) +
				chalk.gray(
					` (${tokenBreakdown.projectTree.fileCount} files, ${tokenBreakdown.projectTree.dirCount} dirs)`
				)
		);
	}

	// Prompts breakdown
	const totalPromptTokens = systemPromptTokens + userPromptTokens;
	const promptDetails = [
		` ${chalk.gray('System:')} ${chalk.yellow(systemPromptTokens.toLocaleString())} tokens`,
		` ${chalk.gray('User:')} ${chalk.yellow(userPromptTokens.toLocaleString())} tokens`
	].join('\n');

	parts.push(
		chalk.cyan('Prompts: ') +
			chalk.yellow(totalPromptTokens.toLocaleString()) +
			chalk.gray(' (generated)') +
			'\n' +
			promptDetails
	);

	// Display the breakdown in a clean box
	if (parts.length > 0) {
		const content = parts.join('\n\n');
		const tokenBox = boxen(content, {
			title: chalk.blue.bold('Context Analysis'),
			titleAlignment: 'left',
			padding: { top: 1, bottom: 1, left: 2, right: 2 },
			margin: { top: 0, bottom: 1 },
			borderStyle: 'single',
			borderColor: 'blue'
		});
		console.log(tokenBox);
	}
}
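/*
 * Shape assumed by displayDetailedTokenBreakdown, inferred from the property
 * reads above (the authoritative shape is produced by the context gatherer):
 *
 *   {
 *     total: 12345,
 *     customContext: { tokens: 200 },
 *     tasks: [{ id: '15', title: 'Implement auth', tokens: 850 }],
 *     files: [{ path: 'src/index.js', tokens: 1200, sizeKB: 4 }],
 *     projectTree: { tokens: 300, fileCount: 120, dirCount: 18 }
 *   }
 */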
/**
 * Process research result text to highlight code blocks
 * @param {string} text - Raw research result text
 * @returns {string} Processed text with highlighted code blocks
 */
function processCodeBlocks(text) {
	// Regex to match code blocks with optional language specification
	const codeBlockRegex = /```(\w+)?\n([\s\S]*?)```/g;

	return text.replace(codeBlockRegex, (match, language, code) => {
		try {
			// Default to javascript if no language specified
			const lang = language || 'javascript';

			// Highlight the code using cli-highlight
			const highlightedCode = highlight(code.trim(), {
				language: lang,
				ignoreIllegals: true // Don't fail on unrecognized syntax
			});

			// Add a subtle border around code blocks
			const codeBox = boxen(highlightedCode, {
				padding: { top: 0, bottom: 0, left: 1, right: 1 },
				margin: { top: 0, bottom: 0 },
				borderStyle: 'single',
				borderColor: 'dim'
			});

			return '\n' + codeBox + '\n';
		} catch (error) {
			// If highlighting fails, return the original code block with basic formatting
			return (
				'\n' +
				chalk.gray('```' + (language || '')) +
				'\n' +
				chalk.white(code.trim()) +
				'\n' +
				chalk.gray('```') +
				'\n'
			);
		}
	});
}

/**
 * Display research results in formatted output
 * @param {string} result - AI research result
 * @param {string} query - Original query
 * @param {string} detailLevel - Detail level used
 * @param {Object} tokenBreakdown - Detailed token usage
 */
function displayResearchResults(result, query, detailLevel, tokenBreakdown) {
	// Header with query info
	const header = boxen(
		chalk.green.bold('Research Results') +
			'\n\n' +
			chalk.gray('Query: ') +
			chalk.white(query) +
			'\n' +
			chalk.gray('Detail Level: ') +
			chalk.cyan(detailLevel),
		{
			padding: { top: 1, bottom: 1, left: 2, right: 2 },
			margin: { top: 1, bottom: 0 },
			borderStyle: 'round',
			borderColor: 'green'
		}
	);
	console.log(header);

	// Process the result to highlight code blocks
	const processedResult = processCodeBlocks(result);

	// Main research content in a clean box
	const contentBox = boxen(processedResult, {
		padding: { top: 1, bottom: 1, left: 2, right: 2 },
		margin: { top: 0, bottom: 1 },
		borderStyle: 'single',
		borderColor: 'gray'
	});
	console.log(contentBox);

	// Success footer
	console.log(chalk.green('✅ Research completed'));
}
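/*
 * Display pipeline example (illustrative input): given an answer containing a
 * fenced block such as "```js\nconsole.log(1)\n```", processCodeBlocks()
 * replaces the fence with a syntax-highlighted, boxed snippet (or a plain
 * chalk-formatted fallback if cli-highlight rejects the language), and
 * displayResearchResults() wraps the whole answer in header/content boxes.
 */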
/**
 * Handle follow-up questions and save functionality in interactive mode
 * @param {Object} originalOptions - Original research options
 * @param {Object} context - Execution context
 * @param {string} outputFormat - Output format
 * @param {string} projectRoot - Project root directory
 * @param {Object} logFn - Logger function
 * @param {string} initialQuery - Initial query for context
 * @param {string} initialResult - Initial AI result for context
 */
async function handleFollowUpQuestions(
	originalOptions,
	context,
	outputFormat,
	projectRoot,
	logFn,
	initialQuery,
	initialResult
) {
	let interactiveSaveOccurred = false;

	try {
		// Initialize conversation history with the initial Q&A
		const conversationHistory = [
			{
				question: initialQuery,
				answer: initialResult,
				type: 'initial',
				timestamp: new Date().toISOString()
			}
		];

		while (true) {
			// Get user choice
			const { action } = await inquirer.prompt([
				{
					type: 'list',
					name: 'action',
					message: 'What would you like to do next?',
					choices: [
						{ name: 'Ask a follow-up question', value: 'followup' },
						{ name: 'Save to file', value: 'savefile' },
						{ name: 'Save to task/subtask', value: 'save' },
						{ name: 'Quit', value: 'quit' }
					],
					pageSize: 4
				}
			]);

			if (action === 'quit') {
				break;
			}

			if (action === 'savefile') {
				// Handle save to file functionality
				await handleSaveToFile(
					conversationHistory,
					projectRoot,
					context,
					logFn
				);
				continue;
			}

			if (action === 'save') {
				// Handle save functionality
				const saveResult = await handleSaveToTask(
					conversationHistory,
					projectRoot,
					context,
					logFn
				);
				if (saveResult) {
					interactiveSaveOccurred = true;
				}
				continue;
			}

			if (action === 'followup') {
				// Get the follow-up question
				const { followUpQuery } = await inquirer.prompt([
					{
						type: 'input',
						name: 'followUpQuery',
						message: 'Enter your follow-up question:',
						validate: (input) => {
							if (!input || input.trim().length === 0) {
								return 'Please enter a valid question.';
							}
							return true;
						}
					}
				]);

				if (!followUpQuery || followUpQuery.trim().length === 0) {
					continue;
				}

				console.log('\n' + chalk.gray('─'.repeat(60)) + '\n');

				// Build cumulative conversation context from all previous exchanges
				const conversationContext =
					buildConversationContext(conversationHistory);

				// Create enhanced options for follow-up with full conversation context
				const followUpOptions = {
					...originalOptions,
					taskIds: [], // Clear task IDs to allow fresh fuzzy search
					customContext:
						conversationContext +
						(originalOptions.customContext
							? `\n\n--- Original Context ---\n${originalOptions.customContext}`
							: '')
				};

				// Perform follow-up research
				const followUpResult = await performResearch(
					followUpQuery.trim(),
					followUpOptions,
					context,
					outputFormat,
					false // allowFollowUp = false for nested calls
				);

				// Add this exchange to the conversation history
				conversationHistory.push({
					question: followUpQuery.trim(),
					answer: followUpResult.result,
					type: 'followup',
					timestamp: new Date().toISOString()
				});
			}
		}
	} catch (error) {
		// If there's an error with inquirer (e.g., non-interactive terminal),
		// silently continue without follow-up functionality
		logFn.debug(`Follow-up questions not available: ${error.message}`);
	}

	return { interactiveSaveOccurred };
}
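/*
 * Follow-up context example (hypothetical session): after one exchange, the
 * second question is sent with customContext composed roughly as:
 *
 *   --- Conversation History ---
 *   Initial Question: <first query>
 *   Initial Answer: <first answer>
 *
 *   --- Original Context ---
 *   <whatever customContext the caller originally passed, if any>
 */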
/**
 * Handle saving conversation to a task or subtask
 * @param {Array} conversationHistory - Array of conversation exchanges
 * @param {string} projectRoot - Project root directory
 * @param {Object} context - Execution context
 * @param {Object} logFn - Logger function
 */
async function handleSaveToTask(
	conversationHistory,
	projectRoot,
	context,
	logFn
) {
	try {
		// Import required modules
		const { readJSON } = await import('../utils.js');
		const updateTaskById = (await import('./update-task-by-id.js')).default;
		const { updateSubtaskById } = await import('./update-subtask-by-id.js');

		// Get task ID from user
		const { taskId } = await inquirer.prompt([
			{
				type: 'input',
				name: 'taskId',
				message: 'Enter task ID (e.g., "15" for task or "15.2" for subtask):',
				validate: (input) => {
					if (!input || input.trim().length === 0) {
						return 'Please enter a task ID.';
					}
					const trimmedInput = input.trim();
					// Validate format: number or number.number
					if (!/^\d+(\.\d+)?$/.test(trimmedInput)) {
						return 'Invalid format. Use "15" for task or "15.2" for subtask.';
					}
					return true;
				}
			}
		]);

		const trimmedTaskId = taskId.trim();

		// Format conversation thread for saving
		const conversationThread = formatConversationForSaving(conversationHistory);

		// Determine if it's a task or subtask
		const isSubtask = trimmedTaskId.includes('.');

		// Try to save - first validate the ID exists
		const tasksPath = path.join(
			projectRoot,
			'.taskmaster',
			'tasks',
			'tasks.json'
		);
		if (!fs.existsSync(tasksPath)) {
			console.log(
				chalk.red('❌ Tasks file not found. Please run task-master init first.')
			);
			return;
		}

		const data = readJSON(tasksPath, projectRoot, context.tag);
		if (!data || !data.tasks) {
			console.log(chalk.red('❌ No valid tasks found.'));
			return;
		}

		if (isSubtask) {
			// Validate subtask exists
			const [parentId, subtaskId] = trimmedTaskId
				.split('.')
				.map((id) => parseInt(id, 10));
			const parentTask = data.tasks.find((t) => t.id === parentId);
			if (!parentTask) {
				console.log(chalk.red(`❌ Parent task ${parentId} not found.`));
				return;
			}
			if (
				!parentTask.subtasks ||
				!parentTask.subtasks.find((st) => st.id === subtaskId)
			) {
				console.log(chalk.red(`❌ Subtask ${trimmedTaskId} not found.`));
				return;
			}

			// Save to subtask using updateSubtaskById
			console.log(chalk.blue('💾 Saving research conversation to subtask...'));
			await updateSubtaskById(
				tasksPath,
				trimmedTaskId,
				conversationThread,
				false, // useResearch = false for simple append
				context,
				'text'
			);
			console.log(
				chalk.green(
					`✅ Research conversation saved to subtask ${trimmedTaskId}`
				)
			);
		} else {
			// Validate task exists
			const taskIdNum = parseInt(trimmedTaskId, 10);
			const task = data.tasks.find((t) => t.id === taskIdNum);
			if (!task) {
				console.log(chalk.red(`❌ Task ${trimmedTaskId} not found.`));
				return;
			}

			// Save to task using updateTaskById with append mode
			console.log(chalk.blue('💾 Saving research conversation to task...'));
			await updateTaskById(
				tasksPath,
				taskIdNum,
				conversationThread,
				false, // useResearch = false for simple append
				context,
				'text',
				true // appendMode = true
			);
			console.log(
				chalk.green(`✅ Research conversation saved to task ${trimmedTaskId}`)
			);
		}

		return true; // Indicate successful save
	} catch (error) {
		console.log(chalk.red(`❌ Error saving conversation: ${error.message}`));
		logFn.error(`Error saving conversation: ${error.message}`);
		return false; // Indicate failed save
	}
}

/**
 * Handle saving conversation to a file in .taskmaster/docs/research/
 * @param {Array} conversationHistory - Array of conversation exchanges
 * @param {string} projectRoot - Project root directory
 * @param {Object} context - Execution context
 * @param {Object} logFn - Logger function
 * @returns {Promise<string>} Path to saved file
 */
async function handleSaveToFile(
	conversationHistory,
	projectRoot,
	context,
	logFn
) {
	try {
		// Create research directory if it doesn't exist
		const researchDir = path.join(
			projectRoot,
			'.taskmaster',
			'docs',
			'research'
		);
		if (!fs.existsSync(researchDir)) {
			fs.mkdirSync(researchDir, { recursive: true });
		}

		// Generate filename from first query and timestamp
		const firstQuery = conversationHistory[0]?.question || 'research-query';
		const timestamp = new Date().toISOString().split('T')[0]; // YYYY-MM-DD format

		// Create a slug from the query (remove special chars, limit length)
		const querySlug = firstQuery
			.toLowerCase()
			.replace(/[^a-z0-9\s-]/g, '') // Remove special characters
			.replace(/\s+/g, '-') // Replace spaces with hyphens
			.replace(/-+/g, '-') // Replace multiple hyphens with single
			.substring(0, 50) // Limit length
			.replace(/^-+|-+$/g, ''); // Remove leading/trailing hyphens

		const filename = `${timestamp}_${querySlug}.md`;
		const filePath = path.join(researchDir, filename);

		// Format conversation for file
		const fileContent = formatConversationForFile(
			conversationHistory,
			firstQuery
		);

		// Write file
		fs.writeFileSync(filePath, fileContent, 'utf8');

		const relativePath = path.relative(projectRoot, filePath);
		console.log(
			chalk.green(`✅ Research saved to: ${chalk.cyan(relativePath)}`)
		);
		logFn.success(`Research conversation saved to ${relativePath}`);

		return filePath;
	} catch (error) {
		console.log(chalk.red(`❌ Error saving research file: ${error.message}`));
		logFn.error(`Error saving research file: ${error.message}`);
		throw error;
	}
}
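// Filename example (hypothetical date and query): a session started on
// 2025-06-14 with the query "How can I improve the scope-up command?" slugs to
// 2025-06-14_how-can-i-improve-the-scope-up-command.md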
/**
 * Format conversation history for saving to a file
 * @param {Array} conversationHistory - Array of conversation exchanges
 * @param {string} initialQuery - The initial query for metadata
 * @returns {string} Formatted file content
 */
function formatConversationForFile(conversationHistory, initialQuery) {
	const timestamp = new Date().toISOString();
	const date = new Date().toLocaleDateString();
	const time = new Date().toLocaleTimeString();

	// Create metadata header
	let content = `---
title: Research Session
query: "${initialQuery}"
date: ${date}
time: ${time}
timestamp: ${timestamp}
exchanges: ${conversationHistory.length}
---

# Research Session

`;

	// Add each conversation exchange
	conversationHistory.forEach((exchange, index) => {
		if (exchange.type === 'initial') {
			content += `## Initial Query\n\n**Question:** ${exchange.question}\n\n**Response:**\n\n${exchange.answer}\n\n`;
		} else {
			content += `## Follow-up ${index}\n\n**Question:** ${exchange.question}\n\n**Response:**\n\n${exchange.answer}\n\n`;
		}

		if (index < conversationHistory.length - 1) {
			content += '---\n\n';
		}
	});

	// Add footer
	content += `\n---\n\n*Generated by Task Master Research Command* \n*Timestamp: ${timestamp}*\n`;

	return content;
}

/**
 * Format conversation history for saving to a task/subtask
 * @param {Array} conversationHistory - Array of conversation exchanges
 * @returns {string} Formatted conversation thread
 */
function formatConversationForSaving(conversationHistory) {
	let formatted = `## Research Session - ${new Date().toLocaleDateString()} ${new Date().toLocaleTimeString()}\n\n`;

	conversationHistory.forEach((exchange, index) => {
		if (exchange.type === 'initial') {
			formatted += `**Initial Query:** ${exchange.question}\n\n`;
			formatted += `**Response:** ${exchange.answer}\n\n`;
		} else {
			formatted += `**Follow-up ${index}:** ${exchange.question}\n\n`;
			formatted += `**Response:** ${exchange.answer}\n\n`;
		}

		if (index < conversationHistory.length - 1) {
			formatted += '---\n\n';
		}
	});

	return formatted;
}

/**
 * Build conversation context string from conversation history
 * @param {Array} conversationHistory - Array of conversation exchanges
 * @returns {string} Formatted conversation context
 */
function buildConversationContext(conversationHistory) {
	if (conversationHistory.length === 0) {
		return '';
	}

	const contextParts = ['--- Conversation History ---'];

	conversationHistory.forEach((exchange, index) => {
		const questionLabel =
			exchange.type === 'initial' ? 'Initial Question' : `Follow-up ${index}`;
		const answerLabel =
			exchange.type === 'initial' ? 'Initial Answer' : `Answer ${index}`;
		contextParts.push(`\n${questionLabel}: ${exchange.question}`);
		contextParts.push(`${answerLabel}: ${exchange.answer}`);
	});

	return contextParts.join('\n');
}
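/*
 * Example file header produced by formatConversationForFile (values
 * illustrative; the exact date/time depend on the locale):
 *
 *   ---
 *   title: Research Session
 *   query: "test save functionality"
 *   date: 6/14/2025
 *   time: 3:42:10 PM
 *   timestamp: 2025-06-14T15:42:10.000Z
 *   exchanges: 2
 *   ---
 */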
export { performResearch };
```

--------------------------------------------------------------------------------
/scripts/modules/ai-services-unified.js:
--------------------------------------------------------------------------------

```javascript
/**
 * ai-services-unified.js
 * Centralized AI service layer using provider modules and config-manager.
 */

// Vercel AI SDK functions are NOT called directly anymore.
// import { generateText, streamText, generateObject } from 'ai';

// --- Core Dependencies ---
// Node built-ins (used for tag detection below)
import fs from 'fs';
import path from 'path';

import {
	MODEL_MAP,
	getAzureBaseURL,
	getBaseUrlForRole,
	getBedrockBaseURL,
	getDebugFlag,
	getFallbackModelId,
	getFallbackProvider,
	getMainModelId,
	getMainProvider,
	getOllamaBaseURL,
	getParametersForRole,
	getResearchModelId,
	getResearchProvider,
	getResponseLanguage,
	getUserId,
	getVertexLocation,
	getVertexProjectId,
	isApiKeySet,
	providersWithoutApiKeys
} from './config-manager.js';
import {
	findProjectRoot,
	getCurrentTag,
	log,
	resolveEnvVariable
} from './utils.js';

// Import provider classes
import {
	AnthropicAIProvider,
	AzureProvider,
	BedrockAIProvider,
	ClaudeCodeProvider,
	GeminiCliProvider,
	GoogleAIProvider,
	GrokCliProvider,
	GroqProvider,
	OllamaAIProvider,
	OpenAIProvider,
	OpenRouterAIProvider,
	PerplexityAIProvider,
	VertexAIProvider,
	XAIProvider
} from '../../src/ai-providers/index.js';

// Import the provider registry
import ProviderRegistry from '../../src/provider-registry/index.js';

// Create provider instances
const PROVIDERS = {
	anthropic: new AnthropicAIProvider(),
	perplexity: new PerplexityAIProvider(),
	google: new GoogleAIProvider(),
	openai: new OpenAIProvider(),
	xai: new XAIProvider(),
	groq: new GroqProvider(),
	openrouter: new OpenRouterAIProvider(),
	ollama: new OllamaAIProvider(),
	bedrock: new BedrockAIProvider(),
	azure: new AzureProvider(),
	vertex: new VertexAIProvider(),
	'claude-code': new ClaudeCodeProvider(),
	'gemini-cli': new GeminiCliProvider(),
	'grok-cli': new GrokCliProvider()
};

function _getProvider(providerName) {
	// First check the static PROVIDERS object
	if (PROVIDERS[providerName]) {
		return PROVIDERS[providerName];
	}

	// If not found, check the provider registry
	const providerRegistry = ProviderRegistry.getInstance();
	if (providerRegistry.hasProvider(providerName)) {
		log('debug', `Provider "${providerName}" found in dynamic registry`);
		return providerRegistry.getProvider(providerName);
	}

	// Provider not found in either location
	return null;
}
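// Resolution order illustrated: _getProvider('anthropic') returns the static
// instance from PROVIDERS above, while a name absent from that map falls
// through to the dynamic ProviderRegistry singleton (and null if neither
// knows the name).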
// Helper function to get cost for a specific model
function _getCostForModel(providerName, modelId) {
	const DEFAULT_COST = { inputCost: 0, outputCost: 0, currency: 'USD' };

	if (!MODEL_MAP || !MODEL_MAP[providerName]) {
		log(
			'warn',
			`Provider "${providerName}" not found in MODEL_MAP. Cannot determine cost for model ${modelId}.`
		);
		return DEFAULT_COST;
	}

	const modelData = MODEL_MAP[providerName].find((m) => m.id === modelId);
	if (!modelData?.cost_per_1m_tokens) {
		log(
			'debug',
			`Cost data not found for model "${modelId}" under provider "${providerName}". Assuming zero cost.`
		);
		return DEFAULT_COST;
	}

	const costs = modelData.cost_per_1m_tokens;
	return {
		inputCost: costs.input || 0,
		outputCost: costs.output || 0,
		currency: costs.currency || 'USD'
	};
}

/**
 * Calculate cost from token counts and cost per million
 * @param {number} inputTokens - Number of input tokens
 * @param {number} outputTokens - Number of output tokens
 * @param {number} inputCost - Cost per million input tokens
 * @param {number} outputCost - Cost per million output tokens
 * @returns {number} Total calculated cost
 */
function _calculateCost(inputTokens, outputTokens, inputCost, outputCost) {
	const calculatedCost =
		((inputTokens || 0) / 1_000_000) * inputCost +
		((outputTokens || 0) / 1_000_000) * outputCost;
	return parseFloat(calculatedCost.toFixed(6));
}
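// Worked example (hypothetical prices): with inputCost = $3 and
// outputCost = $15 per million tokens,
//   _calculateCost(1500, 500, 3, 15)
//     = (1500 / 1_000_000) * 3 + (500 / 1_000_000) * 15
//     = 0.0045 + 0.0075
//     = 0.012 (rounded to 6 decimal places)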
// Helper function to get tag information for responses
function _getTagInfo(projectRoot) {
	const DEFAULT_TAG_INFO = { currentTag: 'master', availableTags: ['master'] };

	try {
		if (!projectRoot) {
			return DEFAULT_TAG_INFO;
		}

		const currentTag = getCurrentTag(projectRoot) || 'master';
		const availableTags = _readAvailableTags(projectRoot);

		return { currentTag, availableTags };
	} catch (error) {
		if (getDebugFlag()) {
			log('debug', `Error getting tag information: ${error.message}`);
		}
		return DEFAULT_TAG_INFO;
	}
}

// Extract method for reading available tags
function _readAvailableTags(projectRoot) {
	const DEFAULT_TAGS = ['master'];

	try {
		const tasksPath = path.join(
			projectRoot,
			'.taskmaster',
			'tasks',
			'tasks.json'
		);
		if (!fs.existsSync(tasksPath)) {
			return DEFAULT_TAGS;
		}

		const tasksData = JSON.parse(fs.readFileSync(tasksPath, 'utf8'));
		if (!tasksData || typeof tasksData !== 'object') {
			return DEFAULT_TAGS;
		}

		// Check if it's tagged format (has tag-like keys with tasks arrays)
		const potentialTags = Object.keys(tasksData).filter((key) =>
			_isValidTaggedTask(tasksData[key])
		);
		return potentialTags.length > 0 ? potentialTags : DEFAULT_TAGS;
	} catch (readError) {
		if (getDebugFlag()) {
			log(
				'debug',
				`Could not read tasks file for available tags: ${readError.message}`
			);
		}
		return DEFAULT_TAGS;
	}
}

// Helper to validate tagged task structure
function _isValidTaggedTask(taskData) {
	return (
		taskData && typeof taskData === 'object' && Array.isArray(taskData.tasks)
	);
}

// --- Configuration for Retries ---
const MAX_RETRIES = 2;
const INITIAL_RETRY_DELAY_MS = 1000;

// Helper function to check if an error is retryable
function isRetryableError(error) {
	const errorMessage = error.message?.toLowerCase() || '';
	return (
		errorMessage.includes('rate limit') ||
		errorMessage.includes('overloaded') ||
		errorMessage.includes('service temporarily unavailable') ||
		errorMessage.includes('timeout') ||
		errorMessage.includes('network error') ||
		error.status === 429 ||
		error.status >= 500
	);
}

/**
 * Extracts a user-friendly error message from a potentially complex AI error object.
 * Prioritizes nested messages and falls back to the top-level message.
 * @param {Error | object | any} error - The error object.
 * @returns {string} A concise error message.
 */
function _extractErrorMessage(error) {
	try {
		// Attempt 1: Look for Vercel SDK specific nested structure (common)
		if (error?.data?.error?.message) {
			return error.data.error.message;
		}

		// Attempt 2: Look for nested error message directly in the error object
		if (error?.error?.message) {
			return error.error.message;
		}

		// Attempt 3: Look for nested error message in response body if it's JSON string
		if (typeof error?.responseBody === 'string') {
			try {
				const body = JSON.parse(error.responseBody);
				if (body?.error?.message) {
					return body.error.message;
				}
			} catch (parseError) {
				// Ignore if responseBody is not valid JSON
			}
		}

		// Attempt 4: Use the top-level message if it exists
		if (typeof error?.message === 'string' && error.message) {
			return error.message;
		}

		// Attempt 5: Handle simple string errors
		if (typeof error === 'string') {
			return error;
		}

		// Fallback
		return 'An unknown AI service error occurred.';
	} catch (e) {
		// Safety net
		return 'Failed to extract error message.';
	}
}
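// Example shapes handled above (illustrative):
//   _extractErrorMessage({ data: { error: { message: 'Rate limited' } } }) -> 'Rate limited'
//   _extractErrorMessage({ responseBody: '{"error":{"message":"Bad key"}}' }) -> 'Bad key'
//   _extractErrorMessage('plain string error') -> 'plain string error'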
/**
 * Get role configuration (provider and model) based on role type
 * @param {string} role - The role ('main', 'research', 'fallback')
 * @param {string} projectRoot - Project root path
 * @returns {Object|null} Configuration object with provider and modelId
 */
function _getRoleConfiguration(role, projectRoot) {
	const roleConfigs = {
		main: {
			provider: getMainProvider(projectRoot),
			modelId: getMainModelId(projectRoot)
		},
		research: {
			provider: getResearchProvider(projectRoot),
			modelId: getResearchModelId(projectRoot)
		},
		fallback: {
			provider: getFallbackProvider(projectRoot),
			modelId: getFallbackModelId(projectRoot)
		}
	};

	return roleConfigs[role] || null;
}

/**
 * Get Vertex AI specific configuration
 * @param {string} projectRoot - Project root path
 * @param {Object} session - Session object
 * @returns {Object} Vertex AI configuration parameters
 */
function _getVertexConfiguration(projectRoot, session) {
	const projectId =
		getVertexProjectId(projectRoot) ||
		resolveEnvVariable('VERTEX_PROJECT_ID', session, projectRoot);
	const location =
		getVertexLocation(projectRoot) ||
		resolveEnvVariable('VERTEX_LOCATION', session, projectRoot) ||
		'us-central1';
	const credentialsPath = resolveEnvVariable(
		'GOOGLE_APPLICATION_CREDENTIALS',
		session,
		projectRoot
	);

	log(
		'debug',
		`Using Vertex AI configuration: Project ID=${projectId}, Location=${location}`
	);

	return {
		projectId,
		location,
		...(credentialsPath && { credentials: { credentialsFromEnv: true } })
	};
}

/**
 * Internal helper to resolve the API key for a given provider.
 * @param {string} providerName - The name of the provider (lowercase).
 * @param {object|null} session - Optional MCP session object.
 * @param {string|null} projectRoot - Optional project root path for .env fallback.
 * @returns {string|null} The API key or null if not found/needed.
 * @throws {Error} If a required API key is missing.
 */
function _resolveApiKey(providerName, session, projectRoot = null) {
	// Get provider instance
	const provider = _getProvider(providerName);
	if (!provider) {
		throw new Error(
			`Unknown provider '${providerName}' for API key resolution.`
		);
	}

	// All providers must implement getRequiredApiKeyName()
	const envVarName = provider.getRequiredApiKeyName();

	// If envVarName is null (like for MCP), return null directly
	if (envVarName === null) {
		return null;
	}

	const apiKey = resolveEnvVariable(envVarName, session, projectRoot);

	// Special handling for providers that can use alternative auth or no API key
	if (!provider.isRequiredApiKey()) {
		return apiKey || null;
	}

	if (!apiKey) {
		throw new Error(
			`Required API key ${envVarName} for provider '${providerName}' is not set in environment, session, or .env file.`
		);
	}
	return apiKey;
}

/**
 * Internal helper to attempt a provider-specific AI API call with retries.
 *
 * @param {object} provider - The provider instance to call.
 * @param {string} serviceType - The provider method to invoke (e.g., 'generateText').
 * @param {object} callParams - Parameters object for the provider method.
 * @param {string} providerName - Name of the provider (for logging).
 * @param {string} modelId - Specific model ID (for logging).
 * @param {string} attemptRole - The role being attempted (for logging).
 * @returns {Promise<object>} The result from the successful API call.
 * @throws {Error} If the call fails after all retries.
 */
async function _attemptProviderCallWithRetries(
	provider,
	serviceType,
	callParams,
	providerName,
	modelId,
	attemptRole
) {
	let retries = 0;
	const fnName = serviceType;

	while (retries <= MAX_RETRIES) {
		try {
			if (getDebugFlag()) {
				log(
					'info',
					`Attempt ${retries + 1}/${MAX_RETRIES + 1} calling ${fnName} (Provider: ${providerName}, Model: ${modelId}, Role: ${attemptRole})`
				);
			}

			// Call the appropriate method on the provider instance
			const result = await provider[serviceType](callParams);

			if (getDebugFlag()) {
				log(
					'info',
					`${fnName} succeeded for role ${attemptRole} (Provider: ${providerName}) on attempt ${retries + 1}`
				);
			}
			return result;
		} catch (error) {
			log(
				'warn',
				`Attempt ${retries + 1} failed for role ${attemptRole} (${fnName} / ${providerName}): ${error.message}`
			);

			if (isRetryableError(error) && retries < MAX_RETRIES) {
				retries++;
				const delay = INITIAL_RETRY_DELAY_MS * 2 ** (retries - 1);
				log(
					'info',
					`Something went wrong on the provider side. Retrying in ${delay / 1000}s...`
				);
				await new Promise((resolve) => setTimeout(resolve, delay));
			} else {
				log(
					'error',
					`Something went wrong on the provider side. Max retries reached for role ${attemptRole} (${fnName} / ${providerName}).`
				);
				throw error;
			}
		}
	}

	// Should not be reached due to throw in the else block
	throw new Error(
		`Exhausted all retries for role ${attemptRole} (${fnName} / ${providerName})`
	);
}
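// Retry schedule example: with MAX_RETRIES = 2 and INITIAL_RETRY_DELAY_MS = 1000,
// a retryable failure is attempted up to 3 times total, waiting 1s before the
// second attempt and 2s before the third (the delay doubles on each retry).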
/**
 * Base logic for unified service functions.
 * @param {string} serviceType - Type of service ('generateText', 'streamText', 'generateObject').
 * @param {object} params - Original parameters passed to the service function.
 * @param {string} params.role - The initial client role.
 * @param {object} [params.session=null] - Optional MCP session object.
 * @param {string} [params.projectRoot] - Optional project root path.
 * @param {string} params.commandName - Name of the command invoking the service.
 * @param {string} params.outputType - 'cli' or 'mcp'.
 * @param {string} [params.systemPrompt] - Optional system prompt.
 * @param {string} [params.prompt] - The prompt for the AI.
 * @param {import('zod').ZodSchema} [params.schema] - The Zod schema for the expected object.
 * @param {string} [params.objectName] - Name for object/tool.
 * @returns {Promise<any>} Result from the underlying provider call.
 */
async function _unifiedServiceRunner(serviceType, params) {
	const {
		role: initialRole,
		session,
		projectRoot,
		systemPrompt,
		prompt,
		schema,
		objectName,
		commandName,
		outputType,
		...restApiParams
	} = params;

	if (getDebugFlag()) {
		log('info', `${serviceType}Service called`, {
			role: initialRole,
			commandName,
			outputType,
			projectRoot
		});
	}

	const effectiveProjectRoot = projectRoot || findProjectRoot();
	const userId = getUserId(effectiveProjectRoot);

	let sequence;
	if (initialRole === 'main') {
		sequence = ['main', 'fallback', 'research'];
	} else if (initialRole === 'research') {
		sequence = ['research', 'fallback', 'main'];
	} else if (initialRole === 'fallback') {
		sequence = ['fallback', 'main', 'research'];
	} else {
		log(
			'warn',
			`Unknown initial role: ${initialRole}. Defaulting to main -> fallback -> research sequence.`
		);
		sequence = ['main', 'fallback', 'research'];
	}

	let lastError = null;
	let lastCleanErrorMessage =
		'AI service call failed for all configured roles.';
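	// Fallback order illustrated: a call made with role 'research' tries the
	// configured research model first, then the fallback model, then main; the
	// loop below walks that sequence until one provider call succeeds.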
	for (const currentRole of sequence) {
		let providerName;
		let modelId;
		let apiKey;
		let roleParams;
		let provider;
		let baseURL;
		let providerResponse;
		let telemetryData = null;

		try {
			log('debug', `New AI service call with role: ${currentRole}`);

			const roleConfig = _getRoleConfiguration(
				currentRole,
				effectiveProjectRoot
			);
			if (!roleConfig) {
				log(
					'error',
					`Unknown role encountered in _unifiedServiceRunner: ${currentRole}`
				);
				lastError =
					lastError || new Error(`Unknown AI role specified: ${currentRole}`);
				continue;
			}

			providerName = roleConfig.provider;
			modelId = roleConfig.modelId;

			if (!providerName || !modelId) {
				log(
					'warn',
					`Skipping role '${currentRole}': Provider or Model ID not configured.`
				);
				lastError =
					lastError ||
					new Error(
						`Configuration missing for role '${currentRole}'. Provider: ${providerName}, Model: ${modelId}`
					);
				continue;
			}

			// Get provider instance
			provider = _getProvider(providerName?.toLowerCase());
			if (!provider) {
				log(
					'warn',
					`Skipping role '${currentRole}': Provider '${providerName}' not supported.`
				);
				lastError =
					lastError ||
					new Error(`Unsupported provider configured: ${providerName}`);
				continue;
			}

			// Check API key if needed
			if (!providersWithoutApiKeys.includes(providerName?.toLowerCase())) {
				if (!isApiKeySet(providerName, session, effectiveProjectRoot)) {
					log(
						'warn',
						`Skipping role '${currentRole}' (Provider: ${providerName}): API key not set or invalid.`
					);
					lastError =
						lastError ||
						new Error(
							`API key for provider '${providerName}' (role: ${currentRole}) is not set.`
						);
					continue; // Skip to the next role in the sequence
				}
			}

			// Get base URL if configured (optional for most providers)
			baseURL = getBaseUrlForRole(currentRole, effectiveProjectRoot);

			// For Azure, Ollama, and Bedrock, fall back to the global base URL
			// when no role-specific URL is configured
			if (providerName?.toLowerCase() === 'azure' && !baseURL) {
				baseURL = getAzureBaseURL(effectiveProjectRoot);
				log('debug', `Using global Azure base URL: ${baseURL}`);
			} else if (providerName?.toLowerCase() === 'ollama' && !baseURL) {
				baseURL = getOllamaBaseURL(effectiveProjectRoot);
				log('debug', `Using global Ollama base URL: ${baseURL}`);
			} else if (providerName?.toLowerCase() === 'bedrock' && !baseURL) {
				baseURL = getBedrockBaseURL(effectiveProjectRoot);
				log('debug', `Using global Bedrock base URL: ${baseURL}`);
			}

			// Get AI parameters for the current role
			roleParams = getParametersForRole(currentRole, effectiveProjectRoot);

			apiKey = _resolveApiKey(
				providerName?.toLowerCase(),
				session,
				effectiveProjectRoot
			);

			// Prepare provider-specific configuration
			let providerSpecificParams = {};

			// Handle Vertex AI specific configuration
			if (providerName?.toLowerCase() === 'vertex') {
				providerSpecificParams = _getVertexConfiguration(
					effectiveProjectRoot,
					session
				);
			}

			const messages = [];
			const responseLanguage = getResponseLanguage(effectiveProjectRoot);
			const systemPromptWithLanguage = `${systemPrompt} \n\n Always respond in ${responseLanguage}.`;

			messages.push({
				role: 'system',
				content: systemPromptWithLanguage.trim()
			});

			// IN THE FUTURE WHEN DOING CONTEXT IMPROVEMENTS
			// {
			//   type: 'text',
			//   text: 'Large cached context here like a tasks json',
			//   providerOptions: {
			//     anthropic: { cacheControl: { type: 'ephemeral' } }
			//   }
			// } // Example
			// if (params.context) { // context is a json string of a tasks object or some other stuff
			//   messages.push({
			//     type: 'text',
			//     text: params.context,
			//     providerOptions: { anthropic: { cacheControl: { type: 'ephemeral' } } }
			//   });
			// }

			if (prompt) {
				messages.push({ role: 'user', content: prompt });
			} else {
				throw new Error('User prompt content is missing.');
			}

			const callParams = {
				apiKey,
				modelId,
				maxTokens: roleParams.maxTokens,
				temperature: roleParams.temperature,
				messages,
				...(baseURL && { baseURL }),
				...((serviceType === 'generateObject' ||
					serviceType === 'streamObject') && { schema, objectName }),
				...providerSpecificParams,
				...restApiParams
			};

			providerResponse = await _attemptProviderCallWithRetries(
				provider,
				serviceType,
				callParams,
				providerName,
				modelId,
				currentRole
			);
			if (userId && providerResponse && providerResponse.usage) {
				try {
					telemetryData = await logAiUsage({
						userId,
						commandName,
						providerName,
						modelId,
						inputTokens: providerResponse.usage.inputTokens,
						outputTokens: providerResponse.usage.outputTokens,
						outputType
					});
				} catch (telemetryError) {
					// logAiUsage already logs its own errors and returns null on failure
					// No need to log again here, telemetryData will remain null
				}
			} else if (userId && providerResponse && !providerResponse.usage) {
				log(
					'warn',
					`Cannot log telemetry for ${commandName} (${providerName}/${modelId}): AI result missing 'usage' data. (May be expected for streams)`
				);
			}

			let finalMainResult;
			if (serviceType === 'generateText') {
				finalMainResult = providerResponse.text;
			} else if (serviceType === 'generateObject') {
				finalMainResult = providerResponse.object;
			} else if (
				serviceType === 'streamText' ||
				serviceType === 'streamObject'
			) {
				finalMainResult = providerResponse;
			} else {
				log(
					'error',
					`Unknown serviceType in _unifiedServiceRunner: ${serviceType}`
				);
				finalMainResult = providerResponse;
			}

			// Get tag information for the response
			const tagInfo = _getTagInfo(effectiveProjectRoot);

			return {
				mainResult: finalMainResult,
				telemetryData: telemetryData,
				tagInfo: tagInfo,
				providerName: providerName,
				modelId: modelId
			};
		} catch (error) {
			const cleanMessage = _extractErrorMessage(error);
			log(
				'error',
				`Service call failed for role ${currentRole} (Provider: ${providerName || 'unknown'}, Model: ${modelId || 'unknown'}): ${cleanMessage}`
			);
			lastError = error;
			lastCleanErrorMessage = cleanMessage;

			if (serviceType === 'generateObject') {
				const lowerCaseMessage = cleanMessage.toLowerCase();
				if (
					lowerCaseMessage.includes(
						'no endpoints found that support tool use'
					) ||
					lowerCaseMessage.includes('does not support tool_use') ||
					lowerCaseMessage.includes('tool use is not supported') ||
					lowerCaseMessage.includes('tools are not supported') ||
					lowerCaseMessage.includes('function calling is not supported')
				) {
					const specificErrorMsg = `Model '${modelId || 'unknown'}' via provider '${providerName || 'unknown'}' does not support the 'tool use' required by generateObjectService. Please configure a model that supports tool/function calling for the '${currentRole}' role, or use generateTextService if structured output is not strictly required.`;
					log('error', `[Tool Support Error] ${specificErrorMsg}`);
					throw new Error(specificErrorMsg);
				}
			}
		}
	}

	log('error', `All roles in the sequence [${sequence.join(', ')}] failed.`);
	throw new Error(lastCleanErrorMessage);
}

/**
 * Unified service function for generating text.
 * Handles client retrieval, retries, and fallback sequence.
 *
 * @param {object} params - Parameters for the service call.
 * @param {string} params.role - The initial client role ('main', 'research', 'fallback').
 * @param {object} [params.session=null] - Optional MCP session object.
 * @param {string} [params.projectRoot=null] - Optional project root path for .env fallback.
 * @param {string} params.prompt - The prompt for the AI.
 * @param {string} [params.systemPrompt] - Optional system prompt.
 * @param {string} params.commandName - Name of the command invoking the service.
 * @param {string} [params.outputType='cli'] - 'cli' or 'mcp'.
 * @returns {Promise<object>} Result object containing generated text and usage data.
 */
async function generateTextService(params) {
	// Ensure default outputType if not provided
	const defaults = { outputType: 'cli' };
	const combinedParams = { ...defaults, ...params };
	// TODO: Validate commandName exists?
	return _unifiedServiceRunner('generateText', combinedParams);
}
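/*
 * Usage sketch (hypothetical values; parameter names follow the JSDoc above):
 *
 *   const { mainResult, telemetryData } = await generateTextService({
 *     role: 'main',
 *     prompt: 'Summarize the open tasks.',
 *     systemPrompt: 'You are a concise assistant.',
 *     commandName: 'example-command',
 *     outputType: 'cli'
 *   });
 */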
/**
 * Unified service function for streaming text.
 * Handles client retrieval, retries, and fallback sequence.
 *
 * @param {object} params - Parameters for the service call.
 * @param {string} params.role - The initial client role ('main', 'research', 'fallback').
 * @param {object} [params.session=null] - Optional MCP session object.
 * @param {string} [params.projectRoot=null] - Optional project root path for .env fallback.
 * @param {string} params.prompt - The prompt for the AI.
 * @param {string} [params.systemPrompt] - Optional system prompt.
 * @param {string} params.commandName - Name of the command invoking the service.
 * @param {string} [params.outputType='cli'] - 'cli' or 'mcp'.
 * @returns {Promise<object>} Result object containing the stream and usage data.
 */
async function streamTextService(params) {
	const defaults = { outputType: 'cli' };
	const combinedParams = { ...defaults, ...params };
	// TODO: Validate commandName exists?
	// NOTE: Telemetry for streaming might be tricky as usage data often comes at the end.
	// The current implementation logs *after* the stream is returned.
	// We might need to adjust how usage is captured/logged for streams.
	return _unifiedServiceRunner('streamText', combinedParams);
}

/**
 * Unified service function for streaming structured objects.
 * Uses Vercel AI SDK's streamObject for proper JSON streaming.
 *
 * @param {object} params - Parameters for the service call.
 * @param {string} params.role - The initial client role ('main', 'research', 'fallback').
 * @param {object} [params.session=null] - Optional MCP session object.
 * @param {string} [params.projectRoot=null] - Optional project root path for .env fallback.
 * @param {import('zod').ZodSchema} params.schema - The Zod schema for the expected object.
 * @param {string} params.prompt - The prompt for the AI.
 * @param {string} [params.systemPrompt] - Optional system prompt.
 * @param {string} params.commandName - Name of the command invoking the service.
 * @param {string} [params.outputType='cli'] - 'cli' or 'mcp'.
 * @returns {Promise<object>} Result object containing the stream and usage data.
 */
async function streamObjectService(params) {
	const defaults = { outputType: 'cli' };
	const combinedParams = { ...defaults, ...params };
	// Stream object requires a schema
	if (!combinedParams.schema) {
		throw new Error('streamObjectService requires a schema parameter');
	}
	return _unifiedServiceRunner('streamObject', combinedParams);
}

/**
 * Unified service function for generating structured objects.
 * Handles client retrieval, retries, and fallback sequence.
 *
 * @param {object} params - Parameters for the service call.
 * @param {string} params.role - The initial client role ('main', 'research', 'fallback').
 * @param {object} [params.session=null] - Optional MCP session object.
 * @param {string} [params.projectRoot=null] - Optional project root path for .env fallback.
 * @param {import('zod').ZodSchema} params.schema - The Zod schema for the expected object.
 * @param {string} params.prompt - The prompt for the AI.
 * @param {string} [params.systemPrompt] - Optional system prompt.
 * @param {string} [params.objectName='generated_object'] - Name for object/tool.
 * @param {number} [params.maxRetries=3] - Max retries for object generation.
 * @param {string} params.commandName - Name of the command invoking the service.
 * @param {string} [params.outputType='cli'] - 'cli' or 'mcp'.
 * @returns {Promise<object>} Result object containing the generated object and usage data.
 */
async function generateObjectService(params) {
	const defaults = {
		objectName: 'generated_object',
		maxRetries: 3,
		outputType: 'cli'
	};
	const combinedParams = { ...defaults, ...params };
	// TODO: Validate commandName exists?
	return _unifiedServiceRunner('generateObject', combinedParams);
}
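/*
 * Usage sketch for generateObjectService (hypothetical schema; zod is assumed
 * available, consistent with the import('zod') JSDoc types above):
 *
 *   import { z } from 'zod';
 *   const { mainResult } = await generateObjectService({
 *     role: 'main',
 *     schema: z.object({ title: z.string(), priority: z.enum(['low', 'high']) }),
 *     objectName: 'task_summary',
 *     prompt: 'Summarize task 15 as JSON.',
 *     commandName: 'example-command'
 *   });
 */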
// --- Telemetry Function ---

/**
 * Logs AI usage telemetry data.
 * For now, it just logs to the console. Sending will be implemented later.
 * @param {object} params - Telemetry parameters.
 * @param {string} params.userId - Unique user identifier.
 * @param {string} params.commandName - The command that triggered the AI call.
 * @param {string} params.providerName - The AI provider used (e.g., 'openai').
 * @param {string} params.modelId - The specific AI model ID used.
 * @param {number} params.inputTokens - Number of input tokens.
 * @param {number} params.outputTokens - Number of output tokens.
 * @param {string} params.outputType - 'cli' or 'mcp'.
 */
async function logAiUsage({
	userId,
	commandName,
	providerName,
	modelId,
	inputTokens,
	outputTokens,
	outputType
}) {
	try {
		const timestamp = new Date().toISOString();
		const totalTokens = (inputTokens || 0) + (outputTokens || 0);

		// Destructure currency along with costs
		const { inputCost, outputCost, currency } = _getCostForModel(
			providerName,
			modelId
		);

		const totalCost = _calculateCost(
			inputTokens,
			outputTokens,
			inputCost,
			outputCost
		);

		const telemetryData = {
			timestamp,
			userId,
			commandName,
			modelUsed: modelId, // Consistent field name from requirements
			providerName, // Keep provider name for context
			inputTokens: inputTokens || 0,
			outputTokens: outputTokens || 0,
			totalTokens,
			totalCost,
			currency // Add currency to the telemetry data
		};

		if (getDebugFlag()) {
			log('info', 'AI Usage Telemetry:', telemetryData);
		}

		// TODO (Subtask 77.2): Send telemetryData securely to the external endpoint.

		return telemetryData;
	} catch (error) {
		log('error', `Failed to log AI usage telemetry: ${error.message}`, {
			error
		});
		// Don't re-throw; telemetry failure shouldn't block core functionality.
		return null;
	}
}

export {
	generateTextService,
	streamTextService,
	streamObjectService,
	generateObjectService,
	logAiUsage
};
```