This is page 31 of 52. Use http://codebase.md/eyaltoledano/claude-task-master?lines=true&page={x} to view the full context. # Directory Structure ``` ├── .changeset │ ├── config.json │ └── README.md ├── .claude │ ├── agents │ │ ├── task-checker.md │ │ ├── task-executor.md │ │ └── task-orchestrator.md │ ├── commands │ │ ├── dedupe.md │ │ └── tm │ │ ├── add-dependency │ │ │ └── add-dependency.md │ │ ├── add-subtask │ │ │ ├── add-subtask.md │ │ │ └── convert-task-to-subtask.md │ │ ├── add-task │ │ │ └── add-task.md │ │ ├── analyze-complexity │ │ │ └── analyze-complexity.md │ │ ├── complexity-report │ │ │ └── complexity-report.md │ │ ├── expand │ │ │ ├── expand-all-tasks.md │ │ │ └── expand-task.md │ │ ├── fix-dependencies │ │ │ └── fix-dependencies.md │ │ ├── generate │ │ │ └── generate-tasks.md │ │ ├── help.md │ │ ├── init │ │ │ ├── init-project-quick.md │ │ │ └── init-project.md │ │ ├── learn.md │ │ ├── list │ │ │ ├── list-tasks-by-status.md │ │ │ ├── list-tasks-with-subtasks.md │ │ │ └── list-tasks.md │ │ ├── models │ │ │ ├── setup-models.md │ │ │ └── view-models.md │ │ ├── next │ │ │ └── next-task.md │ │ ├── parse-prd │ │ │ ├── parse-prd-with-research.md │ │ │ └── parse-prd.md │ │ ├── remove-dependency │ │ │ └── remove-dependency.md │ │ ├── remove-subtask │ │ │ └── remove-subtask.md │ │ ├── remove-subtasks │ │ │ ├── remove-all-subtasks.md │ │ │ └── remove-subtasks.md │ │ ├── remove-task │ │ │ └── remove-task.md │ │ ├── set-status │ │ │ ├── to-cancelled.md │ │ │ ├── to-deferred.md │ │ │ ├── to-done.md │ │ │ ├── to-in-progress.md │ │ │ ├── to-pending.md │ │ │ └── to-review.md │ │ ├── setup │ │ │ ├── install-taskmaster.md │ │ │ └── quick-install-taskmaster.md │ │ ├── show │ │ │ └── show-task.md │ │ ├── status │ │ │ └── project-status.md │ │ ├── sync-readme │ │ │ └── sync-readme.md │ │ ├── tm-main.md │ │ ├── update │ │ │ ├── update-single-task.md │ │ │ ├── update-task.md │ │ │ └── update-tasks-from-id.md │ │ ├── utils │ │ │ └── analyze-project.md │ │ ├── validate-dependencies │ │ │ └── validate-dependencies.md │ │ └── workflows │ │ ├── auto-implement-tasks.md │ │ ├── command-pipeline.md │ │ └── smart-workflow.md │ └── TM_COMMANDS_GUIDE.md ├── .coderabbit.yaml ├── .cursor │ ├── mcp.json │ └── rules │ ├── ai_providers.mdc │ ├── ai_services.mdc │ ├── architecture.mdc │ ├── changeset.mdc │ ├── commands.mdc │ ├── context_gathering.mdc │ ├── cursor_rules.mdc │ ├── dependencies.mdc │ ├── dev_workflow.mdc │ ├── git_workflow.mdc │ ├── glossary.mdc │ ├── mcp.mdc │ ├── new_features.mdc │ ├── self_improve.mdc │ ├── tags.mdc │ ├── taskmaster.mdc │ ├── tasks.mdc │ ├── telemetry.mdc │ ├── test_workflow.mdc │ ├── tests.mdc │ ├── ui.mdc │ └── utilities.mdc ├── .cursorignore ├── .env.example ├── .github │ ├── ISSUE_TEMPLATE │ │ ├── bug_report.md │ │ ├── enhancements---feature-requests.md │ │ └── feedback.md │ ├── PULL_REQUEST_TEMPLATE │ │ ├── bugfix.md │ │ ├── config.yml │ │ ├── feature.md │ │ └── integration.md │ ├── PULL_REQUEST_TEMPLATE.md │ ├── scripts │ │ ├── auto-close-duplicates.mjs │ │ ├── backfill-duplicate-comments.mjs │ │ ├── check-pre-release-mode.mjs │ │ ├── parse-metrics.mjs │ │ ├── release.mjs │ │ ├── tag-extension.mjs │ │ └── utils.mjs │ └── workflows │ ├── auto-close-duplicates.yml │ ├── backfill-duplicate-comments.yml │ ├── ci.yml │ ├── claude-dedupe-issues.yml │ ├── claude-docs-trigger.yml │ ├── claude-docs-updater.yml │ ├── claude-issue-triage.yml │ ├── claude.yml │ ├── extension-ci.yml │ ├── extension-release.yml │ ├── log-issue-events.yml │ ├── pre-release.yml │ ├── release-check.yml │ 
├── release.yml │ ├── update-models-md.yml │ └── weekly-metrics-discord.yml ├── .gitignore ├── .kiro │ ├── hooks │ │ ├── tm-code-change-task-tracker.kiro.hook │ │ ├── tm-complexity-analyzer.kiro.hook │ │ ├── tm-daily-standup-assistant.kiro.hook │ │ ├── tm-git-commit-task-linker.kiro.hook │ │ ├── tm-pr-readiness-checker.kiro.hook │ │ ├── tm-task-dependency-auto-progression.kiro.hook │ │ └── tm-test-success-task-completer.kiro.hook │ ├── settings │ │ └── mcp.json │ └── steering │ ├── dev_workflow.md │ ├── kiro_rules.md │ ├── self_improve.md │ ├── taskmaster_hooks_workflow.md │ └── taskmaster.md ├── .manypkg.json ├── .mcp.json ├── .npmignore ├── .nvmrc ├── .taskmaster │ ├── CLAUDE.md │ ├── config.json │ ├── docs │ │ ├── MIGRATION-ROADMAP.md │ │ ├── prd-tm-start.txt │ │ ├── prd.txt │ │ ├── README.md │ │ ├── research │ │ │ ├── 2025-06-14_how-can-i-improve-the-scope-up-and-scope-down-comm.md │ │ │ ├── 2025-06-14_should-i-be-using-any-specific-libraries-for-this.md │ │ │ ├── 2025-06-14_test-save-functionality.md │ │ │ ├── 2025-06-14_test-the-fix-for-duplicate-saves-final-test.md │ │ │ └── 2025-08-01_do-we-need-to-add-new-commands-or-can-we-just-weap.md │ │ ├── task-template-importing-prd.txt │ │ ├── test-prd.txt │ │ └── tm-core-phase-1.txt │ ├── reports │ │ ├── task-complexity-report_cc-kiro-hooks.json │ │ ├── task-complexity-report_test-prd-tag.json │ │ ├── task-complexity-report_tm-core-phase-1.json │ │ ├── task-complexity-report.json │ │ └── tm-core-complexity.json │ ├── state.json │ ├── tasks │ │ ├── task_001_tm-start.txt │ │ ├── task_002_tm-start.txt │ │ ├── task_003_tm-start.txt │ │ ├── task_004_tm-start.txt │ │ ├── task_007_tm-start.txt │ │ └── tasks.json │ └── templates │ └── example_prd.txt ├── .vscode │ ├── extensions.json │ └── settings.json ├── apps │ ├── cli │ │ ├── CHANGELOG.md │ │ ├── package.json │ │ ├── src │ │ │ ├── commands │ │ │ │ ├── auth.command.ts │ │ │ │ ├── context.command.ts │ │ │ │ ├── list.command.ts │ │ │ │ ├── set-status.command.ts │ │ │ │ ├── show.command.ts │ │ │ │ └── start.command.ts │ │ │ ├── index.ts │ │ │ ├── ui │ │ │ │ ├── components │ │ │ │ │ ├── dashboard.component.ts │ │ │ │ │ ├── header.component.ts │ │ │ │ │ ├── index.ts │ │ │ │ │ ├── next-task.component.ts │ │ │ │ │ ├── suggested-steps.component.ts │ │ │ │ │ └── task-detail.component.ts │ │ │ │ └── index.ts │ │ │ └── utils │ │ │ ├── auto-update.ts │ │ │ └── ui.ts │ │ └── tsconfig.json │ ├── docs │ │ ├── archive │ │ │ ├── ai-client-utils-example.mdx │ │ │ ├── ai-development-workflow.mdx │ │ │ ├── command-reference.mdx │ │ │ ├── configuration.mdx │ │ │ ├── cursor-setup.mdx │ │ │ ├── examples.mdx │ │ │ └── Installation.mdx │ │ ├── best-practices │ │ │ ├── advanced-tasks.mdx │ │ │ ├── configuration-advanced.mdx │ │ │ └── index.mdx │ │ ├── capabilities │ │ │ ├── cli-root-commands.mdx │ │ │ ├── index.mdx │ │ │ ├── mcp.mdx │ │ │ └── task-structure.mdx │ │ ├── CHANGELOG.md │ │ ├── docs.json │ │ ├── favicon.svg │ │ ├── getting-started │ │ │ ├── contribute.mdx │ │ │ ├── faq.mdx │ │ │ └── quick-start │ │ │ ├── configuration-quick.mdx │ │ │ ├── execute-quick.mdx │ │ │ ├── installation.mdx │ │ │ ├── moving-forward.mdx │ │ │ ├── prd-quick.mdx │ │ │ ├── quick-start.mdx │ │ │ ├── requirements.mdx │ │ │ ├── rules-quick.mdx │ │ │ └── tasks-quick.mdx │ │ ├── introduction.mdx │ │ ├── licensing.md │ │ ├── logo │ │ │ ├── dark.svg │ │ │ ├── light.svg │ │ │ └── task-master-logo.png │ │ ├── package.json │ │ ├── README.md │ │ ├── style.css │ │ ├── vercel.json │ │ └── whats-new.mdx │ └── extension │ ├── .vscodeignore │ ├── assets 
│ │ ├── banner.png │ │ ├── icon-dark.svg │ │ ├── icon-light.svg │ │ ├── icon.png │ │ ├── screenshots │ │ │ ├── kanban-board.png │ │ │ └── task-details.png │ │ └── sidebar-icon.svg │ ├── CHANGELOG.md │ ├── components.json │ ├── docs │ │ ├── extension-CI-setup.md │ │ └── extension-development-guide.md │ ├── esbuild.js │ ├── LICENSE │ ├── package.json │ ├── package.mjs │ ├── package.publish.json │ ├── README.md │ ├── src │ │ ├── components │ │ │ ├── ConfigView.tsx │ │ │ ├── constants.ts │ │ │ ├── TaskDetails │ │ │ │ ├── AIActionsSection.tsx │ │ │ │ ├── DetailsSection.tsx │ │ │ │ ├── PriorityBadge.tsx │ │ │ │ ├── SubtasksSection.tsx │ │ │ │ ├── TaskMetadataSidebar.tsx │ │ │ │ └── useTaskDetails.ts │ │ │ ├── TaskDetailsView.tsx │ │ │ ├── TaskMasterLogo.tsx │ │ │ └── ui │ │ │ ├── badge.tsx │ │ │ ├── breadcrumb.tsx │ │ │ ├── button.tsx │ │ │ ├── card.tsx │ │ │ ├── collapsible.tsx │ │ │ ├── CollapsibleSection.tsx │ │ │ ├── dropdown-menu.tsx │ │ │ ├── label.tsx │ │ │ ├── scroll-area.tsx │ │ │ ├── separator.tsx │ │ │ ├── shadcn-io │ │ │ │ └── kanban │ │ │ │ └── index.tsx │ │ │ └── textarea.tsx │ │ ├── extension.ts │ │ ├── index.ts │ │ ├── lib │ │ │ └── utils.ts │ │ ├── services │ │ │ ├── config-service.ts │ │ │ ├── error-handler.ts │ │ │ ├── notification-preferences.ts │ │ │ ├── polling-service.ts │ │ │ ├── polling-strategies.ts │ │ │ ├── sidebar-webview-manager.ts │ │ │ ├── task-repository.ts │ │ │ ├── terminal-manager.ts │ │ │ └── webview-manager.ts │ │ ├── test │ │ │ └── extension.test.ts │ │ ├── utils │ │ │ ├── configManager.ts │ │ │ ├── connectionManager.ts │ │ │ ├── errorHandler.ts │ │ │ ├── event-emitter.ts │ │ │ ├── logger.ts │ │ │ ├── mcpClient.ts │ │ │ ├── notificationPreferences.ts │ │ │ └── task-master-api │ │ │ ├── cache │ │ │ │ └── cache-manager.ts │ │ │ ├── index.ts │ │ │ ├── mcp-client.ts │ │ │ ├── transformers │ │ │ │ └── task-transformer.ts │ │ │ └── types │ │ │ └── index.ts │ │ └── webview │ │ ├── App.tsx │ │ ├── components │ │ │ ├── AppContent.tsx │ │ │ ├── EmptyState.tsx │ │ │ ├── ErrorBoundary.tsx │ │ │ ├── PollingStatus.tsx │ │ │ ├── PriorityBadge.tsx │ │ │ ├── SidebarView.tsx │ │ │ ├── TagDropdown.tsx │ │ │ ├── TaskCard.tsx │ │ │ ├── TaskEditModal.tsx │ │ │ ├── TaskMasterKanban.tsx │ │ │ ├── ToastContainer.tsx │ │ │ └── ToastNotification.tsx │ │ ├── constants │ │ │ └── index.ts │ │ ├── contexts │ │ │ └── VSCodeContext.tsx │ │ ├── hooks │ │ │ ├── useTaskQueries.ts │ │ │ ├── useVSCodeMessages.ts │ │ │ └── useWebviewHeight.ts │ │ ├── index.css │ │ ├── index.tsx │ │ ├── providers │ │ │ └── QueryProvider.tsx │ │ ├── reducers │ │ │ └── appReducer.ts │ │ ├── sidebar.tsx │ │ ├── types │ │ │ └── index.ts │ │ └── utils │ │ ├── logger.ts │ │ └── toast.ts │ └── tsconfig.json ├── assets │ ├── .windsurfrules │ ├── AGENTS.md │ ├── claude │ │ ├── agents │ │ │ ├── task-checker.md │ │ │ ├── task-executor.md │ │ │ └── task-orchestrator.md │ │ ├── commands │ │ │ └── tm │ │ │ ├── add-dependency │ │ │ │ └── add-dependency.md │ │ │ ├── add-subtask │ │ │ │ ├── add-subtask.md │ │ │ │ └── convert-task-to-subtask.md │ │ │ ├── add-task │ │ │ │ └── add-task.md │ │ │ ├── analyze-complexity │ │ │ │ └── analyze-complexity.md │ │ │ ├── clear-subtasks │ │ │ │ ├── clear-all-subtasks.md │ │ │ │ └── clear-subtasks.md │ │ │ ├── complexity-report │ │ │ │ └── complexity-report.md │ │ │ ├── expand │ │ │ │ ├── expand-all-tasks.md │ │ │ │ └── expand-task.md │ │ │ ├── fix-dependencies │ │ │ │ └── fix-dependencies.md │ │ │ ├── generate │ │ │ │ └── generate-tasks.md │ │ │ ├── help.md │ │ │ ├── init │ │ │ │ ├── 
init-project-quick.md │ │ │ │ └── init-project.md │ │ │ ├── learn.md │ │ │ ├── list │ │ │ │ ├── list-tasks-by-status.md │ │ │ │ ├── list-tasks-with-subtasks.md │ │ │ │ └── list-tasks.md │ │ │ ├── models │ │ │ │ ├── setup-models.md │ │ │ │ └── view-models.md │ │ │ ├── next │ │ │ │ └── next-task.md │ │ │ ├── parse-prd │ │ │ │ ├── parse-prd-with-research.md │ │ │ │ └── parse-prd.md │ │ │ ├── remove-dependency │ │ │ │ └── remove-dependency.md │ │ │ ├── remove-subtask │ │ │ │ └── remove-subtask.md │ │ │ ├── remove-subtasks │ │ │ │ ├── remove-all-subtasks.md │ │ │ │ └── remove-subtasks.md │ │ │ ├── remove-task │ │ │ │ └── remove-task.md │ │ │ ├── set-status │ │ │ │ ├── to-cancelled.md │ │ │ │ ├── to-deferred.md │ │ │ │ ├── to-done.md │ │ │ │ ├── to-in-progress.md │ │ │ │ ├── to-pending.md │ │ │ │ └── to-review.md │ │ │ ├── setup │ │ │ │ ├── install-taskmaster.md │ │ │ │ └── quick-install-taskmaster.md │ │ │ ├── show │ │ │ │ └── show-task.md │ │ │ ├── status │ │ │ │ └── project-status.md │ │ │ ├── sync-readme │ │ │ │ └── sync-readme.md │ │ │ ├── tm-main.md │ │ │ ├── update │ │ │ │ ├── update-single-task.md │ │ │ │ ├── update-task.md │ │ │ │ └── update-tasks-from-id.md │ │ │ ├── utils │ │ │ │ └── analyze-project.md │ │ │ ├── validate-dependencies │ │ │ │ └── validate-dependencies.md │ │ │ └── workflows │ │ │ ├── auto-implement-tasks.md │ │ │ ├── command-pipeline.md │ │ │ └── smart-workflow.md │ │ └── TM_COMMANDS_GUIDE.md │ ├── config.json │ ├── env.example │ ├── example_prd.txt │ ├── gitignore │ ├── kiro-hooks │ │ ├── tm-code-change-task-tracker.kiro.hook │ │ ├── tm-complexity-analyzer.kiro.hook │ │ ├── tm-daily-standup-assistant.kiro.hook │ │ ├── tm-git-commit-task-linker.kiro.hook │ │ ├── tm-pr-readiness-checker.kiro.hook │ │ ├── tm-task-dependency-auto-progression.kiro.hook │ │ └── tm-test-success-task-completer.kiro.hook │ ├── roocode │ │ ├── .roo │ │ │ ├── rules-architect │ │ │ │ └── architect-rules │ │ │ ├── rules-ask │ │ │ │ └── ask-rules │ │ │ ├── rules-code │ │ │ │ └── code-rules │ │ │ ├── rules-debug │ │ │ │ └── debug-rules │ │ │ ├── rules-orchestrator │ │ │ │ └── orchestrator-rules │ │ │ └── rules-test │ │ │ └── test-rules │ │ └── .roomodes │ ├── rules │ │ ├── cursor_rules.mdc │ │ ├── dev_workflow.mdc │ │ ├── self_improve.mdc │ │ ├── taskmaster_hooks_workflow.mdc │ │ └── taskmaster.mdc │ └── scripts_README.md ├── bin │ └── task-master.js ├── biome.json ├── CHANGELOG.md ├── CLAUDE.md ├── context │ ├── chats │ │ ├── add-task-dependencies-1.md │ │ └── max-min-tokens.txt.md │ ├── fastmcp-core.txt │ ├── fastmcp-docs.txt │ ├── MCP_INTEGRATION.md │ ├── mcp-js-sdk-docs.txt │ ├── mcp-protocol-repo.txt │ ├── mcp-protocol-schema-03262025.json │ └── mcp-protocol-spec.txt ├── CONTRIBUTING.md ├── docs │ ├── CLI-COMMANDER-PATTERN.md │ ├── command-reference.md │ ├── configuration.md │ ├── contributor-docs │ │ └── testing-roo-integration.md │ ├── cross-tag-task-movement.md │ ├── examples │ │ └── claude-code-usage.md │ ├── examples.md │ ├── licensing.md │ ├── mcp-provider-guide.md │ ├── mcp-provider.md │ ├── migration-guide.md │ ├── models.md │ ├── providers │ │ └── gemini-cli.md │ ├── README.md │ ├── scripts │ │ └── models-json-to-markdown.js │ ├── task-structure.md │ └── tutorial.md ├── images │ └── logo.png ├── index.js ├── jest.config.js ├── jest.resolver.cjs ├── LICENSE ├── llms-install.md ├── mcp-server │ ├── server.js │ └── src │ ├── core │ │ ├── __tests__ │ │ │ └── context-manager.test.js │ │ ├── context-manager.js │ │ ├── direct-functions │ │ │ ├── add-dependency.js │ │ │ ├── add-subtask.js │ │ 
│ ├── add-tag.js │ │ │ ├── add-task.js │ │ │ ├── analyze-task-complexity.js │ │ │ ├── cache-stats.js │ │ │ ├── clear-subtasks.js │ │ │ ├── complexity-report.js │ │ │ ├── copy-tag.js │ │ │ ├── create-tag-from-branch.js │ │ │ ├── delete-tag.js │ │ │ ├── expand-all-tasks.js │ │ │ ├── expand-task.js │ │ │ ├── fix-dependencies.js │ │ │ ├── generate-task-files.js │ │ │ ├── initialize-project.js │ │ │ ├── list-tags.js │ │ │ ├── list-tasks.js │ │ │ ├── models.js │ │ │ ├── move-task-cross-tag.js │ │ │ ├── move-task.js │ │ │ ├── next-task.js │ │ │ ├── parse-prd.js │ │ │ ├── remove-dependency.js │ │ │ ├── remove-subtask.js │ │ │ ├── remove-task.js │ │ │ ├── rename-tag.js │ │ │ ├── research.js │ │ │ ├── response-language.js │ │ │ ├── rules.js │ │ │ ├── scope-down.js │ │ │ ├── scope-up.js │ │ │ ├── set-task-status.js │ │ │ ├── show-task.js │ │ │ ├── update-subtask-by-id.js │ │ │ ├── update-task-by-id.js │ │ │ ├── update-tasks.js │ │ │ ├── use-tag.js │ │ │ └── validate-dependencies.js │ │ ├── task-master-core.js │ │ └── utils │ │ ├── env-utils.js │ │ └── path-utils.js │ ├── custom-sdk │ │ ├── errors.js │ │ ├── index.js │ │ ├── json-extractor.js │ │ ├── language-model.js │ │ ├── message-converter.js │ │ └── schema-converter.js │ ├── index.js │ ├── logger.js │ ├── providers │ │ └── mcp-provider.js │ └── tools │ ├── add-dependency.js │ ├── add-subtask.js │ ├── add-tag.js │ ├── add-task.js │ ├── analyze.js │ ├── clear-subtasks.js │ ├── complexity-report.js │ ├── copy-tag.js │ ├── delete-tag.js │ ├── expand-all.js │ ├── expand-task.js │ ├── fix-dependencies.js │ ├── generate.js │ ├── get-operation-status.js │ ├── get-task.js │ ├── get-tasks.js │ ├── index.js │ ├── initialize-project.js │ ├── list-tags.js │ ├── models.js │ ├── move-task.js │ ├── next-task.js │ ├── parse-prd.js │ ├── remove-dependency.js │ ├── remove-subtask.js │ ├── remove-task.js │ ├── rename-tag.js │ ├── research.js │ ├── response-language.js │ ├── rules.js │ ├── scope-down.js │ ├── scope-up.js │ ├── set-task-status.js │ ├── update-subtask.js │ ├── update-task.js │ ├── update.js │ ├── use-tag.js │ ├── utils.js │ └── validate-dependencies.js ├── mcp-test.js ├── output.json ├── package-lock.json ├── package.json ├── packages │ ├── build-config │ │ ├── CHANGELOG.md │ │ ├── package.json │ │ ├── src │ │ │ └── tsdown.base.ts │ │ └── tsconfig.json │ └── tm-core │ ├── .gitignore │ ├── CHANGELOG.md │ ├── docs │ │ └── listTasks-architecture.md │ ├── package.json │ ├── POC-STATUS.md │ ├── README.md │ ├── src │ │ ├── auth │ │ │ ├── auth-manager.test.ts │ │ │ ├── auth-manager.ts │ │ │ ├── config.ts │ │ │ ├── credential-store.test.ts │ │ │ ├── credential-store.ts │ │ │ ├── index.ts │ │ │ ├── oauth-service.ts │ │ │ ├── supabase-session-storage.ts │ │ │ └── types.ts │ │ ├── clients │ │ │ ├── index.ts │ │ │ └── supabase-client.ts │ │ ├── config │ │ │ ├── config-manager.spec.ts │ │ │ ├── config-manager.ts │ │ │ ├── index.ts │ │ │ └── services │ │ │ ├── config-loader.service.spec.ts │ │ │ ├── config-loader.service.ts │ │ │ ├── config-merger.service.spec.ts │ │ │ ├── config-merger.service.ts │ │ │ ├── config-persistence.service.spec.ts │ │ │ ├── config-persistence.service.ts │ │ │ ├── environment-config-provider.service.spec.ts │ │ │ ├── environment-config-provider.service.ts │ │ │ ├── index.ts │ │ │ ├── runtime-state-manager.service.spec.ts │ │ │ └── runtime-state-manager.service.ts │ │ ├── constants │ │ │ └── index.ts │ │ ├── entities │ │ │ └── task.entity.ts │ │ ├── errors │ │ │ ├── index.ts │ │ │ └── task-master-error.ts │ │ ├── executors │ │ │ ├── 
base-executor.ts │ │ │ ├── claude-executor.ts │ │ │ ├── executor-factory.ts │ │ │ ├── executor-service.ts │ │ │ ├── index.ts │ │ │ └── types.ts │ │ ├── index.ts │ │ ├── interfaces │ │ │ ├── ai-provider.interface.ts │ │ │ ├── configuration.interface.ts │ │ │ ├── index.ts │ │ │ └── storage.interface.ts │ │ ├── logger │ │ │ ├── factory.ts │ │ │ ├── index.ts │ │ │ └── logger.ts │ │ ├── mappers │ │ │ └── TaskMapper.ts │ │ ├── parser │ │ │ └── index.ts │ │ ├── providers │ │ │ ├── ai │ │ │ │ ├── base-provider.ts │ │ │ │ └── index.ts │ │ │ └── index.ts │ │ ├── repositories │ │ │ ├── supabase-task-repository.ts │ │ │ └── task-repository.interface.ts │ │ ├── services │ │ │ ├── index.ts │ │ │ ├── organization.service.ts │ │ │ ├── task-execution-service.ts │ │ │ └── task-service.ts │ │ ├── storage │ │ │ ├── api-storage.ts │ │ │ ├── file-storage │ │ │ │ ├── file-operations.ts │ │ │ │ ├── file-storage.ts │ │ │ │ ├── format-handler.ts │ │ │ │ ├── index.ts │ │ │ │ └── path-resolver.ts │ │ │ ├── index.ts │ │ │ └── storage-factory.ts │ │ ├── subpath-exports.test.ts │ │ ├── task-master-core.ts │ │ ├── types │ │ │ ├── database.types.ts │ │ │ ├── index.ts │ │ │ └── legacy.ts │ │ └── utils │ │ ├── id-generator.ts │ │ └── index.ts │ ├── tests │ │ ├── integration │ │ │ └── list-tasks.test.ts │ │ ├── mocks │ │ │ └── mock-provider.ts │ │ ├── setup.ts │ │ └── unit │ │ ├── base-provider.test.ts │ │ ├── executor.test.ts │ │ └── smoke.test.ts │ ├── tsconfig.json │ └── vitest.config.ts ├── README-task-master.md ├── README.md ├── scripts │ ├── dev.js │ ├── init.js │ ├── modules │ │ ├── ai-services-unified.js │ │ ├── commands.js │ │ ├── config-manager.js │ │ ├── dependency-manager.js │ │ ├── index.js │ │ ├── prompt-manager.js │ │ ├── supported-models.json │ │ ├── sync-readme.js │ │ ├── task-manager │ │ │ ├── add-subtask.js │ │ │ ├── add-task.js │ │ │ ├── analyze-task-complexity.js │ │ │ ├── clear-subtasks.js │ │ │ ├── expand-all-tasks.js │ │ │ ├── expand-task.js │ │ │ ├── find-next-task.js │ │ │ ├── generate-task-files.js │ │ │ ├── is-task-dependent.js │ │ │ ├── list-tasks.js │ │ │ ├── migrate.js │ │ │ ├── models.js │ │ │ ├── move-task.js │ │ │ ├── parse-prd │ │ │ │ ├── index.js │ │ │ │ ├── parse-prd-config.js │ │ │ │ ├── parse-prd-helpers.js │ │ │ │ ├── parse-prd-non-streaming.js │ │ │ │ ├── parse-prd-streaming.js │ │ │ │ └── parse-prd.js │ │ │ ├── remove-subtask.js │ │ │ ├── remove-task.js │ │ │ ├── research.js │ │ │ ├── response-language.js │ │ │ ├── scope-adjustment.js │ │ │ ├── set-task-status.js │ │ │ ├── tag-management.js │ │ │ ├── task-exists.js │ │ │ ├── update-single-task-status.js │ │ │ ├── update-subtask-by-id.js │ │ │ ├── update-task-by-id.js │ │ │ └── update-tasks.js │ │ ├── task-manager.js │ │ ├── ui.js │ │ ├── update-config-tokens.js │ │ ├── utils │ │ │ ├── contextGatherer.js │ │ │ ├── fuzzyTaskSearch.js │ │ │ └── git-utils.js │ │ └── utils.js │ ├── task-complexity-report.json │ ├── test-claude-errors.js │ └── test-claude.js ├── src │ ├── ai-providers │ │ ├── anthropic.js │ │ ├── azure.js │ │ ├── base-provider.js │ │ ├── bedrock.js │ │ ├── claude-code.js │ │ ├── custom-sdk │ │ │ ├── claude-code │ │ │ │ ├── errors.js │ │ │ │ ├── index.js │ │ │ │ ├── json-extractor.js │ │ │ │ ├── language-model.js │ │ │ │ ├── message-converter.js │ │ │ │ └── types.js │ │ │ └── grok-cli │ │ │ ├── errors.js │ │ │ ├── index.js │ │ │ ├── json-extractor.js │ │ │ ├── language-model.js │ │ │ ├── message-converter.js │ │ │ └── types.js │ │ ├── gemini-cli.js │ │ ├── google-vertex.js │ │ ├── google.js │ │ ├── grok-cli.js │ │ ├── 
groq.js │ │ ├── index.js │ │ ├── ollama.js │ │ ├── openai.js │ │ ├── openrouter.js │ │ ├── perplexity.js │ │ └── xai.js │ ├── constants │ │ ├── commands.js │ │ ├── paths.js │ │ ├── profiles.js │ │ ├── providers.js │ │ ├── rules-actions.js │ │ ├── task-priority.js │ │ └── task-status.js │ ├── profiles │ │ ├── amp.js │ │ ├── base-profile.js │ │ ├── claude.js │ │ ├── cline.js │ │ ├── codex.js │ │ ├── cursor.js │ │ ├── gemini.js │ │ ├── index.js │ │ ├── kilo.js │ │ ├── kiro.js │ │ ├── opencode.js │ │ ├── roo.js │ │ ├── trae.js │ │ ├── vscode.js │ │ ├── windsurf.js │ │ └── zed.js │ ├── progress │ │ ├── base-progress-tracker.js │ │ ├── cli-progress-factory.js │ │ ├── parse-prd-tracker.js │ │ ├── progress-tracker-builder.js │ │ └── tracker-ui.js │ ├── prompts │ │ ├── add-task.json │ │ ├── analyze-complexity.json │ │ ├── expand-task.json │ │ ├── parse-prd.json │ │ ├── README.md │ │ ├── research.json │ │ ├── schemas │ │ │ ├── parameter.schema.json │ │ │ ├── prompt-template.schema.json │ │ │ ├── README.md │ │ │ └── variant.schema.json │ │ ├── update-subtask.json │ │ ├── update-task.json │ │ └── update-tasks.json │ ├── provider-registry │ │ └── index.js │ ├── task-master.js │ ├── ui │ │ ├── confirm.js │ │ ├── indicators.js │ │ └── parse-prd.js │ └── utils │ ├── asset-resolver.js │ ├── create-mcp-config.js │ ├── format.js │ ├── getVersion.js │ ├── logger-utils.js │ ├── manage-gitignore.js │ ├── path-utils.js │ ├── profiles.js │ ├── rule-transformer.js │ ├── stream-parser.js │ └── timeout-manager.js ├── test-clean-tags.js ├── test-config-manager.js ├── test-prd.txt ├── test-tag-functions.js ├── test-version-check-full.js ├── test-version-check.js ├── tests │ ├── e2e │ │ ├── e2e_helpers.sh │ │ ├── parse_llm_output.cjs │ │ ├── run_e2e.sh │ │ ├── run_fallback_verification.sh │ │ └── test_llm_analysis.sh │ ├── fixture │ │ └── test-tasks.json │ ├── fixtures │ │ ├── .taskmasterconfig │ │ ├── sample-claude-response.js │ │ ├── sample-prd.txt │ │ └── sample-tasks.js │ ├── integration │ │ ├── claude-code-optional.test.js │ │ ├── cli │ │ │ ├── commands.test.js │ │ │ ├── complex-cross-tag-scenarios.test.js │ │ │ └── move-cross-tag.test.js │ │ ├── manage-gitignore.test.js │ │ ├── mcp-server │ │ │ └── direct-functions.test.js │ │ ├── move-task-cross-tag.integration.test.js │ │ ├── move-task-simple.integration.test.js │ │ └── profiles │ │ ├── amp-init-functionality.test.js │ │ ├── claude-init-functionality.test.js │ │ ├── cline-init-functionality.test.js │ │ ├── codex-init-functionality.test.js │ │ ├── cursor-init-functionality.test.js │ │ ├── gemini-init-functionality.test.js │ │ ├── opencode-init-functionality.test.js │ │ ├── roo-files-inclusion.test.js │ │ ├── roo-init-functionality.test.js │ │ ├── rules-files-inclusion.test.js │ │ ├── trae-init-functionality.test.js │ │ ├── vscode-init-functionality.test.js │ │ └── windsurf-init-functionality.test.js │ ├── manual │ │ ├── progress │ │ │ ├── parse-prd-analysis.js │ │ │ ├── test-parse-prd.js │ │ │ └── TESTING_GUIDE.md │ │ └── prompts │ │ ├── prompt-test.js │ │ └── README.md │ ├── README.md │ ├── setup.js │ └── unit │ ├── ai-providers │ │ ├── claude-code.test.js │ │ ├── custom-sdk │ │ │ └── claude-code │ │ │ └── language-model.test.js │ │ ├── gemini-cli.test.js │ │ ├── mcp-components.test.js │ │ └── openai.test.js │ ├── ai-services-unified.test.js │ ├── commands.test.js │ ├── config-manager.test.js │ ├── config-manager.test.mjs │ ├── dependency-manager.test.js │ ├── init.test.js │ ├── initialize-project.test.js │ ├── kebab-case-validation.test.js │ ├── 
manage-gitignore.test.js │ ├── mcp │ │ └── tools │ │ ├── __mocks__ │ │ │ └── move-task.js │ │ ├── add-task.test.js │ │ ├── analyze-complexity.test.js │ │ ├── expand-all.test.js │ │ ├── get-tasks.test.js │ │ ├── initialize-project.test.js │ │ ├── move-task-cross-tag-options.test.js │ │ ├── move-task-cross-tag.test.js │ │ └── remove-task.test.js │ ├── mcp-providers │ │ ├── mcp-components.test.js │ │ └── mcp-provider.test.js │ ├── parse-prd.test.js │ ├── profiles │ │ ├── amp-integration.test.js │ │ ├── claude-integration.test.js │ │ ├── cline-integration.test.js │ │ ├── codex-integration.test.js │ │ ├── cursor-integration.test.js │ │ ├── gemini-integration.test.js │ │ ├── kilo-integration.test.js │ │ ├── kiro-integration.test.js │ │ ├── mcp-config-validation.test.js │ │ ├── opencode-integration.test.js │ │ ├── profile-safety-check.test.js │ │ ├── roo-integration.test.js │ │ ├── rule-transformer-cline.test.js │ │ ├── rule-transformer-cursor.test.js │ │ ├── rule-transformer-gemini.test.js │ │ ├── rule-transformer-kilo.test.js │ │ ├── rule-transformer-kiro.test.js │ │ ├── rule-transformer-opencode.test.js │ │ ├── rule-transformer-roo.test.js │ │ ├── rule-transformer-trae.test.js │ │ ├── rule-transformer-vscode.test.js │ │ ├── rule-transformer-windsurf.test.js │ │ ├── rule-transformer-zed.test.js │ │ ├── rule-transformer.test.js │ │ ├── selective-profile-removal.test.js │ │ ├── subdirectory-support.test.js │ │ ├── trae-integration.test.js │ │ ├── vscode-integration.test.js │ │ ├── windsurf-integration.test.js │ │ └── zed-integration.test.js │ ├── progress │ │ └── base-progress-tracker.test.js │ ├── prompt-manager.test.js │ ├── prompts │ │ └── expand-task-prompt.test.js │ ├── providers │ │ └── provider-registry.test.js │ ├── scripts │ │ └── modules │ │ ├── commands │ │ │ ├── move-cross-tag.test.js │ │ │ └── README.md │ │ ├── dependency-manager │ │ │ ├── circular-dependencies.test.js │ │ │ ├── cross-tag-dependencies.test.js │ │ │ └── fix-dependencies-command.test.js │ │ ├── task-manager │ │ │ ├── add-subtask.test.js │ │ │ ├── add-task.test.js │ │ │ ├── analyze-task-complexity.test.js │ │ │ ├── clear-subtasks.test.js │ │ │ ├── complexity-report-tag-isolation.test.js │ │ │ ├── expand-all-tasks.test.js │ │ │ ├── expand-task.test.js │ │ │ ├── find-next-task.test.js │ │ │ ├── generate-task-files.test.js │ │ │ ├── list-tasks.test.js │ │ │ ├── move-task-cross-tag.test.js │ │ │ ├── move-task.test.js │ │ │ ├── parse-prd.test.js │ │ │ ├── remove-subtask.test.js │ │ │ ├── remove-task.test.js │ │ │ ├── research.test.js │ │ │ ├── scope-adjustment.test.js │ │ │ ├── set-task-status.test.js │ │ │ ├── setup.js │ │ │ ├── update-single-task-status.test.js │ │ │ ├── update-subtask-by-id.test.js │ │ │ ├── update-task-by-id.test.js │ │ │ └── update-tasks.test.js │ │ ├── ui │ │ │ └── cross-tag-error-display.test.js │ │ └── utils-tag-aware-paths.test.js │ ├── task-finder.test.js │ ├── task-manager │ │ ├── clear-subtasks.test.js │ │ ├── move-task.test.js │ │ ├── tag-boundary.test.js │ │ └── tag-management.test.js │ ├── task-master.test.js │ ├── ui │ │ └── indicators.test.js │ ├── ui.test.js │ ├── utils-strip-ansi.test.js │ └── utils.test.js ├── tsconfig.json ├── tsdown.config.ts └── turbo.json ``` # Files -------------------------------------------------------------------------------- /docs/tutorial.md: -------------------------------------------------------------------------------- ```markdown 1 | # Task Master Tutorial 2 | 3 | This tutorial will guide you through setting up and using Task Master for AI-driven 
development. 4 | 5 | ## Initial Setup 6 | 7 | There are two ways to set up Task Master: using MCP (recommended) or via npm installation. 8 | 9 | ### Option 1: Using MCP (Recommended) 10 | 11 | MCP (Model Context Protocol) provides the easiest way to get started with Task Master directly in your editor. 12 | 13 | 1. **Install the package** 14 | 15 | ```bash 16 | npm i -g task-master-ai 17 | ``` 18 | 19 | 2. **Add the MCP config to your IDE/MCP Client** (Cursor is recommended, but it works with other clients): 20 | 21 | ```json 22 | { 23 | "mcpServers": { 24 | "taskmaster-ai": { 25 | "command": "npx", 26 | "args": ["-y", "task-master-ai"], 27 | "env": { 28 | "ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE", 29 | "PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE", 30 | "OPENAI_API_KEY": "YOUR_OPENAI_KEY_HERE", 31 | "GOOGLE_API_KEY": "YOUR_GOOGLE_KEY_HERE", 32 | "MISTRAL_API_KEY": "YOUR_MISTRAL_KEY_HERE", 33 | "OPENROUTER_API_KEY": "YOUR_OPENROUTER_KEY_HERE", 34 | "XAI_API_KEY": "YOUR_XAI_KEY_HERE", 35 | "AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE" 36 | } 37 | } 38 | } 39 | } 40 | ``` 41 | 42 | **IMPORTANT:** An API key is _required_ for each AI provider you plan on using. Run the `task-master models` command to see your selected models and the status of your API keys across .env and mcp.json. 43 | 44 | **To use AI commands in CLI** you MUST have API keys in the .env file. 45 | **To use AI commands in MCP** you MUST have API keys in the .mcp.json file (or MCP config equivalent). 46 | 47 | We recommend having keys in both places and adding mcp.json to your gitignore so your API keys aren't checked into git. 48 | 49 | 3. **Enable the MCP** in your editor settings 50 | 51 | 4. **Prompt the AI** to initialize Task Master: 52 | 53 | ``` 54 | Can you please initialize taskmaster-ai into my project? 55 | ``` 56 | 57 | The AI will: 58 | 59 | - Create necessary project structure 60 | - Set up initial configuration files 61 | - Guide you through the rest of the process 62 | 63 | 5. Place your PRD document in the `.taskmaster/docs/` directory (e.g., `.taskmaster/docs/prd.txt`) 64 | 65 | 6. **Use natural language commands** to interact with Task Master: 66 | 67 | ``` 68 | Can you parse my PRD at .taskmaster/docs/prd.txt? 69 | What's the next task I should work on? 70 | Can you help me implement task 3? 71 | ``` 72 | 73 | ### Option 2: Manual Installation 74 | 75 | If you prefer to use the command line interface directly: 76 | 77 | ```bash 78 | # Install globally 79 | npm install -g task-master-ai 80 | 81 | # OR install locally within your project 82 | npm install task-master-ai 83 | ``` 84 | 85 | Initialize a new project: 86 | 87 | ```bash 88 | # If installed globally 89 | task-master init 90 | 91 | # If installed locally 92 | npx task-master init 93 | ``` 94 | 95 | This will prompt you for project details and set up a new project with the necessary files and structure. 96 | 97 | ## Common Commands 98 | 99 | After setting up Task Master, you can use these commands (either via AI prompts or CLI): 100 | 101 | ```bash 102 | # Parse a PRD and generate tasks 103 | task-master parse-prd your-prd.txt 104 | 105 | # List all tasks 106 | task-master list 107 | 108 | # Show the next task to work on 109 | task-master next 110 | 111 | # Generate task files 112 | task-master generate 113 | ``` 114 | 115 | ## Setting up Cursor AI Integration 116 | 117 | Task Master is designed to work seamlessly with [Cursor AI](https://www.cursor.so/), providing a structured workflow for AI-driven development.
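Cursor can pick up this MCP server either from its settings UI (see "Alternative MCP Setup in Cursor" below) or from a project-level `.cursor/mcp.json` file, the same file you can see in this repository's own `.cursor/` directory. The snippet below is a minimal sketch that reuses the config shown earlier; keep only the API keys for the providers you actually use.

```json
{
  "mcpServers": {
    "taskmaster-ai": {
      "command": "npx",
      "args": ["-y", "task-master-ai"],
      "env": {
        "ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
        "PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE"
      }
    }
  }
}
```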
118 | 119 | ### Using Cursor with MCP (Recommended) 120 | 121 | If you've already set up Task Master with MCP in Cursor, the integration is automatic. You can simply use natural language to interact with Task Master: 122 | 123 | ``` 124 | What tasks are available to work on next? 125 | Can you analyze the complexity of our tasks? 126 | I'd like to implement task 4. What does it involve? 127 | ``` 128 | 129 | ### Manual Cursor Setup 130 | 131 | If you're not using MCP, you can still set up Cursor integration: 132 | 133 | 1. After initializing your project, open it in Cursor 134 | 2. The `.cursor/rules/dev_workflow.mdc` file is automatically loaded by Cursor, providing the AI with knowledge about the task management system 135 | 3. Place your PRD document in the `.taskmaster/docs/` directory (e.g., `.taskmaster/docs/prd.txt`) 136 | 4. Open Cursor's AI chat and switch to Agent mode 137 | 138 | ### Alternative MCP Setup in Cursor 139 | 140 | You can also set up the MCP server in Cursor settings: 141 | 142 | 1. Go to Cursor settings 143 | 2. Navigate to the MCP section 144 | 3. Click on "Add New MCP Server" 145 | 4. Configure with the following details: 146 | - Name: "Task Master" 147 | - Type: "Command" 148 | - Command: "npx -y task-master-ai" 149 | 5. Save the settings 150 | 151 | Once configured, you can interact with Task Master's task management commands directly through Cursor's interface, providing a more integrated experience. 152 | 153 | ## Initial Task Generation 154 | 155 | In Cursor's AI chat, instruct the agent to generate tasks from your PRD: 156 | 157 | ``` 158 | Please use the task-master parse-prd command to generate tasks from my PRD. The PRD is located at .taskmaster/docs/prd.txt. 159 | ``` 160 | 161 | The agent will execute: 162 | 163 | ```bash 164 | task-master parse-prd .taskmaster/docs/prd.txt 165 | ``` 166 | 167 | This will: 168 | 169 | - Parse your PRD document 170 | - Generate a structured `tasks.json` file with tasks, dependencies, priorities, and test strategies 171 | - The agent will understand this process due to the Cursor rules 172 | 173 | ### Generate Individual Task Files 174 | 175 | Next, ask the agent to generate individual task files: 176 | 177 | ``` 178 | Please generate individual task files from tasks.json 179 | ``` 180 | 181 | The agent will execute: 182 | 183 | ```bash 184 | task-master generate 185 | ``` 186 | 187 | This creates individual task files in the `tasks/` directory (e.g., `task_001.txt`, `task_002.txt`), making it easier to reference specific tasks. 188 | 189 | ## AI-Driven Development Workflow 190 | 191 | The Cursor agent is pre-configured (via the rules file) to follow this workflow: 192 | 193 | ### 1. Task Discovery and Selection 194 | 195 | Ask the agent to list available tasks: 196 | 197 | ``` 198 | What tasks are available to work on next? 199 | ``` 200 | 201 | ``` 202 | Can you show me tasks 1, 3, and 5 to understand their current status? 203 | ``` 204 | 205 | The agent will: 206 | 207 | - Run `task-master list` to see all tasks 208 | - Run `task-master next` to determine the next task to work on 209 | - Run `task-master show 1,3,5` to display multiple tasks with interactive options 210 | - Analyze dependencies to determine which tasks are ready to be worked on 211 | - Prioritize tasks based on priority level and ID order 212 | - Suggest the next task(s) to implement 213 | 214 | ### 2. 
Task Implementation 215 | 216 | When implementing a task, the agent will: 217 | 218 | - Reference the task's details section for implementation specifics 219 | - Consider dependencies on previous tasks 220 | - Follow the project's coding standards 221 | - Create appropriate tests based on the task's testStrategy 222 | 223 | You can ask: 224 | 225 | ``` 226 | Let's implement task 3. What does it involve? 227 | ``` 228 | 229 | ### 2.1. Viewing Multiple Tasks 230 | 231 | For efficient context gathering and batch operations: 232 | 233 | ``` 234 | Show me tasks 5, 7, and 9 so I can plan my implementation approach. 235 | ``` 236 | 237 | The agent will: 238 | 239 | - Run `task-master show 5,7,9` to display a compact summary table 240 | - Show task status, priority, and progress indicators 241 | - Provide an interactive action menu with batch operations 242 | - Allow you to perform group actions like marking multiple tasks as in-progress 243 | 244 | ### 3. Task Verification 245 | 246 | Before marking a task as complete, verify it according to: 247 | 248 | - The task's specified testStrategy 249 | - Any automated tests in the codebase 250 | - Manual verification if required 251 | 252 | ### 4. Task Completion 253 | 254 | When a task is completed, tell the agent: 255 | 256 | ``` 257 | Task 3 is now complete. Please update its status. 258 | ``` 259 | 260 | The agent will execute: 261 | 262 | ```bash 263 | task-master set-status --id=3 --status=done 264 | ``` 265 | 266 | ### 5. Handling Implementation Drift 267 | 268 | If during implementation, you discover that: 269 | 270 | - The current approach differs significantly from what was planned 271 | - Future tasks need to be modified due to current implementation choices 272 | - New dependencies or requirements have emerged 273 | 274 | Tell the agent: 275 | 276 | ``` 277 | We've decided to use MongoDB instead of PostgreSQL. Can you update all future tasks (from ID 4) to reflect this change? 278 | ``` 279 | 280 | The agent will execute: 281 | 282 | ```bash 283 | task-master update --from=4 --prompt="Now we are using MongoDB instead of PostgreSQL." 284 | 285 | # OR, if research is needed to find best practices for MongoDB: 286 | task-master update --from=4 --prompt="Update to use MongoDB, researching best practices" --research 287 | ``` 288 | 289 | This will rewrite or re-scope subsequent tasks in tasks.json while preserving completed work. 290 | 291 | ### 6. Reorganizing Tasks 292 | 293 | If you need to reorganize your task structure: 294 | 295 | ``` 296 | I think subtask 5.2 would fit better as part of task 7 instead. Can you move it there? 
297 | ``` 298 | 299 | The agent will execute: 300 | 301 | ```bash 302 | task-master move --from=5.2 --to=7.3 303 | ``` 304 | 305 | You can reorganize tasks in various ways: 306 | 307 | - Moving a standalone task to become a subtask: `--from=5 --to=7` 308 | - Moving a subtask to become a standalone task: `--from=5.2 --to=7` 309 | - Moving a subtask to a different parent: `--from=5.2 --to=7.3` 310 | - Reordering subtasks within the same parent: `--from=5.2 --to=5.4` 311 | - Moving a task to a new ID position: `--from=5 --to=25` (even if task 25 doesn't exist yet) 312 | - Moving multiple tasks at once: `--from=10,11,12 --to=16,17,18` (must have same number of IDs, Taskmaster will look through each position) 313 | 314 | When moving tasks to new IDs: 315 | 316 | - The system automatically creates placeholder tasks for non-existent destination IDs 317 | - This prevents accidental data loss during reorganization 318 | - Any tasks that depend on moved tasks will have their dependencies updated 319 | - When moving a parent task, all its subtasks are automatically moved with it and renumbered 320 | 321 | This is particularly useful as your project understanding evolves and you need to refine your task structure. 322 | 323 | ### 7. Resolving Merge Conflicts with Tasks 324 | 325 | When working with a team, you might encounter merge conflicts in your tasks.json file if multiple team members create tasks on different branches. The move command makes resolving these conflicts straightforward: 326 | 327 | ``` 328 | I just merged the main branch and there's a conflict with tasks.json. My teammates created tasks 10-15 while I created tasks 10-12 on my branch. Can you help me resolve this? 329 | ``` 330 | 331 | The agent will help you: 332 | 333 | 1. Keep your teammates' tasks (10-15) 334 | 2. Move your tasks to new positions to avoid conflicts: 335 | 336 | ```bash 337 | # Move your tasks to new positions (e.g., 16-18) 338 | task-master move --from=10 --to=16 339 | task-master move --from=11 --to=17 340 | task-master move --from=12 --to=18 341 | ``` 342 | 343 | This approach preserves everyone's work while maintaining a clean task structure, making it much easier to handle task conflicts than trying to manually merge JSON files. 344 | 345 | ### 8. Breaking Down Complex Tasks 346 | 347 | For complex tasks that need more granularity: 348 | 349 | ``` 350 | Task 5 seems complex. Can you break it down into subtasks? 351 | ``` 352 | 353 | The agent will execute: 354 | 355 | ```bash 356 | task-master expand --id=5 --num=3 357 | ``` 358 | 359 | You can provide additional context: 360 | 361 | ``` 362 | Please break down task 5 with a focus on security considerations. 363 | ``` 364 | 365 | The agent will execute: 366 | 367 | ```bash 368 | task-master expand --id=5 --prompt="Focus on security aspects" 369 | ``` 370 | 371 | You can also expand all pending tasks: 372 | 373 | ``` 374 | Please break down all pending tasks into subtasks. 375 | ``` 376 | 377 | The agent will execute: 378 | 379 | ```bash 380 | task-master expand --all 381 | ``` 382 | 383 | For research-backed subtask generation using the configured research model: 384 | 385 | ``` 386 | Please break down task 5 using research-backed generation. 387 | ``` 388 | 389 | The agent will execute: 390 | 391 | ```bash 392 | task-master expand --id=5 --research 393 | ``` 394 | 395 | ## Example Cursor AI Interactions 396 | 397 | ### Starting a new project 398 | 399 | ``` 400 | I've just initialized a new project with Claude Task Master. 
I have a PRD at .taskmaster/docs/prd.txt. 401 | Can you help me parse it and set up the initial tasks? 402 | ``` 403 | 404 | ### Working on tasks 405 | 406 | ``` 407 | What's the next task I should work on? Please consider dependencies and priorities. 408 | ``` 409 | 410 | ### Implementing a specific task 411 | 412 | ``` 413 | I'd like to implement task 4. Can you help me understand what needs to be done and how to approach it? 414 | ``` 415 | 416 | ### Managing subtasks 417 | 418 | ``` 419 | I need to regenerate the subtasks for task 3 with a different approach. Can you help me clear and regenerate them? 420 | ``` 421 | 422 | ### Handling changes 423 | 424 | ``` 425 | We've decided to use MongoDB instead of PostgreSQL. Can you update all future tasks to reflect this change? 426 | ``` 427 | 428 | ### Completing work 429 | 430 | ``` 431 | I've finished implementing the authentication system described in task 2. All tests are passing. 432 | Please mark it as complete and tell me what I should work on next. 433 | ``` 434 | 435 | ### Analyzing complexity 436 | 437 | ``` 438 | Can you analyze the complexity of our tasks to help me understand which ones need to be broken down further? 439 | ``` 440 | 441 | ### Viewing complexity report 442 | 443 | ``` 444 | Can you show me the complexity report in a more readable format? 445 | ``` 446 | 447 | ### Research-Driven Development 448 | 449 | Task Master includes a powerful research tool that provides fresh, up-to-date information beyond the AI's knowledge cutoff. This is particularly valuable for: 450 | 451 | #### Getting Current Best Practices 452 | 453 | ``` 454 | Before implementing task 5 (authentication), research the latest JWT security recommendations. 455 | ``` 456 | 457 | The agent will execute: 458 | 459 | ```bash 460 | task-master research "Latest JWT security recommendations 2024" --id=5 461 | ``` 462 | 463 | #### Research with Project Context 464 | 465 | ``` 466 | Research React Query v5 migration strategies for our current API implementation. 467 | ``` 468 | 469 | The agent will execute: 470 | 471 | ```bash 472 | task-master research "React Query v5 migration strategies" --files=src/api.js,src/hooks.js 473 | ``` 474 | 475 | #### Research and Update Pattern 476 | 477 | A powerful workflow is to research first, then update tasks with findings: 478 | 479 | ``` 480 | Research the latest Node.js performance optimization techniques and update task 12 with the findings. 481 | ``` 482 | 483 | The agent will: 484 | 485 | 1. Run research: `task-master research "Node.js performance optimization 2024" --id=12` 486 | 2. Update the task: `task-master update-subtask --id=12.2 --prompt="Updated with latest performance findings: [research results]"` 487 | 488 | #### When to Use Research 489 | 490 | - **Before implementing any new technology** 491 | - **When encountering security-related tasks** 492 | - **For performance optimization tasks** 493 | - **When debugging complex issues** 494 | - **Before making architectural decisions** 495 | - **When updating dependencies** 496 | 497 | The research tool automatically includes relevant project context and provides fresh information that can significantly improve implementation quality. 498 | 499 | ## Git Integration and Tag Management 500 | 501 | Task Master supports tagged task lists for multi-context development, which is particularly useful when working with git branches or different project phases. 
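Before looking at the individual commands, here is the branch-plus-tag flow this section describes, condensed into one sketch. Branch and tag names are illustrative, and the default tag context is assumed to be named `master`; each command is explained in more detail below.

```bash
# Create a feature branch and a matching tag context
git checkout -b feature/user-auth
task-master add-tag --from-branch

# Work against that context, then switch back to the default when needed
task-master list
task-master use-tag master

# After the branch is merged, optionally remove the feature tag
task-master delete-tag user-auth
```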
502 | 503 | ### Working with Tags 504 | 505 | Tags provide isolated task contexts, allowing you to maintain separate task lists for different features, branches, or experiments: 506 | 507 | ``` 508 | I'm starting work on a new feature branch. Can you create a new tag for this work? 509 | ``` 510 | 511 | The agent will execute: 512 | 513 | ```bash 514 | # Create a tag based on your current git branch 515 | task-master add-tag --from-branch 516 | ``` 517 | 518 | Or you can create a tag with a specific name: 519 | 520 | ``` 521 | Create a new tag called 'user-auth' for authentication-related tasks. 522 | ``` 523 | 524 | The agent will execute: 525 | 526 | ```bash 527 | task-master add-tag user-auth --description="User authentication feature tasks" 528 | ``` 529 | 530 | ### Switching Between Contexts 531 | 532 | When working on different features or branches: 533 | 534 | ``` 535 | Switch to the 'user-auth' tag context so I can work on authentication tasks. 536 | ``` 537 | 538 | The agent will execute: 539 | 540 | ```bash 541 | task-master use-tag user-auth 542 | ``` 543 | 544 | ### Copying Tasks Between Tags 545 | 546 | When you need to duplicate work across contexts: 547 | 548 | ``` 549 | Copy all tasks from the current tag to a new 'testing' tag for QA work. 550 | ``` 551 | 552 | The agent will execute: 553 | 554 | ```bash 555 | task-master add-tag testing --copy-from-current --description="QA and testing tasks" 556 | ``` 557 | 558 | ### Tag Management 559 | 560 | View and manage your tag contexts: 561 | 562 | ``` 563 | Show me all available tags and their current status. 564 | ``` 565 | 566 | The agent will execute: 567 | 568 | ```bash 569 | task-master tags --show-metadata 570 | ``` 571 | 572 | ### Benefits of Tagged Task Lists 573 | 574 | - **Branch Isolation**: Each git branch can have its own task context 575 | - **Merge Conflict Prevention**: Tasks in different tags don't interfere with each other 576 | - **Parallel Development**: Multiple team members can work on separate contexts 577 | - **Context Switching**: Easily switch between different project phases or features 578 | - **Experimentation**: Create experimental task lists without affecting main work 579 | 580 | ### Git Workflow Integration 581 | 582 | A typical git workflow with Task Master tags: 583 | 584 | 1. **Create feature branch**: `git checkout -b feature/user-auth` 585 | 2. **Create matching tag**: Ask agent to run `task-master add-tag --from-branch` 586 | 3. **Work in isolated context**: All task operations work within the new tag 587 | 4. **Switch contexts as needed**: Use `task-master use-tag <name>` to switch between different work streams 588 | 5. **Merge and cleanup**: After merging the branch, optionally delete the tag with `task-master delete-tag <name>` 589 | 590 | This workflow ensures your task management stays organized and conflicts are minimized when working with teams or multiple features simultaneously. 591 | ``` -------------------------------------------------------------------------------- /.taskmaster/docs/prd.txt: -------------------------------------------------------------------------------- ``` 1 | 2 | # Claude Task Master - Product Requirements Document 3 | 4 | <PRD> 5 | # Technical Architecture 6 | 7 | ## System Components 8 | 1. **Task Management Core** 9 | - Tasks.json file structure (single source of truth) 10 | - Task model with dependencies, priorities, and metadata 11 | - Task state management system 12 | - Task file generation subsystem 13 | 14 | 2. 
**AI Integration Layer** 15 | - Anthropic Claude API integration 16 | - Perplexity API integration (optional) 17 | - Prompt engineering components 18 | - Response parsing and processing 19 | 20 | 3. **Command Line Interface** 21 | - Command parsing and execution 22 | - Interactive user input handling 23 | - Display and formatting utilities 24 | - Status reporting and feedback system 25 | 26 | 4. **Cursor AI Integration** 27 | - Cursor rules documentation 28 | - Agent interaction patterns 29 | - Workflow guideline specifications 30 | 31 | ## Data Models 32 | 33 | ### Task Model 34 | ```json 35 | { 36 | "id": 1, 37 | "title": "Task Title", 38 | "description": "Brief task description", 39 | "status": "pending|done|deferred", 40 | "dependencies": [0], 41 | "priority": "high|medium|low", 42 | "details": "Detailed implementation instructions", 43 | "testStrategy": "Verification approach details", 44 | "subtasks": [ 45 | { 46 | "id": 1, 47 | "title": "Subtask Title", 48 | "description": "Subtask description", 49 | "status": "pending|done|deferred", 50 | "dependencies": [], 51 | "acceptanceCriteria": "Verification criteria" 52 | } 53 | ] 54 | } 55 | ``` 56 | 57 | ### Tasks Collection Model 58 | ```json 59 | { 60 | "meta": { 61 | "projectName": "Project Name", 62 | "version": "1.0.0", 63 | "prdSource": "path/to/prd.txt", 64 | "createdAt": "ISO-8601 timestamp", 65 | "updatedAt": "ISO-8601 timestamp" 66 | }, 67 | "tasks": [ 68 | // Array of Task objects 69 | ] 70 | } 71 | ``` 72 | 73 | ### Task File Format 74 | ``` 75 | # Task ID: <id> 76 | # Title: <title> 77 | # Status: <status> 78 | # Dependencies: <comma-separated list of dependency IDs> 79 | # Priority: <priority> 80 | # Description: <brief description> 81 | # Details: 82 | <detailed implementation notes> 83 | 84 | # Test Strategy: 85 | <verification approach> 86 | 87 | # Subtasks: 88 | 1. <subtask title> - <subtask description> 89 | ``` 90 | 91 | ## APIs and Integrations 92 | 1. **Anthropic Claude API** 93 | - Authentication via API key 94 | - Prompt construction and streaming 95 | - Response parsing and extraction 96 | - Error handling and retries 97 | 98 | 2. **Perplexity API (via OpenAI client)** 99 | - Authentication via API key 100 | - Research-oriented prompt construction 101 | - Enhanced contextual response handling 102 | - Fallback mechanisms to Claude 103 | 104 | 3. **File System API** 105 | - Reading/writing tasks.json 106 | - Managing individual task files 107 | - Command execution logging 108 | - Debug logging system 109 | 110 | ## Infrastructure Requirements 111 | 1. **Node.js Runtime** 112 | - Version 14.0.0 or higher 113 | - ES Module support 114 | - File system access rights 115 | - Command execution capabilities 116 | 117 | 2. **Configuration Management** 118 | - Environment variable handling 119 | - .env file support 120 | - Configuration validation 121 | - Sensible defaults with overrides 122 | 123 | 3. **Development Environment** 124 | - Git repository 125 | - NPM package management 126 | - Cursor editor integration 127 | - Command-line terminal access 128 | 129 | # Development Roadmap 130 | 131 | ## Phase 1: Core Task Management System 132 | 1. **Task Data Structure** 133 | - Design and implement the tasks.json structure 134 | - Create task model validation 135 | - Implement basic task operations (create, read, update) 136 | - Develop file system interactions 137 | 138 | 2. 
**Command Line Interface Foundation** 139 | - Implement command parsing with Commander.js 140 | - Create help documentation 141 | - Implement colorized console output 142 | - Add logging system with configurable levels 143 | 144 | 3. **Basic Task Operations** 145 | - Implement task listing functionality 146 | - Create task status update capability 147 | - Add dependency tracking 148 | - Implement priority management 149 | 150 | 4. **Task File Generation** 151 | - Create task file templates 152 | - Implement generation from tasks.json 153 | - Add bi-directional synchronization 154 | - Implement proper file naming and organization 155 | 156 | ## Phase 2: AI Integration 157 | 1. **Claude API Integration** 158 | - Implement API authentication 159 | - Create prompt templates for PRD parsing 160 | - Design response handlers 161 | - Add error management and retries 162 | 163 | 2. **PRD Parsing System** 164 | - Implement PRD file reading 165 | - Create PRD to task conversion logic 166 | - Add intelligent dependency inference 167 | - Implement priority assignment logic 168 | 169 | 3. **Task Expansion With Claude** 170 | - Create subtask generation prompts 171 | - Implement subtask creation workflow 172 | - Add context-aware expansion capabilities 173 | - Implement parent-child relationship management 174 | 175 | 4. **Implementation Drift Handling** 176 | - Add capability to update future tasks 177 | - Implement task rewriting based on new context 178 | - Create dependency chain updates 179 | - Preserve completed work while updating future tasks 180 | 181 | ## Phase 3: Advanced Features 182 | 1. **Perplexity Integration** 183 | - Implement Perplexity API authentication 184 | - Create research-oriented prompts 185 | - Add fallback to Claude when unavailable 186 | - Implement response quality comparison logic 187 | 188 | 2. **Research-Backed Subtask Generation** 189 | - Create specialized research prompts 190 | - Implement context enrichment 191 | - Add domain-specific knowledge incorporation 192 | - Create more detailed subtask generation 193 | 194 | 3. **Batch Operations** 195 | - Implement multi-task status updates 196 | - Add bulk subtask generation 197 | - Create task filtering and querying 198 | - Implement advanced dependency management 199 | 200 | 4. **Project Initialization** 201 | - Create project templating system 202 | - Implement interactive setup 203 | - Add environment configuration 204 | - Create documentation generation 205 | 206 | ## Phase 4: Cursor AI Integration 207 | 1. **Cursor Rules Implementation** 208 | - Create dev_workflow.mdc documentation 209 | - Implement cursor_rules.mdc 210 | - Add self_improve.mdc 211 | - Design rule integration documentation 212 | 213 | 2. **Agent Workflow Guidelines** 214 | - Document task discovery workflow 215 | - Create task selection guidelines 216 | - Implement implementation guidance 217 | - Add verification procedures 218 | 219 | 3. **Agent Command Integration** 220 | - Document command syntax for agents 221 | - Create example interactions 222 | - Implement agent response patterns 223 | - Add context management for agents 224 | 225 | 4. **User Documentation** 226 | - Create detailed README 227 | - Add scripts documentation 228 | - Implement example workflows 229 | - Create troubleshooting guides 230 | 231 | # Logical Dependency Chain 232 | 233 | ## Foundation Layer 234 | 1. 
**Task Data Structure** 235 | - Must be implemented first as all other functionality depends on this 236 | - Defines the core data model for the entire system 237 | - Establishes the single source of truth concept 238 | 239 | 2. **Command Line Interface** 240 | - Built on top of the task data structure 241 | - Provides the primary user interaction mechanism 242 | - Required for all subsequent operations to be accessible 243 | 244 | 3. **Basic Task Operations** 245 | - Depends on both task data structure and CLI 246 | - Provides the fundamental operations for task management 247 | - Enables the minimal viable workflow 248 | 249 | ## Functional Layer 250 | 4. **Task File Generation** 251 | - Depends on task data structure and basic operations 252 | - Creates the individual task files for reference 253 | - Enables the file-based workflow complementing tasks.json 254 | 255 | 5. **Claude API Integration** 256 | - Independent of most previous components but needs the task data structure 257 | - Provides the AI capabilities that enhance the system 258 | - Gateway to advanced task generation features 259 | 260 | 6. **PRD Parsing System** 261 | - Depends on Claude API integration and task data structure 262 | - Enables the initial task generation workflow 263 | - Creates the starting point for new projects 264 | 265 | ## Enhancement Layer 266 | 7. **Task Expansion With Claude** 267 | - Depends on Claude API integration and basic task operations 268 | - Enhances existing tasks with more detailed subtasks 269 | - Improves the implementation guidance 270 | 271 | 8. **Implementation Drift Handling** 272 | - Depends on Claude API integration and task operations 273 | - Addresses a key challenge in AI-driven development 274 | - Maintains the relevance of task planning as implementation evolves 275 | 276 | 9. **Perplexity Integration** 277 | - Can be developed in parallel with other features after Claude integration 278 | - Enhances the quality of generated content 279 | - Provides research-backed improvements 280 | 281 | ## Advanced Layer 282 | 10. **Research-Backed Subtask Generation** 283 | - Depends on Perplexity integration and task expansion 284 | - Provides higher quality, more contextual subtasks 285 | - Enhances the value of the task breakdown 286 | 287 | 11. **Batch Operations** 288 | - Depends on basic task operations 289 | - Improves efficiency for managing multiple tasks 290 | - Quality-of-life enhancement for larger projects 291 | 292 | 12. **Project Initialization** 293 | - Depends on most previous components being stable 294 | - Provides a smooth onboarding experience 295 | - Creates a complete project setup in one step 296 | 297 | ## Integration Layer 298 | 13. **Cursor Rules Implementation** 299 | - Can be developed in parallel after basic functionality 300 | - Provides the guidance for Cursor AI agent 301 | - Enhances the AI-driven workflow 302 | 303 | 14. **Agent Workflow Guidelines** 304 | - Depends on Cursor rules implementation 305 | - Structures how the agent interacts with the system 306 | - Ensures consistent agent behavior 307 | 308 | 15. **Agent Command Integration** 309 | - Depends on agent workflow guidelines 310 | - Provides specific command patterns for the agent 311 | - Optimizes the agent-user interaction 312 | 313 | 16. 
**User Documentation** 314 | - Should be developed alongside all features 315 | - Must be completed before release 316 | - Ensures users can effectively use the system 317 | 318 | # Risks and Mitigations 319 | 320 | ## Technical Challenges 321 | 322 | ### API Reliability 323 | **Risk**: Anthropic or Perplexity API could have downtime, rate limiting, or breaking changes. 324 | **Mitigation**: 325 | - Implement robust error handling with exponential backoff 326 | - Add fallback mechanisms (Claude fallback for Perplexity) 327 | - Cache important responses to reduce API dependency 328 | - Support offline mode for critical functions 329 | 330 | ### Model Output Variability 331 | **Risk**: AI models may produce inconsistent or unexpected outputs. 332 | **Mitigation**: 333 | - Design robust prompt templates with strict output formatting requirements 334 | - Implement response validation and error detection 335 | - Add self-correction mechanisms and retries with improved prompts 336 | - Allow manual editing of generated content 337 | 338 | ### Node.js Version Compatibility 339 | **Risk**: Differences in Node.js versions could cause unexpected behavior. 340 | **Mitigation**: 341 | - Clearly document minimum Node.js version requirements 342 | - Use transpilers if needed for compatibility 343 | - Test across multiple Node.js versions 344 | - Handle version-specific features gracefully 345 | 346 | ## MVP Definition 347 | 348 | ### Feature Prioritization 349 | **Risk**: Including too many features in the MVP could delay release and adoption. 350 | **Mitigation**: 351 | - Define MVP as core task management + basic Claude integration 352 | - Ensure each phase delivers a complete, usable product 353 | - Implement feature flags for easy enabling/disabling of features 354 | - Get early user feedback to validate feature importance 355 | 356 | ### Scope Creep 357 | **Risk**: The project could expand beyond its original intent, becoming too complex. 358 | **Mitigation**: 359 | - Maintain a strict definition of what the tool is and isn't 360 | - Focus on task management for AI-driven development 361 | - Evaluate new features against core value proposition 362 | - Implement extensibility rather than building every feature 363 | 364 | ### User Expectations 365 | **Risk**: Users might expect a full project management solution rather than a task tracking system. 366 | **Mitigation**: 367 | - Clearly communicate the tool's purpose and limitations 368 | - Provide integration points with existing project management tools 369 | - Focus on the unique value of AI-driven development 370 | - Document specific use cases and example workflows 371 | 372 | ## Resource Constraints 373 | 374 | ### Development Capacity 375 | **Risk**: Limited development resources could delay implementation. 376 | **Mitigation**: 377 | - Phase implementation to deliver value incrementally 378 | - Focus on core functionality first 379 | - Leverage open source libraries where possible 380 | - Design for extensibility to allow community contributions 381 | 382 | ### AI Cost Management 383 | **Risk**: Excessive API usage could lead to high costs. 
384 | **Mitigation**: 385 | - Implement token usage tracking and reporting 386 | - Add configurable limits to prevent unexpected costs 387 | - Cache responses where appropriate 388 | - Optimize prompts for token efficiency 389 | - Support local LLM options in the future 390 | 391 | ### Documentation Overhead 392 | **Risk**: Complexity of the system requires extensive documentation that is time-consuming to maintain. 393 | **Mitigation**: 394 | - Use AI to help generate and maintain documentation 395 | - Create self-documenting commands and features 396 | - Implement progressive documentation (basic to advanced) 397 | - Build help directly into the CLI 398 | 399 | # Appendix 400 | 401 | ## AI Prompt Engineering Specifications 402 | 403 | ### PRD Parsing Prompt Structure 404 | ``` 405 | You are assisting with transforming a Product Requirements Document (PRD) into a structured set of development tasks. 406 | 407 | Given the following PRD, create a comprehensive list of development tasks that would be needed to implement the described product. 408 | 409 | For each task: 410 | 1. Assign a short, descriptive title 411 | 2. Write a concise description 412 | 3. Identify dependencies (which tasks must be completed before this one) 413 | 4. Assign a priority (high, medium, low) 414 | 5. Include detailed implementation notes 415 | 6. Describe a test strategy to verify completion 416 | 417 | Structure the tasks in a logical order of implementation. 418 | 419 | PRD: 420 | {prd_content} 421 | ``` 422 | 423 | ### Task Expansion Prompt Structure 424 | ``` 425 | You are helping to break down a development task into more manageable subtasks. 426 | 427 | Main task: 428 | Title: {task_title} 429 | Description: {task_description} 430 | Details: {task_details} 431 | 432 | Please create {num_subtasks} specific subtasks that together would accomplish this main task. 433 | 434 | For each subtask, provide: 435 | 1. A clear, actionable title 436 | 2. A concise description 437 | 3. Any dependencies on other subtasks 438 | 4. Specific acceptance criteria to verify completion 439 | 440 | Additional context: 441 | {additional_context} 442 | ``` 443 | 444 | ### Research-Backed Expansion Prompt Structure 445 | ``` 446 | You are a technical researcher and developer helping to break down a software development task into detailed, well-researched subtasks. 447 | 448 | Main task: 449 | Title: {task_title} 450 | Description: {task_description} 451 | Details: {task_details} 452 | 453 | Research the latest best practices, technologies, and implementation patterns for this type of task. Then create {num_subtasks} specific, actionable subtasks that together would accomplish the main task. 454 | 455 | For each subtask: 456 | 1. Provide a clear, specific title 457 | 2. Write a detailed description including technical approach 458 | 3. Identify dependencies on other subtasks 459 | 4. Include specific acceptance criteria 460 | 5. Reference any relevant libraries, tools, or resources that should be used 461 | 462 | Consider security, performance, maintainability, and user experience in your recommendations. 463 | ``` 464 | 465 | ## Task File System Specification 466 | 467 | ### Directory Structure 468 | ``` 469 | / 470 | ├── .cursor/ 471 | │ └── rules/ 472 | │ ├── dev_workflow.mdc 473 | │ ├── cursor_rules.mdc 474 | │ └── self_improve.mdc 475 | ├── scripts/ 476 | │ ├── dev.js 477 | │ └── README.md 478 | ├── tasks/ 479 | │ ├── task_001.txt 480 | │ ├── task_002.txt 481 | │ └── ... 
482 | ├── .env 483 | ├── .env.example 484 | ├── .gitignore 485 | ├── package.json 486 | ├── README.md 487 | └── tasks.json 488 | ``` 489 | 490 | ### Task ID Specification 491 | - Main tasks: Sequential integers (1, 2, 3, ...) 492 | - Subtasks: Parent ID + dot + sequential integer (1.1, 1.2, 2.1, ...) 493 | - ID references: Used in dependencies, command parameters 494 | - ID ordering: Implies suggested implementation order 495 | 496 | ## Command-Line Interface Specification 497 | 498 | ### Global Options 499 | - `--help`: Display help information 500 | - `--version`: Display version information 501 | - `--file=<file>`: Specify an alternative tasks.json file 502 | - `--quiet`: Reduce output verbosity 503 | - `--debug`: Increase output verbosity 504 | - `--json`: Output in JSON format (for programmatic use) 505 | 506 | ### Command Structure 507 | - `node scripts/dev.js <command> [options]` 508 | - All commands operate on tasks.json by default 509 | - Commands follow consistent parameter naming 510 | - Common parameter styles: `--id=<id>`, `--status=<status>`, `--prompt="<text>"` 511 | - Boolean flags: `--all`, `--force`, `--with-subtasks` 512 | 513 | ## API Integration Specifications 514 | 515 | ### Anthropic API Configuration 516 | - Authentication: ANTHROPIC_API_KEY environment variable 517 | - Model selection: MODEL environment variable 518 | - Default model: claude-3-7-sonnet-20250219 519 | - Maximum tokens: MAX_TOKENS environment variable (default: 4000) 520 | - Temperature: TEMPERATURE environment variable (default: 0.7) 521 | 522 | ### Perplexity API Configuration 523 | - Authentication: PERPLEXITY_API_KEY environment variable 524 | - Model selection: PERPLEXITY_MODEL environment variable 525 | - Default model: sonar-medium-online 526 | - Connection: Via OpenAI client 527 | - Fallback: Use Claude if Perplexity unavailable 528 | </PRD> 529 | ``` -------------------------------------------------------------------------------- /assets/scripts_README.md: -------------------------------------------------------------------------------- ```markdown 1 | # Meta-Development Script 2 | 3 | This folder contains a **meta-development script** (`dev.js`) and related utilities that manage tasks for an AI-driven or traditional software development workflow. The script revolves around a `tasks.json` file, which holds an up-to-date list of development tasks. 4 | 5 | ## Overview 6 | 7 | In an AI-driven development process—particularly with tools like [Cursor](https://www.cursor.so/)—it's beneficial to have a **single source of truth** for tasks. This script allows you to: 8 | 9 | 1. **Parse** a PRD or requirements document (`.txt`) to initialize a set of tasks (`tasks.json`). 10 | 2. **List** all existing tasks (IDs, statuses, titles). 11 | 3. **Update** tasks to accommodate new prompts or architecture changes (useful if you discover "implementation drift"). 12 | 4. **Generate** individual task files (e.g., `task_001.txt`) for easy reference or to feed into an AI coding workflow. 13 | 5. **Set task status**—mark tasks as `done`, `pending`, or `deferred` based on progress. 14 | 6. **Expand** tasks with subtasks—break down complex tasks into smaller, more manageable subtasks. 15 | 7. **Research-backed subtask generation**—use Perplexity AI to generate more informed and contextually relevant subtasks. 16 | 8. **Clear subtasks**—remove subtasks from specified tasks to allow regeneration or restructuring. 17 | 9. **Show task details**—display detailed information about a specific task and its subtasks. 
18 | 19 | ## Configuration (Updated) 20 | 21 | Task Master configuration is now managed through two primary methods: 22 | 23 | 1. **`.taskmaster/config.json` File (Project Root - Primary)** 24 | 25 | - Stores AI model selections (`main`, `research`, `fallback`), model parameters (`maxTokens`, `temperature`), `logLevel`, `defaultSubtasks`, `defaultPriority`, `projectName`, etc. 26 | - Managed using the `task-master models --setup` command or the `models` MCP tool. 27 | - This is the main configuration file for most settings. 28 | 29 | 2. **Environment Variables (`.env` File - API Keys Only)** 30 | - Used **only** for sensitive **API Keys** (e.g., `ANTHROPIC_API_KEY`, `PERPLEXITY_API_KEY`). 31 | - Create a `.env` file in your project root for CLI usage. 32 | - See `assets/env.example` for required key names. 33 | 34 | **Important:** Settings like `MODEL`, `MAX_TOKENS`, `TEMPERATURE`, `TASKMASTER_LOG_LEVEL`, etc., are **no longer set via `.env`**. Use `task-master models --setup` instead. 35 | 36 | ## How It Works 37 | 38 | 1. **`tasks.json`**: 39 | 40 | - A JSON file at the project root containing an array of tasks (each with `id`, `title`, `description`, `status`, etc.). 41 | - The `meta` field can store additional info like the project's name, version, or reference to the PRD. 42 | - Tasks can have `subtasks` for more detailed implementation steps. 43 | - Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending) to easily track progress. 44 | 45 | 2. **CLI Commands** 46 | You can run the commands via: 47 | 48 | ```bash 49 | # If installed globally 50 | task-master [command] [options] 51 | 52 | # If using locally within the project 53 | node scripts/dev.js [command] [options] 54 | ``` 55 | 56 | Available commands: 57 | 58 | - `init`: Initialize a new project 59 | - `parse-prd`: Generate tasks from a PRD document 60 | - `list`: Display all tasks with their status 61 | - `update`: Update tasks based on new information 62 | - `generate`: Create individual task files 63 | - `set-status`: Change a task's status 64 | - `expand`: Add subtasks to a task or all tasks 65 | - `clear-subtasks`: Remove subtasks from specified tasks 66 | - `next`: Determine the next task to work on based on dependencies 67 | - `show`: Display detailed information about a specific task 68 | - `analyze-complexity`: Analyze task complexity and generate recommendations 69 | - `complexity-report`: Display the complexity analysis in a readable format 70 | - `add-dependency`: Add a dependency between tasks 71 | - `remove-dependency`: Remove a dependency from a task 72 | - `validate-dependencies`: Check for invalid dependencies 73 | - `fix-dependencies`: Fix invalid dependencies automatically 74 | - `add-task`: Add a new task using AI 75 | 76 | Run `task-master --help` or `node scripts/dev.js --help` to see detailed usage information. 
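To make the `tasks.json` structure described above more concrete, here is a minimal illustrative shape together with the kind of filtering the `list` command performs. This is a sketch only: the field names are taken from the descriptions in this README (`id`, `title`, `description`, `status`, `dependencies`, `subtasks`, `meta`), and the exact `priority`, `meta`, and subtask ID representation are assumptions, not the authoritative schema.

```js
// Illustrative only: a minimal tasks.json shape and a status filter.
// Field details (priority, meta contents, subtask id format) are assumptions.
const tasksFile = {
	meta: { projectName: 'Example Project', prdSource: 'scripts/prd.txt' },
	tasks: [
		{
			id: 1,
			title: 'Set up project skeleton',
			description: 'Initialize package.json and folder layout',
			status: 'done',
			dependencies: [],
			priority: 'high',
			subtasks: []
		},
		{
			id: 2,
			title: 'Implement task listing',
			description: 'Add the list command',
			status: 'pending',
			dependencies: [1], // displayed with ✅ once task 1 is done
			priority: 'medium',
			subtasks: [
				{ id: '2.1', title: 'Render table output', status: 'pending' }
			]
		}
	]
};

// Roughly what `task-master list --status=pending` selects.
const pending = tasksFile.tasks.filter((t) => t.status === 'pending');
console.log(pending.map((t) => `${t.id}: ${t.title}`));
```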
77 | 78 | ## Listing Tasks 79 | 80 | The `list` command allows you to view all tasks and their status: 81 | 82 | ```bash 83 | # List all tasks 84 | task-master list 85 | 86 | # List tasks with a specific status 87 | task-master list --status=pending 88 | 89 | # List tasks and include their subtasks 90 | task-master list --with-subtasks 91 | 92 | # List tasks with a specific status and include their subtasks 93 | task-master list --status=pending --with-subtasks 94 | ``` 95 | 96 | ## Updating Tasks 97 | 98 | The `update` command allows you to update tasks based on new information or implementation changes: 99 | 100 | ```bash 101 | # Update tasks starting from ID 4 with a new prompt 102 | task-master update --from=4 --prompt="Refactor tasks from ID 4 onward to use Express instead of Fastify" 103 | 104 | # Update all tasks (default from=1) 105 | task-master update --prompt="Add authentication to all relevant tasks" 106 | 107 | # Specify a different tasks file 108 | task-master update --file=custom-tasks.json --from=5 --prompt="Change database from MongoDB to PostgreSQL" 109 | ``` 110 | 111 | Notes: 112 | 113 | - The `--prompt` parameter is required and should explain the changes or new context 114 | - Only tasks that aren't marked as 'done' will be updated 115 | - Tasks with ID >= the specified --from value will be updated 116 | 117 | ## Setting Task Status 118 | 119 | The `set-status` command allows you to change a task's status: 120 | 121 | ```bash 122 | # Mark a task as done 123 | task-master set-status --id=3 --status=done 124 | 125 | # Mark a task as pending 126 | task-master set-status --id=4 --status=pending 127 | 128 | # Mark a specific subtask as done 129 | task-master set-status --id=3.1 --status=done 130 | 131 | # Mark multiple tasks at once 132 | task-master set-status --id=1,2,3 --status=done 133 | ``` 134 | 135 | Notes: 136 | 137 | - When marking a parent task as "done", all of its subtasks will automatically be marked as "done" as well 138 | - Common status values are 'done', 'pending', and 'deferred', but any string is accepted 139 | - You can specify multiple task IDs by separating them with commas 140 | - Subtask IDs are specified using the format `parentId.subtaskId` (e.g., `3.1`) 141 | - Dependencies are updated to show completion status (✅ for completed, ⏱️ for pending) throughout the system 142 | 143 | ## Expanding Tasks 144 | 145 | The `expand` command allows you to break down tasks into subtasks for more detailed implementation: 146 | 147 | ```bash 148 | # Expand a specific task with 3 subtasks (default) 149 | task-master expand --id=3 150 | 151 | # Expand a specific task with 5 subtasks 152 | task-master expand --id=3 --num=5 153 | 154 | # Expand a task with additional context 155 | task-master expand --id=3 --prompt="Focus on security aspects" 156 | 157 | # Expand all pending tasks that don't have subtasks 158 | task-master expand --all 159 | 160 | # Force regeneration of subtasks for all pending tasks 161 | task-master expand --all --force 162 | 163 | # Use Perplexity AI for research-backed subtask generation 164 | task-master expand --id=3 --research 165 | 166 | # Use Perplexity AI for research-backed generation on all pending tasks 167 | task-master expand --all --research 168 | ``` 169 | 170 | ## Clearing Subtasks 171 | 172 | The `clear-subtasks` command allows you to remove subtasks from specified tasks: 173 | 174 | ```bash 175 | # Clear subtasks from a specific task 176 | task-master clear-subtasks --id=3 177 | 178 | # Clear subtasks from multiple tasks 179 | 
task-master clear-subtasks --id=1,2,3 180 | 181 | # Clear subtasks from all tasks 182 | task-master clear-subtasks --all 183 | ``` 184 | 185 | Notes: 186 | 187 | - After clearing subtasks, task files are automatically regenerated 188 | - This is useful when you want to regenerate subtasks with a different approach 189 | - Can be combined with the `expand` command to immediately generate new subtasks 190 | - Works with both parent tasks and individual subtasks 191 | 192 | ## AI Integration (Updated) 193 | 194 | - The script now uses a unified AI service layer (`ai-services-unified.js`). 195 | - Model selection (e.g., Claude vs. Perplexity for `--research`) is determined by the configuration in `.taskmaster/config.json` based on the requested `role` (`main` or `research`). 196 | - API keys are automatically resolved from your `.env` file (for CLI) or MCP session environment. 197 | - To use the research capabilities (e.g., `expand --research`), ensure you have: 198 | 1. Configured a model for the `research` role using `task-master models --setup` (Perplexity models are recommended). 199 | 2. Added the corresponding API key (e.g., `PERPLEXITY_API_KEY`) to your `.env` file. 200 | 201 | ## Logging 202 | 203 | The script supports different logging levels controlled by the `TASKMASTER_LOG_LEVEL` environment variable: 204 | 205 | - `debug`: Detailed information, typically useful for troubleshooting 206 | - `info`: Confirmation that things are working as expected (default) 207 | - `warn`: Warning messages that don't prevent execution 208 | - `error`: Error messages that might prevent execution 209 | 210 | When `DEBUG=true` is set, debug logs are also written to a `dev-debug.log` file in the project root. 211 | 212 | ## Managing Task Dependencies 213 | 214 | The `add-dependency` and `remove-dependency` commands allow you to manage task dependencies: 215 | 216 | ```bash 217 | # Add a dependency to a task 218 | task-master add-dependency --id=<id> --depends-on=<id> 219 | 220 | # Remove a dependency from a task 221 | task-master remove-dependency --id=<id> --depends-on=<id> 222 | ``` 223 | 224 | These commands: 225 | 226 | 1. **Allow precise dependency management**: 227 | 228 | - Add dependencies between tasks with automatic validation 229 | - Remove dependencies when they're no longer needed 230 | - Update task files automatically after changes 231 | 232 | 2. **Include validation checks**: 233 | 234 | - Prevent circular dependencies (a task depending on itself) 235 | - Prevent duplicate dependencies 236 | - Verify that both tasks exist before adding/removing dependencies 237 | - Check if dependencies exist before attempting to remove them 238 | 239 | 3. **Provide clear feedback**: 240 | 241 | - Success messages confirm when dependencies are added/removed 242 | - Error messages explain why operations failed (if applicable) 243 | 244 | 4. 
**Automatically update task files**: 245 | - Regenerates task files to reflect dependency changes 246 | - Ensures tasks and their files stay synchronized 247 | 248 | ## Dependency Validation and Fixing 249 | 250 | The script provides two specialized commands to ensure task dependencies remain valid and properly maintained: 251 | 252 | ### Validating Dependencies 253 | 254 | The `validate-dependencies` command allows you to check for invalid dependencies without making changes: 255 | 256 | ```bash 257 | # Check for invalid dependencies in tasks.json 258 | task-master validate-dependencies 259 | 260 | # Specify a different tasks file 261 | task-master validate-dependencies --file=custom-tasks.json 262 | ``` 263 | 264 | This command: 265 | 266 | - Scans all tasks and subtasks for non-existent dependencies 267 | - Identifies potential self-dependencies (tasks referencing themselves) 268 | - Reports all found issues without modifying files 269 | - Provides a comprehensive summary of dependency state 270 | - Gives detailed statistics on task dependencies 271 | 272 | Use this command to audit your task structure before applying fixes. 273 | 274 | ### Fixing Dependencies 275 | 276 | The `fix-dependencies` command proactively finds and fixes all invalid dependencies: 277 | 278 | ```bash 279 | # Find and fix all invalid dependencies 280 | task-master fix-dependencies 281 | 282 | # Specify a different tasks file 283 | task-master fix-dependencies --file=custom-tasks.json 284 | ``` 285 | 286 | This command: 287 | 288 | 1. **Validates all dependencies** across tasks and subtasks 289 | 2. **Automatically removes**: 290 | - References to non-existent tasks and subtasks 291 | - Self-dependencies (tasks depending on themselves) 292 | 3. **Fixes issues in both**: 293 | - The tasks.json data structure 294 | - Individual task files during regeneration 295 | 4. **Provides a detailed report**: 296 | - Types of issues fixed (non-existent vs. self-dependencies) 297 | - Number of tasks affected (tasks vs. subtasks) 298 | - Where fixes were applied (tasks.json vs. task files) 299 | - List of all individual fixes made 300 | 301 | This is especially useful when tasks have been deleted or IDs have changed, potentially breaking dependency chains. 
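As a rough illustration of the checks described above, a validator only needs the set of known task and subtask IDs to flag non-existent dependencies and self-dependencies. The sketch below assumes the simplified task shape shown earlier in this README; it is not the actual implementation.

```js
// Minimal sketch of dependency validation (not the actual implementation).
function findDependencyIssues(tasks) {
	// Collect every known task and subtask ID as a string.
	const knownIds = new Set();
	for (const task of tasks) {
		knownIds.add(String(task.id));
		for (const sub of task.subtasks || []) knownIds.add(String(sub.id));
	}

	// Flag self-dependencies and references to IDs that do not exist.
	const issues = [];
	for (const task of tasks) {
		for (const dep of task.dependencies || []) {
			const depId = String(dep);
			if (depId === String(task.id)) {
				issues.push({ id: task.id, dep: depId, problem: 'self-dependency' });
			} else if (!knownIds.has(depId)) {
				issues.push({ id: task.id, dep: depId, problem: 'non-existent dependency' });
			}
		}
	}
	return issues; // empty array = nothing for fix-dependencies to remove
}
```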
302 | 303 | ## Analyzing Task Complexity 304 | 305 | The `analyze-complexity` command allows you to automatically assess task complexity and generate expansion recommendations: 306 | 307 | ```bash 308 | # Analyze all tasks and generate expansion recommendations 309 | task-master analyze-complexity 310 | 311 | # Specify a custom output file 312 | task-master analyze-complexity --output=custom-report.json 313 | 314 | # Override the model used for analysis 315 | task-master analyze-complexity --model=claude-3-opus-20240229 316 | 317 | # Set a custom complexity threshold (1-10) 318 | task-master analyze-complexity --threshold=6 319 | 320 | # Use Perplexity AI for research-backed complexity analysis 321 | task-master analyze-complexity --research 322 | ``` 323 | 324 | Notes: 325 | 326 | - The command uses Claude to analyze each task's complexity (or Perplexity with --research flag) 327 | - Tasks are scored on a scale of 1-10 328 | - Each task receives a recommended number of subtasks based on DEFAULT_SUBTASKS configuration 329 | - The default output path is `scripts/task-complexity-report.json` 330 | - Each task in the analysis includes a ready-to-use `expansionCommand` that can be copied directly to the terminal or executed programmatically 331 | - Tasks with complexity scores below the threshold (default: 5) may not need expansion 332 | - The research flag provides more contextual and informed complexity assessments 333 | 334 | ### Integration with Expand Command 335 | 336 | The `expand` command automatically checks for and uses complexity analysis if available: 337 | 338 | ```bash 339 | # Expand a task, using complexity report recommendations if available 340 | task-master expand --id=8 341 | 342 | # Expand all tasks, prioritizing by complexity score if a report exists 343 | task-master expand --all 344 | 345 | # Override recommendations with explicit values 346 | task-master expand --id=8 --num=5 --prompt="Custom prompt" 347 | ``` 348 | 349 | When a complexity report exists: 350 | 351 | - The `expand` command will use the recommended subtask count from the report (unless overridden) 352 | - It will use the tailored expansion prompt from the report (unless a custom prompt is provided) 353 | - When using `--all`, tasks are sorted by complexity score (highest first) 354 | - The `--research` flag is preserved from the complexity analysis to expansion 355 | 356 | The output report structure is: 357 | 358 | ```json 359 | { 360 | "meta": { 361 | "generatedAt": "2023-06-15T12:34:56.789Z", 362 | "tasksAnalyzed": 20, 363 | "thresholdScore": 5, 364 | "projectName": "Your Project Name", 365 | "usedResearch": true 366 | }, 367 | "complexityAnalysis": [ 368 | { 369 | "taskId": 8, 370 | "taskTitle": "Develop Implementation Drift Handling", 371 | "complexityScore": 9.5, 372 | "recommendedSubtasks": 6, 373 | "expansionPrompt": "Create subtasks that handle detecting...", 374 | "reasoning": "This task requires sophisticated logic...", 375 | "expansionCommand": "task-master expand --id=8 --num=6 --prompt=\"Create subtasks...\" --research" 376 | } 377 | // More tasks sorted by complexity score (highest first) 378 | ] 379 | } 380 | ``` 381 | 382 | ## Finding the Next Task 383 | 384 | The `next` command helps you determine which task to work on next based on dependencies and status: 385 | 386 | ```bash 387 | # Show the next task to work on 388 | task-master next 389 | 390 | # Specify a different tasks file 391 | task-master next --file=custom-tasks.json 392 | ``` 393 | 394 | This command: 395 | 396 | 1. 
Identifies all **eligible tasks** - pending or in-progress tasks whose dependencies are all satisfied (marked as done) 397 | 2. **Prioritizes** these eligible tasks by: 398 | - Priority level (high > medium > low) 399 | - Number of dependencies (fewer dependencies first) 400 | - Task ID (lower ID first) 401 | 3. **Displays** comprehensive information about the selected task: 402 | - Basic task details (ID, title, priority, dependencies) 403 | - Detailed description and implementation details 404 | - Subtasks if they exist 405 | 4. Provides **contextual suggested actions**: 406 | - Command to mark the task as in-progress 407 | - Command to mark the task as done when completed 408 | - Commands for working with subtasks (update status or expand) 409 | 410 | This feature ensures you're always working on the most appropriate task based on your project's current state and dependency structure. 411 | 412 | ## Showing Task Details 413 | 414 | The `show` command allows you to view detailed information about a specific task: 415 | 416 | ```bash 417 | # Show details for a specific task 418 | task-master show 1 419 | 420 | # Alternative syntax with --id option 421 | task-master show --id=1 422 | 423 | # Show details for a subtask 424 | task-master show --id=1.2 425 | 426 | # Specify a different tasks file 427 | task-master show 3 --file=custom-tasks.json 428 | ``` 429 | 430 | This command: 431 | 432 | 1. **Displays comprehensive information** about the specified task: 433 | - Basic task details (ID, title, priority, dependencies, status) 434 | - Full description and implementation details 435 | - Test strategy information 436 | - Subtasks if they exist 437 | 2. **Handles both regular tasks and subtasks**: 438 | - For regular tasks, shows all subtasks and their status 439 | - For subtasks, shows the parent task relationship 440 | 3. **Provides contextual suggested actions**: 441 | - Commands to update the task status 442 | - Commands for working with subtasks 443 | - For subtasks, provides a link to view the parent task 444 | 445 | This command is particularly useful when you need to examine a specific task in detail before implementing it or when you want to check the status and details of a particular task. 
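Returning to the `next` command described above, its selection rules can be pictured as an eligibility filter followed by a three-level sort (priority, then fewest dependencies, then lowest ID). The following sketch is illustrative only, uses the same simplified task shape as the earlier examples, and is not the actual implementation.

```js
// Illustrative sketch of the `next` selection rules (not the actual implementation).
const PRIORITY_RANK = { high: 3, medium: 2, low: 1 };

function pickNextTask(tasks) {
	// Dependencies are satisfied when the referenced tasks are done.
	const done = new Set(
		tasks.filter((t) => t.status === 'done').map((t) => String(t.id))
	);

	// Eligible: pending or in-progress tasks with all dependencies done.
	const eligible = tasks.filter(
		(t) =>
			(t.status === 'pending' || t.status === 'in-progress') &&
			(t.dependencies || []).every((dep) => done.has(String(dep)))
	);

	// Sort by priority (high > medium > low), then fewer dependencies, then lower ID.
	eligible.sort(
		(a, b) =>
			(PRIORITY_RANK[b.priority] || 0) - (PRIORITY_RANK[a.priority] || 0) ||
			(a.dependencies || []).length - (b.dependencies || []).length ||
			a.id - b.id
	);

	return eligible[0] || null;
}
```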
446 | ``` -------------------------------------------------------------------------------- /scripts/modules/task-manager/parse-prd/parse-prd-streaming.js: -------------------------------------------------------------------------------- ```javascript 1 | /** 2 | * Streaming handler for PRD parsing 3 | */ 4 | 5 | import { createParsePrdTracker } from '../../../../src/progress/parse-prd-tracker.js'; 6 | import { displayParsePrdStart } from '../../../../src/ui/parse-prd.js'; 7 | import { getPriorityIndicators } from '../../../../src/ui/indicators.js'; 8 | import { TimeoutManager } from '../../../../src/utils/timeout-manager.js'; 9 | import { 10 | streamObjectService, 11 | generateObjectService 12 | } from '../../ai-services-unified.js'; 13 | import { 14 | getMainModelId, 15 | getParametersForRole, 16 | getResearchModelId, 17 | getDefaultPriority 18 | } from '../../config-manager.js'; 19 | import { LoggingConfig, prdResponseSchema } from './parse-prd-config.js'; 20 | import { estimateTokens, reportTaskProgress } from './parse-prd-helpers.js'; 21 | 22 | /** 23 | * Extract a readable stream from various stream result formats 24 | * @param {any} streamResult - The stream result object from AI service 25 | * @returns {AsyncIterable|ReadableStream} The extracted stream 26 | * @throws {StreamingError} If no valid stream can be extracted 27 | */ 28 | function extractStreamFromResult(streamResult) { 29 | if (!streamResult) { 30 | throw new StreamingError( 31 | 'Stream result is null or undefined', 32 | STREAMING_ERROR_CODES.NOT_ASYNC_ITERABLE 33 | ); 34 | } 35 | 36 | // Try extraction strategies in priority order 37 | const stream = tryExtractStream(streamResult); 38 | 39 | if (!stream) { 40 | throw new StreamingError( 41 | 'Stream object is not async iterable or readable', 42 | STREAMING_ERROR_CODES.NOT_ASYNC_ITERABLE 43 | ); 44 | } 45 | 46 | return stream; 47 | } 48 | 49 | /** 50 | * Try to extract stream using various strategies 51 | */ 52 | function tryExtractStream(streamResult) { 53 | const streamExtractors = [ 54 | { key: 'partialObjectStream', extractor: (obj) => obj.partialObjectStream }, 55 | { key: 'textStream', extractor: (obj) => extractCallable(obj.textStream) }, 56 | { key: 'stream', extractor: (obj) => extractCallable(obj.stream) }, 57 | { key: 'baseStream', extractor: (obj) => obj.baseStream } 58 | ]; 59 | 60 | for (const { key, extractor } of streamExtractors) { 61 | const stream = extractor(streamResult); 62 | if (stream && isStreamable(stream)) { 63 | return stream; 64 | } 65 | } 66 | 67 | // Check if already streamable 68 | return isStreamable(streamResult) ? streamResult : null; 69 | } 70 | 71 | /** 72 | * Extract a property that might be a function or direct value 73 | */ 74 | function extractCallable(property) { 75 | if (!property) return null; 76 | return typeof property === 'function' ? 
property() : property; 77 | } 78 | 79 | /** 80 | * Check if object is streamable (async iterable or readable stream) 81 | */ 82 | function isStreamable(obj) { 83 | return ( 84 | obj && 85 | (typeof obj[Symbol.asyncIterator] === 'function' || 86 | (obj.getReader && typeof obj.getReader === 'function')) 87 | ); 88 | } 89 | 90 | /** 91 | * Handle streaming AI service call and parsing 92 | * @param {Object} config - Configuration object 93 | * @param {Object} prompts - System and user prompts 94 | * @param {number} numTasks - Number of tasks to generate 95 | * @returns {Promise<Object>} Parsed tasks and telemetry 96 | */ 97 | export async function handleStreamingService(config, prompts, numTasks) { 98 | const context = createStreamingContext(config, prompts, numTasks); 99 | 100 | await initializeProgress(config, numTasks, context.estimatedInputTokens); 101 | 102 | const aiServiceResponse = await callAIServiceWithTimeout( 103 | config, 104 | prompts, 105 | config.streamingTimeout 106 | ); 107 | 108 | const { progressTracker, priorityMap } = await setupProgressTracking( 109 | config, 110 | numTasks 111 | ); 112 | 113 | const streamingResult = await processStreamResponse( 114 | aiServiceResponse.mainResult, 115 | config, 116 | prompts, 117 | numTasks, 118 | progressTracker, 119 | priorityMap, 120 | context.defaultPriority, 121 | context.estimatedInputTokens, 122 | context.logger 123 | ); 124 | 125 | validateStreamingResult(streamingResult); 126 | 127 | // If we have usage data from streaming, log telemetry now 128 | if (streamingResult.usage && config.projectRoot) { 129 | const { logAiUsage } = await import('../../ai-services-unified.js'); 130 | const { getUserId } = await import('../../config-manager.js'); 131 | const userId = getUserId(config.projectRoot); 132 | 133 | if (userId && aiServiceResponse.providerName && aiServiceResponse.modelId) { 134 | try { 135 | const telemetryData = await logAiUsage({ 136 | userId, 137 | commandName: 'parse-prd', 138 | providerName: aiServiceResponse.providerName, 139 | modelId: aiServiceResponse.modelId, 140 | inputTokens: streamingResult.usage.promptTokens || 0, 141 | outputTokens: streamingResult.usage.completionTokens || 0, 142 | outputType: config.isMCP ? 
'mcp' : 'cli' 143 | }); 144 | 145 | // Add telemetry to the response 146 | if (telemetryData) { 147 | aiServiceResponse.telemetryData = telemetryData; 148 | } 149 | } catch (telemetryError) { 150 | context.logger.report( 151 | `Failed to log telemetry: ${telemetryError.message}`, 152 | 'debug' 153 | ); 154 | } 155 | } 156 | } 157 | 158 | return prepareFinalResult( 159 | streamingResult, 160 | aiServiceResponse, 161 | context.estimatedInputTokens, 162 | progressTracker 163 | ); 164 | } 165 | 166 | /** 167 | * Create streaming context with common values 168 | */ 169 | function createStreamingContext(config, prompts, numTasks) { 170 | const { systemPrompt, userPrompt } = prompts; 171 | return { 172 | logger: new LoggingConfig(config.mcpLog, config.reportProgress), 173 | estimatedInputTokens: estimateTokens(systemPrompt + userPrompt), 174 | defaultPriority: getDefaultPriority(config.projectRoot) || 'medium' 175 | }; 176 | } 177 | 178 | /** 179 | * Validate streaming result has tasks 180 | */ 181 | function validateStreamingResult(streamingResult) { 182 | if (streamingResult.parsedTasks.length === 0) { 183 | throw new Error('No tasks were generated from the PRD'); 184 | } 185 | } 186 | 187 | /** 188 | * Initialize progress reporting 189 | */ 190 | async function initializeProgress(config, numTasks, estimatedInputTokens) { 191 | if (config.reportProgress) { 192 | await config.reportProgress({ 193 | progress: 0, 194 | total: numTasks, 195 | message: `Starting PRD analysis (Input: ${estimatedInputTokens} tokens)${config.research ? ' with research' : ''}...` 196 | }); 197 | } 198 | } 199 | 200 | /** 201 | * Call AI service with timeout 202 | */ 203 | async function callAIServiceWithTimeout(config, prompts, timeout) { 204 | const { systemPrompt, userPrompt } = prompts; 205 | 206 | return await TimeoutManager.withTimeout( 207 | streamObjectService({ 208 | role: config.research ? 'research' : 'main', 209 | session: config.session, 210 | projectRoot: config.projectRoot, 211 | schema: prdResponseSchema, 212 | systemPrompt, 213 | prompt: userPrompt, 214 | commandName: 'parse-prd', 215 | outputType: config.isMCP ? 'mcp' : 'cli' 216 | }), 217 | timeout, 218 | 'Streaming operation' 219 | ); 220 | } 221 | 222 | /** 223 | * Setup progress tracking for CLI output 224 | */ 225 | async function setupProgressTracking(config, numTasks) { 226 | const priorityMap = getPriorityIndicators(config.isMCP); 227 | let progressTracker = null; 228 | 229 | if (config.outputFormat === 'text' && !config.isMCP) { 230 | progressTracker = createParsePrdTracker({ 231 | numUnits: numTasks, 232 | unitName: 'task', 233 | append: config.append 234 | }); 235 | 236 | const modelId = config.research ? getResearchModelId() : getMainModelId(); 237 | const parameters = getParametersForRole( 238 | config.research ? 
'research' : 'main' 239 | ); 240 | 241 | displayParsePrdStart({ 242 | prdFilePath: config.prdPath, 243 | outputPath: config.tasksPath, 244 | numTasks, 245 | append: config.append, 246 | research: config.research, 247 | force: config.force, 248 | existingTasks: [], 249 | nextId: 1, 250 | model: modelId || 'Default', 251 | temperature: parameters?.temperature || 0.7 252 | }); 253 | 254 | progressTracker.start(); 255 | } 256 | 257 | return { progressTracker, priorityMap }; 258 | } 259 | 260 | /** 261 | * Process stream response based on stream type 262 | */ 263 | async function processStreamResponse( 264 | streamResult, 265 | config, 266 | prompts, 267 | numTasks, 268 | progressTracker, 269 | priorityMap, 270 | defaultPriority, 271 | estimatedInputTokens, 272 | logger 273 | ) { 274 | const { systemPrompt, userPrompt } = prompts; 275 | const context = { 276 | config: { 277 | ...config, 278 | schema: prdResponseSchema // Add the schema for generateObject fallback 279 | }, 280 | numTasks, 281 | progressTracker, 282 | priorityMap, 283 | defaultPriority, 284 | estimatedInputTokens, 285 | prompt: userPrompt, 286 | systemPrompt: systemPrompt 287 | }; 288 | 289 | try { 290 | const streamingState = { 291 | lastPartialObject: null, 292 | taskCount: 0, 293 | estimatedOutputTokens: 0, 294 | usage: null 295 | }; 296 | 297 | await processPartialStream( 298 | streamResult.partialObjectStream, 299 | streamingState, 300 | context 301 | ); 302 | 303 | // Wait for usage data if available 304 | if (streamResult.usage) { 305 | try { 306 | streamingState.usage = await streamResult.usage; 307 | } catch (usageError) { 308 | logger.report( 309 | `Failed to get usage data: ${usageError.message}`, 310 | 'debug' 311 | ); 312 | } 313 | } 314 | 315 | return finalizeStreamingResults(streamingState, context); 316 | } catch (error) { 317 | logger.report( 318 | `StreamObject processing failed: ${error.message}. 
Falling back to generateObject.`, 319 | 'debug' 320 | ); 321 | return await processWithGenerateObject(context, logger); 322 | } 323 | } 324 | 325 | /** 326 | * Process the partial object stream 327 | */ 328 | async function processPartialStream(partialStream, state, context) { 329 | for await (const partialObject of partialStream) { 330 | state.lastPartialObject = partialObject; 331 | 332 | if (partialObject) { 333 | state.estimatedOutputTokens = estimateTokens( 334 | JSON.stringify(partialObject) 335 | ); 336 | } 337 | 338 | await processStreamingTasks(partialObject, state, context); 339 | } 340 | } 341 | 342 | /** 343 | * Process tasks from a streaming partial object 344 | */ 345 | async function processStreamingTasks(partialObject, state, context) { 346 | if (!partialObject?.tasks || !Array.isArray(partialObject.tasks)) { 347 | return; 348 | } 349 | 350 | const newTaskCount = partialObject.tasks.length; 351 | 352 | if (newTaskCount > state.taskCount) { 353 | await processNewTasks( 354 | partialObject.tasks, 355 | state.taskCount, 356 | newTaskCount, 357 | state.estimatedOutputTokens, 358 | context 359 | ); 360 | state.taskCount = newTaskCount; 361 | } else if (context.progressTracker && state.estimatedOutputTokens > 0) { 362 | context.progressTracker.updateTokens( 363 | context.estimatedInputTokens, 364 | state.estimatedOutputTokens, 365 | true 366 | ); 367 | } 368 | } 369 | 370 | /** 371 | * Process newly appeared tasks in the stream 372 | */ 373 | async function processNewTasks( 374 | tasks, 375 | startIndex, 376 | endIndex, 377 | estimatedOutputTokens, 378 | context 379 | ) { 380 | for (let i = startIndex; i < endIndex; i++) { 381 | const task = tasks[i] || {}; 382 | 383 | if (task.title) { 384 | await reportTaskProgress({ 385 | task, 386 | currentCount: i + 1, 387 | totalTasks: context.numTasks, 388 | estimatedTokens: estimatedOutputTokens, 389 | progressTracker: context.progressTracker, 390 | reportProgress: context.config.reportProgress, 391 | priorityMap: context.priorityMap, 392 | defaultPriority: context.defaultPriority, 393 | estimatedInputTokens: context.estimatedInputTokens 394 | }); 395 | } else { 396 | await reportPlaceholderTask(i + 1, estimatedOutputTokens, context); 397 | } 398 | } 399 | } 400 | 401 | /** 402 | * Report a placeholder task while it's being generated 403 | */ 404 | async function reportPlaceholderTask( 405 | taskNumber, 406 | estimatedOutputTokens, 407 | context 408 | ) { 409 | const { 410 | progressTracker, 411 | config, 412 | numTasks, 413 | defaultPriority, 414 | estimatedInputTokens 415 | } = context; 416 | 417 | if (progressTracker) { 418 | progressTracker.addTaskLine( 419 | taskNumber, 420 | `Generating task ${taskNumber}...`, 421 | defaultPriority 422 | ); 423 | progressTracker.updateTokens( 424 | estimatedInputTokens, 425 | estimatedOutputTokens, 426 | true 427 | ); 428 | } 429 | 430 | if (config.reportProgress && !progressTracker) { 431 | await config.reportProgress({ 432 | progress: taskNumber, 433 | total: numTasks, 434 | message: `Generating task ${taskNumber}/${numTasks}...` 435 | }); 436 | } 437 | } 438 | 439 | /** 440 | * Finalize streaming results and update progress display 441 | */ 442 | async function finalizeStreamingResults(state, context) { 443 | const { lastPartialObject, estimatedOutputTokens, taskCount, usage } = state; 444 | 445 | if (!lastPartialObject?.tasks || !Array.isArray(lastPartialObject.tasks)) { 446 | throw new Error('No tasks generated from streamObject'); 447 | } 448 | 449 | // Use actual token counts if available, 
otherwise use estimates 450 | const finalOutputTokens = usage?.completionTokens || estimatedOutputTokens; 451 | const finalInputTokens = usage?.promptTokens || context.estimatedInputTokens; 452 | 453 | if (context.progressTracker) { 454 | await updateFinalProgress( 455 | lastPartialObject.tasks, 456 | taskCount, 457 | usage ? finalOutputTokens : estimatedOutputTokens, 458 | context, 459 | usage ? finalInputTokens : null 460 | ); 461 | } 462 | 463 | return { 464 | parsedTasks: lastPartialObject.tasks, 465 | estimatedOutputTokens: finalOutputTokens, 466 | actualInputTokens: finalInputTokens, 467 | usage, 468 | usedFallback: false 469 | }; 470 | } 471 | 472 | /** 473 | * Update progress tracker with final task content 474 | */ 475 | async function updateFinalProgress( 476 | tasks, 477 | taskCount, 478 | outputTokens, 479 | context, 480 | actualInputTokens = null 481 | ) { 482 | const { progressTracker, defaultPriority, estimatedInputTokens } = context; 483 | 484 | if (taskCount > 0) { 485 | updateTaskLines(tasks, progressTracker, defaultPriority); 486 | } else { 487 | await reportAllTasks(tasks, outputTokens, context); 488 | } 489 | 490 | progressTracker.updateTokens( 491 | actualInputTokens || estimatedInputTokens, 492 | outputTokens, 493 | false 494 | ); 495 | progressTracker.stop(); 496 | } 497 | 498 | /** 499 | * Update task lines in progress tracker with final content 500 | */ 501 | function updateTaskLines(tasks, progressTracker, defaultPriority) { 502 | for (let i = 0; i < tasks.length; i++) { 503 | const task = tasks[i]; 504 | if (task?.title) { 505 | progressTracker.addTaskLine( 506 | i + 1, 507 | task.title, 508 | task.priority || defaultPriority 509 | ); 510 | } 511 | } 512 | } 513 | 514 | /** 515 | * Report all tasks that were not streamed incrementally 516 | */ 517 | async function reportAllTasks(tasks, estimatedOutputTokens, context) { 518 | for (let i = 0; i < tasks.length; i++) { 519 | const task = tasks[i]; 520 | if (task?.title) { 521 | await reportTaskProgress({ 522 | task, 523 | currentCount: i + 1, 524 | totalTasks: context.numTasks, 525 | estimatedTokens: estimatedOutputTokens, 526 | progressTracker: context.progressTracker, 527 | reportProgress: context.config.reportProgress, 528 | priorityMap: context.priorityMap, 529 | defaultPriority: context.defaultPriority, 530 | estimatedInputTokens: context.estimatedInputTokens 531 | }); 532 | } 533 | } 534 | } 535 | 536 | /** 537 | * Process with generateObject as fallback when streaming fails 538 | */ 539 | async function processWithGenerateObject(context, logger) { 540 | logger.report('Using generateObject fallback for PRD parsing', 'info'); 541 | 542 | // Show placeholder tasks while generating 543 | if (context.progressTracker) { 544 | for (let i = 0; i < context.numTasks; i++) { 545 | context.progressTracker.addTaskLine( 546 | i + 1, 547 | `Generating task ${i + 1}...`, 548 | context.defaultPriority 549 | ); 550 | context.progressTracker.updateTokens( 551 | context.estimatedInputTokens, 552 | 0, 553 | true 554 | ); 555 | } 556 | } 557 | 558 | // Use generateObjectService instead of streaming 559 | const result = await generateObjectService({ 560 | role: context.config.research ? 
'research' : 'main', 561 | commandName: 'parse-prd', 562 | prompt: context.prompt, 563 | systemPrompt: context.systemPrompt, 564 | schema: context.config.schema, 565 | outputFormat: context.config.outputFormat || 'text', 566 | projectRoot: context.config.projectRoot, 567 | session: context.config.session 568 | }); 569 | 570 | // Extract tasks from the result (handle both direct tasks and mainResult.tasks) 571 | const tasks = result?.mainResult || result; 572 | 573 | // Process the generated tasks 574 | if (tasks && Array.isArray(tasks.tasks)) { 575 | // Update progress tracker with final tasks 576 | if (context.progressTracker) { 577 | for (let i = 0; i < tasks.tasks.length; i++) { 578 | const task = tasks.tasks[i]; 579 | if (task && task.title) { 580 | context.progressTracker.addTaskLine( 581 | i + 1, 582 | task.title, 583 | task.priority || context.defaultPriority 584 | ); 585 | } 586 | } 587 | 588 | // Final token update - use actual telemetry if available 589 | const outputTokens = 590 | result.telemetryData?.outputTokens || 591 | estimateTokens(JSON.stringify(tasks)); 592 | const inputTokens = 593 | result.telemetryData?.inputTokens || context.estimatedInputTokens; 594 | 595 | context.progressTracker.updateTokens(inputTokens, outputTokens, false); 596 | } 597 | 598 | return { 599 | parsedTasks: tasks.tasks, 600 | estimatedOutputTokens: 601 | result.telemetryData?.outputTokens || 602 | estimateTokens(JSON.stringify(tasks)), 603 | actualInputTokens: result.telemetryData?.inputTokens, 604 | telemetryData: result.telemetryData, 605 | usedFallback: true 606 | }; 607 | } 608 | 609 | throw new Error('Failed to generate tasks using generateObject fallback'); 610 | } 611 | 612 | /** 613 | * Prepare final result with cleanup 614 | */ 615 | function prepareFinalResult( 616 | streamingResult, 617 | aiServiceResponse, 618 | estimatedInputTokens, 619 | progressTracker 620 | ) { 621 | let summary = null; 622 | if (progressTracker) { 623 | summary = progressTracker.getSummary(); 624 | progressTracker.cleanup(); 625 | } 626 | 627 | // If we have actual usage data from streaming, update the AI service response 628 | if (streamingResult.usage && aiServiceResponse) { 629 | // Map the Vercel AI SDK usage format to our telemetry format 630 | const usage = streamingResult.usage; 631 | if (!aiServiceResponse.usage) { 632 | aiServiceResponse.usage = { 633 | promptTokens: usage.promptTokens || 0, 634 | completionTokens: usage.completionTokens || 0, 635 | totalTokens: usage.totalTokens || 0 636 | }; 637 | } 638 | 639 | // The telemetry should have been logged in the unified service runner 640 | // but if not, the usage is now available for telemetry calculation 641 | } 642 | 643 | return { 644 | parsedTasks: streamingResult.parsedTasks, 645 | aiServiceResponse, 646 | estimatedInputTokens: 647 | streamingResult.actualInputTokens || estimatedInputTokens, 648 | estimatedOutputTokens: streamingResult.estimatedOutputTokens, 649 | usedFallback: streamingResult.usedFallback, 650 | progressTracker, 651 | summary 652 | }; 653 | } 654 | ``` -------------------------------------------------------------------------------- /packages/tm-core/src/auth/credential-store.test.ts: -------------------------------------------------------------------------------- ```typescript 1 | /** 2 | * Tests for CredentialStore with numeric and string timestamp handling 3 | */ 4 | 5 | import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest'; 6 | import { CredentialStore } from './credential-store.js'; 7 | import { 
AuthenticationError } from './types.js'; 8 | import type { AuthCredentials } from './types.js'; 9 | import fs from 'fs'; 10 | import path from 'path'; 11 | import os from 'os'; 12 | 13 | // Mock fs module 14 | vi.mock('fs'); 15 | 16 | // Mock logger 17 | const mockLogger = { 18 | warn: vi.fn(), 19 | info: vi.fn(), 20 | debug: vi.fn(), 21 | error: vi.fn() 22 | }; 23 | 24 | vi.mock('../logger/index.js', () => ({ 25 | getLogger: () => mockLogger 26 | })); 27 | 28 | describe('CredentialStore', () => { 29 | let store: CredentialStore; 30 | const testDir = '/test/config'; 31 | const configFile = '/test/config/auth.json'; 32 | 33 | beforeEach(() => { 34 | vi.clearAllMocks(); 35 | store = new CredentialStore({ 36 | configDir: testDir, 37 | configFile: configFile, 38 | baseUrl: 'https://api.test.com' 39 | }); 40 | }); 41 | 42 | afterEach(() => { 43 | vi.restoreAllMocks(); 44 | }); 45 | 46 | describe('getCredentials with timestamp migration', () => { 47 | it('should handle string ISO timestamp correctly', () => { 48 | const futureDate = new Date(Date.now() + 3600000); // 1 hour from now 49 | const mockCredentials: AuthCredentials = { 50 | token: 'test-token', 51 | userId: 'user-123', 52 | email: '[email protected]', 53 | expiresAt: futureDate.toISOString(), 54 | tokenType: 'standard', 55 | savedAt: new Date().toISOString() 56 | }; 57 | 58 | vi.mocked(fs.existsSync).mockReturnValue(true); 59 | vi.mocked(fs.readFileSync).mockReturnValue( 60 | JSON.stringify(mockCredentials) 61 | ); 62 | 63 | const result = store.getCredentials(); 64 | 65 | expect(result).not.toBeNull(); 66 | expect(result?.token).toBe('test-token'); 67 | // The timestamp should be normalized to numeric milliseconds 68 | expect(typeof result?.expiresAt).toBe('number'); 69 | expect(result?.expiresAt).toBe(futureDate.getTime()); 70 | }); 71 | 72 | it('should handle numeric timestamp correctly', () => { 73 | const futureTimestamp = Date.now() + 7200000; // 2 hours from now 74 | const mockCredentials = { 75 | token: 'test-token', 76 | userId: 'user-456', 77 | email: '[email protected]', 78 | expiresAt: futureTimestamp, 79 | tokenType: 'standard', 80 | savedAt: new Date().toISOString() 81 | }; 82 | 83 | vi.mocked(fs.existsSync).mockReturnValue(true); 84 | vi.mocked(fs.readFileSync).mockReturnValue( 85 | JSON.stringify(mockCredentials) 86 | ); 87 | 88 | const result = store.getCredentials(); 89 | 90 | expect(result).not.toBeNull(); 91 | expect(result?.token).toBe('test-token'); 92 | // Numeric timestamp should remain as-is 93 | expect(typeof result?.expiresAt).toBe('number'); 94 | expect(result?.expiresAt).toBe(futureTimestamp); 95 | }); 96 | 97 | it('should reject invalid string timestamp', () => { 98 | const mockCredentials = { 99 | token: 'test-token', 100 | userId: 'user-789', 101 | expiresAt: 'invalid-date-string', 102 | tokenType: 'standard', 103 | savedAt: new Date().toISOString() 104 | }; 105 | 106 | vi.mocked(fs.existsSync).mockReturnValue(true); 107 | vi.mocked(fs.readFileSync).mockReturnValue( 108 | JSON.stringify(mockCredentials) 109 | ); 110 | 111 | const result = store.getCredentials(); 112 | 113 | expect(result).toBeNull(); 114 | expect(mockLogger.warn).toHaveBeenCalledWith( 115 | 'No valid expiration time provided for token' 116 | ); 117 | }); 118 | 119 | it('should reject NaN timestamp', () => { 120 | const mockCredentials = { 121 | token: 'test-token', 122 | userId: 'user-nan', 123 | expiresAt: NaN, 124 | tokenType: 'standard', 125 | savedAt: new Date().toISOString() 126 | }; 127 | 128 | 
vi.mocked(fs.existsSync).mockReturnValue(true); 129 | vi.mocked(fs.readFileSync).mockReturnValue( 130 | JSON.stringify(mockCredentials) 131 | ); 132 | 133 | const result = store.getCredentials(); 134 | 135 | expect(result).toBeNull(); 136 | expect(mockLogger.warn).toHaveBeenCalledWith( 137 | 'No valid expiration time provided for token' 138 | ); 139 | }); 140 | 141 | it('should reject Infinity timestamp', () => { 142 | const mockCredentials = { 143 | token: 'test-token', 144 | userId: 'user-inf', 145 | expiresAt: Infinity, 146 | tokenType: 'standard', 147 | savedAt: new Date().toISOString() 148 | }; 149 | 150 | vi.mocked(fs.existsSync).mockReturnValue(true); 151 | vi.mocked(fs.readFileSync).mockReturnValue( 152 | JSON.stringify(mockCredentials) 153 | ); 154 | 155 | const result = store.getCredentials(); 156 | 157 | expect(result).toBeNull(); 158 | expect(mockLogger.warn).toHaveBeenCalledWith( 159 | 'No valid expiration time provided for token' 160 | ); 161 | }); 162 | 163 | it('should handle missing expiresAt field', () => { 164 | const mockCredentials = { 165 | token: 'test-token', 166 | userId: 'user-no-expiry', 167 | tokenType: 'standard', 168 | savedAt: new Date().toISOString() 169 | // No expiresAt field 170 | }; 171 | 172 | vi.mocked(fs.existsSync).mockReturnValue(true); 173 | vi.mocked(fs.readFileSync).mockReturnValue( 174 | JSON.stringify(mockCredentials) 175 | ); 176 | 177 | const result = store.getCredentials(); 178 | 179 | expect(result).toBeNull(); 180 | expect(mockLogger.warn).toHaveBeenCalledWith( 181 | 'No valid expiration time provided for token' 182 | ); 183 | }); 184 | 185 | it('should check token expiration correctly', () => { 186 | const expiredTimestamp = Date.now() - 3600000; // 1 hour ago 187 | const mockCredentials = { 188 | token: 'expired-token', 189 | userId: 'user-expired', 190 | expiresAt: expiredTimestamp, 191 | tokenType: 'standard', 192 | savedAt: new Date().toISOString() 193 | }; 194 | 195 | vi.mocked(fs.existsSync).mockReturnValue(true); 196 | vi.mocked(fs.readFileSync).mockReturnValue( 197 | JSON.stringify(mockCredentials) 198 | ); 199 | 200 | const result = store.getCredentials(); 201 | 202 | expect(result).toBeNull(); 203 | expect(mockLogger.warn).toHaveBeenCalledWith( 204 | expect.stringContaining('Authentication token has expired'), 205 | expect.any(Object) 206 | ); 207 | }); 208 | 209 | it('should allow expired tokens when requested', () => { 210 | const expiredTimestamp = Date.now() - 3600000; // 1 hour ago 211 | const mockCredentials = { 212 | token: 'expired-token', 213 | userId: 'user-expired', 214 | expiresAt: expiredTimestamp, 215 | tokenType: 'standard', 216 | savedAt: new Date().toISOString() 217 | }; 218 | 219 | vi.mocked(fs.existsSync).mockReturnValue(true); 220 | vi.mocked(fs.readFileSync).mockReturnValue( 221 | JSON.stringify(mockCredentials) 222 | ); 223 | 224 | const result = store.getCredentials({ allowExpired: true }); 225 | 226 | expect(result).not.toBeNull(); 227 | expect(result?.token).toBe('expired-token'); 228 | }); 229 | }); 230 | 231 | describe('saveCredentials with timestamp normalization', () => { 232 | beforeEach(() => { 233 | vi.mocked(fs.existsSync).mockReturnValue(true); 234 | vi.mocked(fs.mkdirSync).mockImplementation(() => undefined); 235 | vi.mocked(fs.writeFileSync).mockImplementation(() => undefined); 236 | vi.mocked(fs.renameSync).mockImplementation(() => undefined); 237 | }); 238 | 239 | it('should normalize string timestamp to ISO string when saving', () => { 240 | const futureDate = new Date(Date.now() + 
3600000); 241 | const credentials: AuthCredentials = { 242 | token: 'test-token', 243 | userId: 'user-123', 244 | expiresAt: futureDate.toISOString(), 245 | tokenType: 'standard', 246 | savedAt: new Date().toISOString() 247 | }; 248 | 249 | store.saveCredentials(credentials); 250 | 251 | expect(fs.writeFileSync).toHaveBeenCalledWith( 252 | expect.stringContaining('.tmp'), 253 | expect.stringContaining('"expiresAt":'), 254 | expect.any(Object) 255 | ); 256 | 257 | // Check that the written data contains a valid ISO string 258 | const writtenData = vi.mocked(fs.writeFileSync).mock 259 | .calls[0][1] as string; 260 | const parsed = JSON.parse(writtenData); 261 | expect(typeof parsed.expiresAt).toBe('string'); 262 | expect(new Date(parsed.expiresAt).toISOString()).toBe(parsed.expiresAt); 263 | }); 264 | 265 | it('should convert numeric timestamp to ISO string when saving', () => { 266 | const futureTimestamp = Date.now() + 7200000; 267 | const credentials: AuthCredentials = { 268 | token: 'test-token', 269 | userId: 'user-456', 270 | expiresAt: futureTimestamp, 271 | tokenType: 'standard', 272 | savedAt: new Date().toISOString() 273 | }; 274 | 275 | store.saveCredentials(credentials); 276 | 277 | const writtenData = vi.mocked(fs.writeFileSync).mock 278 | .calls[0][1] as string; 279 | const parsed = JSON.parse(writtenData); 280 | expect(typeof parsed.expiresAt).toBe('string'); 281 | expect(new Date(parsed.expiresAt).getTime()).toBe(futureTimestamp); 282 | }); 283 | 284 | it('should reject invalid string timestamp when saving', () => { 285 | const credentials: AuthCredentials = { 286 | token: 'test-token', 287 | userId: 'user-789', 288 | expiresAt: 'invalid-date' as any, 289 | tokenType: 'standard', 290 | savedAt: new Date().toISOString() 291 | }; 292 | 293 | let err: unknown; 294 | try { 295 | store.saveCredentials(credentials); 296 | } catch (e) { 297 | err = e; 298 | } 299 | expect(err).toBeInstanceOf(AuthenticationError); 300 | expect((err as Error).message).toContain('Invalid expiresAt format'); 301 | }); 302 | 303 | it('should reject NaN timestamp when saving', () => { 304 | const credentials: AuthCredentials = { 305 | token: 'test-token', 306 | userId: 'user-nan', 307 | expiresAt: NaN as any, 308 | tokenType: 'standard', 309 | savedAt: new Date().toISOString() 310 | }; 311 | 312 | let err: unknown; 313 | try { 314 | store.saveCredentials(credentials); 315 | } catch (e) { 316 | err = e; 317 | } 318 | expect(err).toBeInstanceOf(AuthenticationError); 319 | expect((err as Error).message).toContain('Invalid expiresAt format'); 320 | }); 321 | 322 | it('should reject Infinity timestamp when saving', () => { 323 | const credentials: AuthCredentials = { 324 | token: 'test-token', 325 | userId: 'user-inf', 326 | expiresAt: Infinity as any, 327 | tokenType: 'standard', 328 | savedAt: new Date().toISOString() 329 | }; 330 | 331 | let err: unknown; 332 | try { 333 | store.saveCredentials(credentials); 334 | } catch (e) { 335 | err = e; 336 | } 337 | expect(err).toBeInstanceOf(AuthenticationError); 338 | expect((err as Error).message).toContain('Invalid expiresAt format'); 339 | }); 340 | 341 | it('should handle missing expiresAt when saving', () => { 342 | const credentials: AuthCredentials = { 343 | token: 'test-token', 344 | userId: 'user-no-expiry', 345 | tokenType: 'standard', 346 | savedAt: new Date().toISOString() 347 | // No expiresAt 348 | }; 349 | 350 | store.saveCredentials(credentials); 351 | 352 | const writtenData = vi.mocked(fs.writeFileSync).mock 353 | .calls[0][1] as string; 354 | 
const parsed = JSON.parse(writtenData); 355 | expect(parsed.expiresAt).toBeUndefined(); 356 | }); 357 | 358 | it('should not mutate the original credentials object', () => { 359 | const originalTimestamp = Date.now() + 3600000; 360 | const credentials: AuthCredentials = { 361 | token: 'test-token', 362 | userId: 'user-123', 363 | expiresAt: originalTimestamp, 364 | tokenType: 'standard', 365 | savedAt: new Date().toISOString() 366 | }; 367 | 368 | const originalCredentialsCopy = { ...credentials }; 369 | 370 | store.saveCredentials(credentials); 371 | 372 | // Original object should not be modified 373 | expect(credentials).toEqual(originalCredentialsCopy); 374 | expect(credentials.expiresAt).toBe(originalTimestamp); 375 | }); 376 | }); 377 | 378 | describe('corrupt file handling', () => { 379 | it('should quarantine corrupt file on JSON parse error', () => { 380 | vi.mocked(fs.existsSync).mockReturnValue(true); 381 | vi.mocked(fs.readFileSync).mockReturnValue('invalid json {'); 382 | vi.mocked(fs.renameSync).mockImplementation(() => undefined); 383 | 384 | const result = store.getCredentials(); 385 | 386 | expect(result).toBeNull(); 387 | expect(fs.renameSync).toHaveBeenCalledWith( 388 | configFile, 389 | expect.stringContaining('.corrupt-') 390 | ); 391 | expect(mockLogger.warn).toHaveBeenCalledWith( 392 | expect.stringContaining('Quarantined corrupt auth file') 393 | ); 394 | }); 395 | 396 | it('should handle quarantine failure gracefully', () => { 397 | vi.mocked(fs.existsSync).mockReturnValue(true); 398 | vi.mocked(fs.readFileSync).mockReturnValue('invalid json {'); 399 | vi.mocked(fs.renameSync).mockImplementation(() => { 400 | throw new Error('Permission denied'); 401 | }); 402 | 403 | const result = store.getCredentials(); 404 | 405 | expect(result).toBeNull(); 406 | expect(mockLogger.debug).toHaveBeenCalledWith( 407 | expect.stringContaining('Could not quarantine corrupt file') 408 | ); 409 | }); 410 | }); 411 | 412 | describe('clearCredentials', () => { 413 | it('should delete the auth file when it exists', () => { 414 | // Mock file exists 415 | vi.mocked(fs.existsSync).mockReturnValue(true); 416 | vi.mocked(fs.unlinkSync).mockImplementation(() => undefined); 417 | 418 | store.clearCredentials(); 419 | 420 | expect(fs.existsSync).toHaveBeenCalledWith('/test/config/auth.json'); 421 | expect(fs.unlinkSync).toHaveBeenCalledWith('/test/config/auth.json'); 422 | }); 423 | 424 | it('should not throw when auth file does not exist', () => { 425 | // Mock file does not exist 426 | vi.mocked(fs.existsSync).mockReturnValue(false); 427 | 428 | // Should not throw 429 | expect(() => store.clearCredentials()).not.toThrow(); 430 | 431 | // Should not try to unlink non-existent file 432 | expect(fs.unlinkSync).not.toHaveBeenCalled(); 433 | }); 434 | 435 | it('should throw AuthenticationError when unlink fails', () => { 436 | vi.mocked(fs.existsSync).mockReturnValue(true); 437 | vi.mocked(fs.unlinkSync).mockImplementation(() => { 438 | throw new Error('Permission denied'); 439 | }); 440 | 441 | let err: unknown; 442 | try { 443 | store.clearCredentials(); 444 | } catch (e) { 445 | err = e; 446 | } 447 | 448 | expect(err).toBeInstanceOf(AuthenticationError); 449 | expect((err as Error).message).toContain('Failed to clear credentials'); 450 | expect((err as Error).message).toContain('Permission denied'); 451 | }); 452 | }); 453 | 454 | describe('hasValidCredentials', () => { 455 | it('should return true when valid unexpired credentials exist', () => { 456 | const futureDate = new Date(Date.now() + 
3600000); // 1 hour from now 457 | const credentials = { 458 | token: 'valid-token', 459 | userId: 'user-123', 460 | expiresAt: futureDate.toISOString(), 461 | tokenType: 'standard', 462 | savedAt: new Date().toISOString() 463 | }; 464 | 465 | vi.mocked(fs.existsSync).mockReturnValue(true); 466 | vi.mocked(fs.readFileSync).mockReturnValue(JSON.stringify(credentials)); 467 | 468 | expect(store.hasValidCredentials()).toBe(true); 469 | }); 470 | 471 | it('should return false when credentials are expired', () => { 472 | const pastDate = new Date(Date.now() - 3600000); // 1 hour ago 473 | const credentials = { 474 | token: 'expired-token', 475 | userId: 'user-123', 476 | expiresAt: pastDate.toISOString(), 477 | tokenType: 'standard', 478 | savedAt: new Date().toISOString() 479 | }; 480 | 481 | vi.mocked(fs.existsSync).mockReturnValue(true); 482 | vi.mocked(fs.readFileSync).mockReturnValue(JSON.stringify(credentials)); 483 | 484 | expect(store.hasValidCredentials()).toBe(false); 485 | }); 486 | 487 | it('should return false when no credentials exist', () => { 488 | vi.mocked(fs.existsSync).mockReturnValue(false); 489 | 490 | expect(store.hasValidCredentials()).toBe(false); 491 | }); 492 | 493 | it('should return false when file contains invalid JSON', () => { 494 | vi.mocked(fs.existsSync).mockReturnValue(true); 495 | vi.mocked(fs.readFileSync).mockReturnValue('invalid json {'); 496 | vi.mocked(fs.renameSync).mockImplementation(() => undefined); 497 | 498 | expect(store.hasValidCredentials()).toBe(false); 499 | }); 500 | 501 | it('should return false for credentials without expiry', () => { 502 | const credentials = { 503 | token: 'no-expiry-token', 504 | userId: 'user-123', 505 | tokenType: 'standard', 506 | savedAt: new Date().toISOString() 507 | }; 508 | 509 | vi.mocked(fs.existsSync).mockReturnValue(true); 510 | vi.mocked(fs.readFileSync).mockReturnValue(JSON.stringify(credentials)); 511 | 512 | // Credentials without expiry are considered invalid 513 | expect(store.hasValidCredentials()).toBe(false); 514 | 515 | // Should log warning about missing expiration 516 | expect(mockLogger.warn).toHaveBeenCalledWith( 517 | 'No valid expiration time provided for token' 518 | ); 519 | }); 520 | 521 | it('should use allowExpired=false by default', () => { 522 | // Spy on getCredentials to verify it's called with correct params 523 | const getCredentialsSpy = vi.spyOn(store, 'getCredentials'); 524 | 525 | vi.mocked(fs.existsSync).mockReturnValue(false); 526 | store.hasValidCredentials(); 527 | 528 | expect(getCredentialsSpy).toHaveBeenCalledWith({ allowExpired: false }); 529 | }); 530 | }); 531 | 532 | describe('cleanupCorruptFiles', () => { 533 | it('should remove old corrupt files', () => { 534 | const now = Date.now(); 535 | const oldFile = 'auth.json.corrupt-' + (now - 8 * 24 * 60 * 60 * 1000); // 8 days old 536 | const newFile = 'auth.json.corrupt-' + (now - 1000); // 1 second old 537 | 538 | vi.mocked(fs.existsSync).mockReturnValue(true); 539 | vi.mocked(fs.readdirSync).mockReturnValue([ 540 | { name: oldFile, isFile: () => true }, 541 | { name: newFile, isFile: () => true }, 542 | { name: 'auth.json', isFile: () => true } 543 | ] as any); 544 | vi.mocked(fs.statSync).mockImplementation((filePath) => { 545 | if (filePath.includes(oldFile)) { 546 | return { mtimeMs: now - 8 * 24 * 60 * 60 * 1000 } as any; 547 | } 548 | return { mtimeMs: now - 1000 } as any; 549 | }); 550 | vi.mocked(fs.unlinkSync).mockImplementation(() => undefined); 551 | 552 | store.cleanupCorruptFiles(); 553 | 554 | 
expect(fs.unlinkSync).toHaveBeenCalledWith( 555 | expect.stringContaining(oldFile) 556 | ); 557 | expect(fs.unlinkSync).not.toHaveBeenCalledWith( 558 | expect.stringContaining(newFile) 559 | ); 560 | }); 561 | 562 | it('should handle cleanup errors gracefully', () => { 563 | vi.mocked(fs.existsSync).mockReturnValue(true); 564 | vi.mocked(fs.readdirSync).mockImplementation(() => { 565 | throw new Error('Permission denied'); 566 | }); 567 | 568 | // Should not throw 569 | expect(() => store.cleanupCorruptFiles()).not.toThrow(); 570 | expect(mockLogger.debug).toHaveBeenCalledWith( 571 | expect.stringContaining('Error during corrupt file cleanup') 572 | ); 573 | }); 574 | }); 575 | }); 576 | ``` -------------------------------------------------------------------------------- /tests/unit/scripts/modules/task-manager/research.test.js: -------------------------------------------------------------------------------- ```javascript 1 | import { jest } from '@jest/globals'; 2 | 3 | jest.unstable_mockModule('../../../../../scripts/modules/utils.js', () => ({ 4 | findProjectRoot: jest.fn(() => '/test/project/root'), 5 | log: jest.fn(), 6 | readJSON: jest.fn(), 7 | flattenTasksWithSubtasks: jest.fn(() => []), 8 | isEmpty: jest.fn(() => false) 9 | })); 10 | 11 | // Mock UI-affecting external libs to minimal no-op implementations 12 | jest.unstable_mockModule('chalk', () => ({ 13 | default: { 14 | white: Object.assign( 15 | jest.fn((text) => text), 16 | { 17 | bold: jest.fn((text) => text) 18 | } 19 | ), 20 | cyan: Object.assign( 21 | jest.fn((text) => text), 22 | { 23 | bold: jest.fn((text) => text) 24 | } 25 | ), 26 | green: Object.assign( 27 | jest.fn((text) => text), 28 | { 29 | bold: jest.fn((text) => text) 30 | } 31 | ), 32 | yellow: jest.fn((text) => text), 33 | red: jest.fn((text) => text), 34 | gray: jest.fn((text) => text), 35 | blue: Object.assign( 36 | jest.fn((text) => text), 37 | { 38 | bold: jest.fn((text) => text) 39 | } 40 | ), 41 | bold: jest.fn((text) => text) 42 | } 43 | })); 44 | 45 | jest.unstable_mockModule('boxen', () => ({ default: (text) => text })); 46 | 47 | jest.unstable_mockModule('inquirer', () => ({ 48 | default: { prompt: jest.fn() } 49 | })); 50 | 51 | jest.unstable_mockModule('cli-highlight', () => ({ 52 | highlight: (code) => code 53 | })); 54 | 55 | jest.unstable_mockModule('cli-table3', () => ({ 56 | default: jest.fn().mockImplementation(() => ({ 57 | push: jest.fn(), 58 | toString: jest.fn(() => '') 59 | })) 60 | })); 61 | 62 | jest.unstable_mockModule( 63 | '../../../../../scripts/modules/utils/contextGatherer.js', 64 | () => ({ 65 | ContextGatherer: jest.fn().mockImplementation(() => ({ 66 | gather: jest.fn().mockResolvedValue({ 67 | context: 'Gathered context', 68 | tokenBreakdown: { total: 500 } 69 | }), 70 | countTokens: jest.fn(() => 100) 71 | })) 72 | }) 73 | ); 74 | 75 | jest.unstable_mockModule( 76 | '../../../../../scripts/modules/utils/fuzzyTaskSearch.js', 77 | () => ({ 78 | FuzzyTaskSearch: jest.fn().mockImplementation(() => ({ 79 | findRelevantTasks: jest.fn(() => []), 80 | getTaskIds: jest.fn(() => []) 81 | })) 82 | }) 83 | ); 84 | 85 | jest.unstable_mockModule( 86 | '../../../../../scripts/modules/ai-services-unified.js', 87 | () => ({ 88 | generateTextService: jest.fn().mockResolvedValue({ 89 | mainResult: 90 | 'Test research result with ```javascript\nconsole.log("test");\n```', 91 | telemetryData: {} 92 | }) 93 | }) 94 | ); 95 | 96 | jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({ 97 | displayAiUsageSummary: jest.fn(), 98 | 
startLoadingIndicator: jest.fn(() => ({ stop: jest.fn() })), 99 | stopLoadingIndicator: jest.fn() 100 | })); 101 | 102 | jest.unstable_mockModule( 103 | '../../../../../scripts/modules/prompt-manager.js', 104 | () => ({ 105 | getPromptManager: jest.fn().mockReturnValue({ 106 | loadPrompt: jest.fn().mockResolvedValue({ 107 | systemPrompt: 'System prompt', 108 | userPrompt: 'User prompt' 109 | }) 110 | }) 111 | }) 112 | ); 113 | 114 | const { performResearch } = await import( 115 | '../../../../../scripts/modules/task-manager/research.js' 116 | ); 117 | 118 | // Import mocked modules for testing 119 | const utils = await import('../../../../../scripts/modules/utils.js'); 120 | const { ContextGatherer } = await import( 121 | '../../../../../scripts/modules/utils/contextGatherer.js' 122 | ); 123 | const { FuzzyTaskSearch } = await import( 124 | '../../../../../scripts/modules/utils/fuzzyTaskSearch.js' 125 | ); 126 | const { generateTextService } = await import( 127 | '../../../../../scripts/modules/ai-services-unified.js' 128 | ); 129 | 130 | describe('performResearch project root validation', () => { 131 | it('throws error when project root cannot be determined', async () => { 132 | // Mock findProjectRoot to return null 133 | utils.findProjectRoot.mockReturnValueOnce(null); 134 | 135 | await expect( 136 | performResearch('Test query', {}, {}, 'json', false) 137 | ).rejects.toThrow('Could not determine project root directory'); 138 | }); 139 | }); 140 | 141 | describe('performResearch tag-aware functionality', () => { 142 | let mockContextGatherer; 143 | let mockFuzzySearch; 144 | let mockReadJSON; 145 | let mockFlattenTasks; 146 | 147 | beforeEach(() => { 148 | // Reset all mocks 149 | jest.clearAllMocks(); 150 | 151 | // Set up default mocks 152 | utils.findProjectRoot.mockReturnValue('/test/project/root'); 153 | utils.readJSON.mockResolvedValue({ 154 | tasks: [ 155 | { id: 1, title: 'Task 1', description: 'Description 1' }, 156 | { id: 2, title: 'Task 2', description: 'Description 2' } 157 | ] 158 | }); 159 | utils.flattenTasksWithSubtasks.mockReturnValue([ 160 | { id: 1, title: 'Task 1', description: 'Description 1' }, 161 | { id: 2, title: 'Task 2', description: 'Description 2' } 162 | ]); 163 | 164 | // Set up ContextGatherer mock 165 | mockContextGatherer = { 166 | gather: jest.fn().mockResolvedValue({ 167 | context: 'Gathered context', 168 | tokenBreakdown: { total: 500 } 169 | }), 170 | countTokens: jest.fn(() => 100) 171 | }; 172 | ContextGatherer.mockImplementation(() => mockContextGatherer); 173 | 174 | // Set up FuzzyTaskSearch mock 175 | mockFuzzySearch = { 176 | findRelevantTasks: jest.fn(() => [ 177 | { id: 1, title: 'Task 1', description: 'Description 1' } 178 | ]), 179 | getTaskIds: jest.fn(() => ['1']) 180 | }; 181 | FuzzyTaskSearch.mockImplementation(() => mockFuzzySearch); 182 | 183 | // Store references for easier access 184 | mockReadJSON = utils.readJSON; 185 | mockFlattenTasks = utils.flattenTasksWithSubtasks; 186 | }); 187 | 188 | describe('tag parameter passing to ContextGatherer', () => { 189 | it('passes tag parameter to ContextGatherer constructor', async () => { 190 | const testTag = 'feature-branch'; 191 | 192 | await performResearch('Test query', { tag: testTag }, {}, 'json', false); 193 | 194 | expect(ContextGatherer).toHaveBeenCalledWith( 195 | '/test/project/root', 196 | testTag 197 | ); 198 | }); 199 | 200 | it('passes undefined tag when no tag is provided', async () => { 201 | await performResearch('Test query', {}, {}, 'json', false); 202 | 203 | 
expect(ContextGatherer).toHaveBeenCalledWith( 204 | '/test/project/root', 205 | undefined 206 | ); 207 | }); 208 | 209 | it('passes empty string tag when empty string is provided', async () => { 210 | await performResearch('Test query', { tag: '' }, {}, 'json', false); 211 | 212 | expect(ContextGatherer).toHaveBeenCalledWith('/test/project/root', ''); 213 | }); 214 | 215 | it('passes null tag when null is provided', async () => { 216 | await performResearch('Test query', { tag: null }, {}, 'json', false); 217 | 218 | expect(ContextGatherer).toHaveBeenCalledWith('/test/project/root', null); 219 | }); 220 | }); 221 | 222 | describe('tag-aware readJSON calls', () => { 223 | it('calls readJSON with correct tag parameter for task discovery', async () => { 224 | const testTag = 'development'; 225 | 226 | await performResearch('Test query', { tag: testTag }, {}, 'json', false); 227 | 228 | expect(mockReadJSON).toHaveBeenCalledWith( 229 | expect.stringContaining('tasks.json'), 230 | '/test/project/root', 231 | testTag 232 | ); 233 | }); 234 | 235 | it('calls readJSON with undefined tag when no tag provided', async () => { 236 | await performResearch('Test query', {}, {}, 'json', false); 237 | 238 | expect(mockReadJSON).toHaveBeenCalledWith( 239 | expect.stringContaining('tasks.json'), 240 | '/test/project/root', 241 | undefined 242 | ); 243 | }); 244 | 245 | it('calls readJSON with provided projectRoot and tag', async () => { 246 | const customProjectRoot = '/custom/project/root'; 247 | const testTag = 'production'; 248 | 249 | await performResearch( 250 | 'Test query', 251 | { 252 | projectRoot: customProjectRoot, 253 | tag: testTag 254 | }, 255 | {}, 256 | 'json', 257 | false 258 | ); 259 | 260 | expect(mockReadJSON).toHaveBeenCalledWith( 261 | expect.stringContaining('tasks.json'), 262 | customProjectRoot, 263 | testTag 264 | ); 265 | }); 266 | }); 267 | 268 | describe('context gathering behavior for different tags', () => { 269 | it('calls contextGatherer.gather with correct parameters', async () => { 270 | const options = { 271 | taskIds: ['1', '2'], 272 | filePaths: ['src/file.js'], 273 | customContext: 'Custom context', 274 | includeProjectTree: true, 275 | tag: 'feature-branch' 276 | }; 277 | 278 | await performResearch('Test query', options, {}, 'json', false); 279 | 280 | expect(mockContextGatherer.gather).toHaveBeenCalledWith({ 281 | tasks: expect.arrayContaining(['1', '2']), 282 | files: ['src/file.js'], 283 | customContext: 'Custom context', 284 | includeProjectTree: true, 285 | format: 'research', 286 | includeTokenCounts: true 287 | }); 288 | }); 289 | 290 | it('handles empty task discovery gracefully when readJSON fails', async () => { 291 | mockReadJSON.mockRejectedValueOnce(new Error('File not found')); 292 | 293 | const result = await performResearch( 294 | 'Test query', 295 | { tag: 'test-tag' }, 296 | {}, 297 | 'json', 298 | false 299 | ); 300 | 301 | // Should still succeed even if task discovery fails 302 | expect(result).toBeDefined(); 303 | expect(mockContextGatherer.gather).toHaveBeenCalledWith({ 304 | tasks: [], 305 | files: [], 306 | customContext: '', 307 | includeProjectTree: false, 308 | format: 'research', 309 | includeTokenCounts: true 310 | }); 311 | }); 312 | 313 | it('combines provided taskIds with auto-discovered tasks', async () => { 314 | const providedTaskIds = ['3', '4']; 315 | const autoDiscoveredIds = ['1', '2']; 316 | 317 | mockFuzzySearch.getTaskIds.mockReturnValue(autoDiscoveredIds); 318 | 319 | await performResearch( 320 | 'Test query', 321 | { 322 | 
taskIds: providedTaskIds, 323 | tag: 'feature-branch' 324 | }, 325 | {}, 326 | 'json', 327 | false 328 | ); 329 | 330 | expect(mockContextGatherer.gather).toHaveBeenCalledWith({ 331 | tasks: expect.arrayContaining([ 332 | ...providedTaskIds, 333 | ...autoDiscoveredIds 334 | ]), 335 | files: [], 336 | customContext: '', 337 | includeProjectTree: false, 338 | format: 'research', 339 | includeTokenCounts: true 340 | }); 341 | }); 342 | 343 | it('removes duplicate tasks when auto-discovered tasks overlap with provided tasks', async () => { 344 | const providedTaskIds = ['1', '2']; 345 | const autoDiscoveredIds = ['2', '3']; // '2' is duplicate 346 | 347 | mockFuzzySearch.getTaskIds.mockReturnValue(autoDiscoveredIds); 348 | 349 | await performResearch( 350 | 'Test query', 351 | { 352 | taskIds: providedTaskIds, 353 | tag: 'feature-branch' 354 | }, 355 | {}, 356 | 'json', 357 | false 358 | ); 359 | 360 | expect(mockContextGatherer.gather).toHaveBeenCalledWith({ 361 | tasks: ['1', '2', '3'], // Should include '3' but not duplicate '2' 362 | files: [], 363 | customContext: '', 364 | includeProjectTree: false, 365 | format: 'research', 366 | includeTokenCounts: true 367 | }); 368 | }); 369 | }); 370 | 371 | describe('tag-aware fuzzy search', () => { 372 | it('initializes FuzzyTaskSearch with flattened tasks from correct tag', async () => { 373 | const testTag = 'development'; 374 | const mockFlattenedTasks = [ 375 | { id: 1, title: 'Dev Task 1' }, 376 | { id: 2, title: 'Dev Task 2' } 377 | ]; 378 | 379 | mockFlattenTasks.mockReturnValue(mockFlattenedTasks); 380 | 381 | await performResearch('Test query', { tag: testTag }, {}, 'json', false); 382 | 383 | expect(mockFlattenTasks).toHaveBeenCalledWith( 384 | expect.arrayContaining([ 385 | expect.objectContaining({ id: 1 }), 386 | expect.objectContaining({ id: 2 }) 387 | ]) 388 | ); 389 | expect(FuzzyTaskSearch).toHaveBeenCalledWith( 390 | mockFlattenedTasks, 391 | 'research' 392 | ); 393 | }); 394 | 395 | it('calls fuzzy search with correct parameters', async () => { 396 | const testQuery = 'authentication implementation'; 397 | 398 | await performResearch( 399 | testQuery, 400 | { tag: 'feature-branch' }, 401 | {}, 402 | 'json', 403 | false 404 | ); 405 | 406 | expect(mockFuzzySearch.findRelevantTasks).toHaveBeenCalledWith( 407 | testQuery, 408 | { 409 | maxResults: 8, 410 | includeRecent: true, 411 | includeCategoryMatches: true 412 | } 413 | ); 414 | }); 415 | 416 | it('handles empty tasks data gracefully', async () => { 417 | mockReadJSON.mockResolvedValueOnce({ tasks: [] }); 418 | 419 | await performResearch( 420 | 'Test query', 421 | { tag: 'empty-tag' }, 422 | {}, 423 | 'json', 424 | false 425 | ); 426 | 427 | // Should not call FuzzyTaskSearch when no tasks exist 428 | expect(FuzzyTaskSearch).not.toHaveBeenCalled(); 429 | expect(mockContextGatherer.gather).toHaveBeenCalledWith({ 430 | tasks: [], 431 | files: [], 432 | customContext: '', 433 | includeProjectTree: false, 434 | format: 'research', 435 | includeTokenCounts: true 436 | }); 437 | }); 438 | 439 | it('handles null tasks data gracefully', async () => { 440 | mockReadJSON.mockResolvedValueOnce(null); 441 | 442 | await performResearch( 443 | 'Test query', 444 | { tag: 'null-tag' }, 445 | {}, 446 | 'json', 447 | false 448 | ); 449 | 450 | // Should not call FuzzyTaskSearch when data is null 451 | expect(FuzzyTaskSearch).not.toHaveBeenCalled(); 452 | }); 453 | }); 454 | 455 | describe('error handling for invalid tags', () => { 456 | it('continues execution when readJSON throws error for 
invalid tag', async () => { 457 | mockReadJSON.mockRejectedValueOnce(new Error('Tag not found')); 458 | 459 | const result = await performResearch( 460 | 'Test query', 461 | { tag: 'invalid-tag' }, 462 | {}, 463 | 'json', 464 | false 465 | ); 466 | 467 | // Should still succeed and return a result 468 | expect(result).toBeDefined(); 469 | expect(mockContextGatherer.gather).toHaveBeenCalled(); 470 | }); 471 | 472 | it('logs debug message when task discovery fails', async () => { 473 | const mockLog = { 474 | debug: jest.fn(), 475 | info: jest.fn(), 476 | warn: jest.fn(), 477 | error: jest.fn(), 478 | success: jest.fn() 479 | }; 480 | 481 | mockReadJSON.mockRejectedValueOnce(new Error('File not found')); 482 | 483 | await performResearch( 484 | 'Test query', 485 | { tag: 'error-tag' }, 486 | { mcpLog: mockLog }, 487 | 'json', 488 | false 489 | ); 490 | 491 | expect(mockLog.debug).toHaveBeenCalledWith( 492 | expect.stringContaining('Could not auto-discover tasks') 493 | ); 494 | }); 495 | 496 | it('handles ContextGatherer constructor errors gracefully', async () => { 497 | ContextGatherer.mockImplementationOnce(() => { 498 | throw new Error('Invalid tag provided'); 499 | }); 500 | 501 | await expect( 502 | performResearch('Test query', { tag: 'invalid-tag' }, {}, 'json', false) 503 | ).rejects.toThrow('Invalid tag provided'); 504 | }); 505 | 506 | it('handles ContextGatherer.gather errors gracefully', async () => { 507 | mockContextGatherer.gather.mockRejectedValueOnce( 508 | new Error('Gather failed') 509 | ); 510 | 511 | await expect( 512 | performResearch( 513 | 'Test query', 514 | { tag: 'gather-error-tag' }, 515 | {}, 516 | 'json', 517 | false 518 | ) 519 | ).rejects.toThrow('Gather failed'); 520 | }); 521 | }); 522 | 523 | describe('MCP integration with tags', () => { 524 | it('uses MCP logger when mcpLog is provided in context', async () => { 525 | const mockMCPLog = { 526 | debug: jest.fn(), 527 | info: jest.fn(), 528 | warn: jest.fn(), 529 | error: jest.fn(), 530 | success: jest.fn() 531 | }; 532 | 533 | mockReadJSON.mockRejectedValueOnce(new Error('Test error')); 534 | 535 | await performResearch( 536 | 'Test query', 537 | { tag: 'mcp-tag' }, 538 | { mcpLog: mockMCPLog }, 539 | 'json', 540 | false 541 | ); 542 | 543 | expect(mockMCPLog.debug).toHaveBeenCalledWith( 544 | expect.stringContaining('Could not auto-discover tasks') 545 | ); 546 | }); 547 | 548 | it('passes session to generateTextService when provided', async () => { 549 | const mockSession = { userId: 'test-user', env: {} }; 550 | 551 | await performResearch( 552 | 'Test query', 553 | { tag: 'session-tag' }, 554 | { session: mockSession }, 555 | 'json', 556 | false 557 | ); 558 | 559 | expect(generateTextService).toHaveBeenCalledWith( 560 | expect.objectContaining({ 561 | session: mockSession 562 | }) 563 | ); 564 | }); 565 | }); 566 | 567 | describe('output format handling with tags', () => { 568 | it('displays UI banner only in text format', async () => { 569 | const consoleSpy = jest.spyOn(console, 'log').mockImplementation(); 570 | 571 | await performResearch('Test query', { tag: 'ui-tag' }, {}, 'text', false); 572 | 573 | expect(consoleSpy).toHaveBeenCalledWith( 574 | expect.stringContaining('🔍 AI Research Query') 575 | ); 576 | 577 | consoleSpy.mockRestore(); 578 | }); 579 | 580 | it('does not display UI banner in json format', async () => { 581 | const consoleSpy = jest.spyOn(console, 'log').mockImplementation(); 582 | 583 | await performResearch('Test query', { tag: 'ui-tag' }, {}, 'json', false); 584 | 585 | 
expect(consoleSpy).not.toHaveBeenCalledWith( 586 | expect.stringContaining('🔍 AI Research Query') 587 | ); 588 | 589 | consoleSpy.mockRestore(); 590 | }); 591 | }); 592 | 593 | describe('comprehensive tag integration test', () => { 594 | it('performs complete research flow with tag-aware functionality', async () => { 595 | const testOptions = { 596 | taskIds: ['1', '2'], 597 | filePaths: ['src/main.js'], 598 | customContext: 'Testing tag integration', 599 | includeProjectTree: true, 600 | detailLevel: 'high', 601 | tag: 'integration-test', 602 | projectRoot: '/custom/root' 603 | }; 604 | 605 | const testContext = { 606 | session: { userId: 'test-user' }, 607 | mcpLog: { 608 | debug: jest.fn(), 609 | info: jest.fn(), 610 | warn: jest.fn(), 611 | error: jest.fn(), 612 | success: jest.fn() 613 | }, 614 | commandName: 'test-research', 615 | outputType: 'mcp' 616 | }; 617 | 618 | // Mock successful task discovery 619 | mockFuzzySearch.getTaskIds.mockReturnValue(['3', '4']); 620 | 621 | const result = await performResearch( 622 | 'Integration test query', 623 | testOptions, 624 | testContext, 625 | 'json', 626 | false 627 | ); 628 | 629 | // Verify ContextGatherer was initialized with correct tag 630 | expect(ContextGatherer).toHaveBeenCalledWith( 631 | '/custom/root', 632 | 'integration-test' 633 | ); 634 | 635 | // Verify readJSON was called with correct parameters 636 | expect(mockReadJSON).toHaveBeenCalledWith( 637 | expect.stringContaining('tasks.json'), 638 | '/custom/root', 639 | 'integration-test' 640 | ); 641 | 642 | // Verify context gathering was called with combined tasks 643 | expect(mockContextGatherer.gather).toHaveBeenCalledWith({ 644 | tasks: ['1', '2', '3', '4'], 645 | files: ['src/main.js'], 646 | customContext: 'Testing tag integration', 647 | includeProjectTree: true, 648 | format: 'research', 649 | includeTokenCounts: true 650 | }); 651 | 652 | // Verify AI service was called with session 653 | expect(generateTextService).toHaveBeenCalledWith( 654 | expect.objectContaining({ 655 | session: testContext.session, 656 | role: 'research' 657 | }) 658 | ); 659 | 660 | expect(result).toBeDefined(); 661 | }); 662 | }); 663 | }); 664 | ``` -------------------------------------------------------------------------------- /tests/unit/profiles/mcp-config-validation.test.js: -------------------------------------------------------------------------------- ```javascript 1 | import { RULE_PROFILES } from '../../../src/constants/profiles.js'; 2 | import { getRulesProfile } from '../../../src/utils/rule-transformer.js'; 3 | import path from 'path'; 4 | 5 | describe('MCP Configuration Validation', () => { 6 | describe('Profile MCP Configuration Properties', () => { 7 | const expectedMcpConfigurations = { 8 | amp: { 9 | shouldHaveMcp: true, 10 | expectedDir: '.vscode', 11 | expectedConfigName: 'settings.json', 12 | expectedPath: '.vscode/settings.json' 13 | }, 14 | claude: { 15 | shouldHaveMcp: true, 16 | expectedDir: '.', 17 | expectedConfigName: '.mcp.json', 18 | expectedPath: '.mcp.json' 19 | }, 20 | cline: { 21 | shouldHaveMcp: false, 22 | expectedDir: '.clinerules', 23 | expectedConfigName: null, 24 | expectedPath: null 25 | }, 26 | codex: { 27 | shouldHaveMcp: false, 28 | expectedDir: '.', 29 | expectedConfigName: null, 30 | expectedPath: null 31 | }, 32 | cursor: { 33 | shouldHaveMcp: true, 34 | expectedDir: '.cursor', 35 | expectedConfigName: 'mcp.json', 36 | expectedPath: '.cursor/mcp.json' 37 | }, 38 | gemini: { 39 | shouldHaveMcp: true, 40 | expectedDir: '.gemini', 41 | 
expectedConfigName: 'settings.json', 42 | expectedPath: '.gemini/settings.json' 43 | }, 44 | kiro: { 45 | shouldHaveMcp: true, 46 | expectedDir: '.kiro', 47 | expectedConfigName: 'settings/mcp.json', 48 | expectedPath: '.kiro/settings/mcp.json' 49 | }, 50 | opencode: { 51 | shouldHaveMcp: true, 52 | expectedDir: '.', 53 | expectedConfigName: 'opencode.json', 54 | expectedPath: 'opencode.json' 55 | }, 56 | roo: { 57 | shouldHaveMcp: true, 58 | expectedDir: '.roo', 59 | expectedConfigName: 'mcp.json', 60 | expectedPath: '.roo/mcp.json' 61 | }, 62 | trae: { 63 | shouldHaveMcp: false, 64 | expectedDir: '.trae', 65 | expectedConfigName: null, 66 | expectedPath: null 67 | }, 68 | vscode: { 69 | shouldHaveMcp: true, 70 | expectedDir: '.vscode', 71 | expectedConfigName: 'mcp.json', 72 | expectedPath: '.vscode/mcp.json' 73 | }, 74 | windsurf: { 75 | shouldHaveMcp: true, 76 | expectedDir: '.windsurf', 77 | expectedConfigName: 'mcp.json', 78 | expectedPath: '.windsurf/mcp.json' 79 | }, 80 | zed: { 81 | shouldHaveMcp: true, 82 | expectedDir: '.zed', 83 | expectedConfigName: 'settings.json', 84 | expectedPath: '.zed/settings.json' 85 | } 86 | }; 87 | 88 | Object.entries(expectedMcpConfigurations).forEach( 89 | ([profileName, expected]) => { 90 | test(`should have correct MCP configuration for ${profileName} profile`, () => { 91 | const profile = getRulesProfile(profileName); 92 | expect(profile).toBeDefined(); 93 | expect(profile.mcpConfig).toBe(expected.shouldHaveMcp); 94 | expect(profile.profileDir).toBe(expected.expectedDir); 95 | expect(profile.mcpConfigName).toBe(expected.expectedConfigName); 96 | expect(profile.mcpConfigPath).toBe(expected.expectedPath); 97 | }); 98 | } 99 | ); 100 | }); 101 | 102 | describe('MCP Configuration Path Consistency', () => { 103 | test('should ensure all profiles have consistent mcpConfigPath construction', () => { 104 | RULE_PROFILES.forEach((profileName) => { 105 | const profile = getRulesProfile(profileName); 106 | if (profile.mcpConfig !== false) { 107 | // For root directory profiles, path.join('.', filename) normalizes to just 'filename' 108 | // except for Claude which uses '.mcp.json' explicitly 109 | let expectedPath; 110 | if (profile.profileDir === '.') { 111 | if (profileName === 'claude') { 112 | expectedPath = '.mcp.json'; // Claude explicitly uses '.mcp.json' 113 | } else { 114 | expectedPath = profile.mcpConfigName; // Other root profiles normalize to just the filename 115 | } 116 | } else { 117 | expectedPath = `${profile.profileDir}/${profile.mcpConfigName}`; 118 | } 119 | expect(profile.mcpConfigPath).toBe(expectedPath); 120 | } 121 | }); 122 | }); 123 | 124 | test('should ensure no two profiles have the same MCP config path', () => { 125 | const mcpPaths = new Set(); 126 | RULE_PROFILES.forEach((profileName) => { 127 | const profile = getRulesProfile(profileName); 128 | if (profile.mcpConfig !== false) { 129 | expect(mcpPaths.has(profile.mcpConfigPath)).toBe(false); 130 | mcpPaths.add(profile.mcpConfigPath); 131 | } 132 | }); 133 | }); 134 | 135 | test('should ensure all MCP-enabled profiles use proper directory structure', () => { 136 | const rootProfiles = ['opencode', 'claude', 'codex']; // Profiles that use root directory for config 137 | const nestedConfigProfiles = ['kiro']; // Profiles that use nested directories for config 138 | 139 | RULE_PROFILES.forEach((profileName) => { 140 | const profile = getRulesProfile(profileName); 141 | if (profile.mcpConfig !== false) { 142 | if (rootProfiles.includes(profileName)) { 143 | // Root profiles have 
different patterns 144 | if (profileName === 'claude') { 145 | expect(profile.mcpConfigPath).toBe('.mcp.json'); 146 | } else { 147 | // Other root profiles normalize to just the filename (no ./ prefix) 148 | expect(profile.mcpConfigPath).toMatch(/^[\w_.]+$/); 149 | } 150 | } else if (nestedConfigProfiles.includes(profileName)) { 151 | // Profiles with nested config directories 152 | expect(profile.mcpConfigPath).toMatch( 153 | /^\.[\w-]+\/[\w-]+\/[\w_.]+$/ 154 | ); 155 | } else { 156 | // Other profiles should have config files in their specific directories 157 | expect(profile.mcpConfigPath).toMatch(/^\.[\w-]+\/[\w_.]+$/); 158 | } 159 | } 160 | }); 161 | }); 162 | 163 | test('should ensure all profiles have required MCP properties', () => { 164 | RULE_PROFILES.forEach((profileName) => { 165 | const profile = getRulesProfile(profileName); 166 | expect(profile).toHaveProperty('mcpConfig'); 167 | expect(profile).toHaveProperty('profileDir'); 168 | expect(profile).toHaveProperty('mcpConfigName'); 169 | expect(profile).toHaveProperty('mcpConfigPath'); 170 | }); 171 | }); 172 | }); 173 | 174 | describe('MCP Configuration File Names', () => { 175 | test('should use standard mcp.json for MCP-enabled profiles', () => { 176 | const standardMcpProfiles = ['cursor', 'roo', 'vscode', 'windsurf']; 177 | standardMcpProfiles.forEach((profileName) => { 178 | const profile = getRulesProfile(profileName); 179 | expect(profile.mcpConfigName).toBe('mcp.json'); 180 | }); 181 | }); 182 | 183 | test('should use custom settings.json for Gemini profile', () => { 184 | const profile = getRulesProfile('gemini'); 185 | expect(profile.mcpConfigName).toBe('settings.json'); 186 | }); 187 | 188 | test('should have null config name for non-MCP profiles', () => { 189 | // Only codex, cline, and trae profiles should have null config names 190 | const nonMcpProfiles = ['codex', 'cline', 'trae']; 191 | 192 | for (const profileName of nonMcpProfiles) { 193 | const profile = getRulesProfile(profileName); 194 | expect(profile.mcpConfigName).toBe(null); 195 | } 196 | }); 197 | }); 198 | 199 | describe('Profile Directory Structure', () => { 200 | test('should ensure each profile has a unique directory', () => { 201 | const profileDirs = new Set(); 202 | // Profiles that use root directory (can share the same directory) 203 | const rootProfiles = ['claude', 'codex', 'gemini', 'opencode']; 204 | // Profiles that intentionally share the same directory 205 | const sharedDirectoryProfiles = ['amp', 'vscode']; // Both use .vscode 206 | 207 | RULE_PROFILES.forEach((profileName) => { 208 | const profile = getRulesProfile(profileName); 209 | 210 | // Root profiles can share the root directory for rules 211 | if (rootProfiles.includes(profileName) && profile.rulesDir === '.') { 212 | expect(profile.rulesDir).toBe('.'); 213 | } 214 | 215 | // Profile directories should be unique (except for root profiles and shared directory profiles) 216 | if ( 217 | !rootProfiles.includes(profileName) && 218 | !sharedDirectoryProfiles.includes(profileName) 219 | ) { 220 | if (profile.profileDir !== '.') { 221 | expect(profileDirs.has(profile.profileDir)).toBe(false); 222 | profileDirs.add(profile.profileDir); 223 | } 224 | } else if (sharedDirectoryProfiles.includes(profileName)) { 225 | // Shared directory profiles should use .vscode 226 | expect(profile.profileDir).toBe('.vscode'); 227 | } 228 | }); 229 | }); 230 | 231 | test('should ensure profile directories follow expected naming convention', () => { 232 | // Profiles that use root directory for rules 
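// (their rule files may live in the project root even when profileDir is a dot-directory such as '.gemini')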
233 |       const rootRulesProfiles = ['claude', 'codex', 'gemini', 'opencode'];
234 | 
235 |       RULE_PROFILES.forEach((profileName) => {
236 |         const profile = getRulesProfile(profileName);
237 | 
238 |         // Some profiles use root directory for rules
239 |         if (
240 |           rootRulesProfiles.includes(profileName) &&
241 |           profile.rulesDir === '.'
242 |         ) {
243 |           expect(profile.rulesDir).toBe('.');
244 |         }
245 | 
246 |         // Profile directories (not rules directories) should follow the .name pattern
247 |         // unless they are root profiles with profileDir = '.'
248 |         if (profile.profileDir !== '.') {
249 |           expect(profile.profileDir).toMatch(/^\.[\w-]+$/);
250 |         }
251 |       });
252 |     });
253 |   });
254 | 
255 |   describe('MCP Configuration Creation Logic', () => {
256 |     test('should indicate which profiles require MCP configuration creation', () => {
257 |       // Get all profiles that have MCP configuration enabled
258 |       const mcpEnabledProfiles = RULE_PROFILES.filter((profileName) => {
259 |         const profile = getRulesProfile(profileName);
260 |         return profile.mcpConfig !== false;
261 |       });
262 | 
263 |       // Verify expected MCP-enabled profiles
264 |       expect(mcpEnabledProfiles).toContain('amp');
265 |       expect(mcpEnabledProfiles).toContain('claude');
266 |       expect(mcpEnabledProfiles).toContain('cursor');
267 |       expect(mcpEnabledProfiles).toContain('gemini');
268 |       expect(mcpEnabledProfiles).toContain('opencode');
269 |       expect(mcpEnabledProfiles).toContain('roo');
270 |       expect(mcpEnabledProfiles).toContain('vscode');
271 |       expect(mcpEnabledProfiles).toContain('windsurf');
272 |       expect(mcpEnabledProfiles).toContain('zed');
273 |       expect(mcpEnabledProfiles).not.toContain('cline');
274 |       expect(mcpEnabledProfiles).not.toContain('codex');
275 |       expect(mcpEnabledProfiles).not.toContain('trae');
276 |     });
277 | 
278 |     test('should provide all necessary information for MCP config creation', () => {
279 |       RULE_PROFILES.forEach((profileName) => {
280 |         const profile = getRulesProfile(profileName);
281 |         if (profile.mcpConfig !== false) {
282 |           expect(profile.mcpConfigPath).toBeDefined();
283 |           expect(typeof profile.mcpConfigPath).toBe('string');
284 |           expect(profile.mcpConfigPath.length).toBeGreaterThan(0);
285 |         }
286 |       });
287 |     });
288 |   });
289 | 
290 |   describe('MCP Configuration Path Usage Verification', () => {
291 |     test('should verify that rule transformer functions use mcpConfigPath correctly', () => {
292 |       RULE_PROFILES.forEach((profileName) => {
293 |         const profile = getRulesProfile(profileName);
294 |         if (profile.mcpConfig !== false) {
295 |           // Verify the path is properly formatted for path.join usage
296 |           expect(profile.mcpConfigPath.startsWith('/')).toBe(false);
297 | 
298 |           // Root directory profiles have different patterns
299 |           if (profile.profileDir === '.') {
300 |             if (profileName === 'claude') {
301 |               expect(profile.mcpConfigPath).toBe('.mcp.json');
302 |             } else {
303 |               // Other root profiles (opencode) normalize to just the filename
304 |               expect(profile.mcpConfigPath).toBe(profile.mcpConfigName);
305 |             }
306 |           } else {
307 |             // Non-root profiles should contain a directory separator
308 |             expect(profile.mcpConfigPath).toContain('/');
309 |           }
310 | 
311 |           // Verify it matches the expected pattern based on how path.join works
312 |           let expectedPath;
313 |           if (profile.profileDir === '.') {
314 |             if (profileName === 'claude') {
315 |               expectedPath = '.mcp.json'; // Claude explicitly uses '.mcp.json'
316 |             } else {
317 |               expectedPath = profile.mcpConfigName; // path.join('.', 'filename') normalizes to 'filename'
318 |             }
319 |           } else {
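// Nested profiles: the expected path combines the profile directory with the config file name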
320 | expectedPath = `${profile.profileDir}/${profile.mcpConfigName}`; 321 | } 322 | expect(profile.mcpConfigPath).toBe(expectedPath); 323 | } 324 | }); 325 | }); 326 | 327 | test('should verify that mcpConfigPath is properly constructed for path.join usage', () => { 328 | RULE_PROFILES.forEach((profileName) => { 329 | const profile = getRulesProfile(profileName); 330 | if (profile.mcpConfig !== false) { 331 | // Test that path.join works correctly with the mcpConfigPath 332 | const testProjectRoot = '/test/project'; 333 | const fullPath = path.join(testProjectRoot, profile.mcpConfigPath); 334 | 335 | // Should result in a proper absolute path 336 | // Note: path.join normalizes paths, so './opencode.json' becomes 'opencode.json' 337 | const normalizedExpectedPath = path.join( 338 | testProjectRoot, 339 | profile.mcpConfigPath 340 | ); 341 | expect(fullPath).toBe(normalizedExpectedPath); 342 | expect(fullPath).toContain(profile.mcpConfigName); 343 | } 344 | }); 345 | }); 346 | }); 347 | 348 | describe('MCP Configuration Function Integration', () => { 349 | test('should verify that setupMCPConfiguration receives the correct mcpConfigPath parameter', () => { 350 | RULE_PROFILES.forEach((profileName) => { 351 | const profile = getRulesProfile(profileName); 352 | if (profile.mcpConfig !== false) { 353 | // Verify the path structure is correct for the new function signature 354 | if (profile.profileDir === '.') { 355 | // Root directory profiles have special handling 356 | if (profileName === 'claude') { 357 | expect(profile.mcpConfigPath).toBe('.mcp.json'); 358 | } else { 359 | // Other root profiles normalize to just the filename 360 | expect(profile.mcpConfigPath).toBe(profile.mcpConfigName); 361 | } 362 | } else if (profileName === 'kiro') { 363 | // Kiro has a nested config structure 364 | const parts = profile.mcpConfigPath.split('/'); 365 | expect(parts).toHaveLength(3); // Should be profileDir/settings/mcp.json 366 | expect(parts[0]).toBe(profile.profileDir); 367 | expect(parts[1]).toBe('settings'); 368 | expect(parts[2]).toBe('mcp.json'); 369 | } else { 370 | // Non-root profiles should have profileDir/configName structure 371 | const parts = profile.mcpConfigPath.split('/'); 372 | expect(parts).toHaveLength(2); // Should be profileDir/configName 373 | expect(parts[0]).toBe(profile.profileDir); 374 | expect(parts[1]).toBe(profile.mcpConfigName); 375 | } 376 | } 377 | }); 378 | }); 379 | }); 380 | 381 | describe('MCP configuration validation', () => { 382 | const mcpProfiles = [ 383 | 'amp', 384 | 'claude', 385 | 'cursor', 386 | 'gemini', 387 | 'opencode', 388 | 'roo', 389 | 'windsurf', 390 | 'vscode', 391 | 'zed' 392 | ]; 393 | const nonMcpProfiles = ['codex', 'cline', 'trae']; 394 | const profilesWithLifecycle = ['claude']; 395 | const profilesWithoutLifecycle = ['codex']; 396 | 397 | test.each(mcpProfiles)( 398 | 'should have valid MCP config for %s profile', 399 | (profileName) => { 400 | const profile = getRulesProfile(profileName); 401 | expect(profile).toBeDefined(); 402 | expect(profile.mcpConfig).toBe(true); 403 | expect(profile.mcpConfigPath).toBeDefined(); 404 | expect(typeof profile.mcpConfigPath).toBe('string'); 405 | } 406 | ); 407 | 408 | test.each(nonMcpProfiles)( 409 | 'should not require MCP config for %s profile', 410 | (profileName) => { 411 | const profile = getRulesProfile(profileName); 412 | expect(profile).toBeDefined(); 413 | expect(profile.mcpConfig).toBe(false); 414 | } 415 | ); 416 | }); 417 | 418 | describe('Profile structure validation', () => { 419 | const 
allProfiles = [ 420 | 'amp', 421 | 'claude', 422 | 'cline', 423 | 'codex', 424 | 'cursor', 425 | 'gemini', 426 | 'opencode', 427 | 'roo', 428 | 'trae', 429 | 'vscode', 430 | 'windsurf', 431 | 'zed' 432 | ]; 433 | const profilesWithLifecycle = ['amp', 'claude']; 434 | const profilesWithPostConvertLifecycle = ['opencode']; 435 | const profilesWithoutLifecycle = ['codex']; 436 | 437 | test.each(allProfiles)( 438 | 'should have file mappings for %s profile', 439 | (profileName) => { 440 | const profile = getRulesProfile(profileName); 441 | expect(profile).toBeDefined(); 442 | expect(profile.fileMap).toBeDefined(); 443 | expect(typeof profile.fileMap).toBe('object'); 444 | expect(Object.keys(profile.fileMap).length).toBeGreaterThan(0); 445 | } 446 | ); 447 | 448 | test.each(profilesWithLifecycle)( 449 | 'should have file mappings and lifecycle functions for %s profile', 450 | (profileName) => { 451 | const profile = getRulesProfile(profileName); 452 | expect(profile).toBeDefined(); 453 | // Claude profile has both fileMap and lifecycle functions 454 | expect(profile.fileMap).toBeDefined(); 455 | expect(typeof profile.fileMap).toBe('object'); 456 | expect(Object.keys(profile.fileMap).length).toBeGreaterThan(0); 457 | expect(typeof profile.onAddRulesProfile).toBe('function'); 458 | expect(typeof profile.onRemoveRulesProfile).toBe('function'); 459 | expect(typeof profile.onPostConvertRulesProfile).toBe('function'); 460 | } 461 | ); 462 | 463 | test.each(profilesWithPostConvertLifecycle)( 464 | 'should have file mappings and post-convert lifecycle functions for %s profile', 465 | (profileName) => { 466 | const profile = getRulesProfile(profileName); 467 | expect(profile).toBeDefined(); 468 | // OpenCode profile has fileMap and post-convert lifecycle functions 469 | expect(profile.fileMap).toBeDefined(); 470 | expect(typeof profile.fileMap).toBe('object'); 471 | expect(Object.keys(profile.fileMap).length).toBeGreaterThan(0); 472 | expect(profile.onAddRulesProfile).toBeUndefined(); // OpenCode doesn't have onAdd 473 | expect(typeof profile.onRemoveRulesProfile).toBe('function'); 474 | expect(typeof profile.onPostConvertRulesProfile).toBe('function'); 475 | } 476 | ); 477 | 478 | test.each(profilesWithoutLifecycle)( 479 | 'should have file mappings without lifecycle functions for %s profile', 480 | (profileName) => { 481 | const profile = getRulesProfile(profileName); 482 | expect(profile).toBeDefined(); 483 | // Codex profile has fileMap but no lifecycle functions (simplified) 484 | expect(profile.fileMap).toBeDefined(); 485 | expect(typeof profile.fileMap).toBe('object'); 486 | expect(Object.keys(profile.fileMap).length).toBeGreaterThan(0); 487 | expect(profile.onAddRulesProfile).toBeUndefined(); 488 | expect(profile.onRemoveRulesProfile).toBeUndefined(); 489 | expect(profile.onPostConvertRulesProfile).toBeUndefined(); 490 | } 491 | ); 492 | }); 493 | }); 494 | ```
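The `mcpConfigPath` expectations in the suite above hinge on Node's `path.join` normalization. A minimal standalone sketch of that behavior (illustrative only; the `console.log` probes and sample values below are not part of the repository, and POSIX separators are assumed):

```javascript
// Sketch of the path.join normalization the mcp-config-validation tests rely on.
import path from 'path';

// Root-directory profiles: path.join('.', name) drops the leading './',
// which is why e.g. opencode's mcpConfigPath is expected to be just 'opencode.json'.
console.log(path.join('.', 'opencode.json')); // 'opencode.json'

// Nested profiles keep the 'profileDir/configName' shape.
console.log(path.join('.cursor', 'mcp.json')); // '.cursor/mcp.json'

// Joining with a project root gives the absolute location a config writer would use.
console.log(path.join('/test/project', '.cursor/mcp.json')); // '/test/project/.cursor/mcp.json'
```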