# eyaltoledano/claude-task-master
This is page 30 of 38. Use http://codebase.md/eyaltoledano/claude-task-master?page={x} to view the full context.

# Directory Structure

```
├── .changeset
│   ├── config.json
│   └── README.md
├── .claude
│   ├── agents
│   │   ├── task-checker.md
│   │   ├── task-executor.md
│   │   └── task-orchestrator.md
│   ├── commands
│   │   ├── dedupe.md
│   │   └── tm
│   │       ├── add-dependency
│   │       │   └── add-dependency.md
│   │       ├── add-subtask
│   │       │   ├── add-subtask.md
│   │       │   └── convert-task-to-subtask.md
│   │       ├── add-task
│   │       │   └── add-task.md
│   │       ├── analyze-complexity
│   │       │   └── analyze-complexity.md
│   │       ├── complexity-report
│   │       │   └── complexity-report.md
│   │       ├── expand
│   │       │   ├── expand-all-tasks.md
│   │       │   └── expand-task.md
│   │       ├── fix-dependencies
│   │       │   └── fix-dependencies.md
│   │       ├── generate
│   │       │   └── generate-tasks.md
│   │       ├── help.md
│   │       ├── init
│   │       │   ├── init-project-quick.md
│   │       │   └── init-project.md
│   │       ├── learn.md
│   │       ├── list
│   │       │   ├── list-tasks-by-status.md
│   │       │   ├── list-tasks-with-subtasks.md
│   │       │   └── list-tasks.md
│   │       ├── models
│   │       │   ├── setup-models.md
│   │       │   └── view-models.md
│   │       ├── next
│   │       │   └── next-task.md
│   │       ├── parse-prd
│   │       │   ├── parse-prd-with-research.md
│   │       │   └── parse-prd.md
│   │       ├── remove-dependency
│   │       │   └── remove-dependency.md
│   │       ├── remove-subtask
│   │       │   └── remove-subtask.md
│   │       ├── remove-subtasks
│   │       │   ├── remove-all-subtasks.md
│   │       │   └── remove-subtasks.md
│   │       ├── remove-task
│   │       │   └── remove-task.md
│   │       ├── set-status
│   │       │   ├── to-cancelled.md
│   │       │   ├── to-deferred.md
│   │       │   ├── to-done.md
│   │       │   ├── to-in-progress.md
│   │       │   ├── to-pending.md
│   │       │   └── to-review.md
│   │       ├── setup
│   │       │   ├── install-taskmaster.md
│   │       │   └── quick-install-taskmaster.md
│   │       ├── show
│   │       │   └── show-task.md
│   │       ├── status
│   │       │   └── project-status.md
│   │       ├── sync-readme
│   │       │   └── sync-readme.md
│   │       ├── tm-main.md
│   │       ├── update
│   │       │   ├── update-single-task.md
│   │       │   ├── update-task.md
│   │       │   └── update-tasks-from-id.md
│   │       ├── utils
│   │       │   └── analyze-project.md
│   │       ├── validate-dependencies
│   │       │   └── validate-dependencies.md
│   │       └── workflows
│   │           ├── auto-implement-tasks.md
│   │           ├── command-pipeline.md
│   │           └── smart-workflow.md
│   └── TM_COMMANDS_GUIDE.md
├── .coderabbit.yaml
├── .cursor
│   ├── mcp.json
│   └── rules
│       ├── ai_providers.mdc
│       ├── ai_services.mdc
│       ├── architecture.mdc
│       ├── changeset.mdc
│       ├── commands.mdc
│       ├── context_gathering.mdc
│       ├── cursor_rules.mdc
│       ├── dependencies.mdc
│       ├── dev_workflow.mdc
│       ├── git_workflow.mdc
│       ├── glossary.mdc
│       ├── mcp.mdc
│       ├── new_features.mdc
│       ├── self_improve.mdc
│       ├── tags.mdc
│       ├── taskmaster.mdc
│       ├── tasks.mdc
│       ├── telemetry.mdc
│       ├── test_workflow.mdc
│       ├── tests.mdc
│       ├── ui.mdc
│       └── utilities.mdc
├── .cursorignore
├── .env.example
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── enhancements---feature-requests.md
│   │   └── feedback.md
│   ├── PULL_REQUEST_TEMPLATE
│   │   ├── bugfix.md
│   │   ├── config.yml
│   │   ├── feature.md
│   │   └── integration.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── scripts
│   │   ├── auto-close-duplicates.mjs
│   │   ├── backfill-duplicate-comments.mjs
│   │   ├── check-pre-release-mode.mjs
│   │   ├── parse-metrics.mjs
│   │   ├── release.mjs
│   │   ├── tag-extension.mjs
│   │   └── utils.mjs
│   └── workflows
│       ├── auto-close-duplicates.yml
│       ├── backfill-duplicate-comments.yml
│       ├── ci.yml
│       ├── claude-dedupe-issues.yml
│       ├── claude-docs-trigger.yml
│       ├── claude-docs-updater.yml
│       ├── claude-issue-triage.yml
│       ├── claude.yml
│       ├── extension-ci.yml
│       ├── extension-release.yml
│       ├── log-issue-events.yml
│       ├── pre-release.yml
│       ├── release-check.yml
│       ├── release.yml
│       ├── update-models-md.yml
│       └── weekly-metrics-discord.yml
├── .gitignore
├── .kiro
│   ├── hooks
│   │   ├── tm-code-change-task-tracker.kiro.hook
│   │   ├── tm-complexity-analyzer.kiro.hook
│   │   ├── tm-daily-standup-assistant.kiro.hook
│   │   ├── tm-git-commit-task-linker.kiro.hook
│   │   ├── tm-pr-readiness-checker.kiro.hook
│   │   ├── tm-task-dependency-auto-progression.kiro.hook
│   │   └── tm-test-success-task-completer.kiro.hook
│   ├── settings
│   │   └── mcp.json
│   └── steering
│       ├── dev_workflow.md
│       ├── kiro_rules.md
│       ├── self_improve.md
│       ├── taskmaster_hooks_workflow.md
│       └── taskmaster.md
├── .manypkg.json
├── .mcp.json
├── .npmignore
├── .nvmrc
├── .taskmaster
│   ├── CLAUDE.md
│   ├── config.json
│   ├── docs
│   │   ├── MIGRATION-ROADMAP.md
│   │   ├── prd-tm-start.txt
│   │   ├── prd.txt
│   │   ├── README.md
│   │   ├── research
│   │   │   ├── 2025-06-14_how-can-i-improve-the-scope-up-and-scope-down-comm.md
│   │   │   ├── 2025-06-14_should-i-be-using-any-specific-libraries-for-this.md
│   │   │   ├── 2025-06-14_test-save-functionality.md
│   │   │   ├── 2025-06-14_test-the-fix-for-duplicate-saves-final-test.md
│   │   │   └── 2025-08-01_do-we-need-to-add-new-commands-or-can-we-just-weap.md
│   │   ├── task-template-importing-prd.txt
│   │   ├── test-prd.txt
│   │   └── tm-core-phase-1.txt
│   ├── reports
│   │   ├── task-complexity-report_cc-kiro-hooks.json
│   │   ├── task-complexity-report_test-prd-tag.json
│   │   ├── task-complexity-report_tm-core-phase-1.json
│   │   ├── task-complexity-report.json
│   │   └── tm-core-complexity.json
│   ├── state.json
│   ├── tasks
│   │   ├── task_001_tm-start.txt
│   │   ├── task_002_tm-start.txt
│   │   ├── task_003_tm-start.txt
│   │   ├── task_004_tm-start.txt
│   │   ├── task_007_tm-start.txt
│   │   └── tasks.json
│   └── templates
│       └── example_prd.txt
├── .vscode
│   ├── extensions.json
│   └── settings.json
├── apps
│   ├── cli
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── src
│   │   │   ├── commands
│   │   │   │   ├── auth.command.ts
│   │   │   │   ├── context.command.ts
│   │   │   │   ├── list.command.ts
│   │   │   │   ├── set-status.command.ts
│   │   │   │   ├── show.command.ts
│   │   │   │   └── start.command.ts
│   │   │   ├── index.ts
│   │   │   ├── ui
│   │   │   │   ├── components
│   │   │   │   │   ├── dashboard.component.ts
│   │   │   │   │   ├── header.component.ts
│   │   │   │   │   ├── index.ts
│   │   │   │   │   ├── next-task.component.ts
│   │   │   │   │   ├── suggested-steps.component.ts
│   │   │   │   │   └── task-detail.component.ts
│   │   │   │   └── index.ts
│   │   │   └── utils
│   │   │       ├── auto-update.ts
│   │   │       └── ui.ts
│   │   └── tsconfig.json
│   ├── docs
│   │   ├── archive
│   │   │   ├── ai-client-utils-example.mdx
│   │   │   ├── ai-development-workflow.mdx
│   │   │   ├── command-reference.mdx
│   │   │   ├── configuration.mdx
│   │   │   ├── cursor-setup.mdx
│   │   │   ├── examples.mdx
│   │   │   └── Installation.mdx
│   │   ├── best-practices
│   │   │   ├── advanced-tasks.mdx
│   │   │   ├── configuration-advanced.mdx
│   │   │   └── index.mdx
│   │   ├── capabilities
│   │   │   ├── cli-root-commands.mdx
│   │   │   ├── index.mdx
│   │   │   ├── mcp.mdx
│   │   │   └── task-structure.mdx
│   │   ├── CHANGELOG.md
│   │   ├── docs.json
│   │   ├── favicon.svg
│   │   ├── getting-started
│   │   │   ├── contribute.mdx
│   │   │   ├── faq.mdx
│   │   │   └── quick-start
│   │   │       ├── configuration-quick.mdx
│   │   │       ├── execute-quick.mdx
│   │   │       ├── installation.mdx
│   │   │       ├── moving-forward.mdx
│   │   │       ├── prd-quick.mdx
│   │   │       ├── quick-start.mdx
│   │   │       ├── requirements.mdx
│   │   │       ├── rules-quick.mdx
│   │   │       └── tasks-quick.mdx
│   │   ├── introduction.mdx
│   │   ├── licensing.md
│   │   ├── logo
│   │   │   ├── dark.svg
│   │   │   ├── light.svg
│   │   │   └── task-master-logo.png
│   │   ├── package.json
│   │   ├── README.md
│   │   ├── style.css
│   │   ├── vercel.json
│   │   └── whats-new.mdx
│   └── extension
│       ├── .vscodeignore
│       ├── assets
│       │   ├── banner.png
│       │   ├── icon-dark.svg
│       │   ├── icon-light.svg
│       │   ├── icon.png
│       │   ├── screenshots
│       │   │   ├── kanban-board.png
│       │   │   └── task-details.png
│       │   └── sidebar-icon.svg
│       ├── CHANGELOG.md
│       ├── components.json
│       ├── docs
│       │   ├── extension-CI-setup.md
│       │   └── extension-development-guide.md
│       ├── esbuild.js
│       ├── LICENSE
│       ├── package.json
│       ├── package.mjs
│       ├── package.publish.json
│       ├── README.md
│       ├── src
│       │   ├── components
│       │   │   ├── ConfigView.tsx
│       │   │   ├── constants.ts
│       │   │   ├── TaskDetails
│       │   │   │   ├── AIActionsSection.tsx
│       │   │   │   ├── DetailsSection.tsx
│       │   │   │   ├── PriorityBadge.tsx
│       │   │   │   ├── SubtasksSection.tsx
│       │   │   │   ├── TaskMetadataSidebar.tsx
│       │   │   │   └── useTaskDetails.ts
│       │   │   ├── TaskDetailsView.tsx
│       │   │   ├── TaskMasterLogo.tsx
│       │   │   └── ui
│       │   │       ├── badge.tsx
│       │   │       ├── breadcrumb.tsx
│       │   │       ├── button.tsx
│       │   │       ├── card.tsx
│       │   │       ├── collapsible.tsx
│       │   │       ├── CollapsibleSection.tsx
│       │   │       ├── dropdown-menu.tsx
│       │   │       ├── label.tsx
│       │   │       ├── scroll-area.tsx
│       │   │       ├── separator.tsx
│       │   │       ├── shadcn-io
│       │   │       │   └── kanban
│       │   │       │       └── index.tsx
│       │   │       └── textarea.tsx
│       │   ├── extension.ts
│       │   ├── index.ts
│       │   ├── lib
│       │   │   └── utils.ts
│       │   ├── services
│       │   │   ├── config-service.ts
│       │   │   ├── error-handler.ts
│       │   │   ├── notification-preferences.ts
│       │   │   ├── polling-service.ts
│       │   │   ├── polling-strategies.ts
│       │   │   ├── sidebar-webview-manager.ts
│       │   │   ├── task-repository.ts
│       │   │   ├── terminal-manager.ts
│       │   │   └── webview-manager.ts
│       │   ├── test
│       │   │   └── extension.test.ts
│       │   ├── utils
│       │   │   ├── configManager.ts
│       │   │   ├── connectionManager.ts
│       │   │   ├── errorHandler.ts
│       │   │   ├── event-emitter.ts
│       │   │   ├── logger.ts
│       │   │   ├── mcpClient.ts
│       │   │   ├── notificationPreferences.ts
│       │   │   └── task-master-api
│       │   │       ├── cache
│       │   │       │   └── cache-manager.ts
│       │   │       ├── index.ts
│       │   │       ├── mcp-client.ts
│       │   │       ├── transformers
│       │   │       │   └── task-transformer.ts
│       │   │       └── types
│       │   │           └── index.ts
│       │   └── webview
│       │       ├── App.tsx
│       │       ├── components
│       │       │   ├── AppContent.tsx
│       │       │   ├── EmptyState.tsx
│       │       │   ├── ErrorBoundary.tsx
│       │       │   ├── PollingStatus.tsx
│       │       │   ├── PriorityBadge.tsx
│       │       │   ├── SidebarView.tsx
│       │       │   ├── TagDropdown.tsx
│       │       │   ├── TaskCard.tsx
│       │       │   ├── TaskEditModal.tsx
│       │       │   ├── TaskMasterKanban.tsx
│       │       │   ├── ToastContainer.tsx
│       │       │   └── ToastNotification.tsx
│       │       ├── constants
│       │       │   └── index.ts
│       │       ├── contexts
│       │       │   └── VSCodeContext.tsx
│       │       ├── hooks
│       │       │   ├── useTaskQueries.ts
│       │       │   ├── useVSCodeMessages.ts
│       │       │   └── useWebviewHeight.ts
│       │       ├── index.css
│       │       ├── index.tsx
│       │       ├── providers
│       │       │   └── QueryProvider.tsx
│       │       ├── reducers
│       │       │   └── appReducer.ts
│       │       ├── sidebar.tsx
│       │       ├── types
│       │       │   └── index.ts
│       │       └── utils
│       │           ├── logger.ts
│       │           └── toast.ts
│       └── tsconfig.json
├── assets
│   ├── .windsurfrules
│   ├── AGENTS.md
│   ├── claude
│   │   ├── agents
│   │   │   ├── task-checker.md
│   │   │   ├── task-executor.md
│   │   │   └── task-orchestrator.md
│   │   ├── commands
│   │   │   └── tm
│   │   │       ├── add-dependency
│   │   │       │   └── add-dependency.md
│   │   │       ├── add-subtask
│   │   │       │   ├── add-subtask.md
│   │   │       │   └── convert-task-to-subtask.md
│   │   │       ├── add-task
│   │   │       │   └── add-task.md
│   │   │       ├── analyze-complexity
│   │   │       │   └── analyze-complexity.md
│   │   │       ├── clear-subtasks
│   │   │       │   ├── clear-all-subtasks.md
│   │   │       │   └── clear-subtasks.md
│   │   │       ├── complexity-report
│   │   │       │   └── complexity-report.md
│   │   │       ├── expand
│   │   │       │   ├── expand-all-tasks.md
│   │   │       │   └── expand-task.md
│   │   │       ├── fix-dependencies
│   │   │       │   └── fix-dependencies.md
│   │   │       ├── generate
│   │   │       │   └── generate-tasks.md
│   │   │       ├── help.md
│   │   │       ├── init
│   │   │       │   ├── init-project-quick.md
│   │   │       │   └── init-project.md
│   │   │       ├── learn.md
│   │   │       ├── list
│   │   │       │   ├── list-tasks-by-status.md
│   │   │       │   ├── list-tasks-with-subtasks.md
│   │   │       │   └── list-tasks.md
│   │   │       ├── models
│   │   │       │   ├── setup-models.md
│   │   │       │   └── view-models.md
│   │   │       ├── next
│   │   │       │   └── next-task.md
│   │   │       ├── parse-prd
│   │   │       │   ├── parse-prd-with-research.md
│   │   │       │   └── parse-prd.md
│   │   │       ├── remove-dependency
│   │   │       │   └── remove-dependency.md
│   │   │       ├── remove-subtask
│   │   │       │   └── remove-subtask.md
│   │   │       ├── remove-subtasks
│   │   │       │   ├── remove-all-subtasks.md
│   │   │       │   └── remove-subtasks.md
│   │   │       ├── remove-task
│   │   │       │   └── remove-task.md
│   │   │       ├── set-status
│   │   │       │   ├── to-cancelled.md
│   │   │       │   ├── to-deferred.md
│   │   │       │   ├── to-done.md
│   │   │       │   ├── to-in-progress.md
│   │   │       │   ├── to-pending.md
│   │   │       │   └── to-review.md
│   │   │       ├── setup
│   │   │       │   ├── install-taskmaster.md
│   │   │       │   └── quick-install-taskmaster.md
│   │   │       ├── show
│   │   │       │   └── show-task.md
│   │   │       ├── status
│   │   │       │   └── project-status.md
│   │   │       ├── sync-readme
│   │   │       │   └── sync-readme.md
│   │   │       ├── tm-main.md
│   │   │       ├── update
│   │   │       │   ├── update-single-task.md
│   │   │       │   ├── update-task.md
│   │   │       │   └── update-tasks-from-id.md
│   │   │       ├── utils
│   │   │       │   └── analyze-project.md
│   │   │       ├── validate-dependencies
│   │   │       │   └── validate-dependencies.md
│   │   │       └── workflows
│   │   │           ├── auto-implement-tasks.md
│   │   │           ├── command-pipeline.md
│   │   │           └── smart-workflow.md
│   │   └── TM_COMMANDS_GUIDE.md
│   ├── config.json
│   ├── env.example
│   ├── example_prd.txt
│   ├── gitignore
│   ├── kiro-hooks
│   │   ├── tm-code-change-task-tracker.kiro.hook
│   │   ├── tm-complexity-analyzer.kiro.hook
│   │   ├── tm-daily-standup-assistant.kiro.hook
│   │   ├── tm-git-commit-task-linker.kiro.hook
│   │   ├── tm-pr-readiness-checker.kiro.hook
│   │   ├── tm-task-dependency-auto-progression.kiro.hook
│   │   └── tm-test-success-task-completer.kiro.hook
│   ├── roocode
│   │   ├── .roo
│   │   │   ├── rules-architect
│   │   │   │   └── architect-rules
│   │   │   ├── rules-ask
│   │   │   │   └── ask-rules
│   │   │   ├── rules-code
│   │   │   │   └── code-rules
│   │   │   ├── rules-debug
│   │   │   │   └── debug-rules
│   │   │   ├── rules-orchestrator
│   │   │   │   └── orchestrator-rules
│   │   │   └── rules-test
│   │   │       └── test-rules
│   │   └── .roomodes
│   ├── rules
│   │   ├── cursor_rules.mdc
│   │   ├── dev_workflow.mdc
│   │   ├── self_improve.mdc
│   │   ├── taskmaster_hooks_workflow.mdc
│   │   └── taskmaster.mdc
│   └── scripts_README.md
├── bin
│   └── task-master.js
├── biome.json
├── CHANGELOG.md
├── CLAUDE.md
├── context
│   ├── chats
│   │   ├── add-task-dependencies-1.md
│   │   └── max-min-tokens.txt.md
│   ├── fastmcp-core.txt
│   ├── fastmcp-docs.txt
│   ├── MCP_INTEGRATION.md
│   ├── mcp-js-sdk-docs.txt
│   ├── mcp-protocol-repo.txt
│   ├── mcp-protocol-schema-03262025.json
│   └── mcp-protocol-spec.txt
├── CONTRIBUTING.md
├── docs
│   ├── CLI-COMMANDER-PATTERN.md
│   ├── command-reference.md
│   ├── configuration.md
│   ├── contributor-docs
│   │   └── testing-roo-integration.md
│   ├── cross-tag-task-movement.md
│   ├── examples
│   │   └── claude-code-usage.md
│   ├── examples.md
│   ├── licensing.md
│   ├── mcp-provider-guide.md
│   ├── mcp-provider.md
│   ├── migration-guide.md
│   ├── models.md
│   ├── providers
│   │   └── gemini-cli.md
│   ├── README.md
│   ├── scripts
│   │   └── models-json-to-markdown.js
│   ├── task-structure.md
│   └── tutorial.md
├── images
│   └── logo.png
├── index.js
├── jest.config.js
├── jest.resolver.cjs
├── LICENSE
├── llms-install.md
├── mcp-server
│   ├── server.js
│   └── src
│       ├── core
│       │   ├── __tests__
│       │   │   └── context-manager.test.js
│       │   ├── context-manager.js
│       │   ├── direct-functions
│       │   │   ├── add-dependency.js
│       │   │   ├── add-subtask.js
│       │   │   ├── add-tag.js
│       │   │   ├── add-task.js
│       │   │   ├── analyze-task-complexity.js
│       │   │   ├── cache-stats.js
│       │   │   ├── clear-subtasks.js
│       │   │   ├── complexity-report.js
│       │   │   ├── copy-tag.js
│       │   │   ├── create-tag-from-branch.js
│       │   │   ├── delete-tag.js
│       │   │   ├── expand-all-tasks.js
│       │   │   ├── expand-task.js
│       │   │   ├── fix-dependencies.js
│       │   │   ├── generate-task-files.js
│       │   │   ├── initialize-project.js
│       │   │   ├── list-tags.js
│       │   │   ├── list-tasks.js
│       │   │   ├── models.js
│       │   │   ├── move-task-cross-tag.js
│       │   │   ├── move-task.js
│       │   │   ├── next-task.js
│       │   │   ├── parse-prd.js
│       │   │   ├── remove-dependency.js
│       │   │   ├── remove-subtask.js
│       │   │   ├── remove-task.js
│       │   │   ├── rename-tag.js
│       │   │   ├── research.js
│       │   │   ├── response-language.js
│       │   │   ├── rules.js
│       │   │   ├── scope-down.js
│       │   │   ├── scope-up.js
│       │   │   ├── set-task-status.js
│       │   │   ├── show-task.js
│       │   │   ├── update-subtask-by-id.js
│       │   │   ├── update-task-by-id.js
│       │   │   ├── update-tasks.js
│       │   │   ├── use-tag.js
│       │   │   └── validate-dependencies.js
│       │   ├── task-master-core.js
│       │   └── utils
│       │       ├── env-utils.js
│       │       └── path-utils.js
│       ├── custom-sdk
│       │   ├── errors.js
│       │   ├── index.js
│       │   ├── json-extractor.js
│       │   ├── language-model.js
│       │   ├── message-converter.js
│       │   └── schema-converter.js
│       ├── index.js
│       ├── logger.js
│       ├── providers
│       │   └── mcp-provider.js
│       └── tools
│           ├── add-dependency.js
│           ├── add-subtask.js
│           ├── add-tag.js
│           ├── add-task.js
│           ├── analyze.js
│           ├── clear-subtasks.js
│           ├── complexity-report.js
│           ├── copy-tag.js
│           ├── delete-tag.js
│           ├── expand-all.js
│           ├── expand-task.js
│           ├── fix-dependencies.js
│           ├── generate.js
│           ├── get-operation-status.js
│           ├── get-task.js
│           ├── get-tasks.js
│           ├── index.js
│           ├── initialize-project.js
│           ├── list-tags.js
│           ├── models.js
│           ├── move-task.js
│           ├── next-task.js
│           ├── parse-prd.js
│           ├── remove-dependency.js
│           ├── remove-subtask.js
│           ├── remove-task.js
│           ├── rename-tag.js
│           ├── research.js
│           ├── response-language.js
│           ├── rules.js
│           ├── scope-down.js
│           ├── scope-up.js
│           ├── set-task-status.js
│           ├── update-subtask.js
│           ├── update-task.js
│           ├── update.js
│           ├── use-tag.js
│           ├── utils.js
│           └── validate-dependencies.js
├── mcp-test.js
├── output.json
├── package-lock.json
├── package.json
├── packages
│   ├── build-config
│   │   ├── CHANGELOG.md
│   │   ├── package.json
│   │   ├── src
│   │   │   └── tsdown.base.ts
│   │   └── tsconfig.json
│   └── tm-core
│       ├── .gitignore
│       ├── CHANGELOG.md
│       ├── docs
│       │   └── listTasks-architecture.md
│       ├── package.json
│       ├── POC-STATUS.md
│       ├── README.md
│       ├── src
│       │   ├── auth
│       │   │   ├── auth-manager.test.ts
│       │   │   ├── auth-manager.ts
│       │   │   ├── config.ts
│       │   │   ├── credential-store.test.ts
│       │   │   ├── credential-store.ts
│       │   │   ├── index.ts
│       │   │   ├── oauth-service.ts
│       │   │   ├── supabase-session-storage.ts
│       │   │   └── types.ts
│       │   ├── clients
│       │   │   ├── index.ts
│       │   │   └── supabase-client.ts
│       │   ├── config
│       │   │   ├── config-manager.spec.ts
│       │   │   ├── config-manager.ts
│       │   │   ├── index.ts
│       │   │   └── services
│       │   │       ├── config-loader.service.spec.ts
│       │   │       ├── config-loader.service.ts
│       │   │       ├── config-merger.service.spec.ts
│       │   │       ├── config-merger.service.ts
│       │   │       ├── config-persistence.service.spec.ts
│       │   │       ├── config-persistence.service.ts
│       │   │       ├── environment-config-provider.service.spec.ts
│       │   │       ├── environment-config-provider.service.ts
│       │   │       ├── index.ts
│       │   │       ├── runtime-state-manager.service.spec.ts
│       │   │       └── runtime-state-manager.service.ts
│       │   ├── constants
│       │   │   └── index.ts
│       │   ├── entities
│       │   │   └── task.entity.ts
│       │   ├── errors
│       │   │   ├── index.ts
│       │   │   └── task-master-error.ts
│       │   ├── executors
│       │   │   ├── base-executor.ts
│       │   │   ├── claude-executor.ts
│       │   │   ├── executor-factory.ts
│       │   │   ├── executor-service.ts
│       │   │   ├── index.ts
│       │   │   └── types.ts
│       │   ├── index.ts
│       │   ├── interfaces
│       │   │   ├── ai-provider.interface.ts
│       │   │   ├── configuration.interface.ts
│       │   │   ├── index.ts
│       │   │   └── storage.interface.ts
│       │   ├── logger
│       │   │   ├── factory.ts
│       │   │   ├── index.ts
│       │   │   └── logger.ts
│       │   ├── mappers
│       │   │   └── TaskMapper.ts
│       │   ├── parser
│       │   │   └── index.ts
│       │   ├── providers
│       │   │   ├── ai
│       │   │   │   ├── base-provider.ts
│       │   │   │   └── index.ts
│       │   │   └── index.ts
│       │   ├── repositories
│       │   │   ├── supabase-task-repository.ts
│       │   │   └── task-repository.interface.ts
│       │   ├── services
│       │   │   ├── index.ts
│       │   │   ├── organization.service.ts
│       │   │   ├── task-execution-service.ts
│       │   │   └── task-service.ts
│       │   ├── storage
│       │   │   ├── api-storage.ts
│       │   │   ├── file-storage
│       │   │   │   ├── file-operations.ts
│       │   │   │   ├── file-storage.ts
│       │   │   │   ├── format-handler.ts
│       │   │   │   ├── index.ts
│       │   │   │   └── path-resolver.ts
│       │   │   ├── index.ts
│       │   │   └── storage-factory.ts
│       │   ├── subpath-exports.test.ts
│       │   ├── task-master-core.ts
│       │   ├── types
│       │   │   ├── database.types.ts
│       │   │   ├── index.ts
│       │   │   └── legacy.ts
│       │   └── utils
│       │       ├── id-generator.ts
│       │       └── index.ts
│       ├── tests
│       │   ├── integration
│       │   │   └── list-tasks.test.ts
│       │   ├── mocks
│       │   │   └── mock-provider.ts
│       │   ├── setup.ts
│       │   └── unit
│       │       ├── base-provider.test.ts
│       │       ├── executor.test.ts
│       │       └── smoke.test.ts
│       ├── tsconfig.json
│       └── vitest.config.ts
├── README-task-master.md
├── README.md
├── scripts
│   ├── dev.js
│   ├── init.js
│   ├── modules
│   │   ├── ai-services-unified.js
│   │   ├── commands.js
│   │   ├── config-manager.js
│   │   ├── dependency-manager.js
│   │   ├── index.js
│   │   ├── prompt-manager.js
│   │   ├── supported-models.json
│   │   ├── sync-readme.js
│   │   ├── task-manager
│   │   │   ├── add-subtask.js
│   │   │   ├── add-task.js
│   │   │   ├── analyze-task-complexity.js
│   │   │   ├── clear-subtasks.js
│   │   │   ├── expand-all-tasks.js
│   │   │   ├── expand-task.js
│   │   │   ├── find-next-task.js
│   │   │   ├── generate-task-files.js
│   │   │   ├── is-task-dependent.js
│   │   │   ├── list-tasks.js
│   │   │   ├── migrate.js
│   │   │   ├── models.js
│   │   │   ├── move-task.js
│   │   │   ├── parse-prd
│   │   │   │   ├── index.js
│   │   │   │   ├── parse-prd-config.js
│   │   │   │   ├── parse-prd-helpers.js
│   │   │   │   ├── parse-prd-non-streaming.js
│   │   │   │   ├── parse-prd-streaming.js
│   │   │   │   └── parse-prd.js
│   │   │   ├── remove-subtask.js
│   │   │   ├── remove-task.js
│   │   │   ├── research.js
│   │   │   ├── response-language.js
│   │   │   ├── scope-adjustment.js
│   │   │   ├── set-task-status.js
│   │   │   ├── tag-management.js
│   │   │   ├── task-exists.js
│   │   │   ├── update-single-task-status.js
│   │   │   ├── update-subtask-by-id.js
│   │   │   ├── update-task-by-id.js
│   │   │   └── update-tasks.js
│   │   ├── task-manager.js
│   │   ├── ui.js
│   │   ├── update-config-tokens.js
│   │   ├── utils
│   │   │   ├── contextGatherer.js
│   │   │   ├── fuzzyTaskSearch.js
│   │   │   └── git-utils.js
│   │   └── utils.js
│   ├── task-complexity-report.json
│   ├── test-claude-errors.js
│   └── test-claude.js
├── src
│   ├── ai-providers
│   │   ├── anthropic.js
│   │   ├── azure.js
│   │   ├── base-provider.js
│   │   ├── bedrock.js
│   │   ├── claude-code.js
│   │   ├── custom-sdk
│   │   │   ├── claude-code
│   │   │   │   ├── errors.js
│   │   │   │   ├── index.js
│   │   │   │   ├── json-extractor.js
│   │   │   │   ├── language-model.js
│   │   │   │   ├── message-converter.js
│   │   │   │   └── types.js
│   │   │   └── grok-cli
│   │   │       ├── errors.js
│   │   │       ├── index.js
│   │   │       ├── json-extractor.js
│   │   │       ├── language-model.js
│   │   │       ├── message-converter.js
│   │   │       └── types.js
│   │   ├── gemini-cli.js
│   │   ├── google-vertex.js
│   │   ├── google.js
│   │   ├── grok-cli.js
│   │   ├── groq.js
│   │   ├── index.js
│   │   ├── ollama.js
│   │   ├── openai.js
│   │   ├── openrouter.js
│   │   ├── perplexity.js
│   │   └── xai.js
│   ├── constants
│   │   ├── commands.js
│   │   ├── paths.js
│   │   ├── profiles.js
│   │   ├── providers.js
│   │   ├── rules-actions.js
│   │   ├── task-priority.js
│   │   └── task-status.js
│   ├── profiles
│   │   ├── amp.js
│   │   ├── base-profile.js
│   │   ├── claude.js
│   │   ├── cline.js
│   │   ├── codex.js
│   │   ├── cursor.js
│   │   ├── gemini.js
│   │   ├── index.js
│   │   ├── kilo.js
│   │   ├── kiro.js
│   │   ├── opencode.js
│   │   ├── roo.js
│   │   ├── trae.js
│   │   ├── vscode.js
│   │   ├── windsurf.js
│   │   └── zed.js
│   ├── progress
│   │   ├── base-progress-tracker.js
│   │   ├── cli-progress-factory.js
│   │   ├── parse-prd-tracker.js
│   │   ├── progress-tracker-builder.js
│   │   └── tracker-ui.js
│   ├── prompts
│   │   ├── add-task.json
│   │   ├── analyze-complexity.json
│   │   ├── expand-task.json
│   │   ├── parse-prd.json
│   │   ├── README.md
│   │   ├── research.json
│   │   ├── schemas
│   │   │   ├── parameter.schema.json
│   │   │   ├── prompt-template.schema.json
│   │   │   ├── README.md
│   │   │   └── variant.schema.json
│   │   ├── update-subtask.json
│   │   ├── update-task.json
│   │   └── update-tasks.json
│   ├── provider-registry
│   │   └── index.js
│   ├── task-master.js
│   ├── ui
│   │   ├── confirm.js
│   │   ├── indicators.js
│   │   └── parse-prd.js
│   └── utils
│       ├── asset-resolver.js
│       ├── create-mcp-config.js
│       ├── format.js
│       ├── getVersion.js
│       ├── logger-utils.js
│       ├── manage-gitignore.js
│       ├── path-utils.js
│       ├── profiles.js
│       ├── rule-transformer.js
│       ├── stream-parser.js
│       └── timeout-manager.js
├── test-clean-tags.js
├── test-config-manager.js
├── test-prd.txt
├── test-tag-functions.js
├── test-version-check-full.js
├── test-version-check.js
├── tests
│   ├── e2e
│   │   ├── e2e_helpers.sh
│   │   ├── parse_llm_output.cjs
│   │   ├── run_e2e.sh
│   │   ├── run_fallback_verification.sh
│   │   └── test_llm_analysis.sh
│   ├── fixture
│   │   └── test-tasks.json
│   ├── fixtures
│   │   ├── .taskmasterconfig
│   │   ├── sample-claude-response.js
│   │   ├── sample-prd.txt
│   │   └── sample-tasks.js
│   ├── integration
│   │   ├── claude-code-optional.test.js
│   │   ├── cli
│   │   │   ├── commands.test.js
│   │   │   ├── complex-cross-tag-scenarios.test.js
│   │   │   └── move-cross-tag.test.js
│   │   ├── manage-gitignore.test.js
│   │   ├── mcp-server
│   │   │   └── direct-functions.test.js
│   │   ├── move-task-cross-tag.integration.test.js
│   │   ├── move-task-simple.integration.test.js
│   │   └── profiles
│   │       ├── amp-init-functionality.test.js
│   │       ├── claude-init-functionality.test.js
│   │       ├── cline-init-functionality.test.js
│   │       ├── codex-init-functionality.test.js
│   │       ├── cursor-init-functionality.test.js
│   │       ├── gemini-init-functionality.test.js
│   │       ├── opencode-init-functionality.test.js
│   │       ├── roo-files-inclusion.test.js
│   │       ├── roo-init-functionality.test.js
│   │       ├── rules-files-inclusion.test.js
│   │       ├── trae-init-functionality.test.js
│   │       ├── vscode-init-functionality.test.js
│   │       └── windsurf-init-functionality.test.js
│   ├── manual
│   │   ├── progress
│   │   │   ├── parse-prd-analysis.js
│   │   │   ├── test-parse-prd.js
│   │   │   └── TESTING_GUIDE.md
│   │   └── prompts
│   │       ├── prompt-test.js
│   │       └── README.md
│   ├── README.md
│   ├── setup.js
│   └── unit
│       ├── ai-providers
│       │   ├── claude-code.test.js
│       │   ├── custom-sdk
│       │   │   └── claude-code
│       │   │       └── language-model.test.js
│       │   ├── gemini-cli.test.js
│       │   ├── mcp-components.test.js
│       │   └── openai.test.js
│       ├── ai-services-unified.test.js
│       ├── commands.test.js
│       ├── config-manager.test.js
│       ├── config-manager.test.mjs
│       ├── dependency-manager.test.js
│       ├── init.test.js
│       ├── initialize-project.test.js
│       ├── kebab-case-validation.test.js
│       ├── manage-gitignore.test.js
│       ├── mcp
│       │   └── tools
│       │       ├── __mocks__
│       │       │   └── move-task.js
│       │       ├── add-task.test.js
│       │       ├── analyze-complexity.test.js
│       │       ├── expand-all.test.js
│       │       ├── get-tasks.test.js
│       │       ├── initialize-project.test.js
│       │       ├── move-task-cross-tag-options.test.js
│       │       ├── move-task-cross-tag.test.js
│       │       └── remove-task.test.js
│       ├── mcp-providers
│       │   ├── mcp-components.test.js
│       │   └── mcp-provider.test.js
│       ├── parse-prd.test.js
│       ├── profiles
│       │   ├── amp-integration.test.js
│       │   ├── claude-integration.test.js
│       │   ├── cline-integration.test.js
│       │   ├── codex-integration.test.js
│       │   ├── cursor-integration.test.js
│       │   ├── gemini-integration.test.js
│       │   ├── kilo-integration.test.js
│       │   ├── kiro-integration.test.js
│       │   ├── mcp-config-validation.test.js
│       │   ├── opencode-integration.test.js
│       │   ├── profile-safety-check.test.js
│       │   ├── roo-integration.test.js
│       │   ├── rule-transformer-cline.test.js
│       │   ├── rule-transformer-cursor.test.js
│       │   ├── rule-transformer-gemini.test.js
│       │   ├── rule-transformer-kilo.test.js
│       │   ├── rule-transformer-kiro.test.js
│       │   ├── rule-transformer-opencode.test.js
│       │   ├── rule-transformer-roo.test.js
│       │   ├── rule-transformer-trae.test.js
│       │   ├── rule-transformer-vscode.test.js
│       │   ├── rule-transformer-windsurf.test.js
│       │   ├── rule-transformer-zed.test.js
│       │   ├── rule-transformer.test.js
│       │   ├── selective-profile-removal.test.js
│       │   ├── subdirectory-support.test.js
│       │   ├── trae-integration.test.js
│       │   ├── vscode-integration.test.js
│       │   ├── windsurf-integration.test.js
│       │   └── zed-integration.test.js
│       ├── progress
│       │   └── base-progress-tracker.test.js
│       ├── prompt-manager.test.js
│       ├── prompts
│       │   └── expand-task-prompt.test.js
│       ├── providers
│       │   └── provider-registry.test.js
│       ├── scripts
│       │   └── modules
│       │       ├── commands
│       │       │   ├── move-cross-tag.test.js
│       │       │   └── README.md
│       │       ├── dependency-manager
│       │       │   ├── circular-dependencies.test.js
│       │       │   ├── cross-tag-dependencies.test.js
│       │       │   └── fix-dependencies-command.test.js
│       │       ├── task-manager
│       │       │   ├── add-subtask.test.js
│       │       │   ├── add-task.test.js
│       │       │   ├── analyze-task-complexity.test.js
│       │       │   ├── clear-subtasks.test.js
│       │       │   ├── complexity-report-tag-isolation.test.js
│       │       │   ├── expand-all-tasks.test.js
│       │       │   ├── expand-task.test.js
│       │       │   ├── find-next-task.test.js
│       │       │   ├── generate-task-files.test.js
│       │       │   ├── list-tasks.test.js
│       │       │   ├── move-task-cross-tag.test.js
│       │       │   ├── move-task.test.js
│       │       │   ├── parse-prd.test.js
│       │       │   ├── remove-subtask.test.js
│       │       │   ├── remove-task.test.js
│       │       │   ├── research.test.js
│       │       │   ├── scope-adjustment.test.js
│       │       │   ├── set-task-status.test.js
│       │       │   ├── setup.js
│       │       │   ├── update-single-task-status.test.js
│       │       │   ├── update-subtask-by-id.test.js
│       │       │   ├── update-task-by-id.test.js
│       │       │   └── update-tasks.test.js
│       │       ├── ui
│       │       │   └── cross-tag-error-display.test.js
│       │       └── utils-tag-aware-paths.test.js
│       ├── task-finder.test.js
│       ├── task-manager
│       │   ├── clear-subtasks.test.js
│       │   ├── move-task.test.js
│       │   ├── tag-boundary.test.js
│       │   └── tag-management.test.js
│       ├── task-master.test.js
│       ├── ui
│       │   └── indicators.test.js
│       ├── ui.test.js
│       ├── utils-strip-ansi.test.js
│       └── utils.test.js
├── tsconfig.json
├── tsdown.config.ts
└── turbo.json
```

# Files

--------------------------------------------------------------------------------
/scripts/modules/config-manager.js:
--------------------------------------------------------------------------------

```javascript
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
import chalk from 'chalk';
import { z } from 'zod';
import { AI_COMMAND_NAMES } from '../../src/constants/commands.js';
import {
	LEGACY_CONFIG_FILE,
	TASKMASTER_DIR
} from '../../src/constants/paths.js';
import {
	ALL_PROVIDERS,
	CUSTOM_PROVIDERS,
	CUSTOM_PROVIDERS_ARRAY,
	VALIDATED_PROVIDERS
} from '../../src/constants/providers.js';
import { findConfigPath } from '../../src/utils/path-utils.js';
import { findProjectRoot, isEmpty, log, resolveEnvVariable } from './utils.js';
import MODEL_MAP from './supported-models.json' with { type: 'json' };

// Calculate __dirname in ESM
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);

// Default configuration values (used if config file is missing or incomplete)
const DEFAULTS = {
	models: {
		main: {
			provider: 'anthropic',
			modelId: 'claude-sonnet-4-20250514',
			maxTokens: 64000,
			temperature: 0.2
		},
		research: {
			provider: 'perplexity',
			modelId: 'sonar',
			maxTokens: 8700,
			temperature: 0.1
		},
		fallback: {
			// Default fallback provider/model
			provider: 'anthropic',
			modelId: 'claude-3-7-sonnet-20250219',
			maxTokens: 120000, // Default parameters if fallback IS configured
			temperature: 0.2
		}
	},
	global: {
		logLevel: 'info',
		debug: false,
		defaultNumTasks: 10,
		defaultSubtasks: 5,
		defaultPriority: 'medium',
		projectName: 'Task Master',
		ollamaBaseURL: 'http://localhost:11434/api',
		bedrockBaseURL: 'https://bedrock.us-east-1.amazonaws.com',
		responseLanguage: 'English',
		enableCodebaseAnalysis: true
	},
	claudeCode: {},
	grokCli: {
		timeout: 120000,
		workingDirectory: null,
		defaultModel: 'grok-4-latest'
	}
};
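
// Illustrative example (not part of the original file): a partial
// .taskmaster/config.json only needs the fields it overrides; everything
// else falls back to DEFAULTS via the merge in _loadAndValidateConfig, e.g.
//
//   { "models": { "main": { "provider": "openai", "modelId": "gpt-4o" } },
//     "global": { "logLevel": "debug" } }
//
// keeps the research/fallback models and remaining global settings intact.
// ("openai"/"gpt-4o" are hypothetical values chosen for illustration.)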

// --- Internal Config Loading ---
let loadedConfig = null;
let loadedConfigRoot = null; // Track which root loaded the config

// Custom Error for configuration issues
class ConfigurationError extends Error {
	constructor(message) {
		super(message);
		this.name = 'ConfigurationError';
	}
}

function _loadAndValidateConfig(explicitRoot = null) {
	const defaults = DEFAULTS; // Use the defined defaults
	let rootToUse = explicitRoot;
	let configSource = explicitRoot
		? `explicit root (${explicitRoot})`
		: 'defaults (no root provided yet)';

	// ---> If no explicit root, TRY to find it <---
	if (!rootToUse) {
		rootToUse = findProjectRoot();
		if (rootToUse) {
			configSource = `found root (${rootToUse})`;
		} else {
			// No root found, use current working directory as fallback
			// This prevents infinite loops during initialization
			rootToUse = process.cwd();
			configSource = `current directory (${rootToUse}) - no project markers found`;
		}
	}
	// ---> End find project root logic <---

	// --- Find configuration file ---
	let configPath = null;
	let config = { ...defaults }; // Start with a shallow copy of defaults (nested sections are merged below)
	let configExists = false;

	// During initialization (no project markers), skip config file search entirely
	const hasProjectMarkers =
		fs.existsSync(path.join(rootToUse, TASKMASTER_DIR)) ||
		fs.existsSync(path.join(rootToUse, LEGACY_CONFIG_FILE));

	if (hasProjectMarkers) {
		// Only try to find config if we have project markers
		// This prevents the repeated warnings during init
		configPath = findConfigPath(null, { projectRoot: rootToUse });
	}

	if (configPath) {
		configExists = true;
		const isLegacy = configPath.endsWith(LEGACY_CONFIG_FILE);

		try {
			const rawData = fs.readFileSync(configPath, 'utf-8');
			const parsedConfig = JSON.parse(rawData);

			// Deep merge parsed config onto defaults
			config = {
				models: {
					main: { ...defaults.models.main, ...parsedConfig?.models?.main },
					research: {
						...defaults.models.research,
						...parsedConfig?.models?.research
					},
					fallback:
						parsedConfig?.models?.fallback?.provider &&
						parsedConfig?.models?.fallback?.modelId
							? { ...defaults.models.fallback, ...parsedConfig.models.fallback }
							: { ...defaults.models.fallback }
				},
				global: { ...defaults.global, ...parsedConfig?.global },
				claudeCode: { ...defaults.claudeCode, ...parsedConfig?.claudeCode },
				grokCli: { ...defaults.grokCli, ...parsedConfig?.grokCli }
			};
			configSource = `file (${configPath})`; // Update source info

			// Issue deprecation warning if using legacy config file
			if (isLegacy) {
				console.warn(
					chalk.yellow(
						`⚠️  DEPRECATION WARNING: Found configuration in legacy location '${configPath}'. Please migrate to .taskmaster/config.json. Run 'task-master migrate' to automatically migrate your project.`
					)
				);
			}

			// --- Validation (Warn if file content is invalid) ---
			// Use log.warn for consistency
			if (!validateProvider(config.models.main.provider)) {
				console.warn(
					chalk.yellow(
						`Warning: Invalid main provider "${config.models.main.provider}" in ${configPath}. Falling back to default.`
					)
				);
				config.models.main = { ...defaults.models.main };
			}
			if (!validateProvider(config.models.research.provider)) {
				console.warn(
					chalk.yellow(
						`Warning: Invalid research provider "${config.models.research.provider}" in ${configPath}. Falling back to default.`
					)
				);
				config.models.research = { ...defaults.models.research };
			}
			if (
				config.models.fallback?.provider &&
				!validateProvider(config.models.fallback.provider)
			) {
				console.warn(
					chalk.yellow(
						`Warning: Invalid fallback provider "${config.models.fallback.provider}" in ${configPath}. Fallback model configuration will be ignored.`
					)
				);
				config.models.fallback.provider = undefined;
				config.models.fallback.modelId = undefined;
			}
			if (config.claudeCode && !isEmpty(config.claudeCode)) {
				config.claudeCode = validateClaudeCodeSettings(config.claudeCode);
			}
		} catch (error) {
			// Use console.error for actual errors during parsing
			console.error(
				chalk.red(
					`Error reading or parsing ${configPath}: ${error.message}. Using default configuration.`
				)
			);
			config = { ...defaults }; // Reset to defaults on parse error
			configSource = `defaults (parse error at ${configPath})`;
		}
	} else {
		// Config file doesn't exist at the determined rootToUse.
		if (explicitRoot) {
			// Only warn if an explicit root was *expected*.
			console.warn(
				chalk.yellow(
					`Warning: Configuration file not found at provided project root (${explicitRoot}). Using default configuration. Run 'task-master models --setup' to configure.`
				)
			);
		} else {
			// Don't warn about missing config during initialization
			// Only warn if this looks like an existing project (has .taskmaster dir or legacy config marker)
			const hasTaskmasterDir = fs.existsSync(
				path.join(rootToUse, TASKMASTER_DIR)
			);
			const hasLegacyMarker = fs.existsSync(
				path.join(rootToUse, LEGACY_CONFIG_FILE)
			);

			if (hasTaskmasterDir || hasLegacyMarker) {
				console.warn(
					chalk.yellow(
						`Warning: Configuration file not found at derived root (${rootToUse}). Using defaults.`
					)
				);
			}
		}
		// Keep config as defaults
		config = { ...defaults };
		configSource = `defaults (no config file found at ${rootToUse})`;
	}

	return config;
}

/**
 * Gets the current configuration, loading it if necessary.
 * Handles MCP initialization context gracefully.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @param {boolean} forceReload - Force reloading the config file.
 * @returns {object} The loaded configuration object.
 */
function getConfig(explicitRoot = null, forceReload = false) {
	// Determine if a reload is necessary
	const needsLoad =
		!loadedConfig ||
		forceReload ||
		(explicitRoot && explicitRoot !== loadedConfigRoot);

	if (needsLoad) {
		const newConfig = _loadAndValidateConfig(explicitRoot); // _load handles null explicitRoot

		// Only update the global cache if loading was forced or if an explicit root
		// was provided (meaning we attempted to load a specific project's config).
		// We avoid caching the initial default load triggered without an explicitRoot.
		if (forceReload || explicitRoot) {
			loadedConfig = newConfig;
			loadedConfigRoot = explicitRoot; // Store the root used for this loaded config
		}
		return newConfig; // Return the newly loaded/default config
	}

	// If no load was needed, return the cached config
	return loadedConfig;
}
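
// Illustrative usage of the caching behaviour above (hypothetical paths,
// not part of the original file):
//
//   getConfig();               // resolves root via findProjectRoot(); NOT cached
//   getConfig('/repo');        // loads /repo's config and caches it
//   getConfig('/repo');        // cache hit: returns the same object
//   getConfig('/other');       // different root: reloads and re-caches
//   getConfig('/repo', true);  // forceReload: re-reads from disk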

/**
 * Validates if a provider name is supported.
 * Custom providers (azure, vertex, bedrock, openrouter, ollama) are always allowed.
 * Validated providers must exist in the MODEL_MAP from supported-models.json.
 * @param {string} providerName The name of the provider.
 * @returns {boolean} True if the provider is valid, false otherwise.
 */
function validateProvider(providerName) {
	// Custom providers are always allowed
	if (CUSTOM_PROVIDERS_ARRAY.includes(providerName)) {
		return true;
	}

	// Validated providers must exist in MODEL_MAP
	if (VALIDATED_PROVIDERS.includes(providerName)) {
		return !!(MODEL_MAP && MODEL_MAP[providerName]);
	}

	// Unknown providers are not allowed
	return false;
}
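
// Illustrative results (assuming 'ollama' is in CUSTOM_PROVIDERS_ARRAY and
// 'anthropic' is a validated provider present in MODEL_MAP; not part of the
// original file):
//
//   validateProvider('ollama');     // true  - custom providers always pass
//   validateProvider('anthropic');  // true  - validated and in MODEL_MAP
//   validateProvider('nonsense');   // false - unknown provider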

/**
 * Optional: Validates if a modelId is known for a given provider based on MODEL_MAP.
 * This is a non-strict validation; an unknown model might still be valid.
 * @param {string} providerName The name of the provider.
 * @param {string} modelId The model ID.
 * @returns {boolean} True if the modelId is in the map for the provider, false otherwise.
 */
function validateProviderModelCombination(providerName, modelId) {
	// If provider isn't even in our map, we can't validate the model
	if (!MODEL_MAP[providerName]) {
		return true; // Allow unknown providers or those without specific model lists
	}
	// If the provider is known, check if the model is in its list OR if the list is empty (meaning accept any)
	return (
		MODEL_MAP[providerName].length === 0 ||
		// Use .some() to check the 'id' property of objects in the array
		MODEL_MAP[providerName].some((modelObj) => modelObj.id === modelId)
	);
}

/**
 * Validates Claude Code AI provider custom settings
 * @param {object} settings The settings to validate
 * @returns {object} The validated settings
 */
function validateClaudeCodeSettings(settings) {
	// Define the base settings schema without commandSpecific first
	const BaseSettingsSchema = z.object({
		maxTurns: z.number().int().positive().optional(),
		customSystemPrompt: z.string().optional(),
		appendSystemPrompt: z.string().optional(),
		permissionMode: z
			.enum(['default', 'acceptEdits', 'plan', 'bypassPermissions'])
			.optional(),
		allowedTools: z.array(z.string()).optional(),
		disallowedTools: z.array(z.string()).optional(),
		mcpServers: z
			.record(
				z.string(),
				z.object({
					type: z.enum(['stdio', 'sse']).optional(),
					command: z.string(),
					args: z.array(z.string()).optional(),
					env: z.record(z.string()).optional(),
					url: z.string().url().optional(),
					headers: z.record(z.string()).optional()
				})
			)
			.optional()
	});

	// Define CommandSpecificSchema using the base schema
	const CommandSpecificSchema = z.record(
		z.enum(AI_COMMAND_NAMES),
		BaseSettingsSchema
	);

	// Define the full settings schema with commandSpecific
	const SettingsSchema = BaseSettingsSchema.extend({
		commandSpecific: CommandSpecificSchema.optional()
	});

	let validatedSettings = {};

	try {
		validatedSettings = SettingsSchema.parse(settings);
	} catch (error) {
		console.warn(
			chalk.yellow(
				`Warning: Invalid Claude Code settings in config: ${error.message}. Falling back to default.`
			)
		);

		validatedSettings = {};
	}

	return validatedSettings;
}
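
// Illustrative settings object that passes the schema above (not part of
// the original file; 'parse-prd' is assumed to be one of AI_COMMAND_NAMES):
//
//   validateClaudeCodeSettings({
//     maxTurns: 5,
//     permissionMode: 'acceptEdits',
//     allowedTools: ['Read', 'Write'],
//     commandSpecific: { 'parse-prd': { maxTurns: 10 } }
//   });
//
// Invalid input (e.g. maxTurns: -1) logs a warning and yields {} rather
// than throwing.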

// --- Claude Code Settings Getters ---

function getClaudeCodeSettings(explicitRoot = null, forceReload = false) {
	const config = getConfig(explicitRoot, forceReload);
	// Ensure Claude Code defaults are applied if Claude Code section is missing
	return { ...DEFAULTS.claudeCode, ...(config?.claudeCode || {}) };
}

function getClaudeCodeSettingsForCommand(
	commandName,
	explicitRoot = null,
	forceReload = false
) {
	const settings = getClaudeCodeSettings(explicitRoot, forceReload);
	const commandSpecific = settings?.commandSpecific || {};
	return { ...settings, ...commandSpecific[commandName] };
}

function getGrokCliSettings(explicitRoot = null, forceReload = false) {
	const config = getConfig(explicitRoot, forceReload);
	// Ensure Grok CLI defaults are applied if Grok CLI section is missing
	return { ...DEFAULTS.grokCli, ...(config?.grokCli || {}) };
}

function getGrokCliSettingsForCommand(
	commandName,
	explicitRoot = null,
	forceReload = false
) {
	const settings = getGrokCliSettings(explicitRoot, forceReload);
	const commandSpecific = settings?.commandSpecific || {};
	return { ...settings, ...commandSpecific[commandName] };
}
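
// Illustrative merge for the *ForCommand helpers above (hypothetical
// command name, not part of the original file): with settings
//
//   { maxTurns: 5, commandSpecific: { 'expand-task': { maxTurns: 10 } } }
//
// getClaudeCodeSettingsForCommand('expand-task') returns maxTurns: 10,
// while any other command keeps the base maxTurns: 5; spreading an
// undefined commandSpecific entry is a harmless no-op.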

// --- Role-Specific Getters ---

function getModelConfigForRole(role, explicitRoot = null) {
	const config = getConfig(explicitRoot);
	const roleConfig = config?.models?.[role];
	if (!roleConfig) {
		log(
			'warn',
			`No model configuration found for role: ${role}. Returning default.`
		);
		return DEFAULTS.models[role] || {};
	}
	return roleConfig;
}

function getMainProvider(explicitRoot = null) {
	return getModelConfigForRole('main', explicitRoot).provider;
}

function getMainModelId(explicitRoot = null) {
	return getModelConfigForRole('main', explicitRoot).modelId;
}

function getMainMaxTokens(explicitRoot = null) {
	// Directly return value from config (which includes defaults)
	return getModelConfigForRole('main', explicitRoot).maxTokens;
}

function getMainTemperature(explicitRoot = null) {
	// Directly return value from config
	return getModelConfigForRole('main', explicitRoot).temperature;
}

function getResearchProvider(explicitRoot = null) {
	return getModelConfigForRole('research', explicitRoot).provider;
}

/**
 * Check if codebase analysis feature flag is enabled across all sources
 * Priority: .env > MCP env > config.json
 * @param {object|null} session - MCP session object (optional)
 * @param {string|null} projectRoot - Project root path (optional)
 * @returns {boolean} True if codebase analysis is enabled
 */
function isCodebaseAnalysisEnabled(session = null, projectRoot = null) {
	// Priority 1: Environment variable
	const envFlag = resolveEnvVariable(
		'TASKMASTER_ENABLE_CODEBASE_ANALYSIS',
		session,
		projectRoot
	);
	if (envFlag !== null && envFlag !== undefined && envFlag !== '') {
		return envFlag.toLowerCase() === 'true' || envFlag === '1';
	}

	// Priority 2: MCP session environment
	if (session?.env?.TASKMASTER_ENABLE_CODEBASE_ANALYSIS) {
		const mcpFlag = session.env.TASKMASTER_ENABLE_CODEBASE_ANALYSIS;
		return mcpFlag.toLowerCase() === 'true' || mcpFlag === '1';
	}

	// Priority 3: Configuration file
	const globalConfig = getGlobalConfig(projectRoot);
	return globalConfig.enableCodebaseAnalysis !== false; // Default to true
}
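
// Illustrative precedence (assuming resolveEnvVariable consults
// process.env; hypothetical values, not part of the original file):
//
//   process.env.TASKMASTER_ENABLE_CODEBASE_ANALYSIS = 'false';
//   isCodebaseAnalysisEnabled();  // false - the env flag wins over config.json
//   delete process.env.TASKMASTER_ENABLE_CODEBASE_ANALYSIS;
//   isCodebaseAnalysisEnabled();  // falls through to config; defaults to true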

/**
 * Check if codebase analysis is available and enabled
 * @param {boolean} useResearch - Whether to check research provider or main provider
 * @param {string|null} projectRoot - Project root path (optional)
 * @param {object|null} session - MCP session object (optional)
 * @returns {boolean} True if codebase analysis is available and enabled
 */
function hasCodebaseAnalysis(
	useResearch = false,
	projectRoot = null,
	session = null
) {
	// First check if the feature is enabled
	if (!isCodebaseAnalysisEnabled(session, projectRoot)) {
		return false;
	}

	// Then check if a codebase analysis provider is configured
	const currentProvider = useResearch
		? getResearchProvider(projectRoot)
		: getMainProvider(projectRoot);

	return (
		currentProvider === CUSTOM_PROVIDERS.CLAUDE_CODE ||
		currentProvider === CUSTOM_PROVIDERS.GEMINI_CLI ||
		currentProvider === CUSTOM_PROVIDERS.GROK_CLI
	);
}

function getResearchModelId(explicitRoot = null) {
	return getModelConfigForRole('research', explicitRoot).modelId;
}

function getResearchMaxTokens(explicitRoot = null) {
	// Directly return value from config
	return getModelConfigForRole('research', explicitRoot).maxTokens;
}

function getResearchTemperature(explicitRoot = null) {
	// Directly return value from config
	return getModelConfigForRole('research', explicitRoot).temperature;
}

function getFallbackProvider(explicitRoot = null) {
	// Directly return value from config (will be undefined if not set)
	return getModelConfigForRole('fallback', explicitRoot).provider;
}

function getFallbackModelId(explicitRoot = null) {
	// Directly return value from config
	return getModelConfigForRole('fallback', explicitRoot).modelId;
}

function getFallbackMaxTokens(explicitRoot = null) {
	// Directly return value from config
	return getModelConfigForRole('fallback', explicitRoot).maxTokens;
}

function getFallbackTemperature(explicitRoot = null) {
	// Directly return value from config
	return getModelConfigForRole('fallback', explicitRoot).temperature;
}

// --- Global Settings Getters ---

function getGlobalConfig(explicitRoot = null) {
	const config = getConfig(explicitRoot);
	// Ensure global defaults are applied if global section is missing
	return { ...DEFAULTS.global, ...(config?.global || {}) };
}

function getLogLevel(explicitRoot = null) {
	// Directly return value from config
	return getGlobalConfig(explicitRoot).logLevel.toLowerCase();
}

function getDebugFlag(explicitRoot = null) {
	// Directly return value from config, ensure boolean
	return getGlobalConfig(explicitRoot).debug === true;
}

function getDefaultSubtasks(explicitRoot = null) {
	// Directly return value from config, ensure integer
	const val = getGlobalConfig(explicitRoot).defaultSubtasks;
	const parsedVal = parseInt(val, 10);
	return Number.isNaN(parsedVal) ? DEFAULTS.global.defaultSubtasks : parsedVal;
}

function getDefaultNumTasks(explicitRoot = null) {
	const val = getGlobalConfig(explicitRoot).defaultNumTasks;
	const parsedVal = parseInt(val, 10);
	return Number.isNaN(parsedVal) ? DEFAULTS.global.defaultNumTasks : parsedVal;
}

function getDefaultPriority(explicitRoot = null) {
	// Directly return value from config
	return getGlobalConfig(explicitRoot).defaultPriority;
}

function getProjectName(explicitRoot = null) {
	// Directly return value from config
	return getGlobalConfig(explicitRoot).projectName;
}

function getOllamaBaseURL(explicitRoot = null) {
	// Directly return value from config
	return getGlobalConfig(explicitRoot).ollamaBaseURL;
}

function getAzureBaseURL(explicitRoot = null) {
	// Directly return value from config
	return getGlobalConfig(explicitRoot).azureBaseURL;
}

function getBedrockBaseURL(explicitRoot = null) {
	// Directly return value from config
	return getGlobalConfig(explicitRoot).bedrockBaseURL;
}

/**
 * Gets the Google Cloud project ID for Vertex AI from configuration
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {string|null} The project ID or null if not configured
 */
function getVertexProjectId(explicitRoot = null) {
	// Return value from config
	return getGlobalConfig(explicitRoot).vertexProjectId;
}

/**
 * Gets the Google Cloud location for Vertex AI from configuration
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {string} The location or default value of "us-central1"
 */
function getVertexLocation(explicitRoot = null) {
	// Return value from config or default
	return getGlobalConfig(explicitRoot).vertexLocation || 'us-central1';
}

function getResponseLanguage(explicitRoot = null) {
	// Directly return value from config
	return getGlobalConfig(explicitRoot).responseLanguage;
}

function getCodebaseAnalysisEnabled(explicitRoot = null) {
	// Return boolean-safe value with default true
	return getGlobalConfig(explicitRoot).enableCodebaseAnalysis !== false;
}

/**
 * Gets model parameters (maxTokens, temperature) for a specific role,
 * considering model-specific overrides from supported-models.json.
 * @param {string} role - The role ('main', 'research', 'fallback').
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {{maxTokens: number, temperature: number}}
 */
function getParametersForRole(role, explicitRoot = null) {
	const roleConfig = getModelConfigForRole(role, explicitRoot);
	const roleMaxTokens = roleConfig.maxTokens;
	const roleTemperature = roleConfig.temperature;
	const modelId = roleConfig.modelId;
	const providerName = roleConfig.provider;

	let effectiveMaxTokens = roleMaxTokens; // Start with the role's default
	let effectiveTemperature = roleTemperature; // Start with the role's default

	try {
		// Find the model definition in MODEL_MAP
		const providerModels = MODEL_MAP[providerName];
		if (providerModels && Array.isArray(providerModels)) {
			const modelDefinition = providerModels.find((m) => m.id === modelId);

			// Check if a model-specific max_tokens is defined and valid
			if (
				modelDefinition &&
				typeof modelDefinition.max_tokens === 'number' &&
				modelDefinition.max_tokens > 0
			) {
				const modelSpecificMaxTokens = modelDefinition.max_tokens;
				// Use the minimum of the role default and the model specific limit
				effectiveMaxTokens = Math.min(roleMaxTokens, modelSpecificMaxTokens);
				log(
					'debug',
					`Applying model-specific max_tokens (${modelSpecificMaxTokens}) for ${modelId}. Effective limit: ${effectiveMaxTokens}`
				);
			} else {
				log(
					'debug',
					`No valid model-specific max_tokens override found for ${modelId}. Using role default: ${roleMaxTokens}`
				);
			}

			// Check if a model-specific temperature is defined
			if (
				modelDefinition &&
				typeof modelDefinition.temperature === 'number' &&
				modelDefinition.temperature >= 0 &&
				modelDefinition.temperature <= 1
			) {
				effectiveTemperature = modelDefinition.temperature;
				log(
					'debug',
					`Applying model-specific temperature (${modelDefinition.temperature}) for ${modelId}`
				);
			}
		} else if (providerName === CUSTOM_PROVIDERS.OPENROUTER) {
			// Custom OpenRouter models not in our list: cap with a conservative default
			const openrouterDefault = 32768;
			effectiveMaxTokens = Math.min(roleMaxTokens, openrouterDefault);
			log(
				'debug',
				`Custom OpenRouter model ${modelId} detected. Using conservative max_tokens: ${effectiveMaxTokens}`
			);
		} else {
			log(
				'debug',
				`No model definitions found for provider ${providerName} in MODEL_MAP. Using role default maxTokens: ${roleMaxTokens}`
			);
		}
	} catch (lookupError) {
		log(
			'warn',
			`Error looking up model-specific parameters for ${modelId}: ${lookupError.message}. Using role defaults.`
		);
		// Fallback to role defaults on error
		effectiveMaxTokens = roleMaxTokens;
		effectiveTemperature = roleTemperature;
	}

	return {
		maxTokens: effectiveMaxTokens,
		temperature: effectiveTemperature
	};
}

/**
 * Checks if the API key for a given provider is set in the environment.
 * Checks process.env first, then session.env if session is provided, then .env file if projectRoot provided.
 * @param {string} providerName - The name of the provider (e.g., 'openai', 'anthropic').
 * @param {object|null} [session=null] - The MCP session object (optional).
 * @param {string|null} [projectRoot=null] - The project root directory (optional, for .env file check).
 * @returns {boolean} True if the API key is set, false otherwise.
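 * @example
 * // Assuming OPENAI_API_KEY holds a real (non-placeholder) value:
 * // isApiKeySet('openai')  -> true
 * // isApiKeySet('ollama')  -> true  (no key required)
 * // isApiKeySet('nope')    -> false (unknown provider; a warning is logged)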
 */
function isApiKeySet(providerName, session = null, projectRoot = null) {
	// Providers that don't require API keys for authentication
	const providersWithoutApiKeys = [
		CUSTOM_PROVIDERS.OLLAMA,
		CUSTOM_PROVIDERS.BEDROCK,
		CUSTOM_PROVIDERS.MCP,
		CUSTOM_PROVIDERS.GEMINI_CLI,
		CUSTOM_PROVIDERS.GROK_CLI
	];

	if (providersWithoutApiKeys.includes(providerName?.toLowerCase())) {
		return true; // Indicate key status is effectively "OK"
	}

	// Claude Code doesn't require an API key
	if (providerName?.toLowerCase() === 'claude-code') {
		return true; // No API key needed
	}

	// Expected environment variable name for each provider
	const keyMap = {
		openai: 'OPENAI_API_KEY',
		anthropic: 'ANTHROPIC_API_KEY',
		google: 'GOOGLE_API_KEY',
		perplexity: 'PERPLEXITY_API_KEY',
		mistral: 'MISTRAL_API_KEY',
		azure: 'AZURE_OPENAI_API_KEY',
		openrouter: 'OPENROUTER_API_KEY',
		xai: 'XAI_API_KEY',
		groq: 'GROQ_API_KEY',
		vertex: 'GOOGLE_API_KEY', // Vertex uses the same key as Google
		'claude-code': 'CLAUDE_CODE_API_KEY', // Not actually used, but included for consistency
		bedrock: 'AWS_ACCESS_KEY_ID' // Bedrock uses AWS credentials
		// Add other providers as needed
	};

	const providerKey = providerName?.toLowerCase();
	if (!providerKey || !keyMap[providerKey]) {
		log('warn', `Unknown provider name: ${providerName} in isApiKeySet check.`);
		return false;
	}

	const envVarName = keyMap[providerKey];
	const apiKeyValue = resolveEnvVariable(envVarName, session, projectRoot);

	// Check that the key exists, is not empty, and is not a placeholder
	return Boolean(
		apiKeyValue &&
			apiKeyValue.trim() !== '' &&
			!/YOUR_.*_API_KEY_HERE/.test(apiKeyValue) && // General placeholder check
			!apiKeyValue.includes('KEY_HERE') // Another common placeholder pattern
	);
}

/**
 * Checks the API key status within .cursor/mcp.json for a given provider.
 * Reads the mcp.json file, finds the taskmaster-ai server config, and checks the relevant env var.
 * @param {string} providerName The name of the provider.
 * @param {string|null} projectRoot - Optional explicit path to the project root.
 * @returns {boolean} True if the key exists and is not a placeholder, false otherwise.
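 * @example
 * // True only when .cursor/mcp.json defines a non-placeholder key, e.g.:
 * // getMcpApiKeyStatus('anthropic', '/my/project')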
 */
function getMcpApiKeyStatus(providerName, projectRoot = null) {
	const rootDir = projectRoot || findProjectRoot(); // Use existing root finding
	if (!rootDir) {
		console.warn(
			chalk.yellow('Warning: Could not find project root to check mcp.json.')
		);
		return false; // Cannot check without root
	}
	const mcpConfigPath = path.join(rootDir, '.cursor', 'mcp.json');

	if (!fs.existsSync(mcpConfigPath)) {
		return false; // File doesn't exist; a missing mcp.json simply means no keys configured
	}

	try {
		const mcpConfigRaw = fs.readFileSync(mcpConfigPath, 'utf-8');
		const mcpConfig = JSON.parse(mcpConfigRaw);

		const mcpEnv =
			mcpConfig?.mcpServers?.['task-master-ai']?.env ||
			mcpConfig?.mcpServers?.['taskmaster-ai']?.env;
		if (!mcpEnv) {
			return false;
		}

		let apiKeyToCheck = null;

		// Placeholder detection is handled by the /KEY_HERE$/ regex below,
		// so only the env var to inspect varies per provider.
		switch (providerName) {
			case 'anthropic':
				apiKeyToCheck = mcpEnv.ANTHROPIC_API_KEY;
				break;
			case 'openai':
				apiKeyToCheck = mcpEnv.OPENAI_API_KEY;
				break;
			case 'openrouter':
				apiKeyToCheck = mcpEnv.OPENROUTER_API_KEY;
				break;
			case 'google':
				apiKeyToCheck = mcpEnv.GOOGLE_API_KEY;
				break;
			case 'perplexity':
				apiKeyToCheck = mcpEnv.PERPLEXITY_API_KEY;
				break;
			case 'xai':
				apiKeyToCheck = mcpEnv.XAI_API_KEY;
				break;
			case 'groq':
				apiKeyToCheck = mcpEnv.GROQ_API_KEY;
				break;
			case 'ollama':
			case 'claude-code':
				return true; // No key needed
			case 'mistral':
				apiKeyToCheck = mcpEnv.MISTRAL_API_KEY;
				break;
			case 'azure':
				apiKeyToCheck = mcpEnv.AZURE_OPENAI_API_KEY;
				break;
			case 'vertex':
				apiKeyToCheck = mcpEnv.GOOGLE_API_KEY; // Vertex uses the Google API key
				break;
			case 'bedrock':
				apiKeyToCheck = mcpEnv.AWS_ACCESS_KEY_ID; // Bedrock uses AWS credentials
				break;
			default:
				return false; // Unknown provider
		}

		return !!apiKeyToCheck && !/KEY_HERE$/.test(apiKeyToCheck);
	} catch (error) {
		console.error(
			chalk.red(`Error reading or parsing .cursor/mcp.json: ${error.message}`)
		);
		return false;
	}
}

/**
 * Gets a list of available models based on the MODEL_MAP.
 * @returns {Array<{id: string, name: string, provider: string, swe_score: number|null, cost_per_1m_tokens: {input: number|null, output: number|null}|null, allowed_roles: string[]}>}
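 * @example
 * // A returned entry might look like (values illustrative, not from the map):
 * // { id: 'gpt-4o', name: 'GPT-4o', provider: 'openai', swe_score: null,
 * //   cost_per_1m_tokens: { input: null, output: null },
 * //   allowed_roles: ['main', 'fallback'], max_tokens: 16384 }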
 */
function getAvailableModels() {
	const available = [];
	for (const [provider, models] of Object.entries(MODEL_MAP)) {
		if (models.length > 0) {
			models
				.filter((modelObj) => Boolean(modelObj.supported))
				.forEach((modelObj) => {
					// Basic name generation - can be improved
					const modelId = modelObj.id;
					const sweScore = modelObj.swe_score;
					const cost = modelObj.cost_per_1m_tokens;
					const allowedRoles = modelObj.allowed_roles || ['main', 'fallback'];
					const nameParts = modelId
						.split('-')
						.map((p) => p.charAt(0).toUpperCase() + p.slice(1));
					// Handle specific known names better if needed
					let name = nameParts.join(' ');
					if (modelId === 'claude-3.5-sonnet-20240620')
						name = 'Claude 3.5 Sonnet';
					if (modelId === 'claude-3-7-sonnet-20250219')
						name = 'Claude 3.7 Sonnet';
					if (modelId === 'gpt-4o') name = 'GPT-4o';
					if (modelId === 'gpt-4-turbo') name = 'GPT-4 Turbo';
					if (modelId === 'sonar-pro') name = 'Perplexity Sonar Pro';
					if (modelId === 'sonar-mini') name = 'Perplexity Sonar Mini';

					available.push({
						id: modelId,
						name: name,
						provider: provider,
						swe_score: sweScore,
						cost_per_1m_tokens: cost,
						allowed_roles: allowedRoles,
						max_tokens: modelObj.max_tokens
					});
				});
		} else {
			// Providers with no enumerated models (e.g. ollama): add a generic placeholder entry
			available.push({
				id: `[${provider}-any]`,
				name: `Any (${provider})`,
				provider: provider
			});
		}
	}
	return available;
}

/**
 * Writes the configuration object to the file.
 * @param {Object} config The configuration object to write.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {boolean} True if successful, false otherwise.
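 * @example
 * // Read-modify-write round trip (sketch):
 * // const cfg = getConfig();
 * // cfg.global.defaultPriority = 'high';
 * // writeConfig(cfg); // persists to <root>/.taskmaster/config.json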
 */
function writeConfig(config, explicitRoot = null) {
	// Determine the root path, mirroring the logic in _loadAndValidateConfig
	let rootPath = explicitRoot;
	if (explicitRoot === null || explicitRoot === undefined) {
		const foundRoot = findProjectRoot();
		if (!foundRoot) {
			console.error(
				chalk.red(
					'Error: Could not determine project root. Configuration not saved.'
				)
			);
			return false;
		}
		rootPath = foundRoot;
	}

	// Use new config location: .taskmaster/config.json
	const taskmasterDir = path.join(rootPath, '.taskmaster');
	const configPath = path.join(taskmasterDir, 'config.json');

	try {
		// Ensure .taskmaster directory exists
		if (!fs.existsSync(taskmasterDir)) {
			fs.mkdirSync(taskmasterDir, { recursive: true });
		}

		fs.writeFileSync(configPath, JSON.stringify(config, null, 2));
		loadedConfig = config; // Update the cache after successful write
		return true;
	} catch (error) {
		console.error(
			chalk.red(
				`Error writing configuration to ${configPath}: ${error.message}`
			)
		);
		return false;
	}
}

/**
 * Checks if a configuration file exists at the project root (new or legacy location)
 * @param {string|null} explicitRoot - Optional explicit path to the project root
 * @returns {boolean} True if the file exists, false otherwise
 */
function isConfigFilePresent(explicitRoot = null) {
	return findConfigPath(null, { projectRoot: explicitRoot }) !== null;
}

/**
 * Gets the user ID from the configuration.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {string|null} The user ID or null if not found.
 */
function getUserId(explicitRoot = null) {
	const config = getConfig(explicitRoot);
	if (!config.global) {
		config.global = {}; // Ensure global object exists
	}
	if (!config.global.userId) {
		config.global.userId = '1234567890'; // Default anonymous user ID
		// Persist the updated config; writeConfig resolves the path from
		// explicitRoot the same way getConfig does.
		const success = writeConfig(config, explicitRoot);
		if (!success) {
			// Proceed with the in-memory default, but surface the failure.
			log(
				'warn',
				'Failed to write updated configuration with new userId. Please let the developers know.'
			);
		}
	}
	return config.global.userId;
}

/**
 * Gets a list of all known provider names (both validated and custom).
 * @returns {string[]} An array of all provider names.
 */
function getAllProviders() {
	return ALL_PROVIDERS;
}

function getBaseUrlForRole(role, explicitRoot = null) {
	const roleConfig = getModelConfigForRole(role, explicitRoot);
	if (roleConfig && typeof roleConfig.baseURL === 'string') {
		return roleConfig.baseURL;
	}
	const provider = roleConfig?.provider;
	if (provider) {
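		// e.g. provider 'azure' -> AZURE_BASE_URL, resolved from the environment
		// or the project's .env file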
		const envVarName = `${provider.toUpperCase()}_BASE_URL`;
		return resolveEnvVariable(envVarName, null, explicitRoot);
	}
	return undefined;
}

// Export the providers without API keys array for use in other modules
export const providersWithoutApiKeys = [
	CUSTOM_PROVIDERS.OLLAMA,
	CUSTOM_PROVIDERS.BEDROCK,
	CUSTOM_PROVIDERS.GEMINI_CLI,
	CUSTOM_PROVIDERS.GROK_CLI,
	CUSTOM_PROVIDERS.MCP
];

export {
	// Core config access
	getConfig,
	writeConfig,
	ConfigurationError,
	isConfigFilePresent,
	// Claude Code settings
	getClaudeCodeSettings,
	getClaudeCodeSettingsForCommand,
	// Grok CLI settings
	getGrokCliSettings,
	getGrokCliSettingsForCommand,
	// Validation
	validateProvider,
	validateProviderModelCombination,
	validateClaudeCodeSettings,
	VALIDATED_PROVIDERS,
	CUSTOM_PROVIDERS,
	ALL_PROVIDERS,
	MODEL_MAP,
	getAvailableModels,
	// Role-specific getters (No env var overrides)
	getMainProvider,
	getMainModelId,
	getMainMaxTokens,
	getMainTemperature,
	getResearchProvider,
	getResearchModelId,
	getResearchMaxTokens,
	getResearchTemperature,
	hasCodebaseAnalysis,
	getFallbackProvider,
	getFallbackModelId,
	getFallbackMaxTokens,
	getFallbackTemperature,
	getBaseUrlForRole,
	// Global setting getters (No env var overrides)
	getLogLevel,
	getDebugFlag,
	getDefaultNumTasks,
	getDefaultSubtasks,
	getDefaultPriority,
	getProjectName,
	getOllamaBaseURL,
	getAzureBaseURL,
	getBedrockBaseURL,
	getResponseLanguage,
	getCodebaseAnalysisEnabled,
	isCodebaseAnalysisEnabled,
	getParametersForRole,
	getUserId,
	// API Key Checkers (still relevant)
	isApiKeySet,
	getMcpApiKeyStatus,
	// ADD: Function to get all provider names
	getAllProviders,
	getVertexProjectId,
	getVertexLocation
};

```
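
A minimal usage sketch for the getters above; the project root and call site are illustrative, not taken from the repo:

```javascript
import {
	getMainProvider,
	getMainModelId,
	getParametersForRole,
	isApiKeySet
} from './scripts/modules/config-manager.js';

const projectRoot = '/my/project'; // hypothetical root, for illustration only

// Resolve the effective model and clamped parameters for the 'main' role.
const provider = getMainProvider(projectRoot);
const modelId = getMainModelId(projectRoot);
const { maxTokens, temperature } = getParametersForRole('main', projectRoot);

// Guard before dispatching an AI call: warn when the provider's key is missing.
if (!isApiKeySet(provider, null, projectRoot)) {
	console.warn(`No API key configured for ${provider}; consider the fallback role.`);
}

console.log({ provider, modelId, maxTokens, temperature });
```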

--------------------------------------------------------------------------------
/scripts/modules/task-manager/list-tasks.js:
--------------------------------------------------------------------------------

```javascript
import chalk from 'chalk';
import boxen from 'boxen';
import Table from 'cli-table3';

import {
	log,
	readJSON,
	truncate,
	readComplexityReport,
	addComplexityToTask
} from '../utils.js';
import findNextTask from './find-next-task.js';

import {
	displayBanner,
	getStatusWithColor,
	formatDependenciesWithStatus,
	getComplexityWithColor,
	createProgressBar
} from '../ui.js';

/**
 * List all tasks
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {string} statusFilter - Filter by status (single status or comma-separated list, e.g., 'pending' or 'blocked,deferred')
 * @param {string} reportPath - Path to the complexity report
 * @param {boolean} withSubtasks - Whether to show subtasks
 * @param {string} outputFormat - Output format (text or json)
 * @param {Object} context - Context object (required)
 * @param {string} context.projectRoot - Project root path
 * @param {string} context.tag - Tag for the task
 * @returns {Object} - Task list result for json format
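 * @example
 * // Programmatic (JSON) use; 'text' mode prints to the console instead:
 * // const { tasks, stats } = listTasks(tasksPath, 'pending,blocked', null,
 * //   false, 'json', { projectRoot, tag });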
 */
function listTasks(
	tasksPath,
	statusFilter,
	reportPath = null,
	withSubtasks = false,
	outputFormat = 'text',
	context = {}
) {
	const { projectRoot, tag } = context;
	try {
		// Extract projectRoot from context if provided
		const data = readJSON(tasksPath, projectRoot, tag); // Pass projectRoot to readJSON
		if (!data || !data.tasks) {
			throw new Error(`No valid tasks found in ${tasksPath}`);
		}

		// Add complexity scores to tasks if report exists
		// `reportPath` is already tag-aware (resolved at the CLI boundary).
		const complexityReport = readComplexityReport(reportPath);
		// Apply complexity scores to tasks
		if (complexityReport && complexityReport.complexityAnalysis) {
			data.tasks.forEach((task) => addComplexityToTask(task, complexityReport));
		}

		// Filter tasks by status if specified - now supports comma-separated statuses
		let filteredTasks;
		if (statusFilter && statusFilter.toLowerCase() !== 'all') {
			// Handle comma-separated statuses
			const allowedStatuses = statusFilter
				.split(',')
				.map((s) => s.trim().toLowerCase())
				.filter((s) => s.length > 0); // Remove empty strings

			filteredTasks = data.tasks.filter(
				(task) =>
					task.status && allowedStatuses.includes(task.status.toLowerCase())
			);
		} else {
			// Default to all tasks if no filter or filter is 'all'
			filteredTasks = data.tasks;
		}

		// Calculate completion statistics
		const totalTasks = data.tasks.length;
		const completedTasks = data.tasks.filter(
			(task) => task.status === 'done' || task.status === 'completed'
		).length;
		const completionPercentage =
			totalTasks > 0 ? (completedTasks / totalTasks) * 100 : 0;

		// Count statuses for tasks
		const doneCount = completedTasks;
		const inProgressCount = data.tasks.filter(
			(task) => task.status === 'in-progress'
		).length;
		const pendingCount = data.tasks.filter(
			(task) => task.status === 'pending'
		).length;
		const blockedCount = data.tasks.filter(
			(task) => task.status === 'blocked'
		).length;
		const deferredCount = data.tasks.filter(
			(task) => task.status === 'deferred'
		).length;
		const cancelledCount = data.tasks.filter(
			(task) => task.status === 'cancelled'
		).length;
		const reviewCount = data.tasks.filter(
			(task) => task.status === 'review'
		).length;

		// Count subtasks and their statuses
		let totalSubtasks = 0;
		let completedSubtasks = 0;
		let inProgressSubtasks = 0;
		let pendingSubtasks = 0;
		let blockedSubtasks = 0;
		let deferredSubtasks = 0;
		let cancelledSubtasks = 0;
		let reviewSubtasks = 0;

		data.tasks.forEach((task) => {
			if (task.subtasks && task.subtasks.length > 0) {
				totalSubtasks += task.subtasks.length;
				completedSubtasks += task.subtasks.filter(
					(st) => st.status === 'done' || st.status === 'completed'
				).length;
				inProgressSubtasks += task.subtasks.filter(
					(st) => st.status === 'in-progress'
				).length;
				pendingSubtasks += task.subtasks.filter(
					(st) => st.status === 'pending'
				).length;
				blockedSubtasks += task.subtasks.filter(
					(st) => st.status === 'blocked'
				).length;
				deferredSubtasks += task.subtasks.filter(
					(st) => st.status === 'deferred'
				).length;
				cancelledSubtasks += task.subtasks.filter(
					(st) => st.status === 'cancelled'
				).length;
				reviewSubtasks += task.subtasks.filter(
					(st) => st.status === 'review'
				).length;
			}
		});

		const subtaskCompletionPercentage =
			totalSubtasks > 0 ? (completedSubtasks / totalSubtasks) * 100 : 0;

		// Calculate dependency statistics (moved up to be available for all output formats)
		const completedTaskIds = new Set(
			data.tasks
				.filter((t) => t.status === 'done' || t.status === 'completed')
				.map((t) => t.id)
		);

		const tasksWithNoDeps = data.tasks.filter(
			(t) =>
				t.status !== 'done' &&
				t.status !== 'completed' &&
				(!t.dependencies || t.dependencies.length === 0)
		).length;

		const tasksWithAllDepsSatisfied = data.tasks.filter(
			(t) =>
				t.status !== 'done' &&
				t.status !== 'completed' &&
				t.dependencies &&
				t.dependencies.length > 0 &&
				t.dependencies.every((depId) => completedTaskIds.has(depId))
		).length;

		const tasksWithUnsatisfiedDeps = data.tasks.filter(
			(t) =>
				t.status !== 'done' &&
				t.status !== 'completed' &&
				t.dependencies &&
				t.dependencies.length > 0 &&
				!t.dependencies.every((depId) => completedTaskIds.has(depId))
		).length;

		// Calculate total tasks ready to work on (no deps + satisfied deps)
		const tasksReadyToWork = tasksWithNoDeps + tasksWithAllDepsSatisfied;

		// Calculate most depended-on tasks
		const dependencyCount = {};
		data.tasks.forEach((task) => {
			if (task.dependencies && task.dependencies.length > 0) {
				task.dependencies.forEach((depId) => {
					dependencyCount[depId] = (dependencyCount[depId] || 0) + 1;
				});
			}
		});

		// Find the most depended-on task
		let mostDependedOnTaskId = null;
		let maxDependents = 0;

		for (const [taskId, count] of Object.entries(dependencyCount)) {
			if (count > maxDependents) {
				maxDependents = count;
				mostDependedOnTaskId = parseInt(taskId);
			}
		}

		// Get the most depended-on task
		const mostDependedOnTask =
			mostDependedOnTaskId !== null
				? data.tasks.find((t) => t.id === mostDependedOnTaskId)
				: null;

		// Calculate average dependencies per task
		const totalDependencies = data.tasks.reduce(
			(sum, task) => sum + (task.dependencies ? task.dependencies.length : 0),
			0
		);
		const avgDependenciesPerTask =
			data.tasks.length > 0 ? totalDependencies / data.tasks.length : 0;

		// Find next task to work on, passing the complexity report
		const nextItem = findNextTask(data.tasks, complexityReport);

		// For JSON output, return structured data
		if (outputFormat === 'json') {
			// Omit the verbose 'details' field for JSON output.
			// Note: this maps over filteredTasks, not data.tasks.
			const tasksWithoutDetails = filteredTasks.map((task) => {
				// Omit 'details' from the parent task
				const { details, ...taskRest } = task;

				// If subtasks exist, omit 'details' from them too
				if (taskRest.subtasks && Array.isArray(taskRest.subtasks)) {
					taskRest.subtasks = taskRest.subtasks.map((subtask) => {
						const { details: subtaskDetails, ...subtaskRest } = subtask;
						return subtaskRest;
					});
				}
				return taskRest;
			});

			return {
				tasks: tasksWithoutDetails,
				filter: statusFilter || 'all', // Return the actual filter used
				stats: {
					total: totalTasks,
					completed: doneCount,
					inProgress: inProgressCount,
					pending: pendingCount,
					blocked: blockedCount,
					deferred: deferredCount,
					cancelled: cancelledCount,
					review: reviewCount,
					completionPercentage,
					subtasks: {
						total: totalSubtasks,
						completed: completedSubtasks,
						inProgress: inProgressSubtasks,
						pending: pendingSubtasks,
						blocked: blockedSubtasks,
						deferred: deferredSubtasks,
					cancelled: cancelledSubtasks,
					review: reviewSubtasks,
					completionPercentage: subtaskCompletionPercentage
					}
				}
			};
		}

		// For markdown-readme output, return formatted markdown
		if (outputFormat === 'markdown-readme') {
			return generateMarkdownOutput(data, filteredTasks, {
				totalTasks,
				completedTasks,
				completionPercentage,
				doneCount,
				inProgressCount,
				pendingCount,
				blockedCount,
				deferredCount,
				cancelledCount,
				totalSubtasks,
				completedSubtasks,
				subtaskCompletionPercentage,
				inProgressSubtasks,
				pendingSubtasks,
				blockedSubtasks,
				deferredSubtasks,
				cancelledSubtasks,
				reviewSubtasks,
				tasksWithNoDeps,
				tasksReadyToWork,
				tasksWithUnsatisfiedDeps,
				mostDependedOnTask,
				mostDependedOnTaskId,
				maxDependents,
				avgDependenciesPerTask,
				complexityReport,
				withSubtasks,
				nextItem
			});
		}

		// For compact output, return minimal one-line format
		if (outputFormat === 'compact') {
			return renderCompactOutput(filteredTasks, withSubtasks);
		}

		// Default text output: render dashboards, the task table, and the next-task box

		// Calculate status breakdowns as percentages of total
		const taskStatusBreakdown = {
			'in-progress': totalTasks > 0 ? (inProgressCount / totalTasks) * 100 : 0,
			pending: totalTasks > 0 ? (pendingCount / totalTasks) * 100 : 0,
			blocked: totalTasks > 0 ? (blockedCount / totalTasks) * 100 : 0,
			deferred: totalTasks > 0 ? (deferredCount / totalTasks) * 100 : 0,
			cancelled: totalTasks > 0 ? (cancelledCount / totalTasks) * 100 : 0,
			review: totalTasks > 0 ? (reviewCount / totalTasks) * 100 : 0
		};

		const subtaskStatusBreakdown = {
			'in-progress':
				totalSubtasks > 0 ? (inProgressSubtasks / totalSubtasks) * 100 : 0,
			pending: totalSubtasks > 0 ? (pendingSubtasks / totalSubtasks) * 100 : 0,
			blocked: totalSubtasks > 0 ? (blockedSubtasks / totalSubtasks) * 100 : 0,
			deferred:
				totalSubtasks > 0 ? (deferredSubtasks / totalSubtasks) * 100 : 0,
			cancelled:
				totalSubtasks > 0 ? (cancelledSubtasks / totalSubtasks) * 100 : 0,
			review: totalSubtasks > 0 ? (reviewSubtasks / totalSubtasks) * 100 : 0
		};

		// Create progress bars with status breakdowns
		const taskProgressBar = createProgressBar(
			completionPercentage,
			30,
			taskStatusBreakdown
		);
		const subtaskProgressBar = createProgressBar(
			subtaskCompletionPercentage,
			30,
			subtaskStatusBreakdown
		);

		// Determine the terminal width, falling back to a minimum of 80 columns
		let terminalWidth;
		try {
			terminalWidth = process.stdout.columns;
		} catch (e) {
			log('debug', 'Could not determine terminal width, using default');
		}
		// Enforce a reasonable minimum to prevent layout issues
		terminalWidth = Math.max(terminalWidth || 80, 80);

		// Create dashboard content
		const projectDashboardContent =
			chalk.white.bold('Project Dashboard') +
			'\n' +
			`Tasks Progress: ${chalk.greenBright(taskProgressBar)} ${completionPercentage.toFixed(0)}%\n` +
			`Done: ${chalk.green(doneCount)}  In Progress: ${chalk.blue(inProgressCount)}  Pending: ${chalk.yellow(pendingCount)}  Blocked: ${chalk.red(blockedCount)}  Deferred: ${chalk.gray(deferredCount)}  Cancelled: ${chalk.gray(cancelledCount)}\n\n` +
			`Subtasks Progress: ${chalk.cyan(subtaskProgressBar)} ${subtaskCompletionPercentage.toFixed(0)}%\n` +
			`Completed: ${chalk.green(completedSubtasks)}/${totalSubtasks}  In Progress: ${chalk.blue(inProgressSubtasks)}  Pending: ${chalk.yellow(pendingSubtasks)}  Blocked: ${chalk.red(blockedSubtasks)}  Deferred: ${chalk.gray(deferredSubtasks)}  Cancelled: ${chalk.gray(cancelledSubtasks)}\n\n` +
			chalk.cyan.bold('Priority Breakdown:') +
			'\n' +
			`${chalk.red('•')} ${chalk.white('High priority:')} ${data.tasks.filter((t) => t.priority === 'high').length}\n` +
			`${chalk.yellow('•')} ${chalk.white('Medium priority:')} ${data.tasks.filter((t) => t.priority === 'medium').length}\n` +
			`${chalk.green('•')} ${chalk.white('Low priority:')} ${data.tasks.filter((t) => t.priority === 'low').length}`;

		const dependencyDashboardContent =
			chalk.white.bold('Dependency Status & Next Task') +
			'\n' +
			chalk.cyan.bold('Dependency Metrics:') +
			'\n' +
			`${chalk.green('•')} ${chalk.white('Tasks with no dependencies:')} ${tasksWithNoDeps}\n` +
			`${chalk.green('•')} ${chalk.white('Tasks ready to work on:')} ${tasksReadyToWork}\n` +
			`${chalk.yellow('•')} ${chalk.white('Tasks blocked by dependencies:')} ${tasksWithUnsatisfiedDeps}\n` +
			`${chalk.magenta('•')} ${chalk.white('Most depended-on task:')} ${mostDependedOnTask ? chalk.cyan(`#${mostDependedOnTaskId} (${maxDependents} dependents)`) : chalk.gray('None')}\n` +
			`${chalk.blue('•')} ${chalk.white('Avg dependencies per task:')} ${avgDependenciesPerTask.toFixed(1)}\n\n` +
			chalk.cyan.bold('Next Task to Work On:') +
			'\n' +
			`ID: ${chalk.cyan(nextItem ? nextItem.id : 'N/A')} - ${nextItem ? chalk.white.bold(truncate(nextItem.title, 40)) : chalk.yellow('No task available')}\n` +
			`Priority: ${nextItem ? chalk.white(nextItem.priority || 'medium') : ''}  Dependencies: ${nextItem ? formatDependenciesWithStatus(nextItem.dependencies, data.tasks, true, complexityReport) : ''}\n` +
			`Complexity: ${nextItem && nextItem.complexityScore ? getComplexityWithColor(nextItem.complexityScore) : chalk.gray('N/A')}`;

		// Calculate width for side-by-side display
		// Box borders, padding take approximately 4 chars on each side
		const minDashboardWidth = 50; // Minimum width for dashboard
		const minDependencyWidth = 50; // Minimum width for dependency dashboard
		const totalMinWidth = minDashboardWidth + minDependencyWidth + 4; // Extra 4 chars for spacing

		// If terminal is wide enough, show boxes side by side with responsive widths
		if (terminalWidth >= totalMinWidth) {
			// Calculate widths proportionally for each box - use exact 50% width each
			const availableWidth = terminalWidth;
			const halfWidth = Math.floor(availableWidth / 2);

			// Account for border characters (2 chars on each side)
			const boxContentWidth = halfWidth - 4;

			// Create boxen options with precise widths
			const dashboardBox = boxen(projectDashboardContent, {
				padding: 1,
				borderColor: 'blue',
				borderStyle: 'round',
				width: boxContentWidth,
				dimBorder: false
			});

			const dependencyBox = boxen(dependencyDashboardContent, {
				padding: 1,
				borderColor: 'magenta',
				borderStyle: 'round',
				width: boxContentWidth,
				dimBorder: false
			});

			// Create a better side-by-side layout with exact spacing
			const dashboardLines = dashboardBox.split('\n');
			const dependencyLines = dependencyBox.split('\n');

			// Make sure both boxes have the same height
			const maxHeight = Math.max(dashboardLines.length, dependencyLines.length);

			// For each line of output, pad the dashboard line to exactly halfWidth chars
			// This ensures the dependency box starts at exactly the right position
			const combinedLines = [];
			for (let i = 0; i < maxHeight; i++) {
				// Get the dashboard line (or empty string if we've run out of lines)
				const dashLine = i < dashboardLines.length ? dashboardLines[i] : '';
				// Get the dependency line (or empty string if we've run out of lines)
				const depLine = i < dependencyLines.length ? dependencyLines[i] : '';

				// Remove any trailing spaces from dashLine before padding to exact width
				const trimmedDashLine = dashLine.trimEnd();
				// Pad the dashboard line to exactly halfWidth chars with no extra spaces
				const paddedDashLine = trimmedDashLine.padEnd(halfWidth, ' ');

				// Join the lines with no space in between
				combinedLines.push(paddedDashLine + depLine);
			}

			// Join all lines and output
			console.log(combinedLines.join('\n'));
		} else {
			// Terminal too narrow, show boxes stacked vertically
			const dashboardBox = boxen(projectDashboardContent, {
				padding: 1,
				borderColor: 'blue',
				borderStyle: 'round',
				margin: { top: 0, bottom: 1 }
			});

			const dependencyBox = boxen(dependencyDashboardContent, {
				padding: 1,
				borderColor: 'magenta',
				borderStyle: 'round',
				margin: { top: 0, bottom: 1 }
			});

			// Display stacked vertically
			console.log(dashboardBox);
			console.log(dependencyBox);
		}

		if (filteredTasks.length === 0) {
			console.log(
				boxen(
					statusFilter
						? chalk.yellow(`No tasks with status '${statusFilter}' found`)
						: chalk.yellow('No tasks found'),
					{ padding: 1, borderColor: 'yellow', borderStyle: 'round' }
				)
			);
			return;
		}

		// Table layout: percentage-based column widths scaled to the terminal width

		// Widen the ID column when showing subtasks (subtask IDs are longer, e.g. "1.2")
		const idWidthPct = withSubtasks ? 10 : 7;

		// Status column must fit the longest status ("in-progress")
		const statusWidthPct = 15;

		const priorityWidthPct = 12;

		const depsWidthPct = 20;

		const complexityWidthPct = 10;

		// Title column takes the remaining width
		const titleWidthPct =
			100 -
			idWidthPct -
			statusWidthPct -
			priorityWidthPct -
			depsWidthPct -
			complexityWidthPct;

		// Allow 10 characters for borders and padding
		const availableWidth = terminalWidth - 10;

		// Calculate actual column widths based on percentages
		const idWidth = Math.floor(availableWidth * (idWidthPct / 100));
		const statusWidth = Math.floor(availableWidth * (statusWidthPct / 100));
		const priorityWidth = Math.floor(availableWidth * (priorityWidthPct / 100));
		const depsWidth = Math.floor(availableWidth * (depsWidthPct / 100));
		const complexityWidth = Math.floor(
			availableWidth * (complexityWidthPct / 100)
		);
		const titleWidth = Math.floor(availableWidth * (titleWidthPct / 100));

		// Create a table with correct borders and spacing
		const table = new Table({
			head: [
				chalk.cyan.bold('ID'),
				chalk.cyan.bold('Title'),
				chalk.cyan.bold('Status'),
				chalk.cyan.bold('Priority'),
				chalk.cyan.bold('Dependencies'),
				chalk.cyan.bold('Complexity')
			],
			colWidths: [
				idWidth,
				titleWidth,
				statusWidth,
				priorityWidth,
				depsWidth,
				complexityWidth // Added complexity column width
			],
			style: {
				head: [], // No special styling for header
				border: [], // No special styling for border
				compact: false // Use default spacing
			},
			wordWrap: true,
			wrapOnWordBoundary: true
		});

		// Process tasks for the table
		filteredTasks.forEach((task) => {
			// Format dependencies with colored status indicators
			const depText =
				task.dependencies && task.dependencies.length > 0
					? formatDependenciesWithStatus(
							task.dependencies,
							data.tasks,
							true,
							complexityReport
						)
					: chalk.gray('None');

			// Replace embedded newlines so the title stays on a single table row
			const cleanTitle = task.title.replace(/\n/g, ' ');

			// Get priority color
			const priorityColor =
				{
					high: chalk.red,
					medium: chalk.yellow,
					low: chalk.gray
				}[task.priority || 'medium'] || chalk.white;

			// Format status
			const status = getStatusWithColor(task.status, true);

			// Add the row without truncating dependencies
			table.push([
				task.id.toString(),
				truncate(cleanTitle, titleWidth - 3),
				status,
				priorityColor(truncate(task.priority || 'medium', priorityWidth - 2)),
				depText,
				task.complexityScore
					? getComplexityWithColor(task.complexityScore)
					: chalk.gray('N/A')
			]);

			// Add subtasks if requested
			if (withSubtasks && task.subtasks && task.subtasks.length > 0) {
				task.subtasks.forEach((subtask) => {
					// Format subtask dependencies with status indicators
					let subtaskDepText = 'None';
					if (subtask.dependencies && subtask.dependencies.length > 0) {
						// Handle both subtask-to-subtask and subtask-to-task dependencies
						const formattedDeps = subtask.dependencies
							.map((depId) => {
								// Heuristic: small numeric IDs (< 100) are treated as references
								// to sibling subtasks; anything else is a top-level task ID
								if (typeof depId === 'number' && depId < 100) {
									const foundSubtask = task.subtasks.find(
										(st) => st.id === depId
									);
									if (foundSubtask) {
										const isDone =
											foundSubtask.status === 'done' ||
											foundSubtask.status === 'completed';
										const isInProgress = foundSubtask.status === 'in-progress';

										// Use consistent color formatting instead of emojis
										if (isDone) {
											return chalk.green.bold(`${task.id}.${depId}`);
										} else if (isInProgress) {
											return chalk.hex('#FFA500').bold(`${task.id}.${depId}`);
										} else {
											return chalk.red.bold(`${task.id}.${depId}`);
										}
									}
								}
								// Default to regular task dependency
								const depTask = data.tasks.find((t) => t.id === depId);
								if (depTask) {
									// Add complexity to depTask before checking status
									addComplexityToTask(depTask, complexityReport);
									const isDone =
										depTask.status === 'done' || depTask.status === 'completed';
									const isInProgress = depTask.status === 'in-progress';
									// Use the same color scheme as in formatDependenciesWithStatus
									if (isDone) {
										return chalk.green.bold(`${depId}`);
									} else if (isInProgress) {
										return chalk.hex('#FFA500').bold(`${depId}`);
									} else {
										return chalk.red.bold(`${depId}`);
									}
								}
								return chalk.cyan(depId.toString());
							})
							.join(', ');

						subtaskDepText = formattedDeps || chalk.gray('None');
					}

					// Add the subtask row without truncating dependencies
					table.push([
						`${task.id}.${subtask.id}`,
						chalk.dim(`└─ ${truncate(subtask.title, titleWidth - 5)}`),
						getStatusWithColor(subtask.status, true),
						chalk.dim('-'),
						subtaskDepText,
						subtask.complexityScore
							? chalk.gray(`${subtask.complexityScore}`)
							: chalk.gray('N/A')
					]);
				});
			}
		});

		// Ensure we output the table even if it had to wrap
		try {
			console.log(table.toString());
		} catch (err) {
			log('error', `Error rendering table: ${err.message}`);

			// Fall back to simpler output
			console.log(
				chalk.yellow(
					'\nFalling back to simple task list due to terminal width constraints:'
				)
			);
			filteredTasks.forEach((task) => {
				console.log(
					`${chalk.cyan(task.id)}: ${chalk.white(task.title)} - ${getStatusWithColor(task.status)}`
				);
			});
		}

		// Show filter info if applied
		if (statusFilter) {
			console.log(chalk.yellow(`\nFiltered by status: ${statusFilter}`));
			console.log(
				chalk.yellow(`Showing ${filteredTasks.length} of ${totalTasks} tasks`)
			);
		}

		// Define priority colors
		const priorityColors = {
			high: chalk.red.bold,
			medium: chalk.yellow,
			low: chalk.gray
		};

		// Show next task box in a prominent color
		if (nextItem) {
			// Prepare subtasks section if they exist (Only tasks have .subtasks property)
			let subtasksSection = '';
			// Check if the nextItem is a top-level task before looking for subtasks
			const parentTaskForSubtasks = data.tasks.find(
				(t) => String(t.id) === String(nextItem.id)
			); // Find the original task object
			if (
				parentTaskForSubtasks &&
				parentTaskForSubtasks.subtasks &&
				parentTaskForSubtasks.subtasks.length > 0
			) {
				subtasksSection = `\n\n${chalk.white.bold('Subtasks:')}\n`;
				subtasksSection += parentTaskForSubtasks.subtasks
					.map((subtask) => {
						// Add complexity to subtask before display
						addComplexityToTask(subtask, complexityReport);
						// Using a more simplified format for subtask status display
						const status = subtask.status || 'pending';
						const statusColors = {
							done: chalk.green,
							completed: chalk.green,
							pending: chalk.yellow,
							'in-progress': chalk.blue,
							deferred: chalk.gray,
							blocked: chalk.red,
							cancelled: chalk.gray
						};
						const statusColor =
							statusColors[status.toLowerCase()] || chalk.white;
						// Ensure subtask ID is displayed correctly using parent ID from the original task object
						return `${chalk.cyan(`${parentTaskForSubtasks.id}.${subtask.id}`)} [${statusColor(status)}] ${subtask.title}`;
					})
					.join('\n');
			}

			console.log(
				boxen(
					chalk.hex('#FF8800').bold(
						`🔥 Next Task to Work On: #${nextItem.id} - ${nextItem.title}`
					) +
						'\n\n' +
						`${chalk.white('Priority:')} ${priorityColors[nextItem.priority || 'medium'](nextItem.priority || 'medium')}   ${chalk.white('Status:')} ${getStatusWithColor(nextItem.status, true)}\n` +
						`${chalk.white('Dependencies:')} ${nextItem.dependencies && nextItem.dependencies.length > 0 ? formatDependenciesWithStatus(nextItem.dependencies, data.tasks, true, complexityReport) : chalk.gray('None')}\n\n` +
						// findNextTask doesn't return a description, so fetch it from the
						// original task/subtask via getWorkItemDescription
						`${chalk.white('Description:')} ${getWorkItemDescription(nextItem, data.tasks)}` +
						subtasksSection +
						'\n\n' +
						`${chalk.cyan('Start working:')} ${chalk.yellow(`task-master set-status --id=${nextItem.id} --status=in-progress`)}\n` +
						`${chalk.cyan('View details:')} ${chalk.yellow(`task-master show ${nextItem.id}`)}`,
					{
						padding: { left: 2, right: 2, top: 1, bottom: 1 },
						borderColor: '#FF8800',
						borderStyle: 'round',
						margin: { top: 1, bottom: 1 },
						title: '⚡ RECOMMENDED NEXT TASK ⚡',
						titleAlignment: 'center',
						width: terminalWidth - 4,
						fullscreen: false
					}
				)
			);
		} else {
			console.log(
				boxen(
					chalk.hex('#FF8800').bold('No eligible next task found') +
						'\n\n' +
						'All pending tasks have dependencies that are not yet completed, or all tasks are done.',
					{
						padding: 1,
						borderColor: '#FF8800',
						borderStyle: 'round',
						margin: { top: 1, bottom: 1 },
						title: '⚡ NEXT TASK ⚡',
						titleAlignment: 'center',
						width: terminalWidth - 4 // Use full terminal width minus a small margin
					}
				)
			);
		}

		// Show next steps
		console.log(
			boxen(
				chalk.white.bold('Suggested Next Steps:') +
					'\n\n' +
					`${chalk.cyan('1.')} Run ${chalk.yellow('task-master next')} to see what to work on next\n` +
					`${chalk.cyan('2.')} Run ${chalk.yellow('task-master expand --id=<id>')} to break down a task into subtasks\n` +
					`${chalk.cyan('3.')} Run ${chalk.yellow('task-master set-status --id=<id> --status=done')} to mark a task as complete`,
				{
					padding: 1,
					borderColor: 'gray',
					borderStyle: 'round',
					margin: { top: 1 }
				}
			)
		);
	} catch (error) {
		log('error', `Error listing tasks: ${error.message}`);

		if (outputFormat === 'json') {
			// Return structured error for JSON output
			throw {
				code: 'TASK_LIST_ERROR',
				message: error.message,
				details: error.stack
			};
		}

		console.error(chalk.red(`Error: ${error.message}`));
		process.exit(1);
	}
}

/**
 * Get the description for the next work item (task or subtask).
 * @param {Object} item - The work item returned by findNextTask
 * @param {Array} allTasks - All top-level tasks
 * @returns {string} - The item's description, or a fallback message
 */
function getWorkItemDescription(item, allTasks) {
	if (!item) return 'N/A';
	if (item.parentId) {
		// It's a subtask
		const parent = allTasks.find((t) => t.id === item.parentId);
		const subtask = parent?.subtasks?.find(
			(st) => `${parent.id}.${st.id}` === item.id
		);
		return subtask?.description || 'No description available.';
	} else {
		// It's a top-level task
		const task = allTasks.find((t) => String(t.id) === String(item.id));
		return task?.description || 'No description available.';
	}
}

/**
 * Generate markdown-formatted output for README files
 * @param {Object} data - Full tasks data
 * @param {Array} filteredTasks - Filtered tasks array
 * @param {Object} stats - Statistics object
 * @returns {string} - Formatted markdown string
 */
function generateMarkdownOutput(data, filteredTasks, stats) {
	const {
		totalTasks,
		completedTasks,
		completionPercentage,
		doneCount,
		inProgressCount,
		pendingCount,
		blockedCount,
		deferredCount,
		cancelledCount,
		totalSubtasks,
		completedSubtasks,
		subtaskCompletionPercentage,
		inProgressSubtasks,
		pendingSubtasks,
		blockedSubtasks,
		deferredSubtasks,
		cancelledSubtasks,
		tasksWithNoDeps,
		tasksReadyToWork,
		tasksWithUnsatisfiedDeps,
		mostDependedOnTask,
		mostDependedOnTaskId,
		maxDependents,
		avgDependenciesPerTask,
		complexityReport,
		withSubtasks,
		nextItem
	} = stats;

	let markdown = '';

	// Create progress bars for markdown (using Unicode block characters)
	const createMarkdownProgressBar = (percentage, width = 20) => {
		const filled = Math.round((percentage / 100) * width);
		const empty = width - filled;
		return '█'.repeat(filled) + '░'.repeat(empty);
	};
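	// e.g. createMarkdownProgressBar(40) -> '████████░░░░░░░░░░░░' (8 of 20 filled)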

	const taskProgressBar = createMarkdownProgressBar(completionPercentage, 20);
	const subtaskProgressBar = createMarkdownProgressBar(
		subtaskCompletionPercentage,
		20
	);

	// Dashboard section
	markdown += '| Project Dashboard |  |\n';
	markdown += '| :-                |:-|\n';
	markdown += `| Task Progress     | ${taskProgressBar} ${Math.round(completionPercentage)}% |\n`;
	markdown += `| Done | ${doneCount} |\n`;
	markdown += `| In Progress | ${inProgressCount} |\n`;
	markdown += `| Pending | ${pendingCount} |\n`;
	markdown += `| Blocked | ${blockedCount} |\n`;
	markdown += `| Deferred | ${deferredCount} |\n`;
	markdown += `| Cancelled | ${cancelledCount} |\n`;
	markdown += `|-|-|\n`;
	markdown += `| Subtask Progress | ${subtaskProgressBar} ${Math.round(subtaskCompletionPercentage)}% |\n`;
	markdown += `| Completed | ${completedSubtasks} |\n`;
	markdown += `| In Progress | ${inProgressSubtasks} |\n`;
	markdown += `| Pending | ${pendingSubtasks} |\n`;
	markdown += `| Blocked | ${blockedSubtasks} |\n`;

	markdown += '\n\n';

	// Tasks table
	markdown +=
		'| ID | Title | Status | Priority | Dependencies | Complexity |\n';
	markdown +=
		'| :- | :-    | :-     | :-       | :-           | :-         |\n';

	// Helper function to format status with symbols
	const getStatusSymbol = (status) => {
		switch (status) {
			case 'done':
			case 'completed':
				return '✓&nbsp;done';
			case 'in-progress':
				return '►&nbsp;in-progress';
			case 'pending':
				return '○&nbsp;pending';
			case 'blocked':
				return '⭕&nbsp;blocked';
			case 'deferred':
				return 'x&nbsp;deferred';
			case 'cancelled':
				return 'x&nbsp;cancelled';
			case 'review':
				return '?&nbsp;review';
			default:
				return status || 'pending';
		}
	};

	// Helper function to format dependencies without color codes.
	// (allTasks is accepted for signature parity but unused: markdown output
	// shows bare IDs whether or not the dependency resolves to a task.)
	const formatDependenciesForMarkdown = (deps, allTasks) => {
		if (!deps || deps.length === 0) return 'None';
		return deps.map((depId) => depId.toString()).join(', ');
	};

	// Process all tasks
	filteredTasks.forEach((task) => {
		const taskTitle = task.title; // No truncation for README
		const statusSymbol = getStatusSymbol(task.status);
		const priority = task.priority || 'medium';
		const deps = formatDependenciesForMarkdown(task.dependencies, data.tasks);
		const complexity = task.complexityScore
			? `● ${task.complexityScore}`
			: 'N/A';

		markdown += `| ${task.id} | ${taskTitle} | ${statusSymbol} | ${priority} | ${deps} | ${complexity} |\n`;

		// Add subtasks if requested
		if (withSubtasks && task.subtasks && task.subtasks.length > 0) {
			task.subtasks.forEach((subtask) => {
				const subtaskTitle = subtask.title; // No truncation for README
				const subtaskStatus = getStatusSymbol(subtask.status);
				const subtaskDeps = formatDependenciesForMarkdown(
					subtask.dependencies,
					data.tasks
				);
				const subtaskComplexity = subtask.complexityScore
					? subtask.complexityScore.toString()
					: 'N/A';

				markdown += `| ${task.id}.${subtask.id} | ${subtaskTitle} | ${subtaskStatus} | -            | ${subtaskDeps} | ${subtaskComplexity} |\n`;
			});
		}
	});

	return markdown;
}

/**
 * Format dependencies for compact output with truncation and coloring
 * @param {Array} dependencies - Array of dependency IDs
 * @returns {string} - Formatted dependency string with arrow prefix
 */
function formatCompactDependencies(dependencies) {
	if (!dependencies || dependencies.length === 0) {
		return '';
	}

	if (dependencies.length > 5) {
		const visible = dependencies.slice(0, 5).join(',');
		const remaining = dependencies.length - 5;
		return ` → ${chalk.cyan(visible)}${chalk.gray('... (+' + remaining + ' more)')}`;
	} else {
		return ` → ${chalk.cyan(dependencies.join(','))}`;
	}
}
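
// Example (colors omitted): formatCompactDependencies([1, 2, 3, 4, 5, 6, 7])
// renders ' → 1,2,3,4,5... (+2 more)' (first five IDs, then a gray count).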

/**
 * Format a single task in compact one-line format
 * @param {Object} task - Task object
 * @param {number} maxTitleLength - Maximum title length before truncation
 * @returns {string} - Formatted task line
 */
function formatCompactTask(task, maxTitleLength = 50) {
	const status = task.status || 'pending';
	const priority = task.priority || 'medium';
	const title = truncate(task.title || 'Untitled', maxTitleLength);

	// Use colored status from existing function
	const coloredStatus = getStatusWithColor(status, true);

	// Color priority based on level
	const priorityColors = {
		high: chalk.red,
		medium: chalk.yellow,
		low: chalk.gray
	};
	const priorityColor = priorityColors[priority] || chalk.white;

	// Format dependencies using shared helper
	const depsText = formatCompactDependencies(task.dependencies);

	return `${chalk.cyan(task.id)} ${coloredStatus} ${chalk.white(title)} ${priorityColor('(' + priority + ')')}${depsText}`;
}

/**
 * Format a subtask in compact format with indentation
 * @param {Object} subtask - Subtask object
 * @param {string|number} parentId - Parent task ID
 * @param {number} maxTitleLength - Maximum title length before truncation
 * @returns {string} - Formatted subtask line
 */
function formatCompactSubtask(subtask, parentId, maxTitleLength = 47) {
	const status = subtask.status || 'pending';
	const title = truncate(subtask.title || 'Untitled', maxTitleLength);

	// Use colored status from existing function
	const coloredStatus = getStatusWithColor(status, true);

	// Format dependencies using shared helper
	const depsText = formatCompactDependencies(subtask.dependencies);

	return `  ${chalk.cyan(parentId + '.' + subtask.id)} ${coloredStatus} ${chalk.dim(title)}${depsText}`;
}

/**
 * Render complete compact output
 * @param {Array} filteredTasks - Tasks to display
 * @param {boolean} withSubtasks - Whether to include subtasks
 * @returns {void} - Outputs directly to console
 */
function renderCompactOutput(filteredTasks, withSubtasks) {
	if (filteredTasks.length === 0) {
		console.log('No tasks found');
		return;
	}

	const output = [];

	filteredTasks.forEach((task) => {
		output.push(formatCompactTask(task));

		if (withSubtasks && task.subtasks && task.subtasks.length > 0) {
			task.subtasks.forEach((subtask) => {
				output.push(formatCompactSubtask(subtask, task.id));
			});
		}
	});

	console.log(output.join('\n'));
}

export default listTasks;

```
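
A hypothetical invocation of the default export; the paths, tag, and filter below are placeholders, not values taken from the repo:

```javascript
import listTasks from './scripts/modules/task-manager/list-tasks.js';

// Print the compact one-line view, including subtasks, for active work.
listTasks(
	'.taskmaster/tasks/tasks.json', // hypothetical tasks file path
	'pending,in-progress', // comma-separated status filter
	null, // no complexity report
	true, // include subtasks
	'compact',
	{ projectRoot: process.cwd(), tag: 'master' }
);
```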

--------------------------------------------------------------------------------
/tests/unit/config-manager.test.js:
--------------------------------------------------------------------------------

```javascript
import fs from 'fs';
import path from 'path';
import { jest } from '@jest/globals';
import { fileURLToPath } from 'url';

// Mock modules first before any imports
jest.mock('fs', () => ({
	// Always return false by default: prevents config discovery and ignores
	// Jest-internal file probes (e.g. jest-message-util, node_modules)
	existsSync: jest.fn(() => false),
	readFileSync: jest.fn(() => '{}'),
	writeFileSync: jest.fn(),
	mkdirSync: jest.fn()
}));

jest.mock('path', () => ({
	join: jest.fn((...paths) => paths.join('/')), // Variadic, so 3-arg joins aren't silently truncated
	dirname: jest.fn((filePath) => filePath.split('/').slice(0, -1).join('/')),
	resolve: jest.fn((...paths) => paths.join('/')),
	basename: jest.fn((filePath) => filePath.split('/').pop())
}));

jest.mock('chalk', () => ({
	red: jest.fn((text) => text),
	blue: jest.fn((text) => text),
	green: jest.fn((text) => text),
	yellow: jest.fn((text) => text),
	white: jest.fn((text) => ({
		bold: jest.fn((text) => text)
	})),
	reset: jest.fn((text) => text),
	dim: jest.fn((text) => text) // Add dim function to prevent chalk errors
}));

// Mock console to prevent Jest internal access
const mockConsole = {
	log: jest.fn(),
	info: jest.fn(),
	warn: jest.fn(),
	error: jest.fn()
};
global.console = mockConsole;

// --- Define Mock Function Instances (path-utils) ---
const mockFindConfigPath = jest.fn(() => null); // Default to null, can be overridden in tests
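// e.g. an individual test can opt in to config discovery with:
// mockFindConfigPath.mockReturnValue(MOCK_CONFIG_PATH);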

// Mock path-utils to prevent config file path discovery and logging
jest.mock('../../src/utils/path-utils.js', () => ({
	__esModule: true,
	findProjectRoot: jest.fn(() => '/mock/project'),
	findConfigPath: mockFindConfigPath, // Use the mock function instance
	findTasksPath: jest.fn(() => '/mock/tasks.json'),
	findComplexityReportPath: jest.fn(() => null),
	resolveTasksOutputPath: jest.fn(() => '/mock/tasks.json'),
	resolveComplexityReportOutputPath: jest.fn(() => '/mock/report.json')
}));

// --- Read REAL supported-models.json data BEFORE mocks ---
const __filename = fileURLToPath(import.meta.url); // Get current file path
const __dirname = path.dirname(__filename); // Get current directory
const realSupportedModelsPath = path.resolve(
	__dirname,
	'../../scripts/modules/supported-models.json'
);
let REAL_SUPPORTED_MODELS_CONTENT;
let REAL_SUPPORTED_MODELS_DATA;
try {
	REAL_SUPPORTED_MODELS_CONTENT = fs.readFileSync(
		realSupportedModelsPath,
		'utf-8'
	);
	REAL_SUPPORTED_MODELS_DATA = JSON.parse(REAL_SUPPORTED_MODELS_CONTENT);
} catch (err) {
	console.error(
		'FATAL TEST SETUP ERROR: Could not read or parse real supported-models.json',
		err
	);
	REAL_SUPPORTED_MODELS_CONTENT = '{}'; // Keep the variables defined for safety
	REAL_SUPPORTED_MODELS_DATA = {};
	process.exit(1); // Abort: the tests cannot run without the real model data
}

// --- Define Mock Function Instances (utils) ---
const mockFindProjectRoot = jest.fn();
const mockLog = jest.fn();

// --- Mock Dependencies BEFORE importing the module under test ---

// Mock the 'utils.js' module using a factory function
jest.mock('../../scripts/modules/utils.js', () => ({
	__esModule: true, // Indicate it's an ES module mock
	findProjectRoot: mockFindProjectRoot, // Use the mock function instance
	log: mockLog, // Use the mock function instance
	// Include other necessary exports from utils if config-manager uses them directly
	resolveEnvVariable: jest.fn() // Example if needed
}));

// --- Import the module under test AFTER mocks are defined ---
import * as configManager from '../../scripts/modules/config-manager.js';
// Import the mocked 'fs' module to allow spying on its functions
import fsMocked from 'fs';

// --- Test Data (Keep as is, ensure DEFAULT_CONFIG is accurate) ---
const MOCK_PROJECT_ROOT = '/mock/project';
const MOCK_CONFIG_PATH = path.join(
	MOCK_PROJECT_ROOT,
	'.taskmaster/config.json'
);

// Updated DEFAULT_CONFIG reflecting the implementation
const DEFAULT_CONFIG = {
	models: {
		main: {
			provider: 'anthropic',
			modelId: 'claude-sonnet-4-20250514',
			maxTokens: 64000,
			temperature: 0.2
		},
		research: {
			provider: 'perplexity',
			modelId: 'sonar',
			maxTokens: 8700,
			temperature: 0.1
		},
		fallback: {
			provider: 'anthropic',
			modelId: 'claude-3-7-sonnet-20250219',
			maxTokens: 120000,
			temperature: 0.2
		}
	},
	global: {
		logLevel: 'info',
		debug: false,
		defaultNumTasks: 10,
		defaultSubtasks: 5,
		defaultPriority: 'medium',
		projectName: 'Task Master',
		ollamaBaseURL: 'http://localhost:11434/api',
		bedrockBaseURL: 'https://bedrock.us-east-1.amazonaws.com',
		enableCodebaseAnalysis: true,
		responseLanguage: 'English'
	},
	claudeCode: {},
	grokCli: {
		timeout: 120000,
		workingDirectory: null,
		defaultModel: 'grok-4-latest'
	}
};

// Other test data (VALID_CUSTOM_CONFIG, PARTIAL_CONFIG, INVALID_PROVIDER_CONFIG)
const VALID_CUSTOM_CONFIG = {
	models: {
		main: {
			provider: 'openai',
			modelId: 'gpt-4o',
			maxTokens: 4096,
			temperature: 0.5
		},
		research: {
			provider: 'google',
			modelId: 'gemini-1.5-pro-latest',
			maxTokens: 8192,
			temperature: 0.3
		},
		fallback: {
			provider: 'anthropic',
			modelId: 'claude-3-opus-20240229',
			maxTokens: 100000,
			temperature: 0.4
		}
	},
	global: {
		logLevel: 'debug',
		defaultPriority: 'high',
		projectName: 'My Custom Project'
	}
};

const PARTIAL_CONFIG = {
	models: {
		main: { provider: 'openai', modelId: 'gpt-4-turbo' }
	},
	global: {
		projectName: 'Partial Project'
	}
};

const INVALID_PROVIDER_CONFIG = {
	models: {
		main: { provider: 'invalid-provider', modelId: 'some-model' },
		research: {
			provider: 'perplexity',
			modelId: 'llama-3-sonar-large-32k-online'
		}
	},
	global: {
		logLevel: 'warn'
	}
};

// Claude Code test data
const VALID_CLAUDE_CODE_CONFIG = {
	maxTurns: 5,
	customSystemPrompt: 'You are a helpful coding assistant',
	appendSystemPrompt: 'Always follow best practices',
	permissionMode: 'acceptEdits',
	allowedTools: ['Read', 'LS', 'Edit'],
	disallowedTools: ['Write'],
	mcpServers: {
		'test-server': {
			type: 'stdio',
			command: 'node',
			args: ['server.js'],
			env: { NODE_ENV: 'test' }
		}
	},
	commandSpecific: {
		'add-task': {
			maxTurns: 3,
			permissionMode: 'plan'
		},
		research: {
			customSystemPrompt: 'You are a research assistant'
		}
	}
};

const INVALID_CLAUDE_CODE_CONFIG = {
	maxTurns: 'invalid', // Should be number
	permissionMode: 'invalid-mode', // Invalid enum value
	allowedTools: 'not-an-array', // Should be array
	mcpServers: {
		'invalid-server': {
			type: 'invalid-type', // Invalid enum value
			url: 'not-a-valid-url' // Invalid URL format
		}
	},
	commandSpecific: {
		'invalid-command': {
			// Invalid command name
			maxTurns: -1 // Invalid negative number
		}
	}
};

const PARTIAL_CLAUDE_CODE_CONFIG = {
	maxTurns: 10,
	permissionMode: 'default',
	commandSpecific: {
		'expand-task': {
			customSystemPrompt: 'Focus on task breakdown'
		}
	}
};

// Define spies globally to be restored in afterAll
let consoleErrorSpy;
let consoleWarnSpy;
let fsReadFileSyncSpy;
let fsWriteFileSyncSpy;
let fsExistsSyncSpy;

beforeAll(() => {
	// Set up console spies
	consoleErrorSpy = jest.spyOn(console, 'error').mockImplementation(() => {});
	consoleWarnSpy = jest.spyOn(console, 'warn').mockImplementation(() => {});
});

afterAll(() => {
	// Restore all spies
	jest.restoreAllMocks();
});

// Reset mocks before each test for isolation
beforeEach(() => {
	// Clear all mock calls and reset implementations between tests
	jest.clearAllMocks();
	// Reset the external mock instances for utils
	mockFindProjectRoot.mockReset();
	mockLog.mockReset();
	mockFindConfigPath.mockReset();

	// --- Set up spies ON the imported 'fs' mock ---
	fsExistsSyncSpy = jest.spyOn(fsMocked, 'existsSync');
	fsReadFileSyncSpy = jest.spyOn(fsMocked, 'readFileSync');
	fsWriteFileSyncSpy = jest.spyOn(fsMocked, 'writeFileSync');

	// --- Default Mock Implementations ---
	mockFindProjectRoot.mockReturnValue(MOCK_PROJECT_ROOT); // Default for utils.findProjectRoot
	mockFindConfigPath.mockReturnValue(null); // Default to no config file found
	fsExistsSyncSpy.mockReturnValue(true); // Assume files exist by default

	// Default readFileSync: Return REAL models content, mocked config, or throw error
	fsReadFileSyncSpy.mockImplementation((filePath) => {
		const baseName = path.basename(filePath);
		if (baseName === 'supported-models.json') {
			// Return the REAL file content stringified
			return REAL_SUPPORTED_MODELS_CONTENT;
		} else if (filePath === MOCK_CONFIG_PATH) {
			// Still mock the .taskmaster/config.json reads
			return JSON.stringify(DEFAULT_CONFIG); // Default behavior
		}
		// For Jest internal files or other unexpected files, return empty string instead of throwing
		// This prevents Jest's internal file operations from breaking tests
		if (
			filePath.includes('jest-message-util') ||
			filePath.includes('node_modules')
		) {
			return '{}'; // Return empty JSON for Jest internal files
		}
		// Throw for truly unexpected reads that should be caught in tests
		throw new Error(`Unexpected fs.readFileSync call in test: ${filePath}`);
	});

	// Default writeFileSync: Do nothing, just allow calls
	fsWriteFileSyncSpy.mockImplementation(() => {});
});
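
// Tests below override fsReadFileSyncSpy.mockImplementation(...) as needed to
// substitute their own config and supported-models.json fixtures for this default.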

// --- Validation Functions ---
describe('Validation Functions', () => {
	// Tests for validateProvider and validateProviderModelCombination
	test('validateProvider should return true for valid providers', () => {
		expect(configManager.validateProvider('openai')).toBe(true);
		expect(configManager.validateProvider('anthropic')).toBe(true);
		expect(configManager.validateProvider('google')).toBe(true);
		expect(configManager.validateProvider('perplexity')).toBe(true);
		expect(configManager.validateProvider('ollama')).toBe(true);
		expect(configManager.validateProvider('openrouter')).toBe(true);
		expect(configManager.validateProvider('bedrock')).toBe(true);
	});

	test('validateProvider should return false for invalid providers', () => {
		expect(configManager.validateProvider('invalid-provider')).toBe(false);
		expect(configManager.validateProvider('grok')).toBe(false); // Not a known provider id
		expect(configManager.validateProvider('')).toBe(false);
		expect(configManager.validateProvider(null)).toBe(false);
	});

	test('validateProviderModelCombination should validate known good combinations', () => {
		// Re-load config to ensure MODEL_MAP is populated from mock (now real data)
		configManager.getConfig(MOCK_PROJECT_ROOT, true);
		expect(
			configManager.validateProviderModelCombination('openai', 'gpt-4o')
		).toBe(true);
		expect(
			configManager.validateProviderModelCombination(
				'anthropic',
				'claude-3-5-sonnet-20241022'
			)
		).toBe(true);
	});

	test('validateProviderModelCombination should return false for known bad combinations', () => {
		// Re-load config to ensure MODEL_MAP is populated from mock (now real data)
		configManager.getConfig(MOCK_PROJECT_ROOT, true);
		expect(
			configManager.validateProviderModelCombination(
				'openai',
				'claude-3-opus-20240229'
			)
		).toBe(false);
	});

	test('validateProviderModelCombination should return false for ollama/openrouter models not in their lists', () => {
		// Re-load config to ensure MODEL_MAP is populated from mock (now real data)
		configManager.getConfig(MOCK_PROJECT_ROOT, true);
		expect(
			configManager.validateProviderModelCombination('ollama', 'any-model')
		).toBe(false);
		expect(
			configManager.validateProviderModelCombination('openrouter', 'any/model')
		).toBe(false);
	});

	test('validateProviderModelCombination should return true for providers not in map', () => {
		// Re-load config to ensure MODEL_MAP is populated from mock (now real data)
		configManager.getConfig(MOCK_PROJECT_ROOT, true);
		// The implementation returns true if the provider isn't in the map
		expect(
			configManager.validateProviderModelCombination(
				'unknown-provider',
				'some-model'
			)
		).toBe(true);
	});
});

// --- Claude Code Validation Tests ---
describe('Claude Code Validation', () => {
	test('validateClaudeCodeSettings should return valid settings for correct input', () => {
		const result = configManager.validateClaudeCodeSettings(
			VALID_CLAUDE_CODE_CONFIG
		);

		expect(result).toEqual(VALID_CLAUDE_CODE_CONFIG);
		expect(consoleWarnSpy).not.toHaveBeenCalled();
	});

	test('validateClaudeCodeSettings should return empty object for invalid input', () => {
		const result = configManager.validateClaudeCodeSettings(
			INVALID_CLAUDE_CODE_CONFIG
		);

		expect(result).toEqual({});
		expect(consoleWarnSpy).toHaveBeenCalledWith(
			expect.stringContaining('Warning: Invalid Claude Code settings in config')
		);
	});

	test('validateClaudeCodeSettings should handle partial valid configuration', () => {
		const result = configManager.validateClaudeCodeSettings(
			PARTIAL_CLAUDE_CODE_CONFIG
		);

		expect(result).toEqual(PARTIAL_CLAUDE_CODE_CONFIG);
		expect(consoleWarnSpy).not.toHaveBeenCalled();
	});

	test('validateClaudeCodeSettings should return empty object for empty input', () => {
		const result = configManager.validateClaudeCodeSettings({});

		expect(result).toEqual({});
		expect(consoleWarnSpy).not.toHaveBeenCalled();
	});

	test('validateClaudeCodeSettings should handle null/undefined input', () => {
		expect(configManager.validateClaudeCodeSettings(null)).toEqual({});
		expect(configManager.validateClaudeCodeSettings(undefined)).toEqual({});
		expect(consoleWarnSpy).toHaveBeenCalledTimes(2);
	});
});

// --- Claude Code Getter Tests ---
describe('Claude Code Getter Functions', () => {
	test('getClaudeCodeSettings should return default empty object when no config exists', () => {
		// No config file exists, should return empty object
		fsExistsSyncSpy.mockReturnValue(false);
		const settings = configManager.getClaudeCodeSettings(MOCK_PROJECT_ROOT);

		expect(settings).toEqual({});
	});

	test('getClaudeCodeSettings should return merged settings from config file', () => {
		// Config file with Claude Code settings
		const configWithClaudeCode = {
			...VALID_CUSTOM_CONFIG,
			claudeCode: VALID_CLAUDE_CODE_CONFIG
		};

		// Mock findConfigPath to return the mock config path
		mockFindConfigPath.mockReturnValue(MOCK_CONFIG_PATH);

		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH)
				return JSON.stringify(configWithClaudeCode);
			if (path.basename(filePath) === 'supported-models.json') {
				return JSON.stringify({
					openai: [{ id: 'gpt-4o' }],
					google: [{ id: 'gemini-1.5-pro-latest' }],
					anthropic: [
						{ id: 'claude-3-opus-20240229' },
						{ id: 'claude-3-7-sonnet-20250219' },
						{ id: 'claude-3-5-sonnet' }
					],
					perplexity: [{ id: 'sonar-pro' }],
					ollama: [],
					openrouter: []
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);

		const settings = configManager.getClaudeCodeSettings(
			MOCK_PROJECT_ROOT,
			true
		); // Force reload

		expect(settings).toEqual(VALID_CLAUDE_CODE_CONFIG);
	});

	test('getClaudeCodeSettingsForCommand should return command-specific settings', () => {
		// Config with command-specific settings
		const configWithClaudeCode = {
			...VALID_CUSTOM_CONFIG,
			claudeCode: VALID_CLAUDE_CODE_CONFIG
		};

		// Mock findConfigPath to return the mock config path
		mockFindConfigPath.mockReturnValue(MOCK_CONFIG_PATH);

		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (path.basename(filePath) === 'supported-models.json') return '{}';
			if (filePath === MOCK_CONFIG_PATH)
				return JSON.stringify(configWithClaudeCode);
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);

		const settings = configManager.getClaudeCodeSettingsForCommand(
			'add-task',
			MOCK_PROJECT_ROOT,
			true
		); // Force reload

		// Should merge global settings with command-specific settings
		const expectedSettings = {
			...VALID_CLAUDE_CODE_CONFIG,
			...VALID_CLAUDE_CODE_CONFIG.commandSpecific['add-task']
		};
		expect(settings).toEqual(expectedSettings);
	});

	test('getClaudeCodeSettingsForCommand should return global settings for unknown command', () => {
		// Config with Claude Code settings
		const configWithClaudeCode = {
			...VALID_CUSTOM_CONFIG,
			claudeCode: PARTIAL_CLAUDE_CODE_CONFIG
		};

		// Mock findConfigPath to return the mock config path
		mockFindConfigPath.mockReturnValue(MOCK_CONFIG_PATH);

		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (path.basename(filePath) === 'supported-models.json') return '{}';
			if (filePath === MOCK_CONFIG_PATH)
				return JSON.stringify(configWithClaudeCode);
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);

		const settings = configManager.getClaudeCodeSettingsForCommand(
			'unknown-command',
			MOCK_PROJECT_ROOT,
			true
		); // Force reload

		// Should return global settings only
		expect(settings).toEqual(PARTIAL_CLAUDE_CODE_CONFIG);
	});
});

// --- getConfig Tests ---
describe('getConfig Tests', () => {
	test('should return default config if the config file does not exist', () => {
		// Arrange
		fsExistsSyncSpy.mockReturnValue(false);
		// findProjectRoot mock is set in beforeEach

		// Act: Call getConfig with explicit root
		const config = configManager.getConfig(MOCK_PROJECT_ROOT, true); // Force reload

		// Assert
		expect(config).toEqual(DEFAULT_CONFIG);
		expect(mockFindProjectRoot).not.toHaveBeenCalled(); // Explicit root provided
		// The implementation checks for .taskmaster directory first
		expect(fsExistsSyncSpy).toHaveBeenCalledWith(
			path.join(MOCK_PROJECT_ROOT, '.taskmaster')
		);
		expect(fsReadFileSyncSpy).not.toHaveBeenCalled(); // No read if file doesn't exist
		expect(consoleWarnSpy).toHaveBeenCalledWith(
			expect.stringContaining('not found at provided project root')
		);
	});

	test.skip('should use findProjectRoot and return defaults if file not found', () => {
		// TODO: Fix mock interaction, findProjectRoot isn't being registered as called
		// Arrange
		fsExistsSyncSpy.mockReturnValue(false);
		// findProjectRoot mock is set in beforeEach

		// Act: Call getConfig without explicit root
		const config = configManager.getConfig(null, true); // Force reload

		// Assert
		expect(mockFindProjectRoot).toHaveBeenCalled(); // Should be called now
		expect(fsExistsSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
		expect(config).toEqual(DEFAULT_CONFIG);
		expect(fsReadFileSyncSpy).not.toHaveBeenCalled();
		expect(consoleWarnSpy).toHaveBeenCalledWith(
			expect.stringContaining('not found at derived root')
		); // Adjusted expected warning
	});

	test('should read and merge valid config file with defaults', () => {
		// Arrange: Override readFileSync for this test
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH)
				return JSON.stringify(VALID_CUSTOM_CONFIG);
			if (path.basename(filePath) === 'supported-models.json') {
				// Provide necessary models for validation within getConfig
				return JSON.stringify({
					openai: [{ id: 'gpt-4o' }],
					google: [{ id: 'gemini-1.5-pro-latest' }],
					perplexity: [{ id: 'sonar-pro' }],
					anthropic: [
						{ id: 'claude-3-opus-20240229' },
						{ id: 'claude-3-5-sonnet' },
						{ id: 'claude-3-7-sonnet-20250219' }
					],
					ollama: [],
					openrouter: []
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);
		// findProjectRoot mock set in beforeEach

		// Act
		const config = configManager.getConfig(MOCK_PROJECT_ROOT, true); // Force reload

		// Assert: Construct expected merged config
		const expectedMergedConfig = {
			models: {
				main: {
					...DEFAULT_CONFIG.models.main,
					...VALID_CUSTOM_CONFIG.models.main
				},
				research: {
					...DEFAULT_CONFIG.models.research,
					...VALID_CUSTOM_CONFIG.models.research
				},
				fallback: {
					...DEFAULT_CONFIG.models.fallback,
					...VALID_CUSTOM_CONFIG.models.fallback
				}
			},
			global: { ...DEFAULT_CONFIG.global, ...VALID_CUSTOM_CONFIG.global },
			claudeCode: {
				...DEFAULT_CONFIG.claudeCode,
				...VALID_CUSTOM_CONFIG.claudeCode
			},
			grokCli: { ...DEFAULT_CONFIG.grokCli }
		};
		expect(config).toEqual(expectedMergedConfig);
		expect(fsExistsSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
		expect(fsReadFileSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH, 'utf-8');
	});

	test('should merge defaults for partial config file', () => {
		// Arrange
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH) return JSON.stringify(PARTIAL_CONFIG);
			if (path.basename(filePath) === 'supported-models.json') {
				return JSON.stringify({
					openai: [{ id: 'gpt-4-turbo' }],
					perplexity: [{ id: 'sonar-pro' }],
					anthropic: [
						{ id: 'claude-3-7-sonnet-20250219' },
						{ id: 'claude-3-5-sonnet' }
					],
					ollama: [],
					openrouter: []
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);
		// findProjectRoot mock set in beforeEach

		// Act
		const config = configManager.getConfig(MOCK_PROJECT_ROOT, true);

		// Assert: Construct expected merged config
		const expectedMergedConfig = {
			models: {
				main: { ...DEFAULT_CONFIG.models.main, ...PARTIAL_CONFIG.models.main },
				research: { ...DEFAULT_CONFIG.models.research },
				fallback: { ...DEFAULT_CONFIG.models.fallback }
			},
			global: { ...DEFAULT_CONFIG.global, ...PARTIAL_CONFIG.global },
			claudeCode: {
				...DEFAULT_CONFIG.claudeCode,
				...PARTIAL_CONFIG.claudeCode
			},
			grokCli: { ...DEFAULT_CONFIG.grokCli }
		};
		expect(config).toEqual(expectedMergedConfig);
		expect(fsReadFileSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH, 'utf-8');
	});

	test('should handle JSON parsing error and return defaults', () => {
		// Arrange
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH) return 'invalid json';
			// Mock models read needed for initial load before parse error
			if (path.basename(filePath) === 'supported-models.json') {
				return JSON.stringify({
					anthropic: [
						{ id: 'claude-3-7-sonnet-20250219' },
						{ id: 'claude-3-5-sonnet' }
					],
					perplexity: [{ id: 'sonar-pro' }],
					ollama: [],
					openrouter: []
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);
		// findProjectRoot mock set in beforeEach

		// Act
		const config = configManager.getConfig(MOCK_PROJECT_ROOT, true);

		// Assert
		expect(config).toEqual(DEFAULT_CONFIG);
		expect(consoleErrorSpy).toHaveBeenCalledWith(
			expect.stringContaining('Error reading or parsing')
		);
	});

	test('should handle file read error and return defaults', () => {
		// Arrange
		const readError = new Error('Permission denied');
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH) throw readError;
			// Mock models read needed for initial load before read error
			if (path.basename(filePath) === 'supported-models.json') {
				return JSON.stringify({
					anthropic: [
						{ id: 'claude-3-7-sonnet-20250219' },
						{ id: 'claude-3-5-sonnet' }
					],
					perplexity: [{ id: 'sonar-pro' }],
					ollama: [],
					openrouter: []
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);
		// findProjectRoot mock set in beforeEach

		// Act
		const config = configManager.getConfig(MOCK_PROJECT_ROOT, true);

		// Assert
		expect(config).toEqual(DEFAULT_CONFIG);
		expect(consoleErrorSpy).toHaveBeenCalledWith(
			expect.stringContaining('Permission denied. Using default configuration.')
		);
	});

	test('should validate provider and fallback to default if invalid', () => {
		// Arrange
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH)
				return JSON.stringify(INVALID_PROVIDER_CONFIG);
			if (path.basename(filePath) === 'supported-models.json') {
				return JSON.stringify({
					perplexity: [{ id: 'llama-3-sonar-large-32k-online' }],
					anthropic: [
						{ id: 'claude-3-7-sonnet-20250219' },
						{ id: 'claude-3-5-sonnet' }
					],
					ollama: [],
					openrouter: []
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);
		// findProjectRoot mock set in beforeEach

		// Act
		const config = configManager.getConfig(MOCK_PROJECT_ROOT, true);

		// Assert
		expect(consoleWarnSpy).toHaveBeenCalledWith(
			expect.stringContaining(
				'Warning: Invalid main provider "invalid-provider"'
			)
		);
		const expectedMergedConfig = {
			models: {
				main: { ...DEFAULT_CONFIG.models.main },
				research: {
					...DEFAULT_CONFIG.models.research,
					...INVALID_PROVIDER_CONFIG.models.research
				},
				fallback: { ...DEFAULT_CONFIG.models.fallback }
			},
			global: { ...DEFAULT_CONFIG.global, ...INVALID_PROVIDER_CONFIG.global },
			claudeCode: {
				...DEFAULT_CONFIG.claudeCode,
				...INVALID_PROVIDER_CONFIG.claudeCode
			},
			grokCli: { ...DEFAULT_CONFIG.grokCli }
		};
		expect(config).toEqual(expectedMergedConfig);
	});
});

// --- writeConfig Tests ---
describe('writeConfig', () => {
	test('should write valid config to file', () => {
		// Arrange (Default mocks are sufficient)
		// findProjectRoot mock set in beforeEach
		fsWriteFileSyncSpy.mockImplementation(() => {}); // Ensure it doesn't throw

		// Act
		const success = configManager.writeConfig(
			VALID_CUSTOM_CONFIG,
			MOCK_PROJECT_ROOT
		);

		// Assert
		expect(success).toBe(true);
		expect(fsWriteFileSyncSpy).toHaveBeenCalledWith(
			MOCK_CONFIG_PATH,
			JSON.stringify(VALID_CUSTOM_CONFIG, null, 2) // writeConfig stringifies
		);
		expect(consoleErrorSpy).not.toHaveBeenCalled();
	});

	test('should return false and log error if write fails', () => {
		// Arrange
		const mockWriteError = new Error('Disk full');
		fsWriteFileSyncSpy.mockImplementation(() => {
			throw mockWriteError;
		});
		// findProjectRoot mock set in beforeEach

		// Act
		const success = configManager.writeConfig(
			VALID_CUSTOM_CONFIG,
			MOCK_PROJECT_ROOT
		);

		// Assert
		expect(success).toBe(false);
		expect(fsWriteFileSyncSpy).toHaveBeenCalled();
		expect(consoleErrorSpy).toHaveBeenCalledWith(
			expect.stringContaining('Disk full')
		);
	});

	test.skip('should return false if project root cannot be determined', () => {
		// TODO: Fix mock interaction or function logic, returns true unexpectedly in test
		// Arrange: Override mock for this specific test
		mockFindProjectRoot.mockReturnValue(null);

		// Act: Call without explicit root
		const success = configManager.writeConfig(VALID_CUSTOM_CONFIG);

		// Assert
		expect(success).toBe(false); // Function should return false if root is null
		expect(mockFindProjectRoot).toHaveBeenCalled();
		expect(fsWriteFileSyncSpy).not.toHaveBeenCalled();
		expect(consoleErrorSpy).toHaveBeenCalledWith(
			expect.stringContaining('Could not determine project root')
		);
	});
});

// --- Getter Functions ---
describe('Getter Functions', () => {
	test('getMainProvider should return provider from config', () => {
		// Arrange: Set up readFileSync to return VALID_CUSTOM_CONFIG
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH)
				return JSON.stringify(VALID_CUSTOM_CONFIG);
			if (path.basename(filePath) === 'supported-models.json') {
				return JSON.stringify({
					openai: [{ id: 'gpt-4o' }],
					google: [{ id: 'gemini-1.5-pro-latest' }],
					anthropic: [
						{ id: 'claude-3-opus-20240229' },
						{ id: 'claude-3-7-sonnet-20250219' },
						{ id: 'claude-3-5-sonnet' }
					],
					perplexity: [{ id: 'sonar-pro' }],
					ollama: [],
					openrouter: []
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);
		// findProjectRoot mock set in beforeEach

		// Act
		const provider = configManager.getMainProvider(MOCK_PROJECT_ROOT);

		// Assert
		expect(provider).toBe(VALID_CUSTOM_CONFIG.models.main.provider);
	});

	test('getLogLevel should return logLevel from config', () => {
		// Arrange: Set up readFileSync to return VALID_CUSTOM_CONFIG
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH)
				return JSON.stringify(VALID_CUSTOM_CONFIG);
			if (path.basename(filePath) === 'supported-models.json') {
				// Provide enough mock model data for validation within getConfig
				return JSON.stringify({
					openai: [{ id: 'gpt-4o' }],
					google: [{ id: 'gemini-1.5-pro-latest' }],
					anthropic: [
						{ id: 'claude-3-opus-20240229' },
						{ id: 'claude-3-7-sonnet-20250219' },
						{ id: 'claude-3-5-sonnet' }
					],
					perplexity: [{ id: 'sonar-pro' }],
					ollama: [],
					openrouter: []
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);
		// findProjectRoot mock set in beforeEach

		// Act
		const logLevel = configManager.getLogLevel(MOCK_PROJECT_ROOT);

		// Assert
		expect(logLevel).toBe(VALID_CUSTOM_CONFIG.global.logLevel);
	});

	test('getResponseLanguage should return responseLanguage from config', () => {
		// Arrange
		// Prepare a config object with responseLanguage property for this test
		const configWithLanguage = JSON.stringify({
			models: {
				main: { provider: 'openai', modelId: 'gpt-4-turbo' }
			},
			global: {
				projectName: 'Test Project',
				responseLanguage: '中文'
			}
		});

		// Set up fs.readFileSync to return our test config
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH) {
				return configWithLanguage;
			}
			if (path.basename(filePath) === 'supported-models.json') {
				return JSON.stringify({
					openai: [{ id: 'gpt-4-turbo' }]
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});

		fsExistsSyncSpy.mockReturnValue(true);

		// Ensure getConfig returns new values instead of cached ones
		configManager.getConfig(MOCK_PROJECT_ROOT, true);

		// Act
		const responseLanguage =
			configManager.getResponseLanguage(MOCK_PROJECT_ROOT);

		// Assert
		expect(responseLanguage).toBe('中文');
	});

	test('getResponseLanguage should fall back to English when responseLanguage is not in config', () => {
		// Arrange
		const configWithoutLanguage = JSON.stringify({
			models: {
				main: { provider: 'openai', modelId: 'gpt-4-turbo' }
			},
			global: {
				projectName: 'Test Project'
				// No responseLanguage property
			}
		});

		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH) {
				return configWithoutLanguage;
			}
			if (path.basename(filePath) === 'supported-models.json') {
				return JSON.stringify({
					openai: [{ id: 'gpt-4-turbo' }]
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});

		fsExistsSyncSpy.mockReturnValue(true);

		// Ensure getConfig returns new values instead of cached ones
		configManager.getConfig(MOCK_PROJECT_ROOT, true);

		// Act
		const responseLanguage =
			configManager.getResponseLanguage(MOCK_PROJECT_ROOT);

		// Assert
		expect(responseLanguage).toBe('English');
	});

	// Add more tests for other getters (getResearchProvider, getProjectName, etc.)
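
	// A sketch of one such getter test, following the getLogLevel pattern above
	// (assumes getProjectName is exported like the other getters):
	test('getProjectName should return projectName from config', () => {
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH)
				return JSON.stringify(VALID_CUSTOM_CONFIG);
			if (path.basename(filePath) === 'supported-models.json') return '{}';
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);
		configManager.getConfig(MOCK_PROJECT_ROOT, true); // Force reload past cache

		expect(configManager.getProjectName(MOCK_PROJECT_ROOT)).toBe(
			VALID_CUSTOM_CONFIG.global.projectName
		);
	});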
});

// --- isConfigFilePresent Tests ---
describe('isConfigFilePresent', () => {
	test('should return true if config file exists', () => {
		fsExistsSyncSpy.mockReturnValue(true);
		// findProjectRoot mock set in beforeEach
		expect(configManager.isConfigFilePresent(MOCK_PROJECT_ROOT)).toBe(true);
		expect(fsExistsSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
	});

	test('should return false if config file does not exist', () => {
		fsExistsSyncSpy.mockReturnValue(false);
		// findProjectRoot mock set in beforeEach
		expect(configManager.isConfigFilePresent(MOCK_PROJECT_ROOT)).toBe(false);
		expect(fsExistsSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
	});

	test.skip('should use findProjectRoot if explicitRoot is not provided', () => {
		// TODO: Fix mock interaction, findProjectRoot isn't being registered as called
		fsExistsSyncSpy.mockReturnValue(true);
		// findProjectRoot mock set in beforeEach
		expect(configManager.isConfigFilePresent()).toBe(true);
		expect(mockFindProjectRoot).toHaveBeenCalled(); // Should be called now
	});
});

// --- getAllProviders Tests ---
describe('getAllProviders', () => {
	test('should return all providers from ALL_PROVIDERS constant', () => {
		// Arrange: Ensure config is loaded with real data
		configManager.getConfig(null, true); // Force load using the mock that returns real data

		// Act
		const providers = configManager.getAllProviders();

		// Assert
		// getAllProviders() should return the same as the ALL_PROVIDERS constant
		expect(providers).toEqual(configManager.ALL_PROVIDERS);
		expect(providers.length).toBe(configManager.ALL_PROVIDERS.length);

		// Verify it includes both validated and custom providers
		expect(providers).toEqual(
			expect.arrayContaining(configManager.VALIDATED_PROVIDERS)
		);
		expect(providers).toEqual(
			expect.arrayContaining(Object.values(configManager.CUSTOM_PROVIDERS))
		);
	});
});

// Add tests for getParametersForRole if needed

// --- defaultNumTasks Tests ---
describe('Configuration Getters', () => {
	test('getDefaultNumTasks should return configured value when config is valid', () => {
		// Arrange: Mock fs.readFileSync to return valid config when called with the expected path
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH) {
				return JSON.stringify({
					global: {
						defaultNumTasks: 15
					}
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);

		// Force reload to clear cache
		configManager.getConfig(MOCK_PROJECT_ROOT, true);

		// Act: Call getDefaultNumTasks with explicit root
		const result = configManager.getDefaultNumTasks(MOCK_PROJECT_ROOT);

		// Assert
		expect(result).toBe(15);
	});

	test('getDefaultNumTasks should return fallback when config value is invalid', () => {
		// Arrange: Mock fs.readFileSync to return invalid config
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH) {
				return JSON.stringify({
					global: {
						defaultNumTasks: 'invalid'
					}
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);

		// Force reload to clear cache
		configManager.getConfig(MOCK_PROJECT_ROOT, true);

		// Act: Call getDefaultNumTasks with explicit root
		const result = configManager.getDefaultNumTasks(MOCK_PROJECT_ROOT);

		// Assert
		expect(result).toBe(10); // Should fallback to DEFAULTS.global.defaultNumTasks
	});

	test('getDefaultNumTasks should return fallback when config value is missing', () => {
		// Arrange: Mock fs.readFileSync to return config without defaultNumTasks
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH) {
				return JSON.stringify({
					global: {}
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);

		// Force reload to clear cache
		configManager.getConfig(MOCK_PROJECT_ROOT, true);

		// Act: Call getDefaultNumTasks with explicit root
		const result = configManager.getDefaultNumTasks(MOCK_PROJECT_ROOT);

		// Assert
		expect(result).toBe(10); // Should fallback to DEFAULTS.global.defaultNumTasks
	});

	test('getDefaultNumTasks should handle non-existent config file', () => {
		// Arrange: Mock file not existing
		fsExistsSyncSpy.mockReturnValue(false);

		// Force reload to clear cache
		configManager.getConfig(MOCK_PROJECT_ROOT, true);

		// Act: Call getDefaultNumTasks with explicit root
		const result = configManager.getDefaultNumTasks(MOCK_PROJECT_ROOT);

		// Assert
		expect(result).toBe(10); // Should fallback to DEFAULTS.global.defaultNumTasks
	});

	test('getDefaultNumTasks should accept explicit project root', () => {
		// Arrange: Mock fs.readFileSync to return valid config
		fsReadFileSyncSpy.mockImplementation((filePath) => {
			if (filePath === MOCK_CONFIG_PATH) {
				return JSON.stringify({
					global: {
						defaultNumTasks: 20
					}
				});
			}
			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
		});
		fsExistsSyncSpy.mockReturnValue(true);

		// Force reload to clear cache
		configManager.getConfig(MOCK_PROJECT_ROOT, true);

		// Act: Call getDefaultNumTasks with explicit project root
		const result = configManager.getDefaultNumTasks(MOCK_PROJECT_ROOT);

		// Assert
		expect(result).toBe(20);
	});
});

// Note: Tests for setMainModel, setResearchModel were removed as the functions were removed in the implementation.
// If similar setter functions exist, add tests for them following the writeConfig pattern.

```

--------------------------------------------------------------------------------
/tests/e2e/run_e2e.sh:
--------------------------------------------------------------------------------

```bash
#!/bin/bash

# Treat unset variables as an error when substituting.
set -u
# Prevent errors in pipelines from being masked.
set -o pipefail

# --- Default Settings ---
run_verification_test=true

# --- Argument Parsing ---
# Simple loop to check for the skip flag.
# Argument parsing happens *before* the main block piped to tee so that the
# skip decision can be logged early.
processed_args=()
while [[ $# -gt 0 ]]; do
  case "$1" in
    --skip-verification)
      run_verification_test=false
      echo "[INFO] Argument '--skip-verification' detected. Fallback verification will be skipped."
      shift # Consume the flag
      ;;
    --analyze-log)
      # Keep the analyze-log flag handling separate for now
      # It exits early, so doesn't conflict with the main run flags
      processed_args+=("$1")
      if [[ $# -gt 1 ]]; then
        processed_args+=("$2")
        shift 2
      else
        shift 1
      fi
      ;;
    *)
      # Unknown argument, pass it along or handle error
      # For now, just pass it along in case --analyze-log needs it later
      processed_args+=("$1")
      shift
      ;;
  esac
done
# Restore processed arguments ONLY if the array is not empty
if [ ${#processed_args[@]} -gt 0 ]; then
  set -- "${processed_args[@]}"
fi
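
# Usage (flags parsed above):
#   ./tests/e2e/run_e2e.sh                        # full run, incl. fallback verification
#   ./tests/e2e/run_e2e.sh --skip-verification    # full run, skipping fallback verification
#   ./tests/e2e/run_e2e.sh --analyze-log [file]   # analysis-only mode (latest log if file omitted)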


# --- Configuration ---
# Assumes script is run from the project root (claude-task-master)
TASKMASTER_SOURCE_DIR="." # Current directory is the source
# Base directory for test runs, relative to project root
BASE_TEST_DIR="$TASKMASTER_SOURCE_DIR/tests/e2e/_runs"
# Log directory, relative to project root
LOG_DIR="$TASKMASTER_SOURCE_DIR/tests/e2e/log"
# Path to the sample PRD, relative to project root
SAMPLE_PRD_SOURCE="$TASKMASTER_SOURCE_DIR/tests/fixtures/sample-prd.txt"
# Path to the main .env file in the source directory
MAIN_ENV_FILE="$TASKMASTER_SOURCE_DIR/.env"
# ---

# <<< Source the helper script >>>
# shellcheck source=tests/e2e/e2e_helpers.sh
source "$TASKMASTER_SOURCE_DIR/tests/e2e/e2e_helpers.sh"

# ==========================================
# >>> Global Helper Functions Defined in run_e2e.sh <<<
# --- Helper Functions (Define globally before export) ---
_format_duration() {
  local total_seconds=$1
  local minutes=$((total_seconds / 60))
  local seconds=$((total_seconds % 60))
  printf "%dm%02ds" "$minutes" "$seconds"
}
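# e.g. `_format_duration 125` prints "2m05s"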

# Note: This relies on 'overall_start_time' being set globally before the function is called
_get_elapsed_time_for_log() {
  local current_time
  current_time=$(date +%s)
  # Use overall_start_time here, as start_time_for_helpers might not be relevant globally
  local elapsed_seconds
  elapsed_seconds=$((current_time - overall_start_time))
  _format_duration "$elapsed_seconds"
}

log_info() {
  echo "[INFO] [$(_get_elapsed_time_for_log)] $(date +"%Y-%m-%d %H:%M:%S") $1"
}

log_success() {
  echo "[SUCCESS] [$(_get_elapsed_time_for_log)] $(date +"%Y-%m-%d %H:%M:%S") $1"
}

log_error() {
  echo "[ERROR] [$(_get_elapsed_time_for_log)] $(date +"%Y-%m-%d %H:%M:%S") $1" >&2
}

log_step() {
  test_step_count=$((test_step_count + 1))
  echo ""
  echo "============================================="
  echo "  STEP ${test_step_count}: [$(_get_elapsed_time_for_log)] $(date +"%Y-%m-%d %H:%M:%S") $1"
  echo "============================================="
}
# ==========================================

# <<< Export helper functions for subshells >>>
export -f log_info log_success log_error log_step _format_duration _get_elapsed_time_for_log extract_and_sum_cost
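# export -f makes these helpers available to separately invoked bash scripts
# (subshells inherit them anyway); the fallback verification script is assumed
# to rely on this.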

# --- Argument Parsing for Analysis-Only Mode ---
# This remains the same, as it exits early if matched
if [ "$#" -ge 1 ] && [ "$1" == "--analyze-log" ]; then
  LOG_TO_ANALYZE=""
  # Check if a log file path was provided as the second argument
  if [ "$#" -ge 2 ] && [ -n "$2" ]; then
    LOG_TO_ANALYZE="$2"
    echo "[INFO] Using specified log file for analysis: $LOG_TO_ANALYZE"
  else
    echo "[INFO] Log file not specified. Attempting to find the latest log..."
    # Find the latest log file in the LOG_DIR
    # Ensure LOG_DIR is absolute for ls to work correctly regardless of PWD
    ABS_LOG_DIR="$(cd "$TASKMASTER_SOURCE_DIR/$LOG_DIR" && pwd)"
    LATEST_LOG=$(ls -t "$ABS_LOG_DIR"/e2e_run_*.log 2>/dev/null | head -n 1)

    if [ -z "$LATEST_LOG" ]; then
      echo "[ERROR] No log files found matching 'e2e_run_*.log' in $ABS_LOG_DIR. Cannot analyze." >&2
      exit 1
    fi
    LOG_TO_ANALYZE="$LATEST_LOG"
    echo "[INFO] Found latest log file: $LOG_TO_ANALYZE"
  fi

  # Ensure the log path is absolute (it should be if found by ls, but double-check)
  if [[ "$LOG_TO_ANALYZE" != /* ]]; then
    LOG_TO_ANALYZE="$(pwd)/$LOG_TO_ANALYZE" # Fallback if relative path somehow occurred
  fi
  echo "[INFO] Running in analysis-only mode for log: $LOG_TO_ANALYZE"

  # --- Derive TEST_RUN_DIR from log file path ---
  # Extract timestamp like YYYYMMDD_HHMMSS from e2e_run_YYYYMMDD_HHMMSS.log
  log_basename=$(basename "$LOG_TO_ANALYZE")
  # Ensure the sed command matches the .log suffix correctly
  timestamp_match=$(echo "$log_basename" | sed -n 's/^e2e_run_\([0-9]\{8\}_[0-9]\{6\}\)\.log$/\1/p')
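  # e.g. "e2e_run_20250101_120000.log" -> "20250101_120000"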

  if [ -z "$timestamp_match" ]; then
    echo "[ERROR] Could not extract timestamp from log file name: $log_basename" >&2
    echo "[ERROR] Expected format: e2e_run_YYYYMMDD_HHMMSS.log" >&2
    exit 1
  fi

  # Construct the expected run directory path relative to project root
  EXPECTED_RUN_DIR="$TASKMASTER_SOURCE_DIR/tests/e2e/_runs/run_$timestamp_match"
  # Make it absolute
  EXPECTED_RUN_DIR_ABS="$(cd "$TASKMASTER_SOURCE_DIR" && pwd)/tests/e2e/_runs/run_$timestamp_match"

  if [ ! -d "$EXPECTED_RUN_DIR_ABS" ]; then
    echo "[ERROR] Corresponding test run directory not found: $EXPECTED_RUN_DIR_ABS" >&2
    exit 1
  fi

  # Save original dir before changing
  ORIGINAL_DIR=$(pwd)

  echo "[INFO] Changing directory to $EXPECTED_RUN_DIR_ABS for analysis context..."
  cd "$EXPECTED_RUN_DIR_ABS"

  # Call the analysis function (sourced from helpers)
  echo "[INFO] Calling analyze_log_with_llm function..."
  analyze_log_with_llm "$LOG_TO_ANALYZE" "$(cd "$ORIGINAL_DIR/$TASKMASTER_SOURCE_DIR" && pwd)" # Pass absolute project root
  ANALYSIS_EXIT_CODE=$?

  # Return to original directory
  cd "$ORIGINAL_DIR"
  exit $ANALYSIS_EXIT_CODE
fi
# --- End Analysis-Only Mode Logic ---

# --- Normal Execution Starts Here (if not in analysis-only mode) ---

# --- Test State Variables ---
# Note: These are mainly for step numbering within the log now, not for final summary
test_step_count=0
start_time_for_helpers=0 # Separate start time for helper functions inside the pipe
total_e2e_cost="0.0" # Initialize total E2E cost
# ---

# --- Log File Setup ---
# Create the log directory if it doesn't exist
mkdir -p "$LOG_DIR"
# Define timestamped log file path
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
# <<< Use pwd to create an absolute path AND add .log extension >>>
LOG_FILE="$(pwd)/$LOG_DIR/e2e_run_${TIMESTAMP}.log"
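# e.g. <project-root>/tests/e2e/log/e2e_run_20250101_120000.log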

# Define and create the test run directory *before* the main pipe
mkdir -p "$BASE_TEST_DIR" # Ensure base exists first
TEST_RUN_DIR="$BASE_TEST_DIR/run_$TIMESTAMP"
mkdir -p "$TEST_RUN_DIR"

# Echo starting message to the original terminal BEFORE the main piped block
echo "Starting E2E test. Output will be shown here and saved to: $LOG_FILE"
echo "Running from directory: $(pwd)"
echo "--- Starting E2E Run ---" # Separator before piped output starts

# Record start time for overall duration *before* the pipe
overall_start_time=$(date +%s)

# <<< Save the original directory globally; used for the .env copy and the verification script path >>>
ORIGINAL_DIR=$(pwd)


# --- Main Execution Block (Piped to tee) ---
# Wrap the main part of the script in braces and pipe its output (stdout and stderr) to tee
{
  # Note: Helper functions are now defined globally above,
  # but we still need start_time_for_helpers if any logging functions
  # called *inside* this block depend on it. If not, it can be removed.
  start_time_for_helpers=$(date +%s) # Keep if needed by helpers called inside this block

  # Log the verification decision
  if [ "$run_verification_test" = true ]; then
      log_info "Fallback verification test will be run as part of this E2E test."
  else
      log_info "Fallback verification test will be SKIPPED (--skip-verification flag detected)."
  fi

  # --- Dependency Checks ---
  log_step "Checking for dependencies (jq, bc)"
  if ! command -v jq &> /dev/null; then
      log_error "Dependency 'jq' is not installed or not found in PATH. Please install jq (e.g., 'brew install jq' or 'sudo apt-get install jq')."
      exit 1
  fi
  if ! command -v bc &> /dev/null; then
      log_error "Dependency 'bc' not installed (for cost calculation). Please install bc (e.g., 'brew install bc' or 'sudo apt-get install bc')."
      exit 1
  fi
  log_success "Dependencies 'jq' and 'bc' found."

  # --- Test Setup (Output to tee) ---
  log_step "Setting up test environment"

  log_step "Creating global npm link for task-master-ai"
  if npm link; then
    log_success "Global link created/updated."
  else
    log_error "Failed to run 'npm link'. Check permissions or output for details."
    exit 1
  fi

  log_info "Ensured base test directory exists: $BASE_TEST_DIR"

  log_info "Using test run directory (created earlier): $TEST_RUN_DIR"

  # Check if source .env file exists
  if [ ! -f "$MAIN_ENV_FILE" ]; then
      log_error "Source .env file not found at $MAIN_ENV_FILE. Cannot proceed with API-dependent tests."
      exit 1
  fi
  log_info "Source .env file found at $MAIN_ENV_FILE."

  # Check if sample PRD exists
  if [ ! -f "$SAMPLE_PRD_SOURCE" ]; then
    log_error "Sample PRD not found at $SAMPLE_PRD_SOURCE. Please check path."
    exit 1
  fi

  log_info "Copying sample PRD to test directory..."
  cp "$SAMPLE_PRD_SOURCE" "$TEST_RUN_DIR/prd.txt"
  if [ ! -f "$TEST_RUN_DIR/prd.txt" ]; then
    log_error "Failed to copy sample PRD to $TEST_RUN_DIR."
    exit 1
  fi
  log_success "Sample PRD copied."

  cd "$TEST_RUN_DIR"
  log_info "Changed directory to $(pwd)"

  # === Copy .env file BEFORE init ===
  log_step "Copying source .env file for API keys"
  if cp "$ORIGINAL_DIR/.env" ".env"; then
    log_success ".env file copied successfully."
  else
    log_error "Failed to copy .env file from $ORIGINAL_DIR/.env"
    exit 1
  fi
  # ========================================

  # --- Test Execution (Output to tee) ---

  log_step "Linking task-master-ai package locally"
  npm link task-master-ai
  log_success "Package linked locally."

  log_step "Initializing Task Master project (non-interactive)"
  task-master init -y --name="E2E Test $TIMESTAMP" --description="Automated E2E test run"
  if [ ! -f ".taskmaster/config.json" ]; then
    log_error "Initialization failed: .taskmaster/config.json not found."
    exit 1
  fi
  log_success "Project initialized."

  log_step "Parsing PRD"
  cmd_output_prd=$(task-master parse-prd ./prd.txt --force 2>&1)
  exit_status_prd=$?
  echo "$cmd_output_prd"
  extract_and_sum_cost "$cmd_output_prd"
  if [ $exit_status_prd -ne 0 ] || [ ! -s ".taskmaster/tasks/tasks.json" ]; then
    log_error "Parsing PRD failed: .taskmaster/tasks/tasks.json not found or is empty. Exit status: $exit_status_prd"
    exit 1
  else
    log_success "PRD parsed successfully."
  fi

  log_step "Expanding Task 1 (to ensure subtask 1.1 exists)"
  cmd_output_analyze=$(task-master analyze-complexity --research --output complexity_results.json 2>&1)
  exit_status_analyze=$?
  echo "$cmd_output_analyze"
  extract_and_sum_cost "$cmd_output_analyze"
  if [ $exit_status_analyze -ne 0 ] || [ ! -f "complexity_results.json" ]; then
    log_error "Complexity analysis failed: complexity_results.json not found. Exit status: $exit_status_analyze"
    exit 1
  else
    log_success "Complexity analysis saved to complexity_results.json"
  fi

  log_step "Generating complexity report"
  task-master complexity-report --file complexity_results.json > complexity_report_formatted.log
  log_success "Formatted complexity report saved to complexity_report_formatted.log"

  log_step "Expanding Task 1 (assuming it exists)"
  cmd_output_expand1=$(task-master expand --id=1 --cr complexity_results.json 2>&1)
  exit_status_expand1=$?
  echo "$cmd_output_expand1"
  extract_and_sum_cost "$cmd_output_expand1"
  if [ $exit_status_expand1 -ne 0 ]; then
    log_error "Expanding Task 1 failed. Exit status: $exit_status_expand1"
  else
    log_success "Attempted to expand Task 1."
  fi

  log_step "Setting status for Subtask 1.1 (assuming it exists)"
  task-master set-status --id=1.1 --status=done
  log_success "Attempted to set status for Subtask 1.1 to 'done'."

  log_step "Listing tasks again (after changes)"
  task-master list --with-subtasks > task_list_after_changes.log
  log_success "Task list after changes saved to task_list_after_changes.log"

  # === Start New Test Section: Tag-Aware Expand Testing ===
  log_step "Creating additional tag for expand testing"
  task-master add-tag feature-expand --description="Tag for testing expand command with tag preservation"
  log_success "Created feature-expand tag."

  log_step "Adding task to feature-expand tag"
  task-master add-task --tag=feature-expand --prompt="Test task for tag-aware expansion" --priority=medium
  # Get the new task ID dynamically
  new_expand_task_id=$(jq -r '.["feature-expand"].tasks[-1].id' .taskmaster/tasks/tasks.json)
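  # (jq: .id of the last element in the feature-expand tag's tasks array)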
  log_success "Added task $new_expand_task_id to feature-expand tag."

  log_step "Verifying tags exist before expand test"
  task-master tags > tags_before_expand.log
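  # jq 'keys | length' counts the top-level tag keys in tasks.json
  # (e.g. "master" + "feature-expand" = 2)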
  tag_count_before=$(jq 'keys | length' .taskmaster/tasks/tasks.json)
  log_success "Tag count before expand: $tag_count_before"

  log_step "Expanding task in feature-expand tag (testing tag corruption fix)"
  cmd_output_expand_tagged=$(task-master expand --tag=feature-expand --id="$new_expand_task_id" 2>&1)
  exit_status_expand_tagged=$?
  echo "$cmd_output_expand_tagged"
  extract_and_sum_cost "$cmd_output_expand_tagged"
  if [ $exit_status_expand_tagged -ne 0 ]; then
    log_error "Tagged expand failed. Exit status: $exit_status_expand_tagged"
  else
    log_success "Tagged expand completed."
  fi

  log_step "Verifying tag preservation after expand"
  task-master tags > tags_after_expand.log
  tag_count_after=$(jq 'keys | length' .taskmaster/tasks/tasks.json)
  
  if [ "$tag_count_before" -eq "$tag_count_after" ]; then
    log_success "Tag count preserved: $tag_count_after (no corruption detected)"
  else
    log_error "Tag corruption detected! Before: $tag_count_before, After: $tag_count_after"
  fi

  log_step "Verifying master tag still exists and has tasks"
  master_task_count=$(jq -r '.master.tasks | length' .taskmaster/tasks/tasks.json 2>/dev/null || echo "0")
  if [ "$master_task_count" -gt "0" ]; then
    log_success "Master tag preserved with $master_task_count tasks"
  else
    log_error "Master tag corrupted or empty after tagged expand"
  fi

  log_step "Verifying feature-expand tag has expanded subtasks"
  expanded_subtask_count=$(jq -r ".\"feature-expand\".tasks[] | select(.id == $new_expand_task_id) | .subtasks | length" .taskmaster/tasks/tasks.json 2>/dev/null || echo "0")
  if [ "$expanded_subtask_count" -gt "0" ]; then
    log_success "Expand successful: $expanded_subtask_count subtasks created in feature-expand tag"
  else
    log_error "Expand failed: No subtasks found in feature-expand tag"
  fi

  log_step "Testing force expand with tag preservation"
  cmd_output_force_expand=$(task-master expand --tag=feature-expand --id="$new_expand_task_id" --force 2>&1)
  exit_status_force_expand=$?
  echo "$cmd_output_force_expand"
  extract_and_sum_cost "$cmd_output_force_expand"
  
  # Verify tags still preserved after force expand
  tag_count_after_force=$(jq 'keys | length' .taskmaster/tasks/tasks.json)
  if [ "$tag_count_before" -eq "$tag_count_after_force" ]; then
    log_success "Force expand preserved all tags"
  else
    log_error "Force expand caused tag corruption"
  fi

  log_step "Testing expand --all with tag preservation"
  # Add another task to feature-expand for expand-all testing
  task-master add-task --tag=feature-expand --prompt="Second task for expand-all testing" --priority=low
  second_expand_task_id=$(jq -r '.["feature-expand"].tasks[-1].id' .taskmaster/tasks/tasks.json)
  
  cmd_output_expand_all=$(task-master expand --tag=feature-expand --all 2>&1)
  exit_status_expand_all=$?
  echo "$cmd_output_expand_all"
  extract_and_sum_cost "$cmd_output_expand_all"
  
  # Verify tags preserved after expand-all
  tag_count_after_all=$(jq 'keys | length' .taskmaster/tasks/tasks.json)
  if [ "$tag_count_before" -eq "$tag_count_after_all" ]; then
    log_success "Expand --all preserved all tags"
  else
    log_error "Expand --all caused tag corruption"
  fi
  
  log_success "Completed expand --all tag preservation test."

  # === End New Test Section: Tag-Aware Expand Testing ===

  # === Test Model Commands ===
  log_step "Checking initial model configuration"
  task-master models > models_initial_config.log
  log_success "Initial model config saved to models_initial_config.log"

  log_step "Setting main model"
  task-master models --set-main claude-3-7-sonnet-20250219
  log_success "Set main model."

  log_step "Setting research model"
  task-master models --set-research sonar-pro
  log_success "Set research model."

  log_step "Setting fallback model"
  task-master models --set-fallback claude-3-5-sonnet-20241022
  log_success "Set fallback model."

  log_step "Checking final model configuration"
  task-master models > models_final_config.log
  log_success "Final model config saved to models_final_config.log"

  log_step "Resetting main model to default (Claude Sonnet) before provider tests"
  task-master models --set-main claude-3-7-sonnet-20250219
  log_success "Main model reset to claude-3-7-sonnet-20250219."

  # === End Model Commands Test ===

  # === Fallback Model generateObjectService Verification ===
  if [ "$run_verification_test" = true ]; then
    log_step "Starting Fallback Model (generateObjectService) Verification (Calls separate script)"
    verification_script_path="$ORIGINAL_DIR/tests/e2e/run_fallback_verification.sh"

    if [ -x "$verification_script_path" ]; then
        log_info "--- Executing Fallback Verification Script: $verification_script_path ---"
        verification_output=$("$verification_script_path" "$(pwd)" 2>&1)
        verification_exit_code=$?
        echo "$verification_output"
        extract_and_sum_cost "$verification_output"

        log_info "--- Finished Fallback Verification Script Execution (Exit Code: $verification_exit_code) ---"

        # Log success/failure based on captured exit code
        if [ $verification_exit_code -eq 0 ]; then
            log_success "Fallback verification script reported success."
        else
            log_error "Fallback verification script reported FAILURE (Exit Code: $verification_exit_code)."
        fi
    else
        log_error "Fallback verification script not found or not executable at $verification_script_path. Skipping verification."
    fi
  else
      log_info "Skipping Fallback Verification test as requested by flag."
  fi
  # === END Verification Section ===


  # === Multi-Provider Add-Task Test (Keep as is) ===
  log_step "Starting Multi-Provider Add-Task Test Sequence"

  # Define providers, models, and flags
  # Array order matters: providers[i] corresponds to models[i] and flags[i]
  declare -a providers=("anthropic" "openai" "google" "perplexity" "xai" "openrouter")
  declare -a models=(
    "claude-3-7-sonnet-20250219"
    "gpt-4o"
    "gemini-2.5-pro-preview-05-06"
    "sonar-pro" # Note: This is research-only, add-task might fail if not using research model
    "grok-3"
    "anthropic/claude-3.7-sonnet" # OpenRouter uses Claude 3.7
  )
  # Flags: Add provider-specific flags here, e.g., --openrouter. Use empty string if none.
  declare -a flags=("" "" "" "" "" "--openrouter")

  # Consistent prompt for all providers
  add_task_prompt="Create a task to implement user authentication using OAuth 2.0 with Google as the provider. Include steps for registering the app, handling the callback, and storing user sessions."
  log_info "Using consistent prompt for add-task tests: \"$add_task_prompt\""
  echo "--- Multi-Provider Add Task Summary ---" > provider_add_task_summary.log # Initialize summary log

  for i in "${!providers[@]}"; do
    provider="${providers[$i]}"
    model="${models[$i]}"
    flag="${flags[$i]}"

    log_step "Testing Add-Task with Provider: $provider (Model: $model)"

    # 1. Set the main model for this provider
    log_info "Setting main model to $model for $provider ${flag:+using flag $flag}..."
    set_model_cmd="task-master models --set-main \"$model\" $flag"
    echo "Executing: $set_model_cmd"
    if eval $set_model_cmd; then
      log_success "Successfully set main model for $provider."
    else
      log_error "Failed to set main model for $provider. Skipping add-task for this provider."
      # Optionally save failure info here if needed for LLM analysis
      echo "Provider $provider set-main FAILED" >> provider_add_task_summary.log
      continue # Skip to the next provider
    fi

    # 2. Run add-task
    log_info "Running add-task with prompt..."
    add_task_output_file="add_task_raw_output_${provider}_${model//\//_}.log" # Sanitize ID
    # Run add-task and capture ALL output (stdout & stderr) to a file AND a variable
    add_task_cmd_output=$(task-master add-task --prompt "$add_task_prompt" 2>&1 | tee "$add_task_output_file")
    add_task_exit_code=${PIPESTATUS[0]}
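    # ${PIPESTATUS[0]} is task-master's exit code, not tee's (the last command in the pipe)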

    # 3. Check for success and extract task ID
    new_task_id=""
    extract_and_sum_cost "$add_task_cmd_output"
    if [ $add_task_exit_code -eq 0 ] && (echo "$add_task_cmd_output" | grep -q "✓ Added new task #" || echo "$add_task_cmd_output" | grep -q "✅ New task created successfully:" || echo "$add_task_cmd_output" | grep -q "Task [0-9]\+ Created Successfully"); then
      new_task_id=$(echo "$add_task_cmd_output" | grep -o -E "(Task |#)[0-9.]+" | grep -o -E "[0-9.]+" | head -n 1)
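      # e.g. "✓ Added new task #12" -> "#12" -> "12"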
      if [ -n "$new_task_id" ]; then
        log_success "Add-task succeeded for $provider. New task ID: $new_task_id"
        echo "Provider $provider add-task SUCCESS (ID: $new_task_id)" >> provider_add_task_summary.log
      else
        # Succeeded but couldn't parse ID - treat as warning/anomaly
        log_error "Add-task command succeeded for $provider, but failed to extract task ID from output."
        echo "Provider $provider add-task SUCCESS (ID extraction FAILED)" >> provider_add_task_summary.log
        new_task_id="UNKNOWN_ID_EXTRACTION_FAILED"
      fi
    else
      log_error "Add-task command failed for $provider (Exit Code: $add_task_exit_code). See $add_task_output_file for details."
      echo "Provider $provider add-task FAILED (Exit Code: $add_task_exit_code)" >> provider_add_task_summary.log
      new_task_id="FAILED"
    fi

    # 4. Run task show if ID was obtained (even if extraction failed, use placeholder)
    if [ "$new_task_id" != "FAILED" ] && [ "$new_task_id" != "UNKNOWN_ID_EXTRACTION_FAILED" ]; then
      log_info "Running task show for new task ID: $new_task_id"
      show_output_file="add_task_show_output_${provider}_id_${new_task_id}.log"
      if task-master show "$new_task_id" > "$show_output_file"; then
        log_success "Task show output saved to $show_output_file"
      else
        log_error "task show command failed for ID $new_task_id. Check log."
        # Still keep the file, it might contain error output
      fi
    elif [ "$new_task_id" == "UNKNOWN_ID_EXTRACTION_FAILED" ]; then
       log_info "Skipping task show for $provider due to ID extraction failure."
    else
       log_info "Skipping task show for $provider due to add-task failure."
    fi

  done # End of provider loop

  log_step "Finished Multi-Provider Add-Task Test Sequence"
  echo "Provider add-task summary log available at: provider_add_task_summary.log"
  # === End Multi-Provider Add-Task Test ===
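  # The summary log accumulates one line per provider in the formats written
  # above, e.g. (hypothetical run):
  #   Provider openai add-task SUCCESS (ID: 11)
  #   Provider ollama set-main FAILED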

  log_step "Listing tasks again (after multi-add)"
  task-master list --with-subtasks > task_list_after_multi_add.log
  log_success "Task list after multi-add saved to task_list_after_multi_add.log"


  # === Resume Core Task Commands Test ===
  log_step "Listing tasks (for core tests)"
  task-master list > task_list_core_test_start.log
  log_success "Core test initial task list saved."

  log_step "Getting next task"
  task-master next > next_task_core_test.log
  log_success "Core test next task saved."

  log_step "Showing Task 1 details"
  task-master show 1 > task_1_details_core_test.log
  log_success "Task 1 details saved."

  log_step "Adding dependency (Task 2 depends on Task 1)"
  task-master add-dependency --id=2 --depends-on=1
  log_success "Added dependency 2->1."

  log_step "Validating dependencies (after add)"
  task-master validate-dependencies > validate_dependencies_after_add_core.log
  log_success "Dependency validation after add saved."

  log_step "Removing dependency (Task 2 depends on Task 1)"
  task-master remove-dependency --id=2 --depends-on=1
  log_success "Removed dependency 2->1."

  log_step "Fixing dependencies (should be no-op now)"
  task-master fix-dependencies > fix_dependencies_output_core.log
  log_success "Fix dependencies attempted."

  # === Start New Test Section: Validate/Fix Bad Dependencies ===

  log_step "Intentionally adding non-existent dependency (1 -> 999)"
  task-master add-dependency --id=1 --depends-on=999 || log_error "Failed to add non-existent dependency (unexpected)"
  # Don't exit even if the above fails; the goal is to test validation
  log_success "Attempted to add dependency 1 -> 999."

  log_step "Validating dependencies (expecting non-existent error)"
  task-master validate-dependencies > validate_deps_non_existent.log 2>&1 || true # Allow command to fail without exiting script
  if grep -q "Non-existent dependency ID: 999" validate_deps_non_existent.log; then
      log_success "Validation correctly identified non-existent dependency 999."
  else
      log_error "Validation DID NOT report non-existent dependency 999 as expected. Check validate_deps_non_existent.log"
  fi

  log_step "Fixing dependencies (should remove 1 -> 999)"
  task-master fix-dependencies > fix_deps_after_non_existent.log
  log_success "Attempted to fix dependencies."

  log_step "Validating dependencies (after fix)"
  task-master validate-dependencies > validate_deps_after_fix_non_existent.log 2>&1 || true # Allow potential failure
  if grep -q "Non-existent dependency ID: 999" validate_deps_after_fix_non_existent.log; then
      log_error "Validation STILL reports non-existent dependency 999 after fix. Check logs."
  else
      log_success "Validation shows non-existent dependency 999 was removed."
  fi


  log_step "Intentionally adding circular dependency (4 -> 5 -> 4)"
  task-master add-dependency --id=4 --depends-on=5 || log_error "Failed to add dependency 4->5"
  task-master add-dependency --id=5 --depends-on=4 || log_error "Failed to add dependency 5->4"
  log_success "Attempted to add dependencies 4 -> 5 and 5 -> 4."


  log_step "Validating dependencies (expecting circular error)"
  task-master validate-dependencies > validate_deps_circular.log 2>&1 || true # Allow command to fail
  # Note: Adjust the grep pattern based on the EXACT error message from validate-dependencies
  if grep -q -E "Circular dependency detected involving task IDs: (4, 5|5, 4)" validate_deps_circular.log; then
      log_success "Validation correctly identified circular dependency between 4 and 5."
  else
      log_error "Validation DID NOT report circular dependency 4<->5 as expected. Check validate_deps_circular.log"
  fi

  log_step "Fixing dependencies (should remove one side of 4 <-> 5)"
  task-master fix-dependencies > fix_deps_after_circular.log
  log_success "Attempted to fix dependencies."

  log_step "Validating dependencies (after fix circular)"
  task-master validate-dependencies > validate_deps_after_fix_circular.log 2>&1 || true # Allow potential failure
  if grep -q -E "Circular dependency detected involving task IDs: (4, 5|5, 4)" validate_deps_after_fix_circular.log; then
      log_error "Validation STILL reports circular dependency 4<->5 after fix. Check logs."
  else
      log_success "Validation shows circular dependency 4<->5 was resolved."
  fi

  # === End New Test Section ===

  # Find the next available task ID dynamically instead of hardcoding 11, 12
  # Assuming tasks are added sequentially and we didn't remove any core tasks yet
  last_task_id=$(jq '[.master.tasks[].id] | max' .taskmaster/tasks/tasks.json)
  manual_task_id=$((last_task_id + 1))
  ai_task_id=$((manual_task_id + 1))
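  # The jq filter gathers every top-level task id into an array and takes the
  # max; subtask ids like 3.1 live in a nested array and are ignored. Example:
  # with tasks 1-10 present, jq returns 10, so manual_task_id=11 and ai_task_id=12.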

  log_step "Adding Task $manual_task_id (Manual)"
  task-master add-task --title="Manual E2E Task" --description="Add basic health check endpoint" --priority=low --dependencies=3 # Depends on backend setup
  log_success "Added Task $manual_task_id manually."

  log_step "Adding Task $ai_task_id (AI)"
  cmd_output_add_ai=$(task-master add-task --prompt="Implement basic UI styling using CSS variables for colors and spacing" --priority=medium --dependencies=1 2>&1)
  exit_status_add_ai=$?
  echo "$cmd_output_add_ai"
  extract_and_sum_cost "$cmd_output_add_ai"
  if [ $exit_status_add_ai -ne 0 ]; then
    log_error "Adding AI Task $ai_task_id failed. Exit status: $exit_status_add_ai"
  else
    log_success "Added Task $ai_task_id via AI prompt."
  fi


  log_step "Updating Task 3 (update-task AI)"
  cmd_output_update_task3=$(task-master update-task --id=3 --prompt="Update backend server setup: Ensure CORS is configured to allow requests from the frontend origin." 2>&1)
  exit_status_update_task3=$?
  echo "$cmd_output_update_task3"
  extract_and_sum_cost "$cmd_output_update_task3"
  if [ $exit_status_update_task3 -ne 0 ]; then
    log_error "Updating Task 3 failed. Exit status: $exit_status_update_task3"
  else
    log_success "Attempted update for Task 3."
  fi

  log_step "Updating Tasks from Task 5 (update AI)"
  cmd_output_update_from5=$(task-master update --from=5 --prompt="Refactor the backend storage module to use a simple JSON file (storage.json) instead of an in-memory object for persistence. Update relevant tasks." 2>&1)
  exit_status_update_from5=$?
  echo "$cmd_output_update_from5"
  extract_and_sum_cost "$cmd_output_update_from5"
  if [ $exit_status_update_from5 -ne 0 ]; then
    log_error "Updating from Task 5 failed. Exit status: $exit_status_update_from5"
  else
    log_success "Attempted update from Task 5 onwards."
  fi

  log_step "Expanding Task 8 (AI)"
  cmd_output_expand8=$(task-master expand --id=8 2>&1)
  exit_status_expand8=$?
  echo "$cmd_output_expand8"
  extract_and_sum_cost "$cmd_output_expand8"
  if [ $exit_status_expand8 -ne 0 ]; then
    log_error "Expanding Task 8 failed. Exit status: $exit_status_expand8"
  else
    log_success "Attempted to expand Task 8."
  fi

  log_step "Updating Subtask 8.1 (update-subtask AI)"
  cmd_output_update_subtask81=$(task-master update-subtask --id=8.1 --prompt="Implementation note: Remember to handle potential API errors and display a user-friendly message." 2>&1)
  exit_status_update_subtask81=$?
  echo "$cmd_output_update_subtask81"
  extract_and_sum_cost "$cmd_output_update_subtask81"
  if [ $exit_status_update_subtask81 -ne 0 ]; then
    log_error "Updating Subtask 8.1 failed. Exit status: $exit_status_update_subtask81"
  else
    log_success "Attempted update for Subtask 8.1."
  fi

  # Add a couple more subtasks for multi-remove test
  log_step "Adding subtasks to Task 2 (for multi-remove test)"
  task-master add-subtask --parent=2 --title="Subtask 2.1 for removal"
  task-master add-subtask --parent=2 --title="Subtask 2.2 for removal"
  log_success "Added subtasks 2.1 and 2.2."

  log_step "Removing Subtasks 2.1 and 2.2 (multi-ID)"
  task-master remove-subtask --id=2.1,2.2
  log_success "Removed subtasks 2.1 and 2.2."

  log_step "Setting status for Task 1 to done"
  task-master set-status --id=1 --status=done
  log_success "Set status for Task 1 to done."

  log_step "Getting next task (after status change)"
  task-master next > next_task_after_change_core.log
  log_success "Next task after change saved."

  # === Start New Test Section: List Filtering ===
  log_step "Listing tasks filtered by status 'done'"
  task-master list --status=done > task_list_status_done.log
  log_success "Filtered list saved to task_list_status_done.log (Manual/LLM check recommended)"
  # Optional assertion: Check if Task 1 ID exists and Task 2 ID does NOT
  # if grep -q "^1\." task_list_status_done.log && ! grep -q "^2\." task_list_status_done.log; then
  #    log_success "Basic check passed: Task 1 found, Task 2 not found in 'done' list."
  # else
  #    log_error "Basic check failed for list --status=done."
  # fi
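  # A jq-based variant of the same assertion (a sketch; assumes the tasks.json
  # layout used elsewhere in this script):
  #   jq -e '.master.tasks[] | select(.id == 1) | .status == "done"' \
  #     .taskmaster/tasks/tasks.json > /dev/null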
  # === End New Test Section ===

  log_step "Clearing subtasks from Task 8"
  task-master clear-subtasks --id=8
  log_success "Attempted to clear subtasks from Task 8."

  log_step "Removing Tasks $manual_task_id and $ai_task_id (multi-ID)"
  # Remove the tasks we added earlier
  task-master remove-task --id="$manual_task_id,$ai_task_id" -y
  log_success "Removed tasks $manual_task_id and $ai_task_id."

  # === Start New Test Section: Subtasks & Dependencies ===

  log_step "Expanding Task 2 (to ensure multiple tasks have subtasks)"
  task-master expand --id=2 # Expand task 2: Backend setup
  log_success "Attempted to expand Task 2."

  log_step "Listing tasks with subtasks (Before Clear All)"
  task-master list --with-subtasks > task_list_before_clear_all.log
  log_success "Task list before clear-all saved."

  log_step "Clearing ALL subtasks"
  task-master clear-subtasks --all
  log_success "Attempted to clear all subtasks."

  log_step "Listing tasks with subtasks (After Clear All)"
  task-master list --with-subtasks > task_list_after_clear_all.log
  log_success "Task list after clear-all saved. (Manual/LLM check recommended to verify subtasks removed)"

  log_step "Expanding Task 3 again (to have subtasks for next test)"
  task-master expand --id=3
  log_success "Attempted to expand Task 3."
  # Verify subtask 3.1 exists in tasks.json
  if ! jq -e '.master.tasks[] | select(.id == 3) | .subtasks[]? | select(.id == 1)' .taskmaster/tasks/tasks.json > /dev/null; then
      log_error "Subtask 3.1 not found in tasks.json after expanding Task 3."
      exit 1
  fi
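  # Note: `.subtasks[]?` iterates only when the subtasks array exists, so a task
  # with no subtasks is treated the same as "3.1 missing" (jq -e exits non-zero
  # on empty output) instead of printing a "Cannot iterate over null" error.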

  log_step "Adding dependency: Task 4 depends on Subtask 3.1"
  task-master add-dependency --id=4 --depends-on=3.1
  log_success "Added dependency 4 -> 3.1."

  log_step "Showing Task 4 details (after adding subtask dependency)"
  task-master show 4 > task_4_details_after_dep_add.log
  log_success "Task 4 details saved. (Manual/LLM check recommended for dependency [3.1])"

  log_step "Removing dependency: Task 4 depends on Subtask 3.1"
  task-master remove-dependency --id=4 --depends-on=3.1
  log_success "Removed dependency 4 -> 3.1."

  log_step "Showing Task 4 details (after removing subtask dependency)"
  task-master show 4 > task_4_details_after_dep_remove.log
  log_success "Task 4 details saved. (Manual/LLM check recommended to verify dependency removed)"

  # === End New Test Section ===

  log_step "Generating task files (final)"
  task-master generate
  log_success "Generated task files."
  # === End Core Task Commands Test ===

  # === AI Commands (Re-test some after changes) ===
  log_step "Analyzing complexity (AI with Research - Final Check)"
  cmd_output_analyze_final=$(task-master analyze-complexity --research --output complexity_results_final.json 2>&1)
  exit_status_analyze_final=$?
  echo "$cmd_output_analyze_final"
  extract_and_sum_cost "$cmd_output_analyze_final"
  if [ $exit_status_analyze_final -ne 0 ] || [ ! -f "complexity_results_final.json" ]; then
    log_error "Final Complexity analysis failed. Exit status: $exit_status_analyze_final. File found: $(test -f complexity_results_final.json && echo true || echo false)"
    exit 1 # Critical for subsequent report step
  else
    log_success "Final Complexity analysis command executed and file created."
  fi

  log_step "Generating complexity report (Non-AI - Final Check)"
  task-master complexity-report --file complexity_results_final.json > complexity_report_formatted_final.log
  log_success "Final Formatted complexity report saved."

  # === End AI Commands Re-test ===

  log_step "Listing tasks again (final)"
  task-master list --with-subtasks > task_list_final.log
  log_success "Final task list saved to task_list_final.log"

  # --- Test Completion (Output to tee) ---
  log_step "E2E Test Steps Completed"
  echo ""
  ABS_TEST_RUN_DIR="$(pwd)"
  echo "Test artifacts and logs are located in: $ABS_TEST_RUN_DIR"
  echo "Key artifact files (within above dir):"
  ls -1 # List files in the current directory
  echo ""
  echo "Full script log also available at: $LOG_FILE (relative to project root)"

  # Optional: cd back to original directory
  # cd "$ORIGINAL_DIR"

# End of the main execution block brace
} 2>&1 | tee "$LOG_FILE"

# --- Final Terminal Message ---
EXIT_CODE=${PIPESTATUS[0]}
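# Unlike the pipeline inside a command substitution earlier, this pipe runs at
# the top level, so ${PIPESTATUS[0]} really is the exit status of the { ... }
# block rather than tee's. It must be read immediately: every subsequent
# command overwrites PIPESTATUS.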
overall_end_time=$(date +%s)
total_elapsed_seconds=$((overall_end_time - overall_start_time))

# Format total duration
total_minutes=$((total_elapsed_seconds / 60))
total_sec_rem=$((total_elapsed_seconds % 60))
formatted_total_time=$(printf "%dm%02ds" "$total_minutes" "$total_sec_rem")
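# Example: 605 elapsed seconds -> total_minutes=10, total_sec_rem=5, and the
# zero-padded printf format renders "10m05s".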

# Count steps and successes from the log file *after* the pipe finishes
# Use grep -c for counting lines matching the pattern
# Corrected pattern to match '  STEP X:' format
final_step_count=$(grep -c '^[[:space:]]\+STEP [0-9]\+:' "$LOG_FILE" || true)
final_success_count=$(grep -c '\[SUCCESS\]' "$LOG_FILE" || true) # Count lines containing [SUCCESS]
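# grep -c still prints "0" when nothing matches but exits with status 1, so the
# `|| true` guards keep a non-zero grep status from aborting the script if
# errexit is in effect; the "0" is captured into the variable either way.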

echo "--- E2E Run Summary ---"
echo "Log File: $LOG_FILE"
echo "Total Elapsed Time: ${formatted_total_time}"
echo "Total Steps Executed: ${final_step_count}" # Use count from log

if [ $EXIT_CODE -eq 0 ]; then
    echo "Status: SUCCESS"
    # Use counts from log file
    echo "Successful Steps: ${final_success_count}/${final_step_count}"
else
    echo "Status: FAILED"
    # Use count from log file for total steps attempted
    echo "Failure likely occurred during/after Step: ${final_step_count}"
    # Use count from log file for successes before failure
    echo "Successful Steps Before Failure: ${final_success_count}"
    echo "Please check the log file '$LOG_FILE' for error details."
fi
echo "-------------------------"

# --- Attempt LLM Analysis ---
# Run this *after* the main execution block and tee pipe finish writing the log file
if [ -d "$TEST_RUN_DIR" ]; then
  # Define absolute path to source dir if not already defined (though it should be by setup)
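  # ${VAR:-fallback} keeps a value already exported by the setup phase and only
  # falls back to deriving the absolute path from ORIGINAL_DIR plus the
  # relative TASKMASTER_SOURCE_DIR.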
  TASKMASTER_SOURCE_DIR_ABS=${TASKMASTER_SOURCE_DIR_ABS:-$(cd "$ORIGINAL_DIR/$TASKMASTER_SOURCE_DIR" && pwd)}

  cd "$TEST_RUN_DIR"
  # Pass the absolute source directory path
  analyze_log_with_llm "$LOG_FILE" "$TASKMASTER_SOURCE_DIR_ABS"
  ANALYSIS_EXIT_CODE=$? # Capture the exit code of the analysis function
  # Optional: cd back again if needed
  cd "$ORIGINAL_DIR" # Ensure we change back to the original directory
else
  formatted_duration_for_error=$(_format_duration "$total_elapsed_seconds")
  echo "[ERROR] [$formatted_duration_for_error] $(date +"%Y-%m-%d %H:%M:%S") Test run directory $TEST_RUN_DIR not found. Cannot perform LLM analysis." >&2
fi

# Final cost formatting
formatted_total_e2e_cost=$(printf "%.6f" "$total_e2e_cost")
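# printf "%.6f" normalizes the accumulated cost string to six decimal places,
# e.g. "0.1234" renders as "0.123400" and "1e-05" as "0.000010".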
echo "Total E2E AI Cost: $formatted_total_e2e_cost USD"

exit $EXIT_CODE
```