#
tokens: 33252/50000 2/353 files (page 18/19)
lines: off (toggle) GitHub
raw markdown copy
This is page 18 of 19. Use http://codebase.md/beehiveinnovations/gemini-mcp-server?page={x} to view the full context.

# Directory Structure

```
├── .claude
│   ├── commands
│   │   └── fix-github-issue.md
│   └── settings.json
├── .coveragerc
├── .dockerignore
├── .env.example
├── .gitattributes
├── .github
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yml
│   │   ├── config.yml
│   │   ├── documentation.yml
│   │   ├── feature_request.yml
│   │   └── tool_addition.yml
│   ├── pull_request_template.md
│   └── workflows
│       ├── docker-pr.yml
│       ├── docker-release.yml
│       ├── semantic-pr.yml
│       ├── semantic-release.yml
│       └── test.yml
├── .gitignore
├── .pre-commit-config.yaml
├── AGENTS.md
├── CHANGELOG.md
├── claude_config_example.json
├── CLAUDE.md
├── clink
│   ├── __init__.py
│   ├── agents
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── claude.py
│   │   ├── codex.py
│   │   └── gemini.py
│   ├── constants.py
│   ├── models.py
│   ├── parsers
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── claude.py
│   │   ├── codex.py
│   │   └── gemini.py
│   └── registry.py
├── code_quality_checks.ps1
├── code_quality_checks.sh
├── communication_simulator_test.py
├── conf
│   ├── __init__.py
│   ├── azure_models.json
│   ├── cli_clients
│   │   ├── claude.json
│   │   ├── codex.json
│   │   └── gemini.json
│   ├── custom_models.json
│   ├── dial_models.json
│   ├── gemini_models.json
│   ├── openai_models.json
│   ├── openrouter_models.json
│   └── xai_models.json
├── config.py
├── docker
│   ├── README.md
│   └── scripts
│       ├── build.ps1
│       ├── build.sh
│       ├── deploy.ps1
│       ├── deploy.sh
│       └── healthcheck.py
├── docker-compose.yml
├── Dockerfile
├── docs
│   ├── adding_providers.md
│   ├── adding_tools.md
│   ├── advanced-usage.md
│   ├── ai_banter.md
│   ├── ai-collaboration.md
│   ├── azure_openai.md
│   ├── configuration.md
│   ├── context-revival.md
│   ├── contributions.md
│   ├── custom_models.md
│   ├── docker-deployment.md
│   ├── gemini-setup.md
│   ├── getting-started.md
│   ├── index.md
│   ├── locale-configuration.md
│   ├── logging.md
│   ├── model_ranking.md
│   ├── testing.md
│   ├── tools
│   │   ├── analyze.md
│   │   ├── apilookup.md
│   │   ├── challenge.md
│   │   ├── chat.md
│   │   ├── clink.md
│   │   ├── codereview.md
│   │   ├── consensus.md
│   │   ├── debug.md
│   │   ├── docgen.md
│   │   ├── listmodels.md
│   │   ├── planner.md
│   │   ├── precommit.md
│   │   ├── refactor.md
│   │   ├── secaudit.md
│   │   ├── testgen.md
│   │   ├── thinkdeep.md
│   │   ├── tracer.md
│   │   └── version.md
│   ├── troubleshooting.md
│   ├── vcr-testing.md
│   └── wsl-setup.md
├── examples
│   ├── claude_config_macos.json
│   └── claude_config_wsl.json
├── LICENSE
├── providers
│   ├── __init__.py
│   ├── azure_openai.py
│   ├── base.py
│   ├── custom.py
│   ├── dial.py
│   ├── gemini.py
│   ├── openai_compatible.py
│   ├── openai.py
│   ├── openrouter.py
│   ├── registries
│   │   ├── __init__.py
│   │   ├── azure.py
│   │   ├── base.py
│   │   ├── custom.py
│   │   ├── dial.py
│   │   ├── gemini.py
│   │   ├── openai.py
│   │   ├── openrouter.py
│   │   └── xai.py
│   ├── registry_provider_mixin.py
│   ├── registry.py
│   ├── shared
│   │   ├── __init__.py
│   │   ├── model_capabilities.py
│   │   ├── model_response.py
│   │   ├── provider_type.py
│   │   └── temperature.py
│   └── xai.py
├── pyproject.toml
├── pytest.ini
├── README.md
├── requirements-dev.txt
├── requirements.txt
├── run_integration_tests.ps1
├── run_integration_tests.sh
├── run-server.ps1
├── run-server.sh
├── scripts
│   └── sync_version.py
├── server.py
├── simulator_tests
│   ├── __init__.py
│   ├── base_test.py
│   ├── conversation_base_test.py
│   ├── log_utils.py
│   ├── test_analyze_validation.py
│   ├── test_basic_conversation.py
│   ├── test_chat_simple_validation.py
│   ├── test_codereview_validation.py
│   ├── test_consensus_conversation.py
│   ├── test_consensus_three_models.py
│   ├── test_consensus_workflow_accurate.py
│   ├── test_content_validation.py
│   ├── test_conversation_chain_validation.py
│   ├── test_cross_tool_comprehensive.py
│   ├── test_cross_tool_continuation.py
│   ├── test_debug_certain_confidence.py
│   ├── test_debug_validation.py
│   ├── test_line_number_validation.py
│   ├── test_logs_validation.py
│   ├── test_model_thinking_config.py
│   ├── test_o3_model_selection.py
│   ├── test_o3_pro_expensive.py
│   ├── test_ollama_custom_url.py
│   ├── test_openrouter_fallback.py
│   ├── test_openrouter_models.py
│   ├── test_per_tool_deduplication.py
│   ├── test_planner_continuation_history.py
│   ├── test_planner_validation_old.py
│   ├── test_planner_validation.py
│   ├── test_precommitworkflow_validation.py
│   ├── test_prompt_size_limit_bug.py
│   ├── test_refactor_validation.py
│   ├── test_secaudit_validation.py
│   ├── test_testgen_validation.py
│   ├── test_thinkdeep_validation.py
│   ├── test_token_allocation_validation.py
│   ├── test_vision_capability.py
│   └── test_xai_models.py
├── systemprompts
│   ├── __init__.py
│   ├── analyze_prompt.py
│   ├── chat_prompt.py
│   ├── clink
│   │   ├── codex_codereviewer.txt
│   │   ├── default_codereviewer.txt
│   │   ├── default_planner.txt
│   │   └── default.txt
│   ├── codereview_prompt.py
│   ├── consensus_prompt.py
│   ├── debug_prompt.py
│   ├── docgen_prompt.py
│   ├── generate_code_prompt.py
│   ├── planner_prompt.py
│   ├── precommit_prompt.py
│   ├── refactor_prompt.py
│   ├── secaudit_prompt.py
│   ├── testgen_prompt.py
│   ├── thinkdeep_prompt.py
│   └── tracer_prompt.py
├── tests
│   ├── __init__.py
│   ├── CASSETTE_MAINTENANCE.md
│   ├── conftest.py
│   ├── gemini_cassettes
│   │   ├── chat_codegen
│   │   │   └── gemini25_pro_calculator
│   │   │       └── mldev.json
│   │   ├── chat_cross
│   │   │   └── step1_gemini25_flash_number
│   │   │       └── mldev.json
│   │   └── consensus
│   │       └── step2_gemini25_flash_against
│   │           └── mldev.json
│   ├── http_transport_recorder.py
│   ├── mock_helpers.py
│   ├── openai_cassettes
│   │   ├── chat_cross_step2_gpt5_reminder.json
│   │   ├── chat_gpt5_continuation.json
│   │   ├── chat_gpt5_moon_distance.json
│   │   ├── consensus_step1_gpt5_for.json
│   │   └── o3_pro_basic_math.json
│   ├── pii_sanitizer.py
│   ├── sanitize_cassettes.py
│   ├── test_alias_target_restrictions.py
│   ├── test_auto_mode_comprehensive.py
│   ├── test_auto_mode_custom_provider_only.py
│   ├── test_auto_mode_model_listing.py
│   ├── test_auto_mode_provider_selection.py
│   ├── test_auto_mode.py
│   ├── test_auto_model_planner_fix.py
│   ├── test_azure_openai_provider.py
│   ├── test_buggy_behavior_prevention.py
│   ├── test_cassette_semantic_matching.py
│   ├── test_challenge.py
│   ├── test_chat_codegen_integration.py
│   ├── test_chat_cross_model_continuation.py
│   ├── test_chat_openai_integration.py
│   ├── test_chat_simple.py
│   ├── test_clink_claude_agent.py
│   ├── test_clink_claude_parser.py
│   ├── test_clink_codex_agent.py
│   ├── test_clink_gemini_agent.py
│   ├── test_clink_gemini_parser.py
│   ├── test_clink_integration.py
│   ├── test_clink_parsers.py
│   ├── test_clink_tool.py
│   ├── test_collaboration.py
│   ├── test_config.py
│   ├── test_consensus_integration.py
│   ├── test_consensus_schema.py
│   ├── test_consensus.py
│   ├── test_conversation_continuation_integration.py
│   ├── test_conversation_field_mapping.py
│   ├── test_conversation_file_features.py
│   ├── test_conversation_memory.py
│   ├── test_conversation_missing_files.py
│   ├── test_custom_openai_temperature_fix.py
│   ├── test_custom_provider.py
│   ├── test_debug.py
│   ├── test_deploy_scripts.py
│   ├── test_dial_provider.py
│   ├── test_directory_expansion_tracking.py
│   ├── test_disabled_tools.py
│   ├── test_docker_claude_desktop_integration.py
│   ├── test_docker_config_complete.py
│   ├── test_docker_healthcheck.py
│   ├── test_docker_implementation.py
│   ├── test_docker_mcp_validation.py
│   ├── test_docker_security.py
│   ├── test_docker_volume_persistence.py
│   ├── test_file_protection.py
│   ├── test_gemini_token_usage.py
│   ├── test_image_support_integration.py
│   ├── test_image_validation.py
│   ├── test_integration_utf8.py
│   ├── test_intelligent_fallback.py
│   ├── test_issue_245_simple.py
│   ├── test_large_prompt_handling.py
│   ├── test_line_numbers_integration.py
│   ├── test_listmodels_restrictions.py
│   ├── test_listmodels.py
│   ├── test_mcp_error_handling.py
│   ├── test_model_enumeration.py
│   ├── test_model_metadata_continuation.py
│   ├── test_model_resolution_bug.py
│   ├── test_model_restrictions.py
│   ├── test_o3_pro_output_text_fix.py
│   ├── test_o3_temperature_fix_simple.py
│   ├── test_openai_compatible_token_usage.py
│   ├── test_openai_provider.py
│   ├── test_openrouter_provider.py
│   ├── test_openrouter_registry.py
│   ├── test_parse_model_option.py
│   ├── test_per_tool_model_defaults.py
│   ├── test_pii_sanitizer.py
│   ├── test_pip_detection_fix.py
│   ├── test_planner.py
│   ├── test_precommit_workflow.py
│   ├── test_prompt_regression.py
│   ├── test_prompt_size_limit_bug_fix.py
│   ├── test_provider_retry_logic.py
│   ├── test_provider_routing_bugs.py
│   ├── test_provider_utf8.py
│   ├── test_providers.py
│   ├── test_rate_limit_patterns.py
│   ├── test_refactor.py
│   ├── test_secaudit.py
│   ├── test_server.py
│   ├── test_supported_models_aliases.py
│   ├── test_thinking_modes.py
│   ├── test_tools.py
│   ├── test_tracer.py
│   ├── test_utf8_localization.py
│   ├── test_utils.py
│   ├── test_uvx_resource_packaging.py
│   ├── test_uvx_support.py
│   ├── test_workflow_file_embedding.py
│   ├── test_workflow_metadata.py
│   ├── test_workflow_prompt_size_validation_simple.py
│   ├── test_workflow_utf8.py
│   ├── test_xai_provider.py
│   ├── transport_helpers.py
│   └── triangle.png
├── tools
│   ├── __init__.py
│   ├── analyze.py
│   ├── apilookup.py
│   ├── challenge.py
│   ├── chat.py
│   ├── clink.py
│   ├── codereview.py
│   ├── consensus.py
│   ├── debug.py
│   ├── docgen.py
│   ├── listmodels.py
│   ├── models.py
│   ├── planner.py
│   ├── precommit.py
│   ├── refactor.py
│   ├── secaudit.py
│   ├── shared
│   │   ├── __init__.py
│   │   ├── base_models.py
│   │   ├── base_tool.py
│   │   ├── exceptions.py
│   │   └── schema_builders.py
│   ├── simple
│   │   ├── __init__.py
│   │   └── base.py
│   ├── testgen.py
│   ├── thinkdeep.py
│   ├── tracer.py
│   ├── version.py
│   └── workflow
│       ├── __init__.py
│       ├── base.py
│       ├── schema_builders.py
│       └── workflow_mixin.py
├── utils
│   ├── __init__.py
│   ├── client_info.py
│   ├── conversation_memory.py
│   ├── env.py
│   ├── file_types.py
│   ├── file_utils.py
│   ├── image_utils.py
│   ├── model_context.py
│   ├── model_restrictions.py
│   ├── security_config.py
│   ├── storage_backend.py
│   └── token_utils.py
└── zen-mcp-server
```

# Files

--------------------------------------------------------------------------------
/tools/workflow/workflow_mixin.py:
--------------------------------------------------------------------------------

```python
"""
Workflow Mixin for Zen MCP Tools

This module provides a sophisticated workflow-based pattern that enables tools to
perform multi-step work with structured findings and expert analysis.

Key Components:
- BaseWorkflowMixin: Abstract base class providing comprehensive workflow functionality

The workflow pattern enables tools like debug, precommit, and codereview to perform
systematic multi-step work with pause/resume capabilities, context-aware file embedding,
and seamless integration with external AI models for expert analysis.

Features:
- Multi-step workflow orchestration with pause/resume
- Context-aware file embedding optimization
- Expert analysis integration with token budgeting
- Conversation memory and threading support
- Proper inheritance-based architecture (no hasattr/getattr)
- Comprehensive type annotations for IDE support
"""

import json
import logging
import os
import re
from abc import ABC, abstractmethod
from typing import Any, Optional

from mcp.types import TextContent

from config import MCP_PROMPT_SIZE_LIMIT
from utils.conversation_memory import add_turn, create_thread

from ..shared.base_models import ConsolidatedFindings
from ..shared.exceptions import ToolExecutionError

logger = logging.getLogger(__name__)


class BaseWorkflowMixin(ABC):
    """
    Abstract base class providing guided workflow functionality for tools.

    This class implements a sophisticated workflow pattern where the CLI performs
    systematic local work before calling external models for expert analysis.
    Tools can inherit from this class to gain comprehensive workflow capabilities.

    Architecture:
    - Uses proper inheritance patterns instead of hasattr/getattr
    - Provides hook methods with default implementations
    - Requires abstract methods to be implemented by subclasses
    - Fully type-annotated for excellent IDE support

    Context-Aware File Embedding:
    - Intermediate steps: Only reference file names (saves the CLI's context)
    - Final steps: Embed full file content for expert analysis
    - Integrates with existing token budgeting infrastructure

    Requirements:
    This class expects to be used with BaseTool and requires implementation of:
    - get_model_provider(model_name)
    - _resolve_model_context(arguments, request)
    - get_system_prompt()
    - get_default_temperature()
    - _prepare_file_content_for_prompt()
    """

    def __init__(self) -> None:
        """Initialize per-instance workflow state used across steps."""
        super().__init__()
        # Text of the very first request; populated elsewhere in the workflow.
        self.initial_request: Optional[str] = None
        # Mutable state accumulated as steps execute elsewhere in the mixin.
        self.work_history: list[dict[str, Any]] = []
        self.consolidated_findings: ConsolidatedFindings = ConsolidatedFindings()

    # ================================================================================
    # Abstract Methods - Required Implementation by BaseTool or Subclasses
    # ================================================================================

    @abstractmethod
    def get_name(self) -> str:
        """Return the name of this tool. Usually provided by BaseTool.

        Returns:
            Short tool identifier; used throughout this mixin in log
            prefixes and in step-guidance messages shown to the CLI.
        """
        pass

    @abstractmethod
    def get_workflow_request_model(self) -> type:
        """Return the request model class for this workflow tool.

        Returns:
            The class used to validate/parse incoming workflow arguments.
        """
        pass

    @abstractmethod
    def get_system_prompt(self) -> str:
        """Return the system prompt for this tool. Usually provided by BaseTool.

        Returns:
            System prompt text supplied to the external model.
        """
        pass

    @abstractmethod
    def get_language_instruction(self) -> str:
        """Return the language instruction for localization. Usually provided by BaseTool.

        Returns:
            Instruction text controlling the response language of the model.
        """
        pass

    @abstractmethod
    def get_default_temperature(self) -> float:
        """Return the default temperature for this tool. Usually provided by BaseTool.

        Returns:
            Fallback sampling temperature used when a request supplies none
            (see get_request_temperature).
        """
        pass

    @abstractmethod
    def get_model_provider(self, model_name: str) -> Any:
        """Get model provider for the given model. Usually provided by BaseTool.

        Args:
            model_name: Canonical name of the model to resolve.

        Returns:
            Provider object capable of serving the named model.
        """
        pass

    @abstractmethod
    def _resolve_model_context(self, arguments: dict[str, Any], request: Any) -> tuple[str, Any]:
        """Resolve model context from arguments. Usually provided by BaseTool.

        Args:
            arguments: Raw tool-call arguments.
            request: Parsed workflow request object.

        Returns:
            Tuple of (model_name, model_context) used for token budgeting.
        """
        pass

    @abstractmethod
    def _prepare_file_content_for_prompt(
        self,
        request_files: list[str],
        continuation_id: Optional[str],
        context_description: str = "New files",
        max_tokens: Optional[int] = None,
        reserve_tokens: int = 1_000,
        remaining_budget: Optional[int] = None,
        arguments: Optional[dict[str, Any]] = None,
        model_context: Optional[Any] = None,
    ) -> tuple[str, list[str]]:
        """Prepare file content for prompts. Usually provided by BaseTool.

        Args:
            request_files: Paths requested for embedding.
            continuation_id: Thread id, or None for a new conversation.
            context_description: Label for the embedded section.
            max_tokens: Optional hard cap on tokens used for file content.
            reserve_tokens: Tokens held back from the budget (default 1_000).
            remaining_budget: Optional pre-computed remaining token budget.
            arguments: Raw tool-call arguments, if available.
            model_context: Model context for token budgeting, if available.

        Returns:
            Tuple of (formatted file content, list of files actually embedded).
        """
        pass

    # ================================================================================
    # Abstract Methods - Tool-Specific Implementation Required
    # ================================================================================

    @abstractmethod
    def get_work_steps(self, request: Any) -> list[str]:
        """Define tool-specific work steps and criteria.

        Args:
            request: Current workflow request object.

        Returns:
            Ordered list of step descriptions for this tool's workflow.
        """
        pass

    @abstractmethod
    def get_required_actions(
        self, step_number: int, confidence: str, findings: str, total_steps: int, request=None
    ) -> list[str]:
        """Define required actions for each work phase.

        The default get_step_guidance_message() renders these as a numbered
        list the CLI must complete before calling the tool again.

        Args:
            step_number: Current step (1-based)
            confidence: Current confidence level (exploring, low, medium, high, certain)
            findings: Current findings text
            total_steps: Total estimated steps for this work
            request: Optional request object for continuation-aware decisions

        Returns:
            List of specific actions the CLI should take before calling tool again
        """
        pass

    # ================================================================================
    # Hook Methods - Default Implementations with Override Capability
    # ================================================================================

    def should_call_expert_analysis(self, consolidated_findings: ConsolidatedFindings, request=None) -> bool:
        """
        Decide whether the workflow should escalate to an external expert model.

        Default heuristic: escalate once the investigation has produced any
        relevant files, any issues, or at least two findings. Tools with
        different criteria should override this method.

        Args:
            consolidated_findings: Findings accumulated across workflow steps.
            request: Current request object (optional, for backwards compatibility);
                when present, a user opt-out via use_assistant_model wins.

        Returns:
            True when an expert-analysis call should be made.
        """
        # Tools that never use expert analysis short-circuit immediately.
        if not self.requires_expert_analysis():
            return False

        # Respect an explicit user request to skip the assistant model.
        if request and not self.get_request_use_assistant_model(request):
            return False

        # Escalate only when the investigation produced something to analyze.
        has_material = (
            bool(consolidated_findings.relevant_files)
            or bool(consolidated_findings.issues_found)
            or len(consolidated_findings.findings) >= 2
        )
        return has_material

    def prepare_expert_analysis_context(self, consolidated_findings: ConsolidatedFindings) -> str:
        """
        Build the default context string sent to the external expert model.

        Produces a short work summary (step/file counts) followed by every
        recorded finding, one per line. Tools needing richer context should
        override this method. Returns "" for tools without expert analysis.
        """
        if not self.requires_expert_analysis():
            return ""

        summary_lines = [
            f"=== {self.get_name().upper()} WORK SUMMARY ===",
            f"Total steps: {len(consolidated_findings.findings)}",
            f"Files examined: {len(consolidated_findings.files_checked)}",
            f"Relevant files: {len(consolidated_findings.relevant_files)}",
            "",
            "=== WORK PROGRESSION ===",
        ]
        summary_lines.extend(consolidated_findings.findings)
        return "\n".join(summary_lines)

    def requires_expert_analysis(self) -> bool:
        """Signal whether this tool can escalate to an external expert model.

        Self-contained tools (like planner) override this to return False,
        which disables all expert-analysis behavior in this mixin.
        """
        return True

    def should_include_files_in_expert_prompt(self) -> bool:
        """Whether embedded file content belongs in the expert-analysis prompt.

        Defaults to False; tools that need file bodies in the prompt override
        this to return True.
        """
        return False

    def should_embed_system_prompt(self) -> bool:
        """Whether the system prompt should be inlined into the main prompt.

        Defaults to False; override to return True when a tool requires the
        system prompt embedded rather than sent separately.
        """
        return False

    def get_expert_thinking_mode(self) -> str:
        """Default thinking-mode level for expert analysis calls.

        Override to tune depth/cost per tool; also serves as the fallback
        when a request carries no thinking_mode (see get_request_thinking_mode).
        """
        return "high"

    def get_request_temperature(self, request) -> float:
        """Get temperature from request. Override for custom temperature handling.

        Args:
            request: Workflow request object; may lack a temperature attribute.

        Returns:
            The request's temperature when present and not None, otherwise
            the tool default from get_default_temperature().
        """
        # getattr with a default replaces the old try/except AttributeError,
        # which also swallowed AttributeErrors raised *inside*
        # self.get_default_temperature() and retried it, re-raising confusingly.
        temperature = getattr(request, "temperature", None)
        return temperature if temperature is not None else self.get_default_temperature()

    def get_validated_temperature(self, request, model_context: Any) -> tuple[float, list[str]]:
        """
        Extract the request temperature and clamp it to the model's valid range.

        Convenience wrapper combining get_request_temperature() with
        validate_and_correct_temperature() so workflow tools get a
        model-safe value in one call.

        Args:
            request: The request object containing temperature
            model_context: Model context object containing model info

        Returns:
            Tuple of (validated_temperature, warning_messages)
        """
        requested = self.get_request_temperature(request)
        return self.validate_and_correct_temperature(requested, model_context)

    def get_request_thinking_mode(self, request) -> str:
        """Get thinking mode from request. Override for custom thinking mode handling.

        Args:
            request: Workflow request object; may lack a thinking_mode attribute.

        Returns:
            The request's thinking_mode when present and not None, otherwise
            the tool default from get_expert_thinking_mode().
        """
        # getattr mirrors the fix in get_request_temperature: the old
        # try/except AttributeError could mask errors raised inside
        # self.get_expert_thinking_mode() itself.
        thinking_mode = getattr(request, "thinking_mode", None)
        return thinking_mode if thinking_mode is not None else self.get_expert_thinking_mode()

    def get_expert_analysis_instruction(self) -> str:
        """Instruction appended after the expert context in the final prompt.

        Override to supply tool-specific guidance to the expert model.
        """
        return "Please provide expert analysis based on the investigation findings."

    def get_request_use_assistant_model(self, request) -> bool:
        """
        Get use_assistant_model from request. Override for custom assistant model handling.

        Args:
            request: Current request object; may lack a use_assistant_model attribute.

        Returns:
            True if assistant model should be used, False otherwise.
            A missing or None attribute defaults to True (opt-out semantics).
        """
        # getattr keeps this consistent with get_request_temperature /
        # get_request_thinking_mode and avoids the broad try/except pattern.
        use_assistant_model = getattr(request, "use_assistant_model", None)
        return True if use_assistant_model is None else use_assistant_model

    def get_step_guidance_message(self, request) -> str:
        """
        Build the mandatory pause-and-work guidance shown between steps.

        Default implementation renders the tool's required actions as a
        numbered checklist; override for tool-specific guidance.
        """
        actions = self.get_required_actions(
            request.step_number, self.get_request_confidence(request), request.findings, request.total_steps, request
        )
        next_step = request.step_number + 1
        checklist = "\n".join(f"{number}. {action}" for number, action in enumerate(actions, start=1))
        return (
            f"MANDATORY: DO NOT call the {self.get_name()} tool again immediately. "
            f"You MUST first work using appropriate tools. "
            f"REQUIRED ACTIONS before calling {self.get_name()} step {next_step}:\n"
            f"{checklist}"
            f"\n\nOnly call {self.get_name()} again with step_number: {next_step} "
            f"AFTER completing this work."
        )

    def _prepare_files_for_expert_analysis(self) -> str:
        """
        Prepare file content for expert analysis.

        EXPERT ANALYSIS REQUIRES ACTUAL FILE CONTENT:
        Expert analysis needs actual file content of all unique files marked as relevant
        throughout the workflow, regardless of conversation history optimization.

        SIMPLIFIED LOGIC:
        Expert analysis gets all unique files from relevant_files across the entire workflow.
        This includes:
        - Current step's relevant_files (consolidated_findings.relevant_files)
        - Plus any additional relevant_files from conversation history (if continued workflow)

        This ensures expert analysis has complete context without including irrelevant files.

        Returns:
            Formatted file content string, or "" when there is nothing to embed
            or embedding failed (best-effort: errors are logged, never raised).
        """
        all_relevant_files = set()

        # 1. Get files from current consolidated relevant_files
        all_relevant_files.update(self.consolidated_findings.relevant_files)

        # 2. Get additional relevant_files from conversation history (if continued workflow)
        # Best-effort: any failure here is logged and we proceed with step files only.
        try:
            current_arguments = self.get_current_arguments()
            if current_arguments:
                continuation_id = current_arguments.get("continuation_id")

                if continuation_id:
                    # Local import to avoid import cost/cycles on the hot path.
                    from utils.conversation_memory import get_conversation_file_list, get_thread

                    thread_context = get_thread(continuation_id)
                    if thread_context:
                        # Get all files from conversation (these were relevant_files in previous steps)
                        conversation_files = get_conversation_file_list(thread_context)
                        all_relevant_files.update(conversation_files)
                        logger.debug(
                            f"[WORKFLOW_FILES] {self.get_name()}: Added {len(conversation_files)} files from conversation history"
                        )
        except Exception as e:
            logger.warning(f"[WORKFLOW_FILES] {self.get_name()}: Could not get conversation files: {e}")

        # Convert to list and remove any empty/None values
        files_for_expert = [f for f in all_relevant_files if f and f.strip()]

        if not files_for_expert:
            logger.debug(f"[WORKFLOW_FILES] {self.get_name()}: No relevant files found for expert analysis")
            return ""

        # Expert analysis needs actual file content, bypassing conversation optimization
        try:
            file_content, processed_files = self._force_embed_files_for_expert_analysis(files_for_expert)

            logger.info(
                f"[WORKFLOW_FILES] {self.get_name()}: Prepared {len(processed_files)} unique relevant files for expert analysis "
                f"(from {len(self.consolidated_findings.relevant_files)} current relevant files)"
            )

            return file_content

        except Exception as e:
            # Deliberate swallow: expert analysis degrades to no-file context
            # rather than failing the whole workflow step.
            logger.error(f"[WORKFLOW_FILES] {self.get_name()}: Failed to prepare files for expert analysis: {e}")
            return ""

    def _force_embed_files_for_expert_analysis(self, files: list[str]) -> tuple[str, list[str]]:
        """
        Force embed files for expert analysis, bypassing conversation history filtering.

        Expert analysis has different requirements than normal workflow steps:
        - Normal steps: Optimize tokens by skipping files in conversation history
        - Expert analysis: Needs actual file content regardless of conversation history

        Args:
            files: List of file paths to embed

        Returns:
            tuple[str, list[str]]: (file_content, processed_files)
        """
        # Use read_files directly with token budgeting, bypassing filter_new_files
        from utils.file_utils import expand_paths, read_files

        # Single fallback budget, used when no model context is available or
        # token allocation fails (previously duplicated in two branches).
        fallback_file_tokens = 100_000

        # Get token budget for files
        max_tokens = fallback_file_tokens
        current_model_context = self.get_current_model_context()
        if current_model_context:
            try:
                token_allocation = current_model_context.calculate_token_allocation()
                max_tokens = token_allocation.file_tokens
                logger.debug(
                    f"[WORKFLOW_FILES] {self.get_name()}: Using {max_tokens:,} tokens for expert analysis files"
                )
            except Exception as e:
                # Best-effort: keep the fallback budget rather than failing.
                logger.warning(f"[WORKFLOW_FILES] {self.get_name()}: Failed to get token allocation: {e}")

        # Read files directly without conversation history filtering
        logger.debug(f"[WORKFLOW_FILES] {self.get_name()}: Force embedding {len(files)} files for expert analysis")
        file_content = read_files(
            files,
            max_tokens=max_tokens,
            reserve_tokens=1000,
            include_line_numbers=self.wants_line_numbers_by_default(),
        )

        # Expand paths to get individual files for tracking
        processed_files = expand_paths(files)

        logger.debug(
            f"[WORKFLOW_FILES] {self.get_name()}: Expert analysis embedding: {len(processed_files)} files, "
            f"{len(file_content):,} characters"
        )

        return file_content, processed_files

    def wants_line_numbers_by_default(self) -> bool:
        """Whether embedded file content should carry line numbers.

        Defaults to True since workflow analysis generally benefits from
        precise line references; override to disable per tool.
        """
        return True

    def _add_files_to_expert_context(self, expert_context: str, file_content: str) -> str:
        """
        Add file content to the expert context.
        Override this to customize how files are added to the context.
        """
        return f"{expert_context}\n\n=== ESSENTIAL FILES ===\n{file_content}\n=== END ESSENTIAL FILES ==="

    # ================================================================================
    # Context-Aware File Embedding - Core Implementation
    # ================================================================================

    def _handle_workflow_file_context(self, request: Any, arguments: dict[str, Any]) -> None:
        """
        Route file handling for the current workflow phase.

        Strategy: intermediate steps only *reference* file names to preserve
        the CLI's limited context; the final step (and expert analysis) embeds
        full file content with token budgeting.

        Side effects: stashes the model context and resets the per-call
        embedded-content/reference/processed-file state before dispatching.
        """
        continuation_id = self.get_request_continuation_id(request)
        is_final_step = not self.get_request_next_step_required(request)
        step_number = self.get_request_step_number(request)

        # Capture model context for downstream token budgeting.
        self._model_context = arguments.get("_model_context")

        # Reset per-call file state so nothing leaks between steps.
        self._embedded_file_content = ""
        self._file_reference_note = ""
        self._actually_processed_files = []

        if self._should_embed_files_in_workflow_step(step_number, continuation_id, is_final_step):
            logger.debug(f"[WORKFLOW_FILES] {self.get_name()}: Embedding files for final step/expert analysis")
            self._embed_workflow_files(request, arguments)
        else:
            logger.debug(f"[WORKFLOW_FILES] {self.get_name()}: Only referencing file names for intermediate step")
            self._reference_workflow_files(request)

    def _should_embed_files_in_workflow_step(
        self, step_number: int, continuation_id: Optional[str], is_final_step: bool
    ) -> bool:
        """
        Decide whether to embed full file content for this workflow step.

        Policy: embed ONLY on the final step (next_step_required == False),
        when content goes to the external model; every intermediate step just
        references file names, regardless of continuation state.

        Note: step_number and continuation_id are unused by this default
        policy but kept in the signature for overriding subclasses.

        Args:
            step_number: Current step number
            continuation_id: Thread continuation ID (None for new conversations)
            is_final_step: Whether this is the final step (next_step_required == False)

        Returns:
            bool: True if files should be embedded, False if only referenced
        """
        embed = is_final_step
        if embed:
            logger.debug("[WORKFLOW_FILES] Final step - will embed files for expert analysis")
        else:
            logger.debug("[WORKFLOW_FILES] Intermediate step (more work needed) - will only reference files")
        return embed

    def _embed_workflow_files(self, request: Any, arguments: dict[str, Any]) -> None:
        """
        Embed full file content for final steps and expert analysis.
        Uses proper token budgeting like existing debug.py.

        Reads ``relevant_files`` from the request, prepares their content within
        the model's token budget, and stores the results on the instance:
        ``_embedded_file_content`` (prepared text) and
        ``_actually_processed_files`` (files that fit the budget). Errors are
        logged but never abort the workflow; on failure both fields are reset
        to empty defaults.
        """
        # Use relevant_files as the standard field for workflow tools
        request_files = self.get_request_relevant_files(request)
        if not request_files:
            logger.debug(f"[WORKFLOW_FILES] {self.get_name()}: No relevant_files to embed")
            return

        try:
            # Model context should be available from early validation, but might be deferred for tests
            current_model_context = self.get_current_model_context()
            if not current_model_context:
                # Try to resolve model context now (deferred from early validation)
                try:
                    model_name, model_context = self._resolve_model_context(arguments, request)
                    self._model_context = model_context
                    self._current_model_name = model_name
                except Exception as e:
                    logger.error(f"[WORKFLOW_FILES] {self.get_name()}: Failed to resolve model context: {e}")
                    # Create fallback model context (preserves existing test behavior)
                    from utils.model_context import ModelContext

                    model_name = self.get_request_model_name(request)
                    self._model_context = ModelContext(model_name)
                    self._current_model_name = model_name

            # Use the same file preparation logic as BaseTool with token budgeting
            continuation_id = self.get_request_continuation_id(request)
            remaining_tokens = arguments.get("_remaining_tokens")

            file_content, processed_files = self._prepare_file_content_for_prompt(
                request_files,
                continuation_id,
                "Workflow files for analysis",
                remaining_budget=remaining_tokens,
                arguments=arguments,
                model_context=self._model_context,
            )

            # Store for use in expert analysis
            self._embedded_file_content = file_content
            self._actually_processed_files = processed_files

            logger.info(
                f"[WORKFLOW_FILES] {self.get_name()}: Embedded {len(processed_files)} relevant_files for final analysis"
            )

        except Exception as e:
            logger.error(f"[WORKFLOW_FILES] {self.get_name()}: Failed to embed files: {e}")
            # Continue without file embedding rather than failing
            self._embedded_file_content = ""
            self._actually_processed_files = []

    def _reference_workflow_files(self, request: Any) -> None:
        """
        Record file names (no content) for intermediate workflow steps.

        Keeps the CLI's context window small while still noting which files are
        in play. Stores the raw paths in ``_referenced_files`` and a short
        human-readable note in ``_file_reference_note``.
        """
        # Workflow tools use relevant_files, not files
        files = self.get_request_relevant_files(request)
        logger.debug(
            f"[WORKFLOW_FILES] {self.get_name()}: _reference_workflow_files called with {len(files)} relevant_files"
        )

        if not files:
            logger.debug(f"[WORKFLOW_FILES] {self.get_name()}: No files to reference, skipping")
            return

        # Keep the raw paths for conversation context
        self._referenced_files = files

        # Build a brief note listing just the base names
        joined_names = ", ".join(os.path.basename(path) for path in files)
        self._file_reference_note = f"Files referenced in this step: {joined_names}\n"
        logger.debug(f"[WORKFLOW_FILES] {self.get_name()}: Set _file_reference_note: {self._file_reference_note}")

        logger.info(
            f"[WORKFLOW_FILES] {self.get_name()}: Referenced {len(files)} files without embedding content"
        )

    # ================================================================================
    # Main Workflow Orchestration
    # ================================================================================

    async def execute_workflow(self, arguments: dict[str, Any]) -> list[TextContent]:
        """
        Main workflow orchestration following debug tool pattern.

        Comprehensive workflow implementation that handles all common patterns:
        1. Request validation and step management
        2. Continuation and backtracking support
        3. Step data processing and consolidation
        4. Tool-specific field mapping and customization
        5. Completion logic with optional expert analysis
        6. Generic "certain confidence" handling
        7. Step guidance and required actions
        8. Conversation memory integration

        Args:
            arguments: Raw tool arguments, including server-injected keys such
                as "_model_context" and "_resolved_model_name".

        Returns:
            Single-element list containing a TextContent with the JSON response.

        Raises:
            ToolExecutionError: On validation failure or any unhandled error;
                the payload is a JSON document describing the failure.
        """
        from mcp.types import TextContent

        try:
            # Store arguments for access by helper methods
            self._current_arguments = arguments

            # Validate request using tool-specific model
            request = self.get_workflow_request_model()(**arguments)

            # Validate step field size (basic validation for workflow instructions)
            # If step is too large, user should use shorter instructions and put details in files
            step_content = request.step
            if step_content and len(step_content) > MCP_PROMPT_SIZE_LIMIT:
                from tools.models import ToolOutput

                error_output = ToolOutput(
                    status="resend_prompt",
                    content="Step instructions are too long. Please use shorter instructions and provide detailed context via file paths instead.",
                    content_type="text",
                    metadata={"prompt_size": len(step_content), "limit": MCP_PROMPT_SIZE_LIMIT},
                )
                # Marker prefix lets the generic except below unwrap this payload
                raise ValueError(f"MCP_SIZE_CHECK:{error_output.model_dump_json()}")

            # Validate file paths for security (same as base tool)
            # Use try/except instead of hasattr as per coding standards
            try:
                path_error = self.validate_file_paths(request)
                if path_error:
                    from tools.models import ToolOutput

                    error_output = ToolOutput(
                        status="error",
                        content=path_error,
                        content_type="text",
                    )
                    logger.error("Path validation failed for %s: %s", self.get_name(), path_error)
                    raise ToolExecutionError(error_output.model_dump_json())
            except AttributeError:
                # validate_file_paths method not available - skip validation
                pass

            # Try to validate model availability early for production scenarios
            # For tests, defer model validation to later to allow mocks to work
            try:
                model_name, model_context = self._resolve_model_context(arguments, request)
                # Store for later use
                self._current_model_name = model_name
                self._model_context = model_context
            except ValueError as e:
                # Model resolution failed - in production this would be an error,
                # but for tests we defer to allow mocks to handle model resolution
                logger.debug(f"Early model validation failed, deferring to later: {e}")
                self._current_model_name = None
                self._model_context = None

            # Handle continuation
            continuation_id = request.continuation_id

            # Restore workflow state on continuation
            # (tool calls are stateless; per-thread state is persisted in turn metadata)
            if continuation_id:
                from utils.conversation_memory import get_thread

                thread = get_thread(continuation_id)
                if thread and thread.turns:
                    # Find the most recent assistant turn from this tool with workflow state
                    for turn in reversed(thread.turns):
                        if turn.role == "assistant" and turn.tool_name == self.get_name() and turn.model_metadata:
                            state = turn.model_metadata
                            if isinstance(state, dict) and "work_history" in state:
                                self.work_history = state.get("work_history", [])
                                self.initial_request = state.get("initial_request")
                                # Rebuild consolidated findings from restored history
                                self._reprocess_consolidated_findings()
                                logger.debug(
                                    f"[{self.get_name()}] Restored workflow state with {len(self.work_history)} history items"
                                )
                                break  # State restored, exit loop

            # Adjust total steps if needed
            if request.step_number > request.total_steps:
                request.total_steps = request.step_number

            # Create thread for first step
            if not continuation_id and request.step_number == 1:
                # Strip server-injected keys before persisting the initial arguments
                clean_args = {k: v for k, v in arguments.items() if k not in ["_model_context", "_resolved_model_name"]}
                continuation_id = create_thread(self.get_name(), clean_args)
                self.initial_request = request.step
                # Allow tools to store initial description for expert analysis
                self.store_initial_issue(request.step)

            # Process work step - allow tools to customize field mapping
            step_data = self.prepare_step_data(request)

            # Store in history
            self.work_history.append(step_data)

            # Update consolidated findings
            self._update_consolidated_findings(step_data)

            # Handle file context appropriately based on workflow phase
            self._handle_workflow_file_context(request, arguments)

            # Build response with tool-specific customization
            response_data = self.build_base_response(request, continuation_id)

            # If work is complete, handle completion logic
            if not request.next_step_required:
                response_data = await self.handle_work_completion(response_data, request, arguments)
            else:
                # Force CLI to work before calling tool again
                response_data = self.handle_work_continuation(response_data, request)

            # Allow tools to customize the final response
            response_data = self.customize_workflow_response(response_data, request)

            # Add metadata (provider_used and model_used) to workflow response
            self._add_workflow_metadata(response_data, arguments)

            # Store in conversation memory
            if continuation_id:
                self.store_conversation_turn(continuation_id, response_data, request)

            return [TextContent(type="text", text=json.dumps(response_data, indent=2, ensure_ascii=False))]

        except ToolExecutionError:
            raise
        except Exception as e:
            # Unwrap the size-check marker raised above so the structured
            # payload reaches the client intact
            if str(e).startswith("MCP_SIZE_CHECK:"):
                payload = str(e)[len("MCP_SIZE_CHECK:") :]
                raise ToolExecutionError(payload)

            logger.error(f"Error in {self.get_name()} work: {e}", exc_info=True)
            error_data = {
                "status": f"{self.get_name()}_failed",
                "error": str(e),
                "step_number": arguments.get("step_number", 0),
            }

            # Add metadata to error responses too
            self._add_workflow_metadata(error_data, arguments)

            raise ToolExecutionError(json.dumps(error_data, indent=2, ensure_ascii=False)) from e

    # Hook methods for tool customization

    def prepare_step_data(self, request) -> dict:
        """
        Prepare step data from request. Tools can override to customize field mapping.
        """
        # Fields read directly off the request
        step_data = {
            "step": request.step,
            "step_number": request.step_number,
            "findings": request.findings,
        }

        # Fields routed through the overridable accessor hooks
        accessors = {
            "files_checked": self.get_request_files_checked,
            "relevant_files": self.get_request_relevant_files,
            "relevant_context": self.get_request_relevant_context,
            "issues_found": self.get_request_issues_found,
            "confidence": self.get_request_confidence,
            "hypothesis": self.get_request_hypothesis,
            "images": self.get_request_images,
        }
        for field, accessor in accessors.items():
            step_data[field] = accessor(request)

        return step_data

    def build_base_response(self, request, continuation_id: str = None) -> dict:
        """
        Build the base response structure. Tools can override for custom response fields.
        """
        tool_name = self.get_name()
        findings = self.consolidated_findings
        response_data = {
            "status": f"{tool_name}_in_progress",
            "step_number": request.step_number,
            "total_steps": request.total_steps,
            "next_step_required": request.next_step_required,
            f"{tool_name}_status": {
                "files_checked": len(findings.files_checked),
                "relevant_files": len(findings.relevant_files),
                "relevant_context": len(findings.relevant_context),
                "issues_found": len(findings.issues_found),
                "images_collected": len(findings.images),
                "current_confidence": self.get_request_confidence(request),
            },
        }

        if continuation_id:
            response_data["continuation_id"] = continuation_id

        # Attach file-context info reflecting the current workflow phase
        embedded_content = self.get_embedded_file_content()
        reference_note = self.get_file_reference_note()
        processed_files = self.get_actually_processed_files()

        logger.debug(
            f"[WORKFLOW_FILES] {self.get_name()}: Building response - has embedded_content: {bool(embedded_content)}, has reference_note: {bool(reference_note)}"
        )

        # Embedded content (final steps) takes precedence over reference notes
        if embedded_content:
            logger.debug(f"[WORKFLOW_FILES] {self.get_name()}: Adding fully_embedded file context")
            response_data["file_context"] = {
                "type": "fully_embedded",
                "files_embedded": len(processed_files),
                "context_optimization": "Full file content embedded for expert analysis",
            }
        elif reference_note:
            logger.debug(f"[WORKFLOW_FILES] {self.get_name()}: Adding reference_only file context")
            response_data["file_context"] = {
                "type": "reference_only",
                "note": reference_note,
                "context_optimization": "Files referenced but not embedded to preserve the context window",
            }

        return response_data

    def should_skip_expert_analysis(self, request, consolidated_findings) -> bool:
        """
        Determine if expert analysis should be skipped due to high certainty.

        The base implementation never skips. Tools such as debug override this
        to bypass the expert call when confidence is "certain".
        """
        return False

    def handle_completion_without_expert_analysis(self, request, consolidated_findings) -> dict:
        """
        Handle completion when skipping expert analysis.

        Tools can override this for custom high-confidence completion handling.
        Default implementation provides generic response.
        """
        continuation_id = self.get_request_continuation_id(request)
        tool_name = self.get_name()

        response_data = {
            "status": self.get_completion_status(),
            f"complete_{tool_name}": {
                "initial_request": self.get_initial_request(request.step),
                "steps_taken": len(consolidated_findings.findings),
                "files_examined": list(consolidated_findings.files_checked),
                "relevant_files": list(consolidated_findings.relevant_files),
                "relevant_context": list(consolidated_findings.relevant_context),
                "work_summary": self.prepare_work_summary(),
                "final_analysis": self.get_final_analysis_from_request(request),
                "confidence_level": self.get_confidence_level(request),
            },
            "next_steps": self.get_completion_message(),
            "skip_expert_analysis": True,
            "expert_analysis": {
                "status": self.get_skip_expert_analysis_status(),
                "reason": self.get_skip_reason(),
            },
        }

        if continuation_id:
            response_data["continuation_id"] = continuation_id

        return response_data

    # ================================================================================
    # Inheritance Hook Methods - Replace hasattr/getattr Anti-patterns
    # ================================================================================

    def get_request_confidence(self, request: Any) -> str:
        """Return the request's confidence value, defaulting to "low"."""
        try:
            confidence = request.confidence
        except AttributeError:
            # Request model has no confidence field
            return "low"
        return confidence or "low"

    def get_request_relevant_context(self, request: Any) -> list[str]:
        """Return the request's relevant_context list, defaulting to empty."""
        try:
            context = request.relevant_context
        except AttributeError:
            # Request model has no relevant_context field
            return []
        return context or []

    def get_request_issues_found(self, request: Any) -> list[str]:
        """Return the request's issues_found list, defaulting to empty."""
        try:
            issues = request.issues_found
        except AttributeError:
            # Request model has no issues_found field
            return []
        return issues or []

    def get_request_hypothesis(self, request: Any) -> Optional[str]:
        """Return the request's hypothesis, or None when the field is absent."""
        return getattr(request, "hypothesis", None)

    def get_request_images(self, request: Any) -> list[str]:
        """Return the request's images list, defaulting to empty."""
        try:
            images = request.images
        except AttributeError:
            # Request model has no images field
            return []
        return images or []

    # File Context Access Methods

    def get_embedded_file_content(self) -> str:
        """Return previously embedded file content, or "" when unset."""
        return getattr(self, "_embedded_file_content", "") or ""

    def get_file_reference_note(self) -> str:
        """Return the file reference note, or "" when unset."""
        return getattr(self, "_file_reference_note", "") or ""

    def get_actually_processed_files(self) -> list[str]:
        """Return the files that were actually embedded, or [] when unset."""
        return getattr(self, "_actually_processed_files", None) or []

    def get_current_model_context(self):
        """Return the current model context, or None when unset."""
        return getattr(self, "_model_context", None)

    def get_request_model_name(self, request: Any) -> str:
        """Return the request's model name, defaulting to "flash"."""
        try:
            model = request.model
        except AttributeError:
            # Request model has no model field
            return "flash"
        return model or "flash"

    def get_request_continuation_id(self, request: Any) -> Optional[str]:
        """Return the request's continuation ID, or None when absent."""
        return getattr(request, "continuation_id", None)

    def get_request_next_step_required(self, request: Any) -> bool:
        """Return whether another step is required; defaults to True when absent."""
        return getattr(request, "next_step_required", True)

    def get_request_step_number(self, request: Any) -> int:
        """Return the request's step number, defaulting to 1."""
        try:
            number = request.step_number
        except AttributeError:
            # Request model has no step_number field
            return 1
        return number or 1

    def get_request_relevant_files(self, request: Any) -> list[str]:
        """Return the request's relevant_files list, defaulting to empty."""
        try:
            files = request.relevant_files
        except AttributeError:
            # Request model has no relevant_files field
            return []
        return files or []

    def get_request_files_checked(self, request: Any) -> list[str]:
        """Return the request's files_checked list, defaulting to empty."""
        try:
            checked = request.files_checked
        except AttributeError:
            # Request model has no files_checked field
            return []
        return checked or []

    def get_current_arguments(self) -> dict[str, Any]:
        """Return the arguments of the current call, or {} when unset."""
        return getattr(self, "_current_arguments", None) or {}

    def store_initial_issue(self, step_description: str):
        """Record the initial issue description on the instance.

        Tools can override to persist the description differently.
        """
        self.initial_issue = step_description

    def get_initial_request(self, fallback_step: str) -> str:
        """Return the stored initial request, falling back to the given step."""
        stored = getattr(self, "initial_request", None)
        return stored or fallback_step

    # Default implementations for inheritance hooks

    def prepare_work_summary(self) -> str:
        """Summarize completed work. Override for custom implementation."""
        step_count = len(self.consolidated_findings.findings)
        return f"Completed {step_count} work steps"

    def get_completion_status(self) -> str:
        """Status string used when work completes without expert analysis."""
        return "high_confidence_completion"

    def get_final_analysis_from_request(self, request):
        """Extract the final analysis payload; defaults to the hypothesis field."""
        return self.get_request_hypothesis(request)

    def get_confidence_level(self, request) -> str:
        """Confidence level for completion output; defaults to "high"."""
        confidence = self.get_request_confidence(request)
        return confidence or "high"

    def get_completion_message(self) -> str:
        """Message shown on high-confidence completion. Override per tool."""
        tool_title = self.get_name().capitalize()
        return (
            f"{tool_title} complete with high confidence. Present results "
            "and proceed with implementation without requiring further consultation."
        )

    def get_skip_reason(self) -> str:
        """Human-readable reason reported when expert analysis is skipped."""
        return f"{self.get_name()} completed with sufficient confidence"

    def get_skip_expert_analysis_status(self) -> str:
        """Status string recorded when expert analysis is deliberately skipped."""
        return "skipped_by_tool_design"

    def get_completion_next_steps_message(self, expert_analysis_used: bool = False) -> str:
        """
        Get the message to show when work is complete.
        Tools can override for custom messaging.

        Args:
            expert_analysis_used: True if expert analysis was successfully executed
        """
        base_message = (
            f"{self.get_name().upper()} IS COMPLETE. You MUST now summarize and present ALL key findings, confirmed "
            "hypotheses, and exact recommended solutions. Clearly identify the most likely root cause and "
            "provide concrete, actionable implementation guidance. Highlight affected code paths and display "
            "reasoning that led to this conclusion—make it easy for a developer to understand exactly where "
            "the problem lies."
        )

        if not expert_analysis_used:
            return base_message

        # Append expert-analysis handling guidance only when it actually ran
        expert_guidance = self.get_expert_analysis_guidance()
        return f"{base_message}\n\n{expert_guidance}" if expert_guidance else base_message

    def get_expert_analysis_guidance(self) -> str:
        """
        Get additional guidance for handling expert analysis results.

        The base implementation returns an empty string. Subclasses override
        this to instruct the caller how to validate and apply expert findings.
        When non-empty, the guidance is:
        1. Appended to the completion next steps message
        2. Added as "important_considerations" field in the response data

        Example implementation:
        ```python
        def get_expert_analysis_guidance(self) -> str:
            return (
                "IMPORTANT: Expert analysis provided above. You MUST validate "
                "the expert findings rather than accepting them blindly. "
                "Cross-reference with your own investigation and ensure "
                "recommendations align with the codebase context."
            )
        ```

        Returns:
            Additional guidance text or empty string if no guidance needed
        """
        return ""

    def customize_workflow_response(self, response_data: dict, request) -> dict:
        """
        Allow tools to customize the workflow response before returning.

        Tools can override this to add tool-specific fields, modify status names,
        customize field mapping, etc. The default implementation only backfills
        the "file_context" entry when no earlier code path populated it.
        """
        # File context already set - nothing to do
        if response_data.get("file_context"):
            return response_data

        embedded_content = self.get_embedded_file_content()
        reference_note = self.get_file_reference_note()
        processed_files = self.get_actually_processed_files()

        # Embedded content (final steps) takes precedence over reference notes
        if embedded_content:
            response_data["file_context"] = {
                "type": "fully_embedded",
                "files_embedded": len(processed_files),
                "context_optimization": "Full file content embedded for expert analysis",
            }
        elif reference_note:
            response_data["file_context"] = {
                "type": "reference_only",
                "note": reference_note,
                "context_optimization": "Files referenced but not embedded to preserve the context window",
            }

        return response_data

    def store_conversation_turn(self, continuation_id: str, response_data: dict, request):
        """
        Store the conversation turn. Tools can override for custom memory storage.
        """
        # CRITICAL: strip internal workflow metadata before persisting; only the
        # clean content should appear in reconstructed conversation history
        clean_content = self._extract_clean_workflow_content_for_history(response_data)

        # Snapshot workflow state so it survives across stateless tool calls
        workflow_state = {
            "work_history": self.work_history,
            "initial_request": getattr(self, "initial_request", None),
        }

        add_turn(
            thread_id=continuation_id,
            role="assistant",
            content=clean_content,  # Use cleaned content instead of full response_data
            tool_name=self.get_name(),
            files=self.get_request_relevant_files(request),
            images=self.get_request_images(request),
            model_metadata=workflow_state,  # Persist the state
        )

    def _add_workflow_metadata(self, response_data: dict, arguments: dict[str, Any]) -> None:
        """
        Add metadata (provider_used and model_used) to workflow response.

        This ensures workflow tools have the same metadata as regular tools,
        making it consistent across all tool types for tracking which provider
        and model were used for the response.

        Args:
            response_data: The response data dictionary to modify in place
            arguments: The original arguments containing model context
        """
        try:
            # Get model information from arguments (set by server.py)
            resolved_model_name = arguments.get("_resolved_model_name")
            model_context = arguments.get("_model_context")

            if resolved_model_name and model_context:
                # Extract provider information from model context
                provider = model_context.provider
                provider_name = provider.get_provider_type().value if provider else "unknown"

                metadata = {
                    "tool_name": self.get_name(),
                    "model_used": resolved_model_name,
                    "provider_used": provider_name,
                }

                # Preserve existing metadata and add workflow metadata
                response_data.setdefault("metadata", {}).update(metadata)

                logger.debug(
                    f"[WORKFLOW_METADATA] {self.get_name()}: Added metadata - "
                    f"model: {resolved_model_name}, provider: {provider_name}"
                )
            else:
                # Fallback - try to get model info from request
                request = self.get_workflow_request_model()(**arguments)
                model_name = self.get_request_model_name(request)

                # Basic metadata without provider info
                metadata = {
                    "tool_name": self.get_name(),
                    "model_used": model_name,
                    "provider_used": "unknown",
                }

                # Preserve existing metadata and add workflow metadata
                response_data.setdefault("metadata", {}).update(metadata)

                logger.debug(
                    f"[WORKFLOW_METADATA] {self.get_name()}: Added fallback metadata - "
                    f"model: {model_name}, provider: unknown"
                )

        except Exception as e:
            # Don't fail the workflow if metadata addition fails
            logger.warning(f"[WORKFLOW_METADATA] {self.get_name()}: Failed to add metadata: {e}")
            # Fix: record the tool name without clobbering any metadata that an
            # earlier code path already attached (previous version replaced the
            # whole "metadata" dict here)
            response_data.setdefault("metadata", {})["tool_name"] = self.get_name()

    def _extract_clean_workflow_content_for_history(self, response_data: dict) -> str:
        """
        Extract clean content from workflow response suitable for conversation history.

        This method removes internal workflow metadata, continuation offers, and
        status information that should not appear when the conversation is
        reconstructed for expert models or other tools.

        Args:
            response_data: The full workflow response data

        Returns:
            str: Clean content suitable for conversation history storage
        """
        # Create a clean copy with only essential content for conversation history
        clean_data = {}

        # Include core content if present
        if "content" in response_data:
            clean_data["content"] = response_data["content"]

        # Include expert analysis if present (but clean it)
        if "expert_analysis" in response_data:
            expert_analysis = response_data["expert_analysis"]
            if isinstance(expert_analysis, dict):
                # Only include the actual analysis content, not metadata
                clean_expert = {}
                if "raw_analysis" in expert_analysis:
                    clean_expert["analysis"] = expert_analysis["raw_analysis"]
                elif "content" in expert_analysis:
                    clean_expert["analysis"] = expert_analysis["content"]
                if clean_expert:
                    clean_data["expert_analysis"] = clean_expert

        # Include findings/issues if present (core workflow output)
        if "complete_analysis" in response_data:
            complete_analysis = response_data["complete_analysis"]
            if isinstance(complete_analysis, dict):
                clean_complete = {}
                # Include essential analysis data without internal metadata
                for key in ["findings", "issues_found", "relevant_context", "insights"]:
                    if key in complete_analysis:
                        clean_complete[key] = complete_analysis[key]
                if clean_complete:
                    clean_data["analysis_summary"] = clean_complete

        # Include step information for context but remove internal workflow metadata
        if "step_number" in response_data:
            clean_data["step_info"] = {
                "step": response_data.get("step", ""),
                "step_number": response_data.get("step_number", 1),
                "total_steps": response_data.get("total_steps", 1),
            }

        # Exclude problematic fields that should never appear in conversation history:
        # - continuation_id (confuses LLMs with old IDs)
        # - status (internal workflow state)
        # - next_step_required (internal control flow)
        # - analysis_status (internal tracking)
        # - file_context (internal optimization info)
        # - required_actions (internal workflow instructions)

        return json.dumps(clean_data, indent=2, ensure_ascii=False)

    # Core workflow logic methods

    async def handle_work_completion(self, response_data: dict, request, arguments: dict) -> dict:
        """
        Handle work completion logic - expert analysis decision and response building.

        Routes the finished workflow down one of three paths:
        1. Skip expert analysis (tool reports high certainty).
        2. Call the expert model, then merge or promote its result.
        3. Complete locally (tool is self-contained, or local work sufficed).

        Args:
            response_data: Response payload being built; mutated in place.
            request: Validated workflow request for the final step.
            arguments: Raw tool arguments (forwarded to expert analysis).

        Returns:
            dict: The same response_data, augmented with completion fields.
        """
        response_data[f"{self.get_name()}_complete"] = True

        # Check if tool wants to skip expert analysis due to high certainty
        if self.should_skip_expert_analysis(request, self.consolidated_findings):
            # Handle completion without expert analysis
            completion_response = self.handle_completion_without_expert_analysis(request, self.consolidated_findings)
            response_data.update(completion_response)
        elif self.requires_expert_analysis() and self.should_call_expert_analysis(self.consolidated_findings, request):
            # Standard expert analysis path
            response_data["status"] = "calling_expert_analysis"

            # Call expert analysis
            expert_analysis = await self._call_expert_analysis(arguments, request)
            response_data["expert_analysis"] = expert_analysis

            # Handle special expert analysis statuses: these are promoted to the
            # top-level response and the nested expert_analysis entry is removed.
            if isinstance(expert_analysis, dict) and expert_analysis.get("status") in [
                "files_required_to_continue",
                "investigation_paused",
                "refactoring_paused",
            ]:
                # Promote the special status to the main response
                special_status = expert_analysis["status"]
                response_data["status"] = special_status
                response_data["content"] = expert_analysis.get(
                    "raw_analysis", json.dumps(expert_analysis, ensure_ascii=False)
                )
                del response_data["expert_analysis"]

                # Update next steps for special status
                if special_status == "files_required_to_continue":
                    response_data["next_steps"] = "Provide the requested files and continue the analysis."
                else:
                    response_data["next_steps"] = expert_analysis.get(
                        "next_steps", "Continue based on expert analysis."
                    )
            elif isinstance(expert_analysis, dict) and expert_analysis.get("status") == "analysis_error":
                # Expert analysis failed - promote error status
                response_data["status"] = "error"
                response_data["content"] = expert_analysis.get("error", "Expert analysis failed")
                response_data["content_type"] = "text"
                del response_data["expert_analysis"]
            else:
                # Expert analysis was successfully executed - include expert guidance
                response_data["next_steps"] = self.get_completion_next_steps_message(expert_analysis_used=True)

                # Add expert analysis guidance as important considerations
                expert_guidance = self.get_expert_analysis_guidance()
                if expert_guidance:
                    response_data["important_considerations"] = expert_guidance

            # Prepare complete work summary (only on the expert-analysis path)
            work_summary = self._prepare_work_summary()
            response_data[f"complete_{self.get_name()}"] = {
                "initial_request": self.get_initial_request(request.step),
                "steps_taken": len(self.work_history),
                "files_examined": list(self.consolidated_findings.files_checked),
                "relevant_files": list(self.consolidated_findings.relevant_files),
                "relevant_context": list(self.consolidated_findings.relevant_context),
                "issues_found": self.consolidated_findings.issues_found,
                "work_summary": work_summary,
            }
        else:
            # Tool doesn't require expert analysis or local work was sufficient
            if not self.requires_expert_analysis():
                # Tool is self-contained (like planner)
                response_data["status"] = f"{self.get_name()}_complete"
                response_data["next_steps"] = (
                    f"{self.get_name().capitalize()} work complete. Present results to the user."
                )
            else:
                # Local work was sufficient for tools that support expert analysis
                response_data["status"] = "local_work_complete"
                response_data["next_steps"] = (
                    f"Local {self.get_name()} complete with sufficient confidence. Present findings "
                    "and recommendations to the user based on the work results."
                )

        return response_data

    def handle_work_continuation(self, response_data: dict, request) -> dict:
        """
        Pause the workflow between steps and tell the agent what to do next.

        Sets a tool-specific pause status, attaches the required investigation
        actions, and adds step guidance before the next step is attempted.

        Args:
            response_data: Response payload being built; mutated in place.
            request: Validated workflow request for the current step.

        Returns:
            dict: The same response_data, augmented with continuation fields.
        """
        tool = self.get_name()
        response_data["status"] = f"pause_for_{tool}"
        response_data[f"{tool}_required"] = True

        # Tool-specific actions the agent must perform before continuing.
        response_data["required_actions"] = self.get_required_actions(
            request.step_number,
            self.get_request_confidence(request),
            request.findings,
            request.total_steps,
            request,
        )

        # Human-readable guidance for the next step.
        response_data["next_steps"] = self.get_step_guidance_message(request)

        return response_data

    def _update_consolidated_findings(self, step_data: dict):
        """Fold a single step's data into the running consolidated findings.

        Args:
            step_data: Step payload; must contain 'step_number' and 'findings',
                may contain files_checked/relevant_files/relevant_context/
                hypothesis/confidence/issues_found/images.
        """
        findings = self.consolidated_findings

        findings.files_checked.update(step_data.get("files_checked", []))
        findings.relevant_files.update(step_data.get("relevant_files", []))
        findings.relevant_context.update(step_data.get("relevant_context", []))
        findings.findings.append(f"Step {step_data['step_number']}: {step_data['findings']}")

        hypothesis = step_data.get("hypothesis")
        if hypothesis:
            findings.hypotheses.append(
                {
                    "step": step_data["step_number"],
                    "hypothesis": hypothesis,
                    "confidence": step_data["confidence"],
                }
            )

        issues = step_data.get("issues_found")
        if issues:
            findings.issues_found.extend(issues)

        images = step_data.get("images")
        if images:
            findings.images.extend(images)

        # Track the most recent confidence value reported by any step.
        latest_confidence = step_data.get("confidence")
        if latest_confidence:
            findings.confidence = latest_confidence

    def _reprocess_consolidated_findings(self):
        """Rebuild consolidated findings from scratch (used after backtracking)."""
        self.consolidated_findings = ConsolidatedFindings()
        for recorded_step in self.work_history:
            self._update_consolidated_findings(recorded_step)

    def _prepare_work_summary(self) -> str:
        """Render a human-readable summary of all work performed so far.

        Returns:
            str: Multi-section text covering counts, step-by-step progression,
            hypothesis evolution, and identified issues.
        """
        findings = self.consolidated_findings

        lines = [
            f"=== {self.get_name().upper()} WORK SUMMARY ===",
            f"Total steps: {len(self.work_history)}",
            f"Files examined: {len(findings.files_checked)}",
            f"Relevant files identified: {len(findings.relevant_files)}",
            f"Methods/functions involved: {len(findings.relevant_context)}",
            f"Issues found: {len(findings.issues_found)}",
            "",
            "=== WORK PROGRESSION ===",
        ]
        lines.extend(findings.findings)

        if findings.hypotheses:
            lines += ["", "=== HYPOTHESIS EVOLUTION ==="]
            lines += [
                f"Step {entry['step']} ({entry['confidence']} confidence): {entry['hypothesis']}"
                for entry in findings.hypotheses
            ]

        if findings.issues_found:
            lines += ["", "=== ISSUES IDENTIFIED ==="]
            for issue in findings.issues_found:
                label = issue.get("severity", "unknown").upper()
                lines.append(f"[{label}] {issue.get('description', 'No description')}")

        return "\n".join(lines)

    async def _call_expert_analysis(self, arguments: dict, request) -> dict:
        """Call external model for expert analysis.

        Resolves a model context (falling back to the request's model when early
        validation did not run, e.g. in tests), builds the expert prompt from the
        consolidated findings, invokes the provider, and parses the reply.

        Args:
            arguments: Raw tool arguments (used for model-context resolution).
            request: Validated workflow request for the final step.

        Returns:
            dict: Parsed JSON analysis on success; a plain-text wrapper dict when
            the model's reply is not valid JSON; an error dict with status
            "empty_response" or "analysis_error" on failure.
        """
        try:
            # Model context should be resolved from early validation, but handle fallback for tests
            if not self._model_context:
                # Try to resolve model context for expert analysis (deferred from early validation)
                try:
                    model_name, model_context = self._resolve_model_context(arguments, request)
                    self._model_context = model_context
                    self._current_model_name = model_name
                except Exception as e:
                    logger.error(f"Failed to resolve model context for expert analysis: {e}")
                    # Use request model as fallback (preserves existing test behavior)
                    model_name = self.get_request_model_name(request)
                    from utils.model_context import ModelContext

                    model_context = ModelContext(model_name)
                    self._model_context = model_context
                    self._current_model_name = model_name
            else:
                model_name = self._current_model_name

            provider = self._model_context.provider

            # Prepare expert analysis context
            expert_context = self.prepare_expert_analysis_context(self.consolidated_findings)

            # Check if tool wants to include files in prompt
            if self.should_include_files_in_expert_prompt():
                file_content = self._prepare_files_for_expert_analysis()
                if file_content:
                    expert_context = self._add_files_to_expert_context(expert_context, file_content)

            # Get system prompt for this tool with localization support
            base_system_prompt = self.get_system_prompt()
            capability_augmented_prompt = self._augment_system_prompt_with_capabilities(
                base_system_prompt, getattr(self._model_context, "capabilities", None)
            )
            language_instruction = self.get_language_instruction()
            system_prompt = language_instruction + capability_augmented_prompt

            # Check if tool wants system prompt embedded in main prompt
            if self.should_embed_system_prompt():
                prompt = f"{system_prompt}\n\n{expert_context}\n\n{self.get_expert_analysis_instruction()}"
                system_prompt = ""  # Clear it since we embedded it
            else:
                prompt = expert_context

            # Validate temperature against model constraints
            validated_temperature, temp_warnings = self.get_validated_temperature(request, self._model_context)

            # Log any temperature corrections
            for warning in temp_warnings:
                logger.warning(warning)

            # Generate AI response - use request parameters if available
            # (images are de-duplicated before being passed to the provider)
            model_response = provider.generate_content(
                prompt=prompt,
                model_name=model_name,
                system_prompt=system_prompt,
                temperature=validated_temperature,
                thinking_mode=self.get_request_thinking_mode(request),
                images=list(set(self.consolidated_findings.images)) if self.consolidated_findings.images else None,
            )

            if model_response.content:
                content = model_response.content.strip()

                # Try to extract JSON from markdown code blocks if present
                # NOTE(review): the first clause is redundant ("```json" implies
                # "```"); kept as-is since behavior is unchanged either way.
                if "```json" in content or "```" in content:
                    json_match = re.search(r"```(?:json)?\s*(.*?)\s*```", content, re.DOTALL)
                    if json_match:
                        content = json_match.group(1).strip()

                try:
                    # Try to parse as JSON
                    analysis_result = json.loads(content)
                    return analysis_result
                except json.JSONDecodeError as e:
                    # Log the parse error with more details but don't fail
                    logger.info(
                        f"[{self.get_name()}] Expert analysis returned non-JSON response (this is OK for smaller models). "
                        f"Parse error: {str(e)}. Response length: {len(model_response.content)} chars."
                    )
                    logger.debug(f"First 500 chars of response: {model_response.content[:500]!r}")

                    # Still return the analysis as plain text - this is valid
                    return {
                        "status": "analysis_complete",
                        "raw_analysis": model_response.content,
                        "format": "text",  # Indicate it's plain text, not an error
                        "note": "Analysis provided in plain text format",
                    }
            else:
                return {"error": "No response from model", "status": "empty_response"}

        except Exception as e:
            logger.error(f"Error calling expert analysis: {e}", exc_info=True)
            return {"error": str(e), "status": "analysis_error"}

    def _process_work_step(self, step_data: dict):
        """
        Process a single work step and update internal state.

        This method is useful for testing and manual step processing.
        It adds the step to work history and updates consolidated findings.

        Args:
            step_data: Dictionary containing step information including:
                      step, step_number, findings, files_checked, etc.
        """
        # Store in history (full audit trail of every step taken)
        self.work_history.append(step_data)

        # Update consolidated findings (merged view used for summaries/expert analysis)
        self._update_consolidated_findings(step_data)

    # Common execute method for workflow-based tools

    async def execute(self, arguments: dict[str, Any]) -> list[TextContent]:
        """
        Common execute logic for workflow-based tools.

        Validates the incoming arguments and delegates to execute_workflow.
        Tools needing custom behavior can override this method.

        Args:
            arguments: Raw tool arguments from the caller.

        Returns:
            list[TextContent]: The workflow's response.

        Raises:
            ToolExecutionError: On empty arguments or any workflow failure;
                the error payload is JSON with status/content/metadata.
        """
        try:
            if not arguments:
                # Even validation failures carry basic metadata.
                payload = {
                    "status": "error",
                    "content": "No arguments provided",
                    "metadata": {"tool_name": self.get_name()},
                }
                raise ToolExecutionError(json.dumps(payload, ensure_ascii=False))

            return await self.execute_workflow(arguments)

        except ToolExecutionError:
            # Already well-formed; propagate untouched.
            raise
        except Exception as e:
            logger.error(f"Error in {self.get_name()} tool execution: {e}", exc_info=True)
            payload = {
                "status": "error",
                "content": f"Error in {self.get_name()}: {str(e)}",
            }
            # Attach workflow metadata to error responses as well.
            self._add_workflow_metadata(payload, arguments)
            raise ToolExecutionError(json.dumps(payload, ensure_ascii=False)) from e

    # Default implementations for methods that workflow-based tools typically don't need

    async def prepare_prompt(self, request) -> str:
        """
        Base implementation for workflow tools - compatible with BaseTool signature.

        Workflow tools assemble their prompts internally during workflow
        execution, so there is nothing to prepare here.

        Args:
            request: The validated request object (unused).

        Returns:
            An empty string; prompt construction happens inside the workflow.
        """
        return ""

    def format_response(self, response: str, request, model_info=None):
        """Return the response unchanged.

        Workflow tools format their responses internally via BaseWorkflowMixin,
        so this hook is a pass-through.
        """
        return response

```

--------------------------------------------------------------------------------
/run-server.ps1:
--------------------------------------------------------------------------------

```
<#
.SYNOPSIS
    Installation, configuration, and launch script for Zen MCP server on Windows.

.DESCRIPTION
    This PowerShell script prepares the environment for the Zen MCP server:
    - Installs and checks Python 3.10+ (with venv or uv if available)
    - Installs required Python dependencies
    - Configures environment files (.env)
    - Validates presence of required API keys
    - Cleans Python caches and obsolete Docker artifacts
    - Offers automatic integration with Claude Desktop, Gemini CLI, VSCode, Cursor, Windsurf, and Trae
    - Manages configuration file backups (max 3 retained)
    - Allows real-time log following or server launch

.PARAMETER Help
    Shows script help.

.PARAMETER Version
    Shows Zen MCP server version.

.PARAMETER Follow
    Follows server logs in real time.

.PARAMETER Config
    Shows configuration instructions for Claude and other compatible clients.

.PARAMETER ClearCache
    Removes Python cache files (__pycache__, .pyc).

.PARAMETER SkipVenv
    Skips Python virtual environment creation.

.PARAMETER SkipDocker
    Skips Docker checks and cleanup.

.PARAMETER Force
    Forces recreation of the Python virtual environment.
    
.PARAMETER VerboseOutput
    Enables more detailed output (currently unused).

.PARAMETER Dev
    Installs development dependencies from requirements-dev.txt if available.

.PARAMETER Docker
    Uses Docker to build and run the MCP server instead of Python virtual environment.

.EXAMPLE
    .\run-server.ps1
    Prepares the environment and starts the Zen MCP server.

    .\run-server.ps1 -Follow
    Follows server logs in real time.

    .\run-server.ps1 -Config
    Shows configuration instructions for clients.

    .\run-server.ps1 -Dev
    Prepares the environment with development dependencies and starts the server.

    .\run-server.ps1 -Docker
    Builds and runs the server using Docker containers.

    .\run-server.ps1 -Docker -Follow
    Builds and runs the server using Docker containers and follows the logs.

    .\run-server.ps1 -Docker -Force
    Forces rebuilding of the Docker image and runs the server.

.NOTES
    Project Author     : BeehiveInnovations
    Script Author      : GiGiDKR (https://github.com/GiGiDKR)
    Date               : 07-05-2025
    Version            : See config.py (__version__)
    References         : https://github.com/BeehiveInnovations/zen-mcp-server

#>
#Requires -Version 5.1
[CmdletBinding()]
param(
    [switch]$Help,           # Show script help and exit
    [switch]$Version,        # Show Zen MCP server version and exit
    [switch]$Follow,         # Follow server logs in real time
    [switch]$Config,         # Show client configuration instructions
    [switch]$ClearCache,     # Remove Python cache files (__pycache__, .pyc)
    [switch]$SkipVenv,       # Skip virtual environment creation
    [switch]$SkipDocker,     # Skip Docker checks and cleanup
    [switch]$Force,          # Force recreation of the venv / Docker image rebuild
    [switch]$VerboseOutput,  # More detailed output (currently unused)
    [switch]$Dev,            # Also install requirements-dev.txt if available
    [switch]$Docker          # Build and run via Docker instead of a venv
)

# ============================================================================
# Zen MCP Server Setup Script for Windows
# 
# A Windows-compatible setup script that handles environment setup, 
# dependency installation, and configuration.
# ============================================================================

# Set error action preference: stop on any error so failures are caught by try/catch
$ErrorActionPreference = "Stop"

# ----------------------------------------------------------------------------
# Constants and Configuration
# ----------------------------------------------------------------------------

$script:VENV_PATH = ".zen_venv"                    # Python virtual environment directory
$script:DOCKER_CLEANED_FLAG = ".docker_cleaned"    # marker file: one-time Docker cleanup already done
$script:DESKTOP_CONFIG_FLAG = ".desktop_configured" # marker file (used later in the script) - presumably Claude Desktop setup done; verify against usage
$script:LOG_DIR = "logs"
$script:LOG_FILE = "mcp_server.log"

# ----------------------------------------------------------------------------
# Utility Functions
# ----------------------------------------------------------------------------

# Console output helpers: colored status prefix followed by the message.
# NOTE(review): Write-Error and Write-Warning shadow the built-in cmdlets of the
# same name within this script - all later calls use these Write-Host versions.
function Write-Success {
    param([string]$Message)
    Write-Host "✓ " -ForegroundColor Green -NoNewline
    Write-Host $Message
}

function Write-Error {
    param([string]$Message)
    Write-Host "✗ " -ForegroundColor Red -NoNewline
    Write-Host $Message
}

function Write-Warning {
    param([string]$Message)
    Write-Host "⚠ " -ForegroundColor Yellow -NoNewline
    Write-Host $Message
}

function Write-Info {
    param([string]$Message)
    Write-Host "ℹ " -ForegroundColor Cyan -NoNewline
    Write-Host $Message
}

# Section header, e.g. "=== Setting up Python Environment ==="
function Write-Step {
    param([string]$Message)
    Write-Host ""
    Write-Host "=== $Message ===" -ForegroundColor Cyan
}

# Check if command exists
# Check if command exists
function Test-Command {
    param([string]$Command)
    # True when the command resolves in the current session (cmdlet, alias,
    # function, or executable on PATH).
    return [bool](Get-Command $Command -ErrorAction SilentlyContinue)
}

# Alternative method to force remove locked directories
# Alternative method to force remove locked directories.
# Tries, in order: standard Remove-Item; taking ownership (takeown/icacls) and
# retrying; renaming the directory aside for manual/reboot cleanup.
# Returns $true when the directory is gone (or renamed aside), $false otherwise.
function Remove-LockedDirectory {
    param([string]$Path)

    # Nothing to do when the path does not exist.
    if (!(Test-Path $Path)) {
        return $true
    }

    try {
        # Try standard removal first
        Remove-Item -Recurse -Force $Path -ErrorAction Stop
        return $true
    }
    catch {
        Write-Warning "Standard removal failed, trying alternative methods..."

        # Method 1: Use takeown and icacls to force ownership (requires admin rights)
        try {
            Write-Info "Attempting to take ownership of locked files..."
            takeown /F "$Path" /R /D Y 2>$null | Out-Null
            icacls "$Path" /grant administrators:F /T 2>$null | Out-Null
            Remove-Item -Recurse -Force $Path -ErrorAction Stop
            return $true
        }
        catch {
            Write-Warning "Ownership method failed"
        }

        # Method 2: Rename and schedule for deletion on reboot
        try {
            $tempName = "$Path.delete_$(Get-Random)"
            Write-Info "Renaming to: $tempName (will be deleted on next reboot)"
            Rename-Item $Path $tempName -ErrorAction Stop

            # NOTE(review): this branch only prints a message; no deletion task
            # is actually scheduled - the renamed directory must be removed
            # manually if it survives the reboot.
            if (Get-Command "schtasks" -ErrorAction SilentlyContinue) {
                Write-Info "Scheduling for deletion on next reboot..."
            }

            Write-Warning "Environment renamed to $tempName and will be deleted on next reboot"
            return $true
        }
        catch {
            Write-Warning "Rename method failed"
        }

        # If all methods fail, return false
        return $false
    }
}

# Manage configuration file backups with maximum 3 files retention
# Manage configuration file backups with maximum 3 files retention.
# Creates "<file>.backup_<timestamp>" next to the config file, then prunes
# older backups so at most $MaxBackups remain (newest kept).
# Returns the new backup path, or $null on failure / missing config file.
function Manage-ConfigBackups {
    param(
        [string]$ConfigFilePath,
        [int]$MaxBackups = 3
    )

    if (!(Test-Path $ConfigFilePath)) {
        Write-Warning "Configuration file not found: $ConfigFilePath"
        return $null
    }

    try {
        # Create new backup with timestamp
        $timestamp = Get-Date -Format 'yyyyMMdd_HHmmss'
        $backupPath = "$ConfigFilePath.backup_$timestamp"
        Copy-Item $ConfigFilePath $backupPath -ErrorAction Stop

        # Find all existing backups for this config file (includes the one just made)
        $configDir = Split-Path $ConfigFilePath -Parent
        $configFileName = Split-Path $ConfigFilePath -Leaf
        $backupPattern = "$configFileName.backup_*"

        $existingBackups = Get-ChildItem -Path $configDir -Filter $backupPattern -ErrorAction SilentlyContinue |
        Sort-Object LastWriteTime -Descending

        # Keep only the most recent MaxBackups files
        if ($existingBackups.Count -gt $MaxBackups) {
            $backupsToRemove = $existingBackups | Select-Object -Skip $MaxBackups
            foreach ($backup in $backupsToRemove) {
                try {
                    Remove-Item $backup.FullName -Force -ErrorAction Stop
                    Write-Info "Removed old backup: $($backup.Name)"
                }
                catch {
                    Write-Warning "Could not remove old backup: $($backup.Name)"
                }
            }
            Write-Success "Backup retention: kept $MaxBackups most recent backups"
        }

        Write-Success "Backup created: $(Split-Path $backupPath -Leaf)"
        return $backupPath

    }
    catch {
        Write-Warning "Failed to create backup: $_"
        return $null
    }
}

# Get version from config.py
# Get version from config.py
function Get-Version {
    # Reads the __version__ assignment out of config.py.
    # Returns "unknown" when the file or the assignment cannot be found.
    try {
        if (!(Test-Path "config.py")) {
            return "unknown"
        }
        $assignment = Get-Content "config.py" -ErrorAction Stop |
        Where-Object { $_ -match '^__version__ = ' }
        if ($assignment) {
            return ($assignment -replace '__version__ = "([^"]*)"', '$1')
        }
        return "unknown"
    }
    catch {
        return "unknown"
    }
}

# Clear Python cache files
function Clear-PythonCache {
    # Remove compiled Python artifacts: *.pyc files and __pycache__ directories.
    Write-Info "Clearing Python cache files..."

    try {
        # Remove .pyc files
        Get-ChildItem -Path . -Recurse -Filter "*.pyc" -ErrorAction SilentlyContinue | Remove-Item -Force

        # Remove __pycache__ directories.
        # FIX: the original passed "__pycache__" after the -Name switch, so it
        # only worked by silently binding to -Filter positionally. Bind -Filter
        # explicitly and delete via FullName so removal does not depend on the
        # current working directory.
        Get-ChildItem -Path . -Recurse -Directory -Filter "__pycache__" -ErrorAction SilentlyContinue |
        ForEach-Object { Remove-Item -Path $_.FullName -Recurse -Force }

        Write-Success "Python cache cleared"
    }
    catch {
        Write-Warning "Could not clear all cache files: $_"
    }
}

# Get absolute path
# Get absolute path
function Get-AbsolutePath {
    param([string]$Path)
    # Resolve $Path to an absolute path string.
    # FIX: the original returned a PathInfo object for existing paths but a
    # [string] for non-existent ones; normalize both branches to [string] so
    # callers always get the same type.
    if (Test-Path $Path) {
        # Existing path: let the provider resolve it fully.
        return (Resolve-Path $Path).Path
    }
    # Non-existent path: compute what the absolute path would be.
    return $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath($Path)
}

# Check Python version
# Check Python version
function Test-PythonVersion {
    param([string]$PythonCmd)
    # True when the interpreter behind $PythonCmd reports Python 3.10 or newer.
    try {
        $output = & $PythonCmd --version 2>&1
        if ($output -match "Python (\d+)\.(\d+)") {
            $majorVersion = [int]$matches[1]
            $minorVersion = [int]$matches[2]
            if ($majorVersion -gt 3) {
                return $true
            }
            return ($majorVersion -eq 3 -and $minorVersion -ge 10)
        }
        return $false
    }
    catch {
        return $false
    }
}

# Find Python installation
# Find Python installation
function Find-Python {
    # Locate a Python 3.10+ interpreter.
    # Returns the command string to invoke it, or $null when none is found.
    $pythonCandidates = @("python", "python3", "py")

    foreach ($cmd in $pythonCandidates) {
        if (Test-Command $cmd) {
            if (Test-PythonVersion $cmd) {
                $version = & $cmd --version 2>&1
                Write-Success "Found Python: $version"
                return $cmd
            }
        }
    }

    # Fall back to the Windows Python Launcher with explicit versions.
    # FIX: the original list included 3.9 even though the script requires
    # Python 3.10+, and Invoke-Expression does not throw when the launcher
    # reports a missing version - so an unsupported/absent Python could be
    # "found". Invoke the launcher directly and check the exit code instead.
    $pythonVersions = @("3.13", "3.12", "3.11", "3.10")
    foreach ($version in $pythonVersions) {
        $cmd = "py -$version"
        try {
            $null = & py "-$version" --version 2>$null
            if ($LASTEXITCODE -eq 0) {
                Write-Success "Found Python via py launcher: $cmd"
                return $cmd
            }
        }
        catch {
            continue
        }
    }

    Write-Error "Python 3.10+ not found. Please install Python from https://python.org"
    return $null
}

# Clean up old Docker artifacts
# Clean up old Docker artifacts left over from earlier releases
# (gemini-mcp-* and zen-mcp-* containers, images, and volumes).
# Runs at most once: a marker file ($DOCKER_CLEANED_FLAG) records completion.
function Cleanup-Docker {
    # Already cleaned on a previous run - nothing to do.
    if (Test-Path $DOCKER_CLEANED_FLAG) {
        return
    }

    # Docker CLI not installed - nothing to clean.
    if (!(Test-Command "docker")) {
        return
    }

    # NOTE(review): native commands like 'docker info' do not throw on failure,
    # so this catch may not trigger when the daemon is down; later docker calls
    # would then fail silently (their errors are redirected to $null).
    try {
        $null = docker info 2>$null
    }
    catch {
        return
    }

    $foundArtifacts = $false

    # Define containers to remove
    $containers = @(
        "gemini-mcp-server",
        "gemini-mcp-redis", 
        "zen-mcp-server",
        "zen-mcp-redis",
        "zen-mcp-log-monitor"
    )

    # Remove containers (stop first, then remove; errors are ignored)
    foreach ($container in $containers) {
        try {
            $exists = docker ps -a --format "{{.Names}}" | Where-Object { $_ -eq $container }
            if ($exists) {
                if (!$foundArtifacts) {
                    Write-Info "One-time Docker cleanup..."
                    $foundArtifacts = $true
                }
                Write-Info "  Removing container: $container"
                docker stop $container 2>$null | Out-Null
                docker rm $container 2>$null | Out-Null
            }
        }
        catch {
            # Ignore errors
        }
    }

    # Remove images
    $images = @("gemini-mcp-server:latest", "zen-mcp-server:latest")
    foreach ($image in $images) {
        try {
            $exists = docker images --format "{{.Repository}}:{{.Tag}}" | Where-Object { $_ -eq $image }
            if ($exists) {
                if (!$foundArtifacts) {
                    Write-Info "One-time Docker cleanup..."
                    $foundArtifacts = $true
                }
                Write-Info "  Removing image: $image"
                docker rmi $image 2>$null | Out-Null
            }
        }
        catch {
            # Ignore errors
        }
    }

    # Remove volumes
    $volumes = @("redis_data", "mcp_logs")
    foreach ($volume in $volumes) {
        try {
            $exists = docker volume ls --format "{{.Name}}" | Where-Object { $_ -eq $volume }
            if ($exists) {
                if (!$foundArtifacts) {
                    Write-Info "One-time Docker cleanup..."
                    $foundArtifacts = $true
                }
                Write-Info "  Removing volume: $volume"
                docker volume rm $volume 2>$null | Out-Null
            }
        }
        catch {
            # Ignore errors
        }
    }

    if ($foundArtifacts) {
        Write-Success "Docker cleanup complete"
    }

    # Record that cleanup has run so it never repeats.
    New-Item -Path $DOCKER_CLEANED_FLAG -ItemType File -Force | Out-Null
}

# Validate API keys
function Test-ApiKeys {
    # Scan .env for at least one plausible provider API key.
    # Returns $true when any known key is set to a non-placeholder value.
    Write-Step "Validating API Keys"
    
    if (!(Test-Path ".env")) {
        Write-Warning "No .env file found. API keys should be configured."
        return $false
    }
    
    $envContent = Get-Content ".env"
    $hasValidKey = $false
    
    # Known provider keys; the regex values document the expected key shapes.
    $keyPatterns = @{
        "GEMINI_API_KEY"     = "AIza[0-9A-Za-z-_]{35}"
        "OPENAI_API_KEY"     = "sk-[a-zA-Z0-9]{20}T3BlbkFJ[a-zA-Z0-9]{20}"
        "XAI_API_KEY"        = "xai-[a-zA-Z0-9-_]+"
        "OPENROUTER_API_KEY" = "sk-or-[a-zA-Z0-9-_]+"
    }
    
    foreach ($line in $envContent) {
        # Match KEY=VALUE lines, skipping comments.
        if ($line -match '^([^#][^=]*?)=(.*)$') {
            $key = $matches[1].Trim()
            # Strip surrounding single or double quotes from the value.
            $value = $matches[2].Trim() -replace '^["'']|["'']$', ''
            
            # BUG FIX: the original used "your_${key.ToLower()}_here", which
            # PowerShell parses as a braced variable literally named
            # 'key.ToLower()' (always $null), so the comparison was against
            # "your__here" and placeholder values passed as valid. Use a
            # subexpression $() to actually invoke the method.
            $placeholder = "your_$($key.ToLower())_here"
            if ($keyPatterns.ContainsKey($key) -and $value -ne $placeholder -and $value.Length -gt 10) {
                Write-Success "Found valid $key"
                $hasValidKey = $true
            }
        }
    }
    
    if (!$hasValidKey) {
        Write-Warning "No valid API keys found in .env file"
        Write-Info "Please edit .env file with your actual API keys"
        return $false
    }
    
    return $true
}

# Check if uv is available
function Test-Uv {
    # True when the 'uv' package manager is available on PATH.
    Test-Command "uv"
}

# Setup environment using uv-first approach
function Initialize-Environment {
    # Create (or reuse) the project virtual environment and return the
    # absolute path of its python.exe. Prefers uv, falls back to venv.
    Write-Step "Setting up Python Environment"
    
    # Try uv first for faster package management
    if (Test-Uv) {
        Write-Info "Using uv for faster package management..."
        
        if (Test-Path $VENV_PATH) {
            if ($Force) {
                Write-Warning "Removing existing environment..."
                Remove-Item -Recurse -Force $VENV_PATH
            }
            else {
                $pythonPath = "$VENV_PATH\Scripts\python.exe"
                if (Test-Path $pythonPath) {
                    Write-Success "Virtual environment already exists"
                    return Get-AbsolutePath $pythonPath
                }
                # The venv directory exists but its interpreter is gone:
                # treat it as broken and recreate it instead of letting
                # 'uv venv' collide with the stale directory.
                Write-Warning "Existing environment is missing python.exe - recreating..."
                Remove-Item -Recurse -Force $VENV_PATH
            }
        }
        
        try {
            Write-Info "Creating virtual environment with uv..."
            uv venv $VENV_PATH --python 3.12
            if ($LASTEXITCODE -eq 0) {
                Write-Success "Environment created with uv"
                return Get-AbsolutePath "$VENV_PATH\Scripts\python.exe"
            }
            # Non-zero exit: fall through to the standard venv path below.
        }
        catch {
            Write-Warning "uv failed, falling back to venv"
        }
    }
    
    # Fallback to standard venv
    $pythonCmd = Find-Python
    if (!$pythonCmd) {
        throw "Python 3.10+ not found"
    }
    
    if (Test-Path $VENV_PATH) {
        if ($Force) {
            Write-Warning "Removing existing environment..."
            try {
                # Stop any Python processes that might be using the venv
                Get-Process python* -ErrorAction SilentlyContinue | Where-Object { $_.Path -like "*$VENV_PATH*" } | Stop-Process -Force -ErrorAction SilentlyContinue
                
                # Wait a moment for processes to terminate
                Start-Sleep -Seconds 2
                
                # Use the robust removal function
                if (Remove-LockedDirectory $VENV_PATH) {
                    Write-Success "Existing environment removed"
                }
                else {
                    throw "Unable to remove existing environment. Please restart your computer and try again."
                }
            }
            catch {
                Write-Error "Failed to remove existing environment: $_"
                Write-Host ""
                Write-Host "Try these solutions:" -ForegroundColor Yellow
                Write-Host "1. Close all terminals and VS Code instances" -ForegroundColor White
                Write-Host "2. Run: Get-Process python* | Stop-Process -Force" -ForegroundColor White
                Write-Host "3. Manually delete: $VENV_PATH" -ForegroundColor White
                Write-Host "4. Then run the script again" -ForegroundColor White
                exit 1
            }
        }
        else {
            Write-Success "Virtual environment already exists"
            return Get-AbsolutePath "$VENV_PATH\Scripts\python.exe"
        }
    }
    
    Write-Info "Creating virtual environment with $pythonCmd..."
    # FIX: avoid Invoke-Expression on a constructed string. Commands like
    # "py -3.12" are split into launcher + arguments and invoked directly.
    if ($pythonCmd.StartsWith("py ")) {
        $launcherArgs = @(($pythonCmd -split '\s+') | Select-Object -Skip 1)
        & py @launcherArgs -m venv $VENV_PATH
    }
    else {
        & $pythonCmd -m venv $VENV_PATH
    }
    
    if ($LASTEXITCODE -ne 0) {
        throw "Failed to create virtual environment"
    }
    
    Write-Success "Virtual environment created"
    return Get-AbsolutePath "$VENV_PATH\Scripts\python.exe"
}

# Setup virtual environment (legacy function for compatibility)
function Initialize-VirtualEnvironment {
    # Legacy venv-only setup, kept for compatibility with older call sites.
    # Honors the script-level $SkipVenv and $Force switches.
    Write-Step "Setting up Python Virtual Environment"
    
    if (!$SkipVenv -and (Test-Path $VENV_PATH)) {
        if ($Force) {
            Write-Warning "Removing existing virtual environment..."
            try {
                # Stop any Python processes that might be using the venv
                Get-Process python* -ErrorAction SilentlyContinue | Where-Object { $_.Path -like "*$VENV_PATH*" } | Stop-Process -Force -ErrorAction SilentlyContinue
                
                # Wait a moment for processes to terminate
                Start-Sleep -Seconds 2
                
                # Use the robust removal function
                if (Remove-LockedDirectory $VENV_PATH) {
                    Write-Success "Existing environment removed"
                }
                else {
                    throw "Unable to remove existing environment. Please restart your computer and try again."
                }
            }
            catch {
                Write-Error "Failed to remove existing environment: $_"
                Write-Host ""
                Write-Host "Try these solutions:" -ForegroundColor Yellow
                Write-Host "1. Close all terminals and VS Code instances" -ForegroundColor White
                Write-Host "2. Run: Get-Process python* | Stop-Process -Force" -ForegroundColor White
                Write-Host "3. Manually delete: $VENV_PATH" -ForegroundColor White
                Write-Host "4. Then run the script again" -ForegroundColor White
                exit 1
            }
        }
        else {
            Write-Success "Virtual environment already exists"
            return
        }
    }
    
    if ($SkipVenv) {
        Write-Warning "Skipping virtual environment setup"
        return
    }
    
    $pythonCmd = Find-Python
    if (!$pythonCmd) {
        Write-Error "Python 3.10+ not found. Please install Python from https://python.org"
        exit 1
    }
    
    Write-Info "Using Python: $pythonCmd"
    Write-Info "Creating virtual environment..."
    
    try {
        # FIX: avoid Invoke-Expression on a constructed string. Split
        # "py -3.x" style commands and invoke the launcher directly.
        if ($pythonCmd.StartsWith("py ")) {
            $launcherArgs = @(($pythonCmd -split '\s+') | Select-Object -Skip 1)
            & py @launcherArgs -m venv $VENV_PATH
        }
        else {
            & $pythonCmd -m venv $VENV_PATH
        }
        
        if ($LASTEXITCODE -ne 0) {
            throw "Failed to create virtual environment"
        }
        
        Write-Success "Virtual environment created"
    }
    catch {
        Write-Error "Failed to create virtual environment: $_"
        exit 1
    }
}

# Install dependencies function - Simplified uv-first approach
function Install-Dependencies {
    # Install requirements into the environment at $PythonPath.
    # Tries uv first (fast), falls back to pip. Exits the script on failure.
    param(
        [Parameter(Mandatory = $true)]
        [string]$PythonPath,
        [switch]$InstallDevDependencies = $false
    )
    
    Write-Step "Installing Dependencies"

    # Build requirements files list
    $requirementsFiles = @("requirements.txt")
    if ($InstallDevDependencies) {
        if (Test-Path "requirements-dev.txt") {
            $requirementsFiles += "requirements-dev.txt"
            Write-Info "Including development dependencies from requirements-dev.txt"
        }
        else {
            Write-Warning "Development dependencies requested but requirements-dev.txt not found"
        }
    }

    # Try uv first for faster package management
    if (Test-Uv) {
        Write-Info "Installing dependencies with uv (fast)..."
        try {
            # Resolve uv once instead of per requirements file.
            $uv = (Get-Command uv -ErrorAction Stop).Source
            foreach ($file in $requirementsFiles) {
                Write-Info "Installing from $file with uv..."
                $arguments = @('pip', 'install', '-r', $file, '--python', $PythonPath)
                $proc = Start-Process -FilePath $uv -ArgumentList $arguments -NoNewWindow -Wait -PassThru

                if ($proc.ExitCode -ne 0) {
                    throw "uv failed to install $file with exit code $($proc.ExitCode)"
                }
            }
            Write-Success "Dependencies installed successfully with uv"
            return
        }
        catch {
            Write-Warning "uv installation failed: $_. Falling back to pip"
        }
    }

    # Fallback to pip.
    # FIX: invoke pip as "python -m pip" instead of pip.exe. On Windows,
    # pip.exe cannot upgrade itself while it is running, and a fresh venv
    # is not guaranteed to expose a pip.exe shim at all.
    Write-Info "Installing dependencies with pip..."

    & $PythonPath -m pip install --upgrade pip 2>&1 | Out-Null
    if ($LASTEXITCODE -ne 0) {
        # Best-effort upgrade only; continue with whatever pip is present.
        Write-Warning "Could not upgrade pip, continuing..."
    }

    try {
        foreach ($file in $requirementsFiles) {
            Write-Info "Installing from $file with pip..."
            & $PythonPath -m pip install -r $file
            if ($LASTEXITCODE -ne 0) {
                throw "pip failed to install $file"
            }
        }
        Write-Success "Dependencies installed successfully with pip"
    }
    catch {
        Write-Error "Failed to install dependencies with pip: $_"
        exit 1
    }
}

# ============================================================================
# Docker Functions
# ============================================================================

# Test Docker availability and requirements
function Test-DockerRequirements {
    # Verify that Docker is installed, the daemon is running, and some form
    # of Docker Compose (v1 or v2) is available. Returns $true when usable.
    Write-Step "Checking Docker Requirements"
    
    if (!(Test-Command "docker")) {
        Write-Error "Docker not found. Please install Docker Desktop from https://docker.com"
        return $false
    }
    
    # FIX: a failing native command does not throw in PowerShell, so the
    # original catch never detected a stopped daemon. Check $LASTEXITCODE:
    # "docker version" exits non-zero when the daemon is not running.
    try {
        docker version 2>$null | Out-Null
    }
    catch { }
    if ($LASTEXITCODE -ne 0) {
        Write-Error "Docker is installed but not running. Please start Docker Desktop."
        return $false
    }
    Write-Success "Docker is installed and running"
    
    if (Test-Command "docker-compose") {
        Write-Success "Docker Compose is available"
        return $true
    }
    
    Write-Warning "docker-compose not found. Trying docker compose..."
    try {
        docker compose version 2>$null | Out-Null
    }
    catch { }
    if ($LASTEXITCODE -eq 0) {
        Write-Success "Docker Compose (v2) is available"
        return $true
    }
    
    Write-Error "Docker Compose not found. Please install Docker Compose."
    return $false
}

# Build Docker image
function Build-DockerImage {
    # Build the zen-mcp-server:latest image from the local Dockerfile.
    # Skips the build when the image exists unless -Force. Returns $true/$false.
    param([switch]$Force = $false)
    
    Write-Step "Building Docker Image"
    
    # Check if image exists
    $imageExists = $null
    try {
        $imageExists = docker images --format "{{.Repository}}:{{.Tag}}" | Where-Object { $_ -eq "zen-mcp-server:latest" }
    }
    catch {
        # Continue if command fails - treat as "no image" and build.
    }
    
    if ($imageExists -and !$Force) {
        Write-Success "Docker image already exists. Use -Force to rebuild."
        return $true
    }
    
    if ($Force -and $imageExists) {
        Write-Info "Forcing rebuild of Docker image..."
        # FIX: native commands do not throw, so check the exit code instead
        # of relying on try/catch to surface an rmi failure.
        docker rmi zen-mcp-server:latest 2>$null
        if ($LASTEXITCODE -ne 0) {
            Write-Warning "Could not remove existing image, continuing..."
        }
    }
    
    Write-Info "Building Docker image from Dockerfile..."
    try {
        if ($Dev) {
            # NOTE(review): no dev-specific build arguments are passed yet;
            # the original declared an unused $buildArgs array. Extend the
            # docker build call below when dev builds diverge.
            Write-Info "Building with development support..."
        }
        
        docker build -t zen-mcp-server:latest .
        if ($LASTEXITCODE -ne 0) {
            throw "Docker build failed"
        }
        
        Write-Success "Docker image built successfully"
        return $true
    }
    catch {
        Write-Error "Failed to build Docker image: $_"
        return $false
    }
}

# Prepare Docker environment file
function Initialize-DockerEnvironment {
    # Ensure a .env file and logs directory exist before starting containers.
    Write-Step "Preparing Docker Environment"
    
    if (Test-Path ".env") {
        Write-Success ".env file exists"
    }
    else {
        Write-Warning "No .env file found. Creating default .env file..."
        
        # Template with placeholder keys the user must replace.
        $envTemplate = @"
# API Keys - Replace with your actual keys
GEMINI_API_KEY=your_gemini_api_key_here
GOOGLE_API_KEY=your_google_api_key_here
OPENAI_API_KEY=your_openai_api_key_here
ANTHROPIC_API_KEY=your_anthropic_api_key_here
XAI_API_KEY=your_xai_api_key_here
DIAL_API_KEY=your_dial_api_key_here
DIAL_API_HOST=your_dial_api_host_here
DIAL_API_VERSION=your_dial_api_version_here
OPENROUTER_API_KEY=your_openrouter_api_key_here
CUSTOM_API_URL=your_custom_api_url_here
CUSTOM_API_KEY=your_custom_api_key_here
CUSTOM_MODEL_NAME=your_custom_model_name_here

# Server Configuration
DEFAULT_MODEL=auto
LOG_LEVEL=INFO
LOG_MAX_SIZE=10MB
LOG_BACKUP_COUNT=5
DEFAULT_THINKING_MODE_THINKDEEP=high

# Optional Advanced Settings
#DISABLED_TOOLS=
#MAX_MCP_OUTPUT_TOKENS=
#TZ=UTC
"@
        
        $envTemplate | Out-File -FilePath ".env" -Encoding UTF8
        Write-Success "Default .env file created"
        Write-Warning "Please edit .env file with your actual API keys"
    }
    
    # Create logs directory for volume mount
    Initialize-Logging
    
    return $true
}

# Start Docker services
function Start-DockerServices {
    # Restart the compose stack, preferring legacy docker-compose when
    # present, otherwise the docker compose v2 plugin. With -Follow the
    # services run in the foreground; otherwise they are detached.
    param([switch]$Follow = $false)
    
    Write-Step "Starting Docker Services"
    
    if (!(Test-Path "docker-compose.yml")) {
        Write-Error "docker-compose.yml not found in current directory"
        return $false
    }
    
    try {
        $legacyCompose = Test-Command "docker-compose"
        
        # Tear down anything left over from a previous run.
        Write-Info "Stopping any existing services..."
        if ($legacyCompose) {
            docker-compose down 2>$null
        }
        else {
            docker compose down 2>$null
        }
        
        Write-Info "Starting Zen MCP Server with Docker Compose..."
        if ($legacyCompose) {
            if ($Follow) { docker-compose up --build }
            else { docker-compose up -d --build }
        }
        else {
            if ($Follow) { docker compose up --build }
            else { docker compose up -d --build }
        }
        
        if ($LASTEXITCODE -ne 0) {
            throw "Failed to start Docker services"
        }
        
        if (!$Follow) {
            Write-Success "Docker services started successfully"
            Write-Info "Container name: zen-mcp-server"
            Write-Host ""
            Write-Host "To view logs: " -NoNewline
            Write-Host "docker logs -f zen-mcp-server" -ForegroundColor Yellow
            Write-Host "To stop: " -NoNewline
            Write-Host "docker-compose down" -ForegroundColor Yellow
        }
        
        return $true
    }
    catch {
        Write-Error "Failed to start Docker services: $_"
        return $false
    }
}

# Get Docker container status
function Get-DockerStatus {
    # Report whether the zen-mcp-server container is currently running.
    try {
        $status = docker ps --filter "name=zen-mcp-server" --format "{{.Status}}"
        if (!$status) {
            Write-Warning "Container not running"
            return $false
        }
        Write-Success "Container status: $status"
        return $true
    }
    catch {
        Write-Warning "Could not get container status: $_"
        return $false
    }
}

# ============================================================================
# End Docker Functions
# ============================================================================

# Setup logging directory
function Initialize-Logging {
    # Ensure the log directory exists (created once, reused afterwards).
    Write-Step "Setting up Logging"
    
    if (Test-Path $LOG_DIR) {
        Write-Success "Logs directory already exists"
        return
    }
    
    New-Item -ItemType Directory -Path $LOG_DIR -Force | Out-Null
    Write-Success "Logs directory created"
}

# Check Docker
function Test-Docker {
    # Informational Docker check (warnings only, never fails the setup).
    # Honors the script-level $SkipDocker switch.
    Write-Step "Checking Docker Setup"
    
    if ($SkipDocker) {
        Write-Warning "Skipping Docker checks"
        return
    }
    
    if (!(Test-Command "docker")) {
        Write-Warning "Docker not found. Install Docker Desktop from https://docker.com"
        return
    }
    
    # FIX: native commands do not throw on non-zero exit, so the original
    # catch never detected a stopped daemon. Inspect $LASTEXITCODE instead.
    try {
        docker version 2>$null | Out-Null
    }
    catch { }
    if ($LASTEXITCODE -ne 0) {
        Write-Warning "Docker is installed but not running. Please start Docker Desktop."
        return
    }
    Write-Success "Docker is installed and running"
    
    if (Test-Command "docker-compose") {
        Write-Success "Docker Compose is available"
    }
    else {
        Write-Warning "Docker Compose not found. Install Docker Desktop for Windows."
    }
}

# ----------------------------------------------------------------------------
# MCP Client Configuration System
# ----------------------------------------------------------------------------

# Centralized MCP client definitions
# Each entry describes one supported MCP client:
#   DetectionType "Command" -> client is detected via an executable on PATH
#   DetectionType "Path"    -> client is detected via a file/directory
#   ConfigPath              -> JSON file that receives the server entry
#   ConfigJsonPath          -> dotted path to the zen entry inside that JSON
$script:McpClientDefinitions = @(
    @{
        Name           = "Claude Desktop"
        DetectionPath  = "$env:APPDATA\Claude\claude_desktop_config.json"
        DetectionType  = "Path"
        ConfigPath     = "$env:APPDATA\Claude\claude_desktop_config.json"
        ConfigJsonPath = "mcpServers.zen"
        NeedsConfigDir = $true
    },
    @{
        # VS Code stores MCP servers under "mcp.servers" in settings.json;
        # Configure-McpClient also scans profiles for the newest settings file.
        Name             = "VSCode"
        DetectionCommand = "code"
        DetectionType    = "Command"
        ConfigPath       = "$env:APPDATA\Code\User\settings.json"
        ConfigJsonPath   = "mcp.servers.zen"
        IsVSCode         = $true
    },
    @{
        # Insiders uses a dedicated mcp.json with a top-level "servers" object.
        Name             = "VSCode Insiders"
        DetectionCommand = "code-insiders"
        DetectionType    = "Command"
        ConfigPath       = "$env:APPDATA\Code - Insiders\User\mcp.json"
        ConfigJsonPath   = "servers.zen"
        IsVSCodeInsiders = $true
    },
    @{
        Name             = "Cursor"
        DetectionCommand = "cursor"
        DetectionType    = "Command"
        ConfigPath       = "$env:USERPROFILE\.cursor\mcp.json"
        ConfigJsonPath   = "mcpServers.zen"
    },
    @{
        Name           = "Windsurf"
        DetectionPath  = "$env:USERPROFILE\.codeium\windsurf"
        DetectionType  = "Path"
        ConfigPath     = "$env:USERPROFILE\.codeium\windsurf\mcp_config.json"
        ConfigJsonPath = "mcpServers.zen"
    },
    @{
        Name           = "Trae"
        DetectionPath  = "$env:APPDATA\Trae"
        DetectionType  = "Path"
        ConfigPath     = "$env:APPDATA\Trae\User\mcp.json"
        ConfigJsonPath = "mcpServers.zen"
    }
)

# Docker MCP configuration template (legacy, kept for backward compatibility)
# NOTE: "docker exec" requires the zen-mcp-server container to already be
# running; Get-DockerMcpConfigRun (docker run) is the preferred replacement.
$script:DockerMcpConfig = @{
    command = "docker"
    args    = @("exec", "-i", "zen-mcp-server", "python", "server.py")
    type    = "stdio"
}

# Generate Docker MCP configuration using docker run (recommended for all clients)
function Get-DockerMcpConfigRun {
    # Build a stdio MCP entry that launches a fresh container per session.
    # The .env file is assumed to sit next to server.py and is passed via
    # --env-file so the container sees the user's API keys.
    param([string]$ServerPath)
    
    $projectDir = Split-Path $ServerPath -Parent
    $envFilePath = Join-Path $projectDir ".env"
    
    @{
        command = "docker"
        args    = @("run", "--rm", "-i", "--env-file", $envFilePath, "zen-mcp-server:latest", "python", "server.py")
        type    = "stdio"
    }
}

# Generate Python MCP configuration
function Get-PythonMcpConfig {
    # Build a stdio MCP entry that runs server.py with the venv interpreter.
    param([string]$PythonPath, [string]$ServerPath)
    
    @{
        command = $PythonPath
        args    = @($ServerPath)
        type    = "stdio"
    }
}

# Check if client uses mcp.json format with servers structure
function Test-McpJsonFormat {
    # Clients whose config file is literally named mcp.json use the
    # dedicated servers/mcpServers wrapper format.
    param([hashtable]$Client)
    
    (Split-Path $Client.ConfigPath -Leaf) -eq "mcp.json"
}

# Check if client uses the new VS Code Insiders format (servers instead of mcpServers)
function Test-VSCodeInsidersFormat {
    # VS Code Insiders uses a top-level "servers" object (not "mcpServers");
    # both the flag and the JSON path must agree.
    param([hashtable]$Client)
    
    ($Client.IsVSCodeInsiders -eq $true) -and ($Client.ConfigJsonPath -eq "servers.zen")
}

# Analyze existing MCP configuration to determine type (Python or Docker)
function Get-ExistingMcpConfigType {
    # Inspect a client's JSON config and classify any existing zen entry.
    # Returns a hashtable: Exists (bool), Type ("None"/"Docker"/"Python"/
    # "Unknown"/"Error"), optional SubType, human-readable Details, and the
    # raw Command/Args when an entry was found.
    param(
        [Parameter(Mandatory = $true)]
        [hashtable]$Client,
        [Parameter(Mandatory = $true)]
        [string]$ConfigPath
    )
    
    if (!(Test-Path $ConfigPath)) {
        return @{
            Exists  = $false
            Type    = "None"
            Details = "No configuration found"
        }
    }
    
    try {
        # SilentlyContinue: malformed JSON yields $null rather than throwing.
        $content = Get-Content $ConfigPath -Raw | ConvertFrom-Json -ErrorAction SilentlyContinue
        if (!$content) {
            return @{
                Exists  = $false
                Type    = "None"
                Details = "Invalid JSON configuration"
            }
        }
        
        # Walk the dotted ConfigJsonPath (e.g. "mcp.servers.zen"): the last
        # segment is the zen key, everything before it is the parent chain.
        $pathParts = $Client.ConfigJsonPath.Split('.')
        $zenKey = $pathParts[-1]
        $parentPath = $pathParts[0..($pathParts.Length - 2)]
        
        $targetObject = $content
        foreach ($key in $parentPath) {
            if (!$targetObject.PSObject.Properties[$key]) {
                return @{
                    Exists  = $false
                    Type    = "None"
                    Details = "Configuration structure not found"
                }
            }
            $targetObject = $targetObject.$key
        }
        
        if (!$targetObject.PSObject.Properties[$zenKey]) {
            return @{
                Exists  = $false
                Type    = "None"
                Details = "Zen configuration not found"
            }
        }
        
        $zenConfig = $targetObject.$zenKey
        
        # Classify by the configured launch command.
        if ($zenConfig.command -eq "docker") {
            $dockerType = "Unknown"
            $details = "Docker configuration"
            
            # Distinguish "docker run" (dedicated container) from
            # "docker exec" (attaches to an already-running one).
            if ($zenConfig.args -and $zenConfig.args.Count -gt 0) {
                if ($zenConfig.args[0] -eq "run") {
                    $dockerType = "Docker Run"
                    $details = "Docker run (dedicated container)"
                }
                elseif ($zenConfig.args[0] -eq "exec") {
                    $dockerType = "Docker Exec"
                    $details = "Docker exec (existing container)"
                }
                else {
                    $details = "Docker ($($zenConfig.args[0]))"
                }
            }
            
            return @{
                Exists  = $true
                Type    = "Docker"
                SubType = $dockerType
                Details = $details
                Command = $zenConfig.command
                Args    = $zenConfig.args
            }
        }
        elseif ($zenConfig.command -and $zenConfig.command.EndsWith("python.exe")) {
            # NOTE(review): this only recognizes Windows-style "python.exe"
            # paths; a bare "python" command would fall to "Unknown" below.
            $pythonType = "Python"
            $details = "Python virtual environment"
            
            if ($zenConfig.command.Contains(".zen_venv")) {
                $details = "Python (zen virtual environment)"
            }
            elseif ($zenConfig.command.Contains("venv")) {
                $details = "Python (virtual environment)"
            }
            else {
                $details = "Python (system installation)"
            }
            
            return @{
                Exists  = $true
                Type    = "Python"
                SubType = $pythonType
                Details = $details
                Command = $zenConfig.command
                Args    = $zenConfig.args
            }
        }
        else {
            return @{
                Exists  = $true
                Type    = "Unknown"
                Details = "Unknown configuration type: $($zenConfig.command)"
                Command = $zenConfig.command
                Args    = $zenConfig.args
            }
        }
        
    }
    catch {
        return @{
            Exists  = $false
            Type    = "Error"
            Details = "Error reading configuration: $_"
        }
    }
}

# Generic MCP client configuration function
function Configure-McpClient {
    # Detect one MCP client, ask the user for confirmation, and write (or
    # replace) the zen server entry in that client's JSON config file.
    # Interactive: prompts before overwriting an existing configuration.
    param(
        [Parameter(Mandatory = $true)]
        [hashtable]$Client,
        [Parameter(Mandatory = $true)]
        [bool]$UseDocker,
        [string]$PythonPath = "",
        [string]$ServerPath = ""
    )

    Write-Step "Checking $($Client.Name) Integration"

    # Client detection: either an executable on PATH or a known file/dir.
    $detected = $false
    if ($Client.DetectionType -eq "Command" -and (Test-Command $Client.DetectionCommand)) {
        $detected = $true
    }
    elseif ($Client.DetectionType -eq "Path" -and (Test-Path ($Client.DetectionPath -as [string]))) {
        $detected = $true
    }

    if (!$detected) {
        Write-Info "$($Client.Name) not detected - skipping integration"
        return
    }
    Write-Info "Found $($Client.Name)"

    # Handle VSCode special logic for profiles: pick the most recently
    # modified settings.json among the default and all profile directories.
    $configPath = $Client.ConfigPath
    if ($Client.IsVSCode) {
        $userPath = Split-Path $configPath -Parent
        if (!(Test-Path $userPath)) {
            Write-Warning "$($Client.Name) user directory not found. Skipping."
            return
        }
        
        # Find most recent settings.json (default or profile)
        $settingsFiles = @()
        $defaultSettings = $configPath
        if (Test-Path $defaultSettings) {
            $settingsFiles += @{
                Path         = $defaultSettings
                LastModified = (Get-Item $defaultSettings).LastWriteTime
            }
        }
        
        $profilesPath = Join-Path $userPath "profiles"
        if (Test-Path $profilesPath) {
            Get-ChildItem $profilesPath -Directory | ForEach-Object {
                $profileSettings = Join-Path $_.FullName "settings.json"
                if (Test-Path $profileSettings) {
                    $settingsFiles += @{
                        Path         = $profileSettings
                        LastModified = (Get-Item $profileSettings).LastWriteTime
                    }
                }
            }
        }
        
        if ($settingsFiles.Count -gt 0) {
            $configPath = ($settingsFiles | Sort-Object LastModified -Descending | Select-Object -First 1).Path
        }
    }

    # Handle VSCode Insiders special logic for profiles (uses mcp.json):
    # same most-recent-file selection as above, but over mcp.json files.
    if ($Client.IsVSCodeInsiders) {
        $userPath = Split-Path $configPath -Parent
        if (!(Test-Path $userPath)) {
            Write-Warning "$($Client.Name) user directory not found. Skipping."
            return
        }
        
        # Find most recent mcp.json (default or profile)
        $mcpFiles = @()
        $defaultMcp = $configPath
        if (Test-Path $defaultMcp) {
            $mcpFiles += @{
                Path         = $defaultMcp
                LastModified = (Get-Item $defaultMcp).LastWriteTime
            }
        }
        
        $profilesPath = Join-Path $userPath "profiles"
        if (Test-Path $profilesPath) {
            Get-ChildItem $profilesPath -Directory | ForEach-Object {
                $profileMcp = Join-Path $_.FullName "mcp.json"
                if (Test-Path $profileMcp) {
                    $mcpFiles += @{
                        Path         = $profileMcp
                        LastModified = (Get-Item $profileMcp).LastWriteTime
                    }
                }
            }
        }
        
        if ($mcpFiles.Count -gt 0) {
            $configPath = ($mcpFiles | Sort-Object LastModified -Descending | Select-Object -First 1).Path
        }
    }

    # Check if already configured and analyze existing configuration,
    # so the prompt can say whether we overwrite or switch config types.
    $existingConfig = Get-ExistingMcpConfigType -Client $Client -ConfigPath $configPath
    $newConfigType = if ($UseDocker) { "Docker" } else { "Python" }
    
    if ($existingConfig.Exists) {
        Write-Info "Found existing Zen MCP configuration in $($Client.Name)"
        Write-Info "  Current: $($existingConfig.Details)"
        Write-Info "  New: $newConfigType configuration"
        
        if ($existingConfig.Type -eq $newConfigType) {
            Write-Warning "Same configuration type ($($existingConfig.Type)) already exists"
            $response = Read-Host "`nOverwrite existing $($existingConfig.Type) configuration? (y/N)"
        }
        else {
            Write-Warning "Different configuration type detected"
            Write-Info "  Replacing: $($existingConfig.Type) → $newConfigType"
            $response = Read-Host "`nReplace $($existingConfig.Type) with $newConfigType configuration? (y/N)"
        }
        
        if ($response -ne 'y' -and $response -ne 'Y') {
            Write-Info "Keeping existing configuration in $($Client.Name)"
            return
        }
        
        Write-Info "Proceeding with configuration update..."
    }
    else {
        # User confirmation for new installation
        $response = Read-Host "`nConfigure Zen MCP for $($Client.Name) (mode: $newConfigType)? (y/N)"
        if ($response -ne 'y' -and $response -ne 'Y') {
            Write-Info "Skipping $($Client.Name) integration"
            return
        }
    }

    try {
        # Create config directory if needed
        $configDir = Split-Path $configPath -Parent
        if (!(Test-Path $configDir)) {
            New-Item -ItemType Directory -Path $configDir -Force | Out-Null
        }

        # Backup existing config before modifying it.
        if (Test-Path $configPath) {
            Manage-ConfigBackups -ConfigFilePath $configPath
        }

        # Read or create config; empty/invalid files become a blank object.
        $config = New-Object PSObject
        $usesMcpJsonFormat = Test-McpJsonFormat -Client $Client
        $usesVSCodeInsidersFormat = Test-VSCodeInsidersFormat -Client $Client
        
        if (Test-Path $configPath) {
            $fileContent = Get-Content $configPath -Raw
            if ($fileContent.Trim()) {
                $config = $fileContent | ConvertFrom-Json -ErrorAction SilentlyContinue
            }
            if ($null -eq $config) { $config = New-Object PSObject }
        }
        
        # Initialize structure for mcp.json format files if they don't exist or are empty
        if ($usesMcpJsonFormat) {
            if ($usesVSCodeInsidersFormat) {
                # For VS Code Insiders format: {"servers": {...}}
                if (!$config.PSObject.Properties["servers"]) {
                    $config | Add-Member -MemberType NoteProperty -Name "servers" -Value (New-Object PSObject)
                }
            }
            else {
                # For other clients format: {"mcpServers": {...}}
                if (!$config.PSObject.Properties["mcpServers"]) {
                    $config | Add-Member -MemberType NoteProperty -Name "mcpServers" -Value (New-Object PSObject)
                }
            }
        }
        
        # Initialize MCP structure for VS Code settings.json if it doesn't exist
        if ($Client.IsVSCode -and $Client.ConfigJsonPath.StartsWith("mcp.")) {
            if (!$config.PSObject.Properties["mcp"]) {
                $config | Add-Member -MemberType NoteProperty -Name "mcp" -Value (New-Object PSObject)
            }
            if (!$config.mcp.PSObject.Properties["servers"]) {
                $config.mcp | Add-Member -MemberType NoteProperty -Name "servers" -Value (New-Object PSObject)
            }
        }

        # Generate server config
        $serverConfig = if ($UseDocker) { 
            # Use docker run for all clients (more reliable than docker exec)
            Get-DockerMcpConfigRun $ServerPath
        }
        else { 
            Get-PythonMcpConfig $PythonPath $ServerPath 
        }

        # Navigate the dotted ConfigJsonPath, creating intermediate objects,
        # then set/overwrite the zen entry at the final key.
        $pathParts = $Client.ConfigJsonPath.Split('.')
        $zenKey = $pathParts[-1]
        $parentPath = $pathParts[0..($pathParts.Length - 2)]
        
        $targetObject = $config
        foreach ($key in $parentPath) {
            if (!$targetObject.PSObject.Properties[$key]) {
                $targetObject | Add-Member -MemberType NoteProperty -Name $key -Value (New-Object PSObject)
            }
            $targetObject = $targetObject.$key
        }

        $targetObject | Add-Member -MemberType NoteProperty -Name $zenKey -Value $serverConfig -Force

        # Write config
        $config | ConvertTo-Json -Depth 10 | Out-File $configPath -Encoding UTF8
        Write-Success "Successfully configured $($Client.Name)"
        Write-Host "  Config: $configPath" -ForegroundColor Gray
        Write-Host "  Restart $($Client.Name) to use the new MCP server" -ForegroundColor Gray

    }
    catch {
        Write-Error "Failed to update $($Client.Name) configuration: $_"
    }
}

# Main MCP client configuration orchestrator
# Orchestrates MCP client configuration: walks every known GUI client
# definition, then (for Python deployments only) offers CLI tool integrations.
function Invoke-McpClientConfiguration {
    param(
        [Parameter(Mandatory = $true)]
        [bool]$UseDocker,
        [string]$PythonPath = "",
        [string]$ServerPath = ""
    )

    Write-Step "Checking Client Integrations"

    # GUI clients all share the JSON-config pattern handled by Configure-McpClient
    $script:McpClientDefinitions | ForEach-Object {
        Configure-McpClient -Client $_ -UseDocker $UseDocker -PythonPath $PythonPath -ServerPath $ServerPath
    }

    if ($UseDocker) {
        return
    }

    # CLI tools use their own configuration mechanisms, so handle them individually
    Test-ClaudeCliIntegration $PythonPath $ServerPath
    Test-GeminiCliIntegration (Split-Path $ServerPath -Parent)
    Test-QwenCliIntegration $PythonPath $ServerPath
}

# Keep existing CLI integration functions
# Checks whether the Claude CLI is installed and, if so, whether the zen
# server is already registered with it; otherwise prints the command the
# user can run to register it manually.
function Test-ClaudeCliIntegration {
    param([string]$PythonPath, [string]$ServerPath)

    if (!(Test-Command "claude")) {
        return
    }

    Write-Info "Claude CLI detected - checking configuration..."

    # Same registration hint is used on both the "not configured" and error paths
    $addCommand = "  claude mcp add -s user zen $PythonPath $ServerPath"

    try {
        $registered = claude mcp list 2>$null
        if ($registered -match "zen") {
            Write-Success "Claude CLI already configured for zen server"
            return
        }
        Write-Info "To add zen server to Claude CLI, run:"
        Write-Host $addCommand -ForegroundColor Cyan
    }
    catch {
        Write-Info "To configure Claude CLI manually, run:"
        Write-Host $addCommand -ForegroundColor Cyan
    }
}

# Offers to register the zen server with the Gemini CLI.
# Runs only when ~/.gemini/settings.json already exists (i.e. the CLI has
# been used at least once) and zen is not yet configured there. Creates a
# zen-mcp-server.cmd wrapper so the CLI can launch the server from the repo
# directory, then writes a "zen" entry under mcpServers (backing up first).
function Test-GeminiCliIntegration {
    param([string]$ScriptDir)
    
    $zenWrapper = Join-Path $ScriptDir "zen-mcp-server.cmd"
    
    # Check if Gemini settings file exists (Windows path)
    $geminiConfig = "$env:USERPROFILE\.gemini\settings.json"
    if (!(Test-Path $geminiConfig)) {
        return
    }
    
    # Check if zen is already configured (cheap textual check, no JSON parse)
    $configContent = Get-Content $geminiConfig -Raw -ErrorAction SilentlyContinue
    if ($configContent -and $configContent -match '"zen"') {
        return
    }
    
    # Ask user if they want to add Zen to Gemini CLI
    Write-Host ""
    $response = Read-Host "Configure Zen for Gemini CLI? (y/N)"
    if ($response -ne 'y' -and $response -ne 'Y') {
        Write-Info "Skipping Gemini CLI integration"
        return
    }
    
    # Ensure wrapper script exists; it prefers the venv interpreter and falls
    # back to the system python when the venv is absent
    if (!(Test-Path $zenWrapper)) {
        Write-Info "Creating wrapper script for Gemini CLI..."
        @"
@echo off
cd /d "%~dp0"
if exist ".zen_venv\Scripts\python.exe" (
    .zen_venv\Scripts\python.exe server.py %*
) else (
    python server.py %*
)
"@ | Out-File -FilePath $zenWrapper -Encoding ASCII
        
        Write-Success "Created zen-mcp-server.cmd wrapper script"
    }
    
    # Update Gemini settings
    Write-Info "Updating Gemini CLI configuration..."
    
    try {
        # Create backup with retention management
        $backupPath = Manage-ConfigBackups $geminiConfig
        
        # Read existing config or create new one
        $config = @{}
        if (Test-Path $geminiConfig) {
            $config = Get-Content $geminiConfig -Raw | ConvertFrom-Json
        }
        
        # Ensure mcpServers exists
        if (!$config.mcpServers) {
            $config | Add-Member -MemberType NoteProperty -Name "mcpServers" -Value @{} -Force
        }
        
        # Add zen server entry pointing at the wrapper script
        $zenConfig = @{
            command = $zenWrapper
        }
        
        $config.mcpServers | Add-Member -MemberType NoteProperty -Name "zen" -Value $zenConfig -Force
        
        # Write updated config
        $config | ConvertTo-Json -Depth 10 | Out-File $geminiConfig -Encoding UTF8
        
        Write-Success "Successfully configured Gemini CLI"
        Write-Host "  Config: $geminiConfig" -ForegroundColor Gray
        Write-Host "  Restart Gemini CLI to use Zen MCP Server" -ForegroundColor Gray
        
    }
    catch {
        # On failure, show the user exactly what to paste into settings.json
        Write-Error "Failed to update Gemini CLI config: $_"
        Write-Host ""
        Write-Host "Manual config location: $geminiConfig"
        Write-Host "Add this configuration:"
        Write-Host @"
{
  "mcpServers": {
    "zen": {
      "command": "$zenWrapper"
    }
  }
}
"@ -ForegroundColor Yellow
    }
}

# Prints a manual Qwen CLI configuration snippet the user can paste into
# settings.json when automatic configuration is declined or fails.
#
# Parameters:
#   PythonPath     - interpreter used as the MCP "command"
#   ServerPath     - server.py path passed as the single argument
#   ScriptDir      - working directory for the server process
#   ConfigPath     - location of the Qwen settings.json being described
#   EnvironmentMap - optional ordered map of environment variables to embed
#
# Fixes over the previous version: PowerShell does not treat \" as an escape
# (the old Write-Host lines printed literal backslashes), the regex-based
# escaping expanded one backslash into four, env entries lacked separating
# commas, and Windows paths were interpolated into JSON unescaped.
function Show-QwenManualConfig {
    param(
        [string]$PythonPath,
        [string]$ServerPath,
        [string]$ScriptDir,
        [string]$ConfigPath,
        [System.Collections.IDictionary]$EnvironmentMap
    )

    Write-Host "Manual config location: $ConfigPath" -ForegroundColor Yellow
    Write-Host "Add or update this entry:" -ForegroundColor Yellow

    # JSON-escape a string value: backslashes first, then quotes. Uses literal
    # String.Replace to avoid regex replacement-string pitfalls.
    function ConvertTo-JsonEscaped([string]$Value) {
        return $Value.Replace('\', '\\').Replace('"', '\"')
    }

    $command = ConvertTo-JsonEscaped $PythonPath
    $serverArg = ConvertTo-JsonEscaped $ServerPath
    $workDir = ConvertTo-JsonEscaped $ScriptDir

    $lines = @(
        '{',
        '  "mcpServers": {',
        '    "zen": {',
        ('      "command": "{0}",' -f $command),
        ('      "args": ["{0}"],' -f $serverArg)
    )

    if ($EnvironmentMap -and $EnvironmentMap.Count -gt 0) {
        $lines += ('      "cwd": "{0}",' -f $workDir)
        $lines += '      "env": {'
        $pairs = $EnvironmentMap.GetEnumerator() | ForEach-Object {
            '        "{0}": "{1}"' -f $_.Key, (ConvertTo-JsonEscaped $_.Value)
        }
        # Entries must be comma-separated to form valid JSON
        $lines += ($pairs -join ",`n")
        $lines += '      }'
    }
    else {
        # No env block: cwd is the last property, so no trailing comma
        $lines += ('      "cwd": "{0}"' -f $workDir)
    }

    $lines += '    }'
    $lines += '  }'
    $lines += '}'

    $lines | ForEach-Object { Write-Host $_ -ForegroundColor Yellow }
}

# Detects the Qwen CLI and offers to register the zen MCP server in
# ~/.qwen/settings.json, preserving any unrelated settings in that file.
# API keys and selected settings are copied from .env / the process
# environment into the server entry's "env" block so the CLI-launched
# server inherits the same configuration.
#
# Parameters:
#   PythonPath - virtual-environment python.exe used to launch the server
#   ServerPath - absolute path to server.py
#
# Fix: the backup guard previously read `Test-Path $configPath -and ...`,
# which passes "-and" to Test-Path as a (nonexistent) parameter and errors
# at runtime; both operands must be parenthesized.
function Test-QwenCliIntegration {
    param([string]$PythonPath, [string]$ServerPath)

    if (!(Test-Command "qwen")) {
        return
    }

    Write-Info "Qwen CLI detected - checking configuration..."

    $configPath = Join-Path $env:USERPROFILE ".qwen\settings.json"
    $configDir = Split-Path $configPath -Parent
    $scriptDir = Split-Path $ServerPath -Parent

    # Why we may (re)prompt: "missing" (no file), "mismatch" (entry differs
    # from the current setup) or "invalid" (file failed to parse)
    $configStatus = "missing"
    $config = @{}

    if (Test-Path $configPath) {
        try {
            # JavaScriptSerializer returns plain IDictionary objects, which are
            # easier to mutate and round-trip than ConvertFrom-Json output
            Add-Type -AssemblyName System.Web.Extensions -ErrorAction SilentlyContinue
            $serializer = New-Object System.Web.Script.Serialization.JavaScriptSerializer
            $serializer.MaxJsonLength = 67108864
            $rawJson = Get-Content $configPath -Raw
            $config = $serializer.DeserializeObject($rawJson)
            if (-not ($config -is [System.Collections.IDictionary])) {
                $config = @{}
            }

            if ($config.ContainsKey('mcpServers') -and $config['mcpServers'] -is [System.Collections.IDictionary]) {
                $servers = $config['mcpServers']
                if ($servers.Contains('zen') -and $servers['zen'] -is [System.Collections.IDictionary]) {
                    $zenConfig = $servers['zen']
                    $commandMatches = ($zenConfig['command'] -eq $PythonPath)

                    # Normalize "args" to an array whether it was stored as a
                    # list, a scalar, or omitted entirely
                    $argsValue = $zenConfig['args']
                    $argsList = @()
                    if ($argsValue -is [System.Collections.IEnumerable] -and $argsValue -isnot [string]) {
                        $argsList = @($argsValue)
                    }
                    elseif ($null -ne $argsValue) {
                        $argsList = @($argsValue)
                    }
                    $argsMatches = ($argsList.Count -eq 1 -and $argsList[0] -eq $ServerPath)

                    $cwdValue = $null
                    if ($zenConfig.Contains('cwd')) {
                        $cwdValue = $zenConfig['cwd']
                    }
                    # A missing cwd is acceptable; only a different one is a mismatch
                    $cwdMatches = ([string]::IsNullOrEmpty($cwdValue) -or $cwdValue -eq $scriptDir)

                    if ($commandMatches -and $argsMatches -and $cwdMatches) {
                        Write-Success "Qwen CLI already configured for zen server"
                        return
                    }

                    $configStatus = "mismatch"
                    Write-Warning "Existing Qwen CLI configuration differs from the current setup."
                }
            }
        }
        catch {
            $configStatus = "invalid"
            Write-Warning "Unable to parse Qwen CLI settings at $configPath ($_)."
            $config = @{}
        }
    }

    # Collect environment variables from .env (ignoring comments, blank lines
    # and untouched "your_..._here" placeholders)
    $envMap = [ordered]@{}
    if (Test-Path ".env") {
        foreach ($line in Get-Content ".env") {
            $trimmed = $line.Trim()
            if ([string]::IsNullOrWhiteSpace($trimmed) -or $trimmed.StartsWith('#')) {
                continue
            }

            if ($line -match '^\s*([^=]+)=(.*)$') {
                $key = $matches[1].Trim()
                $value = $matches[2]
                # Strip trailing inline comments and surrounding double quotes
                $value = ($value -replace '\s+#.*$', '').Trim()
                if ($value.StartsWith('"') -and $value.EndsWith('"')) {
                    $value = $value.Substring(1, $value.Length - 2)
                }
                # Fall back to the live process environment for empty entries
                if ([string]::IsNullOrWhiteSpace($value)) {
                    $value = [Environment]::GetEnvironmentVariable($key, "Process")
                }
                if (![string]::IsNullOrWhiteSpace($value) -and $value -notmatch '^your_.*_here$') {
                    $envMap[$key] = $value
                }
            }
        }
    }

    # Also pick up well-known settings that may only exist in the process
    # environment (e.g. exported by the shell rather than written to .env)
    $extraKeys = @(
        "GEMINI_API_KEY", "OPENAI_API_KEY", "XAI_API_KEY", "DIAL_API_KEY", "OPENROUTER_API_KEY",
        "AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_API_VERSION", "AZURE_OPENAI_ALLOWED_MODELS", "AZURE_MODELS_CONFIG_PATH",
        "CUSTOM_API_URL", "CUSTOM_API_KEY", "CUSTOM_MODEL_NAME", "DEFAULT_MODEL", "GOOGLE_ALLOWED_MODELS",
        "OPENAI_ALLOWED_MODELS", "OPENROUTER_ALLOWED_MODELS", "XAI_ALLOWED_MODELS", "DEFAULT_THINKING_MODE_THINKDEEP",
        "DISABLED_TOOLS", "CONVERSATION_TIMEOUT_HOURS", "MAX_CONVERSATION_TURNS", "LOG_LEVEL", "ZEN_MCP_FORCE_ENV_OVERRIDE"
    )

    foreach ($key in $extraKeys) {
        if (-not $envMap.Contains($key)) {
            $value = [Environment]::GetEnvironmentVariable($key, "Process")
            if (![string]::IsNullOrWhiteSpace($value) -and $value -notmatch '^your_.*_here$') {
                $envMap[$key] = $value
            }
        }
    }

    $prompt = "Configure Zen for Qwen CLI? (y/N)"
    if ($configStatus -eq "mismatch" -or $configStatus -eq "invalid") {
        $prompt = "Update Qwen CLI zen configuration? (y/N)"
    }

    $response = Read-Host $prompt
    if ($response -ne 'y' -and $response -ne 'Y') {
        Write-Info "Skipping Qwen CLI integration"
        Show-QwenManualConfig $PythonPath $ServerPath $scriptDir $configPath $envMap
        return
    }

    if (!(Test-Path $configDir)) {
        New-Item -ItemType Directory -Path $configDir -Force | Out-Null
    }

    # Back up an existing settings file before rewriting it. Both operands
    # MUST be parenthesized: a bare `Test-Path $configPath -and ...` would
    # hand "-and" to Test-Path as a parameter and throw.
    if ((Test-Path $configPath) -and ($configStatus -ne "missing")) {
        Manage-ConfigBackups $configPath | Out-Null
    }

    try {
        if (-not ($config -is [System.Collections.IDictionary])) {
            $config = @{}
        }

        if (-not $config.ContainsKey('mcpServers') -or $config['mcpServers'] -isnot [System.Collections.IDictionary]) {
            $config['mcpServers'] = @{}
        }

        $zenConfig = [ordered]@{
            command = $PythonPath
            args    = @($ServerPath)
            cwd     = $scriptDir
        }

        if ($envMap.Count -gt 0) {
            $zenConfig['env'] = $envMap
        }

        $config['mcpServers']['zen'] = $zenConfig

        $json = ($config | ConvertTo-Json -Depth 20)
        Set-Content -Path $configPath -Value $json -Encoding UTF8

        Write-Success "Successfully configured Qwen CLI"
        Write-Host "  Config: $configPath" -ForegroundColor Gray
        Write-Host "  Restart Qwen CLI to use Zen MCP Server" -ForegroundColor Gray
    }
    catch {
        Write-Error "Failed to update Qwen CLI configuration: $_"
        Show-QwenManualConfig $PythonPath $ServerPath $scriptDir $configPath $envMap
    }
}


# ----------------------------------------------------------------------------
# End MCP Client Configuration System
# ----------------------------------------------------------------------------

# ----------------------------------------------------------------------------
# User Interface Functions
# ----------------------------------------------------------------------------

# Show script help
function Show-Help {
    # Single here-string so the help text renders exactly as authored.
    # NOTE(review): the switches listed here should stay in sync with the
    # script's param() block (outside this view) — verify when adding options.
    Write-Host @"
Zen MCP Server - Setup and Launch Script

USAGE:
.\run-server.ps1 [OPTIONS]

OPTIONS:
-Help                   Show this help message
-Version                Show version information
-Follow                 Follow server logs in real time
-Config                 Show configuration instructions for MCP clients
-ClearCache             Clear Python cache files and exit
-Force                  Force recreation of Python virtual environment
-Dev                    Install development dependencies from requirements-dev.txt
-Docker                 Use Docker instead of Python virtual environment
-SkipVenv              Skip Python virtual environment creation
-SkipDocker            Skip Docker checks and cleanup

EXAMPLES:
.\run-server.ps1                      # Normal startup
.\run-server.ps1 -Follow              # Start and follow logs
.\run-server.ps1 -Config              # Show configuration help
.\run-server.ps1 -Dev                 # Include development dependencies
.\run-server.ps1 -Docker              # Use Docker deployment
.\run-server.ps1 -Docker -Follow      # Docker with log following

For more information, visit: https://github.com/BeehiveInnovations/zen-mcp-server
"@ -ForegroundColor White
}

# Show version information
# Prints the server version plus script attribution lines.
function Show-Version {
    $version = Get-Version
    # (text, console color) pairs, printed in order
    $details = @(
        @("Zen MCP Server version: $version", 'Green'),
        @('PowerShell Setup Script for Windows', 'Cyan'),
        @('Author: GiGiDKR (https://github.com/GiGiDKR)', 'Gray'),
        @('Project: BeehiveInnovations/zen-mcp-server', 'Gray')
    )
    foreach ($entry in $details) {
        Write-Host $entry[0] -ForegroundColor $entry[1]
    }
}

# Show configuration instructions
# Displays manual configuration guidance for every supported MCP client,
# tailored to the chosen deployment mode (Docker container or Python venv).
function Show-ConfigInstructions {
    param(
        [string]$PythonPath = "",
        [string]$ServerPath = "",
        [switch]$UseDocker = $false
    )

    Write-Step "Configuration Instructions"

    if ($UseDocker) {
        Write-Host "Docker Configuration:" -ForegroundColor Yellow
        Write-Host "The MCP clients have been configured to use Docker containers." -ForegroundColor White
        Write-Host "Make sure the Docker container is running with: docker-compose up -d" -ForegroundColor Cyan
        Write-Host ""
    }
    else {
        Write-Host "Python Virtual Environment Configuration:" -ForegroundColor Yellow
        Write-Host "Python Path: $PythonPath" -ForegroundColor Cyan
        Write-Host "Server Path: $ServerPath" -ForegroundColor Cyan
        Write-Host ""
    }

    Write-Host "Supported MCP Clients:" -ForegroundColor Green
    $supportedClients = @(
        'Claude Desktop', 'Claude CLI', 'VSCode (with MCP extension)',
        'VSCode Insiders', 'Cursor', 'Windsurf', 'Trae', 'Gemini CLI', 'Qwen CLI'
    )
    foreach ($clientName in $supportedClients) {
        Write-Host "✓ $clientName" -ForegroundColor White
    }
    Write-Host ""
    Write-Host "The script automatically detects and configures compatible clients." -ForegroundColor Gray
    Write-Host "Restart your MCP clients after configuration to use the Zen MCP Server." -ForegroundColor Yellow
}

# Show setup instructions
# Summarizes the completed setup and the launch details MCP clients will use.
function Show-SetupInstructions {
    param(
        [string]$PythonPath = "",
        [string]$ServerPath = "",
        [switch]$UseDocker = $false
    )

    Write-Step "Setup Complete"

    # Collect mode-specific launch details, then render them uniformly in cyan
    if ($UseDocker) {
        Write-Success "Zen MCP Server is configured for Docker deployment"
        $launchDetails = @("Docker command: docker exec -i zen-mcp-server python server.py")
    }
    else {
        Write-Success "Zen MCP Server is configured for Python virtual environment"
        $launchDetails = @("Python: $PythonPath", "Server: $ServerPath")
    }
    $launchDetails | ForEach-Object { Write-Host $_ -ForegroundColor Cyan }

    Write-Host ""
    Write-Host "MCP clients will automatically connect to the server." -ForegroundColor Green
    Write-Host "For manual configuration, use the paths shown above." -ForegroundColor Gray
}

# Start the server
# Launches the MCP server in the foreground using the venv interpreter.
# Requires that setup has already created the virtual environment.
function Start-Server {
    Write-Step "Starting Zen MCP Server"

    $interpreter = "$VENV_PATH\Scripts\python.exe"
    if (!(Test-Path $interpreter)) {
        Write-Error "Python virtual environment not found. Please run setup first."
        return
    }

    $script = "server.py"
    if (!(Test-Path $script)) {
        Write-Error "Server script not found: $script"
        return
    }

    try {
        Write-Info "Launching server..."
        & $interpreter $script
    }
    catch {
        Write-Error "Failed to start server: $_"
    }
}

# Follow server logs
# Tails the server log file in real time; when no log exists yet, starts
# the server instead so logs get generated.
function Follow-Logs {
    Write-Step "Following Server Logs"

    $logFile = Join-Path $LOG_DIR $LOG_FILE

    if (!(Test-Path $logFile)) {
        Write-Warning "Log file not found: $logFile"
        Write-Info "Starting server to generate logs..."
        Start-Server
        return
    }

    try {
        Write-Info "Following logs at: $logFile"
        Write-Host "Press Ctrl+C to stop following logs"
        Write-Host ""
        # -Wait blocks and streams new lines as they are appended
        Get-Content $logFile -Wait
    }
    catch {
        Write-Error "Failed to follow logs: $_"
    }
}

# ----------------------------------------------------------------------------
# Environment File Management
# ----------------------------------------------------------------------------

# Initialize .env file if it doesn't exist
# Creates a template .env file with placeholder API keys on first run;
# leaves an existing .env untouched.
function Initialize-EnvFile {
    Write-Step "Setting up Environment File"

    if (Test-Path ".env") {
        Write-Success ".env file already exists"
        return
    }

    Write-Info "Creating default .env file..."
    $template = @"
# API Keys - Replace with your actual keys
GEMINI_API_KEY=your_gemini_api_key_here
GOOGLE_API_KEY=your_google_api_key_here
OPENAI_API_KEY=your_openai_api_key_here
ANTHROPIC_API_KEY=your_anthropic_api_key_here
XAI_API_KEY=your_xai_api_key_here
DIAL_API_KEY=your_dial_api_key_here
DIAL_API_HOST=your_dial_api_host_here
DIAL_API_VERSION=your_dial_api_version_here
OPENROUTER_API_KEY=your_openrouter_api_key_here
CUSTOM_API_URL=your_custom_api_url_here
CUSTOM_API_KEY=your_custom_api_key_here
CUSTOM_MODEL_NAME=your_custom_model_name_here

# Server Configuration
DEFAULT_MODEL=auto
LOG_LEVEL=INFO
LOG_MAX_SIZE=10MB
LOG_BACKUP_COUNT=5
DEFAULT_THINKING_MODE_THINKDEEP=high

# Optional Advanced Settings
#DISABLED_TOOLS=
#MAX_MCP_OUTPUT_TOKENS=
#TZ=UTC
"@
    $template | Out-File -FilePath ".env" -Encoding UTF8

    Write-Success "Default .env file created"
    Write-Warning "Please edit .env file with your actual API keys"
}

# Import environment variables from .env file
# Loads KEY=value pairs from .env into the current process environment so
# later steps (API key validation, server launch) can read them.
function Import-EnvFile {
    if (!(Test-Path ".env")) {
        Write-Warning "No .env file found"
        return
    }

    try {
        Get-Content ".env" -ErrorAction Stop | ForEach-Object {
            # First char must not be '#' (skips comment lines); capture name/value
            if ($_ -match '^([^#][^=]*?)=(.*)$') {
                $name = $matches[1].Trim()
                # Strip one layer of surrounding single or double quotes
                $rawValue = $matches[2].Trim() -replace '^["'']|["'']$', ''

                # Scoped to this process only; nothing is persisted
                [Environment]::SetEnvironmentVariable($name, $rawValue, "Process")
            }
        }
        Write-Success "Environment variables loaded from .env file"
    }
    catch {
        Write-Warning "Could not load .env file: $_"
    }
}

# ----------------------------------------------------------------------------
# Workflow Functions
# ----------------------------------------------------------------------------

# Docker deployment workflow
# End-to-end Docker deployment workflow: validates Docker prerequisites,
# prepares the environment, builds the image, registers the server with MCP
# clients (docker-run style), then starts the container. Calls `exit` on
# unrecoverable failures, so execution never continues in a broken state.
# Reads script-level switches: $Force (forwarded to the image build) and
# $Follow (stream container logs after start).
function Invoke-DockerWorkflow {
    Write-Step "Starting Docker Workflow"
    Write-Host "Zen MCP Server" -ForegroundColor Green
    Write-Host "=================" -ForegroundColor Cyan
    
    $version = Get-Version
    Write-Host "Version: $version"
    Write-Host "Mode: Docker Container" -ForegroundColor Yellow
    Write-Host ""
    
    # Docker setup and validation (each failed step aborts the script)
    if (!(Test-DockerRequirements)) { exit 1 }
    if (!(Initialize-DockerEnvironment)) { exit 1 }
    
    Import-EnvFile
    Test-ApiKeys
    
    if (!(Build-DockerImage -Force:$Force)) { exit 1 }
    
    # Configure MCP clients for Docker
    Invoke-McpClientConfiguration -UseDocker $true
    
    Show-SetupInstructions -UseDocker
    
    # Start Docker services
    Write-Step "Starting Zen MCP Server"
    if ($Follow) {
        # Blocks while streaming logs, then ends the script successfully
        Write-Info "Starting server and following logs..."
        Start-DockerServices -Follow
        exit 0
    }
    
    if (!(Start-DockerServices)) { exit 1 }
    
    Write-Host ""
    Write-Success "Zen MCP Server is running in Docker!"
    Write-Host ""
    
    Write-Host "Next steps:" -ForegroundColor Cyan
    Write-Host "1. Restart your MCP clients (Claude Desktop, etc.)" -ForegroundColor White
    Write-Host "2. The server is now ready to use" -ForegroundColor White
    Write-Host ""
    Write-Host "Useful commands:" -ForegroundColor Cyan
    Write-Host "  View logs: " -NoNewline -ForegroundColor White
    Write-Host "docker logs -f zen-mcp-server" -ForegroundColor Yellow
    Write-Host "  Stop server: " -NoNewline -ForegroundColor White
    Write-Host "docker-compose down" -ForegroundColor Yellow
    Write-Host "  Restart server: " -NoNewline -ForegroundColor White
    Write-Host "docker-compose restart" -ForegroundColor Yellow
}

# Python virtual environment deployment workflow
# Default deployment workflow: sets up the Python virtual environment,
# installs dependencies, registers the server with MCP clients, and then
# either follows logs (-Follow) or offers to start the server interactively.
# Calls `exit 1` when environment setup or dependency installation fails.
# Reads script-level switches: $Dev (install dev dependencies) and $Follow.
function Invoke-PythonWorkflow {
    Write-Step "Starting Python Virtual Environment Workflow"
    Write-Host "Zen MCP Server" -ForegroundColor Green
    Write-Host "=================" -ForegroundColor Cyan
    
    $version = Get-Version
    Write-Host "Version: $version"
    Write-Host ""
    
    if (!(Test-Path $VENV_PATH)) {
        Write-Info "Setting up Python environment for first time..."
    }
    
    # Python environment setup: clear Docker leftovers and stale caches,
    # then make sure .env exists and its values are loaded
    Cleanup-Docker
    Clear-PythonCache
    Initialize-EnvFile
    Import-EnvFile
    Test-ApiKeys
    
    try {
        $pythonPath = Initialize-Environment
    }
    catch {
        Write-Error "Failed to setup Python environment: $_"
        exit 1
    }
    
    try {
        Install-Dependencies $pythonPath -InstallDevDependencies:$Dev
    }
    catch {
        Write-Error "Failed to install dependencies: $_"
        exit 1
    }
    
    $serverPath = Get-AbsolutePath "server.py"
    
    # Configure MCP clients for Python
    Invoke-McpClientConfiguration -UseDocker $false -PythonPath $pythonPath -ServerPath $serverPath
    
    Show-SetupInstructions $pythonPath $serverPath
    Initialize-Logging
    
    Write-Host ""
    Write-Host "Logs will be written to: $(Get-AbsolutePath $LOG_DIR)\$LOG_FILE"
    Write-Host ""
    
    if ($Follow) {
        Follow-Logs
    }
    else {
        Write-Host "To follow logs: .\run-server.ps1 -Follow" -ForegroundColor Yellow
        Write-Host "To show config: .\run-server.ps1 -Config" -ForegroundColor Yellow
        Write-Host "To update: git pull, then run .\run-server.ps1 again" -ForegroundColor Yellow
        Write-Host ""
        Write-Host "Happy coding! 🎉" -ForegroundColor Green
        
        # Interactive prompt; anything other than y/Y leaves the server stopped
        $response = Read-Host "`nStart the server now? (y/N)"
        if ($response -eq 'y' -or $response -eq 'Y') {
            Start-Server
        }
    }
}

# ----------------------------------------------------------------------------
# End Workflow Functions
# ----------------------------------------------------------------------------

# ----------------------------------------------------------------------------
# Main Execution
# ----------------------------------------------------------------------------

# Main execution function
# Top-level dispatcher. Informational switches (-Help, -Version, -ClearCache,
# -Config) are handled first and each exits immediately; otherwise the Docker
# or Python deployment workflow runs. Order matters: -Help wins over all
# other switches, and -Docker modifies both -Config and the default workflow.
function Start-MainProcess {
    # Parse command line arguments
    if ($Help) {
        Show-Help
        exit 0
    }
    
    if ($Version) {
        Show-Version  
        exit 0
    }
    
    if ($ClearCache) {
        Clear-PythonCache
        Write-Success "Cache cleared successfully"
        Write-Host ""
        Write-Host "You can now run '.\run-server.ps1' normally"
        exit 0
    }
    
    if ($Config) {
        # Setup minimal environment to get paths for config display
        Write-Info "Setting up environment for configuration display..."
        Write-Host ""
        try {
            if ($Docker) {
                # Docker configuration mode
                if (!(Test-DockerRequirements)) {
                    exit 1
                }
                Initialize-DockerEnvironment
                Show-ConfigInstructions "" "" -UseDocker
            }
            else {
                # Python virtual environment configuration mode
                $pythonPath = Initialize-Environment
                $serverPath = Get-AbsolutePath "server.py"
                Show-ConfigInstructions $pythonPath $serverPath
            }
        }
        catch {
            Write-Error "Failed to setup environment for configuration: $_"
            exit 1
        }
        exit 0
    }

    # ============================================================================
    # Docker Workflow
    # ============================================================================
    if ($Docker) {
        Invoke-DockerWorkflow
        exit 0
    }

    # ============================================================================
    # Python Virtual Environment Workflow (Default)
    # ============================================================================
    Invoke-PythonWorkflow
    exit 0
}

# ============================================================================
# Main Script Execution
# ============================================================================

# Script entry point: dispatch based on the command-line switches parsed above
Start-MainProcess

```
Page 18/19FirstPrevNextLast