This is page 28 of 45. Use http://codebase.md/dicklesworthstone/llm_gateway_mcp_server?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .cursorignore
├── .env.example
├── .envrc
├── .gitignore
├── additional_features.md
├── check_api_keys.py
├── completion_support.py
├── comprehensive_test.py
├── docker-compose.yml
├── Dockerfile
├── empirically_measured_model_speeds.json
├── error_handling.py
├── example_structured_tool.py
├── examples
│   ├── __init__.py
│   ├── advanced_agent_flows_using_unified_memory_system_demo.py
│   ├── advanced_extraction_demo.py
│   ├── advanced_unified_memory_system_demo.py
│   ├── advanced_vector_search_demo.py
│   ├── analytics_reporting_demo.py
│   ├── audio_transcription_demo.py
│   ├── basic_completion_demo.py
│   ├── cache_demo.py
│   ├── claude_integration_demo.py
│   ├── compare_synthesize_demo.py
│   ├── cost_optimization.py
│   ├── data
│   │   ├── sample_event.txt
│   │   ├── Steve_Jobs_Introducing_The_iPhone_compressed.md
│   │   └── Steve_Jobs_Introducing_The_iPhone_compressed.mp3
│   ├── docstring_refiner_demo.py
│   ├── document_conversion_and_processing_demo.py
│   ├── entity_relation_graph_demo.py
│   ├── filesystem_operations_demo.py
│   ├── grok_integration_demo.py
│   ├── local_text_tools_demo.py
│   ├── marqo_fused_search_demo.py
│   ├── measure_model_speeds.py
│   ├── meta_api_demo.py
│   ├── multi_provider_demo.py
│   ├── ollama_integration_demo.py
│   ├── prompt_templates_demo.py
│   ├── python_sandbox_demo.py
│   ├── rag_example.py
│   ├── research_workflow_demo.py
│   ├── sample
│   │   ├── article.txt
│   │   ├── backprop_paper.pdf
│   │   ├── buffett.pdf
│   │   ├── contract_link.txt
│   │   ├── legal_contract.txt
│   │   ├── medical_case.txt
│   │   ├── northwind.db
│   │   ├── research_paper.txt
│   │   ├── sample_data.json
│   │   └── text_classification_samples
│   │       ├── email_classification.txt
│   │       ├── news_samples.txt
│   │       ├── product_reviews.txt
│   │       └── support_tickets.txt
│   ├── sample_docs
│   │   └── downloaded
│   │       └── attention_is_all_you_need.pdf
│   ├── sentiment_analysis_demo.py
│   ├── simple_completion_demo.py
│   ├── single_shot_synthesis_demo.py
│   ├── smart_browser_demo.py
│   ├── sql_database_demo.py
│   ├── sse_client_demo.py
│   ├── test_code_extraction.py
│   ├── test_content_detection.py
│   ├── test_ollama.py
│   ├── text_classification_demo.py
│   ├── text_redline_demo.py
│   ├── tool_composition_examples.py
│   ├── tournament_code_demo.py
│   ├── tournament_text_demo.py
│   ├── unified_memory_system_demo.py
│   ├── vector_search_demo.py
│   ├── web_automation_instruction_packs.py
│   └── workflow_delegation_demo.py
├── LICENSE
├── list_models.py
├── marqo_index_config.json.example
├── mcp_protocol_schema_2025-03-25_version.json
├── mcp_python_lib_docs.md
├── mcp_tool_context_estimator.py
├── model_preferences.py
├── pyproject.toml
├── quick_test.py
├── README.md
├── resource_annotations.py
├── run_all_demo_scripts_and_check_for_errors.py
├── storage
│   └── smart_browser_internal
│       ├── locator_cache.db
│       ├── readability.js
│       └── storage_state.enc
├── test_client.py
├── test_connection.py
├── TEST_README.md
├── test_sse_client.py
├── test_stdio_client.py
├── tests
│   ├── __init__.py
│   ├── conftest.py
│   ├── integration
│   │   ├── __init__.py
│   │   └── test_server.py
│   ├── manual
│   │   ├── test_extraction_advanced.py
│   │   └── test_extraction.py
│   └── unit
│       ├── __init__.py
│       ├── test_cache.py
│       ├── test_providers.py
│       └── test_tools.py
├── TODO.md
├── tool_annotations.py
├── tools_list.json
├── ultimate_mcp_banner.webp
├── ultimate_mcp_logo.webp
├── ultimate_mcp_server
│   ├── __init__.py
│   ├── __main__.py
│   ├── cli
│   │   ├── __init__.py
│   │   ├── __main__.py
│   │   ├── commands.py
│   │   ├── helpers.py
│   │   └── typer_cli.py
│   ├── clients
│   │   ├── __init__.py
│   │   ├── completion_client.py
│   │   └── rag_client.py
│   ├── config
│   │   └── examples
│   │       └── filesystem_config.yaml
│   ├── config.py
│   ├── constants.py
│   ├── core
│   │   ├── __init__.py
│   │   ├── evaluation
│   │   │   ├── base.py
│   │   │   └── evaluators.py
│   │   ├── providers
│   │   │   ├── __init__.py
│   │   │   ├── anthropic.py
│   │   │   ├── base.py
│   │   │   ├── deepseek.py
│   │   │   ├── gemini.py
│   │   │   ├── grok.py
│   │   │   ├── ollama.py
│   │   │   ├── openai.py
│   │   │   └── openrouter.py
│   │   ├── server.py
│   │   ├── state_store.py
│   │   ├── tournaments
│   │   │   ├── manager.py
│   │   │   ├── tasks.py
│   │   │   └── utils.py
│   │   └── ums_api
│   │       ├── __init__.py
│   │       ├── ums_database.py
│   │       ├── ums_endpoints.py
│   │       ├── ums_models.py
│   │       └── ums_services.py
│   ├── exceptions.py
│   ├── graceful_shutdown.py
│   ├── services
│   │   ├── __init__.py
│   │   ├── analytics
│   │   │   ├── __init__.py
│   │   │   ├── metrics.py
│   │   │   └── reporting.py
│   │   ├── cache
│   │   │   ├── __init__.py
│   │   │   ├── cache_service.py
│   │   │   ├── persistence.py
│   │   │   ├── strategies.py
│   │   │   └── utils.py
│   │   ├── cache.py
│   │   ├── document.py
│   │   ├── knowledge_base
│   │   │   ├── __init__.py
│   │   │   ├── feedback.py
│   │   │   ├── manager.py
│   │   │   ├── rag_engine.py
│   │   │   ├── retriever.py
│   │   │   └── utils.py
│   │   ├── prompts
│   │   │   ├── __init__.py
│   │   │   ├── repository.py
│   │   │   └── templates.py
│   │   ├── prompts.py
│   │   └── vector
│   │       ├── __init__.py
│   │       ├── embeddings.py
│   │       └── vector_service.py
│   ├── tool_token_counter.py
│   ├── tools
│   │   ├── __init__.py
│   │   ├── audio_transcription.py
│   │   ├── base.py
│   │   ├── completion.py
│   │   ├── docstring_refiner.py
│   │   ├── document_conversion_and_processing.py
│   │   ├── enhanced-ums-lookbook.html
│   │   ├── entity_relation_graph.py
│   │   ├── excel_spreadsheet_automation.py
│   │   ├── extraction.py
│   │   ├── filesystem.py
│   │   ├── html_to_markdown.py
│   │   ├── local_text_tools.py
│   │   ├── marqo_fused_search.py
│   │   ├── meta_api_tool.py
│   │   ├── ocr_tools.py
│   │   ├── optimization.py
│   │   ├── provider.py
│   │   ├── pyodide_boot_template.html
│   │   ├── python_sandbox.py
│   │   ├── rag.py
│   │   ├── redline-compiled.css
│   │   ├── sentiment_analysis.py
│   │   ├── single_shot_synthesis.py
│   │   ├── smart_browser.py
│   │   ├── sql_databases.py
│   │   ├── text_classification.py
│   │   ├── text_redline_tools.py
│   │   ├── tournament.py
│   │   ├── ums_explorer.html
│   │   └── unified_memory_system.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── async_utils.py
│   │   ├── display.py
│   │   ├── logging
│   │   │   ├── __init__.py
│   │   │   ├── console.py
│   │   │   ├── emojis.py
│   │   │   ├── formatter.py
│   │   │   ├── logger.py
│   │   │   ├── panels.py
│   │   │   ├── progress.py
│   │   │   └── themes.py
│   │   ├── parse_yaml.py
│   │   ├── parsing.py
│   │   ├── security.py
│   │   └── text.py
│   └── working_memory_api.py
├── unified_memory_system_technical_analysis.md
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/ultimate_mcp_server/utils/display.py:
--------------------------------------------------------------------------------

```python
   1 | """Display utilities for the Ultimate MCP Server.
   2 | 
   3 | This module contains reusable display functions for formatting and
   4 | presenting results from Ultimate MCP Server operations using Rich.
   5 | """
   6 | 
   7 | import json
   8 | import os
   9 | import time
  10 | 
  11 | # --- Filesystem Tool Display Helper ---
  12 | from pathlib import Path
  13 | from typing import Any, Dict, List, Optional, Union
  14 | 
  15 | from rich import box
  16 | from rich.console import (
  17 |     Capture,  # For capturing table output
  18 |     Console,
  19 | )
  20 | from rich.markup import escape
  21 | from rich.panel import Panel
  22 | from rich.pretty import pretty_repr
  23 | from rich.rule import Rule
  24 | from rich.syntax import Syntax
  25 | from rich.table import Table
  26 | from rich.tree import Tree
  27 | 
  28 | # Import the console for consistent styling
  29 | from ultimate_mcp_server.utils.logging.console import console
  30 | 
  31 | from ..exceptions import ToolError, ToolInputError
  32 | 
  33 | # Direct import of the filesystem tool
  34 | from ..tools.filesystem import list_directory
  35 | from .logging.logger import get_logger  # Specific logger import
  36 | 
  37 | try:
  38 |     # Import only the exception needed by safe_tool_call
  39 |     from ..tools.filesystem import ProtectionTriggeredError
  40 | except ImportError:
  41 |     # Handle case where filesystem tools might not be installed/available
  42 |     # Define a dummy exception class if ProtectionTriggeredError cannot be imported
  43 |     class ProtectionTriggeredError(Exception):
  44 |         def __init__(self, message, context=None):
  45 |             super().__init__(message)
  46 |             self.context = context if context is not None else {}
  47 | 
  48 | def extract_and_parse_content(result: Any) -> Dict[str, Any]:
  49 |     """
  50 |     Extract content from various result formats and parse JSON if present.
  51 |     This handles TextContent objects, lists of TextContent, and other formats.
  52 |     
  53 |     Args:
  54 |         result: Result object that might be TextContent, list, dict, etc.
  55 |         
  56 |     Returns:
  57 |         Dictionary with parsed data or error information
  58 |     """
  59 |     # Handle list of objects (common in MCP responses)
  60 |     if isinstance(result, list):
  61 |         if not result:
  62 |             return {"error": "Empty result list"}
  63 |         # Just use the first item for now (we could process all in the future)
  64 |         result = result[0]
  65 |     
  66 |     # Extract text from TextContent object
  67 |     text_content = ""
  68 |     if hasattr(result, 'text'):
  69 |         text_content = result.text
  70 |     elif isinstance(result, str):
  71 |         text_content = result
  72 |     elif isinstance(result, dict):
  73 |         return result  # Already a dict, no need to parse
  74 |     else:
  75 |         # Convert other types to string representation
  76 |         text_content = str(result)
  77 |     
  78 |     # Try to parse as JSON
  79 |     if text_content:
  80 |         try:
  81 |             parsed_data = json.loads(text_content)
  82 |             return parsed_data
  83 |         except json.JSONDecodeError:
  84 |             # Not JSON, return as raw text
  85 |             return {"raw_text": text_content, "error": "Not valid JSON"}
  86 |     
  87 |     # Empty content
  88 |     return {"error": "Empty content"}
  89 | 
  90 | 
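# --- Illustrative usage sketch (added for documentation; not part of the original file) ---
# A minimal example of how extract_and_parse_content behaves with the formats it accepts.
# _FakeTextContent is a hypothetical stand-in for an MCP TextContent object.
def _example_extract_and_parse_usage():
    class _FakeTextContent:
        def __init__(self, text: str):
            self.text = text

    # A JSON payload inside a TextContent-like object is parsed into a dict.
    assert extract_and_parse_content(_FakeTextContent('{"summary": "ok"}')) == {"summary": "ok"}
    # Non-JSON text is returned as raw text with an explanatory error marker.
    assert extract_and_parse_content("plain text") == {"raw_text": "plain text", "error": "Not valid JSON"}
    # An empty result list yields an error dict rather than raising.
    assert extract_and_parse_content([]) == {"error": "Empty result list"}
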
  91 | def display_text_content_result(
  92 |     title: str, 
  93 |     result: Any, 
  94 |     console_instance: Optional[Console] = None
  95 | ):
  96 |     """
  97 |     Display results from TextContent objects in a robust way that is useful for demos.
  98 |     This function tolerates a variety of result formats and handles TextContent
  99 |     objects whose text payload is a JSON string.
 100 |     
 101 |     Args:
 102 |         title: Title to display for this result section
 103 |         result: Result object from an Ultimate MCP Server tool call (often a TextContent)
 104 |         console_instance: Optional console instance to use (defaults to shared console)
 105 |     """
 106 |     # Use provided console or default to shared console
 107 |     output = console_instance or console
 108 |     
 109 |     # Display section title
 110 |     output.print(Rule(f"[bold blue]{escape(title)}[/bold blue]"))
 111 |     
 112 |     # Extract and parse content
 113 |     parsed_data = extract_and_parse_content(result)
 114 |     
 115 |     # Check for extraction errors
 116 |     if "error" in parsed_data and "raw_text" in parsed_data:
 117 |         # Error parsing JSON, display as text
 118 |         output.print(Panel(
 119 |             escape(parsed_data["raw_text"]),
 120 |             title="[bold]Result Text[/bold]",
 121 |             border_style="green"
 122 |         ))
 123 |         return
 124 |     elif "error" in parsed_data and "raw_text" not in parsed_data:
 125 |         # Other error
 126 |         output.print(f"[red]{escape(parsed_data['error'])}[/red]")
 127 |         return
 128 |     
 129 |     # Display based on content type
 130 |     if isinstance(parsed_data, dict):
 131 |         # Special handling for QA pairs
 132 |         if "qa_pairs" in parsed_data and isinstance(parsed_data["qa_pairs"], list):
 133 |             qa_pairs = parsed_data["qa_pairs"]
 134 |             output.print(Panel(
 135 |                 "\n".join([f"[bold]Q{i+1}:[/bold] {escape(pair.get('question', 'N/A'))}\n[bold]A{i+1}:[/bold] {escape(pair.get('answer', 'N/A'))}" 
 136 |                         for i, pair in enumerate(qa_pairs)]),
 137 |                 title="[bold]Q&A Pairs[/bold]", 
 138 |                 border_style="blue"
 139 |             ))
 140 |         # Special handling for entities
 141 |         elif "entities" in parsed_data:
 142 |             entities_data = parsed_data["entities"]
 143 |             if isinstance(entities_data, dict):
 144 |                 # If it's a dict with entity types as keys
 145 |                 entity_count = 0
 146 |                 entity_table = Table(box=box.ROUNDED)
 147 |                 entity_table.add_column("Type", style="cyan")
 148 |                 entity_table.add_column("Entity", style="white")
 149 |                 
 150 |                 for entity_type, entities in entities_data.items():
 151 |                     if entities:
 152 |                         for entity in entities:
 153 |                             entity_text = entity if isinstance(entity, str) else entity.get('text', str(entity))
 154 |                             entity_table.add_row(entity_type, escape(entity_text))
 155 |                             entity_count += 1
 156 |                 
 157 |                 if entity_count > 0:
 158 |                     output.print(entity_table)
 159 |                 else:
 160 |                     output.print("[yellow]No entities found in the document.[/yellow]")
 161 |             else:
 162 |                 # If it's some other format, just show the raw data
 163 |                 output.print(Panel(
 164 |                     escape(json.dumps(entities_data, indent=2)),
 165 |                     title="[bold]Entities Data[/bold]",
 166 |                     border_style="blue"
 167 |                 ))
 168 |         # Summary
 169 |         elif "summary" in parsed_data and isinstance(parsed_data["summary"], str):
 170 |             output.print(Panel(
 171 |                 escape(parsed_data["summary"]),
 172 |                 title="[bold]Generated Summary[/bold]",
 173 |                 border_style="green"
 174 |             ))
 175 |         # Generic JSON display for other data
 176 |         else:
 177 |             # Filter out stats fields for cleaner display
 178 |             display_data = {k: v for k, v in parsed_data.items() 
 179 |                            if k not in ["model", "provider", "cost", "tokens", "processing_time"]}
 180 |             
 181 |             # Only show JSON panel if we have data to display
 182 |             if display_data:
 183 |                 output.print(Panel(
 184 |                     escape(json.dumps(display_data, indent=2)),
 185 |                     title="[bold]Result Data[/bold]",
 186 |                     border_style="blue"
 187 |                 ))
 188 |         
 189 |         # Display stats if available
 190 |         if any(k in parsed_data for k in ["model", "provider", "cost", "tokens", "processing_time"]):
 191 |             _display_stats(parsed_data, output)
 192 |     else:
 193 |         # For other types (arrays, etc.)
 194 |         output.print(Panel(
 195 |             escape(json.dumps(parsed_data, indent=2)),
 196 |             title="[bold]Result Data[/bold]",
 197 |             border_style="blue"
 198 |         ))
 199 | 
 200 | 
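# --- Illustrative usage sketch (added for documentation; not part of the original file) ---
# Shows how a demo script might render a tool result. The JSON payload below is invented;
# any dict-shaped JSON with a "summary" or "qa_pairs" key gets the specialised rendering.
def _example_display_text_content_result():
    fake_json = json.dumps({
        "summary": "A short generated summary.",
        "model": "example-model",
        "provider": "example-provider",
        "cost": 0.000123,
    })
    # A raw JSON string works because extract_and_parse_content also accepts plain strings.
    display_text_content_result("Example Summary Result", fake_json)
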
 201 | def _display_input_data(input_data: Dict, output: Console):
 202 |     """
 203 |     Display input data with consistent formatting.
 204 |     
 205 |     This function formats and displays various types of input data using the Rich
 206 |     console library. It handles text content, JSON schemas, search queries, and
 207 |     embedding vectors, adjusting the display format appropriately for each type.
 208 |     
 209 |     Args:
 210 |         input_data: Dictionary containing input data to display. May include keys
 211 |                    like 'text', 'json_schema', 'query', and 'embeddings'.
 212 |         output: Rich Console instance to use for printing formatted output.
 213 |     """
 214 |     # Display input text if available
 215 |     if "text" in input_data:
 216 |         text_snippet = input_data["text"][:500] + ("..." if len(input_data["text"]) > 500 else "")
 217 |         output.print(Panel(
 218 |             escape(text_snippet), 
 219 |             title="[cyan]Input Text Snippet[/cyan]", 
 220 |             border_style="dim blue"
 221 |         ))
 222 |     
 223 |     # Display schema if available
 224 |     if "json_schema" in input_data and input_data["json_schema"]:
 225 |         try:
 226 |             schema_json = json.dumps(input_data["json_schema"], indent=2)
 227 |             output.print(Panel(
 228 |                 Syntax(schema_json, "json", theme="default", line_numbers=False), 
 229 |                 title="[cyan]Input Schema[/cyan]", 
 230 |                 border_style="dim blue"
 231 |             ))
 232 |         except Exception as e:
 233 |             output.print(f"[red]Could not display schema: {escape(str(e))}[/red]")
 234 |     
 235 |     # Display query if available (for search results)
 236 |     if "query" in input_data:
 237 |         output.print(Panel(
 238 |             escape(input_data["query"]), 
 239 |             title="[cyan]Search Query[/cyan]", 
 240 |             border_style="dim blue"
 241 |         ))
 242 |         
 243 |     # Display embeddings/vectors if available
 244 |     if "embeddings" in input_data:
 245 |         if isinstance(input_data["embeddings"], list) and len(input_data["embeddings"]) > 0:
 246 |             sample = input_data["embeddings"][0]
 247 |             dims = len(sample) if isinstance(sample, (list, tuple)) else "unknown"
 248 |             sample_str = str(sample[:3]) + "..." if isinstance(sample, (list, tuple)) else str(sample)
 249 |             output.print(Panel(
 250 |                 f"[cyan]Dimensions:[/cyan] {dims}\n[cyan]Sample:[/cyan] {escape(sample_str)}", 
 251 |                 title="[cyan]Embedding Sample[/cyan]", 
 252 |                 border_style="dim blue"
 253 |             ))
 254 | 
 255 | 
 256 | def _parse_and_display_output(result: Any, output: Console):
 257 |     """
 258 |     Parse result object and display appropriate visualizations.
 259 |     
 260 |     This function examines the structure of a result object and determines the best
 261 |     way to display it based on its content type. It automatically extracts and formats
 262 |     different data types like JSON data, vector search results, tables, key-value pairs,
 263 |     entity data, and embeddings.
 264 |     
 265 |     The function serves as an intelligent formatter that routes different content types
 266 |     to specialized display handlers that can present each type with appropriate
 267 |     rich formatting and visualization.
 268 |     
 269 |     Args:
 270 |         result: The result object to parse and display. Can be a list, dict, object
 271 |                with a 'text' attribute, or other structures.
 272 |         output: Rich Console instance to use for displaying the formatted content.
 273 |                
 274 |     Note:
 275 |         This is an internal utility function used by higher-level display functions
 276 |         to handle the details of content extraction and formatting.
 277 |     """
 278 |     # Extract result content
 279 |     parsed_result = {}
 280 |     raw_text = None
 281 |     
 282 |     # Handle list results (take first item)
 283 |     if isinstance(result, list) and result:
 284 |         result = result[0]
 285 |         
 286 |     # Handle object with text attribute
 287 |     if hasattr(result, 'text'):
 288 |         raw_text = result.text
 289 |         try:
 290 |             parsed_result = json.loads(raw_text)
 291 |         except json.JSONDecodeError:
 292 |             parsed_result = {"error": "Failed to parse JSON", "raw_text": raw_text}
 293 |     
 294 |     # Handle dictionary result
 295 |     elif isinstance(result, dict):
 296 |         parsed_result = result
 297 |     
 298 |     # Handle unknown result type
 299 |     else:
 300 |         parsed_result = {"error": f"Unexpected result type: {type(result)}"}
 301 |     
 302 |     # Display results based on content
 303 |     _display_result_content(parsed_result, output)
 304 | 
 305 | 
 306 | def _display_result_content(parsed_result: Dict, output: Console):
 307 |     """
 308 |     Display the content of results with appropriate formatting.
 309 |     
 310 |     This function intelligently selects appropriate display handlers for different
 311 |     types of result content. It checks for various data types (JSON data, vector search
 312 |     results, tables, key-value pairs, entities, embeddings, etc.) and routes the content
 313 |     to specialized display functions.
 314 |     
 315 |     Args:
 316 |         parsed_result: Dictionary containing parsed result data with various possible
 317 |                       content types to display.
 318 |         output: Rich Console instance to use for printing formatted output.
 319 |     """
 320 |     # Check for errors first
 321 |     if parsed_result.get("error"):
 322 |         _display_error(parsed_result, output)
 323 |         return
 324 |     
 325 |     # Display different result types
 326 |     
 327 |     # JSON Data
 328 |     if "data" in parsed_result and parsed_result["data"] is not None:
 329 |         _display_json_data(parsed_result["data"], "Extracted JSON Data", output)
 330 |     
 331 |     # Vector Search Results
 332 |     if "results" in parsed_result and isinstance(parsed_result["results"], list):
 333 |         _display_vector_results(parsed_result["results"], output)
 334 |     
 335 |     # Tables
 336 |     if "tables" in parsed_result and parsed_result["tables"]:
 337 |         _display_tables(parsed_result["tables"], output)
 338 |     
 339 |     # Key-Value Pairs
 340 |     if "key_value_pairs" in parsed_result or "pairs" in parsed_result:
 341 |         pairs = parsed_result.get("key_value_pairs", parsed_result.get("pairs", {}))
 342 |         _display_key_value_pairs(pairs, output)
 343 |     
 344 |     # Semantic Schema
 345 |     if "schema" in parsed_result and parsed_result["schema"]:
 346 |         _display_json_data(parsed_result["schema"], "Inferred Semantic Schema", output)
 347 |     
 348 |     # Entities
 349 |     if "entities" in parsed_result and parsed_result["entities"]:
 350 |         _display_entities(parsed_result["entities"], output)
 351 |     
 352 |     # Embeddings
 353 |     if "embeddings" in parsed_result and parsed_result["embeddings"]:
 354 |         _display_embeddings_info(parsed_result["embeddings"], 
 355 |                                 parsed_result.get("model", "unknown"),
 356 |                                 output)
 357 |     
 358 |     # Display execution stats if available
 359 |     _display_stats(parsed_result, output)
 360 | 
 361 | 
 362 | def _display_error(result: Dict, output: Console):
 363 |     """
 364 |     Display error information.
 365 |     
 366 |     This function formats and displays error information in a visually distinct way.
 367 |     It creates a red-bordered panel containing the error message and optional raw text
 368 |     output for debugging purposes.
 369 |     
 370 |     Args:
 371 |         result: Dictionary containing error information. Should include an 'error' key
 372 |                and optionally a 'raw_text' key with the original output.
 373 |         output: Rich Console instance to use for printing formatted output.
 374 |     """
 375 |     error_content = f"[red]Error:[/red] {escape(result['error'])}"
 376 |     if result.get("raw_text"):
 377 |         error_content += f"\n\n[yellow]Raw Text Output:[/yellow]\n{escape(result['raw_text'])}"
 378 |     output.print(Panel(
 379 |         error_content, 
 380 |         title="[bold red]Tool Error[/bold red]", 
 381 |         border_style="red"
 382 |     ))
 383 | 
 384 | 
 385 | def _display_json_data(data: Any, title: str, output: Console):
 386 |     """
 387 |     Display JSON data with proper formatting.
 388 |     
 389 |     This function formats and displays JSON data with syntax highlighting and proper
 390 |     indentation. It handles JSON serialization errors gracefully and displays the data
 391 |     in a visually appealing panel with a descriptive title.
 392 |     
 393 |     Args:
 394 |         data: Any data structure that can be serialized to JSON.
 395 |         title: Title string to display above the JSON content.
 396 |         output: Rich Console instance to use for printing formatted output.
 397 |     """
 398 |     try:
 399 |         data_json = json.dumps(data, indent=2)
 400 |         output.print(Panel(
 401 |             Syntax(data_json, "json", theme="default", line_numbers=True, word_wrap=True),
 402 |             title=f"[bold green]{title}[/bold green]",
 403 |             border_style="green"
 404 |         ))
 405 |     except Exception as e:
 406 |         output.print(f"[red]Could not display JSON data: {escape(str(e))}[/red]")
 407 | 
 408 | 
 409 | def _display_vector_results(results: List[Dict], output: Console):
 410 |     """
 411 |     Display vector search results.
 412 |     
 413 |     This function creates and displays a formatted table showing vector search results,
 414 |     including IDs, similarity scores, metadata, and text snippets. It automatically 
 415 |     adapts the table columns based on the structure of the first result item, handling
 416 |     various metadata fields dynamically.
 417 |     
 418 |     Args:
 419 |         results: List of dictionaries containing vector search results. Each dictionary
 420 |                 typically includes 'id', 'similarity' or 'score', optional 'metadata',
 421 |                 and 'text' fields.
 422 |         output: Rich Console instance to use for printing formatted output.
 423 |     """
 424 |     results_table = Table(title="[bold green]Vector Search Results[/bold green]", box=box.ROUNDED)
 425 |     
 426 |     # Determine columns based on first result
 427 |     if not results:
 428 |         output.print("[yellow]No vector search results to display[/yellow]")
 429 |         return
 430 |     
 431 |     first_result = results[0]
 432 |     
 433 |     # Add standard columns
 434 |     results_table.add_column("ID", style="cyan")
 435 |     results_table.add_column("Score", style="green", justify="right")
 436 |     
 437 |     # Add metadata columns if available
 438 |     metadata_keys = []
 439 |     if "metadata" in first_result and isinstance(first_result["metadata"], dict):
 440 |         metadata_keys = list(first_result["metadata"].keys())
 441 |         for key in metadata_keys:
 442 |             results_table.add_column(key.capitalize(), style="magenta")
 443 |     
 444 |     # Add text column
 445 |     results_table.add_column("Text", style="white")
 446 |     
 447 |     # Add rows
 448 |     for item in results:
 449 |         row = [
 450 |             escape(str(item.get("id", ""))),
 451 |             f"{item.get('similarity', item.get('score', 0.0)):.4f}"
 452 |         ]
 453 |         
 454 |         # Add metadata values
 455 |         if metadata_keys:
 456 |             metadata = item.get("metadata", {})
 457 |             for key in metadata_keys:
 458 |                 row.append(escape(str(metadata.get(key, ""))))
 459 |         
 460 |         # Add text
 461 |         text = item.get("text", "")
 462 |         text_snippet = text[:80] + ("..." if len(text) > 80 else "")
 463 |         row.append(escape(text_snippet))
 464 |         
 465 |         results_table.add_row(*row)
 466 |     
 467 |     output.print(results_table)
 468 | 
 469 | 
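# --- Illustrative usage sketch (added for documentation; not part of the original file) ---
# Mirrors the structure _display_vector_results expects: a list of dicts carrying "id",
# a "similarity" (or "score") value, optional "metadata", and "text". Values are invented.
def _example_display_vector_results():
    sample_results = [
        {
            "id": "doc-1",
            "similarity": 0.9213,
            "metadata": {"source": "example.txt"},
            "text": "First matching chunk of text from the corpus.",
        },
        {
            "id": "doc-2",
            "score": 0.8744,
            "metadata": {"source": "example.txt"},
            "text": "Second matching chunk of text from the corpus.",
        },
    ]
    _display_vector_results(sample_results, console)
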
 470 | def _display_tables(tables: List[Dict], output: Console):
 471 |     """
 472 |     Display extracted tables.
 473 |     
 474 |     This function formats and displays extracted table data in multiple formats
 475 |     (JSON, Markdown) with appropriate syntax highlighting. It includes table titles
 476 |     and associated metadata when available.
 477 |     
 478 |     Args:
 479 |         tables: List of dictionaries containing table information. Each dictionary may
 480 |                include 'title', 'json', 'markdown', and 'metadata' fields.
 481 |         output: Rich Console instance to use for printing formatted output.
 482 |     """
 483 |     for i, table_info in enumerate(tables):
 484 |         table_title = table_info.get('title', f'Table {i+1}')
 485 |         output.print(Rule(f"[green]Extracted: {escape(table_title)}[/green]"))
 486 |         
 487 |         # JSON format
 488 |         if table_info.get("json"):
 489 |             try:
 490 |                 table_json = json.dumps(table_info["json"], indent=2)
 491 |                 output.print(Panel(
 492 |                     Syntax(table_json, "json", theme="default", line_numbers=False, word_wrap=True),
 493 |                     title="[bold]JSON Format[/bold]",
 494 |                     border_style="dim green"
 495 |                 ))
 496 |             except Exception as e:
 497 |                 output.print(f"[red]Could not display table JSON: {escape(str(e))}[/red]")
 498 |         
 499 |         # Markdown format
 500 |         if table_info.get("markdown"):
 501 |             output.print(Panel(
 502 |                 Syntax(table_info["markdown"], "markdown", theme="default"),
 503 |                 title="[bold]Markdown Format[/bold]",
 504 |                 border_style="dim green"
 505 |             ))
 506 |         
 507 |         # Metadata
 508 |         if table_info.get("metadata"):
 509 |             try:
 510 |                 meta_json = json.dumps(table_info["metadata"], indent=2)
 511 |                 output.print(Panel(
 512 |                     Syntax(meta_json, "json", theme="default", line_numbers=False),
 513 |                     title="[bold]Metadata[/bold]",
 514 |                     border_style="dim green"
 515 |                 ))
 516 |             except Exception as e:
 517 |                 output.print(f"[red]Could not display metadata: {escape(str(e))}[/red]")
 518 | 
 519 | 
 520 | def _display_key_value_pairs(pairs: Union[Dict, List], output: Console):
 521 |     """
 522 |     Display key-value pairs in a table.
 523 |     
 524 |     This function creates and displays a formatted table showing key-value pairs.
 525 |     It handles both dictionary and list inputs, adapting the display format
 526 |     appropriately for each case.
 527 |     
 528 |     Args:
 529 |         pairs: Dictionary of key-value pairs or list of dictionaries containing
 530 |               key-value pairs to display.
 531 |         output: Rich Console instance to use for printing formatted output.
 532 |     """
 533 |     kv_table = Table(title="[bold green]Extracted Key-Value Pairs[/bold green]", box=box.ROUNDED)
 534 |     kv_table.add_column("Key", style="magenta")
 535 |     kv_table.add_column("Value", style="white")
 536 |     
 537 |     if isinstance(pairs, dict):
 538 |         for k, v in pairs.items():
 539 |             kv_table.add_row(escape(str(k)), escape(str(v)))
 540 |     elif isinstance(pairs, list):
 541 |         for item in pairs:
 542 |             if isinstance(item, dict):
 543 |                 for k, v in item.items():
 544 |                     kv_table.add_row(escape(str(k)), escape(str(v)))
 545 |     
 546 |     if kv_table.row_count > 0:
 547 |         output.print(kv_table)
 548 | 
 549 | 
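# --- Illustrative usage sketch (added for documentation; not part of the original file) ---
# _display_key_value_pairs accepts either a flat dict or a list of dicts; both calls below
# produce the same two-column table. The field names and values are invented.
def _example_display_key_value_pairs():
    _display_key_value_pairs({"invoice_number": "INV-001", "total": "$42.00"}, console)
    _display_key_value_pairs([{"invoice_number": "INV-001"}, {"total": "$42.00"}], console)
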
 550 | def _display_entities(entities: List[Dict], output: Console):
 551 |     """
 552 |     Display extracted entities.
 553 |     
 554 |     This function creates and displays a formatted table showing extracted entities,
 555 |     including their type, text, context snippet, and confidence score. It's optimized
 556 |     for displaying named entity recognition (NER) results.
 557 |     
 558 |     Args:
 559 |         entities: List of dictionaries containing entity information. Each dictionary
 560 |                  typically includes 'type', 'text', 'context', and 'score' fields.
 561 |         output: Rich Console instance to use for printing formatted output.
 562 |     """
 563 |     entity_table = Table(title="[bold green]Extracted Entities[/bold green]", box=box.ROUNDED)
 564 |     entity_table.add_column("Type", style="cyan")
 565 |     entity_table.add_column("Text", style="white")
 566 |     entity_table.add_column("Context", style="dim")
 567 |     entity_table.add_column("Score", style="green", justify="right")
 568 |     
 569 |     for entity in entities:
 570 |         context_snippet = entity.get("context", "")[:50] + ("..." if len(entity.get("context", "")) > 50 else "")
 571 |         score_str = f"{entity.get('score', 0.0):.2f}" if entity.get('score') is not None else "N/A"
 572 |         
 573 |         entity_table.add_row(
 574 |             escape(entity.get("type", "N/A")),
 575 |             escape(entity.get("text", "N/A")),
 576 |             escape(context_snippet),
 577 |             score_str
 578 |         )
 579 |     
 580 |     output.print(entity_table)
 581 | 
 582 | 
 583 | def _display_embeddings_info(embeddings: List, model: str, output: Console):
 584 |     """
 585 |     Display information about embeddings.
 586 |     
 587 |     This function creates and displays a summary table of embedding information,
 588 |     including model name, embedding count, dimensions, and sample values. It handles
 589 |     edge cases like empty embedding lists and non-numeric embedding values.
 590 |     
 591 |     Args:
 592 |         embeddings: List of embedding vectors. Each vector is typically a list of
 593 |                   floating-point numbers.
 594 |         model: Name of the embedding model used to generate the embeddings.
 595 |         output: Rich Console instance to use for printing formatted output.
 596 |     """
 597 |     if not isinstance(embeddings, list) or len(embeddings) == 0:
 598 |         return
 599 |     
 600 |     # Just display summary info about the embeddings
 601 |     sample = embeddings[0]
 602 |     dims = len(sample) if isinstance(sample, (list, tuple)) else "unknown"
 603 |     
 604 |     embed_table = Table(title="[bold green]Embedding Information[/bold green]", box=box.MINIMAL)
 605 |     embed_table.add_column("Property", style="cyan")
 606 |     embed_table.add_column("Value", style="white")
 607 |     
 608 |     embed_table.add_row("Model", escape(model))
 609 |     embed_table.add_row("Count", str(len(embeddings)))
 610 |     embed_table.add_row("Dimensions", str(dims))
 611 |     
 612 |     # Show a few values from first embedding
 613 |     if isinstance(sample, (list, tuple)) and len(sample) > 0:
 614 |         sample_values = sample[:3]
 615 |         try:
 616 |             # Try to round values if they're numeric
 617 |             rounded_values = [round(x, 6) for x in sample_values]
 618 |             sample_str = str(rounded_values) + "..."
 619 |         except (TypeError, ValueError):
 620 |             sample_str = str(sample_values) + "..."
 621 |         embed_table.add_row("Sample Values", escape(sample_str))
 622 |     
 623 |     output.print(embed_table)
 624 | 
 625 | 
 626 | def _display_stats(result: Dict, output: Console):
 627 |     """
 628 |     Display execution statistics.
 629 |     
 630 |     This function creates and displays a summary table of execution statistics,
 631 |     including provider, model, cost, token usage, and processing time. It only
 632 |     displays statistics that are actually present in the input data.
 633 |     
 634 |     Args:
 635 |         result: Dictionary containing execution statistics. May include keys like
 636 |                'provider', 'model', 'cost', 'tokens', and 'processing_time'.
 637 |         output: Rich Console instance to use for printing formatted output.
 638 |     """
 639 |     # Check if we have stats data
 640 |     has_stats = any(k in result for k in ["model", "provider", "cost", "tokens", "processing_time"])
 641 |     if not has_stats:
 642 |         return
 643 |     
 644 |     stats_table = Table(title="Execution Stats", box=box.MINIMAL, show_header=False)
 645 |     stats_table.add_column("Metric", style="cyan")
 646 |     stats_table.add_column("Value", style="white")
 647 |     
 648 |     if "provider" in result:
 649 |         stats_table.add_row("Provider", escape(result.get("provider", "N/A")))
 650 |     
 651 |     if "model" in result:
 652 |         stats_table.add_row("Model", escape(result.get("model", "N/A")))
 653 |     
 654 |     if "cost" in result:
 655 |         stats_table.add_row("Cost", f"${result.get('cost', 0.0):.6f}")
 656 |     
 657 |     if "tokens" in result:
 658 |         tokens = result.get("tokens", {})
 659 |         if isinstance(tokens, dict):
 660 |             stats_table.add_row(
 661 |                 "Tokens (In/Out/Total)", 
 662 |                 f"{tokens.get('input', 0)} / {tokens.get('output', 0)} / {tokens.get('total', 0)}"
 663 |             )
 664 |     
 665 |     if "processing_time" in result:
 666 |         stats_table.add_row("Processing Time", f"{result.get('processing_time', 0.0):.3f}s")
 667 |     
 668 |     if stats_table.row_count > 0:
 669 |         output.print(stats_table)
 670 |     
 671 |     # Add a blank line after stats
 672 |     output.print()
 673 | 
 674 | 
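# --- Illustrative usage sketch (added for documentation; not part of the original file) ---
# The stat keys _display_stats looks for, including the nested token structure it expects.
# All values are invented for demonstration.
def _example_display_stats():
    _display_stats(
        {
            "provider": "example-provider",
            "model": "example-model",
            "cost": 0.000321,
            "tokens": {"input": 150, "output": 42, "total": 192},
            "processing_time": 1.234,
        },
        console,
    )
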
 675 | # Specialized display functions for different demo types
 676 | 
 677 | def display_embedding_generation_results(results_data: Dict, output: Optional[Console] = None):
 678 |     """
 679 |     Display embedding generation results in a formatted table.
 680 |     
 681 |     This function creates a rich, formatted table visualization of embedding generation 
 682 |     results from multiple models. It organizes and presents key information including 
 683 |     model names, embedding dimensions, generation times, costs, sample values, and 
 684 |     success status for each embedding model.
 685 |     
 686 |     The visualization is designed to help users compare embedding results across different
 687 |     models and providers at a glance, making it easier to evaluate performance, cost,
 688 |     and quality differences between embedding options.
 689 |     
 690 |     Args:
 691 |         results_data: Dictionary containing embedding generation results. Expected to
 692 |                      contain a 'models' key with a list of model result dictionaries.
 693 |                      Each model result should include fields like 'name', 'dimensions',
 694 |                      'time', 'cost', 'embedding_sample', and 'success'.
 695 |         output: Optional Rich Console instance to use for display. If not provided,
 696 |                 uses the default shared console.
 697 |     
 698 |     Note:
 699 |         If the results_data dictionary doesn't contain a 'models' key or the list is empty,
 700 |         the function will display a warning message instead of a table.
 701 |     """
 702 |     display = output or console
 703 |     
 704 |     if not results_data.get("models"):
 705 |         display.print("[yellow]No embedding results to display[/yellow]")
 706 |         return
 707 |     
 708 |     results_table = Table(title="Embedding Generation Results", box=box.ROUNDED, show_header=True)
 709 |     results_table.add_column("Model", style="magenta")
 710 |     results_table.add_column("Dimensions", style="cyan", justify="right")
 711 |     results_table.add_column("Gen Time (s)", style="yellow", justify="right")
 712 |     results_table.add_column("Cost ($)", style="green", justify="right")
 713 |     results_table.add_column("Sample Values", style="dim")
 714 |     results_table.add_column("Status", style="white")
 715 |     
 716 |     for model_info in results_data["models"]:
 717 |         status_str = "[green]Success[/green]" if model_info.get("success") else "[red]Failed[/red]"
 718 |         
 719 |         # Format sample values if available
 720 |         sample_str = "N/A"
 721 |         if model_info.get("embedding_sample") is not None:
 722 |             sample_str = escape(str(model_info["embedding_sample"]) + "...")
 723 |         
 724 |         results_table.add_row(
 725 |             escape(model_info.get("name", "Unknown")),
 726 |             str(model_info.get("dimensions", "-")),
 727 |             f"{model_info.get('time', 0.0):.3f}",
 728 |             f"{model_info.get('cost', 0.0):.6f}",
 729 |             sample_str,
 730 |             status_str
 731 |         )
 732 |     
 733 |     display.print(results_table)
 734 |     display.print()
 735 | 
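# --- Illustrative usage sketch (added for documentation; not part of the original file) ---
# Shape of the results_data dict this function expects: a "models" list whose entries carry
# name, dimensions, time, cost, embedding_sample, and success. All values are invented.
def _example_display_embedding_generation_results():
    display_embedding_generation_results({
        "models": [
            {
                "name": "example-embedding-model",
                "dimensions": 1536,
                "time": 0.412,
                "cost": 0.00002,
                "embedding_sample": [0.0123, -0.0456, 0.0789],
                "success": True,
            },
        ]
    })
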
 736 | def display_vector_similarity_results(similarity_data: Dict, output: Optional[Console] = None):
 737 |     """
 738 |     Display semantic similarity scores between text pairs in a formatted table.
 739 |     
 740 |     This function creates a rich, visually appealing table visualization of semantic
 741 |     similarity results between text pairs. It extracts and presents information about
 742 |     each compared text pair and their corresponding similarity score, making it easy
 743 |     to see which text segments are semantically related.
 744 |     
 745 |     The table includes columns for text snippets from each pair (truncated if too long)
 746 |     and their corresponding similarity score. This visualization is particularly useful
 747 |     for comparing multiple text pairs at once and identifying patterns of semantic
 748 |     relatedness across a dataset.
 749 |     
 750 |     Args:
 751 |         similarity_data: Dictionary containing semantic similarity results. Expected
 752 |                         to contain a 'pairs' key with a list of comparison result
 753 |                         dictionaries. Each pair should include 'text1', 'text2', and
 754 |                         'score' fields.
 755 |         output: Optional Rich Console instance to use for display. If not provided,
 756 |                 uses the default shared console.
 757 |     
 758 |     Note:
 759 |         If the similarity_data dictionary doesn't contain valid pairs data or the list
 760 |         is empty, the function will display a warning message instead of a table.
 761 |         Similarity scores are displayed with 4 decimal places of precision.
 762 |     """
 763 |     display = output or console
 764 |     
 765 |     pairs = similarity_data.get("pairs", [])
 766 |     if not pairs or not isinstance(pairs, list) or len(pairs) == 0:
 767 |         display.print("[yellow]No similarity data to display[/yellow]")
 768 |         return
 769 |     
 770 |     similarity_table = Table(title="Semantic Similarity Scores", box=box.ROUNDED, show_header=True)
 771 |     similarity_table.add_column("Text 1 Snippet", style="white")
 772 |     similarity_table.add_column("Text 2 Snippet", style="white")
 773 |     similarity_table.add_column("Similarity Score", style="green", justify="right")
 774 |     
 775 |     for pair in pairs:
 776 |         text1 = pair.get("text1", "")[:50] + ("..." if len(pair.get("text1", "")) > 50 else "")
 777 |         text2 = pair.get("text2", "")[:50] + ("..." if len(pair.get("text2", "")) > 50 else "")
 778 |         score = pair.get("score", 0.0)
 779 |         
 780 |         # If score is a numpy array, convert to scalar
 781 |         try:
 782 |             if hasattr(score, 'item'):  # Check if it's potentially a numpy scalar
 783 |                 score = score.item()
 784 |         except (AttributeError, TypeError):
 785 |             pass
 786 |             
 787 |         similarity_table.add_row(
 788 |             escape(text1),
 789 |             escape(text2),
 790 |             f"{score:.4f}"
 791 |         )
 792 |     
 793 |     display.print(similarity_table)
 794 |     display.print()
 795 | 
 796 | 
 797 | def display_analytics_metrics(metrics_data: Dict, output: Optional[Console] = None):
 798 |     """
 799 |     Display analytics metrics in a readable, structured format.
 800 |     
 801 |     This function takes a dictionary of analytics metrics data and displays it using
 802 |     the Rich library: request counts appear in a summary overview table, and each
 803 |     request distribution is rendered as its own table.
 804 |     
 805 |     Args:
 806 |         metrics_data: Dictionary containing analytics metrics data
 807 |         output: Optional Rich Console instance to use for display. If not provided,
 808 |                 the default console will be used.
 809 |     """
 810 |     # Use provided console or default
 811 |     output = output or console
 812 | 
 813 |     # Check required data
 814 |     if not metrics_data or not isinstance(metrics_data, dict):
 815 |         output.print("[yellow]No analytics metrics data to display[/yellow]")
 816 |         return
 817 |     
 818 |     # Display section header
 819 |     output.print(Rule("[bold blue]Analytics Metrics[/bold blue]"))
 820 |     
 821 |     # Create metrics table
 822 |     metrics_table = Table(title="[bold]Metrics Overview[/bold]", box=box.ROUNDED)
 823 |     metrics_table.add_column("Metric", style="cyan")
 824 |     metrics_table.add_column("Count", style="green", justify="right")
 825 |     metrics_table.add_column("Details", style="dim")
 826 |     
 827 |     # Process data
 828 |     if "request_counts" in metrics_data:
 829 |         for metric, count in metrics_data["request_counts"].items():
 830 |             metrics_table.add_row(
 831 |                 metric.replace("_", " ").title(),
 832 |                 str(count),
 833 |                 ""
 834 |             )
 835 |     
 836 |     # Display table
 837 |     output.print(metrics_table)
 838 |     
 839 |     # Display any grouped metrics
 840 |     if "request_distributions" in metrics_data:
 841 |         for group_name, distribution in metrics_data["request_distributions"].items():
 842 |             distribution_table = Table(
 843 |                 title=f"[bold]{group_name.replace('_', ' ').title()} Distribution[/bold]",
 844 |                 box=box.SIMPLE
 845 |             )
 846 |             distribution_table.add_column("Category", style="cyan")
 847 |             distribution_table.add_column("Count", style="green", justify="right")
 848 |             distribution_table.add_column("Percentage", style="yellow", justify="right")
 849 |             
 850 |             total = sum(distribution.values())
 851 |             for category, count in distribution.items():
 852 |                 percentage = (count / total) * 100 if total > 0 else 0
 853 |                 distribution_table.add_row(
 854 |                     category,
 855 |                     str(count),
 856 |                     f"{percentage:.1f}%"
 857 |                 )
 858 |             
 859 |             output.print(distribution_table)
 860 | 
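# --- Illustrative usage sketch (added for documentation; not part of the original file) ---
# A minimal metrics_data payload with both sections this function understands: flat counters
# under "request_counts" and grouped counters under "request_distributions". The metric
# names and numbers are invented.
def _example_display_analytics_metrics():
    display_analytics_metrics({
        "request_counts": {"total_requests": 120, "cache_hits": 45},
        "request_distributions": {
            "provider_usage": {"openai": 80, "anthropic": 40},
        },
    })
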
 861 | # --- Tournament Display Functions ---
 862 | 
 863 | def display_tournament_status(status_data: Dict[str, Any], output: Optional[Console] = None):
 864 |     """
 865 |     Display tournament status in a clearly formatted view using Rich.
 866 |     
 867 |     This function takes a dictionary containing tournament status information
 868 |     and displays it in a formatted table using the Rich library. The table
 869 |     includes the tournament status, current round, total rounds, progress
 870 |     percentage, and timestamps if available.
 871 |     
 872 |     Args:
 873 |         status_data: Dictionary with tournament status information
 874 |         output: Optional console to use (defaults to shared console)
 875 |     """
 876 |     # Use provided console or default
 877 |     display = output or console
 878 |     
 879 |     # Extract status information
 880 |     status = status_data.get("status", "UNKNOWN")
 881 |     current_round = status_data.get("current_round", 0)
 882 |     total_rounds = status_data.get("total_rounds", 0)
 883 |     
 884 |     # Calculate progress percentage
 885 |     if total_rounds > 0:
 886 |         progress = (current_round / total_rounds) * 100
 887 |     else:
 888 |         progress = 0
 889 |         
 890 |     # Create status table with improved formatting
 891 |     status_table = Table(box=box.SIMPLE, show_header=False, expand=False)
 892 |     status_table.add_column("Metric", style="cyan")
 893 |     status_table.add_column("Value", style="white")
 894 |     
 895 |     # Add status row with color based on status value
 896 |     status_color = "green" if status == "COMPLETED" else "yellow" if status == "RUNNING" else "red"
 897 |     status_table.add_row("Status", f"[bold {status_color}]{status}[/bold {status_color}]")
 898 |     
 899 |     # Add rounds progress
 900 |     status_table.add_row("Round", f"{current_round}/{total_rounds}")
 901 |     
 902 |     # Add progress percentage
 903 |     status_table.add_row("Progress", f"[green]{progress:.1f}%[/green]")
 904 |     
 905 |     # Add timestamps if available
 906 |     if "created_at" in status_data:
 907 |         status_table.add_row("Created", status_data.get("created_at", "N/A").replace("T", " ").split(".")[0])
 908 |     if "updated_at" in status_data:
 909 |         status_table.add_row("Last Updated", status_data.get("updated_at", "N/A").replace("T", " ").split(".")[0])
 910 |     
 911 |     display.print(status_table)
 912 |     
 913 |     # Add progress bar visual for better UX
 914 |     if status == "RUNNING":
 915 |         from rich.progress import BarColumn, Progress, TextColumn
 916 |         progress_bar = Progress(
 917 |             TextColumn("[progress.description]{task.description}"),
 918 |             BarColumn(),
 919 |             TextColumn("[progress.percentage]{task.percentage:>3.0f}%")
 920 |         )
 921 |         
 922 |         with progress_bar:
 923 |             task = progress_bar.add_task("Tournament Progress", total=100, completed=progress)  # noqa: F841
 924 |             # Just show the bar visualization, don't actually wait/update
 925 | 
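# --- Illustrative usage sketch (added for documentation; not part of the original file) ---
# A minimal status_data payload; "status" drives the colour coding, and the timestamps are
# optional ISO-8601 strings. All values are invented.
def _example_display_tournament_status():
    display_tournament_status({
        "status": "RUNNING",
        "current_round": 2,
        "total_rounds": 5,
        "created_at": "2024-01-01T12:00:00.000000",
        "updated_at": "2024-01-01T12:05:00.000000",
    })
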
 926 | def display_tournament_results(results_data: Dict[str, Any], output: Optional[Console] = None):
 927 |     """
 928 |     Display tournament results in a clearly formatted view using Rich.
 929 |     
 930 |     This function takes a dictionary containing tournament results and displays
 931 |     it in a formatted table using the Rich library. The table includes the
 932 |     tournament name, type, final status, total rounds, storage path, models
 933 |     used, and execution stats if available.
 934 |     
 935 |     Args:
 936 |         results_data: Dictionary with tournament results
 937 |         output: Optional console to use (defaults to shared console)
 938 |     """
 939 |     # Use provided console or default
 940 |     display = output or console
 941 |     
 942 |     # Display section title
 943 |     display.print(Rule("[bold blue]Tournament Results[/bold blue]"))
 944 |     
 945 |     # Create summary table
 946 |     summary_table = Table(
 947 |         title="[bold green]Final Results Summary[/bold green]", 
 948 |         box=box.ROUNDED, 
 949 |         show_header=False,
 950 |         expand=False
 951 |     )
 952 |     summary_table.add_column("Metric", style="cyan", no_wrap=True)
 953 |     summary_table.add_column("Value", style="white")
 954 | 
 955 |     # Add tournament information
 956 |     summary_table.add_row("Tournament Name", escape(results_data.get('config', {}).get('name', 'N/A')))
 957 |     summary_table.add_row("Tournament Type", escape(results_data.get('config', {}).get('tournament_type', 'N/A')))
 958 |     summary_table.add_row("Final Status", f"[bold green]{escape(results_data.get('status', 'N/A'))}[/bold green]")
 959 |     summary_table.add_row("Total Rounds", str(results_data.get('config', {}).get('rounds', 'N/A')))
 960 |     
 961 |     # Add storage path if available
 962 |     storage_path = results_data.get("storage_path")
 963 |     summary_table.add_row("Storage Path", escape(storage_path) if storage_path else "[dim]Not available[/dim]")
 964 |     
 965 |     # Display summary table
 966 |     display.print(summary_table)
 967 |     
 968 |     # Display models used in tournament
 969 |     models = results_data.get('config', {}).get('models', [])
 970 |     if models:
 971 |         model_table = Table(title="[bold]Models Used[/bold]", box=box.SIMPLE, show_header=True)
 972 |         model_table.add_column("Provider", style="magenta")
 973 |         model_table.add_column("Model", style="blue")
 974 |         
 975 |         for model_config in models:
 976 |             model_id = model_config.get('model_id', 'N/A')
 977 |             if ':' in model_id:
 978 |                 provider, model = model_id.split(':', 1)
 979 |                 model_table.add_row(provider, model)
 980 |             else:
 981 |                 model_table.add_row("Unknown", model_id)
 982 |         
 983 |         display.print(model_table)
 984 |     
 985 |     # Display execution stats if available
 986 |     if any(key in results_data for key in ["processing_time", "cost", "tokens"]):
 987 |         _display_stats(results_data, display)
 988 | 
 989 | def display_completion_result(
 990 |     console: Console, 
 991 |     result: Any, 
 992 |     title: str = "Completion Result"
 993 | ):
 994 |     """
 995 |     Display a completion result with stats.
 996 |     
 997 |     This function takes a completion result and displays it in a formatted panel
 998 |     using the Rich library. The panel includes the completion text and various
 999 |     stats such as input tokens, output tokens, total tokens, cost, and processing
1000 |     time if available.
1001 |     
1002 |     Args:
1003 |         console: Rich console to print to
1004 |         result: Completion result to display
1005 |         title: Title for the result panel
1006 |     """
1007 |     # Display the completion text
1008 |     console.print(Panel(
1009 |         result.text.strip(),
1010 |         title=title,
1011 |         border_style="green",
1012 |         expand=False
1013 |     ))
1014 |     
1015 |     # Display stats
1016 |     stats_table = Table(title="Completion Stats", show_header=False, box=None)
1017 |     stats_table.add_column("Metric", style="green")
1018 |     stats_table.add_column("Value", style="white")
1019 |     
1020 |     # Add standard metrics if they exist
1021 |     if hasattr(result, "input_tokens"):
1022 |         stats_table.add_row("Input Tokens", str(result.input_tokens))
1023 |     if hasattr(result, "output_tokens"):
1024 |         stats_table.add_row("Output Tokens", str(result.output_tokens))
1025 |     if hasattr(result, "total_tokens"):
1026 |         stats_table.add_row("Total Tokens", str(result.total_tokens))
1027 |     if hasattr(result, "cost"):
1028 |         stats_table.add_row("Cost", f"${result.cost:.6f}")
1029 |     if hasattr(result, "processing_time"):
1030 |         stats_table.add_row("Processing Time", f"{result.processing_time:.3f}s")
1031 |     
1032 |     console.print(stats_table)
1033 | 
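# --- Illustrative usage sketch (added for documentation; not part of the original file) ---
# display_completion_result only needs an object exposing .text plus whichever stat
# attributes happen to be present. _FakeCompletion is a hypothetical stand-in for a
# provider completion result.
def _example_display_completion_result():
    class _FakeCompletion:
        text = "The generated completion text."
        input_tokens = 120
        output_tokens = 35
        total_tokens = 155
        cost = 0.000456
        processing_time = 0.87

    display_completion_result(console, _FakeCompletion(), title="Sample Completion")
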
1034 | def display_cache_stats(
1035 |     stats: Dict[str, Any], 
1036 |     stats_log: Optional[Dict[int, Dict[str, int]]] = None,
1037 |     console: Optional[Console] = None
1038 | ):
1039 |     """
1040 |     Display cache statistics in a formatted table.
1041 |     
1042 |     This function takes a dictionary of cache statistics and displays it in a
1043 |     formatted table using the Rich library. The table includes information such
1044 |     as cache enabled status, persistence, hit rate, total gets, cache hits,
1045 |     cache misses, total sets, and estimated savings if available.
1046 |     
1047 |     Args:
1048 |         stats: Cache statistics dictionary
1049 |         stats_log: Optional log of statistics at different stages
1050 |         console: Rich console to print to (creates one if None)
1051 |     """
1052 |     if console is None:
1053 |         from ultimate_mcp_server.utils.logging.console import console
1054 |     
1055 |     # Create the stats table
1056 |     stats_table = Table(title="Cache Statistics", box=box.SIMPLE)
1057 |     stats_table.add_column("Metric", style="cyan")
1058 |     stats_table.add_column("Value", style="white")
1059 |     
1060 |     # Add enabled state
1061 |     stats_table.add_row(
1062 |         "Cache Enabled",
1063 |         "[green]Yes[/green]" if stats.get("enabled", False) else "[red]No[/red]"
1064 |     )
1065 |     
1066 |     # Add persistence information
1067 |     stats_table.add_row(
1068 |         "Persistence",
1069 |         "[green]Enabled[/green]" if stats.get("persistence", False) else "[yellow]Disabled[/yellow]"
1070 |     )
1071 |     
1072 |     # Add hit and miss counts
1073 |     cache_stats = stats.get("stats", {})
1074 |     stats_table.add_row("Total Gets", str(cache_stats.get("get_count", 0)))
1075 |     stats_table.add_row("Cache Hits", str(cache_stats.get("hit_count", 0)))
1076 |     stats_table.add_row("Cache Misses", str(cache_stats.get("miss_count", 0)))
1077 |     stats_table.add_row("Total Sets", str(cache_stats.get("set_count", 0)))
1078 |     
1079 |     # Calculate hit rate
1080 |     gets = cache_stats.get("get_count", 0)
1081 |     hits = cache_stats.get("hit_count", 0)
1082 |     hit_rate = (hits / gets) * 100 if gets > 0 else 0
1083 |     stats_table.add_row("Hit Rate", f"{hit_rate:.1f}%")
1084 |     
1085 |     # Add estimated savings if available
1086 |     if "savings" in stats:
1087 |         savings = stats["savings"]
1088 |         if isinstance(savings, dict) and "cost" in savings:
1089 |             stats_table.add_row("Cost Savings", f"${savings['cost']:.6f}")
1090 |         if isinstance(savings, dict) and "time" in savings:
1091 |             stats_table.add_row("Time Savings", f"{savings['time']:.3f}s")
1092 |     
1093 |     console.print(stats_table)
1094 |     
1095 |     # Display changes over time if stats_log is provided
1096 |     if stats_log and len(stats_log) > 1:
1097 |         changes_table = Table(title="Cache Changes During Demo", box=box.SIMPLE)
1098 |         changes_table.add_column("Stage", style="cyan")
1099 |         changes_table.add_column("Gets", style="white")
1100 |         changes_table.add_column("Hits", style="green")
1101 |         changes_table.add_column("Misses", style="yellow")
1102 |         changes_table.add_column("Sets", style="blue")
1103 |         
1104 |         for stage, stage_stats in sorted(stats_log.items()):
1105 |             changes_table.add_row(
1106 |                 f"Step {stage}",
1107 |                 str(stage_stats.get("get_count", 0)),
1108 |                 str(stage_stats.get("hit_count", 0)),
1109 |                 str(stage_stats.get("miss_count", 0)),
1110 |                 str(stage_stats.get("set_count", 0))
1111 |             )
1112 |         
1113 |         console.print(changes_table)
1114 | 
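# A minimal usage sketch for display_cache_stats (editor's illustration; the
# dictionary shapes below are hypothetical and simply mirror the keys this
# function reads).
def _example_display_cache_stats() -> None:
    demo_stats = {
        "enabled": True,
        "persistence": True,
        "stats": {"get_count": 10, "hit_count": 7, "miss_count": 3, "set_count": 3},
        "savings": {"cost": 0.0123, "time": 4.2},
    }
    demo_log = {
        1: {"get_count": 4, "hit_count": 2, "miss_count": 2, "set_count": 2},
        2: {"get_count": 10, "hit_count": 7, "miss_count": 3, "set_count": 3},
    }
    display_cache_stats(demo_stats, stats_log=demo_log)
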
1115 | def parse_and_display_result(
1116 |     title: str, 
1117 |     input_data: Dict, 
1118 |     result: Any,
1119 |     console: Optional[Console] = None
1120 | ):
1121 |     """
1122 |     Parse and display extraction results.
1123 |     
1124 |     This function takes a title, input data, and extraction result, and displays
1125 |     the extracted data in a formatted panel using the Rich library. The function
1126 |     supports various extraction formats such as JSON, tables, and entity data.
1127 |     
1128 |     Args:
1129 |         title: Title for the display
1130 |         input_data: Input data used for the extraction
1131 |         result: Extraction result
1132 |         console: Rich console to print to (creates one if None)
1133 |     """
1134 |     if console is None:
1135 |         from ultimate_mcp_server.utils.logging.console import console
1136 |     
1137 |     console.print(Rule(f"[bold blue]{title}[/bold blue]"))
1138 |     
1139 |     # Check for errors first
1140 |     if "error" in result and result["error"]:
1141 |         console.print(f"[bold red]Error:[/bold red] {result['error']}")
1142 |         if "raw_text" in result:
1143 |             console.print(Panel(result["raw_text"], title="Raw Response", border_style="red"))
1144 |         return
1145 |     
1146 |     # Display the extracted data based on expected keys for different demos
1147 |     extracted_data_displayed = False
1148 |     
1149 |     # 1. JSON Extraction (expects 'json' key)
1150 |     if "json" in result and isinstance(result["json"], dict):
1151 |         data = result["json"]
1152 |         json_str = json.dumps(data, indent=2)
1153 |         syntax = Syntax(json_str, "json", theme="monokai", line_numbers=True)
1154 |         console.print(Panel(syntax, title="Extracted JSON Data", border_style="green"))
1155 |         extracted_data_displayed = True
1156 |         
1157 |     # 2. Table Extraction (expects 'formats' and 'metadata')
1158 |     elif "formats" in result and isinstance(result["formats"], dict):
1159 |         formats = result["formats"]
1160 |         if "json" in formats and formats["json"]:
1161 |             try:
1162 |                 _display_json_data(formats["json"], "Extracted Table (JSON)", console)
1163 |                 extracted_data_displayed = True
1164 |             except Exception as e:
1165 |                 console.print(f"[red]Error displaying table JSON: {e}[/red]")
1166 |         if "markdown" in formats and formats["markdown"]:
1167 |             try:
1168 |                 console.print(Panel(Syntax(formats["markdown"], "markdown", theme="default"), title="Extracted Table (Markdown)", border_style="dim green"))
1169 |                 extracted_data_displayed = True # Even if JSON fails, MD might succeed
1170 |             except Exception as e:
1171 |                  console.print(f"[red]Error displaying table Markdown: {e}[/red]")
1172 |         if "metadata" in result and result["metadata"]:
1173 |              try:
1174 |                 _display_json_data(result["metadata"], "Table Metadata", console)
1175 |              except Exception as e:
1176 |                 console.print(f"[red]Error displaying table metadata: {e}[/red]")
1177 |                 
1178 |     # 3. Schema Inference / Entity Extraction (expects 'extracted_data')
1179 |     elif "extracted_data" in result:
1180 |         data = result["extracted_data"]
1181 |         # Check if it looks like entity data (dict with list values)
1182 |         is_entity_data = False
1183 |         if isinstance(data, dict):
1184 |             is_entity_data = all(isinstance(v, list) for v in data.values()) 
1185 |             
1186 |         if is_entity_data: 
1187 |             # Simplified entity display for this function
1188 |             entity_table = Table(title="[bold green]Extracted Entities[/bold green]", box=box.ROUNDED)
1189 |             entity_table.add_column("Category", style="cyan")
1190 |             entity_table.add_column("Value", style="white")
1191 |             for category, items in data.items():
1192 |                 for item in items:
1193 |                      entity_text = str(item.get('name', item)) if isinstance(item, dict) else str(item)
1194 |                      entity_table.add_row(category, escape(entity_text))
1195 |             if entity_table.row_count > 0:
1196 |                 console.print(entity_table)
1197 |                 extracted_data_displayed = True
1198 |             else:
1199 |                  console.print("[yellow]No entities found.[/yellow]")
1200 |                  extracted_data_displayed = True # Still counts as displayed
1201 |         else:
1202 |             # Assume other 'extracted_data' is generic JSON
1203 |             try:
1204 |                 _display_json_data(data, "Extracted Data", console)
1205 |                 extracted_data_displayed = True
1206 |             except Exception as e:
1207 |                 console.print(f"[red]Error displaying extracted data: {e}[/red]")
1208 |                 
1209 |     # Fallback if no specific keys matched
1210 |     if not extracted_data_displayed:
1211 |         console.print("[yellow]Could not find expected data keys (json, formats, extracted_data) in result.[/yellow]")
1212 |         # Optionally display the whole result as JSON for debugging
1213 |         try:
1214 |             full_result_json = json.dumps(result, indent=2, default=str) # Use default=str for non-serializable items
1215 |             console.print(Panel(Syntax(full_result_json, "json", theme="monokai", line_numbers=False), title="[dim]Full Result Object[/dim]", border_style="dim"))
1216 |         except Exception:
1217 |             pass # Ignore if full result can't be serialized
1218 | 
1219 |     # Display performance metrics
1220 |     if any(k in result for k in ["tokens", "cost", "processing_time"]):
1221 |         metrics_table = Table(title="Performance Metrics", box=None)
1222 |         metrics_table.add_column("Metric", style="cyan")
1223 |         metrics_table.add_column("Value", style="white")
1224 |         
1225 |         # Add provider and model info
1226 |         if "provider" in result:
1227 |             metrics_table.add_row("Provider", result["provider"])
1228 |         if "model" in result:
1229 |             metrics_table.add_row("Model", result["model"])
1230 |         
1231 |         # Add token usage
1232 |         if "tokens" in result:
1233 |             tokens = result["tokens"]
1234 |             if isinstance(tokens, dict):
1235 |                 for token_type, count in tokens.items():
1236 |                     metrics_table.add_row(f"{token_type.title()} Tokens", str(count))
1237 |             else:
1238 |                 metrics_table.add_row("Total Tokens", str(tokens))
1239 |         
1240 |         # Add cost and timing
1241 |         if "cost" in result:
1242 |             metrics_table.add_row("Cost", f"${result['cost']:.6f}")
1243 |         if "processing_time" in result:
1244 |             metrics_table.add_row("Processing Time", f"{result['processing_time']:.3f}s")
1245 |         
1246 |         console.print(metrics_table)
1247 | 
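# A minimal usage sketch for parse_and_display_result (editor's illustration; the
# result dictionary is hypothetical but uses the keys this function looks for).
def _example_parse_and_display_result() -> None:
    demo_result = {
        "json": {"invoice_id": "INV-001", "total": 199.99},
        "provider": "openai",
        "model": "gpt-4o-mini",
        "tokens": {"input": 350, "output": 90},
        "cost": 0.00021,
        "processing_time": 1.8,
    }
    parse_and_display_result("JSON Extraction Demo", {"text": "..."}, demo_result)
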
1248 | def display_table_data(table_data: List[Dict], console: Console):
1249 |     """
1250 |     Display tabular data extracted from text.
1251 |     
1252 |     This function takes a list of dictionaries representing table rows and
1253 |     displays it in a formatted table using the Rich library. The table is also
1254 |     displayed as JSON for reference.
1255 |     
1256 |     Args:
1257 |         table_data: List of dictionaries representing table rows
1258 |         console: Rich console to print to
1259 |     """
1260 |     if not table_data:
1261 |         console.print("[yellow]No table data found[/yellow]")
1262 |         return
1263 |     
1264 |     # Create a Rich table from the data
1265 |     rich_table = Table(box=box.SIMPLE)
1266 |     
1267 |     # Add columns from the first row's keys
1268 |     columns = list(table_data[0].keys())
1269 |     for column in columns:
1270 |         rich_table.add_column(str(column), style="cyan")
1271 |     
1272 |     # Add rows
1273 |     for row in table_data:
1274 |         rich_table.add_row(*[str(row.get(col, "")) for col in columns])
1275 |     
1276 |     console.print(rich_table)
1277 |     
1278 |     # Also display as JSON for reference
1279 |     json_str = json.dumps(table_data, indent=2)
1280 |     syntax = Syntax(json_str, "json", theme="monokai", line_numbers=True)
1281 |     console.print(Panel(syntax, title="Table Data (JSON)", border_style="blue"))
1282 | 
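# A minimal usage sketch for display_table_data (editor's illustration; the row
# data is made up).
def _example_display_table_data() -> None:
    rows = [
        {"product": "Widget", "units": 12, "revenue": 144.0},
        {"product": "Gadget", "units": 5, "revenue": 95.0},
    ]
    display_table_data(rows, console)
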
1283 | def display_key_value_pairs(pairs: List[Dict], console: Console):
1284 |     """
1285 |     Display key-value pairs extracted from text.
1286 |     
1287 |     This function takes a list of dictionaries with 'key' and 'value' fields
1288 |     and displays it in a formatted table using the Rich library.
1289 |     
1290 |     Args:
1291 |         pairs: List of dictionaries with 'key' and 'value' fields
1292 |         console: Rich console to print to
1293 |     """
1294 |     if not pairs:
1295 |         console.print("[yellow]No key-value pairs found[/yellow]")
1296 |         return
1297 |     
1298 |     # Create a Rich table for the key-value pairs
1299 |     kv_table = Table(box=None)
1300 |     kv_table.add_column("Key", style="green")
1301 |     kv_table.add_column("Value", style="white")
1302 |     
1303 |     for pair in pairs:
1304 |         kv_table.add_row(str(pair.get("key", "")), str(pair.get("value", "")))  # Coerce values to str so non-string data doesn't break Rich
1305 |     
1306 |     console.print(Panel(kv_table, title="Extracted Key-Value Pairs", border_style="green")) 
1307 | 
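# A minimal usage sketch for display_key_value_pairs (editor's illustration; the
# pairs are made up).
def _example_display_key_value_pairs() -> None:
    pairs = [
        {"key": "Invoice Number", "value": "INV-001"},
        {"key": "Due Date", "value": "2024-07-31"},
    ]
    display_key_value_pairs(pairs, console)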
1308 | 
1309 | 
1310 | logger = get_logger(__name__) # Initialize logger for this module
1311 | 
1312 | async def safe_tool_call(tool_func, args_dict, description=""):
1313 |     """
1314 |     Helper function to safely call an async tool function and display results/errors.
1315 |     
1316 |     This function wraps an async tool function call and handles common error patterns
1317 |     (ToolError, ProtectionTriggeredError, generic exceptions) and formats successful
1318 |     outputs for various common tool result structures using Rich.
1319 |     
1320 |     Args:
1321 |         tool_func: The asynchronous tool function to call.
1322 |         args_dict: A dictionary of arguments to pass to the tool function.
1323 |         description: A description of the tool call for display purposes.
1324 |     
1325 |     Returns:
1326 |         A dictionary containing:
1327 |         - 'success': Boolean indicating if the call was successful (no errors/protection).
1328 |         - 'result': The raw result from the tool function.
1329 |         - 'error' (optional): Error message if an error occurred.
1330 |         - 'details' (optional): Additional error details.
1331 |         - 'protection_triggered' (optional): Boolean, true if deletion protection was triggered.
1332 |         - 'context' (optional): Context dictionary from ProtectionTriggeredError.
1333 |     """
1334 |     tool_name = tool_func.__name__
1335 |     call_desc = description or f"Calling [bold magenta]{tool_name}[/bold magenta]"
1336 |     # Use pretty_repr for args for better complex type display
1337 |     args_str = ", ".join(f"{k}=[yellow]{pretty_repr(v)}[/yellow]" for k, v in args_dict.items())
1338 |     console.print(Panel(f"{call_desc}\nArgs: {args_str}", title="Tool Call", border_style="blue", expand=False))
1339 | 
1340 |     start_time = time.monotonic()
1341 |     try:
1342 |         # Directly await the function
1343 |         result = await tool_func(**args_dict)
1344 |         duration = time.monotonic() - start_time
1345 | 
1346 |         # Check for error/protection structure (often returned by @with_error_handling)
1347 |         is_error = isinstance(result, dict) and (result.get("error") is not None or result.get("isError") is True)
1348 |         is_protection_triggered = isinstance(result, dict) and result.get("protectionTriggered") is True
1349 | 
1350 |         if is_protection_triggered:
1351 |              error_msg = result.get("error", "Protection triggered, reason unspecified.")
1352 |              context = (result.get("details") or {}).get("context", {}) # Context might be nested; guard against details being None
1353 |              console.print(Panel(
1354 |                  f"[bold yellow]🛡️ Protection Triggered![/bold yellow]\n"
1355 |                  f"Message: {escape(error_msg)}\n"
1356 |                  f"Context: {pretty_repr(context)}",
1357 |                  title=f"Result: {tool_name} (Blocked)",
1358 |                  border_style="yellow",
1359 |                  subtitle=f"Duration: {duration:.3f}s"
1360 |              ))
1361 |              return {"success": False, "protection_triggered": True, "result": result, "error": error_msg, "context": context}
1362 | 
1363 |         elif is_error:
1364 |             error_msg = result.get("error", "Unknown error occurred.")
1365 |             error_code = result.get("error_code", "UNKNOWN_ERROR")
1366 |             error_type = result.get("error_type", "ERROR")
1367 |             details = result.get("details", None)
1368 | 
1369 |             logger.debug(f"Error response structure from {tool_name}: {pretty_repr(result)}")
1370 | 
1371 |             error_content = f"[bold red]Error ({error_code})[/bold red]\n"
1372 |             error_content += f"Type: {error_type}\n"
1373 |             error_content += f"Message: {escape(str(error_msg))}"
1374 |             if details:
1375 |                  error_content += f"\nDetails:\n{pretty_repr(details)}"
1376 |             else:
1377 |                  error_content += "\nDetails: N/A"
1378 | 
1379 |             console.print(Panel(
1380 |                 error_content,
1381 |                 title=f"Result: {tool_name} (Failed)",
1382 |                 border_style="red",
1383 |                 subtitle=f"Duration: {duration:.3f}s"
1384 |             ))
1385 |             return {"success": False, "error": error_msg, "details": details, "result": result, "error_code": error_code}
1386 | 
1387 |         else:
1388 |             # Successful result - display nicely
1389 |             output_content = ""
1390 |             if isinstance(result, dict):
1391 |                  # Common success patterns
1392 |                  if "message" in result:
1393 |                       output_content += f"Message: [green]{escape(result['message'])}[/green]\n"
1394 |                  if "path" in result:
1395 |                       output_content += f"Path: [cyan]{escape(str(result['path']))}[/cyan]\n"
1396 |                  if "size" in result and not any(k in result for k in ["content", "files"]): # Avoid printing size if content/files also present
1397 |                       # Only print size if it's the primary info (like in write_file result)
1398 |                       output_content += f"Size: [yellow]{result['size']}[/yellow] bytes\n"
1399 |                  if "created" in result and isinstance(result['created'], bool):
1400 |                     output_content += f"Created: {'Yes' if result['created'] else 'No (already existed)'}\n"
1401 |                  # Handle 'diff' from edit_file
1402 |                  if "diff" in result and result.get("diff") not in ["No changes detected after applying edits.", "No edits provided.", None, ""]:
1403 |                        diff_content = result['diff']
1404 |                        output_content += f"Diff:\n{escape(diff_content)}\n" # Append raw diff text; a Syntax object does not render inside an f-string
1405 |                  # Handle 'matches' from search_files
1406 |                  if "matches" in result and "pattern" in result:
1407 |                        output_content += f"Search Matches ({len(result['matches'])} for pattern '{result['pattern']}'):\n"
1408 |                        rel_base = Path(result.get("path", "."))
1409 |                        output_content += "\n".join(f"- [cyan]{escape(os.path.relpath(m, rel_base))}[/cyan]" for m in result['matches'][:20])
1410 |                        if len(result['matches']) > 20:
1411 |                             output_content += "\n- ... (more matches)"
1412 |                        if result.get("warnings"):
1413 |                             output_content += "\n[yellow]Warnings:[/yellow]\n" + "\n".join(f"- {escape(w)}" for w in result['warnings']) + "\n"
1414 |                  # Handle 'entries' from list_directory
1415 |                  elif "entries" in result and "path" in result:
1416 |                        output_content += f"Directory Listing for [cyan]{escape(str(result['path']))}[/cyan]:\n"
1417 |                        table = Table(show_header=True, header_style="bold magenta", box=None)
1418 |                        table.add_column("Name", style="cyan", no_wrap=True)
1419 |                        table.add_column("Type", style="green")
1420 |                        table.add_column("Info", style="yellow")
1421 |                        for entry in result.get('entries', []):
1422 |                             name = entry.get('name', '?')
1423 |                             etype = entry.get('type', 'unknown')
1424 |                             info_str = ""
1425 |                             if etype == 'file' and 'size' in entry:
1426 |                                  info_str += f"{entry['size']} bytes"
1427 |                             elif etype == 'symlink' and 'symlink_target' in entry:
1428 |                                  info_str += f"-> {escape(str(entry['symlink_target']))}"
1429 |                             if 'error' in entry:
1430 |                                  info_str += f" [red](Error: {escape(entry['error'])})[/red]"
1431 |                             icon = "📄" if etype == "file" else "📁" if etype == "directory" else "🔗" if etype=="symlink" else "❓"
1432 |                             table.add_row(f"{icon} {escape(name)}", etype, info_str)
1433 |                        with Capture(console) as capture: # Use Capture from rich.console
1434 |                             console.print(table)
1435 |                        output_content += capture.get()
1436 |                        if result.get("warnings"):
1437 |                             output_content += "\n[yellow]Warnings:[/yellow]\n" + "\n".join(f"- {escape(w)}" for w in result['warnings']) + "\n"
1438 |                  # Handle 'tree' from directory_tree
1439 |                  elif "tree" in result and "path" in result:
1440 |                        output_content += f"Directory Tree for [cyan]{escape(str(result['path']))}[/cyan]:\n"
1441 |                        # Local helper function to build the rich tree recursively
1442 |                        def build_rich_tree_display(parent_node, children):
1443 |                             for item in children:
1444 |                                 name = item.get("name", "?")
1445 |                                 item_type = item.get("type", "unknown")
1446 |                                 info = ""
1447 |                                 if "size" in item:
1448 |                                      size_bytes = item['size']
1449 |                                      if size_bytes < 1024: 
1450 |                                          info += f" ({size_bytes}b)"
1451 |                                      elif size_bytes < 1024 * 1024: 
1452 |                                          info += f" ({size_bytes/1024:.1f}KB)"
1453 |                                      else: 
1454 |                                          info += f" ({size_bytes/(1024*1024):.1f}MB)"
1455 |                                 if "target" in item: 
1456 |                                     info += f" → {escape(item['target'])}"
1457 |                                 if "error" in item: 
1458 |                                     info += f" [red](Error: {escape(item['error'])})[/red]"
1459 | 
1460 |                                 if item_type == "directory":
1461 |                                     node = parent_node.add(f"📁 [bold cyan]{escape(name)}[/bold cyan]{info}")
1462 |                                     if "children" in item: 
1463 |                                         build_rich_tree_display(node, item["children"])
1464 |                                 elif item_type == "file":
1465 |                                      icon = "📄" # Default icon
1466 |                                      ext = os.path.splitext(name)[1].lower()
1467 |                                      if ext in ['.jpg', '.png', '.gif', '.bmp', '.jpeg', '.svg']: 
1468 |                                          icon = "🖼️"
1469 |                                      elif ext in ['.mp3', '.wav', '.ogg', '.flac']: 
1470 |                                          icon = "🎵"
1471 |                                      elif ext in ['.mp4', '.avi', '.mov', '.mkv']: 
1472 |                                          icon = "🎬"
1473 |                                      elif ext in ['.py', '.js', '.java', '.c', '.cpp', '.go', '.rs']: 
1474 |                                          icon = "📜"
1475 |                                      elif ext in ['.json', '.xml', '.yaml', '.yml']: 
1476 |                                          icon = "📋"
1477 |                                      elif ext in ['.zip', '.tar', '.gz', '.7z', '.rar']: 
1478 |                                          icon = "📦"
1479 |                                      elif ext in ['.md', '.txt', '.doc', '.docx', '.pdf']: 
1480 |                                          icon = "📝"
1481 |                                      parent_node.add(f"{icon} [green]{escape(name)}[/green]{info}")
1482 |                                 elif item_type == "symlink": 
1483 |                                     parent_node.add(f"🔗 [magenta]{escape(name)}[/magenta]{info}")
1484 |                                 elif item_type == "info": 
1485 |                                     parent_node.add(f"ℹ️ [dim]{escape(name)}[/dim]")
1486 |                                 elif item_type == "error": 
1487 |                                     parent_node.add(f"❌ [red]{escape(name)}[/red]{info}")
1488 |                                 else: 
1489 |                                     parent_node.add(f"❓ [yellow]{escape(name)}[/yellow]{info}")
1490 | 
1491 |                        rich_tree_root = Tree(f"📁 [bold cyan]{escape(os.path.basename(result['path']))}[/bold cyan]")
1492 |                        build_rich_tree_display(rich_tree_root, result["tree"])
1493 |                        with Capture(console) as capture: # Use Capture from rich.console
1494 |                            console.print(rich_tree_root)
1495 |                        output_content += capture.get()
1496 |                  # Handle 'directories' from list_allowed_directories
1497 |                  elif "directories" in result and "count" in result:
1498 |                         output_content += f"Allowed Directories ({result['count']}):\n"
1499 |                         output_content += "\n".join(f"- [green]{escape(d)}[/green]" for d in result['directories']) + "\n"
1500 |                  # Handle 'files' from read_multiple_files
1501 |                  elif "files" in result and "succeeded" in result:
1502 |                         output_content += f"Read Results: [green]{result['succeeded']} succeeded[/green], [red]{result.get('failed', 0)} failed[/red]\n"
1503 |                         for file_res in result.get('files', []):
1504 |                             path_str = escape(str(file_res.get('path', 'N/A')))
1505 |                             if file_res.get('success'):
1506 |                                 size_info = f" ({file_res.get('size', 'N/A')}b)" if 'size' in file_res else ""
1507 |                                 # Use preview if available, else content snippet
1508 |                                 content_display = file_res.get('preview', file_res.get('content', ''))
1509 |                                 output_content += f"- [green]Success[/green]: [cyan]{path_str}[/cyan]{size_info}\n  Content: '{escape(str(content_display))}'\n"
1510 |                             else:
1511 |                                 output_content += f"- [red]Failed[/red]: [cyan]{path_str}[/cyan]\n  Error: {escape(str(file_res.get('error', 'Unknown')))}\n"
1512 |                  # Handle 'content' block (from read_file)
1513 |                  elif "content" in result and "path" in result: # More specific check for read_file
1514 |                      # Check if content is list of blocks (MCP format) or simple string/bytes
1515 |                      content_data = result["content"]
1516 |                      preview_content = ""
1517 |                      if isinstance(content_data, list) and content_data and "text" in content_data[0]:
1518 |                          # Assumes MCP text block format
1519 |                          preview_content = "\n".join([escape(block.get("text","")) for block in content_data if block.get("type")=="text"])
1520 |                      elif isinstance(content_data, str):
1521 |                          # Simple string content
1522 |                          preview_content = escape(content_data[:1000] + ('...' if len(content_data) > 1000 else '')) # Limit preview
1523 |                      elif isinstance(content_data, bytes):
1524 |                          # Handle bytes (e.g., hex preview)
1525 |                          try:
1526 |                              import binascii
1527 |                              hex_preview = binascii.hexlify(content_data[:64]).decode('ascii') # Preview first 64 bytes
1528 |                              preview_content = f"[dim]Binary Content (Hex Preview):[/dim]\n{hex_preview}{'...' if len(content_data) > 64 else ''}"
1529 |                          except Exception:
1530 |                              preview_content = "[dim]Binary Content (Preview unavailable)[/dim]"
1531 |                      
1532 |                      if preview_content: # Only add if we have something to show
1533 |                          output_content += f"Content ({result.get('size', 'N/A')} bytes):\n{preview_content}\n"
1534 |                      elif 'size' in result: # If no content preview but size exists
1535 |                          output_content += f"Size: [yellow]{result['size']}[/yellow] bytes\n"
1536 | 
1537 |                  # Handle 'modified' from get_file_info
1538 |                  elif "name" in result and "modified" in result:
1539 |                       output_content += f"File Info for [cyan]{escape(result['name'])}[/cyan]:\n"
1540 |                       info_table = Table(show_header=False, box=None)
1541 |                       info_table.add_column("Property", style="blue")
1542 |                       info_table.add_column("Value", style="yellow")
1543 |                       skip_keys = {"success", "message", "path", "name"}
1544 |                       for k, v in result.items():
1545 |                            if k not in skip_keys:
1546 |                                info_table.add_row(escape(k), pretty_repr(v))
1547 |                       with Capture(console) as capture: # Use Capture from rich.console
1548 |                            console.print(info_table)
1549 |                       output_content += capture.get()
1550 | 
1551 |                  # Fallback for other dictionaries
1552 |                  else:
1553 |                      excluded_keys = {'content', 'tree', 'entries', 'matches', 'files', 'success', 'message'}
1554 |                      display_dict = {k:v for k,v in result.items() if k not in excluded_keys}
1555 |                      if display_dict:
1556 |                          output_content += "Result Data:\n" + pretty_repr(display_dict) + "\n"
1557 |                      elif not output_content: # If nothing else was printed
1558 |                           output_content = "[dim](Tool executed successfully, no specific output format matched)[/dim]"
1559 | 
1560 |             # Handle non-dict results (should be rare)
1561 |             else:
1562 |                  output_content = escape(str(result))
1563 | 
1564 |             console.print(Panel(
1565 |                  output_content,
1566 |                  title=f"Result: {tool_name} (Success)",
1567 |                  border_style="green",
1568 |                  subtitle=f"Duration: {duration:.3f}s"
1569 |             ))
1570 |             return {"success": True, "result": result}
1571 | 
1572 |     except ProtectionTriggeredError as pte:
1573 |          duration = time.monotonic() - start_time
1574 |          logger.warning(f"Protection triggered calling {tool_name}: {pte}", exc_info=True) # Use logger from display.py
1575 |          console.print(Panel(
1576 |              f"[bold yellow]🛡️ Protection Triggered![/bold yellow]\n"
1577 |              f"Message: {escape(str(pte))}\n"
1578 |              f"Context: {pretty_repr(pte.context)}",
1579 |              title=f"Result: {tool_name} (Blocked)",
1580 |              border_style="yellow",
1581 |              subtitle=f"Duration: {duration:.3f}s"
1582 |          ))
1583 |          return {"success": False, "protection_triggered": True, "error": str(pte), "context": pte.context, "result": None}
1584 |     except (ToolInputError, ToolError) as tool_err:
1585 |          duration = time.monotonic() - start_time
1586 |          error_code = getattr(tool_err, 'error_code', 'TOOL_ERROR')
1587 |          details = getattr(tool_err, 'details', None) # Use getattr with default None
1588 |          logger.error(f"Tool Error calling {tool_name}: {tool_err} ({error_code})", exc_info=True, extra={'details': details}) # Use logger from display.py
1589 | 
1590 |          error_content = f"[bold red]{type(tool_err).__name__} ({error_code})[/bold red]\n"
1591 |          error_content += f"Message: {escape(str(tool_err))}"
1592 |          if details:
1593 |               error_content += f"\nDetails:\n{pretty_repr(details)}"
1594 |          else:
1595 |               error_content += "\nDetails: N/A"
1596 | 
1597 |          error_content += f"\n\nFunction: [yellow]{tool_name}[/yellow]"
1598 |          error_content += f"\nArguments: [dim]{pretty_repr(args_dict)}[/dim]" # Use pretty_repr for args
1599 | 
1600 |          console.print(Panel(
1601 |              error_content,
1602 |              title=f"Result: {tool_name} (Failed)",
1603 |              border_style="red",
1604 |              subtitle=f"Duration: {duration:.3f}s"
1605 |          ))
1606 |          return {"success": False, "error": str(tool_err), "details": details, "error_code": error_code, "result": None}
1607 |     except Exception as e:
1608 |         duration = time.monotonic() - start_time
1609 |         logger.critical(f"Unexpected Exception calling {tool_name}: {e}", exc_info=True) # Use logger from display.py
1610 |         console.print(Panel(
1611 |             f"[bold red]Unexpected Error ({type(e).__name__})[/bold red]\n"
1612 |             f"{escape(str(e))}",
1613 |             title=f"Result: {tool_name} (Critical Failure)",
1614 |             border_style="red",
1615 |             subtitle=f"Duration: {duration:.3f}s"
1616 |         ))
1617 |         # Include basic args in details for unexpected errors too
1618 |         return {"success": False, "error": f"Unexpected: {str(e)}", "details": {"type": type(e).__name__, "args": args_dict}, "result": None} 
1619 | 
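# A minimal usage sketch for safe_tool_call (editor's illustration; it wraps the
# async list_directory tool already used elsewhere in this module, and the path
# argument is made up).
async def _example_safe_tool_call() -> None:
    outcome = await safe_tool_call(
        list_directory,
        {"path": "/tmp"},
        description="Listing a demo directory",
    )
    if not outcome["success"]:
        logger.warning(f"Demo call failed: {outcome.get('error')}")
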
1620 | # --- Async Rich Directory Tree Builder ---
1621 | # RESTORED ASYNC VERSION
1622 | 
1623 | async def _build_rich_directory_tree_recursive(
1624 |     path: Path, 
1625 |     tree_node: Tree, 
1626 |     depth: int, 
1627 |     max_depth: int
1628 | ):
1629 |     """
1630 |     Recursive helper to build a Rich Tree using async list_directory.
1631 |     
1632 |     This function is a recursive helper for generating a Rich Tree representation
1633 |     of a directory structure using the async list_directory tool. It traverses the
1634 |     directory tree and adds nodes for each file, directory, and symlink encountered,
1635 |     with appropriate icons and styling based on file types.
1636 |     
1637 |     Args:
1638 |         path: The current directory path (Path object) to display.
1639 |         tree_node: The parent Tree node to add child nodes to.
1640 |         depth: The current recursion depth in the directory tree.
1641 |         max_depth: The maximum depth to traverse, preventing excessive recursion.
1642 |     
1643 |     Note:
1644 |         This function uses different icons for different file types and includes 
1645 |         size information for files and target information for symlinks when available.
1646 |     """
1647 |     if depth >= max_depth:
1648 |         tree_node.add("📁 [dim]...(max depth reached)[/dim]")
1649 |         return
1650 | 
1651 |     try:
1652 |         # Call the async list_directory tool
1653 |         list_result = await list_directory(path=str(path))
1654 | 
1655 |         # Handle potential errors from the list_directory call itself
1656 |         if isinstance(list_result, dict) and list_result.get("error"):
1657 |             error_msg = list_result.get("error", "Unknown listing error")
1658 |             tree_node.add(f"❌ [red]Error listing: {escape(error_msg)}[/red]")
1659 |             return
1660 |         
1661 |         # Ensure result structure is as expected
1662 |         if not isinstance(list_result, dict) or "entries" not in list_result:
1663 |              tree_node.add(f"❓ [yellow]Unexpected result format from list_directory for {escape(str(path))}[/yellow]")
1664 |              logger.warning(f"Unexpected list_directory result for {path}: {list_result}")
1665 |              return
1666 |              
1667 |         entries = sorted(list_result.get("entries", []), key=lambda x: x.get("name", ""))
1668 | 
1669 |         for item in entries:
1670 |             name = item.get("name", "?")
1671 |             item_type = item.get("type", "unknown")
1672 |             entry_error = item.get("error")
1673 |             item_path = path / name
1674 |             
1675 |             # Skip hidden files/dirs (same logic as demo)
1676 |             # if name.startswith('.') and name != '.gitignore':
1677 |             #     continue
1678 |                 
1679 |             # Handle entry-specific errors
1680 |             if entry_error:
1681 |                 tree_node.add(f"❌ [red]{escape(name)} - Error: {escape(entry_error)}[/red]")
1682 |                 continue
1683 |                 
1684 |             info = ""
1685 |             # Use size reported by list_directory
1686 |             if "size" in item and item_type == "file": 
1687 |                  size_bytes = item['size']
1688 |                  if size_bytes < 1024: 
1689 |                      info += f" ({size_bytes}b)"
1690 |                  elif size_bytes < 1024 * 1024: 
1691 |                      info += f" ({size_bytes/1024:.1f}KB)"
1692 |                  else: 
1693 |                      info += f" ({size_bytes/(1024*1024):.1f}MB)"
1694 |             # Use symlink_target reported by list_directory
1695 |             if item_type == "symlink" and "symlink_target" in item: 
1696 |                 info += f" → {escape(str(item['symlink_target']))}" 
1697 | 
1698 |             if item_type == "directory":
1699 |                 # Add node for directory
1700 |                 dir_node = tree_node.add(f"📁 [bold cyan]{escape(name)}[/bold cyan]{info}")
1701 |                 # Recurse into subdirectory
1702 |                 await _build_rich_directory_tree_recursive(item_path, dir_node, depth + 1, max_depth)
1703 |             elif item_type == "file":
1704 |                  # Icon logic copied from demo
1705 |                  icon = "📄"
1706 |                  ext = os.path.splitext(name)[1].lower()
1707 |                  if ext in ['.jpg', '.png', '.gif', '.bmp', '.jpeg', '.svg']: 
1708 |                      icon = "🖼️"
1709 |                  elif ext in ['.mp3', '.wav', '.ogg', '.flac']: 
1710 |                      icon = "🎵"
1711 |                  elif ext in ['.mp4', '.avi', '.mov', '.mkv']: 
1712 |                      icon = "🎬"
1713 |                  elif ext in ['.py', '.js', '.java', '.c', '.cpp', '.go', '.rs']: 
1714 |                      icon = "📜"
1715 |                  elif ext in ['.json', '.xml', '.yaml', '.yml']: 
1716 |                      icon = "📋"
1717 |                  elif ext in ['.zip', '.tar', '.gz', '.7z', '.rar']: 
1718 |                      icon = "📦"
1719 |                  elif ext in ['.md', '.txt', '.doc', '.docx', '.pdf']: 
1720 |                      icon = "📝"
1721 |                  tree_node.add(f"{icon} [green]{escape(name)}[/green]{info}")
1722 |             elif item_type == "symlink": 
1723 |                 tree_node.add(f"🔗 [magenta]{escape(name)}[/magenta]{info}")
1724 |             # Handle potential info/error items from list_directory (though less common than directory_tree)
1725 |             elif item_type == "info": 
1726 |                  tree_node.add(f"ℹ️ [dim]{escape(name)}[/dim]")
1727 |             elif item_type == "error":
1728 |                  tree_node.add(f"❌ [red]{escape(name)}[/red]{info}")
1729 |             else: # Handle unknown type
1730 |                 tree_node.add(f"❓ [yellow]{escape(name)}[/yellow]{info}")
1731 |                 
1732 |     except Exception as e:
1733 |         # Catch unexpected errors during the process for this path
1734 |         logger.error(f"Unexpected error building tree for {path}: {e}", exc_info=True)
1735 |         tree_node.add(f"❌ [bold red]Failed to process: {escape(str(path))} ({type(e).__name__})[/bold red]")
1736 | 
1737 | async def generate_rich_directory_tree(path: Union[str, Path], max_depth: int = 3) -> Tree:
1738 |     """
1739 |     Generates a `rich.Tree` visualization of a directory using async filesystem tools.
1740 |     
1741 |     This function generates a Rich Tree representation of a directory structure
1742 |     using the async filesystem tools provided by the Ultimate MCP Server. It
1743 |     supports traversing the directory tree up to a specified maximum depth.
1744 |     
1745 |     Args:
1746 |         path: The starting directory path (string or Path object).
1747 |         max_depth: The maximum depth to traverse.
1748 |     
1749 |     Returns:
1750 |         A `rich.Tree` object representing the directory structure.
1751 |     """
1752 |     start_path = Path(path)
1753 |     tree_root = Tree(f"📁 [bold cyan]{escape(start_path.name)}[/bold cyan]")
1754 |     
1755 |     # Check if the root path exists and is a directory before starting recursion
1756 |     try:
1757 |         # Use list_directory for the initial check
1758 |         initial_check = await list_directory(path=str(start_path))
1759 |         if isinstance(initial_check, dict) and initial_check.get("error"):
1760 |              # Check if the error is because it's not a directory or doesn't exist
1761 |              error_msg = initial_check['error']
1762 |              if "Not a directory" in error_msg or "No such file or directory" in error_msg:
1763 |                  tree_root.add(f"❌ [red]{escape(error_msg)}: {escape(str(start_path))}[/red]")
1764 |              else:
1765 |                  tree_root.add(f"❌ [red]Error accessing root path: {escape(error_msg)}[/red]")
1766 |              return tree_root # Return tree with only the error
1767 |         # We assume if list_directory didn't error, it's a directory.
1768 |     except Exception as e:
1769 |         logger.error(f"Error during initial check for {start_path}: {e}", exc_info=True)
1770 |         tree_root.add(f"❌ [bold red]Failed initial check: {escape(str(start_path))} ({type(e).__name__})[/bold red]")
1771 |         return tree_root
1772 | 
1773 |     # Start the recursive build if initial check seems okay
1774 |     await _build_rich_directory_tree_recursive(start_path, tree_root, depth=0, max_depth=max_depth)
1775 |     return tree_root 
1776 | 
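# A minimal usage sketch for generate_rich_directory_tree (editor's illustration;
# the directory path is made up).
async def _example_generate_rich_directory_tree() -> None:
    tree = await generate_rich_directory_tree("/tmp", max_depth=2)
    console.print(tree)
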
1777 | # --- Cost Tracking Utility ---
1778 | 
1779 | class CostTracker:
1780 |     """
1781 |     Tracks and aggregates API call costs and token usage across multiple LLM operations.
1782 |     
1783 |     The CostTracker provides a centralized mechanism for monitoring API usage costs
1784 |     and token consumption across multiple calls to language model providers. It maintains
1785 |     a structured record of costs organized by provider and model, supporting various
1786 |     result formats from different API calls.
1787 |     
1788 |     The tracker can extract cost and token information from both object attributes
1789 |     (like CompletionResult objects) and dictionary structures (like tool results),
1790 |     making it versatile for different API response formats.
1791 |     
1792 |     Features:
1793 |     - Detailed tracking by provider and model
1794 |     - Support for input and output token counts
1795 |     - Optional cost limit monitoring
1796 |     - Rich console visualization of cost summaries
1797 |     - Aggregation of calls, tokens, and costs
1798 |     
1799 |     Usage example:
1800 |     ```python
1801 |     # Initialize tracker
1802 |     tracker = CostTracker(limit=5.0)  # Set $5.00 cost limit
1803 |     
1804 |     # Track costs from various API calls
1805 |     tracker.add_call(completion_result)
1806 |     tracker.add_call(summarization_result)
1807 |     
1808 |     # Check if limit exceeded
1809 |     if tracker.exceeds_limit():
1810 |         print("Cost limit exceeded!")
1811 |     
1812 |     # Display summary in console
1813 |     tracker.display_summary(console)
1814 |     ```
1815 |     
1816 |     Attributes:
1817 |         data: Nested dictionary storing cost and token data organized by provider and model
1818 |         limit: Optional cost limit in USD to monitor usage against
1819 |     """
1820 |     def __init__(self, limit: Optional[float] = None):
1821 |         """
1822 |         Initialize a new cost tracker with an optional spending limit.
1823 |         
1824 |         Args:
1825 |             limit: Optional maximum cost limit in USD. If provided, the tracker
1826 |                   can report when costs exceed this threshold using exceeds_limit().
1827 |         """
1828 |         self.data: Dict[str, Dict[str, Dict[str, Any]]] = {} # {provider: {model: {cost, tokens..., calls}}}
1829 |         self.limit: Optional[float] = limit  # Cost limit in USD
1830 | 
1831 |     @property
1832 |     def total_cost(self) -> float:
1833 |         """
1834 |         Get the total accumulated cost across all providers and models.
1835 |         
1836 |         Returns:
1837 |             float: The sum of all tracked costs in USD
1838 |         """
1839 |         total = 0.0
1840 |         for provider_data in self.data.values():
1841 |             for model_data in provider_data.values():
1842 |                 total += model_data.get("cost", 0.0)
1843 |         return total
1844 | 
1845 |     def exceeds_limit(self) -> bool:
1846 |         """
1847 |         Check if the current total cost exceeds the specified limit.
1848 |         
1849 |         Returns:
1850 |             bool: True if a limit is set and the total cost exceeds it, False otherwise
1851 |         """
1852 |         if self.limit is None:
1853 |             return False
1854 |         return self.total_cost >= self.limit
1855 | 
1856 |     def add_call(self, result: Any, provider: Optional[str] = None, model: Optional[str] = None):
1857 |         """
1858 |         Add cost and token data from an API call result to the tracker.
1859 |         
1860 |         This method extracts cost and token information from various result formats,
1861 |         including structured objects with attributes (like CompletionResult) or
1862 |         dictionaries (like tool results). It intelligently identifies the relevant
1863 |         data fields and updates the tracking statistics.
1864 |         
1865 |         Args:
1866 |             result: The API call result containing cost and token information.
1867 |                    Can be an object with attributes or a dictionary.
1868 |             provider: Optional provider name override. If not specified, will be
1869 |                      extracted from the result if available.
1870 |             model: Optional model name override. If not specified, will be
1871 |                   extracted from the result if available.
1872 |                   
1873 |         Example:
1874 |             ```python
1875 |             # Track a direct API call result
1876 |             summarization_result = await summarize_document(...)
1877 |             tracker.add_call(summarization_result)
1878 |             
1879 |             # Track with explicit provider/model specification
1880 |             tracker.add_call(
1881 |                 custom_result,
1882 |                 provider="openai",
1883 |                 model="gpt-4o"
1884 |             )
1885 |             ```
1886 |         """
1887 |         cost = 0.0
1888 |         input_tokens = 0
1889 |         output_tokens = 0
1890 |         total_tokens = 0
1891 | 
1892 |         # Try extracting from object attributes (e.g., CompletionResult)
1893 |         if hasattr(result, 'cost') and result.cost is not None:
1894 |             cost = float(result.cost)
1895 |         if hasattr(result, 'provider') and result.provider:
1896 |             provider = result.provider
1897 |         if hasattr(result, 'model') and result.model:
1898 |             model = result.model
1899 |         if hasattr(result, 'input_tokens') and result.input_tokens is not None:
1900 |             input_tokens = int(result.input_tokens)
1901 |         if hasattr(result, 'output_tokens') and result.output_tokens is not None:
1902 |             output_tokens = int(result.output_tokens)
1903 |         if hasattr(result, 'total_tokens') and result.total_tokens is not None:
1904 |             total_tokens = int(result.total_tokens)
1905 |         elif input_tokens > 0 or output_tokens > 0:
1906 |              total_tokens = input_tokens + output_tokens # Calculate if not present
1907 | 
1908 |         # Try extracting from dictionary keys (e.g., tool results, stats dicts)
1909 |         elif isinstance(result, dict):
1910 |             cost = float(result.get('cost', 0.0))
1911 |             provider = result.get('provider', provider) # Use existing if key not found
1912 |             model = result.get('model', model)         # Use existing if key not found
1913 |             tokens_data = result.get('tokens', {})
1914 |             if isinstance(tokens_data, dict):
1915 |                 input_tokens = int(tokens_data.get('input', 0))
1916 |                 output_tokens = int(tokens_data.get('output', 0))
1917 |                 total_tokens = int(tokens_data.get('total', 0))
1918 |                 if total_tokens == 0 and (input_tokens > 0 or output_tokens > 0):
1919 |                      total_tokens = input_tokens + output_tokens
1920 |             elif isinstance(tokens_data, (int, float)): # Handle case where 'tokens' is just a total number
1921 |                 total_tokens = int(tokens_data)
1922 | 
1923 |         # --- Fallback / Defaulting ---
1924 |         # If provider/model couldn't be determined, use defaults
1925 |         provider = provider or "UnknownProvider"
1926 |         model = model or "UnknownModel"
1927 | 
1928 |         # --- Update Tracking Data ---
1929 |         if provider not in self.data:
1930 |             self.data[provider] = {}
1931 |         if model not in self.data[provider]:
1932 |             self.data[provider][model] = {
1933 |                 "cost": 0.0,
1934 |                 "input_tokens": 0,
1935 |                 "output_tokens": 0,
1936 |                 "total_tokens": 0,
1937 |                 "calls": 0
1938 |             }
1939 | 
1940 |         self.data[provider][model]["cost"] += cost
1941 |         self.data[provider][model]["input_tokens"] += input_tokens
1942 |         self.data[provider][model]["output_tokens"] += output_tokens
1943 |         self.data[provider][model]["total_tokens"] += total_tokens
1944 |         self.data[provider][model]["calls"] += 1
1945 |         
1946 |     def record_call(self, provider: str, model: str, input_tokens: int, output_tokens: int, cost: float):
1947 |         """
1948 |         Directly record a call with explicit token counts and cost.
1949 |         
1950 |         This method allows manual tracking of API calls with explicit parameter values,
1951 |         useful when the cost information isn't available in a structured result object.
1952 |         
1953 |         Args:
1954 |             provider: The provider name (e.g., "openai", "anthropic")
1955 |             model: The model name (e.g., "gpt-4", "claude-3-opus")
1956 |             input_tokens: Number of input (prompt) tokens
1957 |             output_tokens: Number of output (completion) tokens
1958 |             cost: The cost of the API call in USD
1959 |             
1960 |         Example:
1961 |             ```python
1962 |             tracker.record_call(
1963 |                 provider="openai",
1964 |                 model="gpt-4o",
1965 |                 input_tokens=1500,
1966 |                 output_tokens=350,
1967 |                 cost=0.03
1968 |             )
1969 |             ```
1970 |         """
1971 |         if provider not in self.data:
1972 |             self.data[provider] = {}
1973 |         if model not in self.data[provider]:
1974 |             self.data[provider][model] = {
1975 |                 "cost": 0.0,
1976 |                 "input_tokens": 0,
1977 |                 "output_tokens": 0,
1978 |                 "total_tokens": 0,
1979 |                 "calls": 0
1980 |             }
1981 |             
1982 |         total_tokens = input_tokens + output_tokens
1983 |         
1984 |         self.data[provider][model]["cost"] += cost
1985 |         self.data[provider][model]["input_tokens"] += input_tokens
1986 |         self.data[provider][model]["output_tokens"] += output_tokens
1987 |         self.data[provider][model]["total_tokens"] += total_tokens
1988 |         self.data[provider][model]["calls"] += 1
1989 |         
1990 |     def add_custom_cost(self, description: str, provider: str, model: str, cost: float, 
1991 |                         input_tokens: int = 0, output_tokens: int = 0):
1992 |         """
1993 |         Add a custom cost entry with an optional description.
1994 |         
1995 |         This method is useful for tracking costs that aren't directly tied to a specific
1996 |         API call, such as batch processing fees, infrastructure costs, or estimated costs.
1997 |         
1998 |         Args:
1999 |             description: A descriptive label for this cost entry (e.g., "Batch Processing")
2000 |             provider: The provider name or service category
2001 |             model: The model name or service type
2002 |             cost: The cost amount in USD
2003 |             input_tokens: Optional input token count (default: 0)
2004 |             output_tokens: Optional output token count (default: 0)
2005 |             
2006 |         Example:
2007 |             ```python
2008 |             tracker.add_custom_cost(
2009 |                 "Batch Processing",
2010 |                 "openai",
2011 |                 "gpt-4-turbo",
2012 |                 0.25,
2013 |                 input_tokens=5000,
2014 |                 output_tokens=1200
2015 |             )
2016 |             ```
2017 |         """
2018 |         # Format the model name to include the description
2019 |         custom_model = f"{model} ({description})"
2020 |         
2021 |         if provider not in self.data:
2022 |             self.data[provider] = {}
2023 |         if custom_model not in self.data[provider]:
2024 |             self.data[provider][custom_model] = {
2025 |                 "cost": 0.0,
2026 |                 "input_tokens": 0,
2027 |                 "output_tokens": 0,
2028 |                 "total_tokens": 0,
2029 |                 "calls": 0
2030 |             }
2031 |             
2032 |         total_tokens = input_tokens + output_tokens
2033 |         
2034 |         self.data[provider][custom_model]["cost"] += cost
2035 |         self.data[provider][custom_model]["input_tokens"] += input_tokens
2036 |         self.data[provider][custom_model]["output_tokens"] += output_tokens
2037 |         self.data[provider][custom_model]["total_tokens"] += total_tokens
2038 |         self.data[provider][custom_model]["calls"] += 1
2039 | 
2040 |     def display_summary(self, console_instance: Optional[Console] = None, title: str = "Total Demo Cost Summary"):
2041 |         """
2042 |         Display a formatted summary of all tracked costs and token usage in a Rich console table.
2043 |         
2044 |         This method generates a detailed tabular report showing:
2045 |         - Costs broken down by provider and model
2046 |         - Number of calls made to each model
2047 |         - Input, output, and total token counts
2048 |         - Subtotals by provider (when multiple models are used)
2049 |         - Grand totals across all providers and models
2050 |         - Progress against cost limit (if set)
2051 |         
2052 |         The report is formatted using Rich tables with color coding for readability.
2053 |         
2054 |         Args:
2055 |             console_instance: Optional Rich Console instance to use for display.
2056 |                              If not provided, uses the default console.
2057 |             title: Custom title for the summary report.
2058 |                   Defaults to "Total Demo Cost Summary".
2059 |                   
2060 |         Example:
2061 |             ```python
2062 |             # Display with default settings
2063 |             tracker.display_summary()
2064 |             
2065 |             # Display with custom title and console
2066 |             from rich.console import Console
2067 |             custom_console = Console(width=100)
2068 |             tracker.display_summary(
2069 |                 console_instance=custom_console,
2070 |                 title="AI Generation Cost Report"
2071 |             )
2072 |             ```
2073 |         """
2074 |         output = console_instance or console # Use provided or default console
2075 | 
2076 |         output.print(Rule(f"[bold blue]{escape(title)}[/bold blue]"))
2077 | 
2078 |         if not self.data:
2079 |             output.print("[yellow]No cost data tracked.[/yellow]")
2080 |             return
2081 | 
2082 |         summary_table = Table(
2083 |             title="[bold]API Call Costs & Tokens[/bold]",
2084 |             box=box.ROUNDED,
2085 |             show_footer=True,
2086 |             footer_style="bold"
2087 |         )
2088 |         summary_table.add_column("Provider", style="cyan", footer="Grand Total")
2089 |         summary_table.add_column("Model", style="magenta")
2090 |         summary_table.add_column("Calls", style="blue", justify="right", footer=" ") # Placeholder footer
2091 |         summary_table.add_column("Input Tokens", style="yellow", justify="right", footer=" ")
2092 |         summary_table.add_column("Output Tokens", style="yellow", justify="right", footer=" ")
2093 |         summary_table.add_column("Total Tokens", style="bold yellow", justify="right", footer=" ")
2094 |         summary_table.add_column("Total Cost ($)", style="bold green", justify="right", footer=" ")
2095 | 
2096 |         grand_total_cost = 0.0
2097 |         grand_total_calls = 0
2098 |         grand_total_input = 0
2099 |         grand_total_output = 0
2100 |         grand_total_tokens = 0
2101 | 
2102 |         sorted_providers = sorted(self.data.keys())
2103 |         for provider in sorted_providers:
2104 |             provider_total_cost = 0.0
2105 |             provider_total_calls = 0
2106 |             provider_total_input = 0
2107 |             provider_total_output = 0
2108 |             provider_total_tokens = 0
2109 |             
2110 |             sorted_models = sorted(self.data[provider].keys())
2111 |             num_models = len(sorted_models)
2112 | 
2113 |             for i, model in enumerate(sorted_models):
2114 |                 stats = self.data[provider][model]
2115 |                 provider_total_cost += stats['cost']
2116 |                 provider_total_calls += stats['calls']
2117 |                 provider_total_input += stats['input_tokens']
2118 |                 provider_total_output += stats['output_tokens']
2119 |                 provider_total_tokens += stats['total_tokens']
2120 | 
2121 |                 # Display provider only on the first row for that provider
2122 |                 provider_display = escape(provider) if i == 0 else ""
2123 |                 
2124 |                 summary_table.add_row(
2125 |                     provider_display,
2126 |                     escape(model),
2127 |                     str(stats['calls']),
2128 |                     f"{stats['input_tokens']:,}",
2129 |                     f"{stats['output_tokens']:,}",
2130 |                     f"{stats['total_tokens']:,}",
2131 |                     f"{stats['cost']:.6f}"
2132 |                 )
2133 |                 
2134 |             # Add provider subtotal row if more than one model for the provider
2135 |             if num_models > 1:
2136 |                 summary_table.add_row(
2137 |                     "[dim]Subtotal[/dim]",
2138 |                     f"[dim]{provider}[/dim]",
2139 |                     f"[dim]{provider_total_calls:,}[/dim]",
2140 |                     f"[dim]{provider_total_input:,}[/dim]",
2141 |                     f"[dim]{provider_total_output:,}[/dim]",
2142 |                     f"[dim]{provider_total_tokens:,}[/dim]",
2143 |                     f"[dim]{provider_total_cost:.6f}[/dim]",
2144 |                     style="dim",
2145 |                     end_section=(provider != sorted_providers[-1])  # Add separator line unless it's the last provider
2146 |                 )
2147 |             elif provider != sorted_providers[-1]:
2148 |                 # Add separator if only one model but not the last provider
2149 |                 summary_table.add_row(end_section=True)
2150 | 
2151 | 
2152 |             grand_total_cost += provider_total_cost
2153 |             grand_total_calls += provider_total_calls
2154 |             grand_total_input += provider_total_input
2155 |             grand_total_output += provider_total_output
2156 |             grand_total_tokens += provider_total_tokens
2157 | 
2158 |         # Update footer values by assigning directly to the table's Column objects
2159 |         summary_table.columns[2].footer = f"{grand_total_calls:,}"
2160 |         summary_table.columns[3].footer = f"{grand_total_input:,}"
2161 |         summary_table.columns[4].footer = f"{grand_total_output:,}"
2162 |         summary_table.columns[5].footer = f"{grand_total_tokens:,}"
2163 |         summary_table.columns[6].footer = f"{grand_total_cost:.6f}"
2164 | 
2165 |         output.print(summary_table)
2166 |         
2167 |         # Display cost limit information if set
2168 |         if self.limit is not None:
2169 |             limit_color = "green" if self.total_cost < self.limit else "red"
2170 |             output.print(f"[{limit_color}]Cost limit: ${self.limit:.2f} | Current usage: ${self.total_cost:.2f} ({(self.total_cost/self.limit*100):.1f}%)[/{limit_color}]")
2171 |         
2172 |         output.print() # Add a blank line after the table
2173 | 
2174 |     def display_costs(self, console: Optional[Console] = None, title: str = "Total Demo Cost Summary"):
2175 |         """
2176 |         Alias for display_summary for backward compatibility.
2177 |         
2178 |         This method provides a backward-compatible interface for legacy code
2179 |         that might be calling display_costs() instead of display_summary().
2180 |         
2181 |         Args:
2182 |             console: Console instance to use for display
2183 |             title: Custom title for the summary report
2184 |             
2185 |         Returns:
2186 |             Same as display_summary()
2187 |         """
2188 |         return self.display_summary(console_instance=console, title=title)
```
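The docstrings above already carry per-method examples; the sketch below simply strings this page's public surface together end to end. It assumes the enclosing class is named `CostTracker` and that its constructor accepts an optional `limit` argument, neither of which is visible on this page, so treat the instantiation itself as hypothetical.

```python
from rich.console import Console

# Assumption: the class name and constructor signature are not shown on this page,
# so `CostTracker(limit=...)` is a hypothetical way to build the tracker above.
tracker = CostTracker(limit=5.00)

# Record a flat fee that is not tied to a single API call; the description is
# folded into the model label, e.g. "gpt-4-turbo (Batch Processing)".
tracker.add_custom_cost(
    "Batch Processing",
    "openai",
    "gpt-4-turbo",
    0.25,
    input_tokens=5000,
    output_tokens=1200,
)

# Render the per-provider / per-model breakdown, including the cost-limit line.
tracker.display_summary(
    console_instance=Console(width=100),
    title="AI Generation Cost Report",
)

# Legacy call sites can keep using the backward-compatible alias.
tracker.display_costs(title="AI Generation Cost Report")
```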