#
tokens: 45821/50000 3/207 files (page 18/45)
lines: on (toggle) GitHub
raw markdown copy reset
This is page 18 of 45. Use http://codebase.md/dicklesworthstone/llm_gateway_mcp_server?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .cursorignore
├── .env.example
├── .envrc
├── .gitignore
├── additional_features.md
├── check_api_keys.py
├── completion_support.py
├── comprehensive_test.py
├── docker-compose.yml
├── Dockerfile
├── empirically_measured_model_speeds.json
├── error_handling.py
├── example_structured_tool.py
├── examples
│   ├── __init__.py
│   ├── advanced_agent_flows_using_unified_memory_system_demo.py
│   ├── advanced_extraction_demo.py
│   ├── advanced_unified_memory_system_demo.py
│   ├── advanced_vector_search_demo.py
│   ├── analytics_reporting_demo.py
│   ├── audio_transcription_demo.py
│   ├── basic_completion_demo.py
│   ├── cache_demo.py
│   ├── claude_integration_demo.py
│   ├── compare_synthesize_demo.py
│   ├── cost_optimization.py
│   ├── data
│   │   ├── sample_event.txt
│   │   ├── Steve_Jobs_Introducing_The_iPhone_compressed.md
│   │   └── Steve_Jobs_Introducing_The_iPhone_compressed.mp3
│   ├── docstring_refiner_demo.py
│   ├── document_conversion_and_processing_demo.py
│   ├── entity_relation_graph_demo.py
│   ├── filesystem_operations_demo.py
│   ├── grok_integration_demo.py
│   ├── local_text_tools_demo.py
│   ├── marqo_fused_search_demo.py
│   ├── measure_model_speeds.py
│   ├── meta_api_demo.py
│   ├── multi_provider_demo.py
│   ├── ollama_integration_demo.py
│   ├── prompt_templates_demo.py
│   ├── python_sandbox_demo.py
│   ├── rag_example.py
│   ├── research_workflow_demo.py
│   ├── sample
│   │   ├── article.txt
│   │   ├── backprop_paper.pdf
│   │   ├── buffett.pdf
│   │   ├── contract_link.txt
│   │   ├── legal_contract.txt
│   │   ├── medical_case.txt
│   │   ├── northwind.db
│   │   ├── research_paper.txt
│   │   ├── sample_data.json
│   │   └── text_classification_samples
│   │       ├── email_classification.txt
│   │       ├── news_samples.txt
│   │       ├── product_reviews.txt
│   │       └── support_tickets.txt
│   ├── sample_docs
│   │   └── downloaded
│   │       └── attention_is_all_you_need.pdf
│   ├── sentiment_analysis_demo.py
│   ├── simple_completion_demo.py
│   ├── single_shot_synthesis_demo.py
│   ├── smart_browser_demo.py
│   ├── sql_database_demo.py
│   ├── sse_client_demo.py
│   ├── test_code_extraction.py
│   ├── test_content_detection.py
│   ├── test_ollama.py
│   ├── text_classification_demo.py
│   ├── text_redline_demo.py
│   ├── tool_composition_examples.py
│   ├── tournament_code_demo.py
│   ├── tournament_text_demo.py
│   ├── unified_memory_system_demo.py
│   ├── vector_search_demo.py
│   ├── web_automation_instruction_packs.py
│   └── workflow_delegation_demo.py
├── LICENSE
├── list_models.py
├── marqo_index_config.json.example
├── mcp_protocol_schema_2025-03-25_version.json
├── mcp_python_lib_docs.md
├── mcp_tool_context_estimator.py
├── model_preferences.py
├── pyproject.toml
├── quick_test.py
├── README.md
├── resource_annotations.py
├── run_all_demo_scripts_and_check_for_errors.py
├── storage
│   └── smart_browser_internal
│       ├── locator_cache.db
│       ├── readability.js
│       └── storage_state.enc
├── test_client.py
├── test_connection.py
├── TEST_README.md
├── test_sse_client.py
├── test_stdio_client.py
├── tests
│   ├── __init__.py
│   ├── conftest.py
│   ├── integration
│   │   ├── __init__.py
│   │   └── test_server.py
│   ├── manual
│   │   ├── test_extraction_advanced.py
│   │   └── test_extraction.py
│   └── unit
│       ├── __init__.py
│       ├── test_cache.py
│       ├── test_providers.py
│       └── test_tools.py
├── TODO.md
├── tool_annotations.py
├── tools_list.json
├── ultimate_mcp_banner.webp
├── ultimate_mcp_logo.webp
├── ultimate_mcp_server
│   ├── __init__.py
│   ├── __main__.py
│   ├── cli
│   │   ├── __init__.py
│   │   ├── __main__.py
│   │   ├── commands.py
│   │   ├── helpers.py
│   │   └── typer_cli.py
│   ├── clients
│   │   ├── __init__.py
│   │   ├── completion_client.py
│   │   └── rag_client.py
│   ├── config
│   │   └── examples
│   │       └── filesystem_config.yaml
│   ├── config.py
│   ├── constants.py
│   ├── core
│   │   ├── __init__.py
│   │   ├── evaluation
│   │   │   ├── base.py
│   │   │   └── evaluators.py
│   │   ├── providers
│   │   │   ├── __init__.py
│   │   │   ├── anthropic.py
│   │   │   ├── base.py
│   │   │   ├── deepseek.py
│   │   │   ├── gemini.py
│   │   │   ├── grok.py
│   │   │   ├── ollama.py
│   │   │   ├── openai.py
│   │   │   └── openrouter.py
│   │   ├── server.py
│   │   ├── state_store.py
│   │   ├── tournaments
│   │   │   ├── manager.py
│   │   │   ├── tasks.py
│   │   │   └── utils.py
│   │   └── ums_api
│   │       ├── __init__.py
│   │       ├── ums_database.py
│   │       ├── ums_endpoints.py
│   │       ├── ums_models.py
│   │       └── ums_services.py
│   ├── exceptions.py
│   ├── graceful_shutdown.py
│   ├── services
│   │   ├── __init__.py
│   │   ├── analytics
│   │   │   ├── __init__.py
│   │   │   ├── metrics.py
│   │   │   └── reporting.py
│   │   ├── cache
│   │   │   ├── __init__.py
│   │   │   ├── cache_service.py
│   │   │   ├── persistence.py
│   │   │   ├── strategies.py
│   │   │   └── utils.py
│   │   ├── cache.py
│   │   ├── document.py
│   │   ├── knowledge_base
│   │   │   ├── __init__.py
│   │   │   ├── feedback.py
│   │   │   ├── manager.py
│   │   │   ├── rag_engine.py
│   │   │   ├── retriever.py
│   │   │   └── utils.py
│   │   ├── prompts
│   │   │   ├── __init__.py
│   │   │   ├── repository.py
│   │   │   └── templates.py
│   │   ├── prompts.py
│   │   └── vector
│   │       ├── __init__.py
│   │       ├── embeddings.py
│   │       └── vector_service.py
│   ├── tool_token_counter.py
│   ├── tools
│   │   ├── __init__.py
│   │   ├── audio_transcription.py
│   │   ├── base.py
│   │   ├── completion.py
│   │   ├── docstring_refiner.py
│   │   ├── document_conversion_and_processing.py
│   │   ├── enhanced-ums-lookbook.html
│   │   ├── entity_relation_graph.py
│   │   ├── excel_spreadsheet_automation.py
│   │   ├── extraction.py
│   │   ├── filesystem.py
│   │   ├── html_to_markdown.py
│   │   ├── local_text_tools.py
│   │   ├── marqo_fused_search.py
│   │   ├── meta_api_tool.py
│   │   ├── ocr_tools.py
│   │   ├── optimization.py
│   │   ├── provider.py
│   │   ├── pyodide_boot_template.html
│   │   ├── python_sandbox.py
│   │   ├── rag.py
│   │   ├── redline-compiled.css
│   │   ├── sentiment_analysis.py
│   │   ├── single_shot_synthesis.py
│   │   ├── smart_browser.py
│   │   ├── sql_databases.py
│   │   ├── text_classification.py
│   │   ├── text_redline_tools.py
│   │   ├── tournament.py
│   │   ├── ums_explorer.html
│   │   └── unified_memory_system.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── async_utils.py
│   │   ├── display.py
│   │   ├── logging
│   │   │   ├── __init__.py
│   │   │   ├── console.py
│   │   │   ├── emojis.py
│   │   │   ├── formatter.py
│   │   │   ├── logger.py
│   │   │   ├── panels.py
│   │   │   ├── progress.py
│   │   │   └── themes.py
│   │   ├── parse_yaml.py
│   │   ├── parsing.py
│   │   ├── security.py
│   │   └── text.py
│   └── working_memory_api.py
├── unified_memory_system_technical_analysis.md
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/ultimate_mcp_server/utils/logging/logger.py:
--------------------------------------------------------------------------------

```python
   1 | """
   2 | Main Logger class for Gateway.
   3 | 
   4 | This module provides the central Logger class that integrates all Gateway logging
   5 | functionality with a beautiful, informative interface.
   6 | """
   7 | import logging
   8 | import sys
   9 | import time
  10 | from contextlib import contextmanager
  11 | from datetime import datetime
  12 | from functools import wraps
  13 | from typing import Any, Dict, List, Optional, Tuple, Union
  14 | 
  15 | from rich.console import Console
  16 | 
  17 | # Use relative imports for utils within the same package
  18 | from .console import console
  19 | from .emojis import get_emoji
  20 | from .formatter import (
  21 |     DetailedLogFormatter,
  22 |     RichLoggingHandler,
  23 |     SimpleLogFormatter,
  24 | )
  25 | from .panels import (
  26 |     CodePanel,
  27 |     ErrorPanel,
  28 |     HeaderPanel,
  29 |     InfoPanel,
  30 |     ResultPanel,
  31 |     ToolOutputPanel,
  32 |     WarningPanel,
  33 | )
  34 | from .progress import GatewayProgress
  35 | 
  36 | # Set up standard Python logging with our custom handler
  37 | # Logging configuration is handled externally via dictConfig
  38 | 
  39 | class Logger:
  40 |     """
  41 |     Advanced logging system with rich formatting, progress tracking, and structured output.
  42 |     
  43 |     The Logger class extends Python's standard logging system with enhanced features:
  44 |     
  45 |     Key Features:
  46 |     - Rich console output with color, emoji, and formatted panels
  47 |     - Component-based logging for better organization of log messages
  48 |     - Operation tracking with timing and progress visualization
  49 |     - Multi-level logging (debug, info, success, warning, error, critical)
  50 |     - Context data capture for more detailed debugging
  51 |     - Integrated progress bars and spinners for long-running operations
  52 |     - Special formatters for code blocks, results, errors, and warnings
  53 |     
  54 |     Integration with Python's logging:
  55 |     - Builds on top of the standard logging module
  56 |     - Compatible with external logging configuration (e.g., dictConfig)
  57 |     - Properly propagates logs to ensure they reach root handlers
  58 |     - Adds custom "extra" fields to standard LogRecord objects
  59 |     
  60 |     Usage Patterns:
  61 |     - Create loggers with get_logger() for consistent naming
  62 |     - Use component and operation parameters to organize related logs
  63 |     - Add context data as structured information with message
  64 |     - Use special display methods (code, warning_panel, etc.) for rich output
  65 |     - Track long operations with time_operation and progress tracking
  66 |     
  67 |     This logger is designed to make complex server operations more transparent,
  68 |     providing clear information for both developers and users of the Ultimate MCP Server.
  69 |     """
  70 |     
  71 |     def __init__(
  72 |         self,
  73 |         name: str = "ultimate", # Default logger name changed
  74 |         console: Optional[Console] = None,
  75 |         level: str = "info",
  76 |         show_timestamps: bool = True,
  77 |         component: Optional[str] = None,
  78 |         capture_output: bool = False,
  79 |     ):
  80 |         """Initialize the logger.
  81 |         
  82 |         Args:
  83 |             name: Logger name
  84 |             console: Rich console to use
  85 |             level: Initial log level
  86 |             show_timestamps: Whether to show timestamps in logs
  87 |             component: Default component name
  88 |             capture_output: Whether to capture and store log output
  89 |         """
  90 |         self.name = name
  91 |         # Use provided console or get global console, defaulting to stderr console
  92 |         if console is not None:
  93 |             self.console = console
  94 |         else:
  95 |             global_console = globals().get("console")
  96 |             if global_console is not None:
  97 |                 self.console = global_console
  98 |             else:
  99 |                 self.console = Console(file=sys.stderr)
 100 |                 
 101 |         self.level = level.lower()
 102 |         self.show_timestamps = show_timestamps
 103 |         self.component = component
 104 |         self.capture_output = capture_output
 105 |         
 106 |         # Create a standard Python logger
 107 |         self.python_logger = logging.getLogger(name)
 108 |         
 109 |         # Set up formatters
 110 |         self.simple_formatter = SimpleLogFormatter(show_time=show_timestamps, show_level=True, show_component=True)
 111 |         self.detailed_formatter = DetailedLogFormatter(show_time=show_timestamps, show_level=True, show_component=True)
 112 |         
 113 |         # Progress tracker
 114 |         self.progress = GatewayProgress(console=self.console)
 115 |         
 116 |         # Output capture if enabled
 117 |         self.captured_logs = [] if capture_output else None
 118 |         
 119 |         # Restore propagation to allow messages to reach root handlers
 120 |         # Make sure this is True so logs configured via dictConfig are passed up
 121 |         self.python_logger.propagate = True 
 122 |         
 123 |         # Set initial log level on the Python logger instance
 124 |         # Note: The effective level will be determined by the handler/root config
 125 |         self.set_level(level)
 126 |     
 127 |     def set_level(self, level: str) -> None:
 128 |         """Set the log level.
 129 |         
 130 |         Args:
 131 |             level: Log level (debug, info, warning, error, critical)
 132 |         """
 133 |         level = level.lower()
 134 |         self.level = level # Store the intended level for should_log checks
 135 |         
 136 |         # Map to Python logging levels
 137 |         level_map = {
 138 |             "debug": logging.DEBUG,
 139 |             "info": logging.INFO,
 140 |             "warning": logging.WARNING,
 141 |             "error": logging.ERROR,
 142 |             "critical": logging.CRITICAL,
 143 |         }
 144 |         
 145 |         python_level = level_map.get(level, logging.INFO)
 146 |         # Set level on the logger itself. Handlers might have their own levels.
 147 |         self.python_logger.setLevel(python_level)
 148 |     
 149 |     def get_level(self) -> str:
 150 |         """Get the current log level.
 151 |         
 152 |         Returns:
 153 |             Current log level
 154 |         """
 155 |         # Return the Python logger's effective level
 156 |         effective_level_num = self.python_logger.getEffectiveLevel()
 157 |         level_map_rev = {
 158 |             logging.DEBUG: "debug",
 159 |             logging.INFO: "info",
 160 |             logging.WARNING: "warning",
 161 |             logging.ERROR: "error",
 162 |             logging.CRITICAL: "critical",
 163 |         }
 164 |         return level_map_rev.get(effective_level_num, "info")
 165 | 
 166 |     
 167 |     def should_log(self, level: str) -> bool:
 168 |         """Check if a message at the given level should be logged based on Python logger's effective level.
 169 |         
 170 |         Args:
 171 |             level: Log level to check
 172 |             
 173 |         Returns:
 174 |             Whether messages at this level should be logged
 175 |         """
 176 |         level_map = {
 177 |             "debug": logging.DEBUG,
 178 |             "info": logging.INFO,
 179 |             "success": logging.INFO, # Map success to info for level check
 180 |             "warning": logging.WARNING,
 181 |             "error": logging.ERROR,
 182 |             "critical": logging.CRITICAL,
 183 |         }
 184 |         message_level_num = level_map.get(level.lower(), logging.INFO)
 185 |         return self.python_logger.isEnabledFor(message_level_num)
 186 | 
 187 |     
 188 |     def _log(
 189 |         self,
 190 |         level: str,
 191 |         message: str,
 192 |         component: Optional[str] = None,
 193 |         operation: Optional[str] = None,
 194 |         emoji: Optional[str] = None,
 195 |         emoji_key: Optional[str] = None,  # Add emoji_key parameter
 196 |         context: Optional[Dict[str, Any]] = None,
 197 |         use_detailed_formatter: bool = False, # This arg seems unused now?
 198 |         exception_info: Optional[Union[bool, Tuple]] = None,
 199 |         stack_info: bool = False,
 200 |         extra: Optional[Dict[str, Any]] = None,
 201 |     ) -> None:
 202 |         """Internal method to handle logging via the standard Python logging mechanism.
 203 |         
 204 |         Args:
 205 |             level: Log level
 206 |             message: Log message
 207 |             component: Gateway component (core, composite, analysis, etc.)
 208 |             operation: Operation being performed
 209 |             emoji: Custom emoji override
 210 |             emoji_key: Key to look up emoji from emoji map (alternative to emoji)
 211 |             context: Additional contextual data
 212 |             exception_info: Include exception info (True/False or tuple)
 213 |             stack_info: Include stack info
 214 |             extra: Dictionary passed as extra to logging framework
 215 |         """
 216 |         # Check if we should log at this level using standard Python logging check
 217 |         # No need for the custom should_log method here if using stdlib correctly
 218 |         
 219 |         # Map level name to Python level number
 220 |         level_map = {
 221 |             "debug": logging.DEBUG,
 222 |             "info": logging.INFO,
 223 |             "success": logging.INFO, # Log success as INFO
 224 |             "warning": logging.WARNING,
 225 |             "error": logging.ERROR,
 226 |             "critical": logging.CRITICAL,
 227 |         }
 228 |         level_num = level_map.get(level.lower(), logging.INFO)
 229 | 
 230 |         if not self.python_logger.isEnabledFor(level_num):
 231 |             return
 232 |             
 233 |         # Use default component if not provided
 234 |         component = component or self.component
 235 |         
 236 |         # If emoji_key is provided, use it to determine emoji
 237 |         if emoji_key and not emoji:
 238 |             emoji = get_emoji("operation", emoji_key)
 239 |             if emoji == "❓":  # If operation emoji not found
 240 |                 # Try level emojis
 241 |                 from .emojis import LEVEL_EMOJIS
 242 |                 emoji = LEVEL_EMOJIS.get(emoji_key, "❓")
 243 |         
 244 |         # Prepare 'extra' dict for LogRecord
 245 |         log_extra = {} if extra is None else extra.copy()  # Create a copy to avoid modifying the original
 246 |         
 247 |         # Remove any keys that conflict with built-in LogRecord attributes
 248 |         for reserved_key in ['message', 'asctime', 'exc_info', 'exc_text', 'lineno', 'funcName', 'created', 'levelname', 'levelno']:
 249 |             if reserved_key in log_extra:
 250 |                 del log_extra[reserved_key]
 251 |                 
 252 |         # Add our custom keys
 253 |         log_extra['component'] = component
 254 |         log_extra['operation'] = operation
 255 |         log_extra['custom_emoji'] = emoji
 256 |         log_extra['log_context'] = context # Use a different key to avoid collision
 257 |         log_extra['gateway_level'] = level # Pass the original level name if needed by formatter
 258 |         
 259 |         # Handle exception info
 260 |         exc_info = None
 261 |         if exception_info:
 262 |             if isinstance(exception_info, bool):
 263 |                 exc_info = sys.exc_info()
 264 |             else:
 265 |                 exc_info = exception_info # Assume it's a valid tuple
 266 | 
 267 |         # Log through Python's logging system
 268 |         self.python_logger.log(
 269 |             level=level_num,
 270 |             msg=message,
 271 |             exc_info=exc_info,
 272 |             stack_info=stack_info,
 273 |             extra=log_extra
 274 |         )
 275 |             
 276 |         # Capture if enabled
 277 |         if self.captured_logs is not None:
 278 |             self.captured_logs.append({
 279 |                 "level": level,
 280 |                 "message": message,
 281 |                 "component": component,
 282 |                 "operation": operation,
 283 |                 "timestamp": datetime.now().isoformat(),
 284 |                 "context": context,
 285 |             })
 286 | 
 287 |     # --- Standard Logging Methods --- 
 288 | 
 289 |     def debug(
 290 |         self,
 291 |         message: str,
 292 |         component: Optional[str] = None,
 293 |         operation: Optional[str] = None,
 294 |         context: Optional[Dict[str, Any]] = None,
 295 |         emoji_key: Optional[str] = None,
 296 |         **kwargs
 297 |     ) -> None:
 298 |         """Log a debug message."""
 299 |         self._log("debug", message, component, operation, context=context, emoji_key=emoji_key, extra=kwargs)
 300 | 
 301 |     def info(
 302 |         self,
 303 |         message: str,
 304 |         component: Optional[str] = None,
 305 |         operation: Optional[str] = None,
 306 |         context: Optional[Dict[str, Any]] = None,
 307 |         emoji_key: Optional[str] = None,
 308 |          **kwargs
 309 |     ) -> None:
 310 |         """Log an info message."""
 311 |         self._log("info", message, component, operation, context=context, emoji_key=emoji_key, extra=kwargs)
 312 | 
 313 |     def success(
 314 |         self,
 315 |         message: str,
 316 |         component: Optional[str] = None,
 317 |         operation: Optional[str] = None,
 318 |         context: Optional[Dict[str, Any]] = None,
 319 |         emoji_key: Optional[str] = None,
 320 |          **kwargs
 321 |     ) -> None:
 322 |         """Log a success message."""
 323 |         self._log("success", message, component, operation, context=context, emoji_key=emoji_key, extra=kwargs)
 324 | 
 325 |     def warning(
 326 |         self,
 327 |         message: str,
 328 |         component: Optional[str] = None,
 329 |         operation: Optional[str] = None,
 330 |         context: Optional[Dict[str, Any]] = None,
 331 |         emoji_key: Optional[str] = None,
 332 |         # details: Optional[List[str]] = None, # Details handled by panel methods
 333 |          **kwargs
 334 |     ) -> None:
 335 |         """Log a warning message."""
 336 |         self._log("warning", message, component, operation, context=context, emoji_key=emoji_key, extra=kwargs)
 337 | 
 338 |     def error(
 339 |         self,
 340 |         message: str,
 341 |         component: Optional[str] = None,
 342 |         operation: Optional[str] = None,
 343 |         context: Optional[Dict[str, Any]] = None,
 344 |         exception: Optional[Exception] = None,
 345 |         emoji_key: Optional[str] = None,
 346 |         # error_code: Optional[str] = None,
 347 |         # resolution_steps: Optional[List[str]] = None,
 348 |          **kwargs
 349 |     ) -> None:
 350 |         """Log an error message."""
 351 |         # Get the exception info tuple if an exception was provided
 352 |         exc_info = None
 353 |         if exception is not None:
 354 |             exc_info = (type(exception), exception, exception.__traceback__)
 355 |         elif 'exc_info' in kwargs:
 356 |             exc_info = kwargs.pop('exc_info')  # Remove from kwargs to prevent conflicts
 357 |         
 358 |         self._log("error", message, component, operation, context=context, 
 359 |                  exception_info=exc_info, emoji_key=emoji_key, extra=kwargs)
 360 | 
 361 |     def critical(
 362 |         self,
 363 |         message: str,
 364 |         component: Optional[str] = None,
 365 |         operation: Optional[str] = None,
 366 |         context: Optional[Dict[str, Any]] = None,
 367 |         exception: Optional[Exception] = None,
 368 |         emoji_key: Optional[str] = None,
 369 |         # error_code: Optional[str] = None, # Pass via context or kwargs
 370 |          **kwargs
 371 |     ) -> None:
 372 |         """Log a critical message."""
 373 |         # Get the exception info tuple if an exception was provided
 374 |         exc_info = None
 375 |         if exception is not None:
 376 |             exc_info = (type(exception), exception, exception.__traceback__)
 377 |         elif 'exc_info' in kwargs:
 378 |             exc_info = kwargs.pop('exc_info')  # Remove from kwargs to prevent conflicts
 379 |         
 380 |         self._log("critical", message, component, operation, context=context, 
 381 |                  exception_info=exc_info, emoji_key=emoji_key, extra=kwargs)
 382 | 
 383 |     # --- Rich Display Methods --- 
 384 |     # These methods use the console directly or generate renderables
 385 |     # They might bypass the standard logging flow, or log additionally
 386 | 
 387 |     def operation(
 388 |         self,
 389 |         operation: str,
 390 |         message: str,
 391 |         component: Optional[str] = None,
 392 |         level: str = "info",
 393 |         context: Optional[Dict[str, Any]] = None,
 394 |         **kwargs
 395 |     ) -> None:
 396 |         """Log an operation-specific message.
 397 |         
 398 |         Args:
 399 |             operation: Operation name
 400 |             message: Log message
 401 |             component: Gateway component
 402 |             level: Log level (default: info)
 403 |             context: Additional context
 404 |             **kwargs: Extra fields for logging
 405 |         """
 406 |         self._log(level, message, component, operation, context=context, extra=kwargs)
 407 | 
 408 |     def tool(
 409 |         self,
 410 |         tool: str,
 411 |         command: str,
 412 |         output: str,
 413 |         status: str = "success",
 414 |         duration: Optional[float] = None,
 415 |         component: Optional[str] = None,
 416 |         **kwargs
 417 |     ) -> None:
 418 |         """Display formatted output from a tool.
 419 |         
 420 |         Args:
 421 |             tool: Name of the tool
 422 |             command: Command executed
 423 |             output: Tool output
 424 |             status: Execution status (success, error)
 425 |             duration: Execution duration in seconds
 426 |             component: Gateway component
 427 |             **kwargs: Extra fields for logging
 428 |         """
 429 |         # Optionally log the event
 430 |         log_level = "error" if status == "error" else "debug"
 431 |         log_message = f"Tool '{tool}' finished (status: {status})"
 432 |         log_context = {"command": command, "output_preview": output[:100] + "..." if len(output) > 100 else output}
 433 |         if duration is not None:
 434 |             log_context["duration_s"] = duration
 435 |         self._log(log_level, log_message, component, operation=f"tool.{tool}", context=log_context, extra=kwargs)
 436 | 
 437 |         # Display the panel directly on the console
 438 |         panel = ToolOutputPanel(tool, command, output, status, duration)
 439 |         self.console.print(panel)
 440 | 
 441 |     def code(
 442 |         self,
 443 |         code: str,
 444 |         language: str = "python",
 445 |         title: Optional[str] = None,
 446 |         line_numbers: bool = True,
 447 |         highlight_lines: Optional[List[int]] = None,
 448 |         message: Optional[str] = None,
 449 |         component: Optional[str] = None,
 450 |         level: str = "debug",
 451 |         **kwargs
 452 |     ) -> None:
 453 |         """Display a code block.
 454 | 
 455 |         Args:
 456 |             code: Code string
 457 |             language: Language for syntax highlighting
 458 |             title: Optional title for the panel
 459 |             line_numbers: Show line numbers
 460 |             highlight_lines: Lines to highlight
 461 |             message: Optional message to log alongside displaying the code
 462 |             component: Gateway component
 463 |             level: Log level for the optional message (default: debug)
 464 |             **kwargs: Extra fields for logging
 465 |         """
 466 |         if message:
 467 |             self._log(level, message, component, context={"code_preview": code[:100] + "..." if len(code) > 100 else code}, extra=kwargs)
 468 | 
 469 |         # Display the panel directly
 470 |         panel = CodePanel(code, language, title, line_numbers, highlight_lines)
 471 |         self.console.print(panel)
 472 | 
 473 |     def display_results(
 474 |         self,
 475 |         title: str,
 476 |         results: Union[List[Dict[str, Any]], Dict[str, Any]],
 477 |         status: str = "success",
 478 |         component: Optional[str] = None,
 479 |         show_count: bool = True,
 480 |         compact: bool = False,
 481 |         message: Optional[str] = None,
 482 |         level: str = "info",
 483 |         **kwargs
 484 |     ) -> None:
 485 |         """Display results in a formatted panel.
 486 | 
 487 |         Args:
 488 |             title: Panel title
 489 |             results: Results data
 490 |             status: Status (success, warning, error)
 491 |             component: Gateway component
 492 |             show_count: Show count in title
 493 |             compact: Use compact format
 494 |             message: Optional message to log
 495 |             level: Log level for the optional message (default: info)
 496 |             **kwargs: Extra fields for logging
 497 |         """
 498 |         if message:
 499 |             self._log(level, message, component, context={"result_count": len(results) if isinstance(results, list) else 1, "status": status}, extra=kwargs)
 500 |             
 501 |         # Display the panel directly
 502 |         panel = ResultPanel(title, results, status, component, show_count, compact)
 503 |         self.console.print(panel)
 504 | 
 505 |     def section(
 506 |         self,
 507 |         title: str,
 508 |         subtitle: Optional[str] = None,
 509 |         component: Optional[str] = None,
 510 |     ) -> None:
 511 |         """Display a section header.
 512 | 
 513 |         Args:
 514 |             title: Section title
 515 |             subtitle: Optional subtitle
 516 |             component: Gateway component
 517 |         """
 518 |         # This is purely presentational, doesn't log typically
 519 |         panel = HeaderPanel(title, subtitle, component=component)
 520 |         self.console.print(panel)
 521 | 
 522 |     def info_panel(
 523 |         self,
 524 |         title: str,
 525 |         content: Union[str, List[str], Dict[str, Any]],
 526 |         icon: Optional[str] = None,
 527 |         style: str = "info",
 528 |         component: Optional[str] = None,
 529 |     ) -> None:
 530 |         """Display an informational panel.
 531 | 
 532 |         Args:
 533 |             title: Panel title
 534 |             content: Panel content
 535 |             icon: Optional icon
 536 |             style: Panel style
 537 |             component: Gateway component
 538 |         """
 539 |         # Could log the title/content summary if desired
 540 |         # self._log("info", f"Displaying info panel: {title}", component)
 541 |         panel = InfoPanel(title, content, icon, style)
 542 |         self.console.print(panel)
 543 | 
 544 |     def warning_panel(
 545 |         self,
 546 |         title: Optional[str] = None,
 547 |         message: str = "",
 548 |         details: Optional[List[str]] = None,
 549 |         component: Optional[str] = None,
 550 |     ) -> None:
 551 |         """Display a warning panel.
 552 | 
 553 |         Args:
 554 |             title: Optional panel title
 555 |             message: Warning message
 556 |             details: Optional list of detail strings
 557 |             component: Gateway component
 558 |         """
 559 |         # Log the warning separately
 560 |         log_title = title if title else "Warning"
 561 |         self.warning(f"{log_title}: {message}", component, context={"details": details})
 562 | 
 563 |         # Display the panel directly
 564 |         panel = WarningPanel(title, message, details)
 565 |         self.console.print(panel)
 566 | 
 567 |     def error_panel(
 568 |         self,
 569 |         title: Optional[str] = None,
 570 |         message: str = "",
 571 |         details: Optional[str] = None,
 572 |         resolution_steps: Optional[List[str]] = None,
 573 |         error_code: Optional[str] = None,
 574 |         component: Optional[str] = None,
 575 |         exception: Optional[Exception] = None,
 576 |     ) -> None:
 577 |         """Display an error panel.
 578 | 
 579 |         Args:
 580 |             title: Optional panel title
 581 |             message: Error message
 582 |             details: Optional detail string (e.g., traceback)
 583 |             resolution_steps: Optional list of resolution steps
 584 |             error_code: Optional error code
 585 |             component: Gateway component
 586 |             exception: Associated exception (for logging traceback)
 587 |         """
 588 |         # Log the error separately
 589 |         log_title = title if title else "Error"
 590 |         log_context = {
 591 |             "details": details,
 592 |             "resolution": resolution_steps,
 593 |             "error_code": error_code,
 594 |         }
 595 |         self.error(f"{log_title}: {message}", component, context=log_context, exception=exception)
 596 | 
 597 |         # Display the panel directly
 598 |         panel = ErrorPanel(title, message, details, resolution_steps, error_code)
 599 |         self.console.print(panel)
 600 | 
 601 |     # --- Context Managers & Decorators --- 
 602 | 
 603 |     @contextmanager
 604 |     def time_operation(
 605 |         self,
 606 |         operation: str,
 607 |         component: Optional[str] = None,
 608 |         level: str = "info",
 609 |         start_message: Optional[str] = "Starting {operation}...",
 610 |         end_message: Optional[str] = "Finished {operation} in {duration:.2f}s",
 611 |         **kwargs
 612 |     ):
 613 |         """
 614 |         Context manager that times an operation and logs its start and completion.
 615 |         
 616 |         This method provides a clean, standardized way to track and log the duration
 617 |         of operations, ensuring consistent timing measurement and log formatting.
 618 |         It automatically logs the start of an operation, executes the operation 
 619 |         within the context, measures the exact duration, and logs the completion 
 620 |         with timing information.
 621 |         
 622 |         The timing uses Python's monotonic clock for accurate duration measurement
 623 |         even if system time changes during execution. Both start and end messages
 624 |         support templating with format string syntax, allowing customization while
 625 |         maintaining consistency.
 626 |         
 627 |         Key features:
 628 |         - Precise operation timing with monotonic clock
 629 |         - Automatic logging at start and end of operations
 630 |         - Customizable message templates
 631 |         - Consistent log format and metadata
 632 |         - Exception-safe timing (duration is logged even if operation fails)
 633 |         - Hierarchical operation tracking when combined with component parameter
 634 |         
 635 |         Usage Examples:
 636 |         ```python
 637 |         # Basic usage
 638 |         with logger.time_operation("data_processing"):
 639 |             process_large_dataset()
 640 |             
 641 |         # Custom messages and different log level
 642 |         with logger.time_operation(
 643 |             operation="database_backup",
 644 |             component="storage",
 645 |             level="debug",
 646 |             start_message="Starting backup of {operation}...",
 647 |             end_message="Backup of {operation} completed in {duration:.3f}s"
 648 |         ):
 649 |             backup_database()
 650 |             
 651 |         # Timing nested operations with different components
 652 |         with logger.time_operation("parent_task", component="scheduler"):
 653 |             do_first_part()
 654 |             with logger.time_operation("child_task", component="worker"):
 655 |                 do_second_part()
 656 |             finish_task()
 657 |         ```
 658 |         
 659 |         Args:
 660 |             operation: Name of the operation being timed
 661 |             component: Component performing the operation (uses logger default if None)
 662 |             level: Log level for start/end messages (default: "info")
 663 |             start_message: Template string for operation start message 
 664 |                           (None to skip start logging)
 665 |             end_message: Template string for operation end message
 666 |                         (None to skip end logging)
 667 |             **kwargs: Additional fields to include in log entries
 668 |         
 669 |         Yields:
 670 |             None
 671 |         
 672 |         Note:
 673 |             This context manager is exception-safe: the end message with duration
 674 |             is logged even if an exception occurs within the context. Exceptions
 675 |             are re-raised normally after logging.
 676 |         """
 677 |         start_time = time.monotonic()
 678 |         if start_message:
 679 |             self._log(level, start_message.format(operation=operation), component, operation, extra=kwargs)
 680 |             
 681 |         try:
 682 |             yield
 683 |         finally:
 684 |             duration = time.monotonic() - start_time
 685 |             if end_message:
 686 |                 self._log(level, end_message.format(operation=operation, duration=duration), component, operation, context={"duration_s": duration}, extra=kwargs)
 687 | 
 688 |     def track(
 689 |         self,
 690 |         iterable: Any,
 691 |         description: str,
 692 |         name: Optional[str] = None,
 693 |         total: Optional[int] = None,
 694 |         parent: Optional[str] = None,
 695 |         # Removed component - handled by logger instance
 696 |     ) -> Any:
 697 |         """Track progress over an iterable using the logger's progress tracker.
 698 |         
 699 |         Args:
 700 |             iterable: Iterable to track
 701 |             description: Description of the task
 702 |             name: Optional task name (defaults to description)
 703 |             total: Optional total number of items
 704 |             parent: Optional parent task name
 705 |             
 706 |         Returns:
 707 |             The iterable wrapped with progress tracking
 708 |         """
 709 |         return self.progress.track(iterable, description, name, total, parent)
 710 | 
 711 |     @contextmanager
 712 |     def task(
 713 |         self,
 714 |         description: str,
 715 |         name: Optional[str] = None,
 716 |         total: int = 100,
 717 |         parent: Optional[str] = None,
 718 |         # Removed component - handled by logger instance
 719 |         autostart: bool = True,
 720 |     ):
 721 |         """
 722 |         Context manager for tracking and displaying progress of a task.
 723 |         
 724 |         This method creates a rich progress display for long-running tasks, providing
 725 |         visual feedback and real-time status updates. It integrates with rich's
 726 |         progress tracking to show animated spinners, completion percentage, and
 727 |         elapsed/remaining time.
 728 |         
 729 |         The task progress tracker is particularly useful for operations like:
 730 |         - File processing (uploads, downloads, parsing)
 731 |         - Batch database operations
 732 |         - Multi-step data processing pipelines
 733 |         - API calls with multiple sequential requests
 734 |         - Any operation where progress feedback improves user experience
 735 |         
 736 |         The progress display automatically adapts to terminal width and supports
 737 |         nested tasks with parent-child relationships, allowing for complex operation
 738 |         visualization. Progress can be updated manually within the context.
 739 |         
 740 |         Key Features:
 741 |         - Real-time progress visualization with percentage completion
 742 |         - Automatic elapsed and remaining time estimation
 743 |         - Support for nested tasks and task hierarchies
 744 |         - Customizable description and task identification
 745 |         - Thread-safe progress updates
 746 |         - Automatic completion on context exit
 747 |         
 748 |         Usage Examples:
 749 |         ```python
 750 |         # Basic usage - process 50 items
 751 |         with logger.task("Processing files", total=50) as task:
 752 |             for i, file in enumerate(files):
 753 |                 process_file(file)
 754 |                 task.update(advance=1)  # Increment progress by 1
 755 |         
 756 |         # Nested tasks with parent-child relationship
 757 |         with logger.task("Main import", total=100) as main_task:
 758 |             # Process users (contributes 30% to main task)
 759 |             with logger.task("Importing users", total=len(users), parent=main_task.id) as subtask:
 760 |                 for user in users:
 761 |                     import_user(user)
 762 |                     subtask.update(advance=1)
 763 |                 main_task.update(advance=30)  # Users complete = 30% of main task
 764 |                 
 765 |             # Process products (contributes 70% to main task)
 766 |             with logger.task("Importing products", total=len(products), parent=main_task.id) as subtask:
 767 |                 for product in products:
 768 |                     import_product(product)
 769 |                     subtask.update(advance=1)
 770 |                 main_task.update(advance=70)  # Products complete = 70% of main task
 771 |         ```
 772 |         
 773 |         Args:
 774 |             description: Human-readable description of the task
 775 |             name: Unique identifier for the task (defaults to description if None)
 776 |             total: Total number of steps/work units for completion (100%)
 777 |             parent: ID of parent task (for nested task hierarchies)
 778 |             autostart: Automatically start displaying progress (default: True)
 779 |         
 780 |         Yields:
 781 |             GatewayProgress instance that can be used to update progress
 782 |             
 783 |         Notes:
 784 |             - The yielded progress object has methods like `update(advance=N)` to 
 785 |               increment progress and `update(total=N)` to adjust the total units.
 786 |             - Tasks are automatically completed when the context exits, even if
 787 |               an exception occurs.
 788 |             - For tasks without a clear number of steps, you can use update with
 789 |               a percentage value: `task.update(completed=50)` for 50% complete.
 790 |         """
 791 |         with self.progress.task(description, name, total, parent, autostart) as task_context:
 792 |              yield task_context
 793 | 
 794 |     @contextmanager
 795 |     def catch_and_log(
 796 |         self,
 797 |         component: Optional[str] = None,
 798 |         operation: Optional[str] = None,
 799 |         reraise: bool = True,
 800 |         level: str = "error",
 801 |         message: str = "An error occurred during {operation}",
 802 |     ):
 803 |         """
 804 |         Context manager that catches, logs, and optionally re-raises exceptions.
 805 |         
 806 |         This utility provides structured exception handling with automatic logging,
 807 |         allowing code to maintain a consistent error handling pattern while ensuring
 808 |         all exceptions are properly logged with relevant context information. It's
 809 |         particularly useful for operations where you want to ensure errors are always
 810 |         recorded, even if they'll be handled or suppressed at a higher level.
 811 |         
 812 |         The context manager wraps a block of code and:
 813 |         1. Executes the code normally
 814 |         2. Catches any exceptions that occur
 815 |         3. Logs the exception with configurable component, operation, and message
 816 |         4. Optionally re-raises the exception (controlled by the reraise parameter)
 817 |         
 818 |         This prevents "silent failures" and ensures consistent logging of all errors
 819 |         while preserving the original exception's traceback for debugging purposes.
 820 |         
 821 |         Key features:
 822 |         - Standardized error logging across the application
 823 |         - Configurable log level for different error severities
 824 |         - Component and operation tagging for error categorization
 825 |         - Template-based error messages with operation name substitution
 826 |         - Control over exception propagation behavior
 827 |         
 828 |         Usage Examples:
 829 |         ```python
 830 |         # Basic usage - catch, log, and re-raise
 831 |         with logger.catch_and_log(component="auth", operation="login"):
 832 |             user = authenticate_user(username, password)
 833 |         
 834 |         # Suppress exception after logging
 835 |         with logger.catch_and_log(
 836 |             component="email", 
 837 |             operation="send_notification",
 838 |             reraise=False,
 839 |             level="warning",
 840 |             message="Failed to send notification email for {operation}"
 841 |         ):
 842 |             send_email(user.email, "Welcome!", template="welcome")
 843 |             
 844 |         # Use as a safety net around cleanup code
 845 |         try:
 846 |             # Main operation
 847 |             process_file(file_path)
 848 |         finally:
 849 |             # Always log errors in cleanup but don't let them mask the main exception
 850 |             with logger.catch_and_log(reraise=False, level="warning"):
 851 |                 os.remove(temp_file)
 852 |         ```
 853 |         
 854 |         Args:
 855 |             component: Component name for error categorization (uses logger default if None)
 856 |             operation: Operation name for context (substituted in message template)
 857 |             reraise: Whether to re-raise the caught exception (default: True)
 858 |             level: Log level to use for the error message (default: "error")
 859 |             message: Template string for the error message, with {operation} placeholder
 860 |         
 861 |         Yields:
 862 |             None
 863 |             
 864 |         Note:
 865 |             When reraise=False, exceptions are completely suppressed after logging.
 866 |             This can be useful for non-critical operations like cleanup tasks,
 867 |             but should be used carefully to avoid hiding important errors.
 868 |         """
 869 |         component = component or self.component
 870 |         operation = operation or "operation"
 871 |         try:
 872 |             yield
 873 |         except Exception:
 874 |             log_msg = message.format(operation=operation)
 875 |             self._log(level, log_msg, component, operation, exception_info=True)
 876 |             if reraise:
 877 |                 raise
 878 | 
 879 |     def log_call(
 880 |         self,
 881 |         component: Optional[str] = None,
 882 |         operation: Optional[str] = None,
 883 |         level: str = "debug",
 884 |         log_args: bool = True,
 885 |         log_result: bool = False,
 886 |         log_exceptions: bool = True,
 887 |     ):
 888 |         """
 889 |         Decorator that logs function entries, exits, timing, and exceptions.
 890 |         
 891 |         This decorator provides automatic instrumentation for function calls,
 892 |         generating standardized log entries when functions are called and when they 
 893 |         complete. It tracks execution time, captures function arguments and results,
 894 |         and properly handles and logs exceptions.
 895 |         
 896 |         When applied to a function, it will:
 897 |         1. Log when the function is entered, optionally including arguments
 898 |         2. Execute the function normally
 899 |         3. Track the exact execution time using a monotonic clock
 900 |         4. Log function completion with duration, optionally including the return value
 901 |         5. Catch, log, and re-raise any exceptions that occur
 902 |         
 903 |         This is particularly valuable for:
 904 |         - Debugging complex call flows and function interaction
 905 |         - Performance analysis and identifying slow function calls
 906 |         - Audit trails of function execution and parameters
 907 |         - Troubleshooting intermittent issues with full context
 908 |         - Standardizing logging across large codebases
 909 |         
 910 |         Configuration Options:
 911 |         - Logging level can be adjusted based on function importance
 912 |         - Function arguments can be optionally included or excluded (for privacy/size)
 913 |         - Return values can be optionally captured (for debugging/audit)
 914 |         - Exception handling can be customized
 915 |         - Component and operation names provide hierarchical organization
 916 |         
 917 |         Usage Examples:
 918 |         ```python
 919 |         # Basic usage - log entry and exit at debug level
 920 |         @logger.log_call()
 921 |         def process_data(item_id, options=None):
 922 |             # Function implementation...
 923 |             return result
 924 |             
 925 |         # Customized - log as info level, include specific operation name
 926 |         @logger.log_call(
 927 |             component="billing",
 928 |             operation="payment_processing",
 929 |             level="info"
 930 |         )
 931 |         def process_payment(payment_id, amount):
 932 |             # Process payment...
 933 |             return receipt_id
 934 |             
 935 |         # Capture return values but not arguments (e.g., for sensitive data)
 936 |         @logger.log_call(
 937 |             level="debug",
 938 |             log_args=False,
 939 |             log_result=True
 940 |         )
 941 |         def validate_credentials(username, password):
 942 |             # Validate credentials without logging the password
 943 |             return is_valid
 944 |             
 945 |         # Detailed debugging for critical components
 946 |         @logger.log_call(
 947 |             component="auth",
 948 |             operation="token_verification",
 949 |             level="debug",
 950 |             log_args=True,
 951 |             log_result=True,
 952 |             log_exceptions=True
 953 |         )
 954 |         def verify_auth_token(token):
 955 |             # Verify token with full logging
 956 |             return token_data
 957 |         ```
 958 |         
 959 |         Args:
 960 |             component: Component name for logs (defaults to logger's component)
 961 |             operation: Operation name for logs (defaults to function name)
 962 |             level: Log level for entry/exit messages (default: "debug")
 963 |             log_args: Whether to log function arguments (default: True)
 964 |             log_result: Whether to log function return value (default: False)
 965 |             log_exceptions: Whether to log exceptions (default: True)
 966 |             
 967 |         Returns:
 968 |             Decorated function that logs entry, exit, and timing information
 969 |             
 970 |         Notes:
 971 |             - For functions with large or sensitive arguments, set log_args=False
 972 |             - When log_result=True, be cautious with functions returning large data
 973 |               structures as they will be truncated but may still impact performance
 974 |             - This decorator preserves the original function's name, docstring,
 975 |               and signature for compatibility with introspection tools
 976 |         """
 977 |         
 978 |         def decorator(func):
 979 |             @wraps(func)
 980 |             def wrapper(*args, **kwargs):
 981 |                 # Determine operation name
 982 |                 op_name = operation or func.__name__
 983 |                 comp_name = component or self.component
 984 |                 
 985 |                 # Log entry
 986 |                 entry_msg = f"Entering {op_name}..."
 987 |                 context = {}
 988 |                 if log_args:
 989 |                     # Be careful logging args, could contain sensitive info or be large
 990 |                     try:
 991 |                         arg_repr = f"args={args!r}, kwargs={kwargs!r}"
 992 |                         context['args'] = arg_repr[:200] + '...' if len(arg_repr) > 200 else arg_repr
 993 |                     except Exception:
 994 |                         context['args'] = "<Could not represent args>"
 995 |                         
 996 |                 self._log(level, entry_msg, comp_name, op_name, context=context)
 997 |                 
 998 |                 start_time = time.monotonic()
 999 |                 try:
1000 |                     result = func(*args, **kwargs)
1001 |                     duration = time.monotonic() - start_time
1002 |                     
1003 |                     # Log exit
1004 |                     exit_msg = f"Exiting {op_name} (duration: {duration:.3f}s)"
1005 |                     exit_context = {"duration_s": duration}
1006 |                     if log_result:
1007 |                         try:
1008 |                             res_repr = repr(result)
1009 |                             exit_context['result'] = res_repr[:200] + '...' if len(res_repr) > 200 else res_repr
1010 |                         except Exception:
1011 |                            exit_context['result'] = "<Could not represent result>"
1012 |                             
1013 |                     self._log(level, exit_msg, comp_name, op_name, context=exit_context)
1014 |                     return result
1015 |                     
1016 |                 except Exception as e:
1017 |                     duration = time.monotonic() - start_time
1018 |                     if log_exceptions:
1019 |                         exc_level = "error" # Always log exceptions as error?
1020 |                         exc_msg = f"Exception in {op_name} after {duration:.3f}s: {e}"
1021 |                         exc_context = {"duration_s": duration}
1022 |                         if log_args: # Include args context if available
1023 |                            exc_context.update(context)
1024 |                            
1025 |                         self._log(exc_level, exc_msg, comp_name, op_name, exception_info=True, context=exc_context)
1026 |                     raise
1027 |                     
1028 |             return wrapper
1029 |         return decorator
1030 | 
1031 |     # --- Startup/Shutdown Methods --- 
1032 | 
1033 |     def startup(
1034 |         self,
1035 |         version: str,
1036 |         component: Optional[str] = None,
1037 |         mode: str = "standard",
1038 |         context: Optional[Dict[str, Any]] = None,
1039 |         **kwargs
1040 |     ) -> None:
1041 |         """Log server startup information.
1042 |         
1043 |         Args:
1044 |             version: Server version
1045 |             component: Component name (usually None for global startup)
1046 |             mode: Performance mode
1047 |             context: Additional startup context
1048 |             **kwargs: Extra fields for logging
1049 |         """
1050 |         message = f"Starting Server (Version: {version}, Mode: {mode})"
1051 |         emoji = get_emoji("system", "startup")
1052 |         self.info(message, component, operation="startup", emoji=emoji, context=context, **kwargs)
1053 | 
1054 |     def shutdown(
1055 |         self,
1056 |         component: Optional[str] = None,
1057 |         duration: Optional[float] = None,
1058 |         context: Optional[Dict[str, Any]] = None,
1059 |         **kwargs
1060 |     ) -> None:
1061 |         """Log server shutdown information.
1062 |         
1063 |         Args:
1064 |             component: Component name
1065 |             duration: Optional uptime duration
1066 |             context: Additional shutdown context
1067 |             **kwargs: Extra fields for logging
1068 |         """
1069 |         message = "Server Shutting Down"
1070 |         if duration is not None:
1071 |             message += f" (Uptime: {duration:.2f}s)"
1072 |         emoji = get_emoji("system", "shutdown")
1073 |         self.info(message, component, operation="shutdown", emoji=emoji, context=context, **kwargs)
1074 | 
# --- Global Convenience Functions --- 
# These use the global 'logger' instance created in __init__.py

# Module-level default Logger. Starts as None and is lazily initialized by
# get_logger() or by the first call to any convenience function below
# (debug/info/success/warning/error/critical/section).
logger = None  
1080 | 
def get_logger(name: str) -> Logger:
    """
    Get or create a logger instance for a specific component or module.

    This is the primary entry point for obtaining loggers throughout the
    application. The first call also initializes the module-level default
    logger (used by the global convenience functions such as ``info`` and
    ``error``); every call returns a Logger carrying the requested name.

    Note that this is NOT a per-name singleton: apart from the cached module
    default, each call constructs a fresh Logger instance. All instances
    share the underlying Python logging configuration (levels, formatting,
    handlers), so central configuration still applies to every one of them.

    Logger names should follow Python's dotted module-path convention, e.g.:
    - "ultimate_mcp_server.core.state_store"
    - "ultimate_mcp_server.services.rag"
    - "ultimate_mcp_server.tools.local_text"
    In most modules, simply pass ``__name__``.

    Args:
        name: Logger name identifying the component, module, or subsystem.

    Returns:
        A configured Logger instance with the specified name.

    Usage Examples:
    ```python
    # Standard usage in a module
    logger = get_logger(__name__)

    # Component-specific logger
    auth_logger = get_logger("ultimate_mcp_server.auth")

    # Structured logging
    logger = get_logger("my_module")
    logger.info("User action",
                component="auth",
                operation="login",
                context={"user_id": user.id})
    ```
    """
    global logger
    if logger is None:
        # First call: create the module default once and hand it back directly,
        # instead of building a second, immediately-discarded instance with
        # the same name (the previous implementation constructed two Loggers).
        logger = Logger(name)
        return logger

    # Subsequent calls get a fresh instance bound to the requested name.
    return Logger(name)
1142 | 
1143 | # Helper functions for global usage
def debug(
    message: str,
    component: Optional[str] = None,
    operation: Optional[str] = None,
    context: Optional[Dict[str, Any]] = None,
    emoji_key: Optional[str] = None,
    **kwargs
) -> None:
    """Log a debug message through the module-level default logger."""
    global logger
    # Lazily create the default logger on first use.
    if logger is None:
        logger = Logger(__name__)

    logger.debug(message, component, operation, context, emoji_key=emoji_key, **kwargs)
1159 | 
def info(
    message: str,
    component: Optional[str] = None,
    operation: Optional[str] = None,
    context: Optional[Dict[str, Any]] = None,
    emoji_key: Optional[str] = None,
    **kwargs
) -> None:
    """Log an info message through the module-level default logger."""
    global logger
    # Lazily create the default logger on first use.
    if logger is None:
        logger = Logger(__name__)

    logger.info(message, component, operation, context, emoji_key=emoji_key, **kwargs)
1175 | 
def success(
    message: str,
    component: Optional[str] = None,
    operation: Optional[str] = None,
    context: Optional[Dict[str, Any]] = None,
    emoji_key: Optional[str] = None,
    **kwargs
) -> None:
    """Log a success message through the module-level default logger."""
    global logger
    # Lazily create the default logger on first use.
    if logger is None:
        logger = Logger(__name__)

    logger.success(message, component, operation, context, emoji_key=emoji_key, **kwargs)
1191 | 
def warning(
    message: str,
    component: Optional[str] = None,
    operation: Optional[str] = None,
    context: Optional[Dict[str, Any]] = None,
    emoji_key: Optional[str] = None,
    **kwargs
) -> None:
    """Log a warning message through the module-level default logger."""
    global logger
    # Lazily create the default logger on first use.
    if logger is None:
        logger = Logger(__name__)

    logger.warning(message, component, operation, context, emoji_key=emoji_key, **kwargs)
1208 | 
def error(
    message: str,
    component: Optional[str] = None,
    operation: Optional[str] = None,
    context: Optional[Dict[str, Any]] = None,
    exception: Optional[Exception] = None,
    emoji_key: Optional[str] = None,
    **kwargs
) -> None:
    """Log an error message through the module-level default logger.

    Args:
        message: Error message text.
        component: Component name for categorization.
        operation: Operation name for context.
        context: Structured context fields.
        exception: Associated exception (logged with its traceback).
        emoji_key: Optional emoji key for the log prefix.
        **kwargs: Extra fields. An ``exc_info`` entry, if present, is only
            forwarded when it is not None, to avoid conflicting defaults.
    """
    global logger
    if logger is None:
        logger = Logger(__name__)

    # Pop exc_info so it cannot be passed twice; re-add it only when set.
    # (pop() with a default already covers the missing-key case, so the
    # previous membership pre-check was redundant.)
    exc_info = kwargs.pop('exc_info', None)
    if exc_info is not None:
        kwargs['exc_info'] = exc_info

    logger.error(message, component, operation, context,
                 exception=exception, emoji_key=emoji_key, **kwargs)
1232 | 
def critical(
    message: str,
    component: Optional[str] = None,
    operation: Optional[str] = None,
    context: Optional[Dict[str, Any]] = None,
    exception: Optional[Exception] = None,
    emoji_key: Optional[str] = None,
    **kwargs
) -> None:
    """Log a critical message through the module-level default logger.

    Args:
        message: Critical error message text.
        component: Component name for categorization.
        operation: Operation name for context.
        context: Structured context fields.
        exception: Associated exception (logged with its traceback).
        emoji_key: Optional emoji key for the log prefix.
        **kwargs: Extra fields. An ``exc_info`` entry, if present, is only
            forwarded when it is not None, to avoid conflicting defaults.
    """
    global logger
    if logger is None:
        logger = Logger(__name__)

    # Pop exc_info so it cannot be passed twice; re-add it only when set.
    # (pop() with a default already covers the missing-key case, so the
    # previous membership pre-check was redundant.)
    exc_info = kwargs.pop('exc_info', None)
    if exc_info is not None:
        kwargs['exc_info'] = exc_info

    logger.critical(message, component, operation, context,
                    exception=exception, emoji_key=emoji_key, **kwargs)
1255 | 
def section(
    title: str,
    subtitle: Optional[str] = None,
    component: Optional[str] = None,
) -> None:
    """Render a section header via the global logger's console."""
    global logger
    # Create the shared default logger on demand, like the level helpers do.
    if logger is None:
        logger = Logger(__name__)
    logger.section(title, subtitle, component)
1268 | 
# Example Usage (if run directly)
if __name__ == '__main__':
    # Standalone smoke test exercising the Logger API end to end.
    
    # Normally configuration happens via dictConfig in the main entry point.
    # For standalone testing, we attach a Rich handler manually so records
    # are rendered to the console.
    test_logger = Logger("test_logger", level="debug") # Create instance
    test_logger.python_logger.addHandler(RichLoggingHandler(console=console))
    # Need to prevent propagation if manually adding handler here for test,
    # otherwise root-logger handlers could emit duplicate lines.
    test_logger.python_logger.propagate = False 
    
    # Section header and lifecycle events.
    test_logger.section("Initialization", "Setting up components")
    test_logger.startup(version="1.0.0", mode="test")
    
    # One message per severity level, with component/operation/context tags.
    test_logger.debug("This is a debug message", component="core", operation="setup")
    test_logger.info("This is an info message", component="api")
    test_logger.success("Operation completed successfully", component="worker", operation="process_data")
    test_logger.warning("Something looks suspicious", component="cache", context={"key": "user:123"})
    
    # Error reporting with a real exception attached.
    try:
        x = 1 / 0
    except ZeroDivisionError as e:
        test_logger.error("An error occurred", component="math", operation="divide", exception=e)
        
    test_logger.critical("System unstable!", component="core", context={"reason": "disk full"})

    # Rich panel helpers for structured, boxed output.
    test_logger.info_panel("Configuration", {"host": "localhost", "port": 8013}, component="core")
    test_logger.warning_panel("Cache Alert", "Cache nearing capacity", details=["Size: 95MB", "Limit: 100MB"], component="cache")
    test_logger.error_panel("DB Connection Failed", "Could not connect to database", details="Connection timed out after 5s", resolution_steps=["Check DB server status", "Verify credentials"], error_code="DB500", component="db")

    # Tool-output and syntax-highlighted code rendering.
    test_logger.tool("grep", "grep 'error' log.txt", "line 1: error found\nline 5: error processing", status="success", duration=0.5, component="analysis")
    test_logger.code("def hello():\n  print('Hello')", language="python", title="Example Code", component="docs")

    # Timing helpers: context manager for a single timed operation...
    with test_logger.time_operation("long_process", component="worker"):
        time.sleep(0.5)
        
    # ...and a progress-tracked task with incremental updates.
    with test_logger.task("Processing items", total=10) as p:
        for _i in range(10):
            time.sleep(0.05)
            p.update_task(p.current_task_id, advance=1) # Assuming task context provides task_id

    # Decorator that logs the call (and, with log_result=True, the result).
    @test_logger.log_call(component="utils", log_result=True)
    def add_numbers(a, b):
        return a + b
    
    add_numbers(5, 3)
    
    test_logger.shutdown(duration=123.45)
1317 | 
__all__ = [
    "critical",
    "debug",
    "error",
    "get_logger",
    "info",
    "logger",  # Module-level default Logger instance
    "section",  # Section-header forwarder defined above, same as level helpers
    "success",  # Success-level forwarder, parallel to info/debug/warning
    "warning",
] 
```

--------------------------------------------------------------------------------
/ultimate_mcp_server/config.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Configuration management for Ultimate MCP Server.
  3 | 
  4 | Handles loading, validation, and access to configuration settings
  5 | from environment variables and config files.
  6 | """
  7 | import json
  8 | import logging
  9 | import os
 10 | import sys
 11 | from pathlib import Path
 12 | from typing import Any, Dict, List, Optional, Set
 13 | 
 14 | import yaml
 15 | from decouple import Config as DecoupleConfig
 16 | from decouple import RepositoryEnv, UndefinedValueError
 17 | from pydantic import BaseModel, Field, ValidationError, field_validator
 18 | 
 19 | # from pydantic_settings import BaseSettings, SettingsConfigDict # Removed BaseSettings
 20 | 
 21 | # --- Decouple Config Instance ---
 22 | # This will read from .env file and environment variables
 23 | decouple_config = DecoupleConfig(RepositoryEnv('.env'))
 24 | # --------------------------------
 25 | 
 26 | # Default configuration file paths (Adapt as needed)
 27 | DEFAULT_CONFIG_PATHS = [
 28 |     "./gateway_config.yaml",
 29 |     "./gateway_config.yml",
 30 |     "./gateway_config.json",
 31 |     "~/.config/ultimate_mcp_server/config.yaml",
 32 |     "~/.ultimate_mcp_server.yaml",
 33 | ]
 34 | 
 35 | # Environment variable prefix (still potentially useful for non-secret env vars)
 36 | ENV_PREFIX = "GATEWAY_"
 37 | 
 38 | # Global configuration instance
 39 | _config = None
 40 | 
 41 | # Basic logger for config loading issues before full logging is set up
 42 | config_logger = logging.getLogger("ultimate_mcp_server.config")
 43 | handler = logging.StreamHandler(sys.stderr)
 44 | if not config_logger.hasHandlers():
 45 |     config_logger.addHandler(handler)
 46 |     config_logger.setLevel(logging.INFO)
 47 | 
 48 | 
 49 | class ServerConfig(BaseModel):
 50 |     """
 51 |     HTTP server configuration settings for the Ultimate MCP Server.
 52 |     
 53 |     This configuration class defines the core server parameters including network binding,
 54 |     performance settings, debugging options, and server identity information. It controls
 55 |     how the Ultimate MCP Server presents itself on the network and manages HTTP connections,
 56 |     especially when running in SSE (Server-Sent Events) mode.
 57 |     
 58 |     Settings defined here affect:
 59 |     - Where and how the server listens for connections (host, port)
 60 |     - How many concurrent workers are spawned to handle requests
 61 |     - Cross-origin resource sharing (CORS) for web clients
 62 |     - Logging verbosity level
 63 |     - Debug capabilities for development
 64 |     
 65 |     Most of these settings can be overridden at startup using environment variables
 66 |     or command-line arguments when launching the server.
 67 |     
 68 |     All values have sensible defaults suitable for local development. For production
 69 |     deployments, it's recommended to adjust host, port, workers, and CORS settings
 70 |     based on your specific requirements.
 71 |     """
 72 |     name: str = Field("Ultimate MCP Server", description="Name of the server")
 73 |     host: str = Field("127.0.0.1", description="Host to bind the server to")
 74 |     port: int = Field(8013, description="Port to bind the server to") # Default port changed
 75 |     workers: int = Field(1, description="Number of worker processes")
 76 |     debug: bool = Field(False, description="Enable debug mode (affects reload)")
 77 |     cors_origins: List[str] = Field(default_factory=lambda: ["*"], description="CORS allowed origins") # Use default_factory for mutable defaults
 78 |     log_level: str = Field("info", description="Logging level (debug, info, warning, error, critical)")
 79 |     version: str = Field("0.1.0", description="Server version (from config, not package)")
 80 | 
 81 |     @field_validator('log_level')
 82 |     @classmethod
 83 |     def validate_log_level(cls, v):
 84 |         """
 85 |         Validate and normalize the log level configuration value.
 86 |         
 87 |         This validator ensures that the log_level field contains a valid logging level string.
 88 |         It performs two key functions:
 89 |         
 90 |         1. Validation: Checks that the provided value is one of the allowed logging levels
 91 |            (debug, info, warning, error, critical). If the value is invalid, it raises a
 92 |            ValidationError with a clear message listing the allowed values.
 93 |         
 94 |         2. Normalization: Converts the input to lowercase to ensure consistent handling
 95 |            regardless of how the value was specified in configuration sources. This allows
 96 |            users to specify the level in any case (e.g., "INFO", "info", "Info") and have
 97 |            it properly normalized.
 98 |         
 99 |         Args:
100 |             v: The raw log_level value from the configuration source (file, env var, etc.)
101 |                
102 |         Returns:
103 |             str: The validated and normalized (lowercase) log level string
104 |             
105 |         Raises:
106 |             ValueError: If the provided value is not one of the allowed logging levels
107 |             
108 |         Example:
109 |             >>> ServerConfig.validate_log_level("INFO")
110 |             'info'
111 |             >>> ServerConfig.validate_log_level("warning")
112 |             'warning'
113 |             >>> ServerConfig.validate_log_level("invalid")
114 |             ValueError: Log level must be one of ['debug', 'info', 'warning', 'error', 'critical']
115 |         """
116 |         allowed = ['debug', 'info', 'warning', 'error', 'critical']
117 |         level_lower = v.lower()
118 |         if level_lower not in allowed:
119 |             raise ValueError(f"Log level must be one of {allowed}")
120 |         return level_lower
121 | 
class CacheConfig(BaseModel):
    """
    Configuration for the server's response caching layer.

    The cache sits between tool calls and provider APIs, intercepting
    duplicate requests (or, with fuzzy matching enabled, near-duplicate
    ones) and serving stored results. This cuts API costs and latency for
    repeated expensive operations such as identical LLM completions.
    Entries are bounded by `max_entries`, expire after `ttl` seconds, and
    can optionally be persisted under `directory`. Tune these values to
    your traffic patterns and memory budget.
    """
    enabled: bool = Field(True, description="Whether caching is enabled")
    ttl: int = Field(3600, description="Time-to-live for cache entries in seconds")
    max_entries: int = Field(10000, description="Maximum number of entries to store in cache")
    directory: Optional[str] = Field(None, description="Directory for cache persistence")
    fuzzy_match: bool = Field(True, description="Whether to use fuzzy matching for cache keys")
152 | 
class ProviderConfig(BaseModel):
    """
    Connection settings for a single LLM provider (OpenAI, Anthropic, ...).

    Captures everything needed to talk to one provider service: the API
    key (best supplied via environment variables, not config files), an
    optional custom `base_url` and `organization` for routing, the default
    model used when a request doesn't name one, token/timeout limits, and
    a free-form `additional_params` dict for provider-specific extras.
    Each provider gets its own instance of this model, so providers are
    configured independently; environment variables take precedence over
    file-based values when both are present.
    """
    enabled: bool = Field(True, description="Whether the provider is enabled")
    api_key: Optional[str] = Field(None, description="API key for the provider (loaded via decouple)")
    base_url: Optional[str] = Field(None, description="Base URL for API requests (loaded via decouple/file)")
    organization: Optional[str] = Field(None, description="Organization identifier (loaded via decouple/file)")
    default_model: Optional[str] = Field(None, description="Default model to use (loaded via decouple/file)")
    max_tokens: Optional[int] = Field(None, description="Maximum tokens for completions")
    timeout: Optional[float] = Field(30.0, description="Timeout for API requests in seconds")
    additional_params: Dict[str, Any] = Field(default_factory=dict, description="Additional provider-specific parameters (loaded via decouple/file)")
190 | 
class ProvidersConfig(BaseModel):
    """
    Container aggregating the per-provider configurations.

    Each supported provider is exposed as an attribute holding its own
    ProviderConfig (e.g. ``config.providers.openai.api_key``), which gives
    dot-notation access, easy iteration over all providers, and a single
    place the configuration loader updates when merging values from files
    and environment variables. The attribute set below is the definitive
    registry of supported provider integrations.
    """
    openai: ProviderConfig = Field(default_factory=ProviderConfig, description="OpenAI provider configuration")
    anthropic: ProviderConfig = Field(default_factory=ProviderConfig, description="Anthropic provider configuration")
    deepseek: ProviderConfig = Field(default_factory=ProviderConfig, description="DeepSeek provider configuration")
    gemini: ProviderConfig = Field(default_factory=ProviderConfig, description="Gemini provider configuration")
    openrouter: ProviderConfig = Field(default_factory=ProviderConfig, description="OpenRouter provider configuration")
    grok: ProviderConfig = Field(default_factory=ProviderConfig, description="Grok (xAI) provider configuration")
    ollama: ProviderConfig = Field(default_factory=ProviderConfig, description="Ollama provider configuration")
223 | 
class FilesystemProtectionConfig(BaseModel):
    """Configuration for filesystem protection heuristics.

    Tunes the safety checks guarding bulk filesystem operations (used below
    for deletion and modification protection): once an operation touches
    more than `max_files_threshold` files, a detailed check compares
    timestamp variance and file-extension diversity against the thresholds
    here, tolerating up to `max_stat_errors_pct` failed stat calls.
    """
    enabled: bool = Field(True, description="Enable protection checks for this operation")
    max_files_threshold: int = Field(50, description="Trigger detailed check above this many files")
    datetime_stddev_threshold_sec: float = Field(60 * 60 * 24 * 30, description="Timestamp variance threshold (seconds)")  # default: ~30 days
    file_type_variance_threshold: int = Field(5, description="File extension variance threshold")
    max_stat_errors_pct: float = Field(10.0, description="Max percentage of failed stat calls allowed during check")
231 | 
class FilesystemConfig(BaseModel):
    """Configuration for filesystem tools.

    `allowed_directories` is the whitelist of absolute paths the filesystem
    tools may access; the two protection sub-configs tune the heuristics
    that guard destructive deletion/modification operations, and the
    remaining fields bound text encoding and maximum read size.
    """
    allowed_directories: List[str] = Field(default_factory=list, description="List of absolute paths allowed for access")
    file_deletion_protection: FilesystemProtectionConfig = Field(default_factory=FilesystemProtectionConfig, description="Settings for deletion protection heuristics")
    file_modification_protection: FilesystemProtectionConfig = Field(default_factory=FilesystemProtectionConfig, description="Settings for modification protection heuristics (placeholder)")
    default_encoding: str = Field("utf-8", description="Default encoding for text file operations")
    max_read_size_bytes: int = Field(100 * 1024 * 1024, description="Maximum size for reading files") # 100MB example
239 | 
class AgentMemoryConfig(BaseModel):
    """Configuration for Cognitive and Agent Memory tool.

    Groups the tunables for the unified agent-memory SQLite store: database
    location and connection timeout, text-length and working-memory limits,
    relevance decay and importance weighting, semantic-search thresholds,
    per-level memory TTLs, embedding defaults, batched multi-tool support,
    and the SQLite PRAGMA statements applied for performance.
    """
    db_path: str = Field("unified_agent_memory.db", description="Path to the agent memory SQLite database")
    max_text_length: int = Field(64000, description="Maximum length for text fields (e.g., content, reasoning)")
    connection_timeout: float = Field(10.0, description="Database connection timeout in seconds")
    max_working_memory_size: int = Field(20, description="Maximum number of items in working memory")
    memory_decay_rate: float = Field(0.01, description="Decay rate for memory relevance per hour")
    importance_boost_factor: float = Field(1.5, description="Multiplier for explicitly marked important memories")
    similarity_threshold: float = Field(0.75, description="Default threshold for semantic similarity search")
    max_semantic_candidates: int = Field(500, description="Maximum candidates to consider in semantic search before scoring")

    # TTLs per level (in seconds): working=30min, episodic=7d, semantic=30d, procedural=90d
    ttl_working: int = Field(60 * 30, description="Default TTL for working memories (seconds)")
    ttl_episodic: int = Field(60 * 60 * 24 * 7, description="Default TTL for episodic memories (seconds)")
    ttl_semantic: int = Field(60 * 60 * 24 * 30, description="Default TTL for semantic memories (seconds)")
    ttl_procedural: int = Field(60 * 60 * 24 * 90, description="Default TTL for procedural memories (seconds)")

    # Embedding related (primarily for reference/defaults, service might override)
    default_embedding_model: str = Field("text-embedding-3-small", description="Default embedding model identifier")
    embedding_dimension: int = Field(1536, description="Expected dimension for the default embedding model")

    # Multi-tool support (for agents that make multiple UMS calls per turn)
    enable_batched_operations: bool = Field(True, description="Allow multiple tool calls per agent turn")
    max_tools_per_batch: int = Field(20, description="Maximum number of tools that can be called in a single batch")

    # SQLite Optimizations (Defined here, not env vars by default)
    sqlite_pragmas: List[str] = Field(
        default_factory=lambda: [
            "PRAGMA journal_mode=DELETE",
            "PRAGMA synchronous=NORMAL",
            "PRAGMA foreign_keys=ON",
            "PRAGMA temp_store=MEMORY",
            "PRAGMA cache_size=-32000", # ~32MB cache
            "PRAGMA mmap_size=2147483647", # Max mmap size
            "PRAGMA busy_timeout=30000", # 30 seconds busy timeout
        ],
        description="List of SQLite PRAGMA statements for optimization"
    )
278 | 
class ToolRegistrationConfig(BaseModel):
    """Configuration for tool registration.

    When `filter_enabled` is True, only tools listed in `included_tools`
    are registered (an empty list means include all), and any tool listed
    in `excluded_tools` is always skipped — exclusion takes precedence
    over inclusion.
    """
    filter_enabled: bool = Field(False, description="Whether to filter which tools are registered")
    included_tools: List[str] = Field(default_factory=list, description="List of tool names to include (empty means include all)")
    excluded_tools: List[str] = Field(default_factory=list, description="List of tool names to exclude (takes precedence over included_tools)")
284 | 
class SmartBrowserConfig(BaseModel):
    """Configuration specific to the Smart Browser tool.

    Covers browser pool behavior (tab count, per-tab and inactivity
    timeouts, headless/VNC modes), encrypted state storage, proxy and
    Vault access restrictions, and the tunables used by the enhanced
    element locator (page-map extraction limits, the LLM fallback model,
    and heuristic matching thresholds).
    """
    sb_state_key_b64: Optional[str] = Field(None, description="Base64 encoded AES key for state encryption (e.g., 'openssl rand -base64 32')")
    sb_max_tabs: int = Field(5, description="Max concurrent tabs in the pool")
    sb_tab_timeout: int = Field(300, description="Timeout for operations within a tab (seconds)")
    sb_inactivity_timeout: int = Field(600, description="Browser inactivity shutdown timeout (seconds)")
    headless_mode: bool = Field(True, description="Run browser in headless mode")
    vnc_enabled: bool = Field(False, description="Enable VNC server for headful mode")
    vnc_password: Optional[str] = Field(None, description="Password for VNC server (required if vnc_enabled=True)")
    proxy_pool_str: str = Field("", description="Semicolon-separated list of proxy URLs (e.g., 'http://user:pass@host:port;socks5://host2:port2')")
    proxy_allowed_domains_str: str = Field("*", description="Comma-separated domains allowed for proxy (e.g., '.google.com,.example.com', '*' for all)")
    vault_allowed_paths_str: str = Field("secret/data/,kv/data/", description="Comma-separated allowed Vault path prefixes (e.g., 'kv/data/myapp/,secret/data/shared/')")

    # Enhanced Locator Tunables
    max_widgets: int = Field(300, description="Max interactive elements extracted for page map")
    max_section_chars: int = Field(5000, description="Max chars for main text summary in page map")
    dom_fp_limit: int = Field(20000, description="Max chars used for DOM fingerprint calculation")
    llm_model_locator: str = Field("gpt-4o", description="LLM model used for locator fallback")
    retry_after_fail: int = Field(1, description="Number of LLM locator retries after initial failure")
    seq_cutoff: float = Field(0.72, description="SequenceMatcher cutoff for heuristic locator match")
    area_min: int = Field(400, description="Minimum pixel area (width*height) for elements in page map")
    high_risk_domains_set: Set[str] = Field( # Use set for direct comparison
        default_factory=lambda: { # Use factory for mutable default
            ".google.com", ".facebook.com", ".linkedin.com", ".glassdoor.com",
            ".instagram.com", ".twitter.com", ".x.com", ".reddit.com", ".amazon.com",
            ".ebay.com", ".ticketmaster.com", ".cloudflare.com", ".datadome.co",
            ".perimeterx.net", ".recaptcha.net", ".hcaptcha.com",
        },
        description="Set of domains considered high-risk for bot detection (influences jitter timing)",
    )

    # Validator for high_risk_domains_set (ensures leading dot)
    @field_validator('high_risk_domains_set', mode='before')
    @classmethod
    def normalize_high_risk_domains(cls, v):
        """Coerce str/list/set input into a lowercase set of dot-prefixed domains.

        Accepts a comma-separated string (as supplied via env vars or config
        files) or a list/set of domain strings; every entry is trimmed,
        lowercased, and given a leading dot so suffix matching works
        uniformly.

        Raises:
            ValueError: If the input is not a string, list, or set.
        """
        if isinstance(v, str): # Allow comma-separated string input from env/file
            domains = {d.strip().lower() for d in v.split(',') if d.strip()}
        elif isinstance(v, (list, set)):
            domains = {str(d).strip().lower() for d in v if str(d).strip()}
        else:
            raise ValueError("high_risk_domains_set must be a list, set, or comma-separated string")

        # Ensure leading dot for all domains
        normalized_domains = {d if d.startswith('.') else '.' + d for d in domains}
        return normalized_domains
330 |     
class GatewayConfig(BaseModel):
    """
    Root configuration model for the entire Ultimate MCP Server system.

    Aggregates every component-specific section — server (network/HTTP),
    providers (LLM credentials and connections), cache, filesystem rules,
    agent_memory, tool_registration, and smart_browser — along with a few
    top-level settings shared across components (default provider and the
    storage/log/prompt-template directories).

    Values are merged, in order, from model defaults, configuration files
    (YAML/JSON), environment variables, and command-line arguments where
    applicable. Application code accesses the resulting validated singleton
    through ``get_config()``:

        from ultimate_mcp_server.config import get_config

        config = get_config()
        server_port = config.server.port
        openai_api_key = config.providers.openai.api_key
        logs_dir = config.log_directory
    """
    server: ServerConfig = Field(default_factory=ServerConfig)
    providers: ProvidersConfig = Field(default_factory=ProvidersConfig)
    cache: CacheConfig = Field(default_factory=CacheConfig)
    filesystem: FilesystemConfig = Field(default_factory=FilesystemConfig)
    agent_memory: AgentMemoryConfig = Field(default_factory=AgentMemoryConfig)
    tool_registration: ToolRegistrationConfig = Field(default_factory=ToolRegistrationConfig)
    smart_browser: SmartBrowserConfig = Field(default_factory=SmartBrowserConfig)
    default_provider: str = Field("openai", description="Default LLM provider to use if unspecified (e.g., 'openai', 'anthropic')")

    storage_directory: str = Field("./storage", description="Directory for persistent storage")
    log_directory: str = Field("./logs", description="Directory for log files")
    prompt_templates_directory: str = Field("./prompt_templates", description="Directory containing prompt templates")
388 | 
def expand_path(path: str) -> str:
    """
    Resolve *path* into an absolute path, expanding "~" and environment variables.

    Expansion happens in the standard order: user home references first
    ("~" -> "/home/username"), then environment variables ("$VAR" -> its
    value), and finally the result is made absolute relative to the current
    working directory.

    Args:
        path: A possibly-relative path string that may contain "~" or "$VAR".

    Returns:
        The fully expanded absolute path as a string.

    Example:
        >>> expand_path("~/logs")          # doctest: +SKIP
        '/home/username/logs'
        >>> expand_path("$DATA_DIR/cache") # doctest: +SKIP
        '/var/data/cache'
    """
    return os.path.abspath(os.path.expandvars(os.path.expanduser(path)))
417 | 
def find_config_file() -> Optional[str]:
    """
    Locate the first existing configuration file among the default paths.

    Scans DEFAULT_CONFIG_PATHS in priority order (current directory first,
    then user config locations such as ~/.config/ultimate_mcp_server/),
    expanding each candidate with expand_path() before testing existence.
    Only existence is checked here — content validation happens when the
    file is actually loaded.

    Returns:
        The expanded path of the first configuration file found, or None
        if no candidate exists.
    """
    for candidate in DEFAULT_CONFIG_PATHS:
        try:
            resolved = expand_path(candidate)
            if os.path.isfile(resolved):
                config_logger.debug(f"Found config file: {resolved}")
                return resolved
        except Exception as e:
            # A failure to expand/stat one candidate shouldn't stop the scan.
            config_logger.debug(f"Could not check path {candidate}: {e}")
    config_logger.debug("No default config file found.")
    return None
450 | 
def load_config_from_file(path: str) -> Dict[str, Any]:
    """
    Load configuration data from a YAML or JSON file.

    This function reads and parses a configuration file into a Python dictionary.
    It automatically detects the file format based on the file extension:
    - .yaml/.yml: Parsed as YAML using PyYAML (safe_load)
    - .json: Parsed as JSON using Python's built-in json module

    The function performs several steps:
    1. Expands the path to resolve any home directory (~/...) or environment variables
    2. Verifies that the file exists
    3. Determines the appropriate parser based on file extension
    4. Reads and parses the file content
    5. Returns the parsed configuration as a dictionary

    Args:
        path: Path to the configuration file (can be relative or use ~/... or $VAR/...)

    Returns:
        Dictionary containing the parsed configuration data

    Raises:
        FileNotFoundError: If the configuration file doesn't exist
        ValueError: If the file has an unsupported format or contains invalid syntax
        RuntimeError: If there are other errors reading the file

    Note:
        If the file is empty or contains "null" in YAML, an empty dictionary is
        returned rather than None, ensuring consistent return type.
    """
    path = expand_path(path)
    if not os.path.isfile(path):
        raise FileNotFoundError(f"Configuration file not found: {path}")
    # Select the parser BEFORE entering the try block: previously the
    # "unsupported format" ValueError was raised inside the try and got
    # swallowed by the broad `except Exception`, which rewrapped it as
    # RuntimeError — contradicting the documented contract above.
    if path.endswith(('.yaml', '.yml')):
        parser = yaml.safe_load
    elif path.endswith('.json'):
        parser = json.load
    else:
        raise ValueError(f"Unsupported config format: {path}")
    config_logger.debug(f"Loading configuration from file: {path}")
    try:
        with open(path, 'r', encoding='utf-8') as f:
            config_data = parser(f)
        # Normalize empty/None documents to {} for a consistent return type.
        return config_data if config_data is not None else {}
    except (yaml.YAMLError, json.JSONDecodeError) as e:
        raise ValueError(f"Invalid format in {path}: {e}") from e
    except Exception as e:
        raise RuntimeError(f"Error reading {path}: {e}") from e
499 | 
def load_config(
    config_file_path: Optional[str] = None,
    load_default_files: bool = True,
) -> GatewayConfig:
    """
    Load, merge, and validate configuration from multiple sources with priority handling.
    
    This function implements the complete configuration loading process, combining settings
    from multiple sources according to their priority. It also handles path expansion,
    directory creation, and validation of the resulting configuration.
    
    Configuration Sources (in order of decreasing priority):
    1. Environment variables (via decouple) - Use GATEWAY_* prefix or provider-specific vars
    2. .env file variables (via decouple) - Same naming as environment variables
    3. YAML/JSON configuration file - If explicitly specified or found in default locations
    4. Default values defined in Pydantic models - Fallback when no other source specifies a value
    
    Special handling:
    - Provider API keys: Loaded from provider-specific environment variables
      (e.g., OPENAI_API_KEY, ANTHROPIC_API_KEY)
    - Directory paths: Automatically expanded and created if they don't exist
    - Validation: All configuration values are validated against their Pydantic models
    
    Args:
        config_file_path: Optional explicit path to a configuration file to load.
                         If provided, this file must exist and be valid YAML/JSON.
        load_default_files: Whether to search for configuration files in default locations
                           if config_file_path is not provided. Default: True
    
    Returns:
        GatewayConfig: A fully loaded and validated configuration object
        
    Raises:
        FileNotFoundError: If an explicitly specified config file doesn't exist
        ValueError: If the config file has invalid format or content
        RuntimeError: If other errors occur during loading
        
    Example:
        ```python
        # Load with defaults and environment variables
        config = load_config()
        
        # Load from a specific config file
        config = load_config(config_file_path="path/to/custom_config.yaml")
        
        # Load only from environment variables, ignoring config files
        config = load_config(load_default_files=False)
        ```
    """
    global _config
    file_config_data: Dict[str, Any] = {}

    # 1. Find and load config file (if specified or found)
    chosen_file_path = None
    if config_file_path:
        chosen_file_path = expand_path(config_file_path)
    elif load_default_files:
        chosen_file_path = find_config_file()

    if chosen_file_path and os.path.isfile(chosen_file_path):
        try:
            file_config_data = load_config_from_file(chosen_file_path)
            config_logger.info(f"Loaded base configuration from: {chosen_file_path}")
        except Exception as e:
            # A broken DEFAULT-location file is only a warning; an EXPLICITLY
            # requested file that fails to load is a hard error.
            config_logger.warning(f"Could not load config file {chosen_file_path}: {e}")
            if config_file_path:
                raise ValueError(f"Failed to load specified config: {chosen_file_path}") from e
    elif config_file_path:
         raise FileNotFoundError(f"Specified configuration file not found: {config_file_path}")

    # 2. Initialize GatewayConfig from Pydantic defaults and file data
    try:
        # Ensure nested keys exist before validation if loading from potentially incomplete file
        file_config_data.setdefault('server', {})
        file_config_data.setdefault('providers', {})
        file_config_data.setdefault('cache', {})
        file_config_data.setdefault('filesystem', {})
        file_config_data.setdefault('agent_memory', {})
        file_config_data.setdefault('tool_registration', {})
        file_config_data.setdefault('smart_browser', {})
        loaded_config = GatewayConfig.model_validate(file_config_data)
    except ValidationError as e:
        config_logger.error("Configuration validation failed during file/default loading:")
        config_logger.error(str(e))
        config_logger.warning("Falling back to default configuration before applying env vars.")
        loaded_config = GatewayConfig() # Fallback to defaults

    # 3. Use decouple to load/override settings from .env/environment variables
    #    Decouple handles checking env vars and .env file automatically.

    # --- Load Provider API Keys ---
    # Provider-specific env var names take precedence over keys from the file.
    provider_key_map = {
        "openai": "OPENAI_API_KEY",
        "anthropic": "ANTHROPIC_API_KEY",
        "deepseek": "DEEPSEEK_API_KEY",
        "gemini": "GEMINI_API_KEY",
        "openrouter": "OPENROUTER_API_KEY",
        "grok": "GROK_API_KEY",
    }
    for provider_name, env_var in provider_key_map.items():
        provider_conf = getattr(loaded_config.providers, provider_name, None)
        if provider_conf:
            api_key_from_env = decouple_config.get(env_var, default=None)
            if api_key_from_env:
                # Log whether this is an override or a first-time set, then apply.
                if provider_conf.api_key and provider_conf.api_key != api_key_from_env:
                    config_logger.debug(f"Overriding API key for {provider_name} from env/'.env'.")
                elif not provider_conf.api_key:
                    config_logger.debug(f"Setting API key for {provider_name} from env/'.env'.")
                provider_conf.api_key = api_key_from_env

    try:
        # Use the default defined in GatewayConfig as the fallback if env/file doesn't specify
        loaded_config.default_provider = decouple_config('DEFAULT_PROVIDER', default=loaded_config.default_provider)
        config_logger.debug(f"Set default provider: {loaded_config.default_provider}")
    except Exception as e:
        config_logger.warning(f"Could not load default provider from env: {e}. Using default '{loaded_config.default_provider}'.")

    # --- Load other Provider settings (base_url, default_model, org, specific headers) ---
    # Example for OpenRouter specific headers
    openrouter_conf = loaded_config.providers.openrouter
    try:
        # Use get() to avoid UndefinedValueError if not set
        http_referer = decouple_config.get('OPENROUTER_HTTP_REFERER', default=None)
        x_title = decouple_config.get('OPENROUTER_X_TITLE', default=None)
        if http_referer:
            openrouter_conf.additional_params['http_referer'] = http_referer
            config_logger.debug("Setting OpenRouter http_referer from env/'.env'.")
        if x_title:
            openrouter_conf.additional_params['x_title'] = x_title
            config_logger.debug("Setting OpenRouter x_title from env/'.env'.")
    except Exception as e: # Catch potential decouple issues
        config_logger.warning(f"Could not load optional OpenRouter headers from env: {e}")

    # --- Load Ollama Provider Settings ---
    ollama_conf = loaded_config.providers.ollama
    try:
        enabled_env = decouple_config.get('OLLAMA_ENABLED', default=None)
        if enabled_env is not None:
            # decouple returns strings; only the literal 'true' (case-insensitive) enables.
            ollama_conf.enabled = enabled_env.lower() == 'true'
            config_logger.debug(f"Setting Ollama enabled from env/'.env': {ollama_conf.enabled}")

        api_url_env = decouple_config.get('OLLAMA_API_URL', default=None)
        if api_url_env:
            ollama_conf.base_url = api_url_env
            config_logger.debug(f"Setting Ollama base_url from env/'.env': {ollama_conf.base_url}")

        default_model_env = decouple_config.get('OLLAMA_DEFAULT_MODEL', default=None)
        if default_model_env:
            ollama_conf.default_model = default_model_env
            config_logger.debug(f"Setting Ollama default_model from env/'.env': {ollama_conf.default_model}")

        request_timeout_env = decouple_config.get('OLLAMA_REQUEST_TIMEOUT', default=None)
        if request_timeout_env is not None:
            # int() may raise ValueError; caught by the broad except below.
            ollama_conf.timeout = int(request_timeout_env)
            config_logger.debug(f"Setting Ollama timeout from env/'.env': {ollama_conf.timeout}")
    except Exception as e:
        config_logger.warning(f"Could not load optional Ollama settings from env: {e}")

    # Example for generic provider settings like base_url, default_model, organization
    for provider_name in ["openai", "anthropic", "deepseek", "gemini", "openrouter", "grok", "ollama"]:
        provider_conf = getattr(loaded_config.providers, provider_name, None)
        if provider_conf:
            p_name_upper = provider_name.upper()
            try:
                base_url_env = decouple_config.get(f"{p_name_upper}_BASE_URL", default=None)
                if base_url_env:
                    provider_conf.base_url = base_url_env
                    config_logger.debug(f"Setting {provider_name} base_url from env/'.env'.")

                default_model_env = decouple_config.get(f"{p_name_upper}_DEFAULT_MODEL", default=None)
                if default_model_env:
                    provider_conf.default_model = default_model_env
                    config_logger.debug(f"Setting {provider_name} default_model from env/'.env'.")

                org_env = decouple_config.get(f"{p_name_upper}_ORGANIZATION", default=None)
                if org_env:
                    provider_conf.organization = org_env
                    config_logger.debug(f"Setting {provider_name} organization from env/'.env'.")

            except Exception as e:
                 config_logger.warning(f"Could not load optional settings for provider {provider_name} from env: {e}")


    # --- Load Server Port ---
    try:
        server_port_env = decouple_config.get('GATEWAY_SERVER_PORT', default=None)
        if server_port_env is not None:
            loaded_config.server.port = decouple_config('GATEWAY_SERVER_PORT', cast=int)
            config_logger.debug(f"Overriding server port from env: {loaded_config.server.port}")
    except (ValueError, UndefinedValueError) as e:
        config_logger.warning(f"Invalid or missing GATEWAY_SERVER_PORT env var: {e}. Using default/file value.")

    # --- Load Filesystem Allowed Directories ---
    # The env var value must be a JSON-encoded list of directory strings.
    allowed_dirs_env_var = "FILESYSTEM__ALLOWED_DIRECTORIES"
    try:
        allowed_dirs_env_value_str = decouple_config.get(allowed_dirs_env_var, default=None)
        if allowed_dirs_env_value_str is not None:
            try:
                allowed_dirs_from_env = json.loads(allowed_dirs_env_value_str)
                if isinstance(allowed_dirs_from_env, list):
                    if loaded_config.filesystem.allowed_directories:
                        config_logger.debug(f"Overriding filesystem.allowed_directories from env var {allowed_dirs_env_var}.")
                    else:
                        config_logger.debug(f"Setting filesystem.allowed_directories from env var {allowed_dirs_env_var}.")
                    loaded_config.filesystem.allowed_directories = allowed_dirs_from_env
                else:
                     config_logger.warning(f"Env var {allowed_dirs_env_var} did not contain a valid JSON list. Value ignored.")
            except json.JSONDecodeError:
                config_logger.warning(f"Failed to parse JSON from env var {allowed_dirs_env_var}. Value: '{allowed_dirs_env_value_str}'. Ignoring env var.")
    except Exception as e:
        config_logger.error(f"Error processing env var {allowed_dirs_env_var}: {e}", exc_info=True)

    # --- Load Agent Memory Settings ---
    agent_mem_conf = loaded_config.agent_memory # Get the config object
    try:
        agent_mem_conf.db_path = decouple_config('AGENT_MEMORY_DB_PATH', default=agent_mem_conf.db_path)
        agent_mem_conf.max_text_length = decouple_config('AGENT_MEMORY_MAX_TEXT_LENGTH', default=agent_mem_conf.max_text_length, cast=int)
        agent_mem_conf.connection_timeout = decouple_config('AGENT_MEMORY_CONNECTION_TIMEOUT', default=agent_mem_conf.connection_timeout, cast=float)
        agent_mem_conf.max_working_memory_size = decouple_config('AGENT_MEMORY_MAX_WORKING_SIZE', default=agent_mem_conf.max_working_memory_size, cast=int)
        # Load TTLs
        agent_mem_conf.ttl_working = decouple_config('AGENT_MEMORY_TTL_WORKING', default=agent_mem_conf.ttl_working, cast=int)
        agent_mem_conf.ttl_episodic = decouple_config('AGENT_MEMORY_TTL_EPISODIC', default=agent_mem_conf.ttl_episodic, cast=int)
        agent_mem_conf.ttl_semantic = decouple_config('AGENT_MEMORY_TTL_SEMANTIC', default=agent_mem_conf.ttl_semantic, cast=int)
        agent_mem_conf.ttl_procedural = decouple_config('AGENT_MEMORY_TTL_PROCEDURAL', default=agent_mem_conf.ttl_procedural, cast=int)
        # Load other parameters
        agent_mem_conf.memory_decay_rate = decouple_config('AGENT_MEMORY_DECAY_RATE', default=agent_mem_conf.memory_decay_rate, cast=float)
        agent_mem_conf.importance_boost_factor = decouple_config('AGENT_MEMORY_IMPORTANCE_BOOST', default=agent_mem_conf.importance_boost_factor, cast=float)
        agent_mem_conf.similarity_threshold = decouple_config('AGENT_MEMORY_SIMILARITY_THRESHOLD', default=agent_mem_conf.similarity_threshold, cast=float)
        agent_mem_conf.max_semantic_candidates = decouple_config('AGENT_MEMORY_MAX_SEMANTIC_CANDIDATES', default=agent_mem_conf.max_semantic_candidates, cast=int)
        # Load embedding defaults (mainly for reference)
        agent_mem_conf.default_embedding_model = decouple_config('AGENT_MEMORY_DEFAULT_EMBEDDING_MODEL', default=agent_mem_conf.default_embedding_model)
        agent_mem_conf.embedding_dimension = decouple_config('AGENT_MEMORY_EMBEDDING_DIMENSION', default=agent_mem_conf.embedding_dimension, cast=int)
        # Load multi-tool support settings
        def _cast_bool(value):
            # Lenient bool cast: accepts real bools, common truthy strings, anything else via bool().
            if isinstance(value, bool):
                return value
            if isinstance(value, str):
                return value.lower() in ('true', '1', 'yes', 'on')
            return bool(value)

        agent_mem_conf.enable_batched_operations = decouple_config('AGENT_MEMORY_ENABLE_BATCHED_OPERATIONS', default=agent_mem_conf.enable_batched_operations, cast=_cast_bool)
        agent_mem_conf.max_tools_per_batch = decouple_config('AGENT_MEMORY_MAX_TOOLS_PER_BATCH', default=agent_mem_conf.max_tools_per_batch, cast=int)

        config_logger.debug("Loaded agent memory settings from env/'.env' or defaults.")
    except (ValueError, UndefinedValueError) as e:
         config_logger.warning(f"Issue loading agent memory settings from env: {e}. Using Pydantic defaults.")
    except Exception as e:
        config_logger.error(f"Unexpected error loading agent memory settings: {e}", exc_info=True)

    # --- Load Prompt Templates Directory ---
    try:
        loaded_config.prompt_templates_directory = decouple_config('GATEWAY_PROMPT_TEMPLATES_DIR', default=loaded_config.prompt_templates_directory)
        config_logger.debug(f"Set prompt templates directory: {loaded_config.prompt_templates_directory}")
    except Exception as e:
        config_logger.warning(f"Could not load prompt templates directory from env: {e}")


    # --- Load Cache Directory ---
    try:
        cache_dir_env = decouple_config('GATEWAY_CACHE_DIR', default=None) # Changed env var name for clarity
        if cache_dir_env:
             loaded_config.cache.directory = cache_dir_env
             config_logger.debug(f"Set cache directory from env: {loaded_config.cache.directory}")
    except Exception as e:
         config_logger.warning(f"Could not load cache directory from env: {e}")

    # --- Load Smart Browser Settings ---
    sb_conf = loaded_config.smart_browser # Get the config object
    try:
        # State Key (already added previously)
        sb_conf.sb_state_key_b64 = decouple_config('SB_STATE_KEY', default=sb_conf.sb_state_key_b64)
        if sb_conf.sb_state_key_b64:
             config_logger.debug("Loaded SB_STATE_KEY from env/'.env' or file.")
        else:
             config_logger.info("Smart Browser state encryption disabled (SB_STATE_KEY not found).")

        # Other SB settings
        sb_conf.sb_max_tabs = decouple_config('SB_MAX_TABS', default=sb_conf.sb_max_tabs, cast=int)
        sb_conf.sb_tab_timeout = decouple_config('SB_TAB_TIMEOUT', default=sb_conf.sb_tab_timeout, cast=int)
        sb_conf.sb_inactivity_timeout = decouple_config('SB_INACTIVITY_TIMEOUT', default=sb_conf.sb_inactivity_timeout, cast=int)
        sb_conf.headless_mode = decouple_config('SB_HEADLESS_MODE', default=sb_conf.headless_mode, cast=bool) # Use SB_ prefix
        sb_conf.vnc_enabled = decouple_config('SB_VNC_ENABLED', default=sb_conf.vnc_enabled, cast=bool) # Use SB_ prefix
        sb_conf.vnc_password = decouple_config('SB_VNC_PASSWORD', default=sb_conf.vnc_password) # Use SB_ prefix
        sb_conf.proxy_pool_str = decouple_config('SB_PROXY_POOL', default=sb_conf.proxy_pool_str) # Use SB_ prefix
        sb_conf.proxy_allowed_domains_str = decouple_config('SB_PROXY_ALLOWED_DOMAINS', default=sb_conf.proxy_allowed_domains_str) # Use SB_ prefix
        sb_conf.vault_allowed_paths_str = decouple_config('SB_VAULT_ALLOWED_PATHS', default=sb_conf.vault_allowed_paths_str) # Use SB_ prefix

        # Locator Tunables
        sb_conf.max_widgets = decouple_config('SB_MAX_WIDGETS', default=sb_conf.max_widgets, cast=int)
        sb_conf.max_section_chars = decouple_config('SB_MAX_SECTION_CHARS', default=sb_conf.max_section_chars, cast=int)
        sb_conf.dom_fp_limit = decouple_config('SB_DOM_FP_LIMIT', default=sb_conf.dom_fp_limit, cast=int)
        sb_conf.llm_model_locator = decouple_config('SB_LLM_MODEL_LOCATOR', default=sb_conf.llm_model_locator)
        sb_conf.retry_after_fail = decouple_config('SB_RETRY_AFTER_FAIL', default=sb_conf.retry_after_fail, cast=int)
        sb_conf.seq_cutoff = decouple_config('SB_SEQ_CUTOFF', default=sb_conf.seq_cutoff, cast=float)
        sb_conf.area_min = decouple_config('SB_AREA_MIN', default=sb_conf.area_min, cast=int)

        # High Risk Domains (Load as string, validator handles conversion)
        high_risk_domains_env = decouple_config('SB_HIGH_RISK_DOMAINS', default=None)
        if high_risk_domains_env is not None:
             # Let the validator handle parsing and normalization
             sb_conf.high_risk_domains_set = high_risk_domains_env # Pass the raw string

        config_logger.debug("Loaded Smart Browser settings from env/'.env' or defaults.")

    except (ValueError, UndefinedValueError) as e:
         config_logger.warning(f"Issue loading Smart Browser settings from env: {e}. Using defaults/file values.")
    except Exception as e:
        config_logger.error(f"Unexpected error loading Smart Browser settings: {e}", exc_info=True)

    # --- Expand paths ---
    try:
        # Expand core directories
        loaded_config.storage_directory = expand_path(loaded_config.storage_directory)
        loaded_config.log_directory = expand_path(loaded_config.log_directory)
        loaded_config.prompt_templates_directory = expand_path(loaded_config.prompt_templates_directory) # Expand new dir

        # Expand cache directory if set
        if loaded_config.cache.directory:
            loaded_config.cache.directory = expand_path(loaded_config.cache.directory)

        # Expand agent memory DB path (assuming it's a relative path)
        # Check if it's already absolute to avoid issues
        if not os.path.isabs(loaded_config.agent_memory.db_path):
            # Place it relative to storage_directory by default? Or workspace root? Let's choose storage.
            db_in_storage = Path(loaded_config.storage_directory) / loaded_config.agent_memory.db_path
            loaded_config.agent_memory.db_path = str(db_in_storage.resolve())
            config_logger.debug(f"Expanded agent memory db path to: {loaded_config.agent_memory.db_path}")

        # Expand allowed filesystem directories
        expanded_allowed_dirs = []
        for d in loaded_config.filesystem.allowed_directories:
             if isinstance(d, str):
                  expanded_allowed_dirs.append(expand_path(d))
             else:
                  config_logger.warning(f"Ignoring non-string entry in allowed_directories: {d!r}")
        loaded_config.filesystem.allowed_directories = expanded_allowed_dirs
    except Exception as e:
        config_logger.error(f"Error expanding configured paths: {e}", exc_info=True)

    # --- Ensure critical directories exist ---
    try:
        # Use pathlib for consistency
        Path(loaded_config.storage_directory).mkdir(parents=True, exist_ok=True)
        Path(loaded_config.log_directory).mkdir(parents=True, exist_ok=True)
        Path(loaded_config.prompt_templates_directory).mkdir(parents=True, exist_ok=True) # Ensure prompt dir exists

        if loaded_config.cache.enabled and loaded_config.cache.directory:
             Path(loaded_config.cache.directory).mkdir(parents=True, exist_ok=True)

        # Ensure Agent Memory DB directory exists
        db_dir = Path(loaded_config.agent_memory.db_path).parent
        db_dir.mkdir(parents=True, exist_ok=True)

    except OSError as e:
        config_logger.error(f"Failed to create necessary directories: {e}")

    # Publish to the module-level cache so get_config() reuses this instance.
    _config = loaded_config
    config_logger.debug(f"Effective allowed directories: {loaded_config.filesystem.allowed_directories}")
    config_logger.debug(f"Effective Agent Memory DB path: {loaded_config.agent_memory.db_path}")
    config_logger.debug(f"Effective Prompt Templates directory: {loaded_config.prompt_templates_directory}")
    return _config
860 | 
def get_config() -> GatewayConfig:
    """
    Return the cached GatewayConfig, loading it on first use.

    Implements a lazy singleton: the configuration is loaded exactly once and
    the same validated instance is handed back on every subsequent call.
    Setting the environment variable GATEWAY_FORCE_CONFIG_RELOAD to 'true'
    forces a fresh load on the next call.

    Priority order applied during loading (highest first):
    1. Environment variables
    2. .env file values
    3. Configuration file settings
    4. Pydantic default values

    Returns:
        GatewayConfig: The validated configuration instance with all settings applied.

    Raises:
        RuntimeError: If configuration loading fails for any reason (invalid
                      settings, missing required values, inaccessible files, etc.)

    Example usage:
        ```python
        from ultimate_mcp_server.config import get_config

        config = get_config()
        server_port = config.server.port
        openai_api_key = config.providers.openai.api_key
        if config.cache.enabled:
            ...
        ```
    """
    global _config
    # Read the reload flag straight from env/.env via decouple.
    reload_requested = (
        decouple_config.get("GATEWAY_FORCE_CONFIG_RELOAD", default='false').lower() == 'true'
    )

    if reload_requested or _config is None:
        try:
            # load_config() also updates the module-level cache as a side effect.
            _config = load_config()
        except Exception as e:
            config_logger.critical(f"Failed to load configuration: {e}", exc_info=True)
            raise RuntimeError("Configuration could not be loaded.") from e

    if _config is None:  # Defensive: load_config should have returned or raised.
        raise RuntimeError("Configuration is None after loading attempt.")

    return _config
921 | 
922 | 
def get_config_as_dict() -> Dict[str, Any]:
    """
    Return the active configuration as a plain, serializable dictionary.

    Fetches the singleton configuration via get_config() and dumps the
    Pydantic model — including every nested sub-model — into standard Python
    containers. Useful whenever a non-Pydantic representation is needed:

    - Sending configuration over an API
    - Logging configuration values
    - Debugging configuration state
    - Comparing configurations

    Returns:
        A nested dictionary containing all configuration values.

    Raises:
        Any exceptions that might be raised by get_config()

    Example:
        ```python
        config_dict = get_config_as_dict()
        logger.debug(f"Current server configuration: {config_dict['server']}")

        import json
        config_json = json.dumps(get_config_as_dict())
        ```
    """
    return get_config().model_dump()
959 | 
```

--------------------------------------------------------------------------------
/ultimate_mcp_server/services/analytics/reporting.py:
--------------------------------------------------------------------------------

```python
   1 | """Reporting and visualization for Ultimate MCP Server analytics."""
   2 | import json
   3 | import os
   4 | from datetime import datetime
   5 | from pathlib import Path
   6 | from typing import Any, Dict, List, Optional, Union
   7 | 
   8 | from ultimate_mcp_server.services.analytics.metrics import get_metrics_tracker
   9 | from ultimate_mcp_server.utils import get_logger
  10 | 
  11 | logger = get_logger(__name__)
  12 | 
  13 | try:
  14 |     import matplotlib.dates as mdates
  15 |     import matplotlib.pyplot as plt
  16 |     import pandas as pd
  17 |     PLOTTING_AVAILABLE = True
  18 | except ImportError:
  19 |     PLOTTING_AVAILABLE = False
  20 | 
  21 | 
  22 | class AnalyticsReporting:
  23 |     """Provides comprehensive reporting and visualization capabilities for Ultimate MCP Server analytics.
  24 |     
  25 |     This class offers tools to generate detailed usage, cost, and provider-specific reports
  26 |     in various formats (JSON, HTML, Markdown) with optional data visualizations. It serves
  27 |     as the primary interface for extracting actionable insights from the server's operational
  28 |     metrics and presenting them in human-readable formats.
  29 |     
  30 |     Features:
  31 |     - Multiple report types: usage reports, provider-specific analysis, and cost breakdowns
  32 |     - Multiple output formats: JSON, HTML, and Markdown
  33 |     - Optional data visualizations using matplotlib (when available)
  34 |     - Customizable reporting periods
  35 |     - Persistent report storage
  36 |     
  37 |     The reporting system uses the metrics tracked by the MetricsTracker to generate these
  38 |     reports, providing insights into token usage, costs, request patterns, cache efficiency,
  39 |     and provider/model distribution.
  40 |     
  41 |     Usage:
  42 |         # Create a reporting instance
  43 |         reporter = AnalyticsReporting()
  44 |         
  45 |         # Generate a usage report for the last 7 days
  46 |         report_path = reporter.generate_usage_report(days=7, output_format="html")
  47 |         
  48 |         # Generate a cost analysis report for the last month
  49 |         cost_report = reporter.generate_cost_report(days=30, output_format="json")
  50 |         
  51 |         # Generate a provider-specific report
  52 |         provider_report = reporter.generate_provider_report(
  53 |             provider="anthropic", 
  54 |             days=14,
  55 |             output_format="markdown"
  56 |         )
  57 |     """
  58 |     
  59 |     def __init__(
  60 |         self,
  61 |         reports_dir: Optional[Union[str, Path]] = None,
  62 |         include_plots: bool = True
  63 |     ):
  64 |         """Initialize the analytics reporting.
  65 |         
  66 |         Args:
  67 |             reports_dir: Directory for reports storage
  68 |             include_plots: Whether to include plots in reports
  69 |         """
  70 |         # Set reports directory
  71 |         if reports_dir:
  72 |             self.reports_dir = Path(reports_dir)
  73 |         else:
  74 |             self.reports_dir = Path.home() / ".ultimate" / "reports"
  75 |             
  76 |         # Create reports directory if it doesn't exist
  77 |         self.reports_dir.mkdir(parents=True, exist_ok=True)
  78 |         
  79 |         # Plotting settings
  80 |         self.include_plots = include_plots and PLOTTING_AVAILABLE
  81 |         
  82 |         # Get metrics tracker
  83 |         self.metrics = get_metrics_tracker()
  84 |         
  85 |         logger.info(
  86 |             f"Analytics reporting initialized (dir: {self.reports_dir}, plots: {self.include_plots})",
  87 |             emoji_key="analytics"
  88 |         )
  89 |     
  90 |     def generate_usage_report(
  91 |         self,
  92 |         days: int = 7,
  93 |         output_format: str = "json",
  94 |         include_plots: Optional[bool] = None
  95 |     ) -> Union[Dict[str, Any], str, Path]:
  96 |         """Generate a usage report.
  97 |         
  98 |         Args:
  99 |             days: Number of days to include in the report
 100 |             output_format: Output format (json, html, markdown)
 101 |             include_plots: Whether to include plots (overrides default setting)
 102 |             
 103 |         Returns:
 104 |             Report data or path to report file
 105 |         """
 106 |         # Get metrics
 107 |         metrics = self.metrics.get_stats()
 108 |         
 109 |         # Determine plotting
 110 |         do_plots = self.include_plots if include_plots is None else include_plots
 111 |         do_plots = do_plots and PLOTTING_AVAILABLE
 112 |         
 113 |         # Build report data
 114 |         report_data = {
 115 |             "generated_at": datetime.now().isoformat(),
 116 |             "period": f"{days} days",
 117 |             "general": metrics["general"],
 118 |             "cache": metrics["cache"],
 119 |             "top_providers": metrics["top_providers"],
 120 |             "top_models": metrics["top_models"],
 121 |             "daily_usage": [
 122 |                 day for day in metrics["daily_usage"]
 123 |                 if (datetime.now() - datetime.strptime(day["date"], "%Y-%m-%d")).days < days
 124 |             ],
 125 |         }
 126 |         
 127 |         # Generate report based on format
 128 |         if output_format == "json":
 129 |             # JSON format
 130 |             report_path = self.reports_dir / f"usage_report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
 131 |             with open(report_path, "w") as f:
 132 |                 json.dump(report_data, f, indent=2)
 133 |                 
 134 |             logger.info(
 135 |                 f"Generated JSON usage report: {report_path}",
 136 |                 emoji_key="analytics"
 137 |             )
 138 |             
 139 |             return report_path
 140 |             
 141 |         elif output_format == "html":
 142 |             # HTML format (with optional plots)
 143 |             report_path = self.reports_dir / f"usage_report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.html"
 144 |             
 145 |             # Generate plots if requested
 146 |             plot_paths = []
 147 |             if do_plots:
 148 |                 plot_paths = self._generate_report_plots(report_data, days)
 149 |             
 150 |             # Generate HTML
 151 |             html = self._generate_html_report(report_data, plot_paths)
 152 |             
 153 |             with open(report_path, "w") as f:
 154 |                 f.write(html)
 155 |                 
 156 |             logger.info(
 157 |                 f"Generated HTML usage report: {report_path}",
 158 |                 emoji_key="analytics"
 159 |             )
 160 |             
 161 |             return report_path
 162 |             
 163 |         elif output_format == "markdown":
 164 |             # Markdown format
 165 |             report_path = self.reports_dir / f"usage_report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.md"
 166 |             
 167 |             # Generate plots if requested
 168 |             plot_paths = []
 169 |             if do_plots:
 170 |                 plot_paths = self._generate_report_plots(report_data, days)
 171 |             
 172 |             # Generate Markdown
 173 |             markdown = self._generate_markdown_report(report_data, plot_paths)
 174 |             
 175 |             with open(report_path, "w") as f:
 176 |                 f.write(markdown)
 177 |                 
 178 |             logger.info(
 179 |                 f"Generated Markdown usage report: {report_path}",
 180 |                 emoji_key="analytics"
 181 |             )
 182 |             
 183 |             return report_path
 184 |             
 185 |         else:
 186 |             # Default to raw data
 187 |             logger.warning(
 188 |                 f"Unknown output format: {output_format}, returning raw data",
 189 |                 emoji_key="warning"
 190 |             )
 191 |             return report_data
 192 |     
 193 |     def generate_provider_report(
 194 |         self,
 195 |         provider: str,
 196 |         days: int = 7,
 197 |         output_format: str = "json",
 198 |         include_plots: Optional[bool] = None
 199 |     ) -> Union[Dict[str, Any], str, Path]:
 200 |         """Generate a provider-specific report.
 201 |         
 202 |         Args:
 203 |             provider: Provider name
 204 |             days: Number of days to include in the report
 205 |             output_format: Output format (json, html, markdown)
 206 |             include_plots: Whether to include plots (overrides default setting)
 207 |             
 208 |         Returns:
 209 |             Report data or path to report file
 210 |         """
 211 |         # Get metrics
 212 |         metrics = self.metrics.get_stats()
 213 |         
 214 |         # Check if provider exists
 215 |         if provider not in metrics["providers"]:
 216 |             logger.error(
 217 |                 f"Unknown provider: {provider}",
 218 |                 emoji_key="error"
 219 |             )
 220 |             return {"error": f"Unknown provider: {provider}"}
 221 |         
 222 |         # Determine plotting
 223 |         do_plots = self.include_plots if include_plots is None else include_plots
 224 |         do_plots = do_plots and PLOTTING_AVAILABLE
 225 |         
 226 |         # Extract provider-specific data
 227 |         provider_data = metrics["providers"][provider]
 228 |         provider_models = {
 229 |             model: data
 230 |             for model, data in metrics["models"].items()
 231 |             if model.startswith(provider) or model.lower().startswith(provider.lower())
 232 |         }
 233 |         
 234 |         # Collect daily usage for this provider (approximate)
 235 |         provider_share = provider_data["tokens"] / metrics["general"]["tokens_total"] if metrics["general"]["tokens_total"] > 0 else 0
 236 |         provider_daily = [
 237 |             {
 238 |                 "date": day["date"],
 239 |                 "tokens": int(day["tokens"] * provider_share),  # Approximate
 240 |                 "cost": day["cost"] * provider_share,  # Approximate
 241 |             }
 242 |             for day in metrics["daily_usage"]
 243 |             if (datetime.now() - datetime.strptime(day["date"], "%Y-%m-%d")).days < days
 244 |         ]
 245 |         
 246 |         # Build report data
 247 |         report_data = {
 248 |             "generated_at": datetime.now().isoformat(),
 249 |             "period": f"{days} days",
 250 |             "provider": provider,
 251 |             "stats": provider_data,
 252 |             "models": provider_models,
 253 |             "daily_usage": provider_daily,
 254 |             "percentage_of_total": provider_share * 100,
 255 |         }
 256 |         
 257 |         # Generate report based on format
 258 |         if output_format == "json":
 259 |             # JSON format
 260 |             report_path = self.reports_dir / f"{provider}_report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
 261 |             with open(report_path, "w") as f:
 262 |                 json.dump(report_data, f, indent=2)
 263 |                 
 264 |             logger.info(
 265 |                 f"Generated JSON provider report: {report_path}",
 266 |                 emoji_key="analytics"
 267 |             )
 268 |             
 269 |             return report_path
 270 |             
 271 |         elif output_format == "html":
 272 |             # HTML format (with optional plots)
 273 |             report_path = self.reports_dir / f"{provider}_report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.html"
 274 |             
 275 |             # Generate plots if requested
 276 |             plot_paths = []
 277 |             if do_plots:
 278 |                 plot_paths = self._generate_provider_plots(report_data, provider, days)
 279 |             
 280 |             # Generate HTML
 281 |             html = self._generate_html_provider_report(report_data, plot_paths)
 282 |             
 283 |             with open(report_path, "w") as f:
 284 |                 f.write(html)
 285 |                 
 286 |             logger.info(
 287 |                 f"Generated HTML provider report: {report_path}",
 288 |                 emoji_key="analytics"
 289 |             )
 290 |             
 291 |             return report_path
 292 |             
 293 |         elif output_format == "markdown":
 294 |             # Markdown format
 295 |             report_path = self.reports_dir / f"{provider}_report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.md"
 296 |             
 297 |             # Generate plots if requested
 298 |             plot_paths = []
 299 |             if do_plots:
 300 |                 plot_paths = self._generate_provider_plots(report_data, provider, days)
 301 |             
 302 |             # Generate Markdown
 303 |             markdown = self._generate_markdown_provider_report(report_data, plot_paths)
 304 |             
 305 |             with open(report_path, "w") as f:
 306 |                 f.write(markdown)
 307 |                 
 308 |             logger.info(
 309 |                 f"Generated Markdown provider report: {report_path}",
 310 |                 emoji_key="analytics"
 311 |             )
 312 |             
 313 |             return report_path
 314 |             
 315 |         else:
 316 |             # Default to raw data
 317 |             logger.warning(
 318 |                 f"Unknown output format: {output_format}, returning raw data",
 319 |                 emoji_key="warning"
 320 |             )
 321 |             return report_data
 322 |     
 323 |     def generate_cost_report(
 324 |         self,
 325 |         days: int = 30,
 326 |         output_format: str = "json",
 327 |         include_plots: Optional[bool] = None
 328 |     ) -> Union[Dict[str, Any], str, Path]:
 329 |         """Generate a cost analysis report.
 330 |         
 331 |         Args:
 332 |             days: Number of days to include in the report
 333 |             output_format: Output format (json, html, markdown)
 334 |             include_plots: Whether to include plots (overrides default setting)
 335 |             
 336 |         Returns:
 337 |             Report data or path to report file
 338 |         """
 339 |         # Get metrics
 340 |         metrics = self.metrics.get_stats()
 341 |         
 342 |         # Determine plotting
 343 |         do_plots = self.include_plots if include_plots is None else include_plots
 344 |         do_plots = do_plots and PLOTTING_AVAILABLE
 345 |         
 346 |         # Process daily cost data
 347 |         daily_costs = [
 348 |             {
 349 |                 "date": day["date"],
 350 |                 "cost": day["cost"],
 351 |             }
 352 |             for day in metrics["daily_usage"]
 353 |             if (datetime.now() - datetime.strptime(day["date"], "%Y-%m-%d")).days < days
 354 |         ]
 355 |         
 356 |         # Calculate cost by provider
 357 |         provider_costs = [
 358 |             {
 359 |                 "provider": provider,
 360 |                 "cost": data["cost"],
 361 |                 "percentage": data["cost"] / metrics["general"]["cost_total"] * 100 if metrics["general"]["cost_total"] > 0 else 0,
 362 |             }
 363 |             for provider, data in metrics["providers"].items()
 364 |         ]
 365 |         provider_costs.sort(key=lambda x: x["cost"], reverse=True)
 366 |         
 367 |         # Calculate cost by model
 368 |         model_costs = [
 369 |             {
 370 |                 "model": model,
 371 |                 "cost": data["cost"],
 372 |                 "percentage": data["cost"] / metrics["general"]["cost_total"] * 100 if metrics["general"]["cost_total"] > 0 else 0,
 373 |             }
 374 |             for model, data in metrics["models"].items()
 375 |         ]
 376 |         model_costs.sort(key=lambda x: x["cost"], reverse=True)
 377 |         
 378 |         # Calculate cost efficiency (tokens per dollar)
 379 |         cost_efficiency = [
 380 |             {
 381 |                 "model": model,
 382 |                 "tokens_per_dollar": data["tokens"] / data["cost"] if data["cost"] > 0 else 0,
 383 |                 "tokens": data["tokens"],
 384 |                 "cost": data["cost"],
 385 |             }
 386 |             for model, data in metrics["models"].items()
 387 |             if data["cost"] > 0
 388 |         ]
 389 |         cost_efficiency.sort(key=lambda x: x["tokens_per_dollar"], reverse=True)
 390 |         
 391 |         # Build report data
 392 |         report_data = {
 393 |             "generated_at": datetime.now().isoformat(),
 394 |             "period": f"{days} days",
 395 |             "total_cost": metrics["general"]["cost_total"],
 396 |             "cache_savings": metrics["cache"]["saved_cost"],
 397 |             "daily_costs": daily_costs,
 398 |             "provider_costs": provider_costs,
 399 |             "model_costs": model_costs,
 400 |             "cost_efficiency": cost_efficiency,
 401 |         }
 402 |         
 403 |         # Generate report based on format
 404 |         if output_format == "json":
 405 |             # JSON format
 406 |             report_path = self.reports_dir / f"cost_report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
 407 |             with open(report_path, "w") as f:
 408 |                 json.dump(report_data, f, indent=2)
 409 |                 
 410 |             logger.info(
 411 |                 f"Generated JSON cost report: {report_path}",
 412 |                 emoji_key="analytics"
 413 |             )
 414 |             
 415 |             return report_path
 416 |             
 417 |         elif output_format == "html":
 418 |             # HTML format (with optional plots)
 419 |             report_path = self.reports_dir / f"cost_report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.html"
 420 |             
 421 |             # Generate plots if requested
 422 |             plot_paths = []
 423 |             if do_plots:
 424 |                 plot_paths = self._generate_cost_plots(report_data, days)
 425 |             
 426 |             # Generate HTML
 427 |             html = self._generate_html_cost_report(report_data, plot_paths)
 428 |             
 429 |             with open(report_path, "w") as f:
 430 |                 f.write(html)
 431 |                 
 432 |             logger.info(
 433 |                 f"Generated HTML cost report: {report_path}",
 434 |                 emoji_key="analytics"
 435 |             )
 436 |             
 437 |             return report_path
 438 |             
 439 |         elif output_format == "markdown":
 440 |             # Markdown format
 441 |             report_path = self.reports_dir / f"cost_report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.md"
 442 |             
 443 |             # Generate plots if requested
 444 |             plot_paths = []
 445 |             if do_plots:
 446 |                 plot_paths = self._generate_cost_plots(report_data, days)
 447 |             
 448 |             # Generate Markdown
 449 |             markdown = self._generate_markdown_cost_report(report_data, plot_paths)
 450 |             
 451 |             with open(report_path, "w") as f:
 452 |                 f.write(markdown)
 453 |                 
 454 |             logger.info(
 455 |                 f"Generated Markdown cost report: {report_path}",
 456 |                 emoji_key="analytics"
 457 |             )
 458 |             
 459 |             return report_path
 460 |             
 461 |         else:
 462 |             # Default to raw data
 463 |             logger.warning(
 464 |                 f"Unknown output format: {output_format}, returning raw data",
 465 |                 emoji_key="warning"
 466 |             )
 467 |             return report_data
 468 |     
 469 |     def _generate_report_plots(
 470 |         self,
 471 |         report_data: Dict[str, Any],
 472 |         days: int
 473 |     ) -> List[str]:
 474 |         """Generate plots for a usage report.
 475 |         
 476 |         Args:
 477 |             report_data: Report data
 478 |             days: Number of days to include
 479 |             
 480 |         Returns:
 481 |             List of plot file paths
 482 |         """
 483 |         if not PLOTTING_AVAILABLE:
 484 |             return []
 485 |             
 486 |         plot_paths = []
 487 |         
 488 |         # Create daily usage plot
 489 |         if report_data["daily_usage"]:
 490 |             try:
 491 |                 # Prepare data
 492 |                 df = pd.DataFrame(report_data["daily_usage"])
 493 |                 df["date"] = pd.to_datetime(df["date"])
 494 |                 df = df.sort_values("date")
 495 |                 
 496 |                 # Create plot directory
 497 |                 plot_dir = self.reports_dir / "plots"
 498 |                 plot_dir.mkdir(exist_ok=True)
 499 |                 
 500 |                 # Create plot
 501 |                 plt.figure(figsize=(10, 6))
 502 |                 plt.plot(df["date"], df["tokens"], marker="o", linestyle="-", linewidth=2)
 503 |                 plt.title(f"Daily Token Usage (Last {days} Days)")
 504 |                 plt.xlabel("Date")
 505 |                 plt.ylabel("Tokens")
 506 |                 plt.grid(True, alpha=0.3)
 507 |                 plt.gca().xaxis.set_major_formatter(mdates.DateFormatter("%Y-%m-%d"))
 508 |                 plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=max(1, days // 7)))
 509 |                 plt.xticks(rotation=45)
 510 |                 plt.tight_layout()
 511 |                 
 512 |                 # Save plot
 513 |                 plot_path = str(plot_dir / f"daily_usage_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png")
 514 |                 plt.savefig(plot_path)
 515 |                 plt.close()
 516 |                 
 517 |                 plot_paths.append(plot_path)
 518 |                 
 519 |             except Exception as e:
 520 |                 logger.error(
 521 |                     f"Failed to generate daily usage plot: {str(e)}",
 522 |                     emoji_key="error"
 523 |                 )
 524 |         
 525 |         # Create provider distribution plot
 526 |         if report_data["top_providers"]:
 527 |             try:
 528 |                 # Prepare data
 529 |                 providers = [p["provider"] for p in report_data["top_providers"]]
 530 |                 percentages = [p["percentage"] * 100 for p in report_data["top_providers"]]
 531 |                 
 532 |                 # Create plot
 533 |                 plt.figure(figsize=(8, 8))
 534 |                 plt.pie(percentages, labels=providers, autopct="%1.1f%%", startangle=90, shadow=True)
 535 |                 plt.axis("equal")
 536 |                 plt.title("Token Usage by Provider")
 537 |                 plt.tight_layout()
 538 |                 
 539 |                 # Save plot
 540 |                 plot_path = str(plot_dir / f"provider_distribution_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png")
 541 |                 plt.savefig(plot_path)
 542 |                 plt.close()
 543 |                 
 544 |                 plot_paths.append(plot_path)
 545 |                 
 546 |             except Exception as e:
 547 |                 logger.error(
 548 |                     f"Failed to generate provider distribution plot: {str(e)}",
 549 |                     emoji_key="error"
 550 |                 )
 551 |         
 552 |         # Create model distribution plot
 553 |         if report_data["top_models"]:
 554 |             try:
 555 |                 # Prepare data
 556 |                 models = [m["model"] for m in report_data["top_models"]]
 557 |                 percentages = [m["percentage"] * 100 for m in report_data["top_models"]]
 558 |                 
 559 |                 # Create plot
 560 |                 plt.figure(figsize=(10, 6))
 561 |                 plt.bar(models, percentages)
 562 |                 plt.title("Token Usage by Model")
 563 |                 plt.xlabel("Model")
 564 |                 plt.ylabel("Percentage of Total Tokens")
 565 |                 plt.xticks(rotation=45, ha="right")
 566 |                 plt.grid(True, alpha=0.3, axis="y")
 567 |                 plt.tight_layout()
 568 |                 
 569 |                 # Save plot
 570 |                 plot_path = str(plot_dir / f"model_distribution_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png")
 571 |                 plt.savefig(plot_path)
 572 |                 plt.close()
 573 |                 
 574 |                 plot_paths.append(plot_path)
 575 |                 
 576 |             except Exception as e:
 577 |                 logger.error(
 578 |                     f"Failed to generate model distribution plot: {str(e)}",
 579 |                     emoji_key="error"
 580 |                 )
 581 |         
 582 |         return plot_paths
 583 |     
 584 |     def _generate_provider_plots(
 585 |         self,
 586 |         report_data: Dict[str, Any],
 587 |         provider: str,
 588 |         days: int
 589 |     ) -> List[str]:
 590 |         """Generate plots for a provider report.
 591 |         
 592 |         Args:
 593 |             report_data: Report data
 594 |             provider: Provider name
 595 |             days: Number of days to include
 596 |             
 597 |         Returns:
 598 |             List of plot file paths
 599 |         """
 600 |         if not PLOTTING_AVAILABLE:
 601 |             return []
 602 |             
 603 |         plot_paths = []
 604 |         
 605 |         # Create plot directory
 606 |         plot_dir = self.reports_dir / "plots"
 607 |         plot_dir.mkdir(exist_ok=True)
 608 |         
 609 |         # Create daily usage plot
 610 |         if report_data["daily_usage"]:
 611 |             try:
 612 |                 # Prepare data
 613 |                 df = pd.DataFrame(report_data["daily_usage"])
 614 |                 df["date"] = pd.to_datetime(df["date"])
 615 |                 df = df.sort_values("date")
 616 |                 
 617 |                 # Create plot
 618 |                 plt.figure(figsize=(10, 6))
 619 |                 plt.plot(df["date"], df["tokens"], marker="o", linestyle="-", linewidth=2)
 620 |                 plt.title(f"{provider} Daily Token Usage (Last {days} Days)")
 621 |                 plt.xlabel("Date")
 622 |                 plt.ylabel("Tokens")
 623 |                 plt.grid(True, alpha=0.3)
 624 |                 plt.gca().xaxis.set_major_formatter(mdates.DateFormatter("%Y-%m-%d"))
 625 |                 plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=max(1, days // 7)))
 626 |                 plt.xticks(rotation=45)
 627 |                 plt.tight_layout()
 628 |                 
 629 |                 # Save plot
 630 |                 plot_path = str(plot_dir / f"{provider}_daily_usage_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png")
 631 |                 plt.savefig(plot_path)
 632 |                 plt.close()
 633 |                 
 634 |                 plot_paths.append(plot_path)
 635 |                 
 636 |             except Exception as e:
 637 |                 logger.error(
 638 |                     f"Failed to generate provider daily usage plot: {str(e)}",
 639 |                     emoji_key="error"
 640 |                 )
 641 |         
 642 |         # Create model distribution plot
 643 |         if report_data["models"]:
 644 |             try:
 645 |                 # Prepare data
 646 |                 models = list(report_data["models"].keys())
 647 |                 tokens = [data["tokens"] for _, data in report_data["models"].items()]
 648 |                 
 649 |                 # Create plot
 650 |                 plt.figure(figsize=(10, 6))
 651 |                 plt.bar(models, tokens)
 652 |                 plt.title(f"{provider} Token Usage by Model")
 653 |                 plt.xlabel("Model")
 654 |                 plt.ylabel("Tokens")
 655 |                 plt.xticks(rotation=45, ha="right")
 656 |                 plt.grid(True, alpha=0.3, axis="y")
 657 |                 plt.tight_layout()
 658 |                 
 659 |                 # Save plot
 660 |                 plot_path = str(plot_dir / f"{provider}_model_distribution_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png")
 661 |                 plt.savefig(plot_path)
 662 |                 plt.close()
 663 |                 
 664 |                 plot_paths.append(plot_path)
 665 |                 
 666 |             except Exception as e:
 667 |                 logger.error(
 668 |                     f"Failed to generate provider model distribution plot: {str(e)}",
 669 |                     emoji_key="error"
 670 |                 )
 671 |         
 672 |         return plot_paths
 673 |     
 674 |     def _generate_cost_plots(
 675 |         self,
 676 |         report_data: Dict[str, Any],
 677 |         days: int
 678 |     ) -> List[str]:
 679 |         """Generate plots for a cost report.
 680 |         
 681 |         Args:
 682 |             report_data: Report data
 683 |             days: Number of days to include
 684 |             
 685 |         Returns:
 686 |             List of plot file paths
 687 |         """
 688 |         if not PLOTTING_AVAILABLE:
 689 |             return []
 690 |             
 691 |         plot_paths = []
 692 |         
 693 |         # Create plot directory
 694 |         plot_dir = self.reports_dir / "plots"
 695 |         plot_dir.mkdir(exist_ok=True)
 696 |         
 697 |         # Create daily cost plot
 698 |         if report_data["daily_costs"]:
 699 |             try:
 700 |                 # Prepare data
 701 |                 df = pd.DataFrame(report_data["daily_costs"])
 702 |                 df["date"] = pd.to_datetime(df["date"])
 703 |                 df = df.sort_values("date")
 704 |                 
 705 |                 # Create plot
 706 |                 plt.figure(figsize=(10, 6))
 707 |                 plt.plot(df["date"], df["cost"], marker="o", linestyle="-", linewidth=2)
 708 |                 plt.title(f"Daily Cost (Last {days} Days)")
 709 |                 plt.xlabel("Date")
 710 |                 plt.ylabel("Cost ($)")
 711 |                 plt.grid(True, alpha=0.3)
 712 |                 plt.gca().xaxis.set_major_formatter(mdates.DateFormatter("%Y-%m-%d"))
 713 |                 plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=max(1, days // 7)))
 714 |                 plt.xticks(rotation=45)
 715 |                 plt.tight_layout()
 716 |                 
 717 |                 # Save plot
 718 |                 plot_path = str(plot_dir / f"daily_cost_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png")
 719 |                 plt.savefig(plot_path)
 720 |                 plt.close()
 721 |                 
 722 |                 plot_paths.append(plot_path)
 723 |                 
 724 |             except Exception as e:
 725 |                 logger.error(
 726 |                     f"Failed to generate daily cost plot: {str(e)}",
 727 |                     emoji_key="error"
 728 |                 )
 729 |         
 730 |         # Create provider cost distribution plot
 731 |         if report_data["provider_costs"]:
 732 |             try:
 733 |                 # Prepare data
 734 |                 providers = [p["provider"] for p in report_data["provider_costs"]]
 735 |                 costs = [p["cost"] for p in report_data["provider_costs"]]
 736 |                 
 737 |                 # Create plot
 738 |                 plt.figure(figsize=(8, 8))
 739 |                 plt.pie(costs, labels=providers, autopct="%1.1f%%", startangle=90, shadow=True)
 740 |                 plt.axis("equal")
 741 |                 plt.title("Cost by Provider")
 742 |                 plt.tight_layout()
 743 |                 
 744 |                 # Save plot
 745 |                 plot_path = str(plot_dir / f"provider_cost_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png")
 746 |                 plt.savefig(plot_path)
 747 |                 plt.close()
 748 |                 
 749 |                 plot_paths.append(plot_path)
 750 |                 
 751 |             except Exception as e:
 752 |                 logger.error(
 753 |                     f"Failed to generate provider cost distribution plot: {str(e)}",
 754 |                     emoji_key="error"
 755 |                 )
 756 |         
 757 |         # Create cost efficiency plot
 758 |         if report_data["cost_efficiency"]:
 759 |             try:
 760 |                 # Prepare data (limit to top 10 for readability)
 761 |                 top_efficient = report_data["cost_efficiency"][:10]
 762 |                 models = [m["model"] for m in top_efficient]
 763 |                 efficiency = [m["tokens_per_dollar"] for m in top_efficient]
 764 |                 
 765 |                 # Create plot
 766 |                 plt.figure(figsize=(10, 6))
 767 |                 plt.bar(models, efficiency)
 768 |                 plt.title("Cost Efficiency (Tokens per Dollar)")
 769 |                 plt.xlabel("Model")
 770 |                 plt.ylabel("Tokens per Dollar")
 771 |                 plt.xticks(rotation=45, ha="right")
 772 |                 plt.grid(True, alpha=0.3, axis="y")
 773 |                 plt.tight_layout()
 774 |                 
 775 |                 # Save plot
 776 |                 plot_path = str(plot_dir / f"cost_efficiency_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png")
 777 |                 plt.savefig(plot_path)
 778 |                 plt.close()
 779 |                 
 780 |                 plot_paths.append(plot_path)
 781 |                 
 782 |             except Exception as e:
 783 |                 logger.error(
 784 |                     f"Failed to generate cost efficiency plot: {str(e)}",
 785 |                     emoji_key="error"
 786 |                 )
 787 |         
 788 |         return plot_paths
 789 |     
    def _generate_html_report(self, report_data: Dict[str, Any], plot_paths: List[str]) -> str:
        """Generate an HTML usage report.

        Renders a self-contained HTML page (inline CSS, no external assets)
        with a header, a "General Statistics" card, and a "Cache Statistics"
        card that are always present, followed by optional cards that are only
        emitted when the corresponding data exists: embedded plot images, top
        providers, top models, and a daily-usage table sorted most recent
        first.

        Args:
            report_data: Report data. Keys read here: "generated_at",
                "period", "general", "cache", "top_providers", "top_models",
                and "daily_usage".
            plot_paths: List of plot image file paths; each image is embedded
                via a path made relative to ``self.reports_dir`` so the report
                remains portable alongside its images.

        Returns:
            HTML report content as a single string.
        """
        # Basic HTML template; doubled braces escape literal CSS braces in the f-string.
        html = f"""<!DOCTYPE html>
<html>
<head>
    <title>Ultimate MCP Server Usage Report</title>
    <style>
        body {{ font-family: Arial, sans-serif; margin: 20px; }}
        h1, h2, h3 {{ color: #333; }}
        .container {{ max-width: 1200px; margin: 0 auto; }}
        .card {{ background: #f9f9f9; border-radius: 5px; padding: 20px; margin-bottom: 20px; box-shadow: 0 2px 4px rgba(0,0,0,0.1); }}
        table {{ width: 100%; border-collapse: collapse; }}
        th, td {{ padding: 8px; text-align: left; border-bottom: 1px solid #ddd; }}
        th {{ background-color: #f2f2f2; }}
        .stat {{ font-size: 24px; font-weight: bold; color: #0066cc; }}
        .plot {{ max-width: 100%; height: auto; margin: 20px 0; }}
    </style>
</head>
<body>
    <div class="container">
        <h1>Ultimate MCP Server Usage Report</h1>
        <p>Generated at: {report_data["generated_at"]}</p>
        <p>Period: {report_data["period"]}</p>
        
        <div class="card">
            <h2>General Statistics</h2>
            <table>
                <tr>
                    <td>Uptime</td>
                    <td class="stat">{report_data["general"]["uptime_human"]}</td>
                </tr>
                <tr>
                    <td>Total Requests</td>
                    <td class="stat">{report_data["general"]["requests_total"]:,}</td>
                </tr>
                <tr>
                    <td>Total Tokens</td>
                    <td class="stat">{report_data["general"]["tokens_total"]:,}</td>
                </tr>
                <tr>
                    <td>Total Cost</td>
                    <td class="stat">${report_data["general"]["cost_total"]:.2f}</td>
                </tr>
                <tr>
                    <td>Average Response Time</td>
                    <td class="stat">{report_data["general"]["avg_response_time"]:.3f}s</td>
                </tr>
                <tr>
                    <td>Total Errors</td>
                    <td class="stat">{report_data["general"]["errors_total"]}</td>
                </tr>
                <tr>
                    <td>Error Rate</td>
                    <td class="stat">{report_data["general"]["error_rate"]*100:.2f}%</td>
                </tr>
            </table>
        </div>
        
        <div class="card">
            <h2>Cache Statistics</h2>
            <table>
                <tr>
                    <td>Cache Hits</td>
                    <td class="stat">{report_data["cache"]["hits"]:,}</td>
                </tr>
                <tr>
                    <td>Cache Misses</td>
                    <td class="stat">{report_data["cache"]["misses"]:,}</td>
                </tr>
                <tr>
                    <td>Hit Ratio</td>
                    <td class="stat">{report_data["cache"]["hit_ratio"]*100:.2f}%</td>
                </tr>
                <tr>
                    <td>Cost Savings</td>
                    <td class="stat">${report_data["cache"]["saved_cost"]:.2f}</td>
                </tr>
            </table>
        </div>
"""
        
        # Add plots if available (embedded as <img> tags)
        if plot_paths:
            html += """
        <div class="card">
            <h2>Usage Visualizations</h2>
"""
            for plot_path in plot_paths:
                # Use a path relative to the reports dir so the HTML is portable
                rel_path = os.path.relpath(plot_path, self.reports_dir)
                html += f"""
            <img class="plot" src="{rel_path}" alt="Usage Plot">
"""
            html += """
        </div>
"""
        
        # Add top providers table (skipped entirely when empty)
        if report_data["top_providers"]:
            html += """
        <div class="card">
            <h2>Top Providers</h2>
            <table>
                <tr>
                    <th>Provider</th>
                    <th>Tokens</th>
                    <th>Percentage</th>
                </tr>
"""
            for provider in report_data["top_providers"]:
                html += f"""
                <tr>
                    <td>{provider["provider"]}</td>
                    <td>{provider["tokens"]:,}</td>
                    <td>{provider["percentage"]*100:.2f}%</td>
                </tr>
"""
            html += """
            </table>
        </div>
"""
        
        # Add top models table (skipped entirely when empty)
        if report_data["top_models"]:
            html += """
        <div class="card">
            <h2>Top Models</h2>
            <table>
                <tr>
                    <th>Model</th>
                    <th>Tokens</th>
                    <th>Percentage</th>
                </tr>
"""
            for model in report_data["top_models"]:
                html += f"""
                <tr>
                    <td>{model["model"]}</td>
                    <td>{model["tokens"]:,}</td>
                    <td>{model["percentage"]*100:.2f}%</td>
                </tr>
"""
            html += """
            </table>
        </div>
"""
        
        # Add daily usage table, most recent date first
        if report_data["daily_usage"]:
            html += """
        <div class="card">
            <h2>Daily Usage</h2>
            <table>
                <tr>
                    <th>Date</th>
                    <th>Tokens</th>
                    <th>Cost</th>
                </tr>
"""
            for day in sorted(report_data["daily_usage"], key=lambda x: x["date"], reverse=True):
                html += f"""
                <tr>
                    <td>{day["date"]}</td>
                    <td>{day["tokens"]:,}</td>
                    <td>${day["cost"]:.2f}</td>
                </tr>
"""
            html += """
            </table>
        </div>
"""
        
        # Close HTML
        html += """
    </div>
</body>
</html>
"""
        
        return html
 980 |     def _generate_markdown_report(self, report_data: Dict[str, Any], plot_paths: List[str]) -> str:
 981 |         """Generate a Markdown usage report.
 982 |         
 983 |         Args:
 984 |             report_data: Report data
 985 |             plot_paths: List of plot file paths
 986 |             
 987 |         Returns:
 988 |             Markdown report content
 989 |         """
 990 |         # Basic Markdown template
 991 |         markdown = f"""# Ultimate MCP Server Usage Report
 992 | 
 993 | Generated at: {report_data["generated_at"]}  
 994 | Period: {report_data["period"]}
 995 | 
 996 | ## General Statistics
 997 | 
 998 | - **Uptime:** {report_data["general"]["uptime_human"]}
 999 | - **Total Requests:** {report_data["general"]["requests_total"]:,}
1000 | - **Total Tokens:** {report_data["general"]["tokens_total"]:,}
1001 | - **Total Cost:** ${report_data["general"]["cost_total"]:.2f}
1002 | - **Average Response Time:** {report_data["general"]["avg_response_time"]:.3f}s
1003 | - **Total Errors:** {report_data["general"]["errors_total"]}
1004 | - **Error Rate:** {report_data["general"]["error_rate"]*100:.2f}%
1005 | 
1006 | ## Cache Statistics
1007 | 
1008 | - **Cache Hits:** {report_data["cache"]["hits"]:,}
1009 | - **Cache Misses:** {report_data["cache"]["misses"]:,}
1010 | - **Hit Ratio:** {report_data["cache"]["hit_ratio"]*100:.2f}%
1011 | - **Cost Savings:** ${report_data["cache"]["saved_cost"]:.2f}
1012 | 
1013 | """
1014 |         
1015 |         # Add plots if available
1016 |         if plot_paths:
1017 |             markdown += """## Usage Visualizations
1018 | 
1019 | """
1020 |             for plot_path in plot_paths:
1021 |                 # Use relative path
1022 |                 rel_path = os.path.relpath(plot_path, self.reports_dir)
1023 |                 markdown += f"""![Usage Plot]({rel_path})
1024 | 
1025 | """
1026 |         
1027 |         # Add top providers
1028 |         if report_data["top_providers"]:
1029 |             markdown += """## Top Providers
1030 | 
1031 | | Provider | Tokens | Percentage |
1032 | |----------|--------|------------|
1033 | """
1034 |             for provider in report_data["top_providers"]:
1035 |                 markdown += f"""| {provider["provider"]} | {provider["tokens"]:,} | {provider["percentage"]*100:.2f}% |
1036 | """
1037 |             markdown += "\n"
1038 |         
1039 |         # Add top models
1040 |         if report_data["top_models"]:
1041 |             markdown += """## Top Models
1042 | 
1043 | | Model | Tokens | Percentage |
1044 | |-------|--------|------------|
1045 | """
1046 |             for model in report_data["top_models"]:
1047 |                 markdown += f"""| {model["model"]} | {model["tokens"]:,} | {model["percentage"]*100:.2f}% |
1048 | """
1049 |             markdown += "\n"
1050 |         
1051 |         # Add daily usage
1052 |         if report_data["daily_usage"]:
1053 |             markdown += """## Daily Usage
1054 | 
1055 | | Date | Tokens | Cost |
1056 | |------|--------|------|
1057 | """
1058 |             for day in sorted(report_data["daily_usage"], key=lambda x: x["date"], reverse=True):
1059 |                 markdown += f"""| {day["date"]} | {day["tokens"]:,} | ${day["cost"]:.2f} |
1060 | """
1061 |         
1062 |         return markdown
1063 |     
    def _generate_html_provider_report(self, report_data: Dict[str, Any], plot_paths: List[str]) -> str:
        """Generate an HTML provider report.

        Renders a self-contained HTML page (inline CSS, same layout as the
        usage report) for a single provider: an always-present "Provider
        Statistics" card, followed by optional cards for plot images, a
        per-model breakdown, and a daily-usage table sorted most recent first.

        Args:
            report_data: Report data. Keys read here: "provider",
                "generated_at", "period", "stats", "percentage_of_total",
                "models", and "daily_usage".
            plot_paths: List of plot image file paths; each image is embedded
                via a path made relative to ``self.reports_dir``.

        Returns:
            HTML report content as a single string.
        """
        # Basic HTML template (similar to usage report but provider-specific)
        provider = report_data["provider"]
        provider_stats = report_data["stats"]
        
        html = f"""<!DOCTYPE html>
<html>
<head>
    <title>{provider} Provider Report</title>
    <style>
        body {{ font-family: Arial, sans-serif; margin: 20px; }}
        h1, h2, h3 {{ color: #333; }}
        .container {{ max-width: 1200px; margin: 0 auto; }}
        .card {{ background: #f9f9f9; border-radius: 5px; padding: 20px; margin-bottom: 20px; box-shadow: 0 2px 4px rgba(0,0,0,0.1); }}
        table {{ width: 100%; border-collapse: collapse; }}
        th, td {{ padding: 8px; text-align: left; border-bottom: 1px solid #ddd; }}
        th {{ background-color: #f2f2f2; }}
        .stat {{ font-size: 24px; font-weight: bold; color: #0066cc; }}
        .plot {{ max-width: 100%; height: auto; margin: 20px 0; }}
    </style>
</head>
<body>
    <div class="container">
        <h1>{provider} Provider Report</h1>
        <p>Generated at: {report_data["generated_at"]}</p>
        <p>Period: {report_data["period"]}</p>
        
        <div class="card">
            <h2>Provider Statistics</h2>
            <table>
                <tr>
                    <td>Total Requests</td>
                    <td class="stat">{provider_stats["requests"]:,}</td>
                </tr>
                <tr>
                    <td>Total Tokens</td>
                    <td class="stat">{provider_stats["tokens"]:,}</td>
                </tr>
                <tr>
                    <td>Total Cost</td>
                    <td class="stat">${provider_stats["cost"]:.2f}</td>
                </tr>
                <tr>
                    <td>Average Response Time</td>
                    <td class="stat">{provider_stats["avg_response_time"]:.3f}s</td>
                </tr>
                <tr>
                    <td>Total Errors</td>
                    <td class="stat">{provider_stats["errors"]}</td>
                </tr>
                <tr>
                    <td>Percentage of Total Usage</td>
                    <td class="stat">{report_data["percentage_of_total"]:.2f}%</td>
                </tr>
            </table>
        </div>
"""
        
        # Add plots if available (embedded as <img> tags)
        if plot_paths:
            html += """
        <div class="card">
            <h2>Usage Visualizations</h2>
"""
            for plot_path in plot_paths:
                # Use a path relative to the reports dir so the HTML is portable
                rel_path = os.path.relpath(plot_path, self.reports_dir)
                html += f"""
            <img class="plot" src="{rel_path}" alt="Provider Usage Plot">
"""
            html += """
        </div>
"""
        
        # Add per-model breakdown table (skipped entirely when empty)
        if report_data["models"]:
            html += """
        <div class="card">
            <h2>Models</h2>
            <table>
                <tr>
                    <th>Model</th>
                    <th>Requests</th>
                    <th>Tokens</th>
                    <th>Cost</th>
                    <th>Avg Response Time</th>
                </tr>
"""
            for model, data in report_data["models"].items():
                html += f"""
                <tr>
                    <td>{model}</td>
                    <td>{data["requests"]:,}</td>
                    <td>{data["tokens"]:,}</td>
                    <td>${data["cost"]:.2f}</td>
                    <td>{data["avg_response_time"]:.3f}s</td>
                </tr>
"""
            html += """
            </table>
        </div>
"""
        
        # Add daily usage table, most recent date first
        if report_data["daily_usage"]:
            html += """
        <div class="card">
            <h2>Daily Usage</h2>
            <table>
                <tr>
                    <th>Date</th>
                    <th>Tokens</th>
                    <th>Cost</th>
                </tr>
"""
            for day in sorted(report_data["daily_usage"], key=lambda x: x["date"], reverse=True):
                html += f"""
                <tr>
                    <td>{day["date"]}</td>
                    <td>{day["tokens"]:,}</td>
                    <td>${day["cost"]:.2f}</td>
                </tr>
"""
            html += """
            </table>
        </div>
"""
        
        # Close HTML
        html += """
    </div>
</body>
</html>
"""
        
        return html
1209 |     
1210 |     def _generate_markdown_provider_report(self, report_data: Dict[str, Any], plot_paths: List[str]) -> str:
1211 |         """Generate a Markdown provider report.
1212 |         
1213 |         Args:
1214 |             report_data: Report data
1215 |             plot_paths: List of plot file paths
1216 |             
1217 |         Returns:
1218 |             Markdown report content
1219 |         """
1220 |         # Basic Markdown template (similar to usage report but provider-specific)
1221 |         provider = report_data["provider"]
1222 |         provider_stats = report_data["stats"]
1223 |         
1224 |         markdown = f"""# {provider} Provider Report
1225 | 
1226 | Generated at: {report_data["generated_at"]}  
1227 | Period: {report_data["period"]}
1228 | 
1229 | ## Provider Statistics
1230 | 
1231 | - **Total Requests:** {provider_stats["requests"]:,}
1232 | - **Total Tokens:** {provider_stats["tokens"]:,}
1233 | - **Total Cost:** ${provider_stats["cost"]:.2f}
1234 | - **Average Response Time:** {provider_stats["avg_response_time"]:.3f}s
1235 | - **Total Errors:** {provider_stats["errors"]}
1236 | - **Percentage of Total Usage:** {report_data["percentage_of_total"]:.2f}%
1237 | 
1238 | """
1239 |         
1240 |         # Add plots if available
1241 |         if plot_paths:
1242 |             markdown += """## Usage Visualizations
1243 | 
1244 | """
1245 |             for plot_path in plot_paths:
1246 |                 # Use relative path
1247 |                 rel_path = os.path.relpath(plot_path, self.reports_dir)
1248 |                 markdown += f"""![Provider Usage Plot]({rel_path})
1249 | 
1250 | """
1251 |         
1252 |         # Add models
1253 |         if report_data["models"]:
1254 |             markdown += """## Models
1255 | 
1256 | | Model | Requests | Tokens | Cost | Avg Response Time |
1257 | |-------|----------|--------|------|-------------------|
1258 | """
1259 |             for model, data in report_data["models"].items():
1260 |                 markdown += f"""| {model} | {data["requests"]:,} | {data["tokens"]:,} | ${data["cost"]:.2f} | {data["avg_response_time"]:.3f}s |
1261 | """
1262 |             markdown += "\n"
1263 |         
1264 |         # Add daily usage
1265 |         if report_data["daily_usage"]:
1266 |             markdown += """## Daily Usage
1267 | 
1268 | | Date | Tokens | Cost |
1269 | |------|--------|------|
1270 | """
1271 |             for day in sorted(report_data["daily_usage"], key=lambda x: x["date"], reverse=True):
1272 |                 markdown += f"""| {day["date"]} | {day["tokens"]:,} | ${day["cost"]:.2f} |
1273 | """
1274 |         
1275 |         return markdown
1276 |     
    def _generate_html_cost_report(self, report_data: Dict[str, Any], plot_paths: List[str]) -> str:
        """Generate an HTML cost report.

        Renders a self-contained HTML page focused on spending: an
        always-present "Cost Overview" card (total, cache savings, and the
        net cost computed as total minus savings), followed by optional cards
        for plot images, cost by provider, cost by model, cost efficiency
        (tokens per dollar), and a daily-cost table sorted most recent first.

        Note: unlike the usage report, the provider/model "percentage" values
        are formatted as-is here (not multiplied by 100) — presumably they
        arrive pre-scaled to 0-100; verify against the report builder.

        Args:
            report_data: Report data. Keys read here: "generated_at",
                "period", "total_cost", "cache_savings", "provider_costs",
                "model_costs", "cost_efficiency", and "daily_costs".
            plot_paths: List of plot image file paths; each image is embedded
                via a path made relative to ``self.reports_dir``.

        Returns:
            HTML report content as a single string.
        """
        # Basic HTML template (cost-focused); extra .cost/.savings styles color the key figures
        html = f"""<!DOCTYPE html>
<html>
<head>
    <title>Ultimate MCP Server Cost Report</title>
    <style>
        body {{ font-family: Arial, sans-serif; margin: 20px; }}
        h1, h2, h3 {{ color: #333; }}
        .container {{ max-width: 1200px; margin: 0 auto; }}
        .card {{ background: #f9f9f9; border-radius: 5px; padding: 20px; margin-bottom: 20px; box-shadow: 0 2px 4px rgba(0,0,0,0.1); }}
        table {{ width: 100%; border-collapse: collapse; }}
        th, td {{ padding: 8px; text-align: left; border-bottom: 1px solid #ddd; }}
        th {{ background-color: #f2f2f2; }}
        .stat {{ font-size: 24px; font-weight: bold; color: #0066cc; }}
        .cost {{ font-size: 24px; font-weight: bold; color: #cc0000; }}
        .savings {{ font-size: 24px; font-weight: bold; color: #00cc00; }}
        .plot {{ max-width: 100%; height: auto; margin: 20px 0; }}
    </style>
</head>
<body>
    <div class="container">
        <h1>Ultimate MCP Server Cost Report</h1>
        <p>Generated at: {report_data["generated_at"]}</p>
        <p>Period: {report_data["period"]}</p>
        
        <div class="card">
            <h2>Cost Overview</h2>
            <table>
                <tr>
                    <td>Total Cost</td>
                    <td class="cost">${report_data["total_cost"]:.2f}</td>
                </tr>
                <tr>
                    <td>Cache Savings</td>
                    <td class="savings">${report_data["cache_savings"]:.2f}</td>
                </tr>
                <tr>
                    <td>Net Cost</td>
                    <td class="cost">${report_data["total_cost"] - report_data["cache_savings"]:.2f}</td>
                </tr>
            </table>
        </div>
"""
        
        # Add plots if available (embedded as <img> tags)
        if plot_paths:
            html += """
        <div class="card">
            <h2>Cost Visualizations</h2>
"""
            for plot_path in plot_paths:
                # Use a path relative to the reports dir so the HTML is portable
                rel_path = os.path.relpath(plot_path, self.reports_dir)
                html += f"""
            <img class="plot" src="{rel_path}" alt="Cost Plot">
"""
            html += """
        </div>
"""
        
        # Add provider costs table (skipped entirely when empty)
        if report_data["provider_costs"]:
            html += """
        <div class="card">
            <h2>Cost by Provider</h2>
            <table>
                <tr>
                    <th>Provider</th>
                    <th>Cost</th>
                    <th>Percentage</th>
                </tr>
"""
            for provider in report_data["provider_costs"]:
                html += f"""
                <tr>
                    <td>{provider["provider"]}</td>
                    <td>${provider["cost"]:.2f}</td>
                    <td>{provider["percentage"]:.2f}%</td>
                </tr>
"""
            html += """
            </table>
        </div>
"""
        
        # Add model costs table (skipped entirely when empty)
        if report_data["model_costs"]:
            html += """
        <div class="card">
            <h2>Cost by Model</h2>
            <table>
                <tr>
                    <th>Model</th>
                    <th>Cost</th>
                    <th>Percentage</th>
                </tr>
"""
            for model in report_data["model_costs"]:
                html += f"""
                <tr>
                    <td>{model["model"]}</td>
                    <td>${model["cost"]:.2f}</td>
                    <td>{model["percentage"]:.2f}%</td>
                </tr>
"""
            html += """
            </table>
        </div>
"""
        
        # Add cost efficiency table (tokens-per-dollar, rounded to whole tokens)
        if report_data["cost_efficiency"]:
            html += """
        <div class="card">
            <h2>Cost Efficiency (Tokens per Dollar)</h2>
            <table>
                <tr>
                    <th>Model</th>
                    <th>Tokens per Dollar</th>
                    <th>Tokens</th>
                    <th>Cost</th>
                </tr>
"""
            for model in report_data["cost_efficiency"]:
                html += f"""
                <tr>
                    <td>{model["model"]}</td>
                    <td class="stat">{model["tokens_per_dollar"]:,.0f}</td>
                    <td>{model["tokens"]:,}</td>
                    <td>${model["cost"]:.2f}</td>
                </tr>
"""
            html += """
            </table>
        </div>
"""
        
        # Add daily costs table, most recent date first
        if report_data["daily_costs"]:
            html += """
        <div class="card">
            <h2>Daily Costs</h2>
            <table>
                <tr>
                    <th>Date</th>
                    <th>Cost</th>
                </tr>
"""
            for day in sorted(report_data["daily_costs"], key=lambda x: x["date"], reverse=True):
                html += f"""
                <tr>
                    <td>{day["date"]}</td>
                    <td>${day["cost"]:.2f}</td>
                </tr>
"""
            html += """
            </table>
        </div>
"""
        
        # Close HTML
        html += """
    </div>
</body>
</html>
"""
        
        return html
1455 |     
1456 |     def _generate_markdown_cost_report(self, report_data: Dict[str, Any], plot_paths: List[str]) -> str:
1457 |         """Generate a Markdown cost report.
1458 |         
1459 |         Args:
1460 |             report_data: Report data
1461 |             plot_paths: List of plot file paths
1462 |             
1463 |         Returns:
1464 |             Markdown report content
1465 |         """
1466 |         # Basic Markdown template (cost-focused)
1467 |         markdown = f"""# Ultimate MCP Server Cost Report
1468 | 
1469 | Generated at: {report_data["generated_at"]}  
1470 | Period: {report_data["period"]}
1471 | 
1472 | ## Cost Overview
1473 | 
1474 | - **Total Cost:** ${report_data["total_cost"]:.2f}
1475 | - **Cache Savings:** ${report_data["cache_savings"]:.2f}
1476 | - **Net Cost:** ${report_data["total_cost"] - report_data["cache_savings"]:.2f}
1477 | 
1478 | """
1479 |         
1480 |         # Add plots if available
1481 |         if plot_paths:
1482 |             markdown += """## Cost Visualizations
1483 | 
1484 | """
1485 |             for plot_path in plot_paths:
1486 |                 # Use relative path
1487 |                 rel_path = os.path.relpath(plot_path, self.reports_dir)
1488 |                 markdown += f"""![Cost Plot]({rel_path})
1489 | 
1490 | """
1491 |         
1492 |         # Add provider costs
1493 |         if report_data["provider_costs"]:
1494 |             markdown += """## Cost by Provider
1495 | 
1496 | | Provider | Cost | Percentage |
1497 | |----------|------|------------|
1498 | """
1499 |             for provider in report_data["provider_costs"]:
1500 |                 markdown += f"""| {provider["provider"]} | ${provider["cost"]:.2f} | {provider["percentage"]:.2f}% |
1501 | """
1502 |             markdown += "\n"
1503 |         
1504 |         # Add model costs
1505 |         if report_data["model_costs"]:
1506 |             markdown += """## Cost by Model
1507 | 
1508 | | Model | Cost | Percentage |
1509 | |-------|------|------------|
1510 | """
1511 |             for model in report_data["model_costs"]:
1512 |                 markdown += f"""| {model["model"]} | ${model["cost"]:.2f} | {model["percentage"]:.2f}% |
1513 | """
1514 |             markdown += "\n"
1515 |         
1516 |         # Add cost efficiency
1517 |         if report_data["cost_efficiency"]:
1518 |             markdown += """## Cost Efficiency (Tokens per Dollar)
1519 | 
1520 | | Model | Tokens per Dollar | Tokens | Cost |
1521 | |-------|-------------------|--------|------|
1522 | """
1523 |             for model in report_data["cost_efficiency"]:
1524 |                 markdown += f"""| {model["model"]} | {model["tokens_per_dollar"]:,.0f} | {model["tokens"]:,} | ${model["cost"]:.2f} |
1525 | """
1526 |             markdown += "\n"
1527 |         
1528 |         # Add daily costs
1529 |         if report_data["daily_costs"]:
1530 |             markdown += """## Daily Costs
1531 | 
1532 | | Date | Cost |
1533 | |------|------|
1534 | """
1535 |             for day in sorted(report_data["daily_costs"], key=lambda x: x["date"], reverse=True):
1536 |                 markdown += f"""| {day["date"]} | ${day["cost"]:.2f} |
1537 | """
1538 |         
1539 |         return markdown
```
Page 18/45FirstPrevNextLast