This is page 21 of 45. Use http://codebase.md/dicklesworthstone/llm_gateway_mcp_server?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .cursorignore
├── .env.example
├── .envrc
├── .gitignore
├── additional_features.md
├── check_api_keys.py
├── completion_support.py
├── comprehensive_test.py
├── docker-compose.yml
├── Dockerfile
├── empirically_measured_model_speeds.json
├── error_handling.py
├── example_structured_tool.py
├── examples
│   ├── __init__.py
│   ├── advanced_agent_flows_using_unified_memory_system_demo.py
│   ├── advanced_extraction_demo.py
│   ├── advanced_unified_memory_system_demo.py
│   ├── advanced_vector_search_demo.py
│   ├── analytics_reporting_demo.py
│   ├── audio_transcription_demo.py
│   ├── basic_completion_demo.py
│   ├── cache_demo.py
│   ├── claude_integration_demo.py
│   ├── compare_synthesize_demo.py
│   ├── cost_optimization.py
│   ├── data
│   │   ├── sample_event.txt
│   │   ├── Steve_Jobs_Introducing_The_iPhone_compressed.md
│   │   └── Steve_Jobs_Introducing_The_iPhone_compressed.mp3
│   ├── docstring_refiner_demo.py
│   ├── document_conversion_and_processing_demo.py
│   ├── entity_relation_graph_demo.py
│   ├── filesystem_operations_demo.py
│   ├── grok_integration_demo.py
│   ├── local_text_tools_demo.py
│   ├── marqo_fused_search_demo.py
│   ├── measure_model_speeds.py
│   ├── meta_api_demo.py
│   ├── multi_provider_demo.py
│   ├── ollama_integration_demo.py
│   ├── prompt_templates_demo.py
│   ├── python_sandbox_demo.py
│   ├── rag_example.py
│   ├── research_workflow_demo.py
│   ├── sample
│   │   ├── article.txt
│   │   ├── backprop_paper.pdf
│   │   ├── buffett.pdf
│   │   ├── contract_link.txt
│   │   ├── legal_contract.txt
│   │   ├── medical_case.txt
│   │   ├── northwind.db
│   │   ├── research_paper.txt
│   │   ├── sample_data.json
│   │   └── text_classification_samples
│   │       ├── email_classification.txt
│   │       ├── news_samples.txt
│   │       ├── product_reviews.txt
│   │       └── support_tickets.txt
│   ├── sample_docs
│   │   └── downloaded
│   │       └── attention_is_all_you_need.pdf
│   ├── sentiment_analysis_demo.py
│   ├── simple_completion_demo.py
│   ├── single_shot_synthesis_demo.py
│   ├── smart_browser_demo.py
│   ├── sql_database_demo.py
│   ├── sse_client_demo.py
│   ├── test_code_extraction.py
│   ├── test_content_detection.py
│   ├── test_ollama.py
│   ├── text_classification_demo.py
│   ├── text_redline_demo.py
│   ├── tool_composition_examples.py
│   ├── tournament_code_demo.py
│   ├── tournament_text_demo.py
│   ├── unified_memory_system_demo.py
│   ├── vector_search_demo.py
│   ├── web_automation_instruction_packs.py
│   └── workflow_delegation_demo.py
├── LICENSE
├── list_models.py
├── marqo_index_config.json.example
├── mcp_protocol_schema_2025-03-25_version.json
├── mcp_python_lib_docs.md
├── mcp_tool_context_estimator.py
├── model_preferences.py
├── pyproject.toml
├── quick_test.py
├── README.md
├── resource_annotations.py
├── run_all_demo_scripts_and_check_for_errors.py
├── storage
│   └── smart_browser_internal
│       ├── locator_cache.db
│       ├── readability.js
│       └── storage_state.enc
├── test_client.py
├── test_connection.py
├── TEST_README.md
├── test_sse_client.py
├── test_stdio_client.py
├── tests
│   ├── __init__.py
│   ├── conftest.py
│   ├── integration
│   │   ├── __init__.py
│   │   └── test_server.py
│   ├── manual
│   │   ├── test_extraction_advanced.py
│   │   └── test_extraction.py
│   └── unit
│       ├── __init__.py
│       ├── test_cache.py
│       ├── test_providers.py
│       └── test_tools.py
├── TODO.md
├── tool_annotations.py
├── tools_list.json
├── ultimate_mcp_banner.webp
├── ultimate_mcp_logo.webp
├── ultimate_mcp_server
│   ├── __init__.py
│   ├── __main__.py
│   ├── cli
│   │   ├── __init__.py
│   │   ├── __main__.py
│   │   ├── commands.py
│   │   ├── helpers.py
│   │   └── typer_cli.py
│   ├── clients
│   │   ├── __init__.py
│   │   ├── completion_client.py
│   │   └── rag_client.py
│   ├── config
│   │   └── examples
│   │       └── filesystem_config.yaml
│   ├── config.py
│   ├── constants.py
│   ├── core
│   │   ├── __init__.py
│   │   ├── evaluation
│   │   │   ├── base.py
│   │   │   └── evaluators.py
│   │   ├── providers
│   │   │   ├── __init__.py
│   │   │   ├── anthropic.py
│   │   │   ├── base.py
│   │   │   ├── deepseek.py
│   │   │   ├── gemini.py
│   │   │   ├── grok.py
│   │   │   ├── ollama.py
│   │   │   ├── openai.py
│   │   │   └── openrouter.py
│   │   ├── server.py
│   │   ├── state_store.py
│   │   ├── tournaments
│   │   │   ├── manager.py
│   │   │   ├── tasks.py
│   │   │   └── utils.py
│   │   └── ums_api
│   │       ├── __init__.py
│   │       ├── ums_database.py
│   │       ├── ums_endpoints.py
│   │       ├── ums_models.py
│   │       └── ums_services.py
│   ├── exceptions.py
│   ├── graceful_shutdown.py
│   ├── services
│   │   ├── __init__.py
│   │   ├── analytics
│   │   │   ├── __init__.py
│   │   │   ├── metrics.py
│   │   │   └── reporting.py
│   │   ├── cache
│   │   │   ├── __init__.py
│   │   │   ├── cache_service.py
│   │   │   ├── persistence.py
│   │   │   ├── strategies.py
│   │   │   └── utils.py
│   │   ├── cache.py
│   │   ├── document.py
│   │   ├── knowledge_base
│   │   │   ├── __init__.py
│   │   │   ├── feedback.py
│   │   │   ├── manager.py
│   │   │   ├── rag_engine.py
│   │   │   ├── retriever.py
│   │   │   └── utils.py
│   │   ├── prompts
│   │   │   ├── __init__.py
│   │   │   ├── repository.py
│   │   │   └── templates.py
│   │   ├── prompts.py
│   │   └── vector
│   │       ├── __init__.py
│   │       ├── embeddings.py
│   │       └── vector_service.py
│   ├── tool_token_counter.py
│   ├── tools
│   │   ├── __init__.py
│   │   ├── audio_transcription.py
│   │   ├── base.py
│   │   ├── completion.py
│   │   ├── docstring_refiner.py
│   │   ├── document_conversion_and_processing.py
│   │   ├── enhanced-ums-lookbook.html
│   │   ├── entity_relation_graph.py
│   │   ├── excel_spreadsheet_automation.py
│   │   ├── extraction.py
│   │   ├── filesystem.py
│   │   ├── html_to_markdown.py
│   │   ├── local_text_tools.py
│   │   ├── marqo_fused_search.py
│   │   ├── meta_api_tool.py
│   │   ├── ocr_tools.py
│   │   ├── optimization.py
│   │   ├── provider.py
│   │   ├── pyodide_boot_template.html
│   │   ├── python_sandbox.py
│   │   ├── rag.py
│   │   ├── redline-compiled.css
│   │   ├── sentiment_analysis.py
│   │   ├── single_shot_synthesis.py
│   │   ├── smart_browser.py
│   │   ├── sql_databases.py
│   │   ├── text_classification.py
│   │   ├── text_redline_tools.py
│   │   ├── tournament.py
│   │   ├── ums_explorer.html
│   │   └── unified_memory_system.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── async_utils.py
│   │   ├── display.py
│   │   ├── logging
│   │   │   ├── __init__.py
│   │   │   ├── console.py
│   │   │   ├── emojis.py
│   │   │   ├── formatter.py
│   │   │   ├── logger.py
│   │   │   ├── panels.py
│   │   │   ├── progress.py
│   │   │   └── themes.py
│   │   ├── parse_yaml.py
│   │   ├── parsing.py
│   │   ├── security.py
│   │   └── text.py
│   └── working_memory_api.py
├── unified_memory_system_technical_analysis.md
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/mcp_tool_context_estimator.py:
--------------------------------------------------------------------------------

```python
   1 | #!/usr/bin/env python
   2 | """
   3 | MCP Tool Context Estimator
   4 | 
   5 | This script connects to an already running MCP server and estimates how much 
   6 | of an LLM's context window would be consumed by the registered tools when 
   7 | they're sent to the model via the Model Context Protocol.
   8 | """
   9 | 
  10 | import argparse
  11 | import asyncio
  12 | import json
  13 | import os
  14 | import sys
  15 | import traceback
  16 | from typing import Any, Dict, List, Optional
  17 | 
  18 | import aiohttp
  19 | import tiktoken
  20 | from mcp import ClientSession, StdioServerParameters
  21 | from mcp.client.sse import sse_client
  22 | from mcp.client.stdio import stdio_client
  23 | from rich.console import Console
  24 | from rich.table import Table
  25 | 
  26 | # Add this script's directory to the Python path to ensure we can import modules
  27 | sys.path.append(os.path.dirname(os.path.abspath(__file__)))
  28 | 
  29 | # Import the existing decouple configuration from the project
  30 | from ultimate_mcp_server.config import decouple_config
  31 | 
  32 | # Import actual model pricing from constants
  33 | from ultimate_mcp_server.constants import COST_PER_MILLION_TOKENS
  34 | 
  35 | # Removed dependency on STANDALONE_TOOL_FUNCTIONS to avoid circular imports
  36 | # from ultimate_mcp_server.tools import STANDALONE_TOOL_FUNCTIONS
  37 | 
  38 | # Define a function to read tool names from a file generated by the server
  39 | def read_tool_names_from_file(filename='tools_list.json', quiet=False):
  40 |     """Read tool names from a JSON file generated by the server"""
  41 |     console = Console()
  42 |     try:
  43 |         if os.path.exists(filename):
  44 |             with open(filename, 'r') as f:
  45 |                 tool_data = json.load(f)
  46 |                 if not quiet:
  47 |                     console.print(f"[green]Successfully loaded {len(tool_data)} tools from {filename}[/green]")
  48 |                 return tool_data
  49 |         else:
  50 |             if not quiet:
  51 |                 console.print(f"[yellow]Tool list file {filename} not found. Will use server-provided tools only.[/yellow]")
  52 |             return []
  53 |     except Exception as e:
  54 |         if not quiet:
  55 |             console.print(f"[red]Error reading tool list: {str(e)}[/red]")
  56 |         return []
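     | # Note: tools_list.json is assumed to be a JSON array of tool entries as
     | # written by the server (element shape illustrative); this helper only loads
     | # the file, reports its length, and returns it verbatim.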
  57 | 
  58 | # Run another server with --load-all-tools for comparison
  59 | RUN_LOAD_ALL_TOOLS_COMPARISON = True
  60 | SHOW_DESCRIPTIONS = True
  61 | 
  62 | async def detect_server_transport(host: str, port: str, quiet: bool = False) -> tuple[str, str]:
  63 |     """
  64 |     Detect what transport mode the server is running and return the appropriate URL and transport type.
  65 |     
  66 |     Args:
  67 |         host: Server hostname
  68 |         port: Server port
  69 |         quiet: If True, suppress detection messages
  70 |         
  71 |     Returns:
  72 |         Tuple of (url, transport_type) where transport_type is 'sse', 'streamable-http', or 'stdio'
  73 |     """
  74 |     console = Console()
  75 |     
  76 |     if not quiet:
  77 |         console.print(f"[blue]Detecting transport mode for server at {host}:{port}...[/blue]")
  78 |     
  79 |     # Test MCP protocol endpoints with proper requests
  80 |     endpoints_to_try = [
  81 |         (f"http://{host}:{port}/mcp/", "streamable-http"),
  82 |         (f"http://{host}:{port}/sse", "sse"),
  83 |         (f"http://{host}:{port}", "sse"),  # fallback for sse
  84 |     ]
  85 |     
  86 |     # Create a simple MCP initialization message for testing
  87 |     test_message = {
  88 |         "jsonrpc": "2.0",
  89 |         "id": 1,
  90 |         "method": "initialize",
  91 |         "params": {
  92 |             "protocolVersion": "2024-11-05",
  93 |             "capabilities": {},
  94 |             "clientInfo": {"name": "mcp-detector", "version": "1.0.0"}
  95 |         }
  96 |     }
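     |     # For reference, a successful initialize response carries a JSON-RPC
     |     # result roughly shaped like this (values illustrative, per the MCP spec):
     |     #   {"jsonrpc": "2.0", "id": 1,
     |     #    "result": {"protocolVersion": "2024-11-05",
     |     #               "capabilities": {"tools": {...}},
     |     #               "serverInfo": {"name": "...", "version": "..."}}}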
  97 |     
  98 |     for url, transport in endpoints_to_try:
  99 |         try:
 100 |             timeout = aiohttp.ClientTimeout(total=5)
 101 |             async with aiohttp.ClientSession(timeout=timeout) as session:
 102 |                 if transport == "streamable-http":
 103 |                     # Test streamable-http with POST + MCP message
 104 |                     headers = {
 105 |                         "Content-Type": "application/json",
 106 |                         "Accept": "application/json, text/event-stream"
 107 |                     }
 108 |                     async with session.post(url, json=test_message, headers=headers) as response:
 109 |                         if response.status == 200:
 110 |                             # Check if response looks like MCP
 111 |                             try:
 112 |                                 data = await response.text()
  113 |                                 if '"jsonrpc"' in data or '"result"' in data:
 114 |                                     if not quiet:
 115 |                                         console.print(f"[green]Detected {transport} transport at {url}[/green]")
 116 |                                     return url, transport
 117 |                             except Exception:
 118 |                                 pass
 119 |                         elif response.status in [400, 404, 405, 406]:
 120 |                             # Server exists but doesn't support this transport
 121 |                             if not quiet:
 122 |                                 console.print(f"[dim]Endpoint {url} returned {response.status}[/dim]")
 123 |                             continue
 124 |                 else:
 125 |                     # Test SSE endpoints - they might respond to GET or POST
 126 |                     # Try GET first for SSE
 127 |                     try:
 128 |                         async with session.get(url) as response:
 129 |                             if response.status == 200:
 130 |                                 content_type = response.headers.get('content-type', '').lower()
 131 |                                 if 'text/event-stream' in content_type:
 132 |                                     if not quiet:
 133 |                                         console.print(f"[green]Detected {transport} transport at {url}[/green]")
 134 |                                     return url, transport
 135 |                     except Exception:
 136 |                         pass
 137 |                     
 138 |                     # If GET failed, try POST for SSE (some servers might expect it)
 139 |                     try:
 140 |                         async with session.post(url, json=test_message) as response:
 141 |                             if response.status == 200:
 142 |                                 content_type = response.headers.get('content-type', '').lower()
 143 |                                 if 'text/event-stream' in content_type or 'application/json' in content_type:
 144 |                                     if not quiet:
 145 |                                         console.print(f"[green]Detected {transport} transport at {url}[/green]")
 146 |                                     return url, transport
 147 |                     except Exception:
 148 |                         pass
 149 |                         
 150 |         except Exception as e:
 151 |             if not quiet:
 152 |                 console.print(f"[dim]Could not connect to {url}: {str(e)}[/dim]")
 153 |             continue
 154 |     
 155 |     # If HTTP detection fails, try to guess based on what we know
  156 |     # Check if the configured host/port responds at all
 157 |     try:
 158 |         timeout = aiohttp.ClientTimeout(total=2)
 159 |         async with aiohttp.ClientSession(timeout=timeout) as session:
 160 |             async with session.get(f"http://{host}:{port}/") as response:
 161 |                 if response.status == 200:
 162 |                     # Server is running, probably streamable-http since that's the new default
 163 |                     default_url = f"http://{host}:{port}/mcp/"
 164 |                     if not quiet:
 165 |                         console.print(f"[yellow]Server detected but transport unclear, defaulting to streamable-http at {default_url}[/yellow]")
 166 |                     return default_url, "streamable-http"
 167 |     except Exception:
 168 |         pass
 169 |     
 170 |     # Final fallback to SSE for backwards compatibility
 171 |     fallback_url = f"http://{host}:{port}/sse"
 172 |     if not quiet:
 173 |         console.print(f"[yellow]Could not detect transport mode, defaulting to SSE at {fallback_url}[/yellow]")
 174 |     return fallback_url, "sse"
 175 | 
 176 | def get_server_url_and_transport() -> tuple[str, str]:
 177 |     """
  178 |     Get the MCP server host and port from the .env file or environment variables
  179 |     
  180 |     Returns:
  181 |         Tuple of (host, port); the transport type is detected separately in the async context
 182 |     """
 183 |     # Try to get from python-decouple (.env file)
 184 |     try:
 185 |         host = decouple_config('MCP_SERVER_HOST', default='localhost')
 186 |         port = decouple_config('MCP_SERVER_PORT', default='8013')
 187 |         
 188 |         # Try to detect transport type - this will be resolved in the async context
 189 |         return host, port
 190 |     except Exception:
 191 |         # Fallback to environment variables if decouple fails
 192 |         if "MCP_SERVER_HOST" in os.environ and "MCP_SERVER_PORT" in os.environ:
 193 |             host = os.environ["MCP_SERVER_HOST"]
 194 |             port = os.environ["MCP_SERVER_PORT"]
 195 |             return host, port
 196 |         
 197 |         # Default fallback
 198 |         return "localhost", "8013"
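     |     # A minimal .env consumed by python-decouple would look like (values
     |     # illustrative, matching the defaults above):
     |     #   MCP_SERVER_HOST=localhost
     |     #   MCP_SERVER_PORT=8013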
 199 | 
  200 | # Count tokens for context/cost estimation
 201 | def count_tokens(text: str) -> int:
 202 |     """Count tokens using tiktoken with cl100k_base encoding (used by most modern models)"""
 203 |     encoding = tiktoken.get_encoding("cl100k_base")
 204 |     return len(encoding.encode(text))
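     |     # Rough intuition (exact counts vary by tiktoken version): a tiny payload
     |     # like '{"name": "read_file"}' encodes to under ten tokens, while a full
     |     # tool inputSchema typically runs to hundreds.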
 205 | 
 206 | # Use real pricing imported from constants.py
 207 | # Convert from dollars per million tokens to dollars per 1000 tokens for our calculations
 208 | MODEL_PRICES = {
 209 |     model: price_info["input"] / 1000  # Convert from per million to per thousand
 210 |     for model, price_info in COST_PER_MILLION_TOKENS.items()
 211 | }
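     | # Worked example: a model priced at $3.00 per million input tokens becomes
     | # $0.003 per 1K here, so a 10,000-token tool prompt costs 10 * 0.003 = $0.03.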
 212 | 
 213 | def format_capabilities(capabilities):
 214 |     """Safely format capabilities object to string for display"""
 215 |     result = {}
 216 |     # Check for specific capabilities we know about
 217 |     if hasattr(capabilities, "tools"):
 218 |         result["tools"] = "Available" if capabilities.tools else "Not available"
 219 |     if hasattr(capabilities, "prompts"):
 220 |         result["prompts"] = "Available" if capabilities.prompts else "Not available"
 221 |     if hasattr(capabilities, "resources"):
 222 |         result["resources"] = "Available" if capabilities.resources else "Not available"
 223 |     if hasattr(capabilities, "logging"):
 224 |         result["logging"] = "Available" if capabilities.logging else "Not available"
 225 |     if hasattr(capabilities, "completions"):
 226 |         result["completions"] = "Available" if capabilities.completions else "Not available"
 227 |     if hasattr(capabilities, "experimental"):
 228 |         result["experimental"] = "Available" if capabilities.experimental else "Not available"
 229 |     
 230 |     return json.dumps(result, indent=2)
 231 | 
 232 | async def get_mcp_server_tools_streamable_http(server_url: str, include_tools: Optional[List[str]] = None, console: Console = None, quiet: bool = False) -> Dict[str, Any]:
 233 |     """
 234 |     Connect to an MCP server running in streamable-http mode and fetch all registered tools.
 235 |     
 236 |     Args:
 237 |         server_url: The URL of the running MCP server (should be http://host:port/mcp)
 238 |         include_tools: Optional list of tool names to include (if None, get all tools)
 239 |         console: Optional console for output
 240 |         quiet: If True, only show most important output
 241 |         
 242 |     Returns:
 243 |         Dictionary with server info and tool definitions
 244 |     """
 245 |     if console is None:
 246 |         console = Console()
 247 |         
 248 |     if not quiet:
 249 |         console.print(f"[bold blue]Connecting to streamable-http MCP server at {server_url}...[/bold blue]")
 250 |     
 251 |     try:
 252 |         timeout = aiohttp.ClientTimeout(total=30)
 253 |         async with aiohttp.ClientSession(timeout=timeout) as session:
 254 |             # First, try to initialize the MCP connection
 255 |             init_data = {
 256 |                 "jsonrpc": "2.0",
 257 |                 "id": 1,
 258 |                 "method": "initialize",
 259 |                 "params": {
 260 |                     "protocolVersion": "2024-11-05",
 261 |                     "capabilities": {"roots": {"listChanged": True}},
 262 |                     "clientInfo": {"name": "mcp-tool-context-estimator", "version": "1.0.0"}
 263 |                 }
 264 |             }
 265 |             
 266 |             headers = {
 267 |                 "Content-Type": "application/json",
 268 |                 "Accept": "application/json, text/event-stream"
 269 |             }
 270 |             
 271 |             if not quiet:
 272 |                 console.print("[bold blue]Initializing MCP protocol via streamable-http...[/bold blue]")
 273 |             
 274 |             async with session.post(server_url, json=init_data, headers=headers) as response:
 275 |                 if response.status != 200:
 276 |                     raise Exception(f"Failed to initialize: HTTP {response.status}")
 277 |                 
 278 |                 # Capture session ID from response headers
 279 |                 session_id = response.headers.get('mcp-session-id')
 280 |                 if not session_id:
 281 |                     raise Exception("No session ID returned from server")
 282 |                 
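     |                 # The body may be SSE-framed rather than plain JSON, e.g.
     |                 # (illustrative):
     |                 #   event: message
     |                 #   data: {"jsonrpc": "2.0", "id": 1, "result": {...}}
     |                 # in which case the JSON payload sits after the 'data: ' prefix.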
 283 |                 # Handle SSE-formatted response
 284 |                 response_text = await response.text()
 285 |                 if response.content_type == "text/event-stream":
 286 |                     # Parse SSE format
 287 |                     lines = response_text.strip().split('\n')
 288 |                     json_data = None
 289 |                     for line in lines:
 290 |                         if line.startswith('data: '):
 291 |                             json_data = line[6:]  # Remove 'data: ' prefix
 292 |                             break
 293 |                     if json_data:
 294 |                         init_result = json.loads(json_data)
 295 |                     else:
 296 |                         raise Exception("No JSON data found in SSE response")
 297 |                 else:
 298 |                     init_result = await response.json()
 299 |                 
 300 |                 if "error" in init_result:
 301 |                     raise Exception(f"MCP initialization error: {init_result['error']}")
 302 |                 
 303 |                 if "result" not in init_result:
 304 |                     raise Exception("Invalid MCP initialization response")
 305 |                 
 306 |                 result = init_result["result"]
 307 |                 server_info = result.get("serverInfo", {})
 308 |                 server_name = server_info.get("name", "Unknown Server")
 309 |                 server_version = server_info.get("version", "Unknown Version")
 310 |                 
 311 |                 if not quiet:
 312 |                     console.print(f"[green]Connected to server:[/green] {server_name} v{server_version}")
 313 |                 
 314 |                 # Show server capabilities
 315 |                 capabilities = result.get("capabilities", {})
 316 |                 if not quiet:
 317 |                     console.print("[bold blue]Server capabilities:[/bold blue]")
 318 |                     console.print(json.dumps(capabilities, indent=2))
 319 |                 
 320 |                 # Check if tools capability is present
 321 |                 has_tools = capabilities.get("tools", False)
 322 |                     
 323 |                 if not quiet and not has_tools:
 324 |                     console.print("[bold yellow]Warning: This server does not advertise tools capability![/bold yellow]")
 325 |                     console.print("The server might not support tool listing, but we'll try anyway.")
 326 |                 
 327 |                 # Get server instructions (from server info)
 328 |                 server_instructions = server_info.get("instructions", "")
 329 |                 if server_instructions and not quiet:
 330 |                     console.print(f"[green]Server provides instructions of length {len(server_instructions):,} chars[/green]")
 331 |                 elif not quiet:
 332 |                     console.print("[yellow]Server does not provide instructions[/yellow]")
 333 |             
 334 |             # Update headers to include session ID for subsequent requests
 335 |             headers["mcp-session-id"] = session_id
 336 |             
 337 |             # Send initialized notification
 338 |             init_notify_data = {
 339 |                 "jsonrpc": "2.0",
 340 |                 "method": "notifications/initialized"
 341 |             }
 342 |             
 343 |             async with session.post(server_url, json=init_notify_data, headers=headers) as response:
 344 |                 # This is a notification, so we don't expect a response
 345 |                 pass
 346 |             
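     |             # Handshake order matters per the MCP spec: initialize
     |             # (request/response), then notifications/initialized (no response
     |             # expected), and only then requests such as tools/list; some
     |             # servers may reject tool calls made before that notification.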
 347 |             # Now list the tools
 348 |             if not quiet:
 349 |                 console.print("[bold blue]Retrieving tool definitions...[/bold blue]")
 350 |             
 351 |             list_tools_data = {
 352 |                 "jsonrpc": "2.0",
 353 |                 "id": 2,
 354 |                 "method": "tools/list"
 355 |             }
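     |             # Expected result shape (illustrative):
     |             #   {"jsonrpc": "2.0", "id": 2,
     |             #    "result": {"tools": [{"name": "...", "description": "...",
     |             #                          "inputSchema": {...}}]}}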
 356 |             
 357 |             async with session.post(server_url, json=list_tools_data, headers=headers) as response:
 358 |                 if response.status != 200:
 359 |                     raise Exception(f"Failed to list tools: HTTP {response.status}")
 360 |                 
 361 |                 # Handle SSE-formatted response for tools list
 362 |                 response_text = await response.text()
 363 |                 if response.content_type == "text/event-stream":
 364 |                     # Parse SSE format
 365 |                     lines = response_text.strip().split('\n')
 366 |                     json_data = None
 367 |                     for line in lines:
 368 |                         if line.startswith('data: '):
 369 |                             json_data = line[6:]  # Remove 'data: ' prefix
 370 |                             break
 371 |                     if json_data:
 372 |                         tools_result = json.loads(json_data)
 373 |                     else:
 374 |                         raise Exception("No JSON data found in SSE response")
 375 |                 else:
 376 |                     tools_result = await response.json()
 377 |                 
 378 |                 if "error" in tools_result:
 379 |                     raise Exception(f"MCP tools/list error: {tools_result['error']}")
 380 |                 
 381 |                 if "result" not in tools_result:
 382 |                     raise Exception("Invalid MCP tools/list response")
 383 |                 
 384 |                 tools_data = tools_result["result"]
 385 |                 tools = tools_data.get("tools", [])
 386 |                 
 387 |                 # Count tools
 388 |                 tool_count = len(tools) if tools else 0
 389 |                 if not quiet:
 390 |                     console.print(f"[green]Found {tool_count} tools[/green]")
 391 |                 
 392 |                 if tool_count == 0:
 393 |                     console.print("[bold yellow]No tools found on the server.[/bold yellow]")
 394 |                     return {}
 395 |                 
 396 |                 # Convert tools to their JSON representation (exactly as sent to LLMs)
 397 |                 tool_defs = []
 398 |                 
 399 |                 # Add debug information about descriptions
 400 |                 has_descriptions = 0
 401 |                 total_desc_length = 0
 402 |                 
 403 |                 for tool in tools:
 404 |                     # Convert to dict that matches the MCP protocol spec for tool definitions
 405 |                     tool_dict = {
 406 |                         "name": tool.get("name"),
 407 |                         "inputSchema": tool.get("inputSchema")
 408 |                     }
 409 |                     
 410 |                     # Debug description handling
 411 |                     if tool.get("description"):
 412 |                         desc = tool["description"]
 413 |                         has_descriptions += 1
 414 |                         total_desc_length += len(desc)
 415 |                         if not quiet:
 416 |                             console.print(f"[dim]Tool '{tool['name']}' has description ({len(desc):,} chars)[/dim]")
 417 |                         tool_dict["description"] = desc
 418 |                     elif not quiet:
 419 |                         console.print(f"[dim yellow]Tool '{tool['name']}' has no description[/dim yellow]")
 420 |                         
 421 |                     if tool.get("annotations"):
 422 |                         tool_dict["annotations"] = tool["annotations"]
 423 |                     
 424 |                     tool_defs.append(tool_dict)
 425 |                 
 426 |                 # Print description statistics
 427 |                 if not quiet:
 428 |                     console.print(f"[green]{has_descriptions} out of {tool_count} tools have descriptions[/green]")
 429 |                     if has_descriptions > 0:
 430 |                         console.print(f"[green]Average description length: {total_desc_length/has_descriptions:,.1f} chars[/green]")
 431 |                 
 432 |                 # Include server info in the result to be used for creating the complete LLM prompt
 433 |                 return {
 434 |                     "tools": tool_defs,
 435 |                     "server_name": server_name,
 436 |                     "server_version": server_version,
 437 |                     "server_instructions": server_instructions
 438 |                 }
 439 |                 
 440 |     except Exception as e:
 441 |         console.print(f"[bold red]Error connecting to streamable-http MCP server:[/bold red] {str(e)}")
 442 |         if not quiet:
 443 |             console.print("[bold yellow]Stack trace:[/bold yellow]")
 444 |             console.print(traceback.format_exc())
 445 |         raise
 446 | 
 447 | async def get_mcp_server_tools_stdio(command: str, args: Optional[List[str]] = None, include_tools: Optional[List[str]] = None, console: Console = None, quiet: bool = False) -> Dict[str, Any]:
 448 |     """
 449 |     Connect to an MCP server via stdio transport and fetch all registered tools.
 450 |     
 451 |     Args:
 452 |         command: Command to run the MCP server
 453 |         args: Additional arguments for the command
 454 |         include_tools: Optional list of tool names to include (if None, get all tools)
 455 |         console: Optional console for output
 456 |         quiet: If True, only show most important output
 457 |         
 458 |     Returns:
 459 |         Dictionary with server info and tool definitions
 460 |     """
 461 |     if console is None:
 462 |         console = Console()
 463 |     
 464 |     if not quiet:
 465 |         console.print(f"[bold blue]Connecting to MCP server via stdio: {command} {' '.join(args or [])}[/bold blue]")
 466 |     
  467 |     try:
  468 |         # Build StdioServerParameters: the MCP stdio_client expects a
  469 |         # parameters object rather than a raw argv list
  470 |         cmd = command.split() if isinstance(command, str) else [command]
  471 |         cmd.extend(args or [])
  472 |         server_params = StdioServerParameters(command=cmd[0], args=cmd[1:])
  473 |         async with stdio_client(server_params) as (read, write):
 474 |             # Create a client session
 475 |             async with ClientSession(read, write) as session:
 476 |                 # Initialize connection to server
 477 |                 if not quiet:
 478 |                     console.print("[bold blue]Initializing MCP protocol via stdio...[/bold blue]")
 479 |                 init_result = await session.initialize()
 480 |                 
 481 |                 # Get server info
 482 |                 server_name = init_result.serverInfo.name
 483 |                 server_version = init_result.serverInfo.version
 484 |                 if not quiet:
 485 |                     console.print(f"[green]Connected to server:[/green] {server_name} v{server_version}")
 486 |                 
 487 |                 # Show server capabilities safely
 488 |                 if not quiet:
 489 |                     console.print("[bold blue]Server capabilities:[/bold blue]")
 490 |                     console.print(format_capabilities(init_result.capabilities))
 491 |                 
 492 |                 # Check if tools capability is present
 493 |                 has_tools = False
 494 |                 if hasattr(init_result.capabilities, "tools") and init_result.capabilities.tools:
 495 |                     has_tools = True
 496 |                     
 497 |                 if not quiet and not has_tools:
 498 |                     console.print("[bold yellow]Warning: This server does not advertise tools capability![/bold yellow]")
 499 |                     console.print("The server might not support tool listing, but we'll try anyway.")
 500 |                 
 501 |                 # Get server instructions (will be used in the LLM prompt)
 502 |                 server_instructions = ""
 503 |                 if hasattr(init_result, "instructions") and init_result.instructions:
 504 |                     server_instructions = init_result.instructions
 505 |                     if not quiet:
 506 |                         console.print(f"[green]Server provides instructions of length {len(server_instructions):,} chars[/green]")
 507 |                 elif not quiet:
 508 |                     console.print("[yellow]Server does not provide instructions[/yellow]")
 509 |                 
 510 |                 # List available tools
 511 |                 if not quiet:
 512 |                     console.print("[bold blue]Retrieving tool definitions...[/bold blue]")
 513 |                 try:
 514 |                     tools_result = await session.list_tools()
 515 |                     
 516 |                     # Handle ListToolsResult object
 517 |                     tools = []
 518 |                     if hasattr(tools_result, "tools"):
 519 |                         tools = tools_result.tools
 520 |                     else:
 521 |                         if not quiet:
 522 |                             console.print("[bold yellow]Tools result doesn't have expected structure. Trying alternatives...[/bold yellow]")
 523 |                         if hasattr(tools_result, "__iter__"):
 524 |                             tools = list(tools_result)
 525 |                         else:
 526 |                             if not quiet:
 527 |                                 console.print(f"[bold yellow]Tools result type: {type(tools_result)}[/bold yellow]")
 528 |                                 console.print(f"Tools result attributes: {dir(tools_result)}")
 529 |                             raise ValueError("Unable to extract tools from server response")
 530 |                     
 531 |                     # Count tools
 532 |                     tool_count = len(tools) if tools else 0
 533 |                     if not quiet:
 534 |                         console.print(f"[green]Found {tool_count} tools[/green]")
 535 |                     
 536 |                     if tool_count == 0:
 537 |                         console.print("[bold yellow]No tools found on the server.[/bold yellow]")
 538 |                         return {}
 539 |                     
 540 |                     # Convert tools to their JSON representation (exactly as sent to LLMs)
 541 |                     tool_defs = []
 542 |                     
 543 |                     # Add debug information about descriptions
 544 |                     has_descriptions = 0
 545 |                     total_desc_length = 0
 546 |                     
 547 |                     for tool in tools:
 548 |                         # Convert to dict that matches the MCP protocol spec for tool definitions
 549 |                         tool_dict = {
 550 |                             "name": tool.name,
 551 |                             "inputSchema": tool.inputSchema
 552 |                         }
 553 |                         
 554 |                         # Debug description handling
 555 |                         if hasattr(tool, "description") and tool.description:
 556 |                             desc = tool.description
 557 |                             has_descriptions += 1
 558 |                             total_desc_length += len(desc)
 559 |                             if not quiet:
 560 |                                 console.print(f"[dim]Tool '{tool.name}' has description ({len(desc):,} chars)[/dim]")
 561 |                             tool_dict["description"] = desc
 562 |                         elif not quiet:
 563 |                             console.print(f"[dim yellow]Tool '{tool.name}' has no description[/dim yellow]")
 564 |                             
 565 |                         if hasattr(tool, "annotations") and tool.annotations:
 566 |                             tool_dict["annotations"] = tool.annotations
 567 |                         
 568 |                         tool_defs.append(tool_dict)
 569 |                     
 570 |                     # Print description statistics
 571 |                     if not quiet:
 572 |                         console.print(f"[green]{has_descriptions} out of {tool_count} tools have descriptions[/green]")
 573 |                         if has_descriptions > 0:
 574 |                             console.print(f"[green]Average description length: {total_desc_length/has_descriptions:,.1f} chars[/green]")
 575 |                     
 576 |                     # Include server info in the result to be used for creating the complete LLM prompt
 577 |                     return {
 578 |                         "tools": tool_defs,
 579 |                         "server_name": server_name,
 580 |                         "server_version": server_version,
 581 |                         "server_instructions": server_instructions
 582 |                     }
 583 |                 except Exception as e:
 584 |                     console.print(f"[bold red]Error listing tools:[/bold red] {str(e)}")
 585 |                     if not quiet:
 586 |                         console.print("[bold yellow]Stack trace:[/bold yellow]")
 587 |                         console.print(traceback.format_exc())
 588 |                     raise
 589 |     except Exception as e:
 590 |         console.print(f"[bold red]Error connecting to MCP server via stdio:[/bold red] {str(e)}")
 591 |         if not quiet:
 592 |             console.print("[bold yellow]Stack trace:[/bold yellow]")
 593 |             console.print(traceback.format_exc())
 594 |         raise
 595 | 
 596 | async def get_mcp_server_tools(server_url: str, transport_type: str, include_tools: Optional[List[str]] = None, console: Console = None, quiet: bool = False, command: Optional[str] = None, args: Optional[List[str]] = None) -> Dict[str, Any]:
 597 |     """
 598 |     Connect to an already running MCP server and fetch all registered tools.
 599 |     
 600 |     Args:
 601 |         server_url: The URL of the running MCP server (ignored for stdio)
 602 |         transport_type: The transport type ('sse', 'streamable-http', or 'stdio')
 603 |         include_tools: Optional list of tool names to include (if None, get all tools)
 604 |         console: Optional console for output
 605 |         quiet: If True, only show most important output
 606 |         command: Command to run for stdio transport
 607 |         args: Additional arguments for stdio command
 608 |         
 609 |     Returns:
 610 |         Dictionary with server info and tool definitions
 611 |     """
 612 |     if console is None:
 613 |         console = Console()
 614 |     
 615 |     if transport_type == "streamable-http":
 616 |         return await get_mcp_server_tools_streamable_http(server_url, include_tools, console, quiet)
 617 |     elif transport_type == "stdio":
 618 |         if not command:
 619 |             raise ValueError("Command must be provided for stdio transport")
 620 |         return await get_mcp_server_tools_stdio(command, args, include_tools, console, quiet)
 621 |     
 622 |     # Original SSE implementation
 623 |     if not quiet:
 624 |         console.print(f"[bold blue]Connecting to MCP server at {server_url}...[/bold blue]")
 625 |     
 626 |     try:
 627 |         async with sse_client(server_url) as (read, write):
 628 |             # Create a client session
 629 |             async with ClientSession(read, write) as session:
 630 |                 # Initialize connection to server
 631 |                 if not quiet:
 632 |                     console.print("[bold blue]Initializing MCP protocol...[/bold blue]")
 633 |                 init_result = await session.initialize()
 634 |                 
 635 |                 # Get server info
 636 |                 server_name = init_result.serverInfo.name
 637 |                 server_version = init_result.serverInfo.version
 638 |                 if not quiet:
 639 |                     console.print(f"[green]Connected to server:[/green] {server_name} v{server_version}")
 640 |                 
 641 |                 # Show server capabilities safely
 642 |                 if not quiet:
 643 |                     console.print("[bold blue]Server capabilities:[/bold blue]")
 644 |                     console.print(format_capabilities(init_result.capabilities))
 645 |                 
 646 |                 # Check if tools capability is present
 647 |                 has_tools = False
 648 |                 if hasattr(init_result.capabilities, "tools") and init_result.capabilities.tools:
 649 |                     has_tools = True
 650 |                     
 651 |                 if not quiet and not has_tools:
 652 |                     console.print("[bold yellow]Warning: This server does not advertise tools capability![/bold yellow]")
 653 |                     console.print("The server might not support tool listing, but we'll try anyway.")
 654 |                 
 655 |                 # Get server instructions (will be used in the LLM prompt)
 656 |                 server_instructions = ""
 657 |                 if hasattr(init_result, "instructions") and init_result.instructions:
 658 |                     server_instructions = init_result.instructions
 659 |                     if not quiet:
 660 |                         console.print(f"[green]Server provides instructions of length {len(server_instructions):,} chars[/green]")
 661 |                 elif not quiet:
 662 |                     console.print("[yellow]Server does not provide instructions[/yellow]")
 663 |                 
 664 |                 # List available tools
 665 |                 if not quiet:
 666 |                     console.print("[bold blue]Retrieving tool definitions...[/bold blue]")
 667 |                 try:
 668 |                     tools_result = await session.list_tools()
 669 |                     
 670 |                     # Handle ListToolsResult object
 671 |                     # The result should have a 'tools' attribute which is the actual list
 672 |                     tools = []
 673 |                     if hasattr(tools_result, "tools"):
 674 |                         tools = tools_result.tools
 675 |                     else:
 676 |                         # If it doesn't have a tools attribute, try to access it as a list directly
 677 |                         # or check other common patterns
 678 |                         if not quiet:
 679 |                             console.print("[bold yellow]Tools result doesn't have expected structure. Trying alternatives...[/bold yellow]")
 680 |                         if hasattr(tools_result, "__iter__"):
 681 |                             tools = list(tools_result)
 682 |                         else:
 683 |                             # Print the object to help diagnose
 684 |                             if not quiet:
 685 |                                 console.print(f"[bold yellow]Tools result type: {type(tools_result)}[/bold yellow]")
 686 |                                 console.print(f"Tools result attributes: {dir(tools_result)}")
 687 |                             raise ValueError("Unable to extract tools from server response")
 688 |                     
 689 |                     # Count tools
 690 |                     tool_count = len(tools) if tools else 0
 691 |                     if not quiet:
 692 |                         console.print(f"[green]Found {tool_count} tools[/green]")
 693 |                     
 694 |                     if tool_count == 0:
 695 |                         console.print("[bold yellow]No tools found on the server.[/bold yellow]")
 696 |                         return {}
 697 |                     
 698 |                     # Convert tools to their JSON representation (exactly as sent to LLMs)
 699 |                     tool_defs = []
 700 |                     
 701 |                     # Add debug information about descriptions
 702 |                     has_descriptions = 0
 703 |                     total_desc_length = 0
 704 |                     
 705 |                     for tool in tools:
 706 |                         # Convert to dict that matches the MCP protocol spec for tool definitions
 707 |                         tool_dict = {
 708 |                             "name": tool.name,
 709 |                             "inputSchema": tool.inputSchema
 710 |                         }
 711 |                         
 712 |                         # Debug description handling
 713 |                         if hasattr(tool, "description") and tool.description:
 714 |                             desc = tool.description
 715 |                             has_descriptions += 1
 716 |                             total_desc_length += len(desc)
 717 |                             if not quiet:
 718 |                                 console.print(f"[dim]Tool '{tool.name}' has description ({len(desc):,} chars)[/dim]")
 719 |                             tool_dict["description"] = desc
 720 |                         elif not quiet:
 721 |                             console.print(f"[dim yellow]Tool '{tool.name}' has no description[/dim yellow]")
 722 |                             
 723 |                         if hasattr(tool, "annotations") and tool.annotations:
 724 |                             tool_dict["annotations"] = tool.annotations
 725 |                         
 726 |                         tool_defs.append(tool_dict)
 727 |                     
 728 |                     # Print description statistics
 729 |                     if not quiet:
 730 |                         console.print(f"[green]{has_descriptions} out of {tool_count} tools have descriptions[/green]")
 731 |                         if has_descriptions > 0:
 732 |                             console.print(f"[green]Average description length: {total_desc_length/has_descriptions:,.1f} chars[/green]")
 733 |                     
 734 |                     # Include server info in the result to be used for creating the complete LLM prompt
 735 |                     return {
 736 |                         "tools": tool_defs,
 737 |                         "server_name": server_name,
 738 |                         "server_version": server_version,
 739 |                         "server_instructions": server_instructions
 740 |                     }
 741 |                 except Exception as e:
 742 |                     console.print(f"[bold red]Error listing tools:[/bold red] {str(e)}")
 743 |                     if not quiet:
 744 |                         console.print("[bold yellow]Stack trace:[/bold yellow]")
 745 |                         console.print(traceback.format_exc())
 746 |                     
 747 |                     # Try retrieving server details to help diagnose
 748 |                     if not quiet:
 749 |                         try:
 750 |                             console.print("[bold blue]Getting additional server information...[/bold blue]")
 751 |                             if hasattr(init_result.capabilities, "prompts") and init_result.capabilities.prompts:
 752 |                                 prompts_result = await session.list_prompts()
 753 |                                 prompt_count = 0
 754 |                                 if hasattr(prompts_result, "prompts"):
 755 |                                     prompt_count = len(prompts_result.prompts)
 756 |                                 console.print(f"Server has {prompt_count} prompts available")
 757 |                         except Exception:
 758 |                             pass
 759 |                         
 760 |                     raise
 761 |     except Exception as e:
 762 |         console.print(f"[bold red]Error connecting to MCP server:[/bold red] {str(e)}")
 763 |         if not quiet:
 764 |             console.print("[bold yellow]Stack trace:[/bold yellow]")
 765 |             console.print(traceback.format_exc())
 766 |         
 767 |         # Provide guidance based on the error
 768 |         if "Connection refused" in str(e):
 769 |             console.print("[bold yellow]The server doesn't appear to be running at the specified URL.[/bold yellow]")
 770 |             console.print("Make sure your MCP server is running and available at the URL you specified.")
 771 |         elif "401" in str(e):
 772 |             console.print("[bold yellow]Authentication error - the server requires credentials.[/bold yellow]")
 773 |         elif "404" in str(e):
 774 |             console.print("[bold yellow]The server endpoint was not found.[/bold yellow]")
 775 |             console.print("Check if you need to use a different URL path (e.g., /sse or /mcp)")
 776 |             console.print("Try using /sse instead of just the port number.")
 777 |         
 778 |         sys.exit(1)
 779 | 
 780 | def create_full_tool_registration_prompt(server_info, tools=None, quiet=False):
 781 |     """
 782 |     Create a full, realistic prompt as would be sent to an LLM when registering MCP tools.
 783 |     
 784 |     This generates the exact format used in the MCP client's format_tools_for_anthropic method
 785 |     which sends tools to the Anthropic API.
 786 |     
 787 |     Args:
 788 |         server_info: Dictionary with server information
 789 |         tools: List of tool definitions to include (if None, use all tools)
 790 |         quiet: If True, only show most important output
 791 |         
 792 |     Returns:
 793 |         String with the serialized JSON representation of tools as sent to the API
 794 |     """
 795 |     if tools is None:
 796 |         tools = server_info["tools"]
 797 |         
 798 |     # The actual format sent to Anthropic API is just:
 799 |     # {
 800 |     #   "name": sanitized_name,
 801 |     #   "input_schema": tool.input_schema,
 802 |     #   "description": tool.description  # only if present
 803 |     # }
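     |     # e.g. one formatted entry (illustrative, not an actual tool from this
     |     # server):
     |     #   {"name": "read_file",
     |     #    "input_schema": {"type": "object",
     |     #                     "properties": {"path": {"type": "string"}},
     |     #                     "required": ["path"]},
     |     #    "description": "Read a file from the filesystem."}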
 804 |     formatted_tools = []
 805 |     
 806 |     # Track description statistics
 807 |     desc_count = 0
 808 |     total_desc_len = 0
 809 |     
 810 |     console = Console()
 811 |     
 812 |     for tool in tools:
 813 |         # Create the tool dict exactly as in format_tools_for_anthropic
 814 |         tool_dict_for_api = {
 815 |             "name": tool["name"],
 816 |             "input_schema": tool["inputSchema"]
 817 |         }
 818 |         if SHOW_DESCRIPTIONS:
 819 |             # Add description only if it exists and is not empty
 820 |             if "description" in tool and tool["description"]:
 821 |                 desc = tool["description"]
 822 |                 tool_dict_for_api["description"] = desc
 823 |                 desc_count += 1
 824 |                 total_desc_len += len(desc)
 825 |                 if not quiet and len(desc) > 100:
 826 |                     # Show abbreviated version for long descriptions
 827 |                     abbrev = desc[:50] + "..." + desc[-50:]
 828 |                     console.print(f"[dim]Including description for {tool['name']}: {abbrev}[/dim]")
 829 |                 elif not quiet:
 830 |                     console.print(f"[dim]Including description for {tool['name']}: {desc}[/dim]")
 831 |             elif not quiet:
 832 |                 console.print(f"[dim yellow]No description for {tool['name']}[/dim yellow]")
 833 |                 
 834 |         formatted_tools.append(tool_dict_for_api)
 835 |     
 836 |     # Final description statistics - ALWAYS show these since they're part of the requested output
 837 |     console.print(f"[green]Included {desc_count} descriptions out of {len(tools)} tools in final output[/green]")
 838 |     if desc_count > 0:
 839 |         console.print(f"[green]Average description length in final output: {total_desc_len/desc_count:,.1f} chars[/green]")
 840 |     
 841 |     # Return the serialized JSON that would be sent to the API
 842 |     return json.dumps(formatted_tools, indent=2)
 843 | 
 844 | def format_tool_for_llm(tool: Dict[str, Any]) -> str:
 845 |     """
 846 |     Format a tool definition exactly as it would be presented to an LLM.
 847 |     This should match the format used in actual LLM prompt construction.
 848 |     """
 849 |     # This is how tools are typically formatted for LLMs in the JSON format
 850 |     return json.dumps(tool, indent=2)
 851 | 
 852 | def analyze_tools_token_usage(current_tools: Dict[str, Any], all_tools: Dict[str, Any], quiet: bool = False):
 853 |     """
 854 |     Analyze token usage for a complete MCP tool registration prompt
 855 |     
 856 |     Args:
 857 |         current_tools: Current active toolset info
 858 |         all_tools: Complete toolset info (with --load-all-tools)
 859 |         quiet: If True, only show most important output
 860 |     """
 861 |     console = Console()
 862 |     
 863 |     # Format tools as they would be sent to an LLM
 864 |     current_tools_subset = current_tools["tools"]
 865 |     all_tools_subset = all_tools["tools"]
 866 |     
 867 |     # Determine if we're likely comparing the same set vs different sets
 868 |     same_toolsets = len(current_tools_subset) == len(all_tools_subset)
 869 |     if same_toolsets and not quiet:
 870 |         console.print("[yellow]Warning: Current tool count equals all tools count.[/yellow]")
 871 |         console.print("[yellow]This suggests the server is already running with --load-all-tools[/yellow]")
 872 |     
 873 |     # Adjust column labels based on what we're comparing
 874 |     current_label = "Current Tools"
 875 |     all_label = "All Tools" 
 876 |     
 877 |     # Get JSON representations
 878 |     current_tools_json = "\n".join(format_tool_for_llm(tool) for tool in current_tools_subset)
 879 |     all_tools_json = "\n".join(format_tool_for_llm(tool) for tool in all_tools_subset)
 880 |     
 881 |     # Create the full prompts
 882 |     current_tools_prompt = create_full_tool_registration_prompt(current_tools, current_tools_subset, quiet)
 883 |     all_tools_prompt = create_full_tool_registration_prompt(all_tools, all_tools_subset, quiet)
 884 |     
 885 |     # Calculate sizes for raw JSON
 886 |     current_tools_size_kb = len(current_tools_json.encode('utf-8')) / 1024
 887 |     all_tools_size_kb = len(all_tools_json.encode('utf-8')) / 1024
 888 |     
 889 |     # Calculate sizes for full prompts
 890 |     current_tools_prompt_size_kb = len(current_tools_prompt.encode('utf-8')) / 1024
 891 |     all_tools_prompt_size_kb = len(all_tools_prompt.encode('utf-8')) / 1024
 892 |     
 893 |     # Count tokens for raw JSON
 894 |     current_tools_tokens = count_tokens(current_tools_json)
 895 |     all_tools_tokens = count_tokens(all_tools_json)
 896 |     
 897 |     # Count tokens for full prompts
 898 |     current_tools_prompt_tokens = count_tokens(current_tools_prompt)
 899 |     all_tools_prompt_tokens = count_tokens(all_tools_prompt)
 900 |     
 901 |     # Calculate costs for different models (using full prompt tokens)
 902 |     current_tools_costs = {model: (price * current_tools_prompt_tokens / 1000)
 903 |                            for model, price in MODEL_PRICES.items()}
 904 |     all_tools_costs = {model: (price * all_tools_prompt_tokens / 1000)
 905 |                        for model, price in MODEL_PRICES.items()}
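# Worked example (added commentary, not in the original source), assuming
# MODEL_PRICES maps model name -> USD per 1K input tokens as defined earlier in
# this file: with price = 0.003 and a 20,000-token prompt,
#     cost = 0.003 * 20_000 / 1000 = 0.06 USD
# for every request that carries the full tool registration block.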
 906 |     
 907 |     # Save the complete, untruncated text to files
 908 |     with open("current_tools_sent_to_llm.json", "w", encoding="utf-8") as f:
 909 |         f.write(current_tools_prompt)
 910 |     console.print("[green]Saved current tools JSON to current_tools_sent_to_llm.json[/green]")
 911 |     
 912 |     with open("all_tools_sent_to_llm.json", "w", encoding="utf-8") as f:
 913 |         f.write(all_tools_prompt)
 914 |     console.print("[green]Saved all tools JSON to all_tools_sent_to_llm.json[/green]\n\n")
 915 |     
 916 |     # Create data for display - ensure the data is correct and consistent
 917 |     data = {
 918 |         "current_tools": {
 919 |             "count": len(current_tools_subset),
 920 |             "raw_size_kb": current_tools_size_kb,
 921 |             "raw_tokens": current_tools_tokens,
 922 |             "full_size_kb": current_tools_prompt_size_kb,
 923 |             "full_tokens": current_tools_prompt_tokens,
 924 |             "costs": current_tools_costs
 925 |         },
 926 |         "all_tools": {
 927 |             "count": len(all_tools_subset),
 928 |             "raw_size_kb": all_tools_size_kb,
 929 |             "raw_tokens": all_tools_tokens,
 930 |             "full_size_kb": all_tools_prompt_size_kb,
 931 |             "full_tokens": all_tools_prompt_tokens,
 932 |             "costs": all_tools_costs
 933 |         }
 934 |     }
 935 |     
 936 |     # Create comparison table
 937 |     table = Table(title="Tool Registration Token Usage")
 938 |     
 939 |     # Add columns - including percentage column 
 940 |     table.add_column("Metric", style="white")
 941 |     table.add_column(current_label, style="cyan")
 942 |     table.add_column(all_label, style="magenta")
 943 |     table.add_column("Difference", style="yellow")
 944 |     table.add_column(f"{current_label} as % of {all_label}", style="green")
 945 |     
 946 |     # SECTION 1: Number of Tools
 947 |     # Calculate percentage for count
 948 |     count_percentage = (data["current_tools"]["count"] / data["all_tools"]["count"]) * 100 if data["all_tools"]["count"] > 0 else 100
 949 |     
 950 |     # Number-of-tools row, formatted consistently with the rows below
 951 |     table.add_row(
 952 |         "Number of Tools", 
 953 |         str(data["current_tools"]["count"]), 
 954 |         str(data["all_tools"]["count"]),
 955 |         str(data["current_tools"]["count"] - data["all_tools"]["count"]),
 956 |         f"{count_percentage:.2f}%"
 957 |     )
 958 |     
 959 |     # Add a divider after Number of Tools
 960 |     table.add_section()
 961 |     
 962 |     # SECTION 2: Full Prompt stats
 963 |     # Calculate percentage for full prompt size
 964 |     full_size_percentage = (data["current_tools"]["full_size_kb"] / data["all_tools"]["full_size_kb"]) * 100 if data["all_tools"]["full_size_kb"] > 0 else 100
 965 |     
 966 |     table.add_row(
 967 |         "Full Prompt Size (KB)", 
 968 |         f"{data['current_tools']['full_size_kb']:,.2f}", 
 969 |         f"{data['all_tools']['full_size_kb']:,.2f}",
 970 |         f"{data['current_tools']['full_size_kb'] - data['all_tools']['full_size_kb']:,.2f}",
 971 |         f"{full_size_percentage:.2f}%"
 972 |     )
 973 |     
 974 |     # Calculate percentage for full tokens
 975 |     full_tokens_percentage = (data["current_tools"]["full_tokens"] / data["all_tools"]["full_tokens"]) * 100 if data["all_tools"]["full_tokens"] > 0 else 100
 976 |     
 977 |     table.add_row(
 978 |         "Full Prompt Token Count", 
 979 |         f"{data['current_tools']['full_tokens']:,}", 
 980 |         f"{data['all_tools']['full_tokens']:,}",
 981 |         f"{data['current_tools']['full_tokens'] - data['all_tools']['full_tokens']:,}",
 982 |         f"{full_tokens_percentage:.2f}%"
 983 |     )
 984 |     
 985 |     # Add a divider after Full Prompt stats
 986 |     table.add_section()
 987 |     
 988 |     # SECTION 3: Model costs
 989 |     # Specify the models to include and their order
 990 |     models_to_include = [
 991 |         "claude-3-7-sonnet-20250219",
 992 |         "gpt-4.1",
 993 |         "gemini-2.5-pro-preview-03-25",
 994 |         "grok-3-latest"
 995 |     ]
 996 |     
 997 |     # Add cost rows for selected models only, in specified order
 998 |     for model in models_to_include:
 999 |         if model in MODEL_PRICES:
1000 |             current_cost = data["current_tools"]["costs"][model]
1001 |             all_cost = data["all_tools"]["costs"][model]
1002 |             diff_cost = current_cost - all_cost
1003 |             
1004 |             # Calculate percentage
1005 |             cost_percentage = (current_cost / all_cost) * 100 if all_cost > 0 else 100
1006 |             
1007 |             table.add_row(
1008 |                 f"Cost ({model})",
1009 |                 f"${current_cost:.4f}",
1010 |                 f"${all_cost:.4f}",
1011 |                 f"${diff_cost:.4f}",
1012 |                 f"{cost_percentage:.2f}%"
1013 |             )
1014 |     
1015 |     # Print table
1016 |     console.print(table)
1017 |     
1018 |     # Print raw data as JSON (only if not in quiet mode)
1019 |     if not quiet:
1020 |         console.print("\nRaw token usage data:")
1021 |         console.print(json.dumps(data, indent=2))
1022 |     
1023 |     return data
1024 | 
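# Note (added commentary, not in the original source): the two files written
# above, current_tools_sent_to_llm.json and all_tools_sent_to_llm.json, contain
# the exact prompt text that was tokenized, so the reported token counts and
# costs can be re-derived offline from those files.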
1025 | async def get_complete_toolset(quiet: bool = False) -> Dict[str, Any]:
1026 |     """
1027 |     Generate the complete toolset that would be available with --load-all-tools
1028 |     
1029 |     This reads the tool names from a file generated by the server; if the file
1030 |     is missing, it falls back to the current server's tools plus common extras.
1031 |     
1032 |     Args:
1033 |         quiet: If True, only show most important output
1034 |     
1035 |     Returns:
1036 |         Dictionary with server info and simulated complete toolset
1037 |     """
1038 |     console = Console()
1039 |     if not quiet:
1040 |         console.print("[bold blue]Analyzing complete toolset (--load-all-tools)[/bold blue]")
1041 |     
1042 |     # First get the current server's tools to extract real descriptions where possible
1043 |     try:
1044 |         # Get server connection details
1045 |         host, port = get_server_url_and_transport()
1046 |         server_url, transport_type = await detect_server_transport(host, port, quiet=quiet)
1047 |         current_tools_info = await get_mcp_server_tools(server_url, transport_type, quiet=quiet, command=None, args=None)
1048 |         current_tools = {tool["name"]: tool for tool in current_tools_info["tools"]} if current_tools_info else {}
1049 |         if not quiet:
1050 |             console.print(f"[green]Retrieved {len(current_tools)} tools from current server to use their real descriptions[/green]")
1051 |     except Exception as e:
1052 |         if not quiet:
1053 |             console.print(f"[yellow]Could not get current tools: {str(e)}[/yellow]")
1054 |         current_tools = {}
1055 |     
1056 |     # Read tool names from file created by the server
1057 |     all_tool_names = read_tool_names_from_file(quiet=quiet)
1058 |     
1059 |     # If no tools found in file, use the tools we got from the server
1060 |     if not all_tool_names and current_tools:
1061 |         if not quiet:
1062 |             console.print("[yellow]No tools found in file. Using current server tools and adding some common ones.[/yellow]")
1063 |         all_tool_names = list(current_tools.keys())
1064 |         
1065 |         # Add some common tool names that might not be in the current server
1066 |         additional_tools = [
1067 |             "excel_create_workbook", "excel_open_workbook", "excel_add_worksheet",
1068 |             "excel_set_cell_value", "excel_get_cell_value", "excel_save_workbook",
1069 |             "excel_get_worksheet_names", "excel_create_chart", "excel_set_range_format",
1070 |             "smart_browser.autopilot", "smart_browser.parallel", "smart_browser.download_site_pdfs",
1071 |             "generate_image", "analyze_image", "transcribe_audio"
1072 |         ]
1073 |         
1074 |         # Add them if not already present
1075 |         for tool in additional_tools:
1076 |             if tool not in all_tool_names:
1077 |                 all_tool_names.append(tool)
1078 |     
1079 |     if not quiet:
1080 |         console.print(f"[green]Using complete list of {len(all_tool_names)} tools for all-tools mode[/green]")
1081 |     
1082 |     # Create tool entries based on real data
1083 |     tool_defs = []
1084 |     
1085 |     for tool_name in all_tool_names:
1086 |         # First check if we have real data for this tool
1087 |         if tool_name in current_tools:
1088 |             # Use the actual tool definition from the server
1089 |             tool_def = current_tools[tool_name]
1090 |             if not quiet:
1091 |                 console.print(f"[dim]Using real definition for tool '{tool_name}'[/dim]")
1092 |         else:
1093 |             # Create a definition with a realistic description based on the tool name
1094 |             tool_desc = f"The {tool_name} tool provides functionality for {tool_name.replace('_', ' ')}. " + \
1095 |                        "This would be the actual docstring from the function when loaded with --load-all-tools."
1096 |             
1097 |             # Create a basic definition
1098 |             tool_def = {
1099 |                 "name": tool_name,
1100 |                 "inputSchema": {
1101 |                     "type": "object",
1102 |                     "properties": {
1103 |                         "param1": {"type": "string", "description": "First parameter"},
1104 |                         "param2": {"type": "string", "description": "Second parameter"}
1105 |                     },
1106 |                     "required": ["param1"]
1107 |                 },
1108 |                 "description": tool_desc
1109 |             }
1110 |             if not quiet:
1111 |                 console.print(f"[dim yellow]Created placeholder for tool '{tool_name}'[/dim yellow]")
1112 |         
1113 |         tool_defs.append(tool_def)
1114 |     
1115 |     # Return a similar structure to what get_mcp_server_tools returns
1116 |     return {
1117 |         "tools": tool_defs,
1118 |         "server_name": "Ultimate MCP Server (with --load-all-tools)",
1119 |         "server_version": "1.6.0",
1120 |         "server_instructions": """This server provides access to the complete set of tools available in the Ultimate MCP Server.
1121 | When running with --load-all-tools, all tools from all categories are available, including:
1122 | - Completion tools for text generation
1123 | - Provider tools for model management
1124 | - Filesystem tools for file operations
1125 | - Optimization tools for cost and performance
1126 | - Text processing tools for manipulating text
1127 | - Meta tools for accessing tool information
1128 | - Search tools for querying databases
1129 | - Browser automation tools
1130 | - Web research tools
1131 | - HTML processing tools
1132 | - Extraction tools
1133 | - SQL database tools
1134 | - Document processing tools
1135 | - Audio transcription tools
1136 | - Excel spreadsheet tools
1137 | - OCR tools
1138 | - Sentiment analysis tools
1139 | """
1140 |     }
1141 | 
1142 | def parse_args():
1143 |     """Parse command line arguments"""
1144 |     parser = argparse.ArgumentParser(description="MCP Tool Context Estimator")
1145 |     parser.add_argument("--url", default=None, 
1146 |                         help="URL of the MCP server (default: auto-detected)")
1147 |     parser.add_argument("--transport", default=None,
1148 |                         choices=["sse", "streamable-http", "stdio"],
1149 |                         help="Force specific transport type (default: auto-detect)")
1150 |     parser.add_argument("--command", default=None,
1151 |                         help="Command to run for stdio transport (e.g., 'python -m ultimate_mcp_server')")
1152 |     parser.add_argument("--args", default=None, nargs="*",
1153 |                         help="Additional arguments for stdio command")
1154 |     parser.add_argument("--no-all-tools", action="store_true",
1155 |                         help="Skip comparison with all tools")
1156 |     parser.add_argument("--quiet", "-q", action="store_true",
1157 |                         help="Only show most important information and final table")
1158 |     return parser.parse_args()
1159 | 
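# Example invocations (added commentary, not in the original source; the flags
# are the ones defined in parse_args above):
#     python mcp_tool_context_estimator.py --url http://localhost:8013 --transport sse
#     python mcp_tool_context_estimator.py --transport stdio \
#         --command "python -m ultimate_mcp_server" --quiet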
1160 | async def main():
1161 |     """Main function"""
1162 |     console = Console()
1163 |     args = parse_args()
1164 |     
1165 |     # Handle stdio transport
1166 |     if args.transport == "stdio":
1167 |         if not args.command:
1168 |             console.print("[bold red]Error: --command is required for stdio transport[/bold red]")
1169 |             console.print("Example: --transport stdio --command 'python -m ultimate_mcp_server'")
1170 |             sys.exit(1)
1171 |         
1172 |         server_url = None  # Not used for stdio
1173 |         transport_type = "stdio"
1174 |         command = args.command
1175 |         stdio_args = args.args or []
1176 |         
1177 |         if not args.quiet:
1178 |             console.print(f"[blue]Using stdio transport with command: {command} {' '.join(stdio_args)}[/blue]")
1179 |     else:
1180 |         # Get server connection details for HTTP transports
1181 |         if args.url:
1182 |             # Parse URL to extract host and port
1183 |             import urllib.parse
1184 |             parsed = urllib.parse.urlparse(args.url)
1185 |             host = parsed.hostname or "localhost"
1186 |             port = str(parsed.port or 8013)
1187 |             if args.transport:
1188 |                 transport_type = args.transport
1189 |                 if transport_type == "sse":
1190 |                     server_url = f"http://{host}:{port}/sse"
1191 |                 else:  # streamable-http
1192 |                     server_url = f"http://{host}:{port}/mcp/"
1193 |             else:
1194 |                 # Auto-detect transport for manually specified URL
1195 |                 server_url, transport_type = await detect_server_transport(host, port, quiet=args.quiet)
1196 |         else:
1197 |             # Auto-detect everything
1198 |             host, port = get_server_url_and_transport()
1199 |             if args.transport:
1200 |                 transport_type = args.transport
1201 |                 if transport_type == "sse":
1202 |                     server_url = f"http://{host}:{port}/sse"
1203 |                 else:  # streamable-http
1204 |                     server_url = f"http://{host}:{port}/mcp/"
1205 |             else:
1206 |                 server_url, transport_type = await detect_server_transport(host, port, quiet=args.quiet)
1207 |         
1208 |         command = None
1209 |         stdio_args = None
1210 |     
1211 |     quiet_mode = args.quiet
1212 |     
1213 |     try:
1214 |         # Get the active toolset from the running server
1215 |         current_tools = await get_mcp_server_tools(
1216 |             server_url, 
1217 |             transport_type, 
1218 |             quiet=quiet_mode,
1219 |             command=command,
1220 |             args=stdio_args
1221 |         )
1222 |         
1223 |         if not current_tools or "tools" not in current_tools or not current_tools["tools"]:
1224 |             console.print("[bold yellow]No tools found on the server.[/bold yellow]")
1225 |             return
1226 |         
1227 |         if args.no_all_tools:
1228 |             # If we're not doing the comparison, create a meaningful subset for comparison
1229 |             if not quiet_mode:
1230 |                 console.print("[yellow]Skipping comparison with full --load-all-tools[/yellow]")
1231 |                 console.print("[green]Creating an artificial subset of current tools for comparison[/green]")
1232 |             
1233 |             # Build a subset of half the tools, but never fewer than four
1234 |             # (or all of them, if fewer than four exist)
1235 |             total_tools = len(current_tools["tools"])
1236 |             subset_size = max(total_tools // 2, min(total_tools, 4))
1237 |             subset_tools = current_tools["tools"][:subset_size]
1238 |             
1239 |             if not quiet_mode:
1240 |                 console.print(f"[green]Created subset with {subset_size} tools out of {total_tools} total[/green]")
1241 |             
1242 |             # Create subset version
1243 |             subset_data = {
1244 |                 "tools": subset_tools, 
1245 |                 "server_name": current_tools["server_name"] + " (Subset)",
1246 |                 "server_version": current_tools["server_version"],
1247 |                 "server_instructions": current_tools["server_instructions"]
1248 |             }
1249 |             
1250 |             # Analyze token usage with the artificial subset vs full
1251 |             analyze_tools_token_usage(subset_data, current_tools, quiet=quiet_mode)
1252 |         else:
1253 |             # Get the complete toolset that would be available with --load-all-tools
1254 |             all_tools = await get_complete_toolset(quiet=quiet_mode)
1255 |             
1256 |             # Check if current server is likely already running with all tools
1257 |             current_tool_count = len(current_tools["tools"])
1258 |             all_tool_count = len(all_tools["tools"])
1259 |             
1260 |             if abs(current_tool_count - all_tool_count) <= 2:  # Allow small difference
1261 |                 if not quiet_mode:
1262 |                     console.print(f"[yellow]Warning: Current server has {current_tool_count} tools, "
1263 |                                  f"which is very close to the expected all-tools count of {all_tool_count}[/yellow]")
1264 |                     console.print("[yellow]This suggests the server is already running with --load-all-tools[/yellow]")
1265 |                 
1266 |                 # For accurate comparison when counts are the same, we should just use the same data for both
1267 |                 # to ensure metrics are consistent
1268 |                 same_tools_data = {  # noqa: F841
1269 |                     "tools": current_tools["tools"].copy(),
1270 |                     "server_name": "Current Server",
1271 |                     "server_version": current_tools["server_version"],
1272 |                     "server_instructions": current_tools["server_instructions"]
1273 |                 }
1274 |                 
1275 |                 # Create a deep copy to ensure they're exactly the same
1276 |                 all_tools = {
1277 |                     "tools": current_tools["tools"].copy(),
1278 |                     "server_name": "All Tools",
1279 |                     "server_version": current_tools["server_version"],
1280 |                     "server_instructions": current_tools["server_instructions"]
1281 |                 }
1282 |             
1283 |             # Analyze token usage with full prompt simulation
1284 |             analyze_tools_token_usage(current_tools, all_tools, quiet=quiet_mode)
1285 |     except KeyboardInterrupt:
1286 |         console.print("[bold yellow]Operation cancelled by user[/bold yellow]")
1287 |     except Exception as e:
1288 |         console.print(f"[bold red]Unexpected error:[/bold red] {str(e)}")
1289 |         if not quiet_mode:
1290 |             console.print(traceback.format_exc())
1291 | 
1292 | if __name__ == "__main__":
1293 |     asyncio.run(main())
```
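
The script's two core measurements are token counting and per-1K-token cost projection. The sketch below reproduces that arithmetic in isolation; it is a minimal illustration, assuming a `tiktoken` encoding as the tokenizer (the actual `count_tokens` helper and `MODEL_PRICES` table are defined earlier in this file and may differ in detail; `estimate_prompt_cost` and `PRICES_PER_1K` are hypothetical names).

```python
# Minimal sketch of the estimator's core arithmetic (illustrative only).
import json

import tiktoken

# Hypothetical per-1K-token input prices; the real table is MODEL_PRICES above.
PRICES_PER_1K = {"example-model": 0.003}

def estimate_prompt_cost(tools: list[dict], model: str = "example-model") -> dict:
    """Serialize tool definitions to JSON and project the input-token cost."""
    prompt = "\n".join(json.dumps(tool, indent=2) for tool in tools)
    encoding = tiktoken.get_encoding("cl100k_base")
    tokens = len(encoding.encode(prompt))
    return {
        "size_kb": len(prompt.encode("utf-8")) / 1024,
        "tokens": tokens,
        "cost_usd": PRICES_PER_1K[model] * tokens / 1000,
    }

print(estimate_prompt_cost([{"name": "echo", "inputSchema": {"type": "object"}}]))
```

Multiplying the full-prompt token count by a per-1K price is exactly how the comparison table above derives its cost rows.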

--------------------------------------------------------------------------------
/ultimate_mcp_server/tools/base.py:
--------------------------------------------------------------------------------

```python
   1 | """Base tool classes and decorators for Ultimate MCP Server."""
   2 | import asyncio
   3 | import functools
   4 | import inspect
   5 | import time
   6 | from typing import Any, Callable, Dict, List, Optional, Type, Union
   7 | 
   8 | try:
   9 |     from fastmcp import Tool
  10 | except ImportError:
  11 |     # fastmcp is not importable in this environment; fall back to None so
  12 |     # the module still loads. The Tool type is supplied by the mcp_server
  13 |     # instance at tool registration time instead.
  14 |     Tool = None
  15 | 
  16 | 
  17 | from ultimate_mcp_server.exceptions import (
  18 |     ResourceError,
  19 |     ToolError,
  20 |     ToolExecutionError,
  21 |     ToolInputError,
  22 |     format_error_response,
  23 | )
  24 | 
  25 | # from ultimate_mcp_server.services.cache import with_cache
  26 | from ultimate_mcp_server.utils import get_logger
  27 | 
  28 | logger = get_logger("ultimate_mcp_server.tools.base")
  29 | 
  30 | 
  31 | def tool(name=None, description=None):
  32 |     """
  33 |     Decorator that marks a BaseTool class method as an MCP tool.
  34 |     
  35 |     This decorator adds metadata to a method, identifying it as a tool that should be
  36 |     registered with the MCP server when the containing BaseTool class is initialized.
  37 |     It allows customizing the tool's name and description, which are used in tool
  38 |     discoverability and documentation.
  39 |     
  40 |     Unlike the register_tool function which directly registers standalone functions,
  41 |     this decorator only marks methods for later registration, allowing BaseTool subclasses
  42 |     to organize multiple related tools together in a single class.
  43 |     
  44 |     The decorator adds three attributes to the method:
  45 |     - _tool: A boolean flag indicating this is a tool method
  46 |     - _tool_name: The name to use when registering the tool (or original method name)
  47 |     - _tool_description: The description to use for the tool (or method docstring)
  48 |     
  49 |     These attributes are used during the tool registration process, typically in the
  50 |     _register_tools method of BaseTool subclasses.
  51 |     
  52 |     Args:
  53 |         name: Custom name for the tool (defaults to the method name if not provided)
  54 |         description: Custom description for the tool (defaults to the method's docstring)
  55 |         
  56 |     Returns:
  57 |         A decorator function that adds tool metadata attributes to the decorated method
  58 |         
  59 |     Example:
  60 |         ```python
  61 |         class MyToolSet(BaseTool):
  62 |             tool_name = "my_toolset"
  63 |             
  64 |             @tool(name="custom_operation", description="Performs a customized operation")
  65 |             async def perform_operation(self, param1: str, param2: int) -> Dict[str, Any]:
  66 |                 # Implementation
  67 |                 return {"result": "success"}
  68 |         ```
  69 |         
  70 |     Notes:
  71 |         - This decorator should be used on methods of classes that inherit from BaseTool
  72 |         - Decorated methods should be async
  73 |         - The decorated method must take self as its first parameter
  74 |         - This decorator does not apply error handling or other middleware automatically
  75 |     """
  76 |     def decorator(func):
  77 |         @functools.wraps(func)
  78 |         async def wrapper(self, *args, **kwargs):
  79 |             return await func(self, *args, **kwargs)
  80 |         
  81 |         wrapper._tool = True
  82 |         wrapper._tool_name = name
  83 |         wrapper._tool_description = description
  84 |         
  85 |         return wrapper
  86 |     
  87 |     return decorator
  88 | 
  89 | 
  90 | def with_resource(resource_type, allow_creation=False, require_existence=True):
  91 |     """
  92 |     Decorator for standardizing resource access and validation in tool methods.
  93 |     
  94 |     This decorator provides consistent resource handling for tool methods that
  95 |     access or create persistent resources in the MCP ecosystem. It enforces resource
  96 |     validation rules, handles resource registration, and provides unified error handling
  97 |     for resource-related operations.
  98 |     
  99 |     Core functionalities:
 100 |     1. Resource existence validation - Ensures resources exist before allowing access
 101 |     2. Resource creation tracking - Registers newly created resources with the system
 102 |     3. Resource type validation - Confirms resources match expected types
 103 |     4. Standardized error handling - Produces consistent error responses for resource issues
 104 |     
 105 |     The decorator identifies resource IDs by looking for common parameter names like
 106 |     '{resource_type}_id', 'id', or 'resource_id' in the function's keyword arguments.
 107 |     When a resource ID is found, it performs the configured validation checks before
 108 |     allowing the function to execute. After execution, it can optionally register
 109 |     newly created resources.
 110 |     
 111 |     Args:
 112 |         resource_type: Type category for the resource (e.g., "document", "embedding", 
 113 |                       "database"). Used for validation and registration.
 114 |         allow_creation: Whether the tool is allowed to create new resources of this type.
 115 |                        When True, the decorator will register any created resources.
 116 |         require_existence: Whether the resource must exist before the tool is called.
 117 |                           When True, the decorator will verify resource existence.
 118 |         
 119 |     Returns:
 120 |         A decorator function that applies resource handling to tool methods.
 121 |         
 122 |     Raises:
 123 |         ResourceError: When resource validation fails (e.g., resource not found,
 124 |                       resource type mismatch, or unauthorized resource access).
 125 |         
 126 |     Example:
 127 |         ```python
 128 |         class DocumentTools(BaseTool):
 129 |             @tool()
 130 |             @with_resource("document", require_existence=True, allow_creation=False)
 131 |             async def get_document_summary(self, document_id: str):
 132 |                 # This method will fail with ResourceError if document_id doesn't exist
 133 |                 # Resource existence is checked before this code runs
 134 |                 ...
 135 |                 
 136 |             @tool()
 137 |             @with_resource("document", require_existence=False, allow_creation=True)
 138 |             async def create_document(self, content: str, metadata: Dict[str, Any] = None):
 139 |                 # Process content and create document
 140 |                 doc_id = str(uuid.uuid4())
 141 |                 # ... processing logic ...
 142 |                 
 143 |                 # Return created resource with resource_id key to trigger registration
 144 |                 return {
 145 |                     "resource_id": doc_id,  # This triggers resource registration
 146 |                     "status": "created",
 147 |                     "metadata": {"content_length": len(content), "created_at": time.time()}
 148 |                 }
 149 |                 # The resource is automatically registered with the returned metadata
 150 |         ```
 151 |     
 152 |     Notes:
 153 |         - This decorator should be applied after @tool but before other decorators
 154 |           like @with_error_handling to ensure proper execution order
 155 |         - Resources created with allow_creation=True must include a "resource_id" 
 156 |           key in their result dictionary to trigger registration
 157 |         - The resource registry must be accessible via the tool's mcp server instance
 158 |     """
 159 |     def decorator(func):
 160 |         @functools.wraps(func)
 161 |         async def wrapper(self, *args, **kwargs):
 162 |             # Get resource ID from kwargs (common parameter names)
 163 |             resource_id = None
 164 |             for param_name in [f"{resource_type}_id", "id", "resource_id"]:
 165 |                 if param_name in kwargs:
 166 |                     resource_id = kwargs[param_name]
 167 |                     break
 168 |             
 169 |             # Check if resource exists if required
 170 |             if require_existence and resource_id:
 171 |                 # Get resource registry from MCP server
 172 |                 resource_registry = getattr(self.mcp, "resources", None)
 173 |                 if resource_registry is None:
 174 |                     logger.warning(
 175 |                         f"Resource registry not available, skipping existence check for {resource_type}/{resource_id}",
 176 |                         emoji_key="warning"
 177 |                     )
 178 |                 else:
 179 |                     # Check if resource exists
 180 |                     exists = await resource_registry.exists(resource_type, resource_id)
 181 |                     if not exists:
 182 |                         raise ResourceError(
 183 |                             f"{resource_type.capitalize()} not found: {resource_id}",
 184 |                             resource_type=resource_type,
 185 |                             resource_id=resource_id
 186 |                         )
 187 |             
 188 |             # Call function
 189 |             result = await func(self, *args, **kwargs)
 190 |             
 191 |             # If the function returns a new resource ID, register it
 192 |             if allow_creation and isinstance(result, dict) and "resource_id" in result:
 193 |                 new_resource_id = result["resource_id"]
 194 |                 # Get resource registry from MCP server
 195 |                 resource_registry = getattr(self.mcp, "resources", None)
 196 |                 if resource_registry is not None:
 197 |                     # Register new resource
 198 |                     metadata = {
 199 |                         "created_at": time.time(),
 200 |                         "creator": kwargs.get("ctx", {}).get("user_id", "unknown"),
 201 |                         "resource_type": resource_type
 202 |                     }
 203 |                     
 204 |                     # Add other metadata from result if available
 205 |                     if "metadata" in result:
 206 |                         metadata.update(result["metadata"])
 207 |                     
 208 |                     await resource_registry.register(
 209 |                         resource_type, 
 210 |                         new_resource_id, 
 211 |                         metadata=metadata
 212 |                     )
 213 |                     
 214 |                     logger.info(
 215 |                         f"Registered new {resource_type}: {new_resource_id}",
 216 |                         emoji_key="resource",
 217 |                         resource_type=resource_type,
 218 |                         resource_id=new_resource_id
 219 |                     )
 220 |             
 221 |             return result
 222 |                 
 223 |         # Add resource metadata to function
 224 |         wrapper._resource_type = resource_type
 225 |         wrapper._allow_creation = allow_creation
 226 |         wrapper._require_existence = require_existence
 227 |         
 228 |         return wrapper
 229 |     
 230 |     return decorator
 231 | 
 232 | 
 233 | class ResourceRegistry:
 234 |     """
 235 |     Registry that tracks and manages resources used by MCP tools.
 236 |     
 237 |     The ResourceRegistry provides a centralized system for tracking resources created or
 238 |     accessed by tools within the MCP ecosystem. It maintains resource metadata, handles
 239 |     persistence of resource information, and provides methods for registering, looking up,
 240 |     and deleting resources.
 241 |     
 242 |     Resources in the MCP ecosystem represent persistent or semi-persistent objects that
 243 |     may be accessed across multiple tool calls or sessions. Examples include documents,
 244 |     knowledge bases, embeddings, file paths, and database connections. The registry helps
 245 |     manage the lifecycle of these resources and prevents issues like resource leaks or
 246 |     unauthorized access.
 247 |     
 248 |     Key features:
 249 |     - In-memory caching of resource metadata for fast lookups
 250 |     - Optional persistent storage via pluggable storage backends
 251 |     - Resource type categorization (documents, embeddings, etc.)
 252 |     - Resource existence checking for access control
 253 |     - Simple CRUD operations for resource metadata
 254 |     
 255 |     Resources are organized by type and identified by unique IDs within those types.
 256 |     Each resource has associated metadata that can include creation time, owner information,
 257 |     and resource-specific attributes.
 258 |     
 259 |     The registry is typically initialized by the MCP server and made available to all tools.
 260 |     Tools that create resources should register them, and tools that access resources should
 261 |     verify their existence before proceeding.
 262 |     """
 263 |     
 264 |     def __init__(self, storage_backend=None):
 265 |         """Initialize the resource registry.
 266 |         
 267 |         Args:
 268 |             storage_backend: Backend for persistent storage (if None, in-memory only)
 269 |         """
 270 |         self.resources = {}
 271 |         self.storage = storage_backend
 272 |         self.logger = get_logger("ultimate_mcp_server.resources")
 273 |     
 274 |     async def register(self, resource_type, resource_id, metadata=None):
 275 |         """Register a resource in the registry.
 276 |         
 277 |         Args:
 278 |             resource_type: Type of resource (e.g., "document", "embedding")
 279 |             resource_id: Resource identifier
 280 |             metadata: Additional metadata about the resource
 281 |             
 282 |         Returns:
 283 |             True if registration was successful
 284 |         """
 285 |         # Initialize resource type if not exists
 286 |         if resource_type not in self.resources:
 287 |             self.resources[resource_type] = {}
 288 |         
 289 |         # Register resource
 290 |         self.resources[resource_type][resource_id] = {
 291 |             "id": resource_id,
 292 |             "type": resource_type,
 293 |             "metadata": metadata or {},
 294 |             "registered_at": time.time()
 295 |         }
 296 |         
 297 |         # Persist to storage backend if available
 298 |         if self.storage:
 299 |             try:
 300 |                 await self.storage.save_resource(
 301 |                     resource_type, 
 302 |                     resource_id, 
 303 |                     self.resources[resource_type][resource_id]
 304 |                 )
 305 |             except Exception as e:
 306 |                 self.logger.error(
 307 |                     f"Failed to persist resource {resource_type}/{resource_id}: {str(e)}",
 308 |                     emoji_key="error",
 309 |                     exc_info=True
 310 |                 )
 311 |         
 312 |         return True
 313 |     
 314 |     async def exists(self, resource_type, resource_id):
 315 |         """Check if a resource exists in the registry.
 316 |         
 317 |         Args:
 318 |             resource_type: Type of resource
 319 |             resource_id: Resource identifier
 320 |             
 321 |         Returns:
 322 |             True if the resource exists
 323 |         """
 324 |         # Check in-memory registry first
 325 |         if resource_type in self.resources and resource_id in self.resources[resource_type]:
 326 |             return True
 327 |         
 328 |         # Check storage backend if available
 329 |         if self.storage:
 330 |             try:
 331 |                 return await self.storage.resource_exists(resource_type, resource_id)
 332 |             except Exception as e:
 333 |                 self.logger.error(
 334 |                     f"Failed to check resource existence {resource_type}/{resource_id}: {str(e)}",
 335 |                     emoji_key="error",
 336 |                     exc_info=True
 337 |                 )
 338 |         
 339 |         return False
 340 |     
 341 |     async def get(self, resource_type, resource_id):
 342 |         """Get resource metadata from the registry.
 343 |         
 344 |         Args:
 345 |             resource_type: Type of resource
 346 |             resource_id: Resource identifier
 347 |             
 348 |         Returns:
 349 |             Resource metadata or None if not found
 350 |         """
 351 |         # Check in-memory registry first
 352 |         if resource_type in self.resources and resource_id in self.resources[resource_type]:
 353 |             return self.resources[resource_type][resource_id]
 354 |         
 355 |         # Check storage backend if available
 356 |         if self.storage:
 357 |             try:
 358 |                 resource = await self.storage.get_resource(resource_type, resource_id)
 359 |                 if resource:
 360 |                     # Cache in memory for future access
 361 |                     if resource_type not in self.resources:
 362 |                         self.resources[resource_type] = {}
 363 |                     self.resources[resource_type][resource_id] = resource
 364 |                     return resource
 365 |             except Exception as e:
 366 |                 self.logger.error(
 367 |                     f"Failed to get resource {resource_type}/{resource_id}: {str(e)}",
 368 |                     emoji_key="error",
 369 |                     exc_info=True
 370 |                 )
 371 |         
 372 |         return None
 373 |     
 374 |     async def list(self, resource_type, limit=100, offset=0, filters=None):
 375 |         """List resources of a specific type.
 376 |         
 377 |         Args:
 378 |             resource_type: Type of resource to list
 379 |             limit: Maximum number of resources to return
 380 |             offset: Offset for pagination
 381 |             filters: Dictionary of filters to apply
 382 |             
 383 |         Returns:
 384 |             List of resource metadata
 385 |         """
 386 |         result = []
 387 |         
 388 |         # Get from storage backend first if available
 389 |         if self.storage:
 390 |             try:
 391 |                 resources = await self.storage.list_resources(
 392 |                     resource_type, 
 393 |                     limit=limit, 
 394 |                     offset=offset, 
 395 |                     filters=filters
 396 |                 )
 397 |                 
 398 |                 # Cache in memory for future access
 399 |                 if resources:
 400 |                     if resource_type not in self.resources:
 401 |                         self.resources[resource_type] = {}
 402 |                     
 403 |                     for resource in resources:
 404 |                         resource_id = resource.get("id")
 405 |                         if resource_id:
 406 |                             self.resources[resource_type][resource_id] = resource
 407 |                     
 408 |                     return resources
 409 |             except Exception as e:
 410 |                 self.logger.error(
 411 |                     f"Failed to list resources of type {resource_type}: {str(e)}",
 412 |                     emoji_key="error",
 413 |                     exc_info=True
 414 |                 )
 415 |         
 416 |         # Fallback to in-memory registry
 417 |         if resource_type in self.resources:
 418 |             # Apply filters if provided
 419 |             filtered_resources = self.resources[resource_type].values()
 420 |             if filters:
 421 |                 for key, value in filters.items():
 422 |                     filtered_resources = [
 423 |                         r for r in filtered_resources 
 424 |                         if r.get("metadata", {}).get(key) == value
 425 |                     ]
 426 |             
 427 |             # Apply pagination
 428 |             result = list(filtered_resources)[offset:offset+limit]
 429 |         
 430 |         return result
 431 |     
 432 |     async def delete(self, resource_type, resource_id):
 433 |         """Delete a resource from the registry.
 434 |         
 435 |         Args:
 436 |             resource_type: Type of resource
 437 |             resource_id: Resource identifier
 438 |             
 439 |         Returns:
 440 |             True if deletion was successful
 441 |         """
 442 |         # Delete from in-memory registry
 443 |         if resource_type in self.resources and resource_id in self.resources[resource_type]:
 444 |             del self.resources[resource_type][resource_id]
 445 |         
 446 |         # Delete from storage backend if available
 447 |         if self.storage:
 448 |             try:
 449 |                 return await self.storage.delete_resource(resource_type, resource_id)
 450 |             except Exception as e:
 451 |                 self.logger.error(
 452 |                     f"Failed to delete resource {resource_type}/{resource_id}: {str(e)}",
 453 |                     emoji_key="error",
 454 |                     exc_info=True
 455 |                 )
 456 |         
 457 |         return True
 458 | 
 459 | 
 460 | class BaseToolMetrics:
 461 |     """
 462 |     Metrics collection and aggregation system for tool execution statistics.
 463 |     
 464 |     The BaseToolMetrics class provides a standardized way to track and aggregate performance
 465 |     metrics for tool executions. It maintains cumulative statistics about calls to a tool,
 466 |     including execution counts, success rates, timing information, and optional token usage
 467 |     and cost data when available.
 468 |     
 469 |     This class is used both internally by BaseTool instances and by the with_tool_metrics
 470 |     decorator to provide consistent metrics tracking across the entire MCP ecosystem. The
 471 |     collected metrics enable monitoring, debugging, and optimization of tool performance
 472 |     and usage patterns.
 473 |     
 474 |     Metrics tracked:
 475 |     - Total number of calls
 476 |     - Number of successful and failed calls
 477 |     - Success rate
 478 |     - Total, minimum, and maximum execution duration
 479 |     - Total token usage (for LLM-based tools)
 480 |     - Total cost (for tools with cost accounting)
 481 |     
 482 |     The metrics are aggregated in memory and can be retrieved at any time via the get_stats()
 483 |     method. They represent the lifetime statistics of the tool since the metrics object
 484 |     was created.
 485 |     
 486 |     Example:
 487 |     ```python
 488 |     # Accessing metrics from a tool
 489 |     my_tool = MyToolClass(mcp_server)
 490 |     metrics = my_tool.metrics.get_stats()
 491 |     print(f"Success rate: {metrics['success_rate']:.2%}")
 492 |     print(f"Average duration: {metrics['average_duration']:.2f}s")
 493 |     ```
 494 |     """
 495 |     
 496 |     def __init__(self):
 497 |         """Initialize metrics tracking."""
 498 |         self.total_calls = 0
 499 |         self.successful_calls = 0
 500 |         self.failed_calls = 0
 501 |         self.total_duration = 0.0
 502 |         self.min_duration = float('inf')
 503 |         self.max_duration = 0.0
 504 |         self.total_tokens = 0
 505 |         self.total_cost = 0.0
 506 |         
 507 |     def record_call(
 508 |         self,
 509 |         success: bool,
 510 |         duration: float,
 511 |         tokens: Optional[int] = None,
 512 |         cost: Optional[float] = None
 513 |     ) -> None:
 514 |         """Record metrics for a tool call.
 515 |         
 516 |         Args:
 517 |             success: Whether the call was successful
 518 |             duration: Duration of the call in seconds
 519 |             tokens: Number of tokens used (if applicable)
 520 |             cost: Cost of the call (if applicable)
 521 |         """
 522 |         self.total_calls += 1
 523 |         
 524 |         if success:
 525 |             self.successful_calls += 1
 526 |         else:
 527 |             self.failed_calls += 1
 528 |             
 529 |         self.total_duration += duration
 530 |         self.min_duration = min(self.min_duration, duration)
 531 |         self.max_duration = max(self.max_duration, duration)
 532 |         
 533 |         if tokens is not None:
 534 |             self.total_tokens += tokens
 535 |             
 536 |         if cost is not None:
 537 |             self.total_cost += cost
 538 |     
 539 |     def get_stats(self) -> Dict[str, Any]:
 540 |         """Get current metrics.
 541 |         
 542 |         Returns:
 543 |             Dictionary of metrics
 544 |         """
 545 |         if self.total_calls == 0:
 546 |             return {
 547 |                 "total_calls": 0,
 548 |                 "success_rate": 0.0,
 549 |                 "average_duration": 0.0,
 550 |                 "min_duration": 0.0,
 551 |                 "max_duration": 0.0,
 552 |                 "total_tokens": 0,
 553 |                 "total_cost": 0.0,
 554 |             }
 555 |             
 556 |         return {
 557 |             "total_calls": self.total_calls,
 558 |             "successful_calls": self.successful_calls,
 559 |             "failed_calls": self.failed_calls,
 560 |             "success_rate": self.successful_calls / self.total_calls,
 561 |             "average_duration": self.total_duration / self.total_calls,
 562 |             "min_duration": self.min_duration if self.min_duration != float('inf') else 0.0,
 563 |             "max_duration": self.max_duration,
 564 |             "total_tokens": self.total_tokens,
 565 |             "total_cost": self.total_cost,
 566 |         }
 567 | 
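# Worked example (added commentary, not in the original source):
#
#     m = BaseToolMetrics()
#     m.record_call(success=True, duration=0.4, tokens=120, cost=0.0003)
#     m.record_call(success=False, duration=1.1)
#     stats = m.get_stats()
#     # stats["success_rate"] == 0.5, stats["average_duration"] == 0.75,
#     # stats["min_duration"] == 0.4, stats["max_duration"] == 1.1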
 568 | 
 569 | class BaseTool:
 570 |     """
 571 |     Foundation class for all tool implementations in the Ultimate MCP Server.
 572 |     
 573 |     The BaseTool class serves as the fundamental building block for creating tools that 
 574 |     can be registered with and executed by the MCP server. It provides core functionality
 575 |     for metrics tracking, logging, resource management, and tool execution.
 576 |     
 577 |     Tools in the Ultimate MCP Server ecosystem are designed to provide specific capabilities
 578 |     that can be invoked by clients (typically LLMs) to perform various operations like
 579 |     document processing, vector search, file operations, etc. The BaseTool architecture
 580 |     ensures all tools have consistent behavior for error handling, metrics collection,
 581 |     and server integration.
 582 |     
 583 |     Key features:
 584 |     - Standardized tool registration via decorators
 585 |     - Consistent metrics tracking for all tool executions
 586 |     - Unified error handling and response formatting
 587 |     - Integration with the server's resource registry
 588 |     - Logger setup with tool-specific naming
 589 |     
 590 |     Tool classes should inherit from BaseTool and define their tools using the @tool
 591 |     decorator. Each tool method should be async and follow the standard pattern of
 592 |     accepting parameters, performing operations, and returning results in a structured
 593 |     format.
 594 |     
 595 |     Example:
 596 |     ```python
 597 |     class MyCustomTools(BaseTool):
 598 |         tool_name = "my_custom_tools"
 599 |         description = "Provides custom tools for specific operations"
 600 |         
 601 |         @tool(name="custom_operation")
 602 |         @with_tool_metrics
 603 |         @with_error_handling
 604 |         async def perform_operation(self, param1: str, param2: int) -> Dict[str, Any]:
 605 |             # Implementation
 606 |             return {"result": "success", "data": some_data}
 607 |     ```
 608 |     """
 609 |     
 610 |     tool_name: str = "base_tool"
 611 |     description: str = "Base tool class for Ultimate MCP Server."
 612 |     
 613 |     def __init__(self, mcp_server):
 614 |         """Initialize the tool.
 615 |         
 616 |         Args:
 617 |             mcp_server: MCP server instance
 618 |         """
 619 |         # If mcp_server is a Gateway instance, get the MCP object
 620 |         self.mcp = mcp_server.mcp if hasattr(mcp_server, 'mcp') else mcp_server
 621 |         self.logger = get_logger(f"tool.{self.tool_name}")
 622 |         self.metrics = BaseToolMetrics()
 623 |         
 624 |         # Initialize resource registry if not already available
 625 |         if not hasattr(self.mcp, "resources"):
 626 |             self.mcp.resources = ResourceRegistry()
 627 |         
 628 |     def _register_tools(self):
 629 |         """Register tools with MCP server.
 630 |         
 631 |         Override this method in subclasses to register specific tools.
 632 |         This method is no longer called by the base class constructor.
 633 |         Registration is now handled externally, e.g., in register_all_tools.
 634 |         """
 635 |         pass
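    # Illustrative override (added commentary, not in the original source),
    # assuming self.mcp exposes a FastMCP-style tool() decorator:
    #
    #     def _register_tools(self):
    #         for attr_name in dir(self):
    #             method = getattr(self, attr_name)
    #             if getattr(method, "_tool", False):
    #                 self.mcp.tool(
    #                     name=method._tool_name or attr_name,
    #                     description=method._tool_description,
    #                 )(method)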
 636 |         
 637 |     async def execute(self, tool_name: str, params: Dict[str, Any]) -> Any:
 638 |         """
 639 |         Execute a tool method by name with the given parameters.
 640 |         
 641 |         This method provides the core execution mechanism for BaseTool subclasses,
 642 |         dynamically dispatching calls to the appropriate tool method based on the
 643 |         tool_name parameter. It handles parameter validation, metrics collection,
 644 |         and error standardization to ensure consistent behavior across all tools.
 645 |         
 646 |         Execution flow:
 647 |         1. Looks up the requested tool method in the class
 648 |         2. Validates that the method is properly marked as a tool
 649 |         3. Applies metrics tracking via _wrap_with_metrics
 650 |         4. Executes the tool with the provided parameters
 651 |         5. Returns the tool's response or a standardized error
 652 |         
 653 |         Args:
 654 |             tool_name: Name of the specific tool method to execute
 655 |             params: Dictionary of parameters to pass to the tool method
 656 |                     (These parameters will be unpacked as kwargs)
 657 |         
 658 |         Returns:
 659 |             The result returned by the tool method, or a standardized error response
 660 |             if execution fails
 661 |             
 662 |         Raises:
 663 |             ToolError: If the specified tool_name is not found or not properly
 664 |                        marked as a tool method
 665 |                        
 666 |         Example:
 667 |             ```python
 668 |             # Direct execution of a tool method
 669 |             result = await my_tool_instance.execute(
 670 |                 "analyze_document", 
 671 |                 {"document_id": "doc123", "analysis_type": "sentiment"}
 672 |             )
 673 |             
 674 |             # Error handling
 675 |             if "isError" in result and result["isError"]:
 676 |                 print(f"Tool execution failed: {result['error']['message']}")
 677 |             else:
 678 |                 print(f"Analysis result: {result['analysis_score']}")
 679 |             ```
 680 |         """
 681 |         # Find method with tool name
 682 |         method_name = tool_name.split(".")[-1]  # Handle namespaced tools
 683 |         method = getattr(self, method_name, None)
 684 |         
 685 |         if not method or not hasattr(method, "_tool"):
 686 |             raise ToolError(
 687 |                 f"Tool not found: {tool_name}",
 688 |                 error_code="tool_not_found"
 689 |             )
 690 |         
 691 |         # Execute tool with metrics wrapper
 692 |         return await self._wrap_with_metrics(method, **params)
 693 | 
 694 |     async def _wrap_with_metrics(
 695 |         self,
 696 |         func: Callable,
 697 |         *args,
 698 |         **kwargs
 699 |     ) -> Any:
 700 |         """
 701 |         Internal method that wraps a function call with metrics tracking.
 702 |         
 703 |         This method provides a standardized way to execute tool functions while capturing
 704 |         performance metrics such as execution duration, success/failure status, token usage,
 705 |         and cost. These metrics are stored in the BaseTool instance's metrics object for
 706 |         later analysis and reporting.
 707 |         
 708 |         The method performs the following steps:
 709 |         1. Records the start time of the operation
 710 |         2. Executes the provided function with the supplied arguments
 711 |         3. If successful, extracts metrics data from the result (if available)
 712 |         4. Records the execution metrics in the BaseTool's metrics object
 713 |         5. Returns the original result or propagates any exceptions that occurred
 714 |         
 715 |         Metrics extraction:
 716 |         - If the result is a dictionary, it will attempt to extract:
 717 |           - Token usage from either result["tokens"]["total"] or result["total_tokens"]
 718 |           - Cost information from result["cost"]
 719 |         
 720 |         Args:
 721 |             func: Async function to execute with metrics tracking
 722 |             *args: Positional arguments to pass to the function
 723 |             **kwargs: Keyword arguments to pass to the function
 724 |             
 725 |         Returns:
 726 |             The result of the wrapped function call
 727 |             
 728 |         Raises:
 729 |             Any exception raised by the wrapped function (after logging it)
 730 |             
 731 |         Notes:
 732 |             - This method is typically called internally by BaseTool subclasses
 733 |             - Related to but different from the standalone with_tool_metrics decorator
 734 |             - Exceptions are logged but not caught (to allow proper error handling)
 735 |         """
 736 |         start_time = time.time()
 737 |         success = False
 738 |         tokens = None
 739 |         cost = None
 740 |         
 741 |         try:
 742 |             # Call function
 743 |             result = await func(*args, **kwargs)
 744 |             
 745 |             # Extract metrics if available
 746 |             if isinstance(result, dict):
 747 |                 if "tokens" in result and isinstance(result["tokens"], dict):
 748 |                     tokens = result["tokens"].get("total")
 749 |                 elif "total_tokens" in result:
 750 |                     tokens = result["total_tokens"]
 751 |                     
 752 |                 cost = result.get("cost")
 753 |                 
 754 |             success = True
 755 |             return result
 756 |             
 757 |         except Exception as e:
 758 |             self.logger.error(
 759 |                 f"Tool execution failed: {func.__name__}: {str(e)}",
 760 |                 emoji_key="error",
 761 |                 tool=func.__name__,
 762 |                 exc_info=True
 763 |             )
 764 |             raise
 765 |             
 766 |         finally:
 767 |             # Record metrics
 768 |             duration = time.time() - start_time
 769 |             self.metrics.record_call(
 770 |                 success=success,
 771 |                 duration=duration,
 772 |                 tokens=tokens,
 773 |                 cost=cost
 774 |             )
 775 | 
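# Illustrative result shapes (added commentary, not in the original source)
# that the metrics extraction above recognizes:
#
#     {"text": "...", "tokens": {"input": 80, "output": 40, "total": 120}, "cost": 0.0006}
#     {"text": "...", "total_tokens": 120, "cost": 0.0006}
#
# Any other shape is returned unchanged; tokens and cost simply stay None.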
 776 | 
 777 | def with_tool_metrics(func):
 778 |     """
 779 |     Decorator that automatically tracks performance metrics for tool functions.
 780 |     
 781 |     This decorator captures and records execution metrics for both class methods and
 782 |     standalone functions. It adapts its behavior based on whether the decorated function
 783 |     is a method on a BaseTool instance or a standalone function.
 784 |     
 785 |     Metrics captured include:
 786 |     - Execution time (duration in seconds)
 787 |     - Success/failure state
 788 |     - Token usage (extracted from result if available)
 789 |     - Cost information (extracted from result if available)
 790 |     
 791 |     The decorator performs several functions:
 792 |     1. Captures start time before execution
 793 |     2. Executes the wrapped function, preserving all args/kwargs
 794 |     3. Extracts metrics from the result dictionary if available
 795 |     4. Logs execution statistics
 796 |     5. Updates metrics in the BaseTool.metrics object if available
 797 |     
 798 |     When used with other decorators:
 799 |     - Should be listed above with_error_handling (i.e., applied outermost) so its
 800 |       finally block still records duration and status even when errors occur
 801 |     - Works well with with_cache, tracking metrics for both cache hits and misses
 802 |     - Compatible with with_retry, recording each attempt separately
 803 |     
 804 |     Args:
 805 |         func: The async function to decorate (can be a method or standalone function)
 806 |         
 807 |     Returns:
 808 |         Wrapped async function that captures and records metrics
 809 |         
 810 |     Example:
 811 |         ```python
 812 |         @with_tool_metrics
 813 |         @with_error_handling
 814 |         async def my_tool_function(param1, param2):
 815 |             # Function implementation
 816 |         ```
 817 |     """
 818 |     @functools.wraps(func)
 819 |     async def wrapper(*args, **kwargs):
 820 |         # Check if the first arg looks like a BaseTool instance
 821 |         self_obj = args[0] if args and isinstance(args[0], BaseTool) else None
 822 |         tool_name = getattr(self_obj, 'tool_name', func.__name__)
 823 | 
 824 |         start_time = time.time()
 825 |         success = False
 826 |         tokens = None
 827 |         cost = None
 828 |         result = None
 829 |         
 830 |         try:
 831 |             # Call original function, passing self_obj if it exists
 832 |             if self_obj:
 833 |                 # Assumes if self_obj exists, it's the first positional arg expected by func
 834 |                 result = func(self_obj, *args[1:], **kwargs)
 835 |             else:
 836 |                 # Pass only the args/kwargs received, assuming func is standalone
 837 |                 result = func(*args, **kwargs)
 838 |             
 839 |             # Only await when necessary
 840 |             if inspect.isawaitable(result):
 841 |                 result = await result
 842 |             # result is now either a ToolResult _or_ an async iterator
 843 |             
 844 |             # Extract metrics if available from result
 845 |             if isinstance(result, dict):
 846 |                 if "tokens" in result and isinstance(result["tokens"], dict):
 847 |                     tokens = result["tokens"].get("total")
 848 |                 elif "total_tokens" in result:
 849 |                     tokens = result["total_tokens"]
 850 |                 cost = result.get("cost")
 851 |                 
 852 |             success = True
 853 |             return result
 854 |             
 855 |         except Exception as e:
 856 |             logger.error(
 857 |                 f"Tool execution failed: {tool_name}: {str(e)}",
 858 |                 emoji_key="error",
 859 |                 tool=tool_name,
 860 |                 exc_info=True
 861 |             )
 862 |             raise # Re-raise exception for other handlers (like with_error_handling)
 863 |             
 864 |         finally:
 865 |             # Record metrics
 866 |             duration = time.time() - start_time
 867 |             
 868 |             # Log execution stats
 869 |             logger.debug(
 870 |                 f"Tool execution: {tool_name} ({'success' if success else 'failed'})",
 871 |                 emoji_key="tool" if success else "error",
 872 |                 tool=tool_name,
 873 |                 time=duration,
 874 |                 cost=cost
 875 |             )
 876 |             
 877 |             # Update metrics if we found a self object with a metrics attribute
 878 |             if self_obj and hasattr(self_obj, 'metrics'):
 879 |                 self_obj.metrics.record_call(
 880 |                     success=success,
 881 |                     duration=duration,
 882 |                     tokens=tokens,
 883 |                     cost=cost
 884 |                 )
 885 |                 
 886 |     return wrapper
 887 | 
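# --- Illustrative sketch ---
# with_tool_metrics on a standalone async function. summarize is a
# hypothetical tool; the point is the result shape: the wrapper looks for
# result["tokens"]["total"] (or result["total_tokens"]) and result["cost"].
@with_tool_metrics
async def summarize(text: str) -> dict:
    summary = text[:100]  # placeholder "summarization"
    return {"summary": summary, "total_tokens": max(1, len(text) // 4), "cost": 0.0001}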
 888 | 
 889 | def with_retry(
 890 |     max_retries: int = 3,
 891 |     retry_delay: float = 1.0,
 892 |     backoff_factor: float = 2.0,
 893 |     retry_exceptions: Optional[List[Type[Exception]]] = None
 894 | ):
 895 |     """
 896 |     Decorator that adds exponential backoff retry logic to async tool functions.
 897 |     
 898 |     This decorator wraps an async function with retry logic that will automatically
 899 |     re-execute the function if it fails with certain exceptions. It implements an
 900 |     exponential backoff strategy to progressively increase the wait time between
 901 |     retry attempts, reducing load during transient failures.
 902 |     
 903 |     Retry behavior:
 904 |     1. When the decorated function raises an exception, the decorator checks if it's a
 905 |        retriable exception type (based on the retry_exceptions parameter)
 906 |     2. If retriable, it waits for a delay period (which increases with each attempt)
 907 |     3. After waiting, it retries the function with the same arguments
 908 |     4. This process repeats until either the function succeeds or max_retries is reached
 909 |     
 910 |     Args:
 911 |         max_retries: Maximum number of retry attempts before giving up (default: 3)
 912 |         retry_delay: Initial delay in seconds before first retry (default: 1.0)
 913 |         backoff_factor: Multiplier for delay between retries (default: 2.0)
 914 |                        Each retry's delay is calculated as: retry_delay * (backoff_factor ^ attempt)
 915 |         retry_exceptions: List of exception types that should trigger retries.
 916 |                          If None, all exceptions will trigger retries.
 917 |     
 918 |     Returns:
 919 |         A decorator function that wraps the given async function with retry logic.
 920 |         
 921 |     Example:
 922 |         ```python
 923 |         @with_retry(max_retries=3, retry_delay=2.0, backoff_factor=3.0,
 924 |                    retry_exceptions=[ConnectionError, TimeoutError])
 925 |         async def fetch_data(url):
 926 |             # This function will retry up to 3 times if it raises ConnectionError or TimeoutError
 927 |             # Delays between retries: 2s, 6s, 18s
 928 |             # For other exceptions, it will fail immediately
 929 |             return await some_api_call(url)
 930 |         ```
 931 |         
 932 |     Notes:
 933 |         - This decorator only works with async functions
 934 |         - The decorated function must be idempotent (safe to call multiple times)
 935 |         - Retries are logged at WARNING level, final failures at ERROR level
 936 |         - The final exception is re-raised after all retries are exhausted
 937 |     """
 938 |     def decorator(func):
 939 |         @functools.wraps(func)
 940 |         async def wrapper(*args, **kwargs):
 941 |             last_exception = None
 942 |             delay = retry_delay
 943 |             
 944 |             for attempt in range(max_retries + 1):
 945 |                 try:
 946 |                     # Call original function
 947 |                     return await func(*args, **kwargs)
 948 |                     
 949 |                 except Exception as e:
 950 |                     # Only retry on specified exceptions
 951 |                     if retry_exceptions and not any(
 952 |                         isinstance(e, exc_type) for exc_type in retry_exceptions
 953 |                     ):
 954 |                         raise
 955 |                         
 956 |                     last_exception = e
 957 |                     
 958 |                     # Log retry attempt
 959 |                     if attempt < max_retries:
 960 |                         logger.warning(
 961 |                             f"Tool execution failed, retrying ({attempt+1}/{max_retries}): {str(e)}",
 962 |                             emoji_key="warning",
 963 |                             tool=func.__name__,
 964 |                             attempt=attempt+1,
 965 |                             max_retries=max_retries,
 966 |                             delay=delay
 967 |                         )
 968 |                         
 969 |                         # Wait before retrying
 970 |                         await asyncio.sleep(delay)
 971 |                         
 972 |                         # Increase delay for next retry
 973 |                         delay *= backoff_factor
 974 |                     else:
 975 |                         # Log final failure
 976 |                         logger.error(
 977 |                             f"Tool execution failed after {max_retries} retries: {str(e)}",
 978 |                             emoji_key="error",
 979 |                             tool=func.__name__,
 980 |                             exc_info=True
 981 |                         )
 982 |                         
 983 |             # If we get here, all retries failed
 984 |             raise last_exception
 985 |                 
 986 |         return wrapper
 987 |     return decorator
 988 |     
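# --- Illustrative sketch ---
# flaky_fetch is hypothetical; it always fails, to show the retry schedule.
# Only the listed exception types are retried, waiting
# retry_delay * backoff_factor ** attempt between tries (here 0.5s, then 1.0s);
# any other exception propagates immediately.
@with_retry(max_retries=2, retry_delay=0.5, backoff_factor=2.0,
            retry_exceptions=[ConnectionError, TimeoutError])
async def flaky_fetch(url: str) -> str:
    raise ConnectionError(f"simulated transient failure for {url}")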
 989 | 
 990 | def with_error_handling(func):
 991 |     """
 992 |     Decorator that transforms tool function exceptions into standardized error responses.
 993 |     
 994 |     This decorator intercepts any exceptions raised during tool execution and converts them
 995 |     into a structured error response format following the MCP protocol standards. It ensures
 996 |     that clients receive consistent, actionable error information regardless of how or where
 997 |     the error occurred.
 998 |     
 999 |     The decorator performs several key functions:
1000 |     1. Detects if it's decorating a BaseTool method or standalone function and adapts accordingly
1001 |     2. Reconstructs function call arguments appropriately based on function signature
1002 |     3. Catches exceptions raised during execution and transforms them into structured responses
1003 |     4. Maps different exception types to corresponding MCP error types with appropriate metadata
1004 |     5. Logs detailed error information while providing a clean, standardized response to clients
1005 |     
1006 |     Exception handling:
1007 |     - ToolError: Passed through with logging (assumes already formatted correctly)
1008 |     - ValueError: Converted to ToolInputError with detailed context
1009 |     - Other exceptions: Converted to ToolExecutionError with execution context
1010 |     
1011 |     All error responses have the same structure:
1012 |     ```
1013 |     {
1014 |         "success": False,
1015 |         "isError": True,
1016 |         "error": {
1017 |             "type": "<error_type>",
1018 |             "message": "<human-readable message>",
1019 |             "details": {<context-specific details>},
1020 |             "retriable": <boolean>,
1021 |             "suggestions": [<optional recovery suggestions>],
1022 |             "timestamp": <current_time>
1023 |         }
1024 |     }
1025 |     ```
1026 |     
1027 |     Args:
1028 |         func: The async function to decorate (can be a method or standalone function)
1029 |         
1030 |     Returns:
1031 |         Decorated async function that catches exceptions and returns structured error responses
1032 |         
1033 |     Example:
1034 |         ```python
1035 |         @with_error_handling
1036 |         async def my_tool_function(param1, param2):
1037 |             # If this raises an exception, it will be transformed into a structured response
1038 |             # rather than propagating up to the caller
1039 |             # ...
1040 |         ```
1041 |     """
1042 |     @functools.wraps(func)
1043 |     async def wrapper(*args, **kwargs):
1044 |         # Check if the first arg looks like a BaseTool instance
1045 |         self_obj = args[0] if args and isinstance(args[0], BaseTool) else None
1046 |         # Determine tool_name based on instance or func name
1047 |         tool_name = getattr(self_obj, 'tool_name', func.__name__) 
1048 |         
1049 |         sig = inspect.signature(func)
1050 |         func_params = set(sig.parameters.keys())  # noqa: F841
1051 |         
1052 |         call_args = []
1053 |         call_kwargs = {}
1054 | 
1055 |         if self_obj:
1056 |             expected_params = list(sig.parameters.values())
1057 |             if expected_params and expected_params[0].name == 'self':
1058 |                 call_args.append(self_obj)
1059 |         
1060 |         start_index = 1 if self_obj and call_args else 0
1061 |         call_args.extend(args[start_index:])
1062 | 
1063 |         # Pass all original kwargs through
1064 |         call_kwargs.update(kwargs)
1065 |             
1066 |         try:
1067 |             # Call original function with reconstructed args/kwargs
1068 |             # This version passes *all* kwargs received by the wrapper,
1069 |             # trusting FastMCP to pass the correct ones including 'ctx'.
1070 |             result = func(*call_args, **call_kwargs)
1071 |             
1072 |             # Only await when necessary
1073 |             if inspect.isawaitable(result):
1074 |                 result = await result
1075 |             # result is now either a ToolResult _or_ an async iterator
1076 |             return result
1077 |             
1078 |         except ToolError as e:
1079 |             # Already a tool error, log and return
1080 |             logger.error(
1081 |                 f"Tool error in {tool_name}: {str(e)} ({e.error_code})",
1082 |                 emoji_key="error",
1083 |                 tool=tool_name,
1084 |                 error_code=e.error_code,
1085 |                 details=e.details
1086 |             )
1087 |             
1088 |             # Debug log the formatted error response
1089 |             error_response = format_error_response(e)
1090 |             logger.debug(f"Formatted error response for {tool_name}: {error_response}")
1091 |             
1092 |             # Return standardized error response
1093 |             return error_response
1094 |             
1095 |         except ValueError as e:
1096 |             # Convert ValueError to ToolInputError with more detailed information
1097 |             error = ToolInputError(
1098 |                 f"Invalid input to {tool_name}: {str(e)}",
1099 |                 details={
1100 |                     "tool_name": tool_name,
1101 |                     "exception_type": "ValueError",
1102 |                     "original_error": str(e)
1103 |                 }
1104 |             )
1105 |             
1106 |             logger.error(
1107 |                 f"Invalid input to {tool_name}: {str(e)}",
1108 |                 emoji_key="error",
1109 |                 tool=tool_name,
1110 |                 error_code=error.error_code
1111 |             )
1112 |             
1113 |             # Return standardized error response
1114 |             return format_error_response(error)
1115 |             
1116 |         except Exception as e:
1117 |             # Create a more specific error message that includes the tool name
1118 |             specific_message = f"Execution error in {tool_name}: {str(e)}"
1119 |             
1120 |             # Convert to ToolExecutionError for other exceptions
1121 |             error = ToolExecutionError(
1122 |                 specific_message,
1123 |                 cause=e,
1124 |                 details={
1125 |                     "tool_name": tool_name,
1126 |                     "exception_type": type(e).__name__,
1127 |                     "original_message": str(e)
1128 |                 }
1129 |             )
1130 |             
1131 |             logger.error(
1132 |                 specific_message,
1133 |                 emoji_key="error",
1134 |                 tool=tool_name,
1135 |                 exc_info=True
1136 |             )
1137 |             
1138 |             # Return standardized error response
1139 |             return format_error_response(error)
1140 |                 
1141 |     return wrapper
1142 | 
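# --- Illustrative sketch ---
# Decorator stacking per the guidance above: with_tool_metrics listed above
# (outermost), with_error_handling innermost. parse_age is hypothetical. The
# int() call raises ValueError on bad input, which with_error_handling converts
# into the structured ToolInputError response documented above rather than
# letting the exception propagate:
#   await parse_age("forty-two")
#   -> {"success": False, "isError": True, "error": {...}}
@with_tool_metrics
@with_error_handling
async def parse_age(value: str) -> dict:
    return {"age": int(value)}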
1143 | 
1144 | def register_tool(mcp_server, name=None, description=None, cache_ttl=None):
1145 |     """
1146 |     Register a standalone function as an MCP tool with optional caching and error handling.
1147 |     
1148 |     This function creates a decorator that registers the decorated function with the MCP server,
1149 |     automatically applying error handling and optional result caching. It provides a simpler
1150 |     alternative to class-based tool registration via the BaseTool class, allowing standalone
1151 |     functions to be exposed as MCP tools without creating a full tool class.
1152 |     
1153 |     The decorator handles:
1154 |     1. Tool registration with the MCP server using the provided name (or function name)
1155 |     2. Documentation via the provided description (or function docstring)
1156 |     3. Optional result caching with the specified TTL
1157 |     4. Standardized error handling via the with_error_handling decorator
1158 |     
1159 |     Args:
1160 |         mcp_server: MCP server instance to register the tool with
1161 |         name: Tool name used for registration (defaults to the function name if not provided)
1162 |         description: Tool description for documentation (defaults to function docstring if not provided)
1163 |         cache_ttl: Optional time-to-live in seconds for caching tool results. Note: the
1164 |                   caching hook is currently commented out below, so this value is accepted but not yet applied.
1165 |         
1166 |     Returns:
1167 |         Decorator function that transforms the decorated function into a registered MCP tool
1168 |         
1169 |     Example:
1170 |         ```python
1171 |         # Initialize MCP server
1172 |         mcp_server = FastMCP()
1173 |         
1174 |         # Register a function as a tool
1175 |         @register_tool(mcp_server, name="get_weather", cache_ttl=300)
1176 |         async def get_weather_data(location: str, units: str = "metric"):
1177 |             '''Get current weather data for a location.'''
1178 |             # Implementation
1179 |             return {"temperature": 22, "conditions": "sunny"}
1180 |             
1181 |         # The function is now registered as an MCP tool named "get_weather"
1182 |         # with standardized error handling (cache_ttl will take effect once caching is re-enabled)
1183 |         ```
1184 |         
1185 |     Notes:
1186 |         - The decorated function must be async
1187 |         - cache_ttl is currently a placeholder: the with_cache hook below is commented
1188 |           out, so identical calls re-execute the function until caching is re-enabled
1189 |         - Function signature is preserved, making it transparent to callers
1190 |         - For more complex tools with multiple methods, use the BaseTool class instead
1191 |     """
1192 |     def decorator(func):
1193 |         # Get function name and docstring
1194 |         tool_name = name or func.__name__
1195 |         tool_description = description or func.__doc__ or f"Tool: {tool_name}"
1196 |         
1197 |         # Apply caching if specified (currently disabled pending with_cache integration)
1198 |         # if cache_ttl is not None:
1199 |         #     func = with_cache(ttl=cache_ttl)(func)
1200 |         
1201 |         # Apply error handling
1202 |         func = with_error_handling(func)
1203 |         
1204 |         # Register with MCP server
1205 |         mcp_server.tool(name=tool_name, description=tool_description)(func)
1206 |         
1207 |         return func
1208 |     
1209 |     return decorator
1210 | 
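# --- Illustrative sketch ---
# register_tool composes with the decorators above: with_retry can be applied
# to the function first, and register_tool then layers error handling on top.
# The FastMCP instance and lookup_prices are hypothetical, so this is left
# commented out:
#
# @register_tool(mcp_server, name="lookup_prices")
# @with_retry(max_retries=2, retry_delay=1.0, retry_exceptions=[TimeoutError])
# async def lookup_prices(symbol: str) -> dict:
#     '''Look up current prices for a symbol.'''
#     ...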
1211 | def _get_json_schema_type(type_annotation):
1212 |     """
1213 |     Convert Python type annotations to JSON Schema type definitions.
1214 |     
1215 |     This utility function translates Python's typing annotations into equivalent JSON Schema
1216 |     type definitions, enabling automatic generation of API documentation and client interfaces
1217 |     from Python function signatures. It handles basic types, Optional types, Lists, and 
1218 |     provides reasonable defaults for complex types.
1219 |     
1220 |     The function is primarily used internally by the MCP framework to generate JSON Schema
1221 |     definitions for tool parameters, allowing clients to understand the expected input types
1222 |     and structures for each tool.
1223 |     
1224 |     Type mappings:
1225 |     - str -> {"type": "string"}
1226 |     - int -> {"type": "integer"}
1227 |     - float -> {"type": "number"}
1228 |     - bool -> {"type": "boolean"}
1229 |     - Optional[T] -> Same as T, but adds "null" to "type" array
1230 |     - List[T] -> {"type": "array", "items": <schema for T>}
1231 |     - Dict -> {"type": "object"}
1232 |     - Other complex types -> {"type": "object"}
1233 |     
1234 |     Args:
1235 |         type_annotation: A Python type annotation (from typing module or built-in types)
1236 |         
1237 |     Returns:
1238 |         A dictionary containing the equivalent JSON Schema type definition
1239 |         
1240 |     Notes:
1241 |         - This function provides only type information, not complete JSON Schema validation
1242 |           rules like minimum/maximum values, string patterns, etc.
1243 |         - Complex nested types (e.g., List[Dict[str, List[int]]]) are handled, but deeply 
1244 |           nested structures may be simplified in the output schema
1245 |         - This function is meant for internal use by the tool registration system
1246 |         
1247 |     Examples:
1248 |         ```python
1249 |         # Basic types
1250 |         _get_json_schema_type(str)  # -> {"type": "string"}
1251 |         _get_json_schema_type(int)  # -> {"type": "integer"}
1252 |         
1253 |         # Optional types
1254 |         from typing import Optional
1255 |         _get_json_schema_type(Optional[str])  # -> {"type": ["string", "null"]}
1256 |         
1257 |         # List types
1258 |         from typing import List
1259 |         _get_json_schema_type(List[int])  # -> {"type": "array", "items": {"type": "integer"}}
1260 |         
1261 |         # Complex types
1262 |         from typing import Dict, List
1263 |         _get_json_schema_type(Dict[str, List[int]])  # -> {"type": "object"}
1264 |         ```
1265 |     """
1266 |     import typing
1267 |     
1268 |     # Handle basic types
1269 |     if type_annotation is str:
1270 |         return {"type": "string"}
1271 |     elif type_annotation is int:
1272 |         return {"type": "integer"}
1273 |     elif type_annotation is float:
1274 |         return {"type": "number"}
1275 |     elif type_annotation is bool:
1276 |         return {"type": "boolean"}
1277 |     
1278 |     # Handle Optional types
1279 |     origin = typing.get_origin(type_annotation)
1280 |     args = typing.get_args(type_annotation)
1281 |     
1282 |     if origin is Union and type(None) in args:
1283 |         # Optional type - get the non-None type
1284 |         non_none_args = [arg for arg in args if arg is not type(None)]
1285 |         if len(non_none_args) == 1:
1286 |             inner_type = _get_json_schema_type(non_none_args[0])
1287 |             return {**inner_type, "type": [inner_type["type"], "null"]} if isinstance(inner_type.get("type"), str) else inner_type  # add "null" per the Optional[T] mapping
1288 |     
1289 |     # Handle lists
1290 |     if origin is list or origin is List:
1291 |         if args:
1292 |             item_type = _get_json_schema_type(args[0])
1293 |             return {
1294 |                 "type": "array",
1295 |                 "items": item_type
1296 |             }
1297 |         return {"type": "array"}
1298 |     
1299 |     # Handle dictionaries
1300 |     if origin is dict or origin is Dict:
1301 |         return {"type": "object"}
1302 |     
1303 |     # Default to object for complex types
1304 |     return {"type": "object"}
1305 | 
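# --- Illustrative sketch ---
# One way a registration layer could use _get_json_schema_type to build a
# JSON Schema "properties" map from a signature. build_param_schema is a
# hypothetical helper, not the framework's actual generator.
def build_param_schema(fn) -> dict:
    sig = inspect.signature(fn)
    props = {}
    for pname, param in sig.parameters.items():
        if pname in ("self", "ctx"):
            continue  # framework-managed parameters stay out of the schema
        annotation = param.annotation if param.annotation is not inspect.Parameter.empty else str
        props[pname] = _get_json_schema_type(annotation)
    return {"type": "object", "properties": props}
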
1306 | def with_state_management(namespace: str):
1307 |     """
1308 |     Decorator that provides persistent state management capabilities to tool functions.
1309 |     
1310 |     This decorator enables stateful behavior in otherwise stateless tool functions by
1311 |     injecting state access methods that allow reading, writing, and deleting values
1312 |     from a persistent, namespace-based state store. This is essential for tools that
1313 |     need to maintain context across multiple invocations, manage session data, or 
1314 |     build features with memory capabilities.
1315 |     
1316 |     The state management system provides:
1317 |     - Namespace isolation: Each tool can use its own namespace to prevent key collisions
1318 |     - Thread-safe concurrency: Built-in locks ensure safe parallel access to the same state
1319 |     - Optional persistence: State can be backed by disk storage for durability across restarts
1320 |     - Lazy loading: State is loaded from disk only when accessed, improving performance
1321 |     
1322 |     State accessor functions injected into the decorated function:
1323 |     - get_state(key, default=None) → Any: Retrieve a value by key, with optional default
1324 |     - set_state(key, value) → None: Store a value under the specified key
1325 |     - delete_state(key) → None: Remove a value from the state store
1326 |     
1327 |     All state operations are async and must be awaited; other work can proceed
1328 |     while an operation is pending.
1329 |     
1330 |     Args:
1331 |         namespace: A unique string identifying this tool's state namespace. This 
1332 |                   should be chosen carefully to avoid collisions with other tools.
1333 |                   Recommended format: "<tool_category>.<specific_feature>"
1334 |                   Examples: "conversation.history", "user.preferences", "document.cache"
1335 |     
1336 |     Returns:
1337 |         A decorator function that wraps the original tool function, adding state
1338 |         management capabilities via injected parameters.
1339 |         
1340 |     Examples:
1341 |         Basic usage with conversation history:
1342 |         ```python
1343 |         @with_state_management("conversation.history")
1344 |         async def chat_with_memory(message: str, ctx=None, get_state=None, set_state=None, delete_state=None):
1345 |             # Get previous messages from persistent store
1346 |             history = await get_state("messages", [])
1347 |             
1348 |             # Add new message
1349 |             history.append({"role": "user", "content": message})
1350 |             
1351 |             # Generate response based on all previous conversation context
1352 |             response = generate_response(message, history)
1353 |             
1354 |             # Add AI response to history
1355 |             history.append({"role": "assistant", "content": response})
1356 |             
1357 |             # Store updated history for future calls
1358 |             await set_state("messages", history)
1359 |             return {"response": response}
1360 |         ```
1361 |         
1362 |         Advanced pattern with conversational memory and user customization:
1363 |         ```python
1364 |         @with_state_management("assistant.settings")
1365 |         async def personalized_assistant(
1366 |             query: str, 
1367 |             update_preferences: bool = False,
1368 |             preferences: Dict[str, Any] = None,
1369 |             ctx=None, 
1370 |             get_state=None, 
1371 |             set_state=None, 
1372 |             delete_state=None
1373 |         ):
1374 |             # Get user ID from context
1375 |             user_id = ctx.get("user_id", "default_user")
1376 |             
1377 |             # Retrieve user-specific preferences
1378 |             user_prefs = await get_state(f"prefs:{user_id}", {
1379 |                 "tone": "professional",
1380 |                 "verbosity": "concise",
1381 |                 "expertise_level": "intermediate"
1382 |             })
1383 |             
1384 |             # Update preferences if requested
1385 |             if update_preferences and preferences:
1386 |                 user_prefs.update(preferences)
1387 |                 await set_state(f"prefs:{user_id}", user_prefs)
1388 |             
1389 |             # Get conversation history
1390 |             history = await get_state(f"history:{user_id}", [])
1391 |             
1392 |             # Process query using preferences and history
1393 |             response = process_personalized_query(
1394 |                 query, 
1395 |                 user_preferences=user_prefs,
1396 |                 conversation_history=history
1397 |             )
1398 |             
1399 |             # Update conversation history
1400 |             history.append({"query": query, "response": response})
1401 |             if len(history) > 20:  # Keep only recent history
1402 |                 history = history[-20:]
1403 |             await set_state(f"history:{user_id}", history)
1404 |             
1405 |             return {
1406 |                 "response": response,
1407 |                 "preferences": user_prefs
1408 |             }
1409 |         ```
1410 |         
1411 |         State persistence across server restarts:
1412 |         ```python
1413 |         # First call to the tool
1414 |         @with_state_management("task.progress")
1415 |         async def long_running_task(task_id: str, step: int = None, ctx=None, 
1416 |                                    get_state=None, set_state=None, delete_state=None):
1417 |             # Get current progress
1418 |             progress = await get_state(task_id, {"completed_steps": [], "current_step": 0})
1419 |             
1420 |             # Update progress if a new step is provided
1421 |             if step is not None:
1422 |                 progress["current_step"] = step
1423 |                 progress["completed_steps"].append({
1424 |                     "step": step,
1425 |                     "timestamp": time.time()
1426 |                 })
1427 |                 await set_state(task_id, progress)
1428 |             
1429 |             # Even if the server restarts, the next call will retrieve the saved progress
1430 |             return {
1431 |                 "task_id": task_id,
1432 |                 "progress": progress,
1433 |                 "completed": len(progress["completed_steps"]),
1434 |                 "current_step": progress["current_step"]
1435 |             }
1436 |         ```
1437 |         
1438 |     Implementation Pattern:
1439 |     The decorator works by injecting three async state management functions into the
1440 |     decorated function's keyword arguments:
1441 |     
1442 |     1. `get_state(key, default=None)`:
1443 |        - Retrieves state values from the persistent store
1444 |        - If key doesn't exist, returns the provided default value
1445 |        - Example: `user_data = await get_state("user:12345", {})`
1446 |     
1447 |     2. `set_state(key, value)`: 
1448 |        - Stores a value in the persistent state store
1449 |        - Automatically serializes complex Python objects (dicts, lists, etc.)
1450 |        - Example: `await set_state("session:abc", {"authenticated": True})`
1451 |     
1452 |     3. `delete_state(key)`:
1453 |        - Removes a key and its associated value from the store
1454 |        - Example: `await delete_state("temporary_data")`
1455 |     
1456 |     Notes:
1457 |         - The decorated function must accept get_state, set_state, delete_state, and ctx
1458 |           parameters, either explicitly or via **kwargs.
1459 |         - State persistence depends on the MCP server configuration. If persistence is
1460 |           enabled, state will survive server restarts.
1461 |         - For large objects, consider storing only references or identifiers in the state
1462 |           and using a separate storage system for the actual data.
1463 |         - The state store is shared across all server instances, so state keys should be
1464 |           chosen to avoid collisions between different tools and features.
1465 |     """
1466 |     def decorator(func):
1467 |         @functools.wraps(func)
1468 |         async def wrapper(*args, **kwargs):
1469 |             # Get context from kwargs
1470 |             context = kwargs.get('ctx')
1471 |             if not context or not hasattr(context, 'fastmcp'):
1472 |                 raise ValueError("Context with FastMCP server required")
1473 |             
1474 |             # Access StateStore via the FastMCP 2.0+ pattern
1475 |             if not hasattr(context.fastmcp, '_state_store'):
1476 |                 raise ValueError("FastMCP server does not have a state store attached")
1477 |             
1478 |             state_store = context.fastmcp._state_store
1479 |             
1480 |             # Add state accessors to kwargs
1481 |             kwargs['get_state'] = lambda key, default=None: state_store.get(namespace, key, default)
1482 |             kwargs['set_state'] = lambda key, value: state_store.set(namespace, key, value)
1483 |             kwargs['delete_state'] = lambda key: state_store.delete(namespace, key)
1484 |             
1485 |             return await func(*args, **kwargs)
1486 |         
1487 |         # Update signature to include context parameter if not already present
1488 |         sig = inspect.signature(func)
1489 |         if 'ctx' not in sig.parameters:
1490 |             wrapped_params = list(sig.parameters.values())
1491 |             wrapped_params.append(
1492 |                 inspect.Parameter('ctx', inspect.Parameter.KEYWORD_ONLY, 
1493 |                                  annotation='Optional[Dict[str, Any]]', default=None)
1494 |             )
1495 |             wrapper.__signature__ = sig.replace(parameters=wrapped_params)
1496 |         
1497 |         return wrapper
1498 |     return decorator
1499 | 
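# --- Illustrative sketch ---
# Minimal in-memory stand-in for the StateStore interface assumed by
# with_state_management: async get/set/delete keyed by (namespace, key).
# The real store may add locking and disk persistence; this class is
# hypothetical.
class _InMemoryStateStore:
    def __init__(self):
        self._data: dict = {}

    async def get(self, namespace: str, key: str, default=None):
        return self._data.get(namespace, {}).get(key, default)

    async def set(self, namespace: str, key: str, value) -> None:
        self._data.setdefault(namespace, {})[key] = value

    async def delete(self, namespace: str, key: str) -> None:
        self._data.get(namespace, {}).pop(key, None)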
```