#
tokens: 49120/50000 40/207 files (page 2/45)
lines: on (toggle) GitHub
raw markdown copy reset
This is page 2 of 45. Use http://codebase.md/dicklesworthstone/llm_gateway_mcp_server?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .cursorignore
├── .env.example
├── .envrc
├── .gitignore
├── additional_features.md
├── check_api_keys.py
├── completion_support.py
├── comprehensive_test.py
├── docker-compose.yml
├── Dockerfile
├── empirically_measured_model_speeds.json
├── error_handling.py
├── example_structured_tool.py
├── examples
│   ├── __init__.py
│   ├── advanced_agent_flows_using_unified_memory_system_demo.py
│   ├── advanced_extraction_demo.py
│   ├── advanced_unified_memory_system_demo.py
│   ├── advanced_vector_search_demo.py
│   ├── analytics_reporting_demo.py
│   ├── audio_transcription_demo.py
│   ├── basic_completion_demo.py
│   ├── cache_demo.py
│   ├── claude_integration_demo.py
│   ├── compare_synthesize_demo.py
│   ├── cost_optimization.py
│   ├── data
│   │   ├── sample_event.txt
│   │   ├── Steve_Jobs_Introducing_The_iPhone_compressed.md
│   │   └── Steve_Jobs_Introducing_The_iPhone_compressed.mp3
│   ├── docstring_refiner_demo.py
│   ├── document_conversion_and_processing_demo.py
│   ├── entity_relation_graph_demo.py
│   ├── filesystem_operations_demo.py
│   ├── grok_integration_demo.py
│   ├── local_text_tools_demo.py
│   ├── marqo_fused_search_demo.py
│   ├── measure_model_speeds.py
│   ├── meta_api_demo.py
│   ├── multi_provider_demo.py
│   ├── ollama_integration_demo.py
│   ├── prompt_templates_demo.py
│   ├── python_sandbox_demo.py
│   ├── rag_example.py
│   ├── research_workflow_demo.py
│   ├── sample
│   │   ├── article.txt
│   │   ├── backprop_paper.pdf
│   │   ├── buffett.pdf
│   │   ├── contract_link.txt
│   │   ├── legal_contract.txt
│   │   ├── medical_case.txt
│   │   ├── northwind.db
│   │   ├── research_paper.txt
│   │   ├── sample_data.json
│   │   └── text_classification_samples
│   │       ├── email_classification.txt
│   │       ├── news_samples.txt
│   │       ├── product_reviews.txt
│   │       └── support_tickets.txt
│   ├── sample_docs
│   │   └── downloaded
│   │       └── attention_is_all_you_need.pdf
│   ├── sentiment_analysis_demo.py
│   ├── simple_completion_demo.py
│   ├── single_shot_synthesis_demo.py
│   ├── smart_browser_demo.py
│   ├── sql_database_demo.py
│   ├── sse_client_demo.py
│   ├── test_code_extraction.py
│   ├── test_content_detection.py
│   ├── test_ollama.py
│   ├── text_classification_demo.py
│   ├── text_redline_demo.py
│   ├── tool_composition_examples.py
│   ├── tournament_code_demo.py
│   ├── tournament_text_demo.py
│   ├── unified_memory_system_demo.py
│   ├── vector_search_demo.py
│   ├── web_automation_instruction_packs.py
│   └── workflow_delegation_demo.py
├── LICENSE
├── list_models.py
├── marqo_index_config.json.example
├── mcp_protocol_schema_2025-03-25_version.json
├── mcp_python_lib_docs.md
├── mcp_tool_context_estimator.py
├── model_preferences.py
├── pyproject.toml
├── quick_test.py
├── README.md
├── resource_annotations.py
├── run_all_demo_scripts_and_check_for_errors.py
├── storage
│   └── smart_browser_internal
│       ├── locator_cache.db
│       ├── readability.js
│       └── storage_state.enc
├── test_client.py
├── test_connection.py
├── TEST_README.md
├── test_sse_client.py
├── test_stdio_client.py
├── tests
│   ├── __init__.py
│   ├── conftest.py
│   ├── integration
│   │   ├── __init__.py
│   │   └── test_server.py
│   ├── manual
│   │   ├── test_extraction_advanced.py
│   │   └── test_extraction.py
│   └── unit
│       ├── __init__.py
│       ├── test_cache.py
│       ├── test_providers.py
│       └── test_tools.py
├── TODO.md
├── tool_annotations.py
├── tools_list.json
├── ultimate_mcp_banner.webp
├── ultimate_mcp_logo.webp
├── ultimate_mcp_server
│   ├── __init__.py
│   ├── __main__.py
│   ├── cli
│   │   ├── __init__.py
│   │   ├── __main__.py
│   │   ├── commands.py
│   │   ├── helpers.py
│   │   └── typer_cli.py
│   ├── clients
│   │   ├── __init__.py
│   │   ├── completion_client.py
│   │   └── rag_client.py
│   ├── config
│   │   └── examples
│   │       └── filesystem_config.yaml
│   ├── config.py
│   ├── constants.py
│   ├── core
│   │   ├── __init__.py
│   │   ├── evaluation
│   │   │   ├── base.py
│   │   │   └── evaluators.py
│   │   ├── providers
│   │   │   ├── __init__.py
│   │   │   ├── anthropic.py
│   │   │   ├── base.py
│   │   │   ├── deepseek.py
│   │   │   ├── gemini.py
│   │   │   ├── grok.py
│   │   │   ├── ollama.py
│   │   │   ├── openai.py
│   │   │   └── openrouter.py
│   │   ├── server.py
│   │   ├── state_store.py
│   │   ├── tournaments
│   │   │   ├── manager.py
│   │   │   ├── tasks.py
│   │   │   └── utils.py
│   │   └── ums_api
│   │       ├── __init__.py
│   │       ├── ums_database.py
│   │       ├── ums_endpoints.py
│   │       ├── ums_models.py
│   │       └── ums_services.py
│   ├── exceptions.py
│   ├── graceful_shutdown.py
│   ├── services
│   │   ├── __init__.py
│   │   ├── analytics
│   │   │   ├── __init__.py
│   │   │   ├── metrics.py
│   │   │   └── reporting.py
│   │   ├── cache
│   │   │   ├── __init__.py
│   │   │   ├── cache_service.py
│   │   │   ├── persistence.py
│   │   │   ├── strategies.py
│   │   │   └── utils.py
│   │   ├── cache.py
│   │   ├── document.py
│   │   ├── knowledge_base
│   │   │   ├── __init__.py
│   │   │   ├── feedback.py
│   │   │   ├── manager.py
│   │   │   ├── rag_engine.py
│   │   │   ├── retriever.py
│   │   │   └── utils.py
│   │   ├── prompts
│   │   │   ├── __init__.py
│   │   │   ├── repository.py
│   │   │   └── templates.py
│   │   ├── prompts.py
│   │   └── vector
│   │       ├── __init__.py
│   │       ├── embeddings.py
│   │       └── vector_service.py
│   ├── tool_token_counter.py
│   ├── tools
│   │   ├── __init__.py
│   │   ├── audio_transcription.py
│   │   ├── base.py
│   │   ├── completion.py
│   │   ├── docstring_refiner.py
│   │   ├── document_conversion_and_processing.py
│   │   ├── enhanced-ums-lookbook.html
│   │   ├── entity_relation_graph.py
│   │   ├── excel_spreadsheet_automation.py
│   │   ├── extraction.py
│   │   ├── filesystem.py
│   │   ├── html_to_markdown.py
│   │   ├── local_text_tools.py
│   │   ├── marqo_fused_search.py
│   │   ├── meta_api_tool.py
│   │   ├── ocr_tools.py
│   │   ├── optimization.py
│   │   ├── provider.py
│   │   ├── pyodide_boot_template.html
│   │   ├── python_sandbox.py
│   │   ├── rag.py
│   │   ├── redline-compiled.css
│   │   ├── sentiment_analysis.py
│   │   ├── single_shot_synthesis.py
│   │   ├── smart_browser.py
│   │   ├── sql_databases.py
│   │   ├── text_classification.py
│   │   ├── text_redline_tools.py
│   │   ├── tournament.py
│   │   ├── ums_explorer.html
│   │   └── unified_memory_system.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── async_utils.py
│   │   ├── display.py
│   │   ├── logging
│   │   │   ├── __init__.py
│   │   │   ├── console.py
│   │   │   ├── emojis.py
│   │   │   ├── formatter.py
│   │   │   ├── logger.py
│   │   │   ├── panels.py
│   │   │   ├── progress.py
│   │   │   └── themes.py
│   │   ├── parse_yaml.py
│   │   ├── parsing.py
│   │   ├── security.py
│   │   └── text.py
│   └── working_memory_api.py
├── unified_memory_system_technical_analysis.md
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/ultimate_mcp_server/config/examples/filesystem_config.yaml:
--------------------------------------------------------------------------------

```yaml
 1 | # Example Filesystem Configuration
 2 | # Add this section to your main configuration file to enable the filesystem tools
 3 | 
 4 | # Main filesystem configuration
 5 | filesystem:
 6 |   # List of allowed directories (absolute or with ~ for home directory)
 7 |   # All filesystem operations will be limited to these directories for security
 8 |   allowed_directories:
 9 |     - ~/data                     # User's data directory
10 |     - /tmp/ultimate           # Temporary directory
11 |     - /opt/ultimate/shared    # Shared application directory
12 |   
13 |   # Additional filesystem-specific options can be added here
14 |   # For example, you could add settings like:
15 |   # max_file_size: 10485760      # Maximum allowed file size (10MB)
16 |   # enable_binary_files: false   # Whether to allow binary file operations
17 |   # default_encoding: "utf-8"    # Default file encoding
18 | 
19 | # For development/debugging
20 | logging:
21 |   level: INFO
22 |   log_colors: true
23 |   log_file: logs/filesystem.log
24 |   
25 | # Integration with other parts of the system
26 | mcp:
27 |   tools:
28 |     # Whether filesystem tools are enabled at all
29 |     enable_filesystem: true
30 |     # More granular permissions could be added here
31 |     # For example, you could control which operations are allowed:
32 |     filesystem_permissions:
33 |       allow_read: true
34 |       allow_write: true
35 |       allow_directory_operations: true 
```

--------------------------------------------------------------------------------
/quick_test.py:
--------------------------------------------------------------------------------

```python
 1 | #!/usr/bin/env python3
 2 | """
 3 | Quick test script for Ultimate MCP Server connectivity
 4 | """
 5 | 
 6 | import asyncio
 7 | import json
 8 | 
 9 | from fastmcp import Client
10 | 
11 | 
async def quick_test() -> int:
    """Run a quick connectivity and smoke test against a local Ultimate MCP Server.

    Connects to the MCP endpoint, then exercises a few cheap operations:
    the ``echo`` tool, provider status, tool listing, and resource listing.

    Returns:
        0 if every check succeeded, 1 if the connection or any check failed.
    """
    server_url = "http://127.0.0.1:8013/mcp"
    
    print(f"🔗 Testing connection to {server_url}")
    
    try:
        async with Client(server_url) as client:
            print("✅ Connected successfully!")
            
            # Test 1: Echo — round-trip a message through the server.
            echo_result = await client.call_tool("echo", {"message": "Quick test"})
            print(f"📢 Echo: {echo_result[0].text}")
            
            # Test 2: Provider status — collect providers reporting themselves available.
            provider_result = await client.call_tool("get_provider_status", {})
            provider_data = json.loads(provider_result[0].text)
            available_providers = [name for name, status in provider_data.get('providers', {}).items() 
                                 if status.get('available')]
            print(f"🔌 Available providers: {', '.join(available_providers)}")
            
            # Test 3: Tool count
            tools = await client.list_tools()
            print(f"🛠️  Available tools: {len(tools)}")
            
            # Test 4: Resources
            resources = await client.list_resources()
            print(f"📚 Available resources: {len(resources)}")
            
            print("🎉 All tests passed!")
            return 0
            
    except Exception as e:
        # Previously the error was swallowed and the script still exited 0;
        # surface failure through the exit code so CI/shell callers can detect it.
        print(f"❌ Test failed: {e}")
        return 1


if __name__ == "__main__":
    raise SystemExit(asyncio.run(quick_test()))
```

--------------------------------------------------------------------------------
/ultimate_mcp_server/services/prompts/__init__.py:
--------------------------------------------------------------------------------

```python
 1 | """Prompt service for Ultimate MCP Server."""
 2 | from ultimate_mcp_server.services.prompts.repository import (
 3 |     PromptRepository,
 4 |     get_prompt_repository,
 5 | )
 6 | from ultimate_mcp_server.services.prompts.templates import (
 7 |     PromptTemplate,
 8 |     PromptTemplateRenderer,
 9 |     render_prompt,
10 |     render_prompt_template,
11 | )
12 | 
13 | 
class PromptService:
    """High-level facade for retrieving and rendering prompt templates."""
    
    def __init__(self):
        """Wire the service to the shared prompt repository and a fresh renderer."""
        self.repository = get_prompt_repository()
        self.renderer = PromptTemplateRenderer()
        
    def get_prompt(self, prompt_id: str) -> PromptTemplate:
        """Look up a prompt template by its identifier.
        
        Args:
            prompt_id: Prompt ID
            
        Returns:
            Prompt template
        """
        return self.repository.get_prompt(prompt_id)
        
    def render_prompt(self, prompt_id: str, variables: dict = None) -> str:
        """Fetch the template for ``prompt_id`` and render it with ``variables``.
        
        Args:
            prompt_id: Prompt ID
            variables: Variables to use in rendering
            
        Returns:
            Rendered prompt text
        """
        template = self.get_prompt(prompt_id)
        render_vars = variables or {}
        return self.renderer.render(template, render_vars)
45 | 
_prompt_service = None


def get_prompt_service() -> PromptService:
    """Get the shared prompt service instance.

    Created lazily on first call and cached thereafter, matching the
    singleton accessor pattern used elsewhere in this package (e.g. the
    knowledge-base and RAG engine getters). Previously a brand-new
    service was constructed on every call.

    Returns:
        Prompt service instance
    """
    global _prompt_service
    if _prompt_service is None:
        _prompt_service = PromptService()
    return _prompt_service
53 | 
54 | __all__ = [
55 |     "PromptRepository",
56 |     "get_prompt_repository",
57 |     "PromptTemplate",
58 |     "PromptTemplateRenderer",
59 |     "render_prompt",
60 |     "render_prompt_template",
61 |     "PromptService",
62 |     "get_prompt_service",
63 | ]
```

--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------

```yaml
 1 | version: '3.8'
 2 | 
 3 | services:
 4 |   ultimate-mcp-server:
 5 |     build:
 6 |       context: .
 7 |       dockerfile: Dockerfile
 8 |     container_name: ultimate-mcp-server
 9 |     restart: unless-stopped
10 |     ports:
11 |       - "${SERVER_PORT:-8013}:8013"
12 |     volumes:
13 |       - ./logs:/app/logs
14 |       - ./.cache:/app/.cache
15 |       - ./.embeddings:/app/.embeddings
16 |     env_file:
17 |       - .env
18 |     environment:
19 |       - SERVER_HOST=0.0.0.0
20 |       - PYTHONUNBUFFERED=1
21 |     healthcheck:
22 |       test: ["CMD", "curl", "-f", "http://localhost:8013/healthz"]
23 |       interval: 30s
24 |       timeout: 10s
25 |       retries: 3
26 |       start_period: 10s
27 |     deploy:
28 |       resources:
29 |         limits:
30 |           memory: 2G
31 |         reservations:
32 |           memory: 1G
33 | 
34 | #   Optional monitoring service (with Prometheus + Grafana)
35 |   prometheus:
36 |     image: prom/prometheus:latest
37 |     container_name: prometheus
38 |     restart: unless-stopped
39 |     ports:
40 |       - "9090:9090"
41 |     volumes:
42 |       - ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml
43 |       - prometheus_data:/prometheus
44 |     command:
45 |       - '--config.file=/etc/prometheus/prometheus.yml'
46 |       - '--storage.tsdb.path=/prometheus'
47 |       - '--web.console.libraries=/etc/prometheus/console_libraries'
48 |       - '--web.console.templates=/etc/prometheus/consoles'
49 |       - '--web.enable-lifecycle'
50 |   
51 |   grafana:
52 |     image: grafana/grafana:latest
53 |     container_name: grafana
54 |     restart: unless-stopped
55 |     ports:
56 |       - "3000:3000"
57 |     volumes:
58 |       - ./monitoring/grafana/provisioning:/etc/grafana/provisioning
59 |       - grafana_data:/var/lib/grafana
60 |     depends_on:
61 |       - prometheus
62 |     environment:
63 |       - GF_SECURITY_ADMIN_USER=admin
64 |       - GF_SECURITY_ADMIN_PASSWORD=admin
65 |       - GF_USERS_ALLOW_SIGN_UP=false
66 | 
67 | # Named volumes used by the monitoring services defined above
68 | volumes:
69 |   prometheus_data:
70 |   grafana_data:
```

--------------------------------------------------------------------------------
/ultimate_mcp_server/services/__init__.py:
--------------------------------------------------------------------------------

```python
 1 | """Services for Ultimate MCP Server."""
 2 | 
 3 | # Example: Only keep get_analytics_service if get_rag_engine needs it directly
 4 | from ultimate_mcp_server.services.analytics import get_analytics_service
 5 | 
 6 | # __all__ should only export symbols defined *in this file* or truly essential high-level interfaces
 7 | # Avoid re-exporting everything from submodules.
 8 | __all__ = [
 9 |     "get_analytics_service", # Only keep if it's fundamental/used here
10 |     "get_rag_engine",        # get_rag_engine is defined below
11 | ]
12 | 
# Lazily-created singleton RAG engine (see get_rag_engine below).
_rag_engine = None

def get_rag_engine():
    """Get or create the singleton RAG engine instance.
    
    Dependencies are imported inside the function body because importing
    them at module level creates a circular import between the services
    and core packages.
    
    Returns:
        RAGEngine: RAG engine instance
    """
    global _rag_engine
    
    if _rag_engine is None:
        # Deferred imports to break the services <-> core import cycle.
        from ultimate_mcp_server.core import get_provider_manager
        from ultimate_mcp_server.services.knowledge_base import get_knowledge_base_retriever
        from ultimate_mcp_server.services.knowledge_base.rag_engine import RAGEngine

        # NOTE(review): assumes tools.optimization does not import this
        # package at module level — confirm if import cycles reappear.
        from ultimate_mcp_server.tools.optimization import get_optimization_service
        
        _rag_engine = RAGEngine(
            retriever=get_knowledge_base_retriever(),
            provider_manager=get_provider_manager(),
            optimization_service=get_optimization_service(),
            analytics_service=get_analytics_service()  # imported at top of module
        )
    
    return _rag_engine
```

--------------------------------------------------------------------------------
/ultimate_mcp_server/core/ums_api/__init__.py:
--------------------------------------------------------------------------------

```python
 1 | """Ultimate MCP Server UMS API Module.
 2 | 
 3 | This module provides the UMS (Unified Memory System) API endpoints and services
 4 | for monitoring and managing cognitive states, actions, performance, and artifacts.
 5 | """
 6 | 
 7 | from .ums_endpoints import setup_ums_api
 8 | 
 9 | # Import database utilities
10 | from .ums_database import (
11 |     get_database_path,
12 |     get_db_connection,
13 |     execute_query,
14 |     execute_update,
15 |     ensure_database_exists,
16 |     _dict_depth,
17 |     _count_values,
18 |     calculate_state_complexity,
19 |     compute_state_diff,
20 |     generate_timeline_segments,
21 |     calculate_timeline_stats,
22 |     get_action_status_indicator,
23 |     categorize_action_performance,
24 |     get_action_resource_usage,
25 |     estimate_wait_time,
26 |     get_priority_label,
27 |     calculate_action_performance_score,
28 |     calculate_efficiency_rating,
29 |     format_file_size,
30 |     calculate_performance_summary,
31 |     generate_performance_insights,
32 |     find_cognitive_patterns,
33 |     calculate_sequence_similarity,
34 |     calculate_single_state_similarity,
35 |     analyze_state_transitions,
36 |     detect_cognitive_anomalies,
37 | )
38 | 
39 | # Import all models for easy access
40 | from .ums_models import *
41 | 
42 | # Import all services
43 | from .ums_services import *
44 | 
45 | __all__ = [
46 |     "setup_ums_api",
47 |     # Database utilities
48 |     "get_database_path",
49 |     "get_db_connection", 
50 |     "execute_query",
51 |     "execute_update",
52 |     "ensure_database_exists",
53 |     "_dict_depth",
54 |     "_count_values",
55 |     "calculate_state_complexity",
56 |     "compute_state_diff",
57 |     "generate_timeline_segments",
58 |     "calculate_timeline_stats",
59 |     "get_action_status_indicator",
60 |     "categorize_action_performance",
61 |     "get_action_resource_usage",
62 |     "estimate_wait_time",
63 |     "get_priority_label",
64 |     "calculate_action_performance_score",
65 |     "calculate_efficiency_rating",
66 |     "format_file_size",
67 |     "calculate_performance_summary",
68 |     "generate_performance_insights",
69 |     "find_cognitive_patterns",
70 |     "calculate_sequence_similarity",
71 |     "calculate_single_state_similarity",
72 |     "analyze_state_transitions",
73 |     "detect_cognitive_anomalies",
74 | ] 
```

--------------------------------------------------------------------------------
/list_models.py:
--------------------------------------------------------------------------------

```python
 1 | #!/usr/bin/env python3
 2 | import asyncio
 3 | 
 4 | from ultimate_mcp_server.core.server import Gateway
 5 | 
 6 | 
 7 | async def list_models():
 8 |     """
 9 |     List all available models from each configured LLM provider.
10 |     
11 |     This function initializes the MCP Gateway with all providers and queries
12 |     each provider for its available models. It then prints a formatted list
13 |     of all models grouped by provider, making it easy to see which models
14 |     are accessible with the current configuration.
15 |     
16 |     The function performs the following steps:
17 |     1. Initialize the Gateway, which loads configuration for all providers
18 |     2. Initialize each provider, which may include API key validation
19 |     3. Query each provider for its available models
20 |     4. Print the provider name followed by a list of its available models
21 |     
22 |     Each model is displayed with the provider name as a prefix (e.g., "openai:gpt-4o")
23 |     for clear identification. Models that don't have an 'id' field will use their
24 |     'name' field instead, and those without either will be labeled as 'unknown'.
25 |     
26 |     This function is useful for:
27 |     - Verifying that API keys are working correctly
28 |     - Checking which models are available for use
29 |     - Debugging provider configuration issues
30 |     - Getting the correct model identifiers for use in applications
31 |     
32 |     Returns:
33 |         None - Results are printed to the console
34 |         
35 |     Notes:
36 |         - Requires valid API keys for each provider to be configured
37 |         - Some providers may have rate limits on model listing operations
38 |         - This function will fail if any provider's initialization fails
39 |     """
40 |     gateway = Gateway()
41 |     await gateway._initialize_providers()
42 |     print("Initialized providers")
43 |     
44 |     # Get the models from each provider
45 |     for provider_name, provider in gateway.providers.items():
46 |         print(f"\nProvider: {provider_name}")
47 |         models = await provider.list_models()
48 |         for model in models:
49 |             print(f"  - {provider_name}:{model.get('id', model.get('name', 'unknown'))}")
50 | 
51 | if __name__ == "__main__":
52 |     asyncio.run(list_models()) 
```

--------------------------------------------------------------------------------
/ultimate_mcp_server/utils/parse_yaml.py:
--------------------------------------------------------------------------------

```python
 1 | """YAML parsing utilities for Ultimate MCP Server."""
 2 | from pathlib import Path
 3 | from typing import Any, Dict, Optional
 4 | 
 5 | import yaml
 6 | 
 7 | 
 8 | def find_config_file() -> Optional[Path]:
 9 |     """Find the configuration file in standard locations.
10 |     
11 |     Looks for config.yaml in:
12 |     1. Current directory
13 |     2. ~/.config/umcp/config.yaml
14 |     
15 |     Returns:
16 |         Path to config file if found, None otherwise
17 |     """
18 |     # Check current directory
19 |     local_config = Path.cwd() / "config.yaml"
20 |     if local_config.exists():
21 |         return local_config
22 |         
23 |     # Check user's config directory
24 |     home_config = Path.home() / ".config" / "umcp" / "config.yaml"
25 |     if home_config.exists():
26 |         return home_config
27 |         
28 |     return None
29 | 
def load_yaml_config(config_path: Optional[Path] = None) -> Dict[str, Any]:
    """Load YAML configuration from file.
    
    Args:
        config_path: Path to config file. If None, will try to find in standard locations.
        
    Returns:
        Dictionary with configuration (empty dict if the file is empty)
        
    Raises:
        FileNotFoundError: If config file not found
        yaml.YAMLError: If config file has invalid YAML
    """
    if config_path is None:
        config_path = find_config_file()
        
    if not config_path or not config_path.exists():
        raise FileNotFoundError(f"Configuration file not found at {config_path}")
        
    # Read as UTF-8 explicitly rather than relying on the locale default encoding.
    with open(config_path, 'r', encoding='utf-8') as f:
        try:
            # Only the parse itself belongs in the try; returning outside keeps
            # the except from accidentally masking unrelated errors.
            config = yaml.safe_load(f)
        except yaml.YAMLError as e:
            raise yaml.YAMLError(f"Error parsing YAML file {config_path}: {e}") from e
    return config or {}
55 | 
def get_provider_config(provider_name: str, config: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """Return the configuration section for a single provider.
    
    Args:
        provider_name: Name of the provider (e.g., "ollama")
        config: Configuration dictionary. If None, will load from file.
        
    Returns:
        Dictionary with provider configuration or empty dict if not found
    """
    cfg = load_yaml_config() if config is None else config
    return cfg.get("providers", {}).get(provider_name, {})
```

--------------------------------------------------------------------------------
/examples/sse_client_demo.py:
--------------------------------------------------------------------------------

```python
 1 | #!/usr/bin/env python3
 2 | """
 3 | Demo: Connect to Ultimate MCP Server in SSE mode using the official MCP Python SDK.
 4 | 
 5 | Requirements:
 6 |     pip install "mcp[cli]"
 7 | 
 8 | This script connects to a running Ultimate MCP Server server in SSE mode (default: http://127.0.0.1:8013/sse),
 9 | lists available tools, and calls the 'echo' tool if available.
10 | """
11 | import asyncio
12 | import sys
13 | from typing import Optional
14 | 
15 | try:
16 |     from mcp import ClientSession
17 |     from mcp.client.sse import sse_client
18 | except ImportError:
19 |     print("[ERROR] You must install the MCP Python SDK: pip install 'mcp[cli]'")
20 |     sys.exit(1)
21 | 
22 | DEFAULT_SSE_URL = "http://127.0.0.1:8013/sse"
23 | 
async def main(sse_url: Optional[str] = None):
    """Connect to an MCP SSE server, list its tools, and invoke 'echo' if present.

    Args:
        sse_url: SSE endpoint URL; defaults to DEFAULT_SSE_URL when None.

    Returns:
        Process exit code: 0 on success (or echo tool absent), 1 on failure.
    """
    sse_url = sse_url or DEFAULT_SSE_URL
    print(f"Connecting to MCP SSE server at: {sse_url}\n")
    try:
        async with sse_client(sse_url) as (read, write):
            async with ClientSession(read, write) as session:
                await session.initialize()
                print("[OK] Connected. Listing available tools...\n")
                # BUG FIX: ClientSession.list_tools() returns a ListToolsResult,
                # not a list — the Tool objects live on its .tools attribute.
                # Iterating the result object directly does not yield tools, and
                # the emptiness check never fired.
                tools_result = await session.list_tools()
                tools = tools_result.tools
                if not tools:
                    print("[ERROR] No tools available on the server.")
                    return 1
                print("Available tools:")
                for tool in tools:
                    print(f"  - {tool.name}: {tool.description}")
                # Try to call the 'echo' tool if available
                echo_tool = next((t for t in tools if t.name == "echo"), None)
                if not echo_tool:
                    print("\n[INFO] 'echo' tool not found. Demo will exit.")
                    return 0
                # Call the echo tool
                test_message = "Hello from SSE client demo!"
                print(f"\nCalling 'echo' tool with message: '{test_message}'...")
                result = await session.call_tool("echo", {"message": test_message})
                print(f"[RESULT] echo: {result}")
                return 0
    except Exception as e:
        print(f"[ERROR] Failed to connect or interact with server: {e}")
        return 1

if __name__ == "__main__":
    sse_url = sys.argv[1] if len(sys.argv) > 1 else None
    exit_code = asyncio.run(main(sse_url))
    sys.exit(exit_code)
```

--------------------------------------------------------------------------------
/tools_list.json:
--------------------------------------------------------------------------------

```json
  1 | [
  2 |   "generate_completion",
  3 |   "stream_completion",
  4 |   "chat_completion",
  5 |   "multi_completion",
  6 |   "get_provider_status",
  7 |   "list_models",
  8 |   "estimate_cost",
  9 |   "compare_models",
 10 |   "recommend_model",
 11 |   "execute_optimized_workflow",
 12 |   "read_file",
 13 |   "read_multiple_files",
 14 |   "write_file",
 15 |   "edit_file",
 16 |   "create_directory",
 17 |   "list_directory",
 18 |   "directory_tree",
 19 |   "move_file",
 20 |   "search_files",
 21 |   "get_file_info",
 22 |   "list_allowed_directories",
 23 |   "get_unique_filepath",
 24 |   "run_ripgrep",
 25 |   "run_awk",
 26 |   "run_sed",
 27 |   "run_jq",
 28 |   "run_ripgrep_stream",
 29 |   "run_awk_stream",
 30 |   "run_sed_stream",
 31 |   "run_jq_stream",
 32 |   "execute_python",
 33 |   "repl_python",
 34 |   "click",
 35 |   "browse",
 36 |   "type_text",
 37 |   "search",
 38 |   "download",
 39 |   "download_site_pdfs",
 40 |   "collect_documentation",
 41 |   "parallel",
 42 |   "run_macro",
 43 |   "autopilot",
 44 |   "convert_document",
 45 |   "chunk_document",
 46 |   "clean_and_format_text_as_markdown",
 47 |   "detect_content_type",
 48 |   "batch_format_texts",
 49 |   "optimize_markdown_formatting",
 50 |   "identify_sections",
 51 |   "generate_qa_pairs",
 52 |   "summarize_document",
 53 |   "extract_metrics",
 54 |   "flag_risks",
 55 |   "canonicalise_entities",
 56 |   "ocr_image",
 57 |   "enhance_ocr_text",
 58 |   "analyze_pdf_structure",
 59 |   "process_document_batch",
 60 |   "extract_entities",
 61 |   "extract_tables",
 62 |   "analyze_business_sentiment",
 63 |   "analyze_business_text_batch",
 64 |   "create_workflow",
 65 |   "get_workflow_details",
 66 |   "record_action_start",
 67 |   "record_action_completion",
 68 |   "get_recent_actions",
 69 |   "get_thought_chain",
 70 |   "store_memory",
 71 |   "get_memory_by_id",
 72 |   "get_memory_metadata",
 73 |   "get_memory_tags",
 74 |   "update_memory_metadata",
 75 |   "update_memory_link_metadata",
 76 |   "create_memory_link",
 77 |   "get_workflow_metadata",
 78 |   "get_contradictions",
 79 |   "query_memories",
 80 |   "update_memory",
 81 |   "get_linked_memories",
 82 |   "add_tag_to_memory",
 83 |   "create_embedding",
 84 |   "get_embedding",
 85 |   "get_working_memory",
 86 |   "focus_memory",
 87 |   "optimize_working_memory",
 88 |   "promote_memory_level",
 89 |   "save_cognitive_state",
 90 |   "load_cognitive_state",
 91 |   "decay_link_strengths",
 92 |   "generate_reflection",
 93 |   "get_rich_context_package",
 94 |   "get_goal_details",
 95 |   "create_goal",
 96 |   "update_goal_status",
 97 |   "vector_similarity",
 98 |   "record_artifact",
 99 |   "get_artifacts",
100 |   "get_artifact_by_id",
101 |   "get_similar_memories",
102 |   "query_goals",
103 |   "consolidate_memories",
104 |   "diagnose_file_access_issues",
105 |   "generate_workflow_report",
106 |   "hybrid_search_memories",
107 |   "get_subgraph",
108 |   "echo"
109 | ]
```

--------------------------------------------------------------------------------
/ultimate_mcp_server/services/knowledge_base/__init__.py:
--------------------------------------------------------------------------------

```python
 1 | """Knowledge base services for RAG functionality."""
 2 | 
 3 | from .manager import KnowledgeBaseManager
 4 | from .rag_engine import RAGEngine
 5 | from .retriever import KnowledgeBaseRetriever
 6 | 
 7 | __all__ = [
 8 |     "KnowledgeBaseManager", 
 9 |     "KnowledgeBaseRetriever", 
10 |     "RAGEngine",
11 |     "get_knowledge_base_manager", 
12 |     "get_knowledge_base_retriever",
13 |     "get_rag_service"
14 | ]
15 | 
 16 | # Module-level singleton caches, created lazily by the get_* accessors below.
 17 | _knowledge_base_manager = None
 18 | _knowledge_base_retriever = None
 19 | _rag_service = None
20 | 
def get_knowledge_base_manager() -> KnowledgeBaseManager:
    """Return the process-wide knowledge base manager, creating it on first use.

    Returns:
        KnowledgeBaseManager: Knowledge base manager instance
    """
    global _knowledge_base_manager

    if _knowledge_base_manager is not None:
        return _knowledge_base_manager

    # Imported lazily to avoid a circular import at module load time.
    from ultimate_mcp_server.services.vector import get_vector_database_service

    _knowledge_base_manager = KnowledgeBaseManager(get_vector_database_service())
    return _knowledge_base_manager
36 | 
def get_knowledge_base_retriever() -> KnowledgeBaseRetriever:
    """Return the process-wide knowledge base retriever, creating it on first use.

    Returns:
        KnowledgeBaseRetriever: Knowledge base retriever instance
    """
    global _knowledge_base_retriever

    if _knowledge_base_retriever is not None:
        return _knowledge_base_retriever

    # Imported lazily to avoid a circular import at module load time.
    from ultimate_mcp_server.services.vector import get_vector_database_service

    _knowledge_base_retriever = KnowledgeBaseRetriever(get_vector_database_service())
    return _knowledge_base_retriever
52 | 
def get_rag_service() -> RAGEngine:
    """Return the process-wide RAG engine, creating it on first use.

    Returns:
        RAGEngine: RAG engine instance
    """
    global _rag_service

    if _rag_service is not None:
        return _rag_service

    # Lazy imports avoid circular dependencies between service packages.
    from ultimate_mcp_server.core import get_provider_manager
    from ultimate_mcp_server.services.analytics import get_analytics_service
    from ultimate_mcp_server.tools.optimization import get_optimization_service

    _rag_service = RAGEngine(
        retriever=get_knowledge_base_retriever(),
        provider_manager=get_provider_manager(),
        optimization_service=get_optimization_service(),
        analytics_service=get_analytics_service(),
    )
    return _rag_service
```

--------------------------------------------------------------------------------
/examples/sample/research_paper.txt:
--------------------------------------------------------------------------------

```
 1 | # Quantum Entanglement-Based Secure Communication Protocol Using Topological Qubits
 2 | 
 3 | ## Abstract
 4 | 
 5 | This paper introduces a novel quantum communication protocol that leverages topological qubits and quantum entanglement to achieve unprecedented levels of security and fault tolerance. Our research team at the Quantum Information Science Laboratory (QISL) at MIT, in collaboration with researchers from Stanford University's Quantum Systems Engineering Department and Google's Quantum AI division, has successfully demonstrated a practical implementation of this protocol using a 128-qubit quantum processor.
 6 | 
 7 | The protocol, named "TopEnt," combines the inherent security properties of quantum entanglement with the error resistance of topological quantum computation. Through a series of experiments conducted between January 2024 and March 2025, we achieved secure key distribution rates of 15.4 kilobits per second over a distance of 103 kilometers with a quantum bit error rate (QBER) of only 0.42%, significantly outperforming previous quantum key distribution methods.
 8 | 
 9 | Our findings suggest that TopEnt is resistant to all currently known quantum attack vectors, including photon-number splitting attacks and measurement-device-independent vulnerabilities. The theoretical foundation of our work builds upon established research by Bennett and Brassard (1984), Ekert (1991), and the more recent contributions of Zhang et al. (2022) on topological quantum error correction.
10 | 
11 | The practical implementation was made possible through a novel quantum repeater design developed by our team that utilizes entanglement swapping across intermediate nodes. Additionally, we employed a modified version of the surface code for error correction, enabling reliable quantum state preservation even in noisy environments.
12 | 
13 | This research was funded by DARPA's Quantum Information Science Program (Grant No. QIS-2023-45678), the National Science Foundation (Grant No. PHY-2356789), and received computational support from the MIT Quantum Computing Center.
14 | 
15 | These findings have significant implications for secure military communications, financial data transmission, and the future quantum internet infrastructure. We anticipate that TopEnt could be deployed in real-world applications within the next 3-5 years, pending further refinements and miniaturization of the required quantum hardware.
16 | 
17 | **Keywords:** quantum entanglement, topological qubits, quantum key distribution, quantum cryptography, quantum repeaters, fault-tolerant quantum computation 
```

--------------------------------------------------------------------------------
/additional_features.md:
--------------------------------------------------------------------------------

```markdown
 1 | # --- Command Line Execution ---
 2 | 
 3 | # 1. Ensure the Ultimate MCP Server is running in one terminal:
 4 | #    (Activate venv)
 5 | #    ultimate-mcp-server run
 6 | 
 7 | # 2. Ensure your .env file is configured with available API keys.
 8 | 
 9 | # 3. In another terminal, run the test orchestrator script:
10 | #    (Activate venv)
11 | #    python run_all_demo_scripts_and_check_for_errors.py [OPTIONS]
12 | 
13 | # Example Options (Check the script's argparse setup for exact flags):
14 | #    --tests-to-run <test_name_pattern> # Run only specific tests
15 | #    --include-providers <provider1,provider2> # Only run tests involving these providers
16 | #    --output-format <simple|detailed|html> # Control output verbosity/format
17 | #    --fail-fast # Stop on the first failure
18 | 
19 | # --- Example Code Snippet (Illustrating the runner's purpose) ---
20 | # This Python code shows how you might import and use the runner function if needed,
21 | # based on the example provided in the source.
22 | 
23 | from run_all_demo_scripts_and_check_for_errors import run_test_suite # Import the main function
24 | 
25 | # Define test parameters
26 | test_params = {
27 |     "tests_to_run": "all",  # Or a pattern like "browser*" to run only browser tests
28 |     "include_providers": ["openai", "anthropic"], # Limit tests to these providers if keys are set
29 |     "exclude_providers": ["gemini"], # Explicitly exclude providers
30 |     "output_format": "detailed", # Get detailed console output
31 |     "fail_fast": False, # Continue running tests even if one fails
32 |     # Add other parameters supported by run_test_suite if needed
33 | }
34 | 
35 | print(f"Starting test suite with parameters: {test_params}")
36 | 
37 | # Execute the test suite
38 | results = run_test_suite(**test_params)
39 | 
40 | # Process and display the results summary
41 | print("\n--- Test Suite Summary ---")
42 | print(f"Total Tests Attempted: {results.get('total_tests', 0)}")
43 | print(f"Passed: {results.get('passed', 0)}")
44 | print(f"Failed: {results.get('failed', 0)}")
45 | print(f"Skipped (e.g., missing keys/deps): {results.get('skipped', 0)}")
46 | print("--------------------------")
47 | 
48 | # The HTML report (if generated) provides detailed logs
49 | html_report_content = results.get("html_report")
50 | if html_report_content:
51 |     report_path = "test_suite_report.html"
52 |     try:
53 |         with open(report_path, "w", encoding="utf-8") as f:
54 |             f.write(html_report_content)
55 |         print(f"Detailed HTML report saved to: {report_path}")
56 |     except Exception as e:
57 |         print(f"Error saving HTML report: {e}")
58 | else:
59 |     # Handle case where HTML report wasn't generated based on output_format
60 |     if test_params.get("output_format") == "html":
61 |          print("HTML report was requested but not found in results.")
62 | 
63 | # Note: This code block assumes you can import and run the test suite function
64 | # directly. The primary intended use is likely via the command line script.
```

--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------

```dockerfile
 1 | FROM python:3.13-slim as builder
 2 | 
 3 | # Set working directory
 4 | WORKDIR /app
 5 | 
 6 | # Set environment variables for build
 7 | ENV PYTHONDONTWRITEBYTECODE=1 \
 8 |     PYTHONUNBUFFERED=1 \
 9 |     PIP_NO_CACHE_DIR=1 \
10 |     PIP_DISABLE_PIP_VERSION_CHECK=1
11 | 
12 | # Install system dependencies for build
13 | RUN apt-get update && \
14 |     apt-get install -y --no-install-recommends \
15 |     build-essential \
16 |     curl \
17 |     && apt-get clean && \
18 |     rm -rf /var/lib/apt/lists/*
19 | 
20 | # Install Python build tools and core dependencies (including torch for runtime)
21 | # Assuming torch is needed at runtime by dependencies like sentence-transformers
22 | COPY pyproject.toml ./
23 | RUN pip install --upgrade pip && \
24 |     # Install CPU-specific torch first if needed by dependencies
25 |     pip install torch --index-url https://download.pytorch.org/whl/cpu && \
26 |     # Install the project and its dependencies defined in pyproject.toml
27 |     pip install .
28 | 
29 | # Install system dependencies for OCR
30 | RUN apt-get update && apt-get install -y \
31 |     tesseract-ocr \
32 |     libtesseract-dev \
33 |     poppler-utils \
34 |     libgl1-mesa-glx \
35 |     && apt-get clean \
36 |     && rm -rf /var/lib/apt/lists/*
37 | 
38 | # Install language packs for tesseract (optional)
39 | RUN apt-get update && apt-get install -y \
40 |     tesseract-ocr-eng \
41 |     tesseract-ocr-fra \
42 |     tesseract-ocr-deu \
43 |     tesseract-ocr-spa \
44 |     && apt-get clean \
45 |     && rm -rf /var/lib/apt/lists/*
46 | 
47 | # Create a lightweight runtime image
48 | FROM python:3.13-slim
49 | 
50 | # Set working directory
51 | WORKDIR /app
52 | 
53 | # Set environment variables for runtime
54 | ENV PYTHONDONTWRITEBYTECODE=1 \
55 |     PYTHONUNBUFFERED=1
56 | 
57 | # Install runtime system dependencies
58 | # Add libgomp1 commonly needed by numpy/torch
59 | RUN apt-get update && \
60 |     apt-get install -y --no-install-recommends \
61 |     curl libgomp1 \
62 |     && apt-get clean && \
63 |     rm -rf /var/lib/apt/lists/*
64 | 
65 | # Create directories needed by the application before changing user
66 | RUN mkdir -p logs .cache .embeddings
67 | 
68 | # Copy installed Python packages from builder stage
69 | COPY --from=builder /usr/local/lib/python3.13/site-packages /usr/local/lib/python3.13/site-packages
70 | # Copy the hatchling/pip generated entrypoint scripts
71 | COPY --from=builder /usr/local/bin /usr/local/bin
72 | # Copy necessary configuration files
73 | COPY marqo_index_config.json .
74 | 
75 | # Create non-root user and group
76 | RUN groupadd -r ultimatemcpserver && \
77 |     useradd --no-log-init -r -g ultimatemcpserver ultimatemcpserver && \
78 |     # Change ownership of app directories
79 |     chown -R ultimatemcpserver:ultimatemcpserver /app
80 | 
81 | # Switch to non-root user
82 | USER ultimatemcpserver
83 | 
84 | # Expose application port
85 | EXPOSE 8013
86 | 
87 | # Use the installed script from pyproject.toml as entrypoint
88 | ENTRYPOINT ["ultimate-mcp-server"]
89 | CMD ["serve", "--host", "0.0.0.0", "--port", "8013"]
90 | 
91 | # Add health check
92 | HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \
93 |     CMD curl -f http://localhost:8013/healthz || exit 1
```

--------------------------------------------------------------------------------
/empirically_measured_model_speeds.json:
--------------------------------------------------------------------------------

```json
  1 | {
  2 |     "claude-3-5-haiku-20241022": {
  3 |         "total_time_s": 9.66,
  4 |         "output_tokens": 452,
  5 |         "output_tokens_per_second": 46.79
  6 |     },
  7 |     "claude-3-7-sonnet-20250219": {
  8 |         "total_time_s": 8.907,
  9 |         "output_tokens": 469,
 10 |         "output_tokens_per_second": 52.66
 11 |     },
 12 |     "claude-3-opus-20240229": {
 13 |         "total_time_s": 20.186,
 14 |         "output_tokens": 583,
 15 |         "output_tokens_per_second": 28.88
 16 |     },
 17 |     "claude-3-sonnet-20240229": {
 18 |         "total_time_s": 9.072,
 19 |         "output_tokens": 518,
 20 |         "output_tokens_per_second": 57.1
 21 |     },
 22 |     "deepseek-chat": {
 23 |         "total_time_s": 21.641,
 24 |         "output_tokens": 442,
 25 |         "output_tokens_per_second": 20.42
 26 |     },
 27 |     "deepseek-reasoner": {
 28 |         "total_time_s": 39.793,
 29 |         "output_tokens": 1097,
 30 |         "output_tokens_per_second": 27.57
 31 |     },
 32 |     "gemini-2.0-flash": {
 33 |         "total_time_s": 2.59,
 34 |         "output_tokens": 488,
 35 |         "output_tokens_per_second": 188.44
 36 |     },
 37 |     "gemini-2.0-flash-lite": {
 38 |         "total_time_s": 2.93,
 39 |         "output_tokens": 467,
 40 |         "output_tokens_per_second": 159.4
 41 |     },
 42 |     "gemini-2.0-flash-thinking-exp-01-21": {
 43 |         "total_time_s": 6.103,
 44 |         "output_tokens": 460,
 45 |         "output_tokens_per_second": 75.37
 46 |     },
 47 |     "gemini-2.5-pro-preview-03-25": {
 48 |         "total_time_s": 18.481,
 49 |         "output_tokens": 593,
 50 |         "output_tokens_per_second": 32.09
 51 |     },
 52 |     "gpt-4.1": {
 53 |         "total_time_s": 8.908,
 54 |         "output_tokens": 441,
 55 |         "output_tokens_per_second": 49.51
 56 |     },
 57 |     "gpt-4.1-mini": {
 58 |         "total_time_s": 6.184,
 59 |         "output_tokens": 506,
 60 |         "output_tokens_per_second": 81.82
 61 |     },
 62 |     "gpt-4.1-nano": {
 63 |         "total_time_s": 3.315,
 64 |         "output_tokens": 434,
 65 |         "output_tokens_per_second": 130.92
 66 |     },
 67 |     "gpt-4o": {
 68 |         "total_time_s": 10.358,
 69 |         "output_tokens": 446,
 70 |         "output_tokens_per_second": 43.06
 71 |     },
 72 |     "gpt-4o-mini": {
 73 |         "total_time_s": 6.086,
 74 |         "output_tokens": 416,
 75 |         "output_tokens_per_second": 68.36
 76 |     },
 77 |     "grok-3-fast-latest": {
 78 |         "total_time_s": 5.915,
 79 |         "output_tokens": 396,
 80 |         "output_tokens_per_second": 66.95
 81 |     },
 82 |     "grok-3-latest": {
 83 |         "total_time_s": 13.587,
 84 |         "output_tokens": 396,
 85 |         "output_tokens_per_second": 29.15
 86 |     },
 87 |     "grok-3-mini-fast-latest": {
 88 |         "total_time_s": 6.382,
 89 |         "output_tokens": 345,
 90 |         "output_tokens_per_second": 54.06
 91 |     },
 92 |     "grok-3-mini-latest": {
 93 |         "total_time_s": 9.32,
 94 |         "output_tokens": 366,
 95 |         "output_tokens_per_second": 39.27
 96 |     },
 97 |     "o1-preview": {
 98 |         "total_time_s": 19.538,
 99 |         "output_tokens": 1394,
100 |         "output_tokens_per_second": 71.35
101 |     },
102 |     "o3-mini": {
103 |         "total_time_s": 5.416,
104 |         "output_tokens": 395,
105 |         "output_tokens_per_second": 72.93
106 |     }
107 | }
```

--------------------------------------------------------------------------------
/ultimate_mcp_server/core/evaluation/base.py:
--------------------------------------------------------------------------------

```python
 1 | # --- core/evaluation/base.py (NEW) ---
 2 | from abc import ABC, abstractmethod
 3 | from typing import Any, Dict, Literal, Optional, Type  # Added Type
 4 | 
 5 | from pydantic import BaseModel, Field  # Added Field
 6 | 
 7 | # Assuming ModelResponseData is correctly importable from its location
 8 | # This path might need adjustment based on your actual project structure.
 9 | # If core.models.tournament is in the same parent directory as core.evaluation,
10 | # then this relative import should work if Python's import system can find 'ultimate_mcp_server'.
11 | # A common way is to have 'ultimate_mcp_server' as a top-level package.
12 | try:
13 |     from ultimate_mcp_server.core.models.tournament import ModelResponseData
14 | except ImportError:
15 |     # Fallback for different structures or if running script directly for testing
16 |     # This is a common pattern but ensure your PYTHONPATH or project structure handles it in production
17 |     # For instance, if 'ultimate_mcp_server' is the root of your installable package.
18 |     from ..models.tournament import ModelResponseData
19 | 
20 | 
class EvaluationScore(BaseModel):
    """Result of a single evaluator run on one model response."""

    score: float  # Primary numerical score
    details: Optional[str] = None  # Textual explanation or breakdown
    metrics: Dict[str, Any] = Field(default_factory=dict)  # Additional quantitative metrics from this evaluator
25 | 
class Evaluator(ABC):
    """Abstract base class for tournament response evaluators.

    Subclasses must set ``evaluator_type`` (e.g. "llm_grader") and implement
    :meth:`score`.
    """

    # Registry key identifying this evaluator; must be overridden by subclasses.
    evaluator_type: str

    def __init__(self, config: Dict[str, Any]):
        """
        Initialize the evaluator with its specific configuration.
        ``config`` comes from ``EvaluatorConfig.params``.
        """
        self.config = config

    @abstractmethod
    async def score(
        self,
        response_data: ModelResponseData,
        original_prompt: str,
        tournament_type: Literal["code", "text"]
    ) -> EvaluationScore:
        """
        Scores a model's response.

        Args:
            response_data: The ModelResponseData object containing the response text, code, etc.
            original_prompt: The initial prompt for the tournament.
            tournament_type: The type of the tournament ('code' or 'text').

        Returns:
            An EvaluationScore object.
        """
        ...

    @classmethod
    def get_config_schema(cls) -> Optional[Dict[str, Any]]:
        """
        Optional: returns a JSON schema for the evaluator's specific ``params``.
        Can be used for validation or UI generation; the default is None.
        """
        return None
65 | 
# Registry mapping evaluator_type strings to Evaluator classes.
# (A more sophisticated setup could use entry points.)
EVALUATOR_REGISTRY: Dict[str, Type[Evaluator]] = {}

def register_evaluator(cls: Type[Evaluator]):
    """Class decorator: register an Evaluator subclass under its evaluator_type."""
    evaluator_type = getattr(cls, 'evaluator_type', None)
    if not evaluator_type:
        raise ValueError(f"Evaluator class {cls.__name__} must define a 'evaluator_type' attribute.")
    if evaluator_type in EVALUATOR_REGISTRY:
        raise ValueError(f"Evaluator type '{evaluator_type}' already registered.")
    EVALUATOR_REGISTRY[evaluator_type] = cls
    return cls
```

--------------------------------------------------------------------------------
/ultimate_mcp_server/core/__init__.py:
--------------------------------------------------------------------------------

```python
 1 | """Core functionality for Ultimate MCP Server."""
 2 | import asyncio
 3 | from typing import Optional
 4 | 
 5 | from ultimate_mcp_server.core.server import Gateway
 6 | from ultimate_mcp_server.utils import get_logger
 7 | 
 8 | logger = get_logger(__name__)
 9 | 
 10 | # Lazily-initialized global Gateway singleton shared by the accessor functions below.
 11 | _gateway_instance = None
12 | 
async def async_init_gateway():
    """
    Asynchronously create and initialize the global Gateway singleton.

    On first call this constructs a Gateway named "provider-manager" and awaits
    its provider initialization (connecting providers and validating their
    configuration). Later calls return the already-initialized instance.

    Returns:
        The initialized Gateway instance

    Note:
        This function is designed to be called from async code. For synchronous
        contexts, use get_gateway_instance() which handles event loop management.
    """
    global _gateway_instance
    if _gateway_instance is not None:
        return _gateway_instance

    _gateway_instance = Gateway("provider-manager")
    await _gateway_instance._initialize_providers()
    return _gateway_instance
39 | 
# Strong reference to the in-flight background init task. asyncio only keeps a
# weak reference to tasks, so without this the task created below could be
# garbage-collected before it finishes.
_init_task = None

def get_provider_manager():
    """Get the provider manager from the Gateway instance.

    If no Gateway exists yet and no event loop is running, the Gateway is
    initialized synchronously (blocking). If a loop IS running, initialization
    is scheduled in the background and an empty dict is returned for now.

    Returns:
        Provider manager (dict of initialized providers), or {} while async
        initialization is still pending.
    """
    global _gateway_instance, _init_task

    if _gateway_instance is None:
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            # No event loop running: initialize synchronously (blocks).
            # asyncio.run() creates and closes its own loop, replacing the
            # deprecated get_event_loop()/run_until_complete() pattern.
            logger.info("Synchronously initializing gateway for get_provider_manager.")
            _gateway_instance = Gateway("provider-manager")
            asyncio.run(_gateway_instance._initialize_providers())
        else:
            # Inside a running loop we cannot block; schedule init and return {}.
            # BUG FIX: keep a reference to the task so it is not GC'd mid-flight.
            _init_task = asyncio.create_task(async_init_gateway())
            logger.warning("Gateway instance requested before async init completed.")
            return {}

    # Return the providers dictionary as a "manager"
    return _gateway_instance.providers if _gateway_instance else {}
70 | 
def get_gateway_instance() -> Optional[Gateway]:
    """Synchronously return the initialized Gateway instance, if any.

    Returns:
        The Gateway instance, or None (with a warning logged) when it has not
        been initialized yet.
    """
    if _gateway_instance is None:
        logger.warning("get_gateway_instance() called before instance was initialized.")
    return _gateway_instance
81 | 
# Public API of this module. get_gateway_instance and async_init_gateway are
# public accessors too; previously they were missing from __all__, so
# `from ultimate_mcp_server.core import *` did not export them.
__all__ = [
    "Gateway",
    "get_provider_manager",
    "get_gateway_instance",
    "async_init_gateway",
]
```

--------------------------------------------------------------------------------
/examples/test_ollama.py:
--------------------------------------------------------------------------------

```python
 1 | #!/usr/bin/env python
 2 | """Test script for checking Ollama connectivity."""
 3 | import asyncio
 4 | import aiohttp
 5 | import sys
 6 | 
 7 | async def test_ollama():
 8 |     """Test connection to Ollama API."""
 9 |     print("Testing Ollama API connectivity...")
10 |     
11 |     urls_to_try = [
12 |         "http://localhost:11434",
13 |         "http://127.0.0.1:11434",
14 |     ]
15 |     
16 |     for base_url in urls_to_try:
17 |         print(f"\nTrying URL: {base_url}")
18 |         try:
19 |             # Create a session with a short timeout
20 |             timeout = aiohttp.ClientTimeout(total=5.0)
21 |             async with aiohttp.ClientSession(timeout=timeout) as session:
22 |                 try:
23 |                     # Try to connect to the tags endpoint
24 |                     url = f"{base_url}/api/tags"
25 |                     print(f"Connecting to: {url}")
26 |                     
27 |                     async with session.get(url) as response:
28 |                         status = response.status
29 |                         print(f"Status code: {status}")
30 |                         
31 |                         if status == 200:
32 |                             data = await response.json()
33 |                             models = data.get("models", [])
34 |                             print(f"Success! Found {len(models)} models.")
35 |                             if models:
36 |                                 print("Model names:")
37 |                                 for model in models:
38 |                                     print(f"  - {model.get('name')}")
39 |                         else:
40 |                             text = await response.text()
41 |                             print(f"Error response: {text[:200]}")
42 |                 except aiohttp.ClientConnectionError as e:
43 |                     print(f"Connection error: {type(e).__name__} - {str(e)}")
44 |                 except asyncio.TimeoutError:
45 |                     print("Connection timed out after 5 seconds")
46 |                 except Exception as e:
47 |                     print(f"Unexpected error: {type(e).__name__} - {str(e)}")
48 |         except Exception as e:
49 |             print(f"Session creation error: {type(e).__name__} - {str(e)}")
50 | 
51 |     # Also try through the library's provider interface
52 |     try:
53 |         print("\nTesting through Ultimate MCP Server classes...")
54 |         # Import the OllamaProvider class
55 |         from ultimate_mcp_server.core.providers.ollama import OllamaProvider
56 |         
57 |         # Create an instance
58 |         provider = OllamaProvider()
59 |         print(f"Provider created with URL: {provider.config.api_url}")
60 |         
61 |         # Initialize the provider
62 |         initialized = await provider.initialize()
63 |         print(f"Provider initialized: {initialized}")
64 |         
65 |         if initialized:
66 |             # Try to list models
67 |             models = await provider.list_models()
68 |             print(f"Models found through provider: {len(models)}")
69 |             if models:
70 |                 print("Model IDs:")
71 |                 for model in models:
72 |                     print(f"  - {model['id']}")
73 |         
74 |         # Make sure to shut down properly
75 |         await provider.shutdown()
76 |     except Exception as e:
77 |         print(f"Provider test error: {type(e).__name__} - {str(e)}")
78 | 
def _main() -> None:
    """Script entry point: report environment versions, then run the probe."""
    print(f"Python version: {sys.version}")
    print(f"aiohttp version: {aiohttp.__version__}")
    asyncio.run(test_ollama())


if __name__ == "__main__":
    _main()
```

--------------------------------------------------------------------------------
/ultimate_mcp_server/utils/logging/emojis.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Emoji definitions for Gateway logging system.
  3 | 
  4 | This module contains constants for emojis used in logging to provide visual cues
  5 | about the type and severity of log messages.
  6 | """
  7 | from typing import Dict
  8 | 
  9 | # Log level emojis
 10 | INFO = "ℹ️"
 11 | DEBUG = "🔍"
 12 | WARNING = "⚠️"
 13 | ERROR = "❌"
 14 | CRITICAL = "🚨"
 15 | SUCCESS = "✅"
 16 | TRACE = "📍"
 17 | 
 18 | # Status emojis
 19 | RUNNING = "🔄"
 20 | PENDING = "⏳"
 21 | COMPLETED = "🏁"
 22 | FAILED = "👎"
 23 | STARTING = "🚀"
 24 | STOPPING = "🛑"
 25 | RESTARTING = "🔁"
 26 | LOADING = "📥"
 27 | SAVING = "📤"
 28 | CANCELLED = "🚫"
 29 | TIMEOUT = "⏱️"
 30 | SKIPPED = "⏭️"
 31 | 
 32 | # Operation emojis (Adapt for ultimate)
 33 | REQUEST = "➡️" # Example
 34 | RESPONSE = "⬅️" # Example
 35 | PROCESS = "⚙️"  # Example
 36 | CACHE_HIT = "✅" # Example
 37 | CACHE_MISS = "❌" # Example
 38 | AUTHENTICATE = "🔒" # Example
 39 | AUTHORIZE = "🔑" # Example
 40 | VALIDATE = "✔️"
 41 | CONNECT = "🔌"
 42 | DISCONNECT = "🔌"
 43 | UPDATE = "📝"
 44 | 
 45 | # Component emojis (Adapt for ultimate)
 46 | CORE = "⚙️"
 47 | PROVIDER = "☁️" # Example
 48 | ROUTER = "🔀" # Example
 49 | CACHE = "📦"
 50 | API = "🌐"
 51 | MCP = "📡" # Keep if relevant
 52 | UTILS = "🔧" # Example
 53 | 
 54 | # Tool emojis (Keep/remove/add as needed)
 55 | # RIPGREP = "🔍"
 56 | # AWK = "🔧"
 57 | # JQ = "🧰"
 58 | # SQLITE = "🗃️"
 59 | 
 60 | # Result emojis
 61 | FOUND = "🎯"
 62 | NOT_FOUND = "🔍"
 63 | PARTIAL = "◐"
 64 | UNKNOWN = "❓"
 65 | HIGH_CONFIDENCE = "🔒"
 66 | MEDIUM_CONFIDENCE = "🔓"
 67 | LOW_CONFIDENCE = "🚪"
 68 | 
 69 | # System emojis
 70 | STARTUP = "🔆"
 71 | SHUTDOWN = "🔅"
 72 | CONFIG = "⚙️"
 73 | ERROR = "⛔" # Distinct from level error
 74 | WARNING = "⚠️" # Same as level warning
 75 | DEPENDENCY = "🧱"
 76 | VERSION = "🏷️"
 77 | UPDATE_AVAILABLE = "🆕"
 78 | 
 79 | # User interaction emojis (Keep if relevant)
 80 | INPUT = "⌨️"
 81 | OUTPUT = "📺"
 82 | HELP = "❓"
 83 | HINT = "💡"
 84 | EXAMPLE = "📋"
 85 | QUESTION = "❓"
 86 | ANSWER = "💬"
 87 | 
 88 | # Time emojis
 89 | TIMING = "⏱️"
 90 | SCHEDULED = "📅"
 91 | DELAYED = "⏰"
 92 | OVERTIME = "⌛"
 93 | 
 94 | # Convenience mapping for log levels
 95 | LEVEL_EMOJIS: Dict[str, str] = {
 96 |     "info": INFO,
 97 |     "debug": DEBUG,
 98 |     "warning": WARNING,
 99 |     "error": ERROR,
100 |     "critical": CRITICAL,
101 |     "success": SUCCESS,
102 |     "trace": TRACE,
103 | }
104 | 
105 | # Dictionary for mapping operation names to emojis
106 | OPERATION_EMOJIS: Dict[str, str] = {
107 |     "request": REQUEST,
108 |     "response": RESPONSE,
109 |     "process": PROCESS,
110 |     "cache_hit": CACHE_HIT,
111 |     "cache_miss": CACHE_MISS,
112 |     "authenticate": AUTHENTICATE,
113 |     "authorize": AUTHORIZE,
114 |     "validate": VALIDATE,
115 |     "connect": CONNECT,
116 |     "disconnect": DISCONNECT,
117 |     "update": UPDATE,
118 |     # Add other common operations here
119 |     "startup": STARTUP,
120 |     "shutdown": SHUTDOWN,
121 |     "config": CONFIG,
122 | }
123 | 
124 | # Dictionary for mapping component names to emojis
125 | COMPONENT_EMOJIS: Dict[str, str] = {
126 |     "core": CORE,
127 |     "provider": PROVIDER,
128 |     "router": ROUTER,
129 |     "cache": CACHE,
130 |     "api": API,
131 |     "mcp": MCP,
132 |     "utils": UTILS,
133 |     # Add other components here
134 | }
135 | 
136 | # Get emoji by name function for more dynamic access
def get_emoji(category: str, name: str) -> str:
    """Resolve an emoji by category and name.

    Args:
        category: Emoji category ('level', 'operation', 'component', or any
            other string, which triggers a direct constant lookup)
        name: Name of the emoji within that category

    Returns:
        The matching emoji, a category-specific default for the known
        categories, or '❓' when nothing matches at all.
    """
    cat = category.lower()
    key = name.lower()

    # Known categories each carry their own fallback emoji.
    if cat == "level":
        return LEVEL_EMOJIS.get(key, "?")
    if cat == "operation":
        return OPERATION_EMOJIS.get(key, "⚙️")  # generic gear default
    if cat == "component":
        return COMPONENT_EMOJIS.get(key, "🧩")  # puzzle piece default

    # Any other category: try the module-level constants directly.
    module_globals = globals()
    constant_name = name.upper()
    if constant_name in module_globals:
        return module_globals[constant_name]

    # Nothing matched anywhere.
    return "❓"
```

--------------------------------------------------------------------------------
/examples/sample/text_classification_samples/support_tickets.txt:
--------------------------------------------------------------------------------

```
 1 | BUG REPORT:
 2 | The export to PDF feature is completely broken in version 3.2.1. When I click the export button, the application freezes for about 30 seconds, then crashes with error code 0x8007EE7. This happens consistently on every attempt. I've tried reinstalling the software and clearing the cache as suggested in the knowledge base, but the issue persists. This is blocking our team from delivering reports to clients. System specs: Windows 11 Pro, 16GB RAM, Intel i7-12700K.
 3 | 
 4 | FEATURE REQUEST:
 5 | It would be extremely helpful if you could add a bulk editing option for tags in the content management system. Currently, we have to edit tags one by one, which is very time-consuming when managing hundreds of articles. Ideally, we should be able to select multiple content pieces and apply or remove tags from all of them at once. This would save our editorial team hours of work each week and reduce the chance of tagging inconsistencies.
 6 | 
 7 | ACCOUNT ISSUE:
 8 | I'm unable to access my premium account despite having an active subscription. When I log in, the system still shows I have a free account with limited features. I can see in my bank statement that the $14.99 monthly charge went through three days ago. I've tried logging out and back in, clearing cookies, and using a different browser, but the problem remains. My account email is [email protected] and my customer ID is CUST-58924.
 9 | 
10 | BILLING QUESTION:
11 | I noticed an unexpected charge of $29.99 on my credit card statement from your company dated June 15th. I was under the impression that my subscription was $19.99/month. Was there a price increase that I missed notification about? Or is this an error? Please clarify what this charge covers and if there's been a change to my subscription terms. My account is registered under [email protected].
12 | 
13 | TECHNICAL QUESTION:
14 | Is it possible to integrate your API with Zapier? We're trying to automate our workflow between your platform and our CRM system. I've looked through the documentation but couldn't find specific information about Zapier integrations. If this is supported, could you point me to relevant documentation or examples? If not, do you have any recommendations for alternative integration methods that wouldn't require custom development?
15 | 
16 | BUG REPORT:
17 | There appears to be a security vulnerability in the user permission system. I discovered that standard users can access administrative reports by directly navigating to the URL pattern /admin/reports/custom/[report-id] even without admin privileges. I've verified this with two different standard user accounts. This potentially exposes sensitive company data to unauthorized personnel. Please address this urgently as it represents a significant security concern for our organization.
18 | 
19 | FEATURE REQUEST:
20 | Could you please consider adding dark mode to both the web and mobile applications? Working with the current bright interface during evening hours is causing eye strain for many of our team members. Ideally, the dark mode would be automatically triggered based on system settings but with the option to manually override. This has become a standard feature in most professional applications, and would greatly improve the user experience for those of us who work long hours.
21 | 
22 | ACCOUNT ISSUE:
23 | Our team admin left the company last week, and we need to transfer administrative privileges to another team member. The admin account was under [email protected]. We need to assign admin rights to [email protected] as soon as possible, as we're currently unable to add new team members or modify subscription settings. Our business account number is BIZ-4452-T. Please advise on the process for this transfer. 
```

--------------------------------------------------------------------------------
/ultimate_mcp_server/services/cache/utils.py:
--------------------------------------------------------------------------------

```python
 1 | """Cache utility functions for Ultimate MCP Server.
 2 | 
 3 | This module provides utility functions for working with the cache service
 4 | that were previously defined in example scripts but are now part of the library.
 5 | """
 6 | 
 7 | import hashlib
 8 | 
 9 | from ultimate_mcp_server.constants import Provider
10 | from ultimate_mcp_server.core.providers.base import get_provider
11 | from ultimate_mcp_server.services.cache import get_cache_service
12 | from ultimate_mcp_server.utils import get_logger
13 | 
14 | # Initialize logger
15 | logger = get_logger("ultimate_mcp_server.services.cache.utils")
16 | 
async def run_completion_with_cache(
    prompt: str,
    provider_name: str = Provider.OPENAI.value,
    model: str = None,
    temperature: float = 0.1,
    max_tokens: int = None,
    use_cache: bool = True,
    ttl: int = 3600,  # Default 1 hour cache TTL
    api_key: str = None
):
    """Run a completion, transparently caching the result.

    Handles provider initialization, deterministic cache-key generation,
    cache lookup, and storing fresh results back into the cache.

    Args:
        prompt: Text prompt for completion
        provider_name: Provider to use (default: OpenAI)
        model: Model name (optional, uses provider default if not specified)
        temperature: Temperature for generation (default: 0.1)
        max_tokens: Maximum tokens to generate (optional)
        use_cache: Whether to use cache (default: True)
        ttl: Cache TTL in seconds (default: 3600/1 hour)
        api_key: Provider API key (optional, falls back to internal provider system)

    Returns:
        Completion result with additional processing_time attribute
    """
    # Provider setup; the provider system resolves API keys when none is given.
    try:
        provider = await get_provider(provider_name, api_key=api_key)
        await provider.initialize()
    except Exception as e:
        logger.error(f"Failed to initialize provider {provider_name}: {e}", emoji_key="error")
        raise  # Propagate so callers don't proceed with a dead provider

    cache_service = get_cache_service()

    # Resolve the model up front so the cache key is stable.
    model_id = model or provider.get_default_model()

    # Fingerprint every parameter that can influence the generated output.
    fingerprint_source = f"{prompt}:{temperature}:{max_tokens if max_tokens else 'default'}"
    fingerprint = hashlib.md5(fingerprint_source.encode()).hexdigest()
    cache_key = f"completion:{provider_name}:{model_id}:{fingerprint}"

    # Fast path: return the cached completion when available.
    if use_cache and cache_service.enabled:
        cached = await cache_service.get(cache_key)
        if cached is not None:
            logger.success("Cache hit! Using cached result", emoji_key="cache")
            cached.processing_time = 0.001  # retrieval cost is negligible
            return cached

    # Slow path: generate a fresh completion.
    miss_message = (
        "Cache miss. Generating new completion..."
        if use_cache
        else "Cache disabled by request. Generating new completion..."
    )
    logger.info(miss_message, emoji_key="processing")

    result = await provider.generate_completion(
        prompt=prompt,
        model=model_id,
        temperature=temperature,
        max_tokens=max_tokens
    )

    # Persist the fresh result for subsequent identical requests.
    if use_cache and cache_service.enabled:
        await cache_service.set(key=cache_key, value=result, ttl=ttl)
        logger.info(f"Result saved to cache (key: ...{cache_key[-10:]})", emoji_key="cache")

    return result
```

--------------------------------------------------------------------------------
/TEST_README.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Ultimate MCP Server Test Scripts
  2 | 
  3 | This directory contains test scripts to validate your Ultimate MCP Server functionality.
  4 | 
  5 | ## Prerequisites
  6 | 
  7 | Make sure you have FastMCP installed:
  8 | ```bash
  9 | pip install fastmcp
 10 | # or
 11 | uv add fastmcp
 12 | ```
 13 | 
 14 | Also install aiohttp for REST API testing:
 15 | ```bash
 16 | pip install aiohttp
 17 | # or  
 18 | uv add aiohttp
 19 | ```
 20 | 
 21 | ## Test Scripts
 22 | 
 23 | ### 1. `quick_test.py` - Quick Connectivity Test
 24 | **Purpose**: Fast basic connectivity and functionality check
 25 | **Runtime**: ~5 seconds
 26 | 
 27 | ```bash
 28 | python quick_test.py
 29 | ```
 30 | 
 31 | This script tests:
 32 | - ✅ Basic MCP connection
 33 | - 📢 Echo tool functionality  
 34 | - 🔌 Provider availability
 35 | - 🛠️ Tool count
 36 | - 📚 Resource count
 37 | 
 38 | ### 2. `test_client.py` - Interactive Test Client
 39 | **Purpose**: Comprehensive testing with interactive mode
 40 | **Runtime**: Variable (can be used interactively)
 41 | 
 42 | ```bash
 43 | python test_client.py
 44 | ```
 45 | 
 46 | This script tests:
 47 | - 🔗 Server connection
 48 | - 📋 Tool listing and calling
 49 | - 📚 Resource reading
 50 | - 🤖 LLM completions
 51 | - 📁 Filesystem tools
 52 | - 🐍 Python execution
 53 | - 📝 Text processing tools
 54 | - 🎮 Interactive command mode
 55 | 
 56 | **Interactive Commands**:
 57 | - `list` - Show available tools
 58 | - `resources` - Show available resources  
 59 | - `call <tool_name> <json_params>` - Call a tool
 60 | - `read <resource_uri>` - Read a resource
 61 | - `quit` - Exit
 62 | 
 63 | ### 3. `comprehensive_test.py` - Full Test Suite
 64 | **Purpose**: Complete validation of MCP and REST API functionality
 65 | **Runtime**: ~30 seconds
 66 | 
 67 | ```bash
 68 | python comprehensive_test.py
 69 | ```
 70 | 
 71 | This script tests:
 72 | - 🔧 MCP Interface (tools, providers, filesystem, Python)
 73 | - 🌐 REST API Endpoints (discovery, health, docs, cognitive states, performance, artifacts)
 74 | - 🤖 LLM Completions (actual generation with available providers)
 75 | - 🧠 Memory System (storage, retrieval, cognitive states)
 76 | 
 77 | ## Understanding Results
 78 | 
 79 | ### ✅ Green Check - Working Correctly
 80 | The feature is functioning as expected.
 81 | 
 82 | ### ❌ Red X - Needs Attention  
 83 | The feature failed or is not available. Common reasons:
 84 | - API keys not configured
 85 | - Provider services unavailable
 86 | - Database connection issues
 87 | - Missing dependencies
 88 | 
 89 | ## Your Server Configuration
 90 | 
 91 | Based on your server startup logs, your server has:
 92 | - **107 tools** loaded (all available tools mode)
 93 | - **7 LLM providers** configured:
 94 |   - ✅ Anthropic (3 models)
 95 |   - ✅ DeepSeek (2 models) 
 96 |   - ✅ Gemini (4 models)
 97 |   - ✅ OpenRouter (3 models)
 98 |   - ✅ Ollama (3 models) - Local
 99 |   - ✅ Grok (4 models)
100 |   - ✅ OpenAI (47 models)
101 | 
102 | ## Endpoints Available
103 | 
104 | ### MCP Protocol
105 | - `http://127.0.0.1:8013/mcp` - Main MCP streamable-HTTP endpoint
106 | 
107 | ### REST API
108 | - `http://127.0.0.1:8013/` - Discovery endpoint
109 | - `http://127.0.0.1:8013/api/health` - Health check
110 | - `http://127.0.0.1:8013/api/docs` - Swagger UI documentation
111 | - `http://127.0.0.1:8013/api/cognitive-states` - Cognitive state management
112 | - `http://127.0.0.1:8013/api/performance/overview` - Performance metrics
113 | - `http://127.0.0.1:8013/api/artifacts` - Artifact management
114 | 
115 | ### UMS Explorer
116 | - `http://127.0.0.1:8013/api/ums-explorer` - Memory system explorer UI
117 | 
118 | ## Troubleshooting
119 | 
120 | ### Connection Failed
121 | - Verify server is running on port 8013
122 | - Check firewall settings
123 | - Ensure no other service is using the port
124 | 
125 | ### Provider Errors  
126 | - Check API keys in environment variables
127 | - Verify provider service availability
128 | - Test with local Ollama first (no API key needed)
129 | 
130 | ### Tool Errors
131 | - Check filesystem permissions
132 | - Verify Python sandbox configuration
133 | - Check database connectivity
134 | 
135 | ## Example Usage
136 | 
137 | ```bash
138 | # Quick smoke test
139 | python quick_test.py
140 | 
141 | # Interactive exploration
142 | python test_client.py
143 | # Then type: list
144 | # Then type: call echo {"message": "Hello!"}
145 | 
146 | # Full validation
147 | python comprehensive_test.py
148 | ```
149 | 
150 | ## Next Steps
151 | 
152 | After successful testing:
153 | 1. Check the Swagger UI at `http://127.0.0.1:8013/api/docs`
154 | 2. Explore the UMS Explorer at `http://127.0.0.1:8013/api/ums-explorer`  
155 | 3. Test with a real MCP client like Claude Desktop
156 | 4. Start building your applications using the MCP tools! 
```

--------------------------------------------------------------------------------
/tests/manual/test_extraction_advanced.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Manual test for advanced extraction tools using standardized completion.
  4 | This script tests the remaining extraction tools that were refactored to use
  5 | the standardized completion tool.
  6 | """
  7 | 
  8 | import asyncio
  9 | import json
 10 | import os
 11 | import sys
 12 | 
 13 | # Add the project root to the Python path
 14 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")))
 15 | 
 16 | from ultimate_mcp_server.constants import Provider
 17 | from ultimate_mcp_server.tools.extraction import extract_code_from_response, extract_semantic_schema
 18 | 
 19 | 
 20 | async def test_extract_semantic_schema():
 21 |     """Test the extract_semantic_schema function with a simple schema."""
 22 |     print("\n--- Testing extract_semantic_schema ---")
 23 |     
 24 |     # Define a JSON schema to extract data
 25 |     schema = {
 26 |         "type": "object",
 27 |         "properties": {
 28 |             "name": {"type": "string"},
 29 |             "email": {"type": "string"},
 30 |             "phone": {"type": "string"},
 31 |             "interests": {"type": "array", "items": {"type": "string"}}
 32 |         }
 33 |     }
 34 |     
 35 |     # Sample text containing information matching the schema
 36 |     sample_text = """
 37 |     Profile information:
 38 |     
 39 |     Name: Sarah Johnson
 40 |     Contact: [email protected]
 41 |     Phone Number: 555-987-6543
 42 |     
 43 |     Sarah is interested in: machine learning, data visualization, and hiking.
 44 |     """
 45 |     
 46 |     result = await extract_semantic_schema(
 47 |         text=sample_text,
 48 |         semantic_schema=schema,
 49 |         provider=Provider.OPENAI.value,
 50 |         model="gpt-3.5-turbo"
 51 |     )
 52 |     
 53 |     print(f"Success: {result.get('success', False)}")
 54 |     print(f"Model used: {result.get('model', 'unknown')}")
 55 |     print(f"Tokens: {result.get('tokens', {})}")
 56 |     print(f"Processing time: {result.get('processing_time', 0):.2f}s")
 57 |     
 58 |     # Pretty print the extracted data
 59 |     if result.get('data'):
 60 |         print("Extracted Schema Data:")
 61 |         print(json.dumps(result['data'], indent=2))
 62 |     else:
 63 |         print("Failed to extract schema data")
 64 |         print(f"Error: {result.get('error', 'unknown error')}")
 65 | 
 66 | 
 67 | async def test_extract_code_from_response():
 68 |     """Test the extract_code_from_response function."""
 69 |     print("\n--- Testing extract_code_from_response ---")
 70 |     
 71 |     # Sample text with a code block
 72 |     sample_text = """
 73 |     Here's a Python function to calculate the factorial of a number:
 74 |     
 75 |     ```python
 76 |     def factorial(n):
 77 |         if n == 0 or n == 1:
 78 |             return 1
 79 |         else:
 80 |             return n * factorial(n-1)
 81 |             
 82 |     # Example usage
 83 |     print(factorial(5))  # Output: 120
 84 |     ```
 85 |     
 86 |     This uses a recursive approach to calculate the factorial.
 87 |     """
 88 |     
 89 |     # Test with regex-based extraction
 90 |     print("Testing regex-based extraction...")
 91 |     extracted_code = await extract_code_from_response(
 92 |         response_text=sample_text,
 93 |         model="openai/gpt-3.5-turbo",
 94 |         timeout=10
 95 |     )
 96 |     
 97 |     print("Extracted Code:")
 98 |     print(extracted_code)
 99 |     
100 |     # Test with LLM-based extraction on text without markdown
101 |     print("\nTesting LLM-based extraction...")
102 |     sample_text_no_markdown = """
103 |     Here's a Python function to calculate the factorial of a number:
104 |     
105 |     def factorial(n):
106 |         if n == 0 or n == 1:
107 |             return 1
108 |         else:
109 |             return n * factorial(n-1)
110 |             
111 |     # Example usage
112 |     print(factorial(5))  # Output: 120
113 |     
114 |     This uses a recursive approach to calculate the factorial.
115 |     """
116 |     
117 |     extracted_code = await extract_code_from_response(
118 |         response_text=sample_text_no_markdown,
119 |         model="openai/gpt-3.5-turbo",
120 |         timeout=10
121 |     )
122 |     
123 |     print("Extracted Code:")
124 |     print(extracted_code)
125 | 
126 | 
async def main():
    """Execute every manual extraction test in sequence."""
    print("Testing advanced extraction tools with standardized completion...")

    # Run the test coroutines one after another.
    for test_case in (test_extract_semantic_schema, test_extract_code_from_response):
        await test_case()

    print("\nAll tests completed.")
135 | 
136 | 
137 | if __name__ == "__main__":
138 |     asyncio.run(main()) 
```

--------------------------------------------------------------------------------
/examples/sample/article.txt:
--------------------------------------------------------------------------------

```
 1 | ## Tech Industry Shake-up: Microsoft Acquires AI Startup Anthropic for $10B
 2 | 
 3 | March 25, 2025 | Sarah Johnson, Technology Reporter
 4 | 
 5 | In a move that has sent shockwaves through Silicon Valley, Microsoft Corporation announced yesterday its acquisition of AI research company Anthropic for $10 billion. The deal, which was finalized after months of secretive negotiations, marks Microsoft's largest investment in artificial intelligence to date.
 6 | 
 7 | Microsoft CEO Satya Nadella explained the strategic importance of the acquisition during a press conference held at Microsoft's headquarters in Redmond, Washington. "Anthropic's Claude AI models represent some of the most sophisticated language systems ever developed," Nadella stated. "This acquisition strengthens our commitment to responsible AI development and ensures Microsoft remains at the forefront of the AI revolution."
 8 | 
 9 | Anthropic, founded in 2021 by former OpenAI researchers Dario Amodei and Daniela Amodei, has gained recognition for its Claude family of AI models that emphasize safety and interpretability. The company had previously received significant funding from Google and Amazon, with Google investing $300 million for a 10% stake in 2023, and Amazon committing up to $4 billion in September 2023.
10 | 
11 | Both Dario Amodei, who serves as Anthropic's CEO, and Daniela Amodei, the company's President, will join Microsoft's AI leadership team while continuing to oversee Anthropic's operations. "Joining forces with Microsoft gives us the computational resources and research talent needed to advance our constitutional AI approach," said Dario Amodei. "We believe this partnership will accelerate our mission to develop AI systems that are steerable, interpretable, and robust."
12 | 
13 | The acquisition has raised antitrust concerns, with the Federal Trade Commission (FTC) Chair Lina Khan announcing an immediate review of the deal. "We will scrutinize this acquisition carefully to ensure it doesn't further concentrate power in the already consolidated AI sector," Khan said in a statement released by the FTC.
14 | 
15 | Google's parent company Alphabet and Amazon, both major investors in Anthropic, may face significant losses from the acquisition. Alphabet's stock (GOOGL) fell 3.2% following the announcement, while Amazon (AMZN) saw a more modest decline of 1.5%. In contrast, Microsoft (MSFT) shares jumped 5.8% to $420.75.
16 | 
17 | OpenAI CEO Sam Altman expressed surprise at the acquisition in a post on X (formerly Twitter): "Congratulations to the Anthropic team. This creates an interesting competitive landscape. Game on." OpenAI, which has received approximately $13 billion in investment from Microsoft, now finds itself in the unusual position of competing with another Microsoft-owned AI company.
18 | 
19 | Industry analyst Maria Rodriguez from Morgan Stanley noted that the acquisition signals a new phase in the AI arms race. "Microsoft is clearly hedging its bets by owning stakes in both leading frontier AI labs. This could be interpreted as uncertainty about which approach to AI safety and capabilities will ultimately succeed," Rodriguez explained in a research note to investors.
20 | 
21 | The deal includes provisions for Anthropic to continue operating as a separate entity within Microsoft, with guaranteed compute resources on Microsoft's Azure cloud platform. All of Anthropic's 350 employees will be retained, and the company's San Francisco headquarters will remain operational.
22 | 
23 | According to sources familiar with the matter, the acquisition talks began after a dinner meeting between Nadella and Dario Amodei at the World Economic Forum in Davos, Switzerland in January 2025. Microsoft President Brad Smith and CFO Amy Hood were reportedly instrumental in structuring the complex deal.
24 | 
25 | The acquisition is expected to close by Q3 2025, pending regulatory approval. If approved, it would mark another significant milestone in the rapidly evolving artificial intelligence industry, where companies are increasingly competing for talent, technology, and market position. 
```

--------------------------------------------------------------------------------
/examples/sample/text_classification_samples/product_reviews.txt:
--------------------------------------------------------------------------------

```
 1 | POSITIVE REVIEW:
 2 | I absolutely love this coffee maker! It brews the perfect cup every time and the temperature control is spot on. After three months of daily use, I'm still impressed with how consistent the results are. The app connectivity seemed gimmicky at first, but being able to schedule brews from bed has been a game-changer for my morning routine. Clean-up is simple with the removable parts and the sleek design fits perfectly in my kitchen. Best appliance purchase I've made in years!
 3 | 
 4 | NEGATIVE REVIEW:
 5 | This laptop has been nothing but trouble since day one. The battery barely lasts 2 hours despite the "all-day battery life" claim, and it overheats constantly even with basic web browsing. The keyboard started having sticky keys after just two weeks, and customer support has been completely unhelpful. To make matters worse, the screen has strange flickering issues that come and go randomly. Save your money and avoid this model completely - total waste of $1200.
 6 | 
 7 | NEUTRAL REVIEW:
 8 | The wireless earbuds are decent for the price point. Sound quality is acceptable though not exceptional - you get what you pay for. Battery life matches the advertised 4 hours, and the charging case provides about 3 full charges as expected. The fit is comfortable enough for short periods, though they start to hurt after about 2 hours of continuous use. Connectivity is generally stable but occasional drops occur when the phone is in a pocket. Overall, a reasonable budget option if you're not an audiophile.
 9 | 
10 | POSITIVE REVIEW:
11 | This air fryer has completely transformed how I cook! Food comes out perfectly crispy on the outside and juicy on the inside, all with minimal or no oil. It preheats quickly and the digital controls are intuitive and responsive. I appreciate the dishwasher-safe basket which makes cleanup a breeze. Even my kids are eating more vegetables now because they taste so good prepared this way. The unit is a bit bulky on the counter, but the performance more than makes up for the space it takes.
12 | 
13 | NEGATIVE REVIEW:
14 | I regret purchasing this robot vacuum. It constantly gets stuck under furniture despite claiming to have "smart navigation," and the battery dies before finishing our modestly sized apartment. The app disconnects frequently requiring tedious reconnection processes. The dust bin is way too small and needs emptying after each use. Worst of all, it scratched our hardwood floors in several places! Customer service offered little help beyond basic troubleshooting steps I'd already tried. Returning this disappointment ASAP.
15 | 
16 | NEUTRAL REVIEW:
17 | The fitness tracker works as advertised for basic functions. Step counting seems accurate enough for casual use, and the sleep tracking provides interesting if not necessarily actionable data. Heart rate monitoring is hit or miss during high-intensity workouts but fine for resting measurements. The app is somewhat clunky but gets the job done. Battery lasts about 4 days which is adequate. The band is comfortable but shows signs of wear after a couple months. It's not outstanding but reasonable value for the price point.
18 | 
19 | POSITIVE REVIEW:
20 | This blender is an absolute powerhouse! I've thrown everything at it from frozen fruits to tough vegetables and nuts, and it creates perfectly smooth blends every time. The variable speed control gives precise results whether you want chunky salsa or silky smoothies. It's definitely louder than my previous blender, but the performance justifies the noise. The container is easy to clean and the blades are impressively durable. Yes, it's expensive, but given the quality and 7-year warranty, it's worth every penny for a serious home cook.
21 | 
22 | NEGATIVE REVIEW:
23 | These "premium" headphones are anything but premium. The sound quality is muddy with overwhelming bass that drowns out mids and highs. The noise cancellation is so weak it barely reduces ambient noise. The build quality feels cheap with plastic parts that creak when you adjust them. After just one month, the right ear cup started cutting out intermittently. For this price point, I expected far better quality and performance. Definitely returning these and going with a more reliable brand. 
```

--------------------------------------------------------------------------------
/examples/sample/text_classification_samples/news_samples.txt:
--------------------------------------------------------------------------------

```
 1 | TECH NEWS SAMPLE:
 2 | Apple unveiled its new Apple Intelligence features today, integrating advanced AI capabilities directly into iOS 18, macOS Sequoia, and iPadOS 18. This marks a significant shift in Apple's strategy, embracing generative AI while maintaining their focus on privacy by processing most tasks on-device. Features include intelligent email summarization, photo editing via text prompts, and a significantly enhanced Siri experience that can understand context across apps.
 3 | 
 4 | SPORTS NEWS SAMPLE:
 5 | The Boston Celtics clinched their 18th NBA championship with a decisive 106-88 victory over the Dallas Mavericks last night, winning the series 4-1. Jayson Tatum was named Finals MVP after averaging 28.5 points and 9.8 rebounds during the series. "This is what we've worked for all season," said Celtics coach Joe Mazzulla. The victory ends a 16-year championship drought for the storied franchise, which now moves ahead of the Los Angeles Lakers for most NBA titles.
 6 | 
 7 | POLITICS NEWS SAMPLE:
 8 | The Senate passed a major bipartisan infrastructure bill today with a 69-30 vote, allocating $1.2 trillion for roads, bridges, public transit, and broadband internet. The legislation represents a rare moment of cooperation in a deeply divided Congress. "This bill shows America can do big things when we work together," said President Biden at a press conference following the vote. The bill now moves to the House, where progressive Democrats have tied its passage to a larger $3.5 trillion social spending package.
 9 | 
10 | HEALTH NEWS SAMPLE:
11 | A new study published in The Lancet suggests that intermittent fasting may not offer significant weight loss advantages over traditional calorie restriction diets. Researchers followed 250 participants over a 12-month period and found that while both approaches led to weight loss, there was no statistically significant difference between the two methods. "What matters most is consistency and finding an eating pattern that works for your lifestyle," said lead researcher Dr. Emily Chen from Stanford University.
12 | 
13 | ENTERTAINMENT NEWS SAMPLE:
14 | The 96th Academy Awards ceremony delivered several surprises, with "The Quiet Hour" taking home Best Picture despite being a low-budget independent film. Lead actress Zoe Kazan won Best Actress for her role as a Holocaust survivor, while Christopher Nolan finally secured his first Best Director Oscar for "Synchronicity." The ceremony saw a 12% increase in viewership from last year, reversing a years-long decline in ratings for Hollywood's biggest night.
15 | 
16 | SCIENCE NEWS SAMPLE:
17 | NASA's Europa Clipper mission has entered its final assembly phase, with launch scheduled for October 2024. The spacecraft will conduct detailed reconnaissance of Jupiter's moon Europa, which scientists believe harbors a subsurface ocean that could potentially support life. "This mission represents our best chance to determine if Europa's ocean is habitable," said project scientist Dr. Robert Pappalardo. The spacecraft will make nearly 50 close flybys of Europa, collecting data that will help scientists understand the moon's potential to harbor life.
18 | 
19 | BUSINESS NEWS SAMPLE:
20 | Tesla announced record quarterly profits today, exceeding Wall Street expectations with revenue of $24.3 billion and earnings per share of $1.24. The electric vehicle manufacturer delivered 466,000 vehicles in Q2, a 50% increase from the same period last year. CEO Elon Musk attributed the success to improved production efficiency and strong demand for the Model Y. The company also revealed plans to begin production of its Cybertruck at the Texas Gigafactory by early next quarter, ending years of delays for the highly anticipated vehicle.
21 | 
22 | EDUCATION NEWS SAMPLE:
23 | A landmark study from the Department of Education found that states implementing universal pre-kindergarten programs saw significant improvements in literacy rates and reduced achievement gaps. The research, which followed 28,000 students across 12 states, showed that children who attended quality pre-K programs were 38% more likely to read at grade level by third grade compared to their peers. "This provides compelling evidence that early childhood education should be a national priority," said Education Secretary Miguel Cardona. 
```

--------------------------------------------------------------------------------
/ultimate_mcp_server/services/knowledge_base/utils.py:
--------------------------------------------------------------------------------

```python
  1 | """Utility functions for the knowledge base services."""
  2 | from typing import Any, Dict, List, Optional
  3 | 
  4 | 
  5 | def build_metadata_filter(
  6 |     filters: Optional[Dict[str, Any]] = None,
  7 |     operator: str = "$and"
  8 | ) -> Optional[Dict[str, Any]]:
  9 |     """Build a ChromaDB metadata filter.
 10 |     
 11 |     Args:
 12 |         filters: Dictionary of metadata filters (field->value or field->{op: value})
 13 |         operator: Logical operator to combine filters ($and or $or)
 14 |         
 15 |     Returns:
 16 |         ChromaDB-compatible filter or None
 17 |     """
 18 |     if not filters:
 19 |         return None
 20 |     
 21 |     # Handle direct equality case with single filter
 22 |     if len(filters) == 1 and not any(isinstance(v, dict) for v in filters.values()):
 23 |         field, value = next(iter(filters.items()))
 24 |         return {field: value}  # ChromaDB handles direct equality
 25 |     
 26 |     # Process complex filters
 27 |     filter_conditions = []
 28 |     
 29 |     for field, condition in filters.items():
 30 |         if isinstance(condition, dict):
 31 |             # Already has operators
 32 |             if any(k.startswith("$") for k in condition.keys()):
 33 |                 filter_conditions.append({field: condition})
 34 |             else:
 35 |                 # Convert to $eq
 36 |                 filter_conditions.append({field: {"$eq": condition}})
 37 |         else:
 38 |             # Simple equality
 39 |             filter_conditions.append({field: {"$eq": condition}})
 40 |     
 41 |     # If only one condition, no need for logical operator
 42 |     if len(filter_conditions) == 1:
 43 |         return filter_conditions[0]
 44 |     
 45 |     # Combine with logical operator
 46 |     return {operator: filter_conditions}
 47 | 
 48 | 
 49 | def extract_keywords(text: str, min_length: int = 3, max_keywords: int = 10) -> List[str]:
 50 |     """Extract important keywords from text.
 51 |     
 52 |     Args:
 53 |         text: Input text
 54 |         min_length: Minimum length of keywords
 55 |         max_keywords: Maximum number of keywords to extract
 56 |         
 57 |     Returns:
 58 |         List of keywords
 59 |     """
 60 |     # Simple keyword extraction (could be improved with NLP)
 61 |     words = text.lower().split()
 62 |     
 63 |     # Filter out short words and common stop words
 64 |     stop_words = {
 65 |         "the", "and", "a", "an", "in", "on", "at", "to", "for", "with", "by", 
 66 |         "is", "are", "was", "were", "be", "been", "has", "have", "had", "of", "that"
 67 |     }
 68 |     
 69 |     keywords = [
 70 |         word.strip(".,?!\"'()[]{}:;") 
 71 |         for word in words 
 72 |         if len(word) >= min_length and word.lower() not in stop_words
 73 |     ]
 74 |     
 75 |     # Count occurrences
 76 |     keyword_counts = {}
 77 |     for word in keywords:
 78 |         if word in keyword_counts:
 79 |             keyword_counts[word] += 1
 80 |         else:
 81 |             keyword_counts[word] = 1
 82 |     
 83 |     # Sort by frequency
 84 |     sorted_keywords = sorted(keyword_counts.items(), key=lambda x: x[1], reverse=True)
 85 |     
 86 |     # Return top keywords
 87 |     return [k for k, _ in sorted_keywords[:max_keywords]]
 88 | 
 89 | 
 90 | def generate_token_estimate(text: str) -> int:
 91 |     """Generate a rough estimate of token count.
 92 |     
 93 |     Args:
 94 |         text: Input text
 95 |         
 96 |     Returns:
 97 |         Estimated token count
 98 |     """
 99 |     # Rough estimate based on whitespace tokenization and a multiplier
100 |     # This is a very crude approximation
101 |     words = len(text.split())
102 |     
103 |     # Adjust for non-English or technical content
104 |     if any(ord(c) > 127 for c in text):  # Has non-ASCII chars
105 |         return int(words * 1.5)  # Non-English texts need more tokens
106 |     
107 |     # Standard English approximation
108 |     return int(words * 1.3)  # Account for tokenization differences
109 | 
110 | 
def create_document_metadata(
    document: str,
    source: Optional[str] = None,
    document_type: Optional[str] = None
) -> Dict[str, Any]:
    """Create metadata for a document.

    Args:
        document: Document text
        source: Optional source of the document
        document_type: Optional document type

    Returns:
        Document metadata with character length, token estimate, creation
        timestamp (milliseconds since the epoch), and optional
        source/type/potential_title fields
    """
    # Basic metadata. NOTE: `import_time` is the `time` *module* (imported
    # at the bottom of this file to avoid circular imports), so we must call
    # `import_time.time()` — calling the module itself raised TypeError.
    metadata = {
        "length": len(document),
        "token_estimate": generate_token_estimate(document),
        "created_at": int(1000 * import_time.time()),
    }

    # Add source if provided
    if source:
        metadata["source"] = source

    # Add document type if provided
    if document_type:
        metadata["type"] = document_type

    # Extract potential title from first line
    lines = document.strip().split("\n")
    if lines and len(lines[0]) < 100:  # Potential title
        metadata["potential_title"] = lines[0]

    return metadata
147 | 
148 | 
149 | # Import at the end to avoid circular imports
150 | import time as import_time  # noqa: E402
151 | 
```

--------------------------------------------------------------------------------
/tests/manual/test_extraction.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Manual test for extraction tools using standardized completion.
  4 | This script tests the key functions in extraction.py to ensure they work
  5 | with the updated standardized completion tool.
  6 | """
  7 | 
  8 | import asyncio
  9 | import json
 10 | import os
 11 | import sys
 12 | 
 13 | # Add the project root to the Python path
 14 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")))
 15 | 
 16 | from ultimate_mcp_server.constants import Provider
 17 | from ultimate_mcp_server.tools.extraction import (
 18 |     extract_json,
 19 |     extract_key_value_pairs,
 20 |     extract_table,
 21 | )
 22 | 
 23 | 
 24 | async def test_extract_json():
 25 |     """Test the extract_json function with a simple JSON object."""
 26 |     print("\n--- Testing extract_json ---")
 27 |     # Simplified JSON without nested structures
 28 |     sample_text = """
 29 |     Here's the result of my analysis:
 30 |     
 31 |     {
 32 |       "name": "John Smith",
 33 |       "age": 42,
 34 |       "skills": "programming, design, project management",
 35 |       "email": "[email protected]",
 36 |       "phone": "555-1234"
 37 |     }
 38 |     
 39 |     Let me know if you need any more information.
 40 |     """
 41 |     
 42 |     result = await extract_json(
 43 |         text=sample_text,
 44 |         provider=Provider.OPENAI.value,
 45 |         model="gpt-3.5-turbo"
 46 |     )
 47 |     
 48 |     print(f"Success: {result.get('success', False)}")
 49 |     print(f"Model used: {result.get('model', 'unknown')}")
 50 |     print(f"Tokens: {result.get('tokens', {})}")
 51 |     print(f"Processing time: {result.get('processing_time', 0):.2f}s")
 52 |     
 53 |     # Pretty print the extracted data
 54 |     if result.get('data'):
 55 |         print("Extracted JSON:")
 56 |         print(json.dumps(result['data'], indent=2))
 57 |     else:
 58 |         print("Failed to extract JSON")
 59 |         print(f"Error: {result.get('error', 'unknown error')}")
 60 | 
 61 | async def test_extract_table():
 62 |     """Test the extract_table function with a simple table."""
 63 |     print("\n--- Testing extract_table ---")
 64 |     sample_text = """
 65 |     Here's a summary of our quarterly sales:
 66 |     
 67 |     | Product  | Q1 Sales | Q2 Sales |
 68 |     |----------|----------|----------|
 69 |     | Widget A | 1200     | 1350     |
 70 |     | Widget B | 850      | 940      |
 71 |     
 72 |     As you can see, Widget A performed best in Q2.
 73 |     """
 74 |     
 75 |     result = await extract_table(
 76 |         text=sample_text,
 77 |         return_formats=["json"],  # Just request json to keep it simple
 78 |         provider=Provider.OPENAI.value,
 79 |         model="gpt-3.5-turbo"
 80 |     )
 81 |     
 82 |     print(f"Success: {result.get('success', False)}")
 83 |     print(f"Model used: {result.get('model', 'unknown')}")
 84 |     print(f"Tokens: {result.get('tokens', {})}")
 85 |     print(f"Processing time: {result.get('processing_time', 0):.2f}s")
 86 |     
 87 |     # Print the extracted data
 88 |     if result.get('data'):
 89 |         print("Extracted Table Data:")
 90 |         if isinstance(result['data'], dict) and "json" in result['data']:
 91 |             print("JSON Format:")
 92 |             print(json.dumps(result['data']["json"], indent=2))
 93 |         else:
 94 |             print(json.dumps(result['data'], indent=2))
 95 |     else:
 96 |         print("Failed to extract table")
 97 |         print(f"Error: {result.get('error', 'unknown error')}")
 98 |         if result.get('raw_text'):
 99 |             print(f"Raw text: {result.get('raw_text')[:200]}...")
100 | 
async def test_extract_key_value_pairs():
    """Exercise extract_key_value_pairs on a sample patient record."""
    print("\n--- Testing extract_key_value_pairs ---")
    input_text = """
    Patient Information:
    
    Name: Jane Doe
    DOB: 05/12/1985
    Gender: Female
    Blood Type: O+
    Height: 5'6"
    Weight: 145 lbs
    Allergies: Penicillin, Shellfish
    Primary Care Physician: Dr. Robert Chen
    """
    
    response = await extract_key_value_pairs(
        text=input_text,
        provider=Provider.OPENAI.value,
        model="gpt-3.5-turbo"
    )
    
    print(f"Success: {response.get('success', False)}")
    print(f"Model used: {response.get('model', 'unknown')}")
    print(f"Tokens: {response.get('tokens', {})}")
    print(f"Processing time: {response.get('processing_time', 0):.2f}s")
    
    # Report each extracted pair, or the failure details on a miss.
    pairs = response.get('data')
    if not pairs:
        print("Failed to extract key-value pairs")
        print(f"Error: {response.get('error', 'unknown error')}")
        return
    
    print("Extracted Key-Value Pairs:")
    for field, value in pairs.items():
        print(f"  {field}: {value}")
136 | 
async def main():
    """Run all extraction tests sequentially."""
    print("Testing extraction tools with standardized completion...")
    
    # Run each test coroutine in order; they print their own results.
    for test_case in (test_extract_json, test_extract_table, test_extract_key_value_pairs):
        await test_case()
    
    print("\nAll tests completed.")

if __name__ == "__main__":
    asyncio.run(main())
```

--------------------------------------------------------------------------------
/examples/sample/text_classification_samples/email_classification.txt:
--------------------------------------------------------------------------------

```
  1 | SPAM EMAIL:
  2 | CONGRATULATIONS! You have been selected as the LUCKY WINNER of our INTERNATIONAL LOTTERY! Your email was randomly chosen from our database and you have won $5,000,000.00 USD (FIVE MILLION UNITED STATES DOLLARS). To claim your prize, please contact our claims agent immediately at [email protected] with your full name, address, phone number, and a copy of your ID. A processing fee of $199 is required to release your funds. DO NOT DELAY! This offer expires in 48 HOURS!
  3 | 
  4 | URGENT EMAIL:
  5 | Subject: Critical Security Breach - Immediate Action Required
  6 | 
  7 | Dear IT Team,
  8 | 
  9 | Our monitoring systems have detected an unauthorized access attempt to our main customer database at 3:42 AM EST. The attempt originated from an IP address in Eastern Europe and appears to have successfully extracted approximately 25,000 customer records including names, email addresses, and hashed passwords. Our security team has temporarily shut down external access to the affected systems.
 10 | 
 11 | Please implement the emergency response protocol immediately:
 12 | 1. Activate the incident response team
 13 | 2. Reset all administrative credentials
 14 | 3. Deploy the prepared statement to affected customers
 15 | 4. Begin forensic analysis of the breach vector
 16 | 
 17 | This is classified as a Severity 1 incident requiring immediate attention.
 18 | 
 19 | David Chen
 20 | Chief Information Security Officer
 21 | 
 22 | PROMOTIONAL EMAIL:
 23 | Subject: Summer Sale - 48 Hours Only! Up to 70% Off Everything
 24 | 
 25 | Beat the heat with sizzling savings! 🔥
 26 | 
 27 | Our biggest sale of the season is HERE! For just 48 hours, enjoy:
 28 | • Up to 70% off ALL clothing and accessories
 29 | • Buy one, get one 50% off on summer essentials
 30 | • Free shipping on orders over $50
 31 | • Extra 15% off with code: SUMMER24
 32 | 
 33 | Plus, the first 500 orders receive a FREE beach tote (valued at $45)!
 34 | 
 35 | Don't miss out - sale ends Sunday at midnight.
 36 | 
 37 | Shop now: https://www.fashionretailer.com/summer-sale
 38 | 
 39 | INFORMATIONAL EMAIL:
 40 | Subject: Upcoming System Maintenance - May 15th
 41 | 
 42 | Dear Valued Customer,
 43 | 
 44 | Please be informed that we will be conducting scheduled maintenance on our systems to improve performance and reliability. During this time, our services will be temporarily unavailable.
 45 | 
 46 | Maintenance details:
 47 | • Date: Tuesday, May 15th, 2024
 48 | • Time: 2:00 AM - 5:00 AM EDT (UTC-4)
 49 | • Affected services: Online banking portal, mobile app, and automated phone system
 50 | 
 51 | No action is required on your part. All services will resume automatically once maintenance is complete. We recommend completing any urgent transactions before the maintenance window begins.
 52 | 
 53 | We apologize for any inconvenience this may cause and appreciate your understanding as we work to enhance your experience.
 54 | 
 55 | Sincerely,
 56 | Customer Support Team
 57 | First National Bank
 58 | 
 59 | PHISHING EMAIL:
 60 | Subject: Your Amazon Account Has Been Suspended
 61 | 
 62 | Dear Valued Customer,
 63 | 
 64 | We regret to inform you that your Amazon account has been temporarily suspended due to unusual activity. Our security system has detected multiple failed login attempts from unrecognized devices.
 65 | 
 66 | To verify your identity and restore your account access, please update your payment information by clicking the link below:
 67 | 
 68 | >> Restore Account Access Now <<
 69 | 
 70 | If you do not verify your account within 24 hours, your account will be permanently deactivated and all pending orders will be canceled.
 71 | 
 72 | Thank you for your immediate attention to this matter.
 73 | 
 74 | Amazon Customer Service Team
 75 | 
 76 | PERSONAL EMAIL:
 77 | Subject: Vacation Plans for Next Month
 78 | 
 79 | Hi Sarah,
 80 | 
 81 | How are you doing? I hope everything's going well with the new job! I've been thinking about our conversation last month about taking a short vacation together, and I wanted to follow up.
 82 | 
 83 | I checked some options for that beach town we talked about, and there are some great deals for the weekend of the 15th. I found a cute rental cottage about two blocks from the beach for $180/night, which seems reasonable if we split it. The weather should be perfect that time of year too.
 84 | 
 85 | Let me know if you're still interested and if those dates work for you. I could book it this week to secure the place before summer rates kick in.
 86 | 
 87 | Can't wait to catch up properly!
 88 | 
 89 | Talk soon,
 90 | Michael
 91 | 
 92 | TRANSACTIONAL EMAIL:
 93 | Subject: Order #78291 Confirmation - Your Purchase from TechGadgets
 94 | 
 95 | Dear Alex Rodriguez,
 96 | 
 97 | Thank you for your recent purchase from TechGadgets. We're processing your order and will ship it soon.
 98 | 
 99 | Order Details:
100 | • Order Number: #78291
101 | • Order Date: April 3, 2024
102 | • Payment Method: Visa ending in 4872
103 | • Shipping Method: Standard (3-5 business days)
104 | 
105 | Items Purchased:
106 | 1. Wireless Earbuds Pro - Black (1) - $129.99
107 | 2. Fast Charging Cable 6ft (2) - $19.99 each
108 | 3. Screen Protector Ultra (1) - $24.99
109 | 
110 | Subtotal: $194.96
111 | Shipping: $5.99
112 | Tax: $16.57
113 | Total: $217.52
114 | 
115 | You will receive a shipping confirmation email with tracking information once your order ships. You can also check your order status anytime by logging into your account.
116 | 
117 | If you have any questions about your order, please contact our customer service team at [email protected] or call 1-800-555-1234.
118 | 
119 | Thank you for shopping with us!
120 | 
121 | The TechGadgets Team 
```

--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------

```toml
  1 | [build-system]
  2 | requires = ["hatchling"]
  3 | build-backend = "hatchling.build"
  4 | 
  5 | [project]
  6 | name = "ultimate_mcp_server"
  7 | version = "0.1.0"
  8 | description = "The Ultimate Model Context Protocol (MCP) Server, providing unified access to a wide variety of useful and powerful tools."
  9 | readme = "README.md"
 10 | requires-python = ">=3.13"
 11 | license = {file = "LICENSE"}
 12 | authors = [
 13 |     {name = "Jeffrey Emanuel", email = "[email protected]"},
 14 | ]
 15 | maintainers = [
 16 |     {name = "Jeffrey Emanuel", email = "[email protected]"},
 17 | ]
keywords = ["ultimate", "mcp", "server", "agent", "ai", "claude", "gpt", "gemini", "deepseek"]
 19 | classifiers = [
 20 |     "Development Status :: 3 - Alpha",
 21 |     "Intended Audience :: Developers",
 22 |     "License :: OSI Approved :: MIT License",
 23 |     "Programming Language :: Python :: 3.13",
 24 |     "Topic :: Scientific/Engineering :: Artificial Intelligence",
 25 | ]
 26 | 
 27 | dependencies = [
 28 |     # Core MCP and LLM providers
 29 |     "mcp>=0",
 30 |     "anthropic>=0",
 31 |     "openai>=0",
 32 |     "google-genai>=0",
 33 |     # Async utilities
 34 |     "httpx>=0",
 35 |     "aiofiles>=0",
 36 |     # Data processing
 37 |     "pydantic>=0",
 38 |     "tenacity>=0", # For retry logic
 39 |     # Caching and persistence
 40 |     "diskcache>=0", # Persistent disk cache
 41 |     "msgpack>=0", # Efficient serialization
 42 |     # Vector database for semantic caching
 43 |     "numpy>=0",
 44 |     "sentence-transformers>=0", # For embeddings
 45 |     "chromadb>=0", # Vector DB
 46 |     # Analytics and monitoring
 47 |     "prometheus-client>=0",
 48 |     "pandas>=0",
 49 |     "rich>=0", # Console output formatting
 50 |     # Templating for prompt management
 51 |     "jinja2>=0",
 52 |     # Multi-modal support
 53 |     "pillow>=0", # Image processing
 54 |     # Utilities
 55 |     "python-slugify>=0", # For URL-friendly strings
 56 |     "colorama>=0", # Terminal colors
 57 |     "tqdm>=0", # Progress bars
 58 |     "tiktoken>=0", # Token counting
 59 |     "python-decouple>=0", # .env management
 60 |     "pydantic-settings>=0",
 61 |     "jsonschema>=0",
 62 |     "matplotlib>=0",
 63 |     "marqo>=0", # Added for Marqo search tool
 64 |     "pytest-playwright>=0", # For web browser automation
 65 |     "sqlalchemy>=0", # For SQL database interactions
 66 |     "aiosqlite>=0", # Async SQLite database access
 67 |     "pyvis>=0", # Graph visualization
 68 |     "python-docx>=0", # MS Word DOCX support
 69 |     "opencv-python>=0", # For OCR tools
 70 |     "pytesseract>=0", # For OCR
 71 |     "pdf2image>=0", # For OCR
 72 |     "PyPDF2>=0", # PDF conversion
 73 |     "pdfplumber>=0", # For OCR
 74 |     "fitz>=0", # For OCR
 75 |     "pymupdf>=0", # For OCR
 76 |     "beautifulsoup4>=0", # Dealing with HTML
 77 |     "xmldiff>=0", # for redlines
 78 |     "lxml>=0", # XML parser
 79 |     "faster-whisper>=0", # Audio transcripts
 80 |     "html2text>=0",
 81 |     "readability-lxml>=0",
 82 |     "markdownify>=0",
 83 |     "trafilatura>=0",
 84 |     "markdown>=0",
 85 |     "jsonpatch>=0",
 86 |     "jsonpointer>=0",
 87 |     "pygments>=0",
 88 |     "typer>=0", # For CLI interface
 89 |     "docling>=0", # For document conversion
 90 |     "aiohttp>=0",
 91 |     "boto3>=0", # For AWS secrets management
 92 |     "hvac>=0", # For HashiVault pw management
 93 |     "pandera>=0", # Data validation
 94 |     "rapidfuzz>=0",
 95 |     "magika>=0",
 96 |     "tabula-py>=0",
 97 |     "brotli>=0",
 98 |     "pygments>=0",
 99 |     "fastapi>=0.115.9",
100 |     "uvicorn>=0.34.2",
101 |     "networkx>0",
102 |     "scipy>0",
103 |     "fastmcp>0",
104 | 
105 | ]
106 | 
107 | [project.optional-dependencies]
108 | advanced = [
109 |   "torch",
110 |   "torchvision",
111 |   "torchaudio",
112 |   "pytorch-triton",
113 |   "transformers>=0",
114 |   "accelerate>=0",
115 | ]
116 | 
117 | #excel_automation = [
118 | #    "win32com", # Excel automation,
119 | #    "win32com",
120 | #]
121 | 
122 | # Development and testing
123 | dev = [
124 |     "pytest>=0",
125 |     "pytest-asyncio>=0",
126 |     "pytest-cov>=0",
127 |     "isort>=0",
128 |     "mypy>=0",
129 |     "ruff>=0",
130 |     "types-aiofiles>=0",
131 |     "pre-commit>=0",
132 | ]
133 | 
134 | # Documentation
135 | docs = [
136 |     "mkdocs>=0",
137 |     "mkdocs-material>=0",
138 |     "mkdocstrings>=0",
139 |     "mkdocstrings-python>=0",
140 | ]
141 | 
142 | # All extras
143 | all = ["ultimate_mcp_server[advanced,dev,docs]"]
144 | 
145 | [[tool.uv.index]]
146 | name = "pypi"
147 | url  = "https://pypi.org/simple"
148 | 
149 | [tool.uv.pip]
150 | prerelease = "allow"
151 | torch-backend = "auto"
152 | 
153 | [project.urls]
154 | Homepage = "https://github.com/Dicklesworthstone/ultimate_mcp_server"
155 | Documentation = "https://github.com/Dicklesworthstone/ultimate_mcp_server/docs"
156 | Repository = "https://github.com/Dicklesworthstone/ultimate_mcp_server.git"
157 | "Bug Reports" = "https://github.com/Dicklesworthstone/ultimate_mcp_server/issues"
158 | 
159 | [project.scripts]
160 | umcp = "ultimate_mcp_server.cli.typer_cli:cli"
161 | 
162 | [tool.hatch.version]
163 | path = "ultimate_mcp_server/__init__.py"
164 | 
165 | [tool.hatch.build.targets.sdist]
166 | include = [
167 |     "/ultimate_mcp_server",
168 |     "/examples",
169 |     "/tests",
170 |     "LICENSE",
171 |     "README.md",
172 |     "pyproject.toml",
173 | ]
174 | 
175 | [tool.hatch.build.targets.wheel]
176 | packages = ["ultimate_mcp_server"]
177 | 
178 | [tool.black]
179 | line-length = 100
180 | target-version = ["py313"]
181 | include = '\.pyi?$'
182 | 
183 | [tool.isort]
184 | profile = "black"
185 | line_length = 100
186 | multi_line_output = 3
187 | 
188 | [tool.mypy]
189 | python_version = "3.13"
190 | warn_return_any = true
191 | warn_unused_configs = true
192 | disallow_untyped_defs = true
193 | disallow_incomplete_defs = true
194 | check_untyped_defs = true
195 | disallow_untyped_decorators = true
196 | no_implicit_optional = true
197 | strict_optional = true
198 | 
199 | [tool.pytest.ini_options]
200 | minversion = "7.0"
201 | addopts = "--cov=ultimate_mcp_server --cov-report=term-missing -v"
202 | testpaths = ["tests"]
203 | asyncio_mode = "auto"
204 | asyncio_default_fixture_loop_scope = "function"
205 | 
206 | [tool.ruff]
207 | line-length = 100
208 | target-version = "py313"
209 | 
210 | [tool.ruff.lint]
211 | select = ["E", "F", "B", "I", "Q"]
212 | ignore = ["E203", "E501", "Q000"]
213 | 
```

--------------------------------------------------------------------------------
/ultimate_mcp_server/services/cache/persistence.py:
--------------------------------------------------------------------------------

```python
  1 | """Cache persistence mechanisms."""
  2 | import json
  3 | import os
  4 | import pickle
  5 | from pathlib import Path
  6 | from typing import Any, Dict, Optional
  7 | 
  8 | import aiofiles
  9 | 
 10 | from ultimate_mcp_server.utils import get_logger
 11 | 
 12 | logger = get_logger(__name__)
 13 | 
 14 | 
class CachePersistence:
    """Handles cache persistence operations.

    Cache entries are pickled to ``cache.pkl`` and optional metadata is
    stored as JSON in ``metadata.json``, both inside ``cache_dir``. Writes
    go to a ``.tmp`` sibling first and are then moved into place with
    ``os.replace`` so readers never observe a partially written file.

    NOTE(review): the cache file is deserialized with ``pickle.loads``, so
    the cache directory must be trusted/local — never point it at data an
    attacker can write.
    """
    
    def __init__(self, cache_dir: Path):
        """Initialize the cache persistence handler.
        
        Args:
            cache_dir: Directory for cache storage
        """
        self.cache_dir = cache_dir
        # Fixed file names inside the cache directory.
        self.cache_file = cache_dir / "cache.pkl"
        self.metadata_file = cache_dir / "metadata.json"
        
        # Create cache directory if it doesn't exist
        self.cache_dir.mkdir(parents=True, exist_ok=True)
        
    async def save_cache(self, data: Dict[str, Any], metadata: Optional[Dict[str, Any]] = None) -> bool:
        """Save cache data to disk.
        
        Args:
            data: Cache data to save
            metadata: Optional metadata about the cache
            
        Returns:
            True if successful; False on any failure (the error is logged,
            never raised to the caller)
        """
        try:
            # Write to a temp file first so a crash mid-write cannot
            # corrupt the existing cache file.
            temp_file = f"{self.cache_file}.tmp"
            async with aiofiles.open(temp_file, 'wb') as f:
                await f.write(pickle.dumps(data))
                
            # Rename temp file to cache file (atomic operation)
            os.replace(temp_file, self.cache_file)
            
            # Save metadata if provided
            if metadata:
                await self.save_metadata(metadata)
                
            logger.debug(
                f"Saved cache data to {self.cache_file}",
                emoji_key="cache"
            )
            return True
            
        except Exception as e:
            logger.error(
                f"Failed to save cache data: {str(e)}",
                emoji_key="error"
            )
            return False
            
    async def load_cache(self) -> Optional[Dict[str, Any]]:
        """Load cache data from disk.
        
        Returns:
            Cache data or None if file doesn't exist or error occurs
            (deserialization/IO errors are logged, not raised)
        """
        if not self.cache_file.exists():
            return None
            
        try:
            # Read the whole pickle payload, then deserialize outside the
            # file handle's scope.
            async with aiofiles.open(self.cache_file, 'rb') as f:
                data = await f.read()
                
            cache_data = pickle.loads(data)
            
            logger.debug(
                f"Loaded cache data from {self.cache_file}",
                emoji_key="cache"
            )
            return cache_data
            
        except Exception as e:
            logger.error(
                f"Failed to load cache data: {str(e)}",
                emoji_key="error"
            )
            return None
    
    async def save_metadata(self, metadata: Dict[str, Any]) -> bool:
        """Save cache metadata to disk.
        
        Args:
            metadata: Metadata to save (must be JSON-serializable)
            
        Returns:
            True if successful; False on failure (error is logged)
        """
        try:
            # Same temp-file-then-replace pattern as save_cache, so the
            # metadata file is never left half-written.
            temp_file = f"{self.metadata_file}.tmp"
            async with aiofiles.open(temp_file, 'w') as f:
                await f.write(json.dumps(metadata, indent=2))
                
            # Rename temp file to metadata file (atomic operation)
            os.replace(temp_file, self.metadata_file)
            
            return True
            
        except Exception as e:
            logger.error(
                f"Failed to save cache metadata: {str(e)}",
                emoji_key="error"
            )
            return False
            
    async def load_metadata(self) -> Optional[Dict[str, Any]]:
        """Load cache metadata from disk.
        
        Returns:
            Metadata or None if file doesn't exist or error occurs
            (parse/IO errors are logged, not raised)
        """
        if not self.metadata_file.exists():
            return None
            
        try:
            async with aiofiles.open(self.metadata_file, 'r') as f:
                data = await f.read()
                
            metadata = json.loads(data)
            
            return metadata
            
        except Exception as e:
            logger.error(
                f"Failed to load cache metadata: {str(e)}",
                emoji_key="error"
            )
            return None
            
    async def cleanup_old_cache_files(self, max_age_days: int = 30) -> int:
        """Clean up old cache files.

        Only leftover ``*.tmp`` files are considered for deletion; the live
        ``cache.pkl`` and ``metadata.json`` are never touched.
        
        Args:
            max_age_days: Maximum age of cache files in days
            
        Returns:
            Number of files deleted (possibly a partial count if an error
            interrupts the scan — errors are logged, not raised)
        """
        # Local import keeps module import time free of this dependency.
        import time
        
        now = time.time()
        max_age_seconds = max_age_days * 24 * 60 * 60
        
        deleted_count = 0
        
        try:
            # Find all leftover temp files from interrupted writes.
            cache_files = list(self.cache_dir.glob("*.tmp"))
            
            # Delete files whose modification time exceeds the age cutoff.
            for file_path in cache_files:
                mtime = file_path.stat().st_mtime
                age = now - mtime
                
                if age > max_age_seconds:
                    file_path.unlink()
                    deleted_count += 1
                    
            if deleted_count > 0:
                logger.info(
                    f"Cleaned up {deleted_count} old cache files",
                    emoji_key="cache"
                )
                
            return deleted_count
            
        except Exception as e:
            logger.error(
                f"Failed to clean up old cache files: {str(e)}",
                emoji_key="error"
            )
            return deleted_count
```

--------------------------------------------------------------------------------
/check_api_keys.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python
  2 | """Script to check API key configurations for Ultimate MCP Server using rich formatting."""
  3 | import asyncio
  4 | import sys
  5 | from pathlib import Path
  6 | 
  7 | # Add project root to path for imports
  8 | sys.path.insert(0, str(Path(__file__).parent))
  9 | 
 10 | from rich.console import Console
 11 | from rich.panel import Panel
 12 | from rich.table import Table
 13 | from rich.text import Text
 14 | 
 15 | from ultimate_mcp_server.config import get_config
 16 | from ultimate_mcp_server.constants import Provider
 17 | from ultimate_mcp_server.core.server import Gateway
 18 | from ultimate_mcp_server.utils import get_logger
 19 | 
 20 | # Initialize rich console
 21 | console = Console()
 22 | 
 23 | logger = get_logger("api_key_checker")
 24 | 
 25 | # Map provider names to the corresponding environment variable names
 26 | # Used for informational display only
 27 | PROVIDER_ENV_VAR_MAP = {
 28 |     "openai": "OPENAI_API_KEY",
 29 |     "anthropic": "ANTHROPIC_API_KEY",
 30 |     "deepseek": "DEEPSEEK_API_KEY",
 31 |     "gemini": "GEMINI_API_KEY",
 32 |     "openrouter": "OPENROUTER_API_KEY",
 33 | }
 34 | 
 35 | async def check_api_keys() -> int:
 36 |     """
 37 |     Check API key configurations and display a comprehensive report.
 38 |     
 39 |     This async function:
 40 |     1. Loads the current configuration settings from all sources (environment variables,
 41 |        .env file, configuration files)
 42 |     2. Initializes a minimal Gateway instance to access provider configurations
 43 |     3. Checks if API keys are properly configured for all supported providers
 44 |     4. Displays formatted results using rich tables and panels, including:
 45 |        - Provider-by-provider API key status
 46 |        - Configuration loading priority information
 47 |        - How to set API keys properly
 48 |        - Example .env file content
 49 |     
 50 |     The function checks keys for all providers defined in the Provider enum,
 51 |     including OpenAI, Anthropic, DeepSeek, Gemini, and OpenRouter.
 52 |     
 53 |     Returns:
 54 |         int: Exit code (0 for success)
 55 |     """
 56 |     # Force load config to ensure we get the latest resolved settings
 57 |     cfg = get_config()
 58 |     
 59 |     # Create Gateway with minimal initialization (no tools) - kept for potential future checks
 60 |     gateway = Gateway(name="api-key-checker", register_tools=False)  # noqa: F841
 61 |     
 62 |     console.print(Panel(
 63 |         "Checking API Key Configuration based on loaded settings",
 64 |         title="[bold cyan]Ultimate MCP Server API Key Check[/bold cyan]",
 65 |         expand=False,
 66 |         border_style="blue"
 67 |     ))
 68 |     
 69 |     # Create table for results
 70 |     table = Table(title="Provider API Key Status", show_header=True, header_style="bold magenta")
 71 |     table.add_column("Provider", style="dim", width=12)
 72 |     table.add_column("API Key Status", style="cyan")
 73 |     table.add_column("Relevant Env Var", style="yellow")
 74 |     table.add_column("Status", style="bold")
 75 |     
 76 |     # Check each provider based on the loaded configuration
 77 |     for provider_name in [p.value for p in Provider]:
 78 |         # Get provider config from the loaded GatewayConfig object
 79 |         provider_config = getattr(cfg.providers, provider_name, None)
 80 |         
 81 |         # Check if key exists in the loaded config
 82 |         # This key would have been resolved from .env, env vars, or config file by get_config()
 83 |         config_key = provider_config.api_key if provider_config else None
 84 |         
 85 |         # Format key for display (if present)
 86 |         key_display = Text("Not set in config", style="dim yellow")
 87 |         status_text = Text("NOT CONFIGURED", style="red")
 88 |         status_icon = "❌"
 89 |         
 90 |         if config_key:
    |             # Heuristic: real API keys are longer than 8 chars; mask them to first/last 4 for display.
 91 |             if len(config_key) > 8:
 92 |                 key_display = Text(f"{config_key[:4]}...{config_key[-4:]}", style="green")
 93 |             else:
 94 |                 key_display = Text("[INVALID KEY FORMAT]", style="bold red")
 95 |             status_text = Text("CONFIGURED", style="green")
 96 |             status_icon = "✅"
 97 |         
 98 |         # Get the corresponding environment variable name for informational purposes
 99 |         env_var_name = PROVIDER_ENV_VAR_MAP.get(provider_name, "N/A")
100 |         
101 |         # Add row to table
102 |         table.add_row(
103 |             provider_name.capitalize(),
104 |             key_display,
105 |             env_var_name,
106 |             f"[{status_text.style}]{status_icon} {status_text}[/]"
107 |         )
108 |     
109 |     # Print the table
110 |     console.print(table)
111 |     
112 |     # Configuration Loading Info Panel
113 |     config_info = Text.assemble(
114 |         ("1. ", "bold blue"), ("Environment Variables", "cyan"), (" (e.g., ", "dim"), ("GATEWAY_PROVIDERS__OPENAI__API_KEY=...", "yellow"), (")\n", "dim"),
115 |         ("2. ", "bold blue"), ("Values in a ", "cyan"), (".env", "yellow"), (" file in the project root\n", "cyan"),
116 |         ("3. ", "bold blue"), ("Values in a config file", "cyan"), (" (e.g., ", "dim"), ("gateway_config.yaml", "yellow"), (")\n", "dim"),
117 |         ("4. ", "bold blue"), ("Default values defined in the configuration models", "cyan")
118 |     )
119 |     console.print(Panel(config_info, title="[bold]Configuration Loading Priority[/]", border_style="blue"))
120 |     
121 |     # How to Set Keys Panel
122 |     set_keys_info = Text.assemble(
123 |         ("Ensure API keys are available via one of the methods above,\n", "white"),
124 |         ("preferably using ", "white"), ("environment variables", "cyan"), (" or a ", "white"), (".env", "yellow"), (" file.", "white")
125 |     )
126 |     console.print(Panel(set_keys_info, title="[bold]How to Set API Keys[/]", border_style="green"))
127 |     
128 |     # Example .env Panel
129 |     env_example_lines = []
130 |     for env_var in PROVIDER_ENV_VAR_MAP.values():
131 |         env_example_lines.append(Text.assemble((env_var, "yellow"), "=", ("your_", "dim"), (env_var.lower(), "dim cyan"), ("_here", "dim")))
132 |     env_example_content = Text("\n").join(env_example_lines)
133 |     console.print(Panel(env_example_content, title="[bold dim]Example .env file content[/]", border_style="yellow"))
134 |     
135 |     console.print("[bold green]Run your example scripts or the main server after setting the API keys.[/bold green]")
136 |     return 0
137 | 
    | # Script entry point: run the async check and propagate its exit code to the shell.
138 | if __name__ == "__main__":
139 |     exit_code = asyncio.run(check_api_keys())
140 |     sys.exit(exit_code) 
```

--------------------------------------------------------------------------------
/ultimate_mcp_server/services/prompts/repository.py:
--------------------------------------------------------------------------------

```python
  1 | """Prompt repository for managing and accessing prompts."""
  2 | import asyncio
  3 | import json
  4 | import os
  5 | from pathlib import Path
  6 | from typing import Any, Dict, List, Optional, Union
  7 | 
  8 | import aiofiles
  9 | 
 10 | from ultimate_mcp_server.utils import get_logger
 11 | 
 12 | logger = get_logger(__name__)
 13 | 
 14 | 
 15 | class PromptRepository:
 16 |     """Repository for managing and accessing prompts."""
 17 |     
 18 |     _instance = None
 19 |     
 20 |     def __new__(cls, *args, **kwargs):
 21 |         """Singleton implementation for prompt repository."""
 22 |         if cls._instance is None:
 23 |             cls._instance = super(PromptRepository, cls).__new__(cls)
 24 |             cls._instance._initialized = False
 25 |         return cls._instance
 26 |     
 27 |     def __init__(self, base_dir: Optional[Union[str, Path]] = None):
 28 |         """Initialize the prompt repository.
 29 |         
 30 |         Args:
 31 |             base_dir: Base directory for prompt storage
 32 |         """
 33 |         # Only initialize once for singleton
    |         # (later constructions return early here, so any ``base_dir`` passed after
    |         # the first instantiation is silently ignored)
 34 |         if self._initialized:
 35 |             return
 36 |             
 37 |         # Set base directory
 38 |         if base_dir:
 39 |             self.base_dir = Path(base_dir)
 40 |         else:
 41 |             # Default to ~/.ultimate/prompts under the user's home directory
 42 |             self.base_dir = Path.home() / ".ultimate" / "prompts"
 43 |             
 44 |         # Create directory if it doesn't exist
 45 |         self.base_dir.mkdir(parents=True, exist_ok=True)
 46 |         
 47 |         # Cache for prompts: maps prompt_id -> prompt data dict, filled lazily
 48 |         self.prompts: Dict[str, Dict[str, Any]] = {}
 49 |         
 50 |         # Flag as initialized
 51 |         self._initialized = True
 52 |         
 53 |         logger.info(
 54 |             f"Prompt repository initialized (base_dir: {self.base_dir})",
 55 |             emoji_key="provider"
 56 |         )
 57 |     
 58 |     async def get_prompt(self, prompt_id: str) -> Optional[Dict[str, Any]]:
 59 |         """Get a prompt by ID.
 60 |         
 61 |         Args:
 62 |             prompt_id: Prompt identifier
 63 |             
 64 |         Returns:
 65 |             Prompt data or None if not found
 66 |         """
 67 |         # Check cache first
 68 |         if prompt_id in self.prompts:
 69 |             return self.prompts[prompt_id]
 70 |             
 71 |         # Try to load from file
 72 |         prompt_path = self.base_dir / f"{prompt_id}.json"
 73 |         if not prompt_path.exists():
 74 |             logger.warning(
 75 |                 f"Prompt '{prompt_id}' not found",
 76 |                 emoji_key="warning"
 77 |             )
 78 |             return None
 79 |             
 80 |         try:
 81 |             # Load prompt from file
    |             # NOTE(review): a fresh asyncio.Lock() is created per call, so it can never be
    |             # contended and provides no real synchronization; the read itself uses blocking
    |             # open()/json.load() on the event loop — confirm whether that is acceptable here.
 82 |             async with asyncio.Lock():
 83 |                 with open(prompt_path, "r", encoding="utf-8") as f:
 84 |                     prompt_data = json.load(f)
 85 |                     
 86 |             # Cache for future use
 87 |             self.prompts[prompt_id] = prompt_data
 88 |             
 89 |             return prompt_data
 90 |         except Exception as e:
 91 |             logger.error(
 92 |                 f"Error loading prompt '{prompt_id}': {str(e)}",
 93 |                 emoji_key="error"
 94 |             )
 95 |             return None
 96 |     
 97 |     async def save_prompt(self, prompt_id: str, prompt_data: Dict[str, Any]) -> bool:
 98 |         """Save a prompt.
 99 |         
100 |         Args:
101 |             prompt_id: Prompt identifier
102 |             prompt_data: Prompt data to save
103 |             
104 |         Returns:
105 |             True if successful
106 |         """
107 |         # Validate prompt data
108 |         if not isinstance(prompt_data, dict) or "template" not in prompt_data:
109 |             logger.error(
110 |                 f"Invalid prompt data for '{prompt_id}'",
111 |                 emoji_key="error"
112 |             )
113 |             return False
114 |             
115 |         try:
116 |             # Save to cache
117 |             self.prompts[prompt_id] = prompt_data
118 |             
119 |             # Save to file
120 |             prompt_path = self.base_dir / f"{prompt_id}.json"
    |             # NOTE(review): as in get_prompt, this per-call asyncio.Lock() cannot
    |             # actually serialize concurrent writers — confirm intent.
121 |             async with asyncio.Lock():
122 |                 async with aiofiles.open(prompt_path, "w", encoding="utf-8") as f:
123 |                     await f.write(json.dumps(prompt_data, indent=2))
124 |                     
125 |             logger.info(
126 |                 f"Saved prompt '{prompt_id}'",
127 |                 emoji_key="success"
128 |             )
129 |             return True
130 |         except Exception as e:
131 |             logger.error(
132 |                 f"Error saving prompt '{prompt_id}': {str(e)}",
133 |                 emoji_key="error"
134 |             )
135 |             return False
136 |     
137 |     async def delete_prompt(self, prompt_id: str) -> bool:
138 |         """Delete a prompt.
139 |         
140 |         Args:
141 |             prompt_id: Prompt identifier
142 |             
143 |         Returns:
144 |             True if successful
145 |         """
146 |         # Remove from cache
147 |         if prompt_id in self.prompts:
148 |             del self.prompts[prompt_id]
149 |             
150 |         # Remove file if exists
151 |         prompt_path = self.base_dir / f"{prompt_id}.json"
152 |         if prompt_path.exists():
153 |             try:
154 |                 os.remove(prompt_path)
155 |                 logger.info(
156 |                     f"Deleted prompt '{prompt_id}'",
157 |                     emoji_key="success"
158 |                 )
159 |                 return True
160 |             except Exception as e:
161 |                 logger.error(
162 |                     f"Error deleting prompt '{prompt_id}': {str(e)}",
163 |                     emoji_key="error"
164 |                 )
165 |                 return False
166 |         
    |         # No file on disk: reports failure even if a cache entry was just removed above.
167 |         return False
168 |     
169 |     async def list_prompts(self) -> List[str]:
170 |         """List available prompts.
171 |         
172 |         Returns:
173 |             List of prompt IDs
174 |         """
175 |         try:
176 |             # Get prompt files (prompt ID == JSON filename stem)
177 |             prompt_files = list(self.base_dir.glob("*.json"))
178 |             
179 |             # Extract IDs from filenames
180 |             prompt_ids = [f.stem for f in prompt_files]
181 |             
182 |             return prompt_ids
183 |         except Exception as e:
184 |             logger.error(
185 |                 f"Error listing prompts: {str(e)}",
186 |                 emoji_key="error"
187 |             )
188 |             return []
189 | 
190 | 
191 | def get_prompt_repository(base_dir: Optional[Union[str, Path]] = None) -> PromptRepository:
192 |     """Get the prompt repository singleton instance.
193 |     
194 |     Args:
195 |         base_dir: Base directory for prompt storage
    |             (only honored on the first call; subsequent calls return the existing
    |             singleton and ignore this argument)
196 |         
197 |     Returns:
198 |         PromptRepository singleton instance
199 |     """
200 |     return PromptRepository(base_dir)
```

--------------------------------------------------------------------------------
/examples/simple_completion_demo.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python
  2 | """
  3 | Simple completion demo using Ultimate MCP Server's direct provider functionality.
  4 | 
  5 | This example demonstrates how to:
  6 | 1. Initialize the Ultimate MCP Server Gateway
  7 | 2. Connect directly to an LLM provider (OpenAI)
  8 | 3. Generate a text completion with a specific model
  9 | 4. Track and display token usage and costs
 10 | 
 11 | The demo bypasses the MCP tool interface and interacts directly with provider APIs,
 12 | which is useful for understanding the underlying provider connections or when you need
 13 | lower-level access to provider-specific features. It also showcases the CostTracker
 14 | utility for monitoring token usage and associated costs across multiple requests.
 15 | 
 16 | This script can be run as a standalone Python module and serves as a minimal example of
 17 | direct provider integration with the Ultimate MCP Server framework.
 18 | 
 19 | Usage:
 20 |     python examples/simple_completion_demo.py
 21 | """
 22 | import asyncio
 23 | import sys
 24 | from pathlib import Path
 25 | 
 26 | # Add project root to path for imports when running as script
 27 | sys.path.insert(0, str(Path(__file__).parent.parent))
 28 | 
 29 | from rich.panel import Panel
 30 | from rich.rule import Rule
 31 | from rich.table import Table
 32 | 
 33 | from ultimate_mcp_server.constants import Provider
 34 | from ultimate_mcp_server.core.server import Gateway
 35 | from ultimate_mcp_server.utils import get_logger
 36 | from ultimate_mcp_server.utils.display import CostTracker
 37 | from ultimate_mcp_server.utils.logging.console import console
 38 | 
 39 | # Initialize logger
 40 | logger = get_logger("example.simple_completion")
 41 | 
 42 | async def run_model_demo(tracker: CostTracker) -> int:
 43 |     """
 44 |     Run a simple completion demo using direct provider access to LLM APIs.
 45 |     
 46 |     This function demonstrates the complete workflow for generating text completions
 47 |     using the Ultimate MCP Server framework with direct provider access:
 48 |     
 49 |     1. Initialize a Gateway instance without registering tools
 50 |     2. Initialize the LLM providers from configuration
 51 |     3. Access a specific provider (OpenAI in this case)
 52 |     4. Generate a completion with a specific prompt and model
 53 |     5. Display the completion result with Rich formatting
 54 |     6. Track and display token usage and cost metrics
 55 |     
 56 |     Direct provider access (vs. using MCP tools) offers more control over provider-specific
 57 |     parameters and is useful for applications that need to customize provider interactions
 58 |     beyond what the standard MCP tools offer.
 59 |     
 60 |     Args:
 61 |         tracker: CostTracker instance to record token usage and costs for this operation.
 62 |                 The tracker will be updated with the completion results.
 63 |         
 64 |     Returns:
 65 |         int: Exit code - 0 for success, 1 for failure
 66 |         
 67 |     Raises:
 68 |         Various exceptions may be raised by the provider initialization or completion
 69 |         generation process, but these are logged and contained within this function.
 70 |     """
 71 |     logger.info("Starting simple completion demo", emoji_key="start")
 72 |     # Use Rich Rule for title
 73 |     console.print(Rule("[bold blue]Simple Completion Demo[/bold blue]"))
 74 |     
 75 |     # Create Gateway instance
 76 |     gateway = Gateway("simple-demo", register_tools=False)
 77 |     
 78 |     # Initialize providers
 79 |     logger.info("Initializing providers", emoji_key="provider")
    |     # The demo calls the Gateway's private initializer directly rather than going
    |     # through the MCP tool layer — intentional for this low-level example.
 80 |     await gateway._initialize_providers()
 81 |     
 82 |     # Get provider (OpenAI)
 83 |     provider_name = Provider.OPENAI.value
 84 |     provider = gateway.providers.get(provider_name)
 85 |     
 86 |     if not provider:
 87 |         logger.error(f"Provider {provider_name} not available", emoji_key="error")
 88 |         return 1
 89 |         
 90 |     logger.success(f"Provider {provider_name} initialized", emoji_key="success")
 91 |     
 92 |     # List available models
    |     # (only the count is displayed; the list itself is not used further in this demo)
 93 |     models = await provider.list_models()
 94 |     logger.info(f"Available models: {len(models)}", emoji_key="model")
 95 |     
 96 |     # Pick a valid model from the provider
 97 |     model = "gpt-4.1-mini"  # A valid model from constants.py
 98 |     
 99 |     # Generate a completion
100 |     prompt = "Explain quantum computing in simple terms."
101 |     
102 |     logger.info(f"Generating completion with {model}", emoji_key="processing")
103 |     result = await provider.generate_completion(
104 |         prompt=prompt,
105 |         model=model,
106 |         temperature=0.7,
107 |         max_tokens=150
108 |     )
109 |     
110 |     # Print the result using Rich Panel
111 |     logger.success("Completion generated successfully!", emoji_key="success")
112 |     console.print(Panel(
113 |         result.text.strip(),
114 |         title=f"Quantum Computing Explanation ({model})",
115 |         subtitle=f"Prompt: {prompt}",
116 |         border_style="green",
117 |         expand=False
118 |     ))
119 |     
120 |     # Print stats using Rich Table
121 |     stats_table = Table(title="Completion Stats", show_header=False, box=None)
122 |     stats_table.add_column("Metric", style="cyan")
123 |     stats_table.add_column("Value", style="white")
124 |     stats_table.add_row("Input Tokens", str(result.input_tokens))
125 |     stats_table.add_row("Output Tokens", str(result.output_tokens))
126 |     stats_table.add_row("Cost", f"${result.cost:.6f}")
127 |     stats_table.add_row("Processing Time", f"{result.processing_time:.2f}s")
128 |     console.print(stats_table)
129 | 
130 |     # Track the call
131 |     tracker.add_call(result)
132 | 
133 |     # Display cost summary
134 |     tracker.display_summary(console)
135 | 
136 |     return 0
137 | 
138 | async def main() -> int:
139 |     """
140 |     Entry point function that sets up the demo environment and error handling.
141 |     
142 |     This function:
143 |     1. Creates a CostTracker instance to monitor token usage and costs
144 |     2. Calls the run_model_demo function within a try-except block
145 |     3. Handles and logs any uncaught exceptions
146 |     4. Returns an appropriate exit code based on execution success/failure
147 |     
148 |     The separation between main() and run_model_demo() allows for clean error handling
149 |     and resource management at the top level while keeping the demo logic organized
150 |     in its own function.
151 |     
152 |     Returns:
153 |         int: Exit code - 0 for success, 1 for failure
154 |     """
155 |     tracker = CostTracker()
156 |     try:
157 |         return await run_model_demo(tracker)
158 |     except Exception as e:
    |         # Top-level catch-all: log the failure and convert it into a non-zero exit code.
159 |         logger.critical(f"Demo failed: {str(e)}", emoji_key="critical")
160 |         return 1
161 | 
162 | if __name__ == "__main__":
163 |     # Run the demo
164 |     exit_code = asyncio.run(main())
165 |     sys.exit(exit_code) 
```

--------------------------------------------------------------------------------
/examples/sample/medical_case.txt:
--------------------------------------------------------------------------------

```
  1 | PATIENT MEDICAL RECORD
  2 | Memorial Hospital Medical Center
  3 | 123 Medical Center Blvd, Boston, MA 02118
  4 | 
  5 | CONFIDENTIAL - FOR MEDICAL PERSONNEL ONLY
  6 | 
  7 | Patient ID: MH-459872
  8 | Date of Admission: April 10, 2025
  9 | Attending Physician: Dr. Elizabeth Chen, MD (Cardiology)
 10 | Consulting Physicians: Dr. Robert Martinez, MD (Neurology), Dr. Sarah Williams, MD (Endocrinology)
 11 | 
 12 | PATIENT INFORMATION
 13 | Name: John Anderson
 14 | DOB: 05/22/1968 (57 years old)
 15 | Gender: Male
 16 | Address: 45 Maple Street, Apt 3B, Cambridge, MA 02139
 17 | Contact: (617) 555-3829
 18 | Emergency Contact: Mary Anderson (Wife) - (617) 555-4912
 19 | Insurance: BlueCross BlueShield, Policy #BCB-88765432
 20 | 
 21 | CHIEF COMPLAINT
 22 | Patient presented to the Emergency Department with acute chest pain, shortness of breath, and left arm numbness beginning approximately 2 hours prior to arrival.
 23 | 
 24 | CURRENT MEDICATIONS
 25 | 1. Metformin 1000mg twice daily (for Type 2 Diabetes, prescribed by Dr. Williams in 2019)
 26 | 2. Atorvastatin 40mg daily (for Hypercholesterolemia, prescribed by Dr. Chen in 2022)
 27 | 3. Lisinopril 20mg daily (for Hypertension, prescribed by Dr. Chen in 2022)
 28 | 4. Aspirin 81mg daily (for cardiovascular health, prescribed by Dr. Chen in 2022)
 29 | 5. Sertraline 50mg daily (for Depression, prescribed by Dr. Thomas Gordon in 2023)
 30 | 
 31 | ALLERGIES
 32 | 1. Penicillin (Severe - Hives, Difficulty Breathing)
 33 | 2. Shellfish (Moderate - Gastrointestinal distress)
 34 | 
 35 | PAST MEDICAL HISTORY
 36 | 1. Type 2 Diabetes Mellitus (Diagnosed 2019 by Dr. Williams)
 37 | 2. Hypertension (Diagnosed 2020 by Dr. Chen)
 38 | 3. Hypercholesterolemia (Diagnosed 2020 by Dr. Chen)
 39 | 4. Depression (Diagnosed 2023 by Dr. Gordon)
 40 | 5. Left knee arthroscopy (2015, Boston Orthopedic Center, Dr. James Miller)
 41 | 
 42 | FAMILY HISTORY
 43 | - Father: Deceased at age 68 from myocardial infarction, had hypertension, type 2 diabetes
 44 | - Mother: Living, age 82, has hypertension, osteoarthritis
 45 | - Brother: Age 60, has type 2 diabetes, hypercholesterolemia
 46 | - Sister: Age 55, no known medical conditions
 47 | 
 48 | SOCIAL HISTORY
 49 | Occupation: High school mathematics teacher
 50 | Tobacco: Former smoker, quit in 2018 (25 pack-year history)
 51 | Alcohol: Occasional (1-2 drinks per week)
 52 | Exercise: Walks 20 minutes, 3 times per week
 53 | Diet: Reports following a "mostly" diabetic diet with occasional non-compliance
 54 | 
 55 | PHYSICAL EXAMINATION
 56 | Vital Signs:
 57 | - BP: 165/95 mmHg
 58 | - HR: 95 bpm
 59 | - RR: 22 breaths/min
 60 | - Temp: 98.6°F (37°C)
 61 | - O2 Saturation: 94% on room air
 62 | 
 63 | General: Patient is alert but anxious, in moderate distress
 64 | Cardiovascular: Irregular rhythm, tachycardia, S3 gallop present, no murmurs
 65 | Respiratory: Bibasilar crackles, decreased breath sounds at bases bilaterally
 66 | Neurological: Left arm weakness (4/5 strength), otherwise grossly intact
 67 | Extremities: No edema, normal peripheral pulses
 68 | 
 69 | DIAGNOSTIC STUDIES
 70 | Laboratory:
 71 | - Troponin I: 2.3 ng/mL (elevated)
 72 | - CK-MB: 12.5 ng/mL (elevated)
 73 | - BNP: 450 pg/mL (elevated)
 74 | - Complete Blood Count: WBC 12,000/μL, Hgb 13.5 g/dL, Plt 230,000/μL
 75 | - Complete Metabolic Panel: Glucose 185 mg/dL, Cr 1.1 mg/dL, BUN 22 mg/dL
 76 | - Lipid Panel: Total Chol 210 mg/dL, LDL 130 mg/dL, HDL 35 mg/dL, TG 190 mg/dL
 77 | - HbA1c: 7.8%
 78 | 
 79 | Imaging and Other Studies:
 80 | - ECG: ST-segment elevation in leads II, III, aVF; reciprocal changes in I, aVL
 81 | - Chest X-ray: Mild pulmonary edema, cardiomegaly
 82 | - Echocardiogram: EF 40%, inferior wall hypokinesis, moderate mitral regurgitation
 83 | - Cardiac Catheterization: 90% occlusion of right coronary artery, 70% occlusion of left circumflex artery
 84 | 
 85 | DIAGNOSIS
 86 | 1. Acute ST-elevation Myocardial Infarction (STEMI), inferior wall
 87 | 2. Coronary Artery Disease, multivessel
 88 | 3. Congestive Heart Failure, acute onset (NYHA Class III)
 89 | 4. Type 2 Diabetes Mellitus, inadequately controlled
 90 | 5. Essential Hypertension, inadequately controlled
 91 | 6. Hyperlipidemia
 92 | 
 93 | TREATMENT
 94 | Procedures:
 95 | 1. Emergency Percutaneous Coronary Intervention (PCI) with drug-eluting stent placement in right coronary artery by Dr. Michael Wilson on April 10, 2025
 96 | 2. Scheduled PCI for left circumflex artery by Dr. Wilson on April 13, 2025
 97 | 
 98 | Medications:
 99 | 1. Aspirin 325mg daily
100 | 2. Clopidogrel 75mg daily
101 | 3. Metoprolol succinate 50mg daily
102 | 4. Lisinopril 40mg daily (increased from 20mg)
103 | 5. Atorvastatin 80mg daily (increased from 40mg)
104 | 6. Furosemide 40mg twice daily
105 | 7. Metformin continued at 1000mg twice daily
106 | 8. Insulin glargine 20 units at bedtime (new)
107 | 9. Sertraline continued at 50mg daily
108 | 
109 | HOSPITAL COURSE
110 | Patient was admitted through the Emergency Department and taken emergently to the cardiac catheterization lab where he underwent successful PCI with stent placement to the right coronary artery. Post-procedure, the patient was transferred to the Cardiac Care Unit (CCU) for close monitoring. Patient experienced a brief episode of ventricular fibrillation on the first night, which was successfully treated with defibrillation. Cardiology and endocrinology were consulted for management of heart failure and diabetes. Follow-up echocardiogram on April 12 showed improvement in EF to 45%. Patient underwent scheduled PCI of the left circumflex artery on April 13 without complications.
111 | 
112 | DISCHARGE PLAN
113 | Discharge Date: April 16, 2025
114 | Discharge Disposition: Home with scheduled home health visits from Memorial Home Health Services
115 | 
116 | Follow-up Appointments:
117 | 1. Dr. Elizabeth Chen (Cardiology) - April 23, 2025 at 10:00 AM
118 | 2. Dr. Sarah Williams (Endocrinology) - April 25, 2025 at 2:30 PM
119 | 3. Cardiac Rehabilitation evaluation - April 30, 2025 at 1:00 PM
120 | 
121 | Patient Education:
122 | 1. STEMI and coronary artery disease management
123 | 2. Diabetes self-management and glucometer use
124 | 3. Heart-healthy diet (consultation with nutritionist completed)
125 | 4. Medication management and adherence
126 | 5. Warning signs requiring immediate medical attention
127 | 
128 | PROGNOSIS
129 | Guarded. Patient has significant coronary artery disease with reduced ejection fraction. Long-term prognosis will depend on medication adherence, lifestyle modifications, and management of comorbidities.
130 | 
131 | ATTESTATION
132 | I have personally examined the patient and reviewed all diagnostic studies. This documentation is complete and accurate to the best of my knowledge.
133 | 
134 | Electronically signed by:
135 | Elizabeth Chen, MD
136 | Cardiology
137 | Memorial Hospital Medical Center
138 | Date: April 16, 2025 | Time: 14:35 
```

--------------------------------------------------------------------------------
/ultimate_mcp_server/graceful_shutdown.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Graceful shutdown utilities for Ultimate MCP Server.
  3 | 
  4 | This module provides utilities to handle signals and gracefully terminate
  5 | the application with ZERO error outputs during shutdown using OS-level redirection.
  6 | """
  7 | 
  8 | import asyncio
  9 | import logging
 10 | import os
 11 | import signal
 12 | import sys
 13 | import warnings
 14 | from contextlib import suppress
 15 | from typing import Callable, List, Optional
 16 | 
 17 | logger = logging.getLogger("ultimate_mcp_server.shutdown")
 18 | 
 19 | # Track registered shutdown handlers and state
 20 | _shutdown_handlers: List[Callable] = []
 21 | _shutdown_in_progress = False
 22 | _original_stderr_fd = None
 23 | _devnull_fd = None
 24 | 
 25 | 
 26 | def _redirect_stderr_to_devnull():
 27 |     """Redirect stderr to /dev/null at the OS level"""
 28 |     global _original_stderr_fd, _devnull_fd
 29 |     
 30 |     try:
    |         # Idempotent: _original_stderr_fd doubles as the "already redirected" guard.
 31 |         if _original_stderr_fd is None:
 32 |             # Save original stderr file descriptor
 33 |             _original_stderr_fd = os.dup(sys.stderr.fileno())
 34 |             
 35 |             # Open /dev/null
 36 |             _devnull_fd = os.open(os.devnull, os.O_WRONLY)
 37 |             
 38 |             # Redirect stderr to /dev/null
 39 |             os.dup2(_devnull_fd, sys.stderr.fileno())
 40 |             
 41 |     except Exception:
 42 |         # If redirection fails, just continue
    |         # (shutdown must never be interrupted by logging plumbing)
 43 |         pass
 44 | 
 45 | 
 46 | def _restore_stderr():
 47 |     """Restore original stderr"""
 48 |     global _original_stderr_fd, _devnull_fd
 49 |     
 50 |     try:
    |         # Undo _redirect_stderr_to_devnull(): restore the saved fd, then close
    |         # both duplicated descriptors and reset the module-level state.
 51 |         if _original_stderr_fd is not None:
 52 |             os.dup2(_original_stderr_fd, sys.stderr.fileno())
 53 |             os.close(_original_stderr_fd)
 54 |             _original_stderr_fd = None
 55 |             
 56 |         if _devnull_fd is not None:
 57 |             os.close(_devnull_fd)
 58 |             _devnull_fd = None
 59 |             
 60 |     except Exception:
    |         # Best-effort: failing to restore stderr during shutdown is not actionable.
 61 |         pass
 62 | 
 63 | 
 64 | def register_shutdown_handler(handler: Callable) -> None:
 65 |     """Register a function to be called during graceful shutdown."""
    |     # Membership check keeps the handler list duplicate-free.
 66 |     if handler not in _shutdown_handlers:
 67 |         _shutdown_handlers.append(handler)
 68 | 
 69 | 
 70 | def remove_shutdown_handler(handler: Callable) -> None:
 71 |     """Remove a previously registered shutdown handler."""
    |     # Safe to call even if the handler was never registered (no-op in that case).
 72 |     if handler in _shutdown_handlers:
 73 |         _shutdown_handlers.remove(handler)
 74 | 
 75 | 
 76 | async def _execute_shutdown_handlers():
 77 |     """Execute all registered shutdown handlers with complete error suppression"""
    |     # Each async handler gets at most 3 seconds; sync handlers run inline.
    |     # All exceptions are swallowed so one bad handler cannot block shutdown.
 78 |     for handler in _shutdown_handlers:
 79 |         with suppress(Exception):  # Suppress ALL exceptions
 80 |             if asyncio.iscoroutinefunction(handler):
 81 |                 with suppress(asyncio.TimeoutError, asyncio.CancelledError):
 82 |                     await asyncio.wait_for(handler(), timeout=3.0)
 83 |             else:
 84 |                 handler()
 85 | 
 86 | 
 87 | def _handle_shutdown_signal(signum, frame):
 88 |     """Handle shutdown signals - IMMEDIATE TERMINATION"""
 89 |     global _shutdown_in_progress
 90 |     
 91 |     if _shutdown_in_progress:
 92 |         # Force immediate exit on second signal
    |         # (os._exit bypasses atexit hooks and Python cleanup entirely)
 93 |         os._exit(1)
 94 |         return
 95 |         
 96 |     _shutdown_in_progress = True
 97 |     
 98 |     # Print final message to original stderr if possible
    |     # NOTE(review): on the first signal stderr has typically not been redirected yet,
    |     # so _original_stderr_fd is usually None and the sys.__stderr__ branch runs —
    |     # unless _redirect_stderr_to_devnull() was already called elsewhere; confirm.
 99 |     try:
100 |         if _original_stderr_fd:
101 |             os.write(_original_stderr_fd, b"\n[Graceful Shutdown] Signal received. Exiting...\n")
102 |         else:
103 |             print("\n[Graceful Shutdown] Signal received. Exiting...", file=sys.__stderr__)
104 |     except Exception:
105 |         pass
106 |     
107 |     # Immediately redirect stderr to suppress any error output
108 |     _redirect_stderr_to_devnull()
109 |     
110 |     # Suppress all warnings
111 |     warnings.filterwarnings("ignore")
112 |     
113 |     # Try to run shutdown handlers quickly, but don't wait long
114 |     try:
115 |         loop = asyncio.get_running_loop()
116 |         # Create a task but don't wait for it - just exit
117 |         asyncio.create_task(_execute_shutdown_handlers())
118 |         # Give it a tiny bit of time then exit
    |         # (handlers race against this 0.5s deadline; exit code 0 regardless)
119 |         loop.call_later(0.5, lambda: os._exit(0))
120 |     except RuntimeError:
121 |         # No running loop - just exit immediately
122 |         os._exit(0)
123 | 
124 | 
def setup_signal_handlers(loop: Optional[asyncio.AbstractEventLoop] = None) -> None:
    """Install quiet-shutdown handlers for SIGINT and SIGTERM.

    Traditional ``signal.signal`` handlers are always installed first; they
    work on every platform.  When an event loop is supplied, loop-native
    handlers are attempted as well, with silent fallback on platforms
    (e.g. Windows) that do not support them.
    """
    handled = (signal.SIGINT, signal.SIGTERM)

    for sig in handled:
        signal.signal(sig, _handle_shutdown_signal)

    if loop is None:
        return

    try:
        for sig in handled:
            try:
                loop.add_signal_handler(sig, lambda s=sig: _handle_shutdown_signal(s, None))
            except (NotImplementedError, OSError):
                # Platform doesn't support async signal handlers
                pass
    except Exception:
        # The signal.signal handlers above remain as the fallback.
        pass
143 | 
144 | 
def enable_quiet_shutdown():
    """Enable comprehensive quiet shutdown (immediate-termination approach).

    Installs the SIGINT/SIGTERM handlers, disables asyncio debug mode when
    an event loop is obtainable, and silences all warnings.
    """
    # Signal handlers first, so an early Ctrl+C is already handled quietly.
    setup_signal_handlers()

    # No loop available is fine - just skip the debug toggle.
    with suppress(RuntimeError):
        asyncio.get_event_loop().set_debug(False)

    warnings.filterwarnings("ignore")
158 | 
159 | 
def force_silent_exit():
    """Force immediate silent exit with no output whatsoever."""
    global _shutdown_in_progress
    # Mark shutdown so a concurrently-arriving signal takes the fast path.
    _shutdown_in_progress = True
    # Silence stderr before terminating so nothing can print during teardown.
    _redirect_stderr_to_devnull()
    # Hard exit: skips atexit hooks, finally blocks, and buffered flushes.
    os._exit(0)
166 | 
167 | 
class QuietUvicornServer:
    """Uvicorn server wrapper that routes signal handling through our quiet handlers."""

    def __init__(self, config):
        # Imported lazily so the module loads even without uvicorn installed.
        import uvicorn
        self.config = config
        self.server = uvicorn.Server(config)

    def install_signal_handlers(self):
        """Replacement for uvicorn's signal installation: install the quiet ones."""
        setup_signal_handlers()

    def run(self):
        """Start the server with quiet signal handling in place.

        The wrapped server's ``install_signal_handlers`` is monkey-patched so
        uvicorn never installs its own handlers; ours are installed up front
        before control is handed to uvicorn.
        """
        self.server.install_signal_handlers = self.install_signal_handlers
        setup_signal_handlers()
        self.server.run()
191 | 
192 | 
def create_quiet_server(config):
    """Build a QuietUvicornServer around *config* for quiet shutdown handling."""
    server = QuietUvicornServer(config)
    return server
```

--------------------------------------------------------------------------------
/model_preferences.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Model preferences for MCP servers.
  3 | 
  4 | This module implements the ModelPreferences capability from the MCP protocol,
  5 | allowing servers to express preferences for model selection during sampling.
  6 | """
  7 | from typing import List, Optional
  8 | 
  9 | 
 10 | class ModelHint:
 11 |     """
 12 |     Hint for model selection.
 13 |     
 14 |     Model hints allow the server to suggest specific models or model families
 15 |     that would be appropriate for a given task.
 16 |     """
 17 |     
 18 |     def __init__(self, name: str):
 19 |         """
 20 |         Initialize a model hint.
 21 |         
 22 |         Args:
 23 |             name: A hint for a model name (e.g., 'claude-3-5-sonnet', 'sonnet', 'claude').
 24 |                  This should be treated as a substring matching.
 25 |         """
 26 |         self.name = name
 27 |         
 28 |     def to_dict(self) -> dict:
 29 |         """Convert model hint to dictionary."""
 30 |         return {"name": self.name}
 31 | 
 32 | 
 33 | class ModelPreferences:
 34 |     """
 35 |     Preferences for model selection to guide LLM client decisions.
 36 |     
 37 |     The ModelPreferences class provides a standardized way for servers to express 
 38 |     prioritization along three key dimensions (intelligence, speed, cost) that can 
 39 |     help clients make more informed decisions when selecting LLM models for specific tasks.
 40 |     
 41 |     These preferences serve as advisory hints that help optimize the tradeoffs between:
 42 |     - Intelligence/capability: Higher quality, more capable models (but often slower/costlier)
 43 |     - Speed: Faster response time and lower latency (but potentially less capable)
 44 |     - Cost: Lower token or API costs (but potentially less capable or slower)
 45 |     
 46 |     The class also supports model-specific hints that can recommend particular models
 47 |     or model families that are well-suited for specific tasks (e.g., suggesting Claude
 48 |     models for creative writing or GPT-4V for image analysis).
 49 |     
 50 |     All preferences are expressed with normalized values between 0.0 (lowest priority) 
 51 |     and 1.0 (highest priority) to allow for consistent interpretation across different
 52 |     implementations.
 53 |     
 54 |     Note: These preferences are always advisory. Clients may use them as guidance but
 55 |     are not obligated to follow them, particularly if there are overriding user preferences
 56 |     or system constraints.
 57 |     
 58 |     Usage example:
 59 |         ```python
 60 |         # For a coding task requiring high intelligence but where cost is a major concern
 61 |         preferences = ModelPreferences(
 62 |             intelligence_priority=0.8,  # High priority on capability
 63 |             speed_priority=0.4,         # Moderate priority on speed
 64 |             cost_priority=0.7,          # High priority on cost
 65 |             hints=[ModelHint("gpt-4-turbo")]  # Specific model recommendation
 66 |         )
 67 |         ```
 68 |     """
 69 |     
 70 |     def __init__(
 71 |         self,
 72 |         intelligence_priority: float = 0.5,
 73 |         speed_priority: float = 0.5,
 74 |         cost_priority: float = 0.5,
 75 |         hints: Optional[List[ModelHint]] = None
 76 |     ):
 77 |         """
 78 |         Initialize model preferences.
 79 |         
 80 |         Args:
 81 |             intelligence_priority: How much to prioritize intelligence/capabilities (0.0-1.0).
 82 |                 Higher values favor more capable, sophisticated models that may produce
 83 |                 higher quality outputs, handle complex tasks, or follow instructions better.
 84 |                 Default: 0.5 (balanced)
 85 |             speed_priority: How much to prioritize sampling speed/latency (0.0-1.0).
 86 |                 Higher values favor faster models with lower latency, which is important
 87 |                 for real-time applications, interactive experiences, or time-sensitive tasks.
 88 |                 Default: 0.5 (balanced)
 89 |             cost_priority: How much to prioritize cost efficiency (0.0-1.0).
 90 |                 Higher values favor more economical models with lower token or API costs,
 91 |                 which is important for budget-constrained applications or high-volume usage.
 92 |                 Default: 0.5 (balanced)
 93 |             hints: Optional model hints in preference order. These can suggest specific
 94 |                 models or model families that would be appropriate for the task.
 95 |                 The list should be ordered by preference (most preferred first).
 96 |         """
 97 |         # Clamp values between 0 and 1
 98 |         self.intelligence_priority = max(0.0, min(1.0, intelligence_priority))
 99 |         self.speed_priority = max(0.0, min(1.0, speed_priority))
100 |         self.cost_priority = max(0.0, min(1.0, cost_priority))
101 |         self.hints = hints or []
102 |         
103 |     def to_dict(self) -> dict:
104 |         """Convert model preferences to dictionary."""
105 |         return {
106 |             "intelligencePriority": self.intelligence_priority,
107 |             "speedPriority": self.speed_priority,
108 |             "costPriority": self.cost_priority,
109 |             "hints": [hint.to_dict() for hint in self.hints]
110 |         }
111 | 
112 | 
# Pre-defined preference templates for common use cases.
# NOTE(review): the hint strings below are substring matches against model
# names; verify they still correspond to currently-offered models.

# Default balanced preference profile - no strong bias in any direction
# Use when there's no clear priority between intelligence, speed, and cost
# Good for general-purpose applications where trade-offs are acceptable
BALANCED_PREFERENCES = ModelPreferences(
    intelligence_priority=0.5,
    speed_priority=0.5,
    cost_priority=0.5
)

# Prioritizes high-quality, sophisticated model responses
# Use for complex reasoning, creative tasks, or critical applications
# where accuracy and capability matter more than speed or cost
INTELLIGENCE_FOCUSED = ModelPreferences(
    intelligence_priority=0.9,
    speed_priority=0.3,
    cost_priority=0.3,
    hints=[ModelHint("claude-3-5-opus")]
)

# Prioritizes response speed and low latency
# Use for real-time applications, interactive experiences, 
# chatbots, or any use case where user wait time is critical
SPEED_FOCUSED = ModelPreferences(
    intelligence_priority=0.3,
    speed_priority=0.9,
    cost_priority=0.5,
    hints=[ModelHint("claude-3-haiku"), ModelHint("gemini-flash")]
)

# Prioritizes cost efficiency and token economy
# Use for high-volume applications, background processing,
# or when operating under strict budget constraints
COST_FOCUSED = ModelPreferences(
    intelligence_priority=0.3,
    speed_priority=0.5,
    cost_priority=0.9,
    hints=[ModelHint("mistral"), ModelHint("gemini-flash")]
) 
```

--------------------------------------------------------------------------------
/ultimate_mcp_server/utils/logging/themes.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Color themes for Gateway logging system.
  3 | 
  4 | This module defines color schemes for different log levels, operations, and components
  5 | to provide visual consistency and improve readability of log output.
  6 | """
  7 | from typing import Optional, Tuple
  8 | 
  9 | from rich.style import Style
 10 | from rich.theme import Theme
 11 | 
# Central palette: maps semantic names to Rich color strings.  All other
# styling in this module (STYLES, lookup helpers) resolves through this dict.
COLORS = {
    # Main colors
    "primary": "bright_blue",
    "secondary": "cyan",
    "accent": "magenta",
    "success": "green",
    "warning": "yellow",
    "error": "red",
    "critical": "bright_red",
    "info": "bright_blue",
    "debug": "bright_black",
    "trace": "bright_black",
    
    # Component-specific colors (Adapt as needed for ultimate)
    "core": "blue", 
    "provider": "cyan", # Example: Renamed 'composite' to 'provider'
    "router": "green", # Example: Renamed 'analysis' to 'router'
    "cache": "bright_magenta",
    "api": "bright_yellow",
    "mcp": "bright_blue", # Kept if relevant
    "utils": "magenta", # Example: Added 'utils'
    "default_component": "bright_cyan", # Fallback used by get_component_style()
    
    # Tool-specific colors (Keep or remove as needed)
    "ripgrep": "blue",
    "awk": "green",
    "jq": "yellow",
    "sqlite": "magenta",
    
    # Result/Status colors
    "high_confidence": "green",
    "medium_confidence": "yellow",
    "low_confidence": "red",
    
    # Misc
    "muted": "bright_black",
    "highlight": "bright_white",
    "timestamp": "bright_black",
    "path": "bright_blue",
    "code": "bright_cyan",
    "data": "bright_yellow",
    "data.key": "bright_black", # Added for context tables
}
 55 | 
# Composed Rich Style objects built on the COLORS palette; keys here become
# the style names available in RICH_THEME and via get_*_style helpers.
STYLES = {
    # Base styles for log levels
    "info": Style(color=COLORS["info"]),
    "debug": Style(color=COLORS["debug"]),
    "warning": Style(color=COLORS["warning"], bold=True),
    "error": Style(color=COLORS["error"], bold=True),
    "critical": Style(color=COLORS["critical"], bold=True, reverse=True),
    "success": Style(color=COLORS["success"], bold=True),
    "trace": Style(color=COLORS["trace"], dim=True),
    
    # Component styles (Matching adapted COLORS)
    "core": Style(color=COLORS["core"], bold=True),
    "provider": Style(color=COLORS["provider"], bold=True),
    "router": Style(color=COLORS["router"], bold=True),
    "cache": Style(color=COLORS["cache"], bold=True),
    "api": Style(color=COLORS["api"], bold=True),
    "mcp": Style(color=COLORS["mcp"], bold=True),
    "utils": Style(color=COLORS["utils"], bold=True),
    "default_component": Style(color=COLORS["default_component"], bold=True),
    
    # Operation styles
    "operation": Style(color=COLORS["accent"], bold=True),
    "startup": Style(color=COLORS["success"], bold=True),
    "shutdown": Style(color=COLORS["error"], bold=True),
    "request": Style(color=COLORS["primary"], bold=True),
    "response": Style(color=COLORS["secondary"], bold=True),
    
    # Confidence level styles
    "high_confidence": Style(color=COLORS["high_confidence"], bold=True),
    "medium_confidence": Style(color=COLORS["medium_confidence"], bold=True),
    "low_confidence": Style(color=COLORS["low_confidence"], bold=True),
    
    # Misc styles
    "timestamp": Style(color=COLORS["timestamp"], dim=True),
    "path": Style(color=COLORS["path"], underline=True),
    "code": Style(color=COLORS["code"], italic=True),
    "data": Style(color=COLORS["data"]),
    "data.key": Style(color=COLORS["data.key"], bold=True),
    "muted": Style(color=COLORS["muted"], dim=True),
    "highlight": Style(color=COLORS["highlight"], bold=True),
}
 97 | 
 98 | # Rich theme that can be used directly with Rich Console
 99 | RICH_THEME = Theme({name: style for name, style in STYLES.items()})
100 | 
# Resolve a log level name to its Rich style
def get_level_style(level: str) -> Style:
    """Look up the Rich style associated with a log level.

    Args:
        level: The log level (info, debug, warning, error, critical,
            success, trace); matching is case-insensitive.

    Returns:
        The matching Rich Style, falling back to the "info" style for
        unknown levels.
    """
    return STYLES.get(level.lower(), STYLES["info"])
113 | 
# Resolve a component name to its Rich style
def get_component_style(component: str) -> Style:
    """Look up the Rich style for a named component.

    Args:
        component: The component name (core, provider, router, etc.);
            matching is case-insensitive.

    Returns:
        The component's Style, or the generic "default_component" style
        when no specific style is registered.
    """
    key = component.lower()
    return STYLES.get(key, STYLES["default_component"])
127 | 
# Resolve a color name to its Rich color string
def get_color(name: str) -> str:
    """Get a color by name.

    Args:
        name: The color name; matching is case-insensitive.

    Returns:
        A Rich-compatible color string; unknown names resolve to the
        "primary" color.
    """
    key = name.lower()
    return COLORS.get(key, COLORS["primary"])
139 | 
# Wrap text in Rich markup without needing a console
def style_text(text: str, style_name: str) -> str:
    """Apply a named style to text using Rich markup tags.

    Useful when no Rich console is available at formatting time: the
    returned string carries Rich markup and renders styled when a Rich
    console eventually prints it.

    Args:
        text: The text to style.
        style_name: The name of the style to apply.

    Returns:
        The text wrapped in "[style]...[/style]" markup tags.
    """
    opening = f"[{style_name}]"
    closing = f"[/{style_name}]"
    return opening + text + closing
157 | 
# Get foreground and background colors for a specific context
def get_context_colors(
    context: str, component: Optional[str] = None
) -> Tuple[str, Optional[str]]:
    """Get appropriate foreground and background colors for a log context.

    Args:
        context: The log context (e.g., 'request', 'response')
        component: Optional component name for further refinement
            (currently unused by the implementation; reserved)

    Returns:
        Tuple of (foreground_color, background_color) or (color, None)
    """
    # Unknown contexts fall back to the default component style.
    style = STYLES.get(context.lower()) or STYLES.get("default_component")
    
    if style and style.color:
        # NOTE(review): assumes style.color / style.bgcolor expose a usable
        # .name attribute (rich.color.Color); colors constructed from
        # numeric values may not - confirm against the rich version in use.
        return (str(style.color.name), str(style.bgcolor.name) if style.bgcolor else None)
    else:
        # Fallback to the raw COLORS palette, defaulting to "primary".
        fg = COLORS.get(context.lower(), COLORS["primary"])
        return (fg, None) 
```

--------------------------------------------------------------------------------
/examples/test_code_extraction.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Test script for the LLM-based code extraction function.
  4 | 
  5 | This script loads the tournament state from a previous run and tests
  6 | the new code extraction function against the raw response texts.
  7 | """
  8 | 
  9 | import asyncio
 10 | import json
 11 | import sys
 12 | from pathlib import Path
 13 | from typing import Dict
 14 | 
 15 | # Add project root to path for imports when running as script
 16 | sys.path.insert(0, str(Path(__file__).parent.parent))
 17 | 
 18 | from rich import box
 19 | from rich.panel import Panel
 20 | from rich.table import Table
 21 | 
 22 | from ultimate_mcp_server.core.server import Gateway
 23 | 
 24 | # Import the extraction function from the library
 25 | from ultimate_mcp_server.tools import extract_code_from_response
 26 | from ultimate_mcp_server.utils import get_logger
 27 | from ultimate_mcp_server.utils.display import CostTracker  # Import CostTracker
 28 | from ultimate_mcp_server.utils.logging.console import console
 29 | 
# Module-level logger for this example script
logger = get_logger("example.test_extraction")

# Create a simple structure for cost tracking (though likely won't be used directly here)
# TrackableResult = namedtuple("TrackableResult", ["cost", "input_tokens", "output_tokens", "provider", "model", "processing_time"])

# Module-global Gateway; populated by setup_gateway() so helpers can share it
gateway = None

# Path to the tournament state file from the last run.
# NOTE(review): machine-specific absolute path - update before running elsewhere.
TOURNAMENT_STATE_PATH = "/data/projects/ultimate_mcp_server/storage/tournaments/2025-04-01_03-24-37_tournament_76009a9a/tournament_state.json"
 41 | 
async def setup_gateway():
    """Set up the gateway for testing.

    Creates the module-global Gateway (with tool registration disabled)
    and initializes its LLM providers so completions can be issued.
    """
    global gateway
    
    # Create gateway instance
    logger.info("Initializing gateway for testing", emoji_key="start")
    gateway = Gateway("test-extraction", register_tools=False)
    
    # Initialize the server with all providers and built-in tools
    await gateway._initialize_providers()
    
    logger.info("Gateway initialized", emoji_key="success")
 54 | 
 55 | async def load_tournament_state() -> Dict:
 56 |     """Load the tournament state from the previous run."""
 57 |     try:
 58 |         with open(TOURNAMENT_STATE_PATH, 'r', encoding='utf-8') as f:
 59 |             return json.load(f)
 60 |     except Exception as e:
 61 |         logger.error(f"Error loading tournament state: {str(e)}", emoji_key="error")
 62 |         return {}
 63 | 
async def test_extraction(tracker: CostTracker): # Add tracker
    """Test the LLM-based code extraction function.

    Replays the raw model responses stored by a previous tournament run
    through extract_code_from_response, rendering per-response panels, a
    summary table, and the accumulated extraction cost.

    Args:
        tracker: CostTracker that accumulates LLM cost incurred by the
            extraction calls.

    Returns:
        0 on success, 1 if the tournament state could not be loaded or
        contains no round results.
    """
    # Load the tournament state
    tournament_state = await load_tournament_state()
    
    if not tournament_state:
        logger.error("Failed to load tournament state", emoji_key="error")
        return 1
    
    # Check if we have rounds_results
    rounds_results = tournament_state.get('rounds_results', [])
    if not rounds_results:
        logger.error("No round results found in tournament state", emoji_key="error")
        return 1
    
    # Create a table to display the results
    console.print("\n[bold]Testing LLM-based Code Extraction Function[/bold]\n")
    
    # Create a table for extraction results
    extraction_table = Table(box=box.MINIMAL, show_header=True, expand=False)
    extraction_table.add_column("Round", style="cyan")
    extraction_table.add_column("Model", style="magenta")
    extraction_table.add_column("Code Extracted", style="green")
    extraction_table.add_column("Line Count", style="yellow", justify="right")
    
    # Process each round
    for round_idx, round_data in enumerate(rounds_results):
        responses = round_data.get('responses', {})
        
        for model_id, response in responses.items():
            # Strip any provider prefix (e.g. "openai:gpt-4" -> "gpt-4") for display.
            display_model = model_id.split(':')[-1] if ':' in model_id else model_id
            response_text = response.get('response_text', '')
            
            if response_text:
                # Extract code using our new function, passing the tracker
                extracted_code = await extract_code_from_response(response_text, tracker=tracker)
                
                # Calculate line count
                line_count = len(extracted_code.split('\n')) if extracted_code else 0
                
                # Add to the table
                extraction_table.add_row(
                    str(round_idx),
                    display_model,
                    "✅" if extracted_code else "❌",
                    str(line_count)
                )
                
                # Print detailed results
                if extracted_code:
                    console.print(Panel(
                        f"[bold]Round {round_idx} - {display_model}[/bold]\n\n"
                        f"[green]Successfully extracted {line_count} lines of code[/green]\n",
                        title="Extraction Result",
                        expand=False
                    ))
                    
                    # Print first 10 lines of code as a preview
                    code_preview = "\n".join(extracted_code.split('\n')[:10])
                    if len(extracted_code.split('\n')) > 10:
                        code_preview += "\n..."
                    
                    console.print(Panel(
                        code_preview,
                        title="Code Preview",
                        expand=False
                    ))
                else:
                    console.print(Panel(
                        f"[bold]Round {round_idx} - {display_model}[/bold]\n\n"
                        f"[red]Failed to extract code[/red]\n",
                        title="Extraction Result",
                        expand=False
                    ))
    
    # Display the summary table
    console.print("\n[bold]Extraction Summary:[/bold]")
    console.print(extraction_table)
    
    # Display cost summary at the end
    tracker.display_summary(console)
    
    return 0
147 | 
async def main():
    """Run the test script.

    Returns:
        Process exit code: 0 on success, 1 on failure.
    """
    tracker = CostTracker()  # Instantiate tracker
    try:
        # Set up gateway
        await setup_gateway()
        
        # Run the extraction test
        return await test_extraction(tracker)  # Pass tracker
    except Exception as e:
        logger.critical(f"Test failed: {str(e)}", emoji_key="critical", exc_info=True)
        return 1
    # The Gateway instance needs no explicit cleanup, so the previous
    # no-op `finally: if gateway: pass` block was removed as dead code.
164 | 
if __name__ == "__main__":
    # Run the script and propagate its exit code to the shell.
    exit_code = asyncio.run(main())
    sys.exit(exit_code) 
```

--------------------------------------------------------------------------------
/examples/cache_demo.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python
  2 | """Cache demonstration for Ultimate MCP Server."""
  3 | import asyncio
  4 | import sys
  5 | import time
  6 | from pathlib import Path
  7 | 
  8 | # Add project root to path for imports when running as script
  9 | sys.path.insert(0, str(Path(__file__).parent.parent))
 10 | 
 11 | from rich.markup import escape
 12 | from rich.rule import Rule
 13 | 
 14 | from ultimate_mcp_server.services.cache import get_cache_service, run_completion_with_cache
 15 | from ultimate_mcp_server.utils import get_logger
 16 | from ultimate_mcp_server.utils.display import CostTracker, display_cache_stats
 17 | 
 18 | # --- Add Rich Imports ---
 19 | from ultimate_mcp_server.utils.logging.console import console
 20 | 
 21 | # ----------------------
 22 | 
# Module-level logger for this demo script
logger = get_logger("example.cache_demo")
 25 | 
 26 | 
async def demonstrate_cache(tracker: CostTracker = None):
    """Demonstrate cache functionality using Rich.

    Issues the same completion four times - cache miss, cache hit, explicit
    bypass, and a second hit - timing each call, recording provider cost for
    the calls that actually reach the API, and finally rendering the cache
    statistics.

    Args:
        tracker: Optional CostTracker; real (non-cached) API calls are added
            to it when provided.  NOTE(review): the annotation says
            CostTracker but the default is None - confirm callers tolerate
            both.
    """
    console.print(Rule("[bold blue]Cache Demonstration[/bold blue]"))
    logger.info("Starting cache demonstration", emoji_key="start")
    
    cache_service = get_cache_service()
    
    if not cache_service.enabled:
        logger.warning("Cache is disabled by default. Enabling for demonstration.", emoji_key="warning")
        cache_service.enabled = True
    
    cache_service.clear() # Start with a clean slate
    logger.info("Cache cleared for demonstration", emoji_key="cache")
    
    prompt = "Explain how caching works in distributed systems."
    console.print(f"[cyan]Using Prompt:[/cyan] {escape(prompt)}")
    console.print()

    results = {}
    times = {}
    stats_log = {}

    try:
        # Helper function to get current stats snapshot.
        # Defined first inside the try so the except branch below can rely on it.
        def get_current_stats_dict():
            return {
                "get_count": getattr(cache_service.metrics, "gets", 0), # Use gets for Total Gets
                "hit_count": getattr(cache_service.metrics, "hits", 0),
                "miss_count": getattr(cache_service.metrics, "misses", 0),
                "set_count": getattr(cache_service.metrics, "stores", 0), # Use stores for Total Sets
                # Add other stats if needed by display_cache_stats later
            }
            
        # 1. Cache Miss
        logger.info("1. Running first completion (expect cache MISS)...", emoji_key="processing")
        start_time = time.time()
        results[1] = await run_completion_with_cache(prompt, use_cache=True)
        times[1] = time.time() - start_time
        stats_log[1] = get_current_stats_dict()
        
        # Track cost - only for non-cache hits (actual API calls)
        if tracker:
            tracker.add_call(results[1])
            
        console.print(f"   [yellow]MISS:[/yellow] Took [bold]{times[1]:.3f}s[/bold] (Cost: ${results[1].cost:.6f}, Tokens: {results[1].total_tokens})")

        # 2. Cache Hit
        logger.info("2. Running second completion (expect cache HIT)...", emoji_key="processing")
        start_time = time.time()
        results[2] = await run_completion_with_cache(prompt, use_cache=True)
        times[2] = time.time() - start_time
        stats_log[2] = get_current_stats_dict()
        speedup = times[1] / times[2] if times[2] > 0 else float('inf')
        console.print(f"   [green]HIT:[/green]  Took [bold]{times[2]:.3f}s[/bold] (Speed-up: {speedup:.1f}x vs Miss)")

        # 3. Cache Bypass
        logger.info("3. Running third completion (BYPASS cache)...", emoji_key="processing")
        start_time = time.time()
        results[3] = await run_completion_with_cache(prompt, use_cache=False)
        times[3] = time.time() - start_time
        stats_log[3] = get_current_stats_dict() # Stats shouldn't change much for bypass
        
        # Track cost - bypassing cache calls the API
        if tracker:
            tracker.add_call(results[3])
            
        console.print(f"   [cyan]BYPASS:[/cyan] Took [bold]{times[3]:.3f}s[/bold] (Cost: ${results[3].cost:.6f}, Tokens: {results[3].total_tokens})")

        # 4. Another Cache Hit
        logger.info("4. Running fourth completion (expect cache HIT again)...", emoji_key="processing")
        start_time = time.time()
        results[4] = await run_completion_with_cache(prompt, use_cache=True)
        times[4] = time.time() - start_time
        stats_log[4] = get_current_stats_dict()
        speedup_vs_bypass = times[3] / times[4] if times[4] > 0 else float('inf')
        console.print(f"   [green]HIT:[/green]  Took [bold]{times[4]:.3f}s[/bold] (Speed-up: {speedup_vs_bypass:.1f}x vs Bypass)")
        console.print()

    except Exception as e:
         logger.error(f"Error during cache demonstration run: {e}", emoji_key="error", exc_info=True)
         console.print(f"[bold red]Error during demo run:[/bold red] {escape(str(e))}")
         # Attempt to display stats even if error occurred mid-way
         final_stats_dict = get_current_stats_dict() # Get stats even on error
    else:
         # Get final stats if all runs succeeded
         final_stats_dict = get_current_stats_dict()

    # Prepare the final stats dictionary for display_cache_stats
    # It expects top-level keys like 'enabled', 'persistence', and a 'stats' sub-dict
    display_stats = {
        "enabled": cache_service.enabled,
        "persistence": cache_service.enable_persistence,
        "stats": final_stats_dict,
        # Add savings if available/calculated (Example: Placeholder)
        # "savings": { "cost": getattr(cache_service.metrics, "saved_cost", 0.0) }
    }

    # Display Final Cache Statistics using our display function
    display_cache_stats(display_stats, stats_log, console)
    
    console.print()
    # Use the persistence setting directly from cache_service
    if cache_service.enable_persistence:
        logger.info("Cache persistence is enabled.", emoji_key="cache")
        if hasattr(cache_service, 'cache_dir'):
            console.print(f"[dim]Cache Directory: {cache_service.cache_dir}[/dim]")
    else:
        logger.info("Cache persistence is disabled.", emoji_key="cache")
    console.print()
136 | 
137 | 
async def main():
    """Entry point: run the cache demo and report accumulated API costs.

    Returns:
        Process exit code: 0 on success, 1 on failure.
    """
    tracker = CostTracker()  # aggregates provider spend across the demo
    try:
        await demonstrate_cache(tracker)
        
        # Display cost summary at the end
        tracker.display_summary(console)
    except Exception as e:
        logger.critical(f"Cache demonstration failed: {str(e)}", emoji_key="critical")
        return 1
    return 0
152 | 
153 | 
if __name__ == "__main__":
    # Run the demonstration and propagate its exit code to the shell.
    exit_code = asyncio.run(main())
    sys.exit(exit_code)
```
Page 2/45FirstPrevNextLast