This is page 24 of 35. Use http://codebase.md/dicklesworthstone/llm_gateway_mcp_server?page={x} to view the full context.
# Directory Structure
```
├── .cursorignore
├── .env.example
├── .envrc
├── .gitignore
├── additional_features.md
├── check_api_keys.py
├── completion_support.py
├── comprehensive_test.py
├── docker-compose.yml
├── Dockerfile
├── empirically_measured_model_speeds.json
├── error_handling.py
├── example_structured_tool.py
├── examples
│ ├── __init__.py
│ ├── advanced_agent_flows_using_unified_memory_system_demo.py
│ ├── advanced_extraction_demo.py
│ ├── advanced_unified_memory_system_demo.py
│ ├── advanced_vector_search_demo.py
│ ├── analytics_reporting_demo.py
│ ├── audio_transcription_demo.py
│ ├── basic_completion_demo.py
│ ├── cache_demo.py
│ ├── claude_integration_demo.py
│ ├── compare_synthesize_demo.py
│ ├── cost_optimization.py
│ ├── data
│ │ ├── sample_event.txt
│ │ ├── Steve_Jobs_Introducing_The_iPhone_compressed.md
│ │ └── Steve_Jobs_Introducing_The_iPhone_compressed.mp3
│ ├── docstring_refiner_demo.py
│ ├── document_conversion_and_processing_demo.py
│ ├── entity_relation_graph_demo.py
│ ├── filesystem_operations_demo.py
│ ├── grok_integration_demo.py
│ ├── local_text_tools_demo.py
│ ├── marqo_fused_search_demo.py
│ ├── measure_model_speeds.py
│ ├── meta_api_demo.py
│ ├── multi_provider_demo.py
│ ├── ollama_integration_demo.py
│ ├── prompt_templates_demo.py
│ ├── python_sandbox_demo.py
│ ├── rag_example.py
│ ├── research_workflow_demo.py
│ ├── sample
│ │ ├── article.txt
│ │ ├── backprop_paper.pdf
│ │ ├── buffett.pdf
│ │ ├── contract_link.txt
│ │ ├── legal_contract.txt
│ │ ├── medical_case.txt
│ │ ├── northwind.db
│ │ ├── research_paper.txt
│ │ ├── sample_data.json
│ │ └── text_classification_samples
│ │ ├── email_classification.txt
│ │ ├── news_samples.txt
│ │ ├── product_reviews.txt
│ │ └── support_tickets.txt
│ ├── sample_docs
│ │ └── downloaded
│ │ └── attention_is_all_you_need.pdf
│ ├── sentiment_analysis_demo.py
│ ├── simple_completion_demo.py
│ ├── single_shot_synthesis_demo.py
│ ├── smart_browser_demo.py
│ ├── sql_database_demo.py
│ ├── sse_client_demo.py
│ ├── test_code_extraction.py
│ ├── test_content_detection.py
│ ├── test_ollama.py
│ ├── text_classification_demo.py
│ ├── text_redline_demo.py
│ ├── tool_composition_examples.py
│ ├── tournament_code_demo.py
│ ├── tournament_text_demo.py
│ ├── unified_memory_system_demo.py
│ ├── vector_search_demo.py
│ ├── web_automation_instruction_packs.py
│ └── workflow_delegation_demo.py
├── LICENSE
├── list_models.py
├── marqo_index_config.json.example
├── mcp_protocol_schema_2025-03-25_version.json
├── mcp_python_lib_docs.md
├── mcp_tool_context_estimator.py
├── model_preferences.py
├── pyproject.toml
├── quick_test.py
├── README.md
├── resource_annotations.py
├── run_all_demo_scripts_and_check_for_errors.py
├── storage
│ └── smart_browser_internal
│ ├── locator_cache.db
│ ├── readability.js
│ └── storage_state.enc
├── test_client.py
├── test_connection.py
├── TEST_README.md
├── test_sse_client.py
├── test_stdio_client.py
├── tests
│ ├── __init__.py
│ ├── conftest.py
│ ├── integration
│ │ ├── __init__.py
│ │ └── test_server.py
│ ├── manual
│ │ ├── test_extraction_advanced.py
│ │ └── test_extraction.py
│ └── unit
│ ├── __init__.py
│ ├── test_cache.py
│ ├── test_providers.py
│ └── test_tools.py
├── TODO.md
├── tool_annotations.py
├── tools_list.json
├── ultimate_mcp_banner.webp
├── ultimate_mcp_logo.webp
├── ultimate_mcp_server
│ ├── __init__.py
│ ├── __main__.py
│ ├── cli
│ │ ├── __init__.py
│ │ ├── __main__.py
│ │ ├── commands.py
│ │ ├── helpers.py
│ │ └── typer_cli.py
│ ├── clients
│ │ ├── __init__.py
│ │ ├── completion_client.py
│ │ └── rag_client.py
│ ├── config
│ │ └── examples
│ │ └── filesystem_config.yaml
│ ├── config.py
│ ├── constants.py
│ ├── core
│ │ ├── __init__.py
│ │ ├── evaluation
│ │ │ ├── base.py
│ │ │ └── evaluators.py
│ │ ├── providers
│ │ │ ├── __init__.py
│ │ │ ├── anthropic.py
│ │ │ ├── base.py
│ │ │ ├── deepseek.py
│ │ │ ├── gemini.py
│ │ │ ├── grok.py
│ │ │ ├── ollama.py
│ │ │ ├── openai.py
│ │ │ └── openrouter.py
│ │ ├── server.py
│ │ ├── state_store.py
│ │ ├── tournaments
│ │ │ ├── manager.py
│ │ │ ├── tasks.py
│ │ │ └── utils.py
│ │ └── ums_api
│ │ ├── __init__.py
│ │ ├── ums_database.py
│ │ ├── ums_endpoints.py
│ │ ├── ums_models.py
│ │ └── ums_services.py
│ ├── exceptions.py
│ ├── graceful_shutdown.py
│ ├── services
│ │ ├── __init__.py
│ │ ├── analytics
│ │ │ ├── __init__.py
│ │ │ ├── metrics.py
│ │ │ └── reporting.py
│ │ ├── cache
│ │ │ ├── __init__.py
│ │ │ ├── cache_service.py
│ │ │ ├── persistence.py
│ │ │ ├── strategies.py
│ │ │ └── utils.py
│ │ ├── cache.py
│ │ ├── document.py
│ │ ├── knowledge_base
│ │ │ ├── __init__.py
│ │ │ ├── feedback.py
│ │ │ ├── manager.py
│ │ │ ├── rag_engine.py
│ │ │ ├── retriever.py
│ │ │ └── utils.py
│ │ ├── prompts
│ │ │ ├── __init__.py
│ │ │ ├── repository.py
│ │ │ └── templates.py
│ │ ├── prompts.py
│ │ └── vector
│ │ ├── __init__.py
│ │ ├── embeddings.py
│ │ └── vector_service.py
│ ├── tool_token_counter.py
│ ├── tools
│ │ ├── __init__.py
│ │ ├── audio_transcription.py
│ │ ├── base.py
│ │ ├── completion.py
│ │ ├── docstring_refiner.py
│ │ ├── document_conversion_and_processing.py
│ │ ├── enhanced-ums-lookbook.html
│ │ ├── entity_relation_graph.py
│ │ ├── excel_spreadsheet_automation.py
│ │ ├── extraction.py
│ │ ├── filesystem.py
│ │ ├── html_to_markdown.py
│ │ ├── local_text_tools.py
│ │ ├── marqo_fused_search.py
│ │ ├── meta_api_tool.py
│ │ ├── ocr_tools.py
│ │ ├── optimization.py
│ │ ├── provider.py
│ │ ├── pyodide_boot_template.html
│ │ ├── python_sandbox.py
│ │ ├── rag.py
│ │ ├── redline-compiled.css
│ │ ├── sentiment_analysis.py
│ │ ├── single_shot_synthesis.py
│ │ ├── smart_browser.py
│ │ ├── sql_databases.py
│ │ ├── text_classification.py
│ │ ├── text_redline_tools.py
│ │ ├── tournament.py
│ │ ├── ums_explorer.html
│ │ └── unified_memory_system.py
│ ├── utils
│ │ ├── __init__.py
│ │ ├── async_utils.py
│ │ ├── display.py
│ │ ├── logging
│ │ │ ├── __init__.py
│ │ │ ├── console.py
│ │ │ ├── emojis.py
│ │ │ ├── formatter.py
│ │ │ ├── logger.py
│ │ │ ├── panels.py
│ │ │ ├── progress.py
│ │ │ └── themes.py
│ │ ├── parse_yaml.py
│ │ ├── parsing.py
│ │ ├── security.py
│ │ └── text.py
│ └── working_memory_api.py
├── unified_memory_system_technical_analysis.md
└── uv.lock
```
# Files
--------------------------------------------------------------------------------
/ultimate_mcp_server/tools/enhanced-ums-lookbook.html:
--------------------------------------------------------------------------------
```html
<!DOCTYPE html>
<html lang="en" data-theme="dark">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<!-- Preconnect to external hosts for faster TCP/TLS handshakes -->
<link rel="preconnect" href="https://fonts.googleapis.com">
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
<link rel="preconnect" href="https://unpkg.com">
<link rel="preconnect" href="https://cdn.jsdelivr.net">
<link rel="preconnect" href="https://cdnjs.cloudflare.com">
<title>UI/UX Look Book: UMS Explorer</title>
<!-- Low-priority favicon -->
<link rel="icon" fetchpriority="low"
href="data:image/svg+xml,<svg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='currentColor' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'><path d='M12 2a3 3 0 0 0-3 3v7a3 3 0 0 0 6 0V5a3 3 0 0 0-3-3Z'/><path d='M5 10a7 7 0 1 0 14 0'/><path d='M15 13a3 3 0 0 0-6 0 v6a3 3 0 0 0 6 0 v-6Z'/></svg>">
<!-- TailwindCSS (CDN for development) -->
<script src="https://cdn.tailwindcss.com"></script>
<script>
// Inline Tailwind CDN configuration for the UMS Explorer look book.
// NOTE(review): darkMode is set to 'class' (expects a `.dark` class on an
// ancestor), but the <html> element above toggles theme via the
// `data-theme` attribute instead — confirm whether any `dark:` variants
// are used later in this document and, if so, whether they ever activate.
tailwind.config = {
darkMode: 'class',
theme: {
extend: {
// Brand palette: partial scales only — just the shades this page uses.
colors: {
primary: { 50: '#f0f9ff', 500: '#3b82f6', 600: '#2563eb', 700: '#1d4ed8', 900: '#1e3a8a' },
secondary: { 500: '#8b5cf6', 600: '#7c3aed' },
accent: { 500: '#ec4899', 600: '#db2777' }
},
// Utility-class names mapped to @keyframes declared in the <style>
// block below (slideIn, fadeIn, scaleIn, pulseRing, shimmer, …).
// Each value is a CSS animation shorthand: name duration timing [iteration].
animation: {
'slide-in': 'slideIn 0.5s ease-out',
'fade-in': 'fadeIn 0.3s ease-out',
'scale-in': 'scaleIn 0.2s ease-out',
'pulse-ring': 'pulseRing 2s infinite',
'shimmer': 'shimmer 2s infinite',
'bounce-subtle': 'bounceSubtle 1s ease-in-out infinite',
'float': 'float 6s ease-in-out infinite',
'glow': 'glow 2s ease-in-out infinite',
'gradient': 'gradient 15s ease infinite',
'wave': 'wave 10s ease-in-out infinite',
'morph': 'morph 8s ease-in-out infinite',
'rotate-slow': 'rotateSlow 20s linear infinite'
},
// Extra-small backdrop blur step (generates `backdrop-blur-xs`).
backdropBlur: { xs: '2px' }
}
}
}
</script>
<!-- DaisyUI -->
<link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/full.min.css" rel="stylesheet" type="text/css" />
<!-- Lucide Icons -->
<script defer src="https://unpkg.com/lucide@latest/dist/umd/lucide.min.js"></script>
<!-- Tippy.js for tooltips -->
<script defer src="https://unpkg.com/@popperjs/core@2"></script>
<script defer src="https://unpkg.com/tippy.js@6"></script>
<link rel="stylesheet" href="https://unpkg.com/tippy.js@6/animations/scale.css">
<!-- AOS (Animate On Scroll) -->
<link href="https://unpkg.com/[email protected]/dist/aos.css" rel="stylesheet">
<script defer src="https://unpkg.com/[email protected]/dist/aos.js"></script>
<!-- Particles.js -->
<script defer src="https://cdn.jsdelivr.net/particles.js/2.0.0/particles.min.js"></script>
<!-- GSAP for advanced animations -->
<script defer src="https://cdnjs.cloudflare.com/ajax/libs/gsap/3.12.2/gsap.min.js"></script>
<!-- Rough Notation for annotations -->
<script defer src="https://unpkg.com/rough-notation/lib/rough-notation.iife.js"></script>
<!-- Vanilla Tilt for 3D hover effects -->
<script defer src="https://unpkg.com/[email protected]/dist/vanilla-tilt.min.js"></script>
<!-- Non-blocking font loading -->
<link rel="preload"
href="https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700;800;900&display=swap" as="style"
onload="this.rel='stylesheet'">
<link rel="preload" href="https://fonts.googleapis.com/css2?family=Space+Grotesk:wght@400;500;600;700&display=swap"
as="style" onload="this.rel='stylesheet'">
<link rel="preload" href="https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@400;500&display=swap"
as="style" onload="this.rel='stylesheet'">
<noscript>
<link rel="stylesheet"
href="https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700;800;900&family=Space+Grotesk:wght@400;500;600;700&family=JetBrains+Mono:wght@400;500&display=swap">
</noscript>
<style>
/* @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700;800;900&family=Space+Grotesk:wght@400;500;600;700&family=JetBrains+Mono:wght@400;500&display=swap'); */
:root {
--color-primary: #3b82f6;
--color-secondary: #8b5cf6;
--color-accent: #ec4899;
--color-success: #10b981;
--color-warning: #f59e0b;
--color-error: #ef4444;
--gradient-primary: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
--gradient-secondary: linear-gradient(135deg, #f093fb 0%, #f5576c 100%);
--gradient-accent: linear-gradient(135deg, #4facfe 0%, #00f2fe 100%);
--gradient-dark: linear-gradient(135deg, #1a1c2e 0%, #2d1b69 100%);
}
[data-theme="light"] {
--bg-primary: #ffffff;
--bg-secondary: #f8fafc;
--bg-tertiary: #f1f5f9;
--text-primary: #1e293b;
--text-secondary: #475569;
--text-tertiary: #64748b;
--border-color: #e2e8f0;
--glass-bg: rgba(255, 255, 255, 0.8);
--card-bg: rgba(255, 255, 255, 0.9);
}
[data-theme="dark"] {
--bg-primary: #0f172a;
--bg-secondary: #1e293b;
--bg-tertiary: #334155;
--text-primary: #f1f5f9;
--text-secondary: #e2e8f0;
--text-tertiary: #cbd5e1;
--border-color: rgba(255, 255, 255, 0.1);
--glass-bg: rgba(15, 23, 42, 0.8);
--card-bg: rgba(15, 23, 42, 0.9);
}
* {
font-family: 'Inter', sans-serif;
}
code,
pre {
font-family: 'JetBrains Mono', monospace;
}
h1,
h2,
h3 {
font-family: 'Space Grotesk', sans-serif;
}
/* Enhanced animations */
@keyframes slideIn {
from {
transform: translateY(30px);
opacity: 0;
}
to {
transform: translateY(0);
opacity: 1;
}
}
@keyframes fadeIn {
from {
opacity: 0;
}
to {
opacity: 1;
}
}
@keyframes scaleIn {
from {
transform: scale(0.9);
opacity: 0;
}
to {
transform: scale(1);
opacity: 1;
}
}
@keyframes pulseRing {
0% {
transform: scale(0.8);
opacity: 1;
}
80%,
100% {
transform: scale(1.2);
opacity: 0;
}
}
@keyframes shimmer {
0% {
background-position: -200px 0;
}
100% {
background-position: calc(200px + 100%) 0;
}
}
@keyframes bounceSubtle {
0%,
100% {
transform: translateY(0);
}
50% {
transform: translateY(-4px);
}
}
@keyframes float {
0%,
100% {
transform: translateY(0px);
}
50% {
transform: translateY(-20px);
}
}
@keyframes glow {
0%,
100% {
opacity: 1;
}
50% {
opacity: 0.5;
}
}
@keyframes gradient {
0% {
background-position: 0% 50%;
}
50% {
background-position: 100% 50%;
}
100% {
background-position: 0% 50%;
}
}
@keyframes wave {
0%,
100% {
transform: rotate(-3deg);
}
50% {
transform: rotate(3deg);
}
}
@keyframes morph {
0% {
border-radius: 60% 40% 30% 70% / 60% 30% 70% 40%;
}
50% {
border-radius: 30% 60% 70% 40% / 50% 60% 30% 60%;
}
100% {
border-radius: 60% 40% 30% 70% / 60% 30% 70% 40%;
}
}
@keyframes rotateSlow {
from {
transform: rotate(0deg);
}
to {
transform: rotate(360deg);
}
}
/* Enhanced body background */
body {
position: relative;
overflow-x: hidden;
}
body::before {
content: '';
position: fixed;
top: 0;
left: 0;
right: 0;
bottom: 0;
background:
radial-gradient(circle at 20% 80%, rgba(120, 119, 198, 0.3), transparent 50%),
radial-gradient(circle at 80% 20%, rgba(255, 119, 198, 0.3), transparent 50%),
radial-gradient(circle at 40% 40%, rgba(120, 219, 255, 0.2), transparent 50%);
z-index: -2;
}
/* Particle container */
#particles-js {
position: fixed;
width: 100%;
height: 100%;
top: 0;
left: 0;
z-index: -1;
}
/* Enhanced glass effects */
[data-theme="dark"] .glass {
background: linear-gradient(135deg, rgba(255, 255, 255, 0.1) 0%, rgba(255, 255, 255, 0.05) 100%);
backdrop-filter: blur(20px) saturate(200%);
-webkit-backdrop-filter: blur(20px) saturate(200%);
border: 1px solid rgba(255, 255, 255, 0.18);
box-shadow: 0 8px 32px 0 rgba(31, 38, 135, 0.37);
}
[data-theme="light"] .glass {
background: linear-gradient(135deg, rgba(255, 255, 255, 0.9) 0%, rgba(255, 255, 255, 0.7) 100%);
backdrop-filter: blur(20px) saturate(200%);
-webkit-backdrop-filter: blur(20px) saturate(200%);
border: 1px solid rgba(209, 213, 219, 0.3);
box-shadow: 0 8px 32px 0 rgba(31, 38, 135, 0.15);
}
[data-theme="dark"] .glass-dark {
background: linear-gradient(135deg, rgba(0, 0, 0, 0.4) 0%, rgba(0, 0, 0, 0.2) 100%);
backdrop-filter: blur(30px) saturate(200%);
-webkit-backdrop-filter: blur(30px) saturate(200%);
border: 1px solid rgba(255, 255, 255, 0.1);
box-shadow: inset 0 0 0 1px rgba(255, 255, 255, 0.1);
}
[data-theme="light"] .glass-dark {
background: linear-gradient(135deg, rgba(255, 255, 255, 0.95) 0%, rgba(255, 255, 255, 0.85) 100%);
backdrop-filter: blur(30px) saturate(200%);
-webkit-backdrop-filter: blur(30px) saturate(200%);
border: 1px solid rgba(0, 0, 0, 0.1);
box-shadow: 0 8px 32px 0 rgba(31, 38, 135, 0.1);
}
/* Enhanced cards with gradient borders */
[data-theme="dark"] .enhanced-card {
background: linear-gradient(145deg, rgba(255, 255, 255, 0.1), rgba(255, 255, 255, 0.05));
backdrop-filter: blur(20px) saturate(200%);
-webkit-backdrop-filter: blur(20px) saturate(200%);
border: 1px solid transparent;
position: relative;
transition: all 0.4s cubic-bezier(0.4, 0, 0.2, 1);
box-shadow: 0 10px 40px rgba(0, 0, 0, 0.2);
contain: layout style;
}
[data-theme="dark"] .enhanced-card::before {
content: '';
position: absolute;
inset: 0;
border-radius: inherit;
padding: 1px;
background: linear-gradient(135deg, rgba(255, 255, 255, 0.2), rgba(255, 255, 255, 0.05));
-webkit-mask: linear-gradient(#fff 0 0) content-box, linear-gradient(#fff 0 0);
-webkit-mask-composite: xor;
mask-composite: exclude;
opacity: 0.5;
transition: opacity 0.4s ease;
}
[data-theme="dark"] .enhanced-card:hover {
transform: translateY(-2px) scale(1.01);
box-shadow: 0 20px 60px rgba(0, 0, 0, 0.3);
}
[data-theme="dark"] .enhanced-card:hover::before {
opacity: 1;
}
[data-theme="light"] .enhanced-card {
background: linear-gradient(145deg, rgba(255, 255, 255, 0.95), rgba(255, 255, 255, 0.85));
backdrop-filter: blur(20px) saturate(200%);
-webkit-backdrop-filter: blur(20px) saturate(200%);
border: 1px solid rgba(0, 0, 0, 0.05);
transition: all 0.4s cubic-bezier(0.4, 0, 0.2, 1);
box-shadow: 0 10px 40px rgba(0, 0, 0, 0.08);
contain: layout style;
}
[data-theme="light"] .enhanced-card:hover {
transform: translateY(-2px) scale(1.01);
box-shadow: 0 20px 60px rgba(0, 0, 0, 0.12);
border-color: rgba(59, 130, 246, 0.3);
}
/* Memory type gradients with glow */
.memory-working {
border-left: 4px solid #f59e0b;
background: linear-gradient(90deg, rgba(245, 158, 11, 0.15), transparent);
box-shadow: inset 0 0 20px rgba(245, 158, 11, 0.1);
}
.memory-episodic {
border-left: 4px solid #3b82f6;
background: linear-gradient(90deg, rgba(59, 130, 246, 0.15), transparent);
box-shadow: inset 0 0 20px rgba(59, 130, 246, 0.1);
}
.memory-semantic {
border-left: 4px solid #10b981;
background: linear-gradient(90deg, rgba(16, 185, 129, 0.15), transparent);
box-shadow: inset 0 0 20px rgba(16, 185, 129, 0.1);
}
.memory-procedural {
border-left: 4px solid #8b5cf6;
background: linear-gradient(90deg, rgba(139, 92, 246, 0.15), transparent);
box-shadow: inset 0 0 20px rgba(139, 92, 246, 0.1);
}
/* Enhanced skeleton loader */
.skeleton {
background: linear-gradient(90deg,
rgba(255, 255, 255, 0.05) 25%,
rgba(255, 255, 255, 0.1) 50%,
rgba(255, 255, 255, 0.05) 75%);
background-size: 200px 100%;
animation: shimmer 1.5s infinite;
}
[data-theme="dark"] .skeleton {
background: linear-gradient(90deg,
rgba(255, 255, 255, 0.05) 25%,
rgba(255, 255, 255, 0.1) 50%,
rgba(255, 255, 255, 0.05) 75%);
}
/* Enhanced scrollbar */
::-webkit-scrollbar {
width: 10px;
height: 10px;
}
::-webkit-scrollbar-track {
background: rgba(0, 0, 0, 0.1);
border-radius: 5px;
}
::-webkit-scrollbar-thumb {
background: linear-gradient(135deg, rgba(59, 130, 246, 0.5), rgba(139, 92, 246, 0.5));
border-radius: 5px;
transition: all 0.3s ease;
}
::-webkit-scrollbar-thumb:hover {
background: linear-gradient(135deg, rgba(59, 130, 246, 0.8), rgba(139, 92, 246, 0.8));
}
/* Enhanced status indicators */
.status-indicator {
position: relative;
display: inline-block;
}
.status-indicator::before {
content: '';
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
border-radius: inherit;
animation: pulseRing 2s infinite;
}
.status-active::before {
background: var(--color-success);
box-shadow: 0 0 20px var(--color-success);
}
.status-completed::before {
background: var(--color-primary);
box-shadow: 0 0 20px var(--color-primary);
}
.status-failed::before {
background: var(--color-error);
box-shadow: 0 0 20px var(--color-error);
}
.status-paused::before {
background: var(--color-warning);
box-shadow: 0 0 20px var(--color-warning);
}
/* Enhanced data table */
.data-table {
background: rgba(255, 255, 255, 0.03);
backdrop-filter: blur(20px) saturate(200%);
-webkit-backdrop-filter: blur(20px) saturate(200%);
border-radius: 20px;
overflow: hidden;
box-shadow: 0 10px 40px rgba(0, 0, 0, 0.2);
}
.data-table th {
background: rgba(255, 255, 255, 0.08);
backdrop-filter: blur(20px);
-webkit-backdrop-filter: blur(20px);
position: sticky;
top: 0;
z-index: 10;
font-weight: 600;
text-transform: uppercase;
letter-spacing: 0.05em;
font-size: 0.75rem;
}
.data-table tr {
transition: all 0.3s ease;
}
.data-table tr:hover {
background: rgba(59, 130, 246, 0.1);
transform: scale(1.01);
}
/* Enhanced FAB */
.fab {
position: fixed;
bottom: 2rem;
right: 2rem;
width: 64px;
height: 64px;
border-radius: 50%;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
color: white;
border: none;
box-shadow: 0 10px 30px rgba(102, 126, 234, 0.5);
cursor: pointer;
transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
z-index: 100;
display: flex;
align-items: center;
justify-content: center;
overflow: hidden;
will-change: transform;
}
.fab::before {
content: '';
position: absolute;
inset: 0;
background: radial-gradient(circle at center, transparent 0%, rgba(255, 255, 255, 0.2) 100%);
opacity: 0;
transition: opacity 0.3s ease;
}
.fab:hover {
transform: scale(1.1) rotate(15deg);
box-shadow: 0 15px 40px rgba(102, 126, 234, 0.7);
}
.fab:hover::before {
opacity: 1;
}
.fab:active {
transform: scale(0.95);
}
/* Enhanced command palette */
.command-palette {
position: fixed;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
width: 90%;
max-width: 600px;
backdrop-filter: blur(40px) saturate(200%);
-webkit-backdrop-filter: blur(40px) saturate(200%);
border-radius: 24px;
z-index: 1000;
transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
box-shadow: 0 25px 100px rgba(0, 0, 0, 0.5);
}
[data-theme="dark"] .command-palette {
background: linear-gradient(135deg, rgba(15, 23, 42, 0.98) 0%, rgba(30, 41, 59, 0.98) 100%);
border: 1px solid rgba(255, 255, 255, 0.1);
color: #e5e7eb;
}
[data-theme="light"] .command-palette {
background: linear-gradient(135deg, rgba(255, 255, 255, 0.98) 0%, rgba(248, 250, 252, 0.98) 100%);
border: 1px solid rgba(0, 0, 0, 0.1);
color: #1e293b;
}
/* Enhanced chart containers */
.chart-container {
background: rgba(255, 255, 255, 0.03);
backdrop-filter: blur(20px) saturate(200%);
-webkit-backdrop-filter: blur(20px) saturate(200%);
border-radius: 20px;
padding: 2rem;
box-shadow: inset 0 0 20px rgba(255, 255, 255, 0.05);
position: relative;
overflow: hidden;
}
.chart-container::before {
content: '';
position: absolute;
top: -50%;
right: -50%;
width: 200%;
height: 200%;
background: radial-gradient(circle, rgba(59, 130, 246, 0.1) 0%, transparent 70%);
animation: rotate-slow 20s linear infinite;
}
/* Enhanced markdown content */
.markdown-content {
line-height: 1.8;
word-wrap: break-word;
}
.markdown-content h1,
.markdown-content h2,
.markdown-content h3 {
margin-top: 2em;
margin-bottom: 0.75em;
font-weight: 700;
line-height: 1.3;
background: linear-gradient(135deg, #e5e7eb 0%, #9ca3af 100%);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
background-clip: text;
}
.markdown-content h1 {
font-size: 2.5em;
border-bottom: 2px solid rgba(255, 255, 255, 0.1);
padding-bottom: 0.3em;
}
.markdown-content h2 {
font-size: 2em;
border-bottom: 1px solid rgba(255, 255, 255, 0.05);
padding-bottom: 0.3em;
}
.markdown-content code {
padding: 0.3em 0.6em;
margin: 0 0.2em;
font-size: 85%;
background: linear-gradient(135deg, rgba(59, 130, 246, 0.15), rgba(139, 92, 246, 0.15));
border: 1px solid rgba(59, 130, 246, 0.3);
border-radius: 0.375rem;
font-weight: 500;
}
.markdown-content pre {
padding: 1.5em;
overflow: auto;
font-size: 85%;
line-height: 1.6;
background: linear-gradient(135deg, rgba(0, 0, 0, 0.4), rgba(0, 0, 0, 0.2));
border: 1px solid rgba(255, 255, 255, 0.1);
border-radius: 0.75rem;
margin: 1.5em 0;
box-shadow: inset 0 2px 10px rgba(0, 0, 0, 0.2);
}
/* Enhanced masonry grid */
.masonry-grid {
display: grid;
grid-template-columns: repeat(auto-fill, minmax(300px, 1fr));
gap: 2rem;
align-items: start;
}
.masonry-item {
break-inside: avoid;
transition: all 0.4s cubic-bezier(0.4, 0, 0.2, 1);
will-change: transform;
}
.masonry-item:hover {
transform: translateY(-8px) scale(1.02);
box-shadow: 0 30px 60px rgba(0, 0, 0, 0.4);
}
/* Type-specific colors with gradients */
.type-image {
background: linear-gradient(135deg, #10b981, #059669);
box-shadow: 0 0 20px rgba(16, 185, 129, 0.3);
}
.type-document {
background: linear-gradient(135deg, #3b82f6, #1d4ed8);
box-shadow: 0 0 20px rgba(59, 130, 246, 0.3);
}
.type-code {
background: linear-gradient(135deg, #10b981, #059669);
box-shadow: 0 0 20px rgba(16, 185, 129, 0.3);
}
.type-data {
background: linear-gradient(135deg, #8b5cf6, #7c3aed);
box-shadow: 0 0 20px rgba(139, 92, 246, 0.3);
}
.type-model {
background: linear-gradient(135deg, #f59e0b, #d97706);
box-shadow: 0 0 20px rgba(245, 158, 11, 0.3);
}
.type-default {
background: linear-gradient(135deg, #6b7280, #4b5563);
box-shadow: 0 0 20px rgba(107, 114, 128, 0.3);
}
/* Text truncation */
.line-clamp-2 {
display: -webkit-box;
-webkit-line-clamp: 2;
-webkit-box-orient: vertical;
overflow: hidden;
}
/* Enhanced thought chain container */
.thought-chain-container {
background: linear-gradient(135deg, rgba(0, 0, 0, 0.3), rgba(0, 0, 0, 0.1));
backdrop-filter: blur(30px) saturate(200%);
-webkit-backdrop-filter: blur(30px) saturate(200%);
border-radius: 24px;
position: relative;
overflow: hidden;
min-height: 70vh;
box-shadow: 0 20px 60px rgba(0, 0, 0, 0.3);
}
.mermaid-container {
width: 100%;
height: 100%;
overflow: auto;
padding: 2rem;
position: relative;
}
/* Enhanced timeline scrubber */
.timeline-scrubber {
position: absolute;
bottom: 0;
left: 0;
right: 0;
background: linear-gradient(180deg, transparent, rgba(0, 0, 0, 0.95));
backdrop-filter: blur(30px) saturate(200%);
-webkit-backdrop-filter: blur(30px) saturate(200%);
padding: 2rem;
border-top: 1px solid rgba(255, 255, 255, 0.1);
}
.timeline-slider {
width: 100%;
height: 10px;
border-radius: 5px;
background: linear-gradient(90deg, #1e293b, #475569);
outline: none;
opacity: 0.8;
transition: all 0.3s ease;
cursor: pointer;
-webkit-appearance: none;
appearance: none;
}
.timeline-slider:hover {
opacity: 1;
height: 12px;
}
.timeline-slider::-webkit-slider-thumb {
-webkit-appearance: none;
appearance: none;
width: 24px;
height: 24px;
border-radius: 50%;
background: linear-gradient(135deg, #3b82f6, #8b5cf6);
cursor: pointer;
box-shadow: 0 4px 12px rgba(59, 130, 246, 0.6);
transition: all 0.3s ease;
}
.timeline-slider::-webkit-slider-thumb:hover {
transform: scale(1.2);
box-shadow: 0 6px 20px rgba(59, 130, 246, 0.8);
}
.timeline-slider::-moz-range-thumb {
width: 24px;
height: 24px;
border-radius: 50%;
background: linear-gradient(135deg, #3b82f6, #8b5cf6);
cursor: pointer;
border: none;
box-shadow: 0 4px 12px rgba(59, 130, 246, 0.6);
transition: all 0.3s ease;
}
.timeline-slider::-moz-range-thumb:hover {
transform: scale(1.2);
box-shadow: 0 6px 20px rgba(59, 130, 246, 0.8);
}
/* Enhanced playback controls */
.playback-controls {
display: flex;
align-items: center;
justify-content: center;
gap: 1rem;
margin-bottom: 1.5rem;
}
.playback-btn {
width: 56px;
height: 56px;
border-radius: 50%;
background: linear-gradient(135deg, #3b82f6, #8b5cf6);
border: none;
color: white;
cursor: pointer;
transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
display: flex;
align-items: center;
justify-content: center;
box-shadow: 0 6px 20px rgba(59, 130, 246, 0.4);
position: relative;
overflow: hidden;
}
.playback-btn::before {
content: '';
position: absolute;
inset: 0;
background: radial-gradient(circle at center, transparent 0%, rgba(255, 255, 255, 0.2) 100%);
opacity: 0;
transition: opacity 0.3s ease;
}
.playback-btn:hover {
transform: scale(1.05);
box-shadow: 0 8px 25px rgba(59, 130, 246, 0.6);
}
.playback-btn:hover::before {
opacity: 1;
}
.playback-btn:active {
transform: scale(0.95);
}
.playback-btn.secondary {
background: linear-gradient(135deg, #6b7280, #4b5563);
box-shadow: 0 6px 20px rgba(107, 114, 128, 0.4);
}
.speed-control {
display: flex;
align-items: center;
gap: 0.5rem;
margin-left: 2rem;
}
.speed-btn {
padding: 0.5rem 1rem;
background: rgba(255, 255, 255, 0.1);
border: 1px solid rgba(255, 255, 255, 0.2);
border-radius: 8px;
color: #e5e7eb;
font-size: 0.875rem;
font-weight: 500;
cursor: pointer;
transition: all 0.3s ease;
}
.speed-btn:hover {
background: rgba(255, 255, 255, 0.15);
border-color: rgba(255, 255, 255, 0.3);
}
.speed-btn.active {
background: linear-gradient(135deg, #3b82f6, #8b5cf6);
border-color: transparent;
color: white;
box-shadow: 0 4px 12px rgba(59, 130, 246, 0.4);
}
/* Enhanced sidebars */
.chain-sidebar,
.graph-sidebar {
background: linear-gradient(135deg, rgba(255, 255, 255, 0.08), rgba(255, 255, 255, 0.03));
backdrop-filter: blur(30px) saturate(200%);
-webkit-backdrop-filter: blur(30px) saturate(200%);
border: 1px solid rgba(255, 255, 255, 0.1);
border-radius: 20px;
padding: 2rem;
height: fit-content;
max-height: calc(100vh - 16rem);
overflow-y: auto;
box-shadow: 0 10px 40px rgba(0, 0, 0, 0.2);
}
.chain-metadata,
.thought-details {
background: rgba(255, 255, 255, 0.05);
padding: 1.25rem;
border-radius: 16px;
margin-bottom: 1.25rem;
border: 1px solid rgba(255, 255, 255, 0.05);
}
.thought-details {
border-left: 4px solid #3b82f6;
background: linear-gradient(90deg, rgba(59, 130, 246, 0.1), transparent);
}
.chain-stats {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(120px, 1fr));
gap: 1.25rem;
margin-bottom: 2rem;
}
.chain-stat {
text-align: center;
padding: 1.25rem;
background: linear-gradient(135deg, rgba(255, 255, 255, 0.08), rgba(255, 255, 255, 0.03));
border-radius: 16px;
border: 1px solid rgba(255, 255, 255, 0.05);
transition: all 0.3s ease;
}
.chain-stat:hover {
transform: translateY(-2px);
box-shadow: 0 8px 20px rgba(0, 0, 0, 0.2);
}
.chain-stat-value {
font-size: 2rem;
font-weight: 800;
background: linear-gradient(135deg, #3b82f6, #8b5cf6);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
background-clip: text;
}
.chain-stat-label {
font-size: 0.75rem;
color: #9ca3af;
text-transform: uppercase;
letter-spacing: 0.1em;
font-weight: 600;
}
/* Enhanced graph container */
.graph-container {
background: linear-gradient(135deg, rgba(0, 0, 0, 0.3), rgba(0, 0, 0, 0.1));
backdrop-filter: blur(30px) saturate(200%);
-webkit-backdrop-filter: blur(30px) saturate(200%);
border-radius: 24px;
position: relative;
overflow: hidden;
box-shadow: 0 20px 60px rgba(0, 0, 0, 0.3);
}
.graph-svg {
width: 100%;
height: 100%;
cursor: grab;
}
.graph-svg:active {
cursor: grabbing;
}
.graph-node {
cursor: pointer;
stroke-width: 3px;
filter: drop-shadow(0 4px 8px rgba(0, 0, 0, 0.4));
transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
}
.graph-node:hover {
stroke-width: 4px;
filter: drop-shadow(0 8px 16px rgba(0, 0, 0, 0.6));
transform: scale(1.1);
}
.graph-node.selected {
stroke-width: 5px;
filter: drop-shadow(0 12px 24px rgba(59, 130, 246, 0.8));
animation: pulse 2s infinite;
}
@keyframes pulse {
0% {
transform: scale(1);
}
50% {
transform: scale(1.05);
}
100% {
transform: scale(1);
}
}
.graph-link {
stroke-opacity: 0.4;
transition: all 0.3s ease;
stroke-linecap: round;
}
.graph-link:hover {
stroke-opacity: 0.8;
stroke-width: 3px;
}
.graph-text {
font-family: 'Space Grotesk', sans-serif;
font-size: 12px;
font-weight: 600;
fill: #e5e7eb;
text-anchor: middle;
pointer-events: none;
text-shadow: 2px 2px 4px rgba(0, 0, 0, 0.9);
}
/* Enhanced graph controls */
.graph-controls {
position: absolute;
top: 1.5rem;
left: 1.5rem;
z-index: 10;
display: flex;
flex-direction: column;
gap: 0.75rem;
}
.graph-controls button {
width: 48px;
height: 48px;
border-radius: 12px;
background: rgba(0, 0, 0, 0.6);
backdrop-filter: blur(20px);
-webkit-backdrop-filter: blur(20px);
border: 1px solid rgba(255, 255, 255, 0.1);
color: white;
cursor: pointer;
transition: all 0.3s ease;
display: flex;
align-items: center;
justify-content: center;
}
.graph-controls button:hover {
background: rgba(59, 130, 246, 0.4);
border-color: rgba(59, 130, 246, 0.6);
transform: scale(1.05);
}
/* Enhanced graph legend */
.graph-legend {
position: absolute;
top: 1.5rem;
right: 1.5rem;
z-index: 10;
background: linear-gradient(135deg, rgba(0, 0, 0, 0.9), rgba(0, 0, 0, 0.7));
backdrop-filter: blur(30px) saturate(200%);
-webkit-backdrop-filter: blur(30px) saturate(200%);
border-radius: 16px;
padding: 1.5rem;
min-width: 220px;
box-shadow: 0 10px 40px rgba(0, 0, 0, 0.4);
border: 1px solid rgba(255, 255, 255, 0.1);
}
.legend-item {
display: flex;
align-items: center;
margin-bottom: 0.75rem;
font-size: 0.875rem;
transition: all 0.3s ease;
}
.legend-item:hover {
transform: translateX(4px);
}
.legend-color {
width: 20px;
height: 20px;
border-radius: 50%;
margin-right: 0.75rem;
border: 2px solid rgba(255, 255, 255, 0.3);
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.3);
}
/* Enhanced graph tooltip */
/* Floating node tooltip. pointer-events: none keeps it from stealing
   hover from the graph element underneath; high z-index keeps it above
   the legend and controls. */
.graph-tooltip {
    position: absolute;
    background: linear-gradient(135deg, rgba(0, 0, 0, 0.95), rgba(15, 23, 42, 0.95));
    backdrop-filter: blur(30px) saturate(200%);
    -webkit-backdrop-filter: blur(30px) saturate(200%); /* Safari prefix */
    color: white;
    padding: 1rem;
    border-radius: 12px;
    font-size: 0.875rem;
    pointer-events: none;
    z-index: 1000;
    max-width: 320px;
    box-shadow: 0 10px 30px rgba(0, 0, 0, 0.5);
    border: 1px solid rgba(255, 255, 255, 0.1);
}
/* Enhanced spinner */
/* Loading ring: a translucent blue track with a transparent top segment.
   The two-layer background plus background-clip/origin paints a gradient
   into the border box (the "gradient border" technique), giving the
   rotating arc its blue-to-purple sweep. */
.spinner {
    width: 48px;
    height: 48px;
    border: 3px solid rgba(59, 130, 246, 0.2);
    border-top: 3px solid transparent;
    border-radius: 50%;
    background: linear-gradient(transparent, transparent),
    linear-gradient(90deg, #3b82f6, #8b5cf6);
    background-clip: padding-box, border-box;
    background-origin: padding-box, border-box;
    animation: spin 1s linear infinite;
}
/* Full rotation driving the spinner above. */
@keyframes spin {
    0% {
        transform: rotate(0deg);
    }
    100% {
        transform: rotate(360deg);
    }
}
/* Enhanced tippy tooltips */
/* Custom Tippy.js theme, selected per element with data-tippy-theme="custom". */
.tippy-box[data-theme~='custom'] {
    background: linear-gradient(135deg, rgba(30, 30, 46, 0.98) 0%, rgba(49, 46, 129, 0.98) 100%);
    backdrop-filter: blur(30px) saturate(200%);
    -webkit-backdrop-filter: blur(30px) saturate(200%); /* Safari prefix */
    border: 1px solid rgba(255, 255, 255, 0.15);
    border-radius: 20px;
    box-shadow: 0 30px 60px -15px rgba(0, 0, 0, 0.7);
    font-size: 14px;
    max-width: 380px;
}
.tippy-box[data-theme~='custom'] .tippy-content {
    padding: 24px;
    color: #ffffff;
    line-height: 1.6;
}
/* Keep the arrow color in sync with the tooltip's gradient start color
   (rgba(30, 30, 46, 0.98) above) so the arrow blends into the box. */
.tippy-box[data-theme~='custom'] .tippy-arrow {
    color: rgba(30, 30, 46, 0.98);
}
/* Typography for rich (allowHTML) tooltip bodies. */
.tooltip-content h4 {
    font-size: 18px;
    margin-bottom: 12px;
    font-weight: 700;
    /* Gradient text: clip the background to the glyphs, hide the fill. */
    background: linear-gradient(135deg, #e5e7eb, #9ca3af);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
    background-clip: text;
}
.tooltip-content p {
    margin-bottom: 12px;
    font-size: 14px;
    opacity: 0.95;
}
.tooltip-content strong {
    color: #f1f5f9;
    font-weight: 600;
}
/* Enhanced demo sections */
/* Large glassmorphic panel wrapping each pattern-library section.
   content-visibility: auto lets the browser skip rendering off-screen
   sections; contain-intrinsic-size reserves an estimated footprint so
   the scrollbar doesn't jump while sections render in. */
.demo-section {
    padding: 3rem;
    margin-bottom: 3rem;
    border-radius: 24px;
    border: 1px solid rgba(255, 255, 255, 0.08);
    background: linear-gradient(135deg, rgba(255, 255, 255, 0.03), rgba(255, 255, 255, 0.01));
    backdrop-filter: blur(20px) saturate(150%);
    -webkit-backdrop-filter: blur(20px) saturate(150%); /* Safari prefix */
    box-shadow: 0 20px 60px rgba(0, 0, 0, 0.2);
    position: relative;
    overflow: hidden;
    content-visibility: auto;
    contain-intrinsic-size: 1000px 800px;
}
/* 1px highlight line across the top edge of each section. */
.demo-section::before {
    content: '';
    position: absolute;
    top: 0;
    left: 0;
    right: 0;
    height: 1px;
    background: linear-gradient(90deg, transparent, rgba(255, 255, 255, 0.2), transparent);
}
/* Section heading rendered as gradient text (clip + transparent fill). */
.demo-section-title {
    font-size: 2rem;
    font-weight: 800;
    margin-bottom: 0.75rem;
    padding-bottom: 0.75rem;
    background: linear-gradient(135deg, #e5e7eb, #9ca3af);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
    background-clip: text;
    position: relative;
}
/* Short gradient underline beneath the section title. */
.demo-section-title::after {
    content: '';
    position: absolute;
    bottom: 0;
    left: 0;
    width: 100px;
    height: 3px;
    background: linear-gradient(90deg, #3b82f6, #8b5cf6);
    border-radius: 2px;
}
/* Lead paragraph; the 80ch cap keeps the measure readable. */
.demo-section-description {
    font-size: 1.125rem;
    color: #cbd5e1;
    margin-bottom: 2rem;
    max-width: 80ch;
    line-height: 1.7;
}
/* Hover lift effect for interactive elements */
.interactive {
    transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
}
.interactive:hover {
    transform: translateY(-2px);
    box-shadow: 0 10px 30px rgba(0, 0, 0, 0.3);
}
/* Gradient text effect */
.gradient-text {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
    background-clip: text;
}
/* Glow effect for important elements */
.glow {
    box-shadow: 0 0 20px rgba(59, 130, 246, 0.5);
}
/* Noise texture overlay */
/* Subtle film-grain: an inline-SVG feTurbulence tile layered over the
   host. Effective opacity is tiny (0.02 inside the SVG x 0.03 here).
   NOTE(review): the absolutely-positioned pseudo anchors to the nearest
   positioned ancestor — fine for the body host used in this page;
   confirm other hosts establish a containing block. */
.noise::before {
    content: '';
    position: absolute;
    top: 0;
    left: 0;
    right: 0;
    bottom: 0;
    background-image: url("data:image/svg+xml,%3Csvg viewBox='0 0 256 256' xmlns='http://www.w3.org/2000/svg'%3E%3Cfilter id='noiseFilter'%3E%3CfeTurbulence type='fractalNoise' baseFrequency='0.65' numOctaves='3' stitchTiles='stitch'/%3E%3C/filter%3E%3Crect width='100%25' height='100%25' filter='url(%23noiseFilter)' opacity='0.02'/%3E%3C/svg%3E");
    pointer-events: none;
    opacity: 0.03;
}
/* Custom focus states */
/* Suppress the native outline everywhere, but draw the custom ring only
   for keyboard (and other non-pointer) focus via :focus-visible. The
   original `*:focus` rule flashed the ring on every mouse click as well,
   which is the classic focus-ring anti-pattern; :focus-visible keeps the
   ring for keyboard users while leaving pointer interaction clean. */
*:focus {
    outline: none;
}
*:focus-visible {
    outline: none;
    box-shadow: 0 0 0 3px rgba(59, 130, 246, 0.5);
}
/* Smooth scroll behavior */
/* Only animate programmatic/anchor scrolling for users who have not
   asked the OS to reduce motion — forcing smooth scroll on
   reduced-motion users is an accessibility violation. */
@media (prefers-reduced-motion: no-preference) {
    html {
        scroll-behavior: smooth;
    }
}
/* Selection color */
/* Purple-tinted text selection to match the page's accent palette. */
::selection {
    background: rgba(139, 92, 246, 0.3);
    color: white;
}
/* Loading states */
/* Shimmer placeholder: a translucent band sweeps left-to-right across
   the element. NOTE(review): relies on a `shimmer` keyframes rule
   defined elsewhere in this stylesheet — confirm it exists. */
.loading {
    position: relative;
    overflow: hidden;
}
.loading::after {
    content: '';
    position: absolute;
    top: 0;
    left: -100%;
    width: 100%;
    height: 100%;
    background: linear-gradient(90deg, transparent, rgba(255, 255, 255, 0.1), transparent);
    animation: shimmer 2s infinite;
}
/* Responsive adjustments */
@media (max-width: 768px) {
    /* Tighter section padding on small screens. */
    .demo-section {
        padding: 2rem 1rem;
    }
    /* Collapse the artifact masonry layout to a single column. */
    .masonry-grid {
        grid-template-columns: 1fr;
    }
}
/* Scrolled header state */
/* Adds depth under the sticky header once the page scrolls.
   NOTE(review): the class is toggled by script outside this chunk. */
header.scrolled {
    box-shadow: 0 10px 30px rgba(0, 0, 0, 0.3);
}
</style>
</head>
<body class="transition-colors duration-300 bg-gradient-to-br from-slate-900 via-blue-900 to-slate-900 text-white noise"
style="background: linear-gradient(135deg, rgb(15, 23, 42), rgb(30, 58, 138), rgb(15, 23, 42));">
<!-- Particle Background -->
<div id="particles-js"></div>
<!-- Header (Static) -->
<header class="glass-dark border-b border-white/10 sticky top-0 z-40">
<div class="container mx-auto px-4 py-4">
<div class="flex items-center justify-between">
<div class="flex items-center space-x-4">
<div class="flex items-center space-x-3 cursor-pointer group" data-tilt data-tilt-max="10"
data-tilt-speed="400">
<div class="relative">
<div
class="w-12 h-12 rounded-xl bg-gradient-to-r from-blue-500 to-purple-600 flex items-center justify-center group-hover:scale-110 transition-transform shadow-lg">
<i data-lucide="brain-circuit" class="w-7 h-7"></i>
</div>
<div class="absolute inset-0 rounded-xl border-2 border-blue-400 animate-pulse-ring"></div>
</div>
<div>
<h1
class="text-2xl font-extrabold bg-gradient-to-r from-blue-400 via-purple-400 to-pink-400 bg-clip-text text-transparent group-hover:from-blue-300 group-hover:via-purple-300 group-hover:to-pink-300 transition-all animate-gradient bg-300%">
UMS Explorer
</h1>
<p class="text-xs text-gray-400 group-hover:text-gray-300 transition-colors font-medium">
UI/UX Look Book</p>
</div>
</div>
<nav class="hidden md:flex">
<ol class="flex items-center space-x-2 text-sm">
<li><a href="#" class="text-blue-400 hover:text-blue-300 transition-colors">Dashboard</a>
</li>
<li class="text-gray-600">/</li>
<li class="text-blue-400 capitalize">Components</li>
</ol>
</nav>
</div>
<div class="flex items-center space-x-4">
<div class="relative search-container">
<div class="relative group">
<input type="text" placeholder="Search everything..."
class="w-64 pl-12 pr-4 py-2.5 bg-white/10 backdrop-blur-md border border-white/20 rounded-xl text-white placeholder-gray-400 focus:outline-none focus:border-blue-400 focus:bg-white/20 transition-all focus:w-80">
<i data-lucide="search"
class="absolute left-3.5 top-1/2 transform -translate-y-1/2 w-5 h-5 text-gray-400 group-focus-within:text-blue-400 transition-colors"></i>
<div
class="absolute right-3 top-1/2 transform -translate-y-1/2 text-xs text-gray-500 font-medium">
⌘K</div>
</div>
</div>
<div class="hidden md:flex items-center space-x-2">
<div class="w-2.5 h-2.5 bg-green-400 rounded-full animate-pulse shadow-lg shadow-green-400/50">
</div>
<span class="text-sm text-gray-300 font-medium">Connected</span>
</div>
<button
class="p-2.5 rounded-xl bg-white/10 backdrop-blur-md border border-white/20 hover:bg-white/20 hover:scale-105 transition-all interactive">
<i data-lucide="sun" class="w-5 h-5"></i>
</button>
<button
class="p-2.5 rounded-xl bg-white/10 backdrop-blur-md border border-white/20 hover:bg-white/20 hover:scale-105 transition-all interactive">
<i data-lucide="command" class="w-5 h-5"></i>
</button>
</div>
</div>
</div>
</header>
<!-- Main content area -->
<main class="container mx-auto px-4 py-8">
<div class="text-center mb-16" data-aos="fade-up">
<h1 class="text-5xl lg:text-6xl font-extrabold tracking-tight mb-6 gradient-text animate-gradient bg-300%">
UI/UX Pattern Library</h1>
<p class="mt-4 text-xl text-gray-400 max-w-3xl mx-auto leading-relaxed">A visual inventory of all components
and interaction patterns from the UMS Explorer application.</p>
</div>
<!-- Core Components -->
<div class="demo-section" data-aos="fade-up" data-aos-delay="100">
<h2 class="demo-section-title">Core Components</h2>
<p class="demo-section-description">Fundamental building blocks of the UI, including interactive elements
like command palettes, modals, and notifications.</p>
<div class="grid grid-cols-1 md:grid-cols-2 gap-8">
<!-- Command Palette -->
<div data-aos="fade-right" data-aos-delay="200">
<h3 class="font-semibold text-lg mb-4">Command Palette</h3>
<div class="relative h-[420px] bg-black/30 rounded-xl flex items-center justify-center p-4 glass">
<div class="command-palette"
style="position: relative; transform: none; top: auto; left: auto; width: 100%;">
<div class="p-6">
<input type="text" value="view" placeholder="Type a command or search..."
class="w-full p-4 rounded-xl bg-transparent border border-white/20 text-lg focus:outline-none focus:border-blue-400 transition-all">
<div class="mt-6 max-h-80 overflow-y-auto space-y-2">
<div
class="p-4 rounded-xl bg-gradient-to-r from-blue-500/20 to-purple-500/20 cursor-pointer transition-all hover:from-blue-500/30 hover:to-purple-500/30 border border-white/10">
<div class="flex items-center">
<i data-lucide="git-branch" class="w-6 h-6 mr-4 text-purple-400"></i>
<div class="flex-1">
<div class="font-semibold text-lg">View Workflows</div>
<div class="text-sm text-gray-400">Navigate to workflows view</div>
</div>
<div class="ml-auto">
<kbd
class="px-2 py-1 text-xs bg-white/10 rounded-lg border border-white/20">W</kbd>
</div>
</div>
</div>
<div
class="p-4 rounded-xl hover:bg-white/10 cursor-pointer transition-all border border-transparent hover:border-white/10">
<div class="flex items-center">
<i data-lucide="brain" class="w-6 h-6 mr-4 text-blue-400"></i>
<div class="flex-1">
<div class="font-semibold text-lg">View Memories</div>
<div class="text-sm text-gray-400">Navigate to memories view</div>
</div>
<div class="ml-auto">
<kbd
class="px-2 py-1 text-xs bg-white/10 rounded-lg border border-white/20">M</kbd>
</div>
</div>
</div>
<div
class="p-4 rounded-xl hover:bg-white/10 cursor-pointer transition-all border border-transparent hover:border-white/10">
<div class="flex items-center">
<i data-lucide="bar-chart-3" class="w-6 h-6 mr-4 text-green-400"></i>
<div class="flex-1">
<div class="font-semibold text-lg">View Analytics</div>
<div class="text-sm text-gray-400">Navigate to analytics view</div>
</div>
<div class="ml-auto">
<kbd
class="px-2 py-1 text-xs bg-white/10 rounded-lg border border-white/20">A</kbd>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- Modals -->
<div data-aos="fade-left" data-aos-delay="200">
<h3 class="font-semibold text-lg mb-4">Modal Dialog</h3>
<div class="relative h-[420px] bg-black/30 rounded-xl flex items-center justify-center p-4 glass">
<div class="enhanced-card rounded-2xl max-w-lg w-full" style="position: relative;" data-tilt
data-tilt-max="5" data-tilt-speed="300">
<div class="p-8">
<div class="flex items-center justify-between mb-6">
<h3 class="text-2xl font-bold gradient-text">Workflow Details</h3>
<button class="p-2 rounded-lg hover:bg-white/10 transition-colors group">
<i data-lucide="x"
class="w-5 h-5 group-hover:rotate-90 transition-transform"></i>
</button>
</div>
<div class="space-y-6">
<div class="grid grid-cols-1 md:grid-cols-2 gap-6">
<div>
<h4 class="font-semibold mb-3 text-gray-300">Basic Information</h4>
<div class="space-y-3 text-sm">
<div class="flex justify-between">
<span class="text-gray-400">Title:</span>
<span class="font-medium">Analyze Market Trends</span>
</div>
<div class="flex justify-between items-center">
<span class="text-gray-400">Status:</span>
<span
class="px-3 py-1 text-xs rounded-full bg-gradient-to-r from-green-500/20 to-emerald-500/20 text-green-400 border border-green-500/20">active</span>
</div>
<div class="flex justify-between">
<span class="text-gray-400">Created:</span>
<span class="font-medium">1/1/2024</span>
</div>
</div>
</div>
<div>
<h4 class="font-semibold mb-3 text-gray-300">Metrics</h4>
<div class="grid grid-cols-3 gap-4">
<div
class="text-center p-3 rounded-xl bg-white/5 border border-white/10">
<div class="text-2xl font-bold text-blue-400">12</div>
<div class="text-xs text-gray-400 mt-1">Actions</div>
</div>
<div
class="text-center p-3 rounded-xl bg-white/5 border border-white/10">
<div class="text-2xl font-bold text-green-400">34</div>
<div class="text-xs text-gray-400 mt-1">Memories</div>
</div>
<div
class="text-center p-3 rounded-xl bg-white/5 border border-white/10">
<div class="text-2xl font-bold text-purple-400">5</div>
<div class="text-xs text-gray-400 mt-1">Artifacts</div>
</div>
</div>
</div>
</div>
<div>
<h4 class="font-semibold mb-3 text-gray-300">Description</h4>
<p
class="text-gray-300 bg-gradient-to-br from-white/10 to-white/5 p-4 rounded-xl border border-white/10">
A multi-step workflow to analyze recent market trends for Q2, generate a
report, and create a presentation.</p>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- Notifications -->
<div data-aos="fade-up" data-aos-delay="300">
<h3 class="font-semibold text-lg mb-4">Toast Notifications</h3>
<div class="space-y-4">
<div
class="p-4 rounded-xl bg-gradient-to-r from-green-500 to-emerald-500 text-white font-medium shadow-xl flex items-center transform hover:scale-105 transition-transform cursor-pointer">
<i data-lucide="check-circle" class="w-6 h-6 mr-3 animate-bounce-subtle"></i>
<span class="flex-1">Success! The operation was completed.</span>
<i data-lucide="x" class="w-4 h-4 opacity-70 hover:opacity-100"></i>
</div>
<div
class="p-4 rounded-xl bg-gradient-to-r from-red-500 to-rose-500 text-white font-medium shadow-xl flex items-center transform hover:scale-105 transition-transform cursor-pointer">
<i data-lucide="x-circle" class="w-6 h-6 mr-3 animate-pulse"></i>
<span class="flex-1">Error! Could not load the database.</span>
<i data-lucide="x" class="w-4 h-4 opacity-70 hover:opacity-100"></i>
</div>
<div
class="p-4 rounded-xl bg-gradient-to-r from-yellow-500 to-amber-500 text-white font-medium shadow-xl flex items-center transform hover:scale-105 transition-transform cursor-pointer">
<i data-lucide="alert-triangle" class="w-6 h-6 mr-3"></i>
<span class="flex-1">Warning: Memory usage is high.</span>
<i data-lucide="x" class="w-4 h-4 opacity-70 hover:opacity-100"></i>
</div>
<div
class="p-4 rounded-xl bg-gradient-to-r from-blue-500 to-indigo-500 text-white font-medium shadow-xl flex items-center transform hover:scale-105 transition-transform cursor-pointer">
<i data-lucide="info" class="w-6 h-6 mr-3"></i>
<span class="flex-1">Info: Switched to light theme.</span>
<i data-lucide="x" class="w-4 h-4 opacity-70 hover:opacity-100"></i>
</div>
</div>
</div>
<!-- Database Drop Zone -->
<div data-aos="fade-up" data-aos-delay="300">
<h3 class="font-semibold text-lg mb-4">File Drop Zone</h3>
<div class="enhanced-card rounded-2xl p-8 max-w-md w-full text-center" data-tilt data-tilt-max="10"
data-tilt-speed="400">
<div class="mb-6">
<div
class="w-24 h-24 bg-gradient-to-br from-blue-500 to-purple-600 rounded-2xl flex items-center justify-center mx-auto mb-4 animate-float shadow-2xl">
<i data-lucide="database" class="w-12 h-12"></i>
</div>
<h2 class="text-2xl font-bold mb-2 gradient-text">Load Your UMS Database</h2>
<p class="text-gray-400">Drag and drop your database file or click to browse</p>
</div>
<div
class="drop-zone relative rounded-xl border-2 border-dashed border-gray-600 hover:border-blue-400 transition-all p-8 cursor-pointer group bg-gradient-to-br from-blue-400/10 to-purple-400/10 border-blue-400">
<div class="text-center">
<i data-lucide="upload-cloud"
class="w-14 h-14 mx-auto mb-4 text-blue-400 group-hover:animate-bounce"></i>
<p class="font-medium mb-2 text-lg">Drop your database file here</p>
<p class="text-sm text-gray-500">Supports .db, .sqlite, .sqlite3 files</p>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- Cards & Grids -->
<div class="demo-section" data-aos="fade-up" data-aos-delay="100">
<h2 class="demo-section-title">Cards & Grids</h2>
<p class="demo-section-description">Various card styles used to display summarized information in grid
layouts. These include stat cards, navigation cards, and data-specific cards for memories, actions, and
workflows.</p>
<h3 class="font-semibold text-lg mb-4">Stat & Navigation Cards</h3>
<div class="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-4 gap-6 mb-8">
<!-- Stat Card -->
<div class="enhanced-card rounded-2xl p-6 group" data-aos="zoom-in" data-aos-delay="200">
<div class="flex items-center justify-between mb-4">
<div
class="w-14 h-14 bg-gradient-to-br from-blue-500/30 to-blue-600/30 rounded-xl flex items-center justify-center group-hover:scale-110 transition-transform shadow-lg">
<i data-lucide="brain" class="w-7 h-7 text-blue-400"></i>
</div>
<div class="text-right">
<div class="text-3xl font-extrabold gradient-text">1,234</div>
<div class="text-sm text-gray-400 font-medium">Memories</div>
</div>
</div>
<div class="h-3 bg-gray-700/50 rounded-full overflow-hidden backdrop-blur-sm">
<div class="h-full bg-gradient-to-r from-blue-500 to-blue-400 rounded-full animate-pulse"
style="width: 65%;"></div>
</div>
</div>
<!-- Navigation Card with Tooltip -->
<div class="enhanced-card rounded-2xl p-6 cursor-pointer group dashboard-tooltip"
data-tippy-content="<div class='tooltip-content'><h4 class='font-bold text-purple-400 mb-2'>🔄 Workflows</h4><p>Explore and analyze AI agent execution workflows - complete journeys from start to finish.</p></div>"
data-tippy-allowHTML="true" data-tippy-theme="custom" data-aos="zoom-in" data-aos-delay="300">
<div class="flex items-center mb-4">
<div
class="w-14 h-14 bg-gradient-to-br from-purple-500/30 to-purple-600/30 rounded-xl flex items-center justify-center mr-4 group-hover:scale-110 transition-transform shadow-lg">
<i data-lucide="git-branch" class="w-7 h-7 text-purple-400"></i>
</div>
<div>
<h3 class="text-xl font-semibold">Workflows</h3>
<p class="text-sm text-gray-400">Explore AI agent workflows</p>
</div>
</div>
<div class="flex items-center justify-between">
<span class="text-sm text-gray-500">View all workflows</span>
<i data-lucide="arrow-right"
class="w-5 h-5 text-gray-400 group-hover:text-purple-400 group-hover:translate-x-2 transition-all"></i>
</div>
</div>
</div>
<h3 class="font-semibold text-lg my-4">Data-Specific Cards</h3>
<div class="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-6">
<!-- Memory Card -->
<div class="enhanced-card rounded-xl p-5 cursor-pointer group memory-episodic" data-aos="fade-up"
data-aos-delay="200">
<div class="flex items-start justify-between mb-3">
<div class="flex items-center space-x-2">
<span
class="px-3 py-1 text-xs rounded-full bg-gradient-to-r from-blue-500/20 to-indigo-500/20 text-blue-400 border border-blue-500/20 font-medium">episodic</span>
<span class="text-xs text-gray-500">observation</span>
</div>
<div class="flex items-center space-x-2 text-xs text-gray-500">
<span class="font-medium">I: 8/10</span>
<span class="font-medium">C: 95%</span>
</div>
</div>
<p class="text-gray-300 text-sm mb-3 line-clamp-2 group-hover:text-white transition-colors">Observed
a significant spike in user engagement after the new feature deployment. The API response time
also improved by 15%.</p>
<div class="flex items-center justify-between text-sm text-gray-500">
<span>2h ago</span>
<div class="flex items-center space-x-2 group-hover:text-blue-400 transition-colors">
<i data-lucide="eye" class="w-4 h-4"></i>
<span>15</span>
</div>
</div>
</div>
<!-- Action Card -->
<div class="enhanced-card rounded-2xl p-6 cursor-pointer group" data-aos="fade-up" data-aos-delay="300">
<div class="flex items-start justify-between mb-4">
<div class="flex items-center space-x-2">
<span
class="px-3 py-1 text-xs rounded-full bg-gradient-to-r from-green-500/20 to-emerald-500/20 text-green-400 border border-green-500/20 font-medium">completed</span>
<span class="text-xs text-gray-500">tool_use</span>
</div>
<div class="text-xs text-gray-500 font-medium">1m 15s</div>
</div>
<h3 class="font-semibold text-lg mb-2 group-hover:text-orange-400 transition-colors">Run Data
Analysis Script</h3>
<p class="text-gray-400 text-sm mb-4 line-clamp-2">Executing the Python script `analyze_logs.py` to
process the latest server logs and extract key performance indicators.</p>
<div class="flex items-center justify-between text-sm text-gray-500">
<span>3h ago</span>
<div class="flex items-center space-x-2 group-hover:text-orange-400 transition-colors">
<i data-lucide="wrench" class="w-4 h-4"></i>
<span>Workflow</span>
</div>
</div>
</div>
<!-- Workflow Card -->
<div class="enhanced-card rounded-2xl p-6 cursor-pointer group" data-aos="fade-up" data-aos-delay="400">
<div class="flex items-start justify-between mb-4">
<h3 class="font-semibold text-lg group-hover:text-blue-400 transition-colors">Generate Q2
Financial Report</h3>
<div class="status-indicator">
<span
class="px-3 py-1 text-xs rounded-full bg-gradient-to-r from-red-500/20 to-rose-500/20 text-red-400 border border-red-500/20 font-medium status-failed">failed</span>
</div>
</div>
<p class="text-gray-400 text-sm mb-4 line-clamp-2">A workflow to gather financial data, process it,
and generate a final PDF report. Failed due to an API timeout.</p>
<div class="flex items-center justify-between text-sm text-gray-500">
<span>1d ago</span>
<div class="flex items-center space-x-4">
<span class="flex items-center space-x-1">
<i data-lucide="layers" class="w-4 h-4"></i>
<span>8 actions</span>
</span>
<span class="flex items-center space-x-1">
<i data-lucide="brain" class="w-4 h-4"></i>
<span>21 memories</span>
</span>
</div>
</div>
</div>
</div>
</div>
<!-- Visualizations -->
<div class="demo-section" data-aos="fade-up" data-aos-delay="100">
<h2 class="demo-section-title">Data Visualizations</h2>
<p class="demo-section-description">Static representations of dynamic, data-driven visualizations like
charts and graphs. These mockups capture the look and feel without the underlying JavaScript libraries.
</p>
<div class="grid grid-cols-1 lg:grid-cols-2 gap-8">
<!-- Charts -->
<div data-aos="fade-right" data-aos-delay="200">
<h3 class="font-semibold text-lg mb-4">Charts (Chart.js)</h3>
<div class="space-y-6">
<div class="chart-container">
<h3 class="text-xl font-semibold mb-6 gradient-text">Memory Level Distribution</h3>
<svg viewBox="0 0 400 200" class="w-full h-auto">
<defs>
<filter id="glow">
<feGaussianBlur stdDeviation="3" result="coloredBlur" />
<feMerge>
<feMergeNode in="coloredBlur" />
<feMergeNode in="SourceGraphic" />
</feMerge>
</filter>
</defs>
<circle cx="120" cy="100" r="70" fill="none" stroke="#f59e0b" stroke-width="30"
filter="url(#glow)" opacity="0.9"></circle>
<path d="M 120 30 A 70 70 0 0 1 185.3 148.5" fill="none" stroke="#3b82f6"
stroke-width="30" filter="url(#glow)" opacity="0.9"></path>
<path d="M 185.3 148.5 A 70 70 0 0 1 54.7 148.5" fill="none" stroke="#10b981"
stroke-width="30" filter="url(#glow)" opacity="0.9"></path>
<text x="250" y="60" fill="#e5e7eb" font-size="14" font-weight="500">Working</text>
<circle cx="235" cy="56" r="6" fill="#f59e0b" filter="url(#glow)"></circle>
<text x="250" y="90" fill="#e5e7eb" font-size="14" font-weight="500">Episodic</text>
<circle cx="235" cy="86" r="6" fill="#3b82f6" filter="url(#glow)"></circle>
<text x="250" y="120" fill="#e5e7eb" font-size="14" font-weight="500">Semantic</text>
<circle cx="235" cy="116" r="6" fill="#10b981" filter="url(#glow)"></circle>
</svg>
</div>
<div class="chart-container">
<h3 class="text-xl font-semibold mb-6 gradient-text">Activity Timeline</h3>
<svg viewBox="0 0 400 200" class="w-full h-auto">
<defs>
<linearGradient id="lineGradient" x1="0%" y1="0%" x2="100%" y2="0%">
<stop offset="0%" style="stop-color:#10b981;stop-opacity:0.8" />
<stop offset="100%" style="stop-color:#3b82f6;stop-opacity:0.8" />
</linearGradient>
<linearGradient id="fillGradient" x1="0%" y1="0%" x2="0%" y2="100%">
<stop offset="0%" style="stop-color:#10b981;stop-opacity:0.3" />
<stop offset="100%" style="stop-color:#10b981;stop-opacity:0" />
</linearGradient>
</defs>
<path d="M 20 150 C 80 80, 150 180, 220 120 S 350 20, 380 50 L 380 180 L 20 180 Z"
fill="url(#fillGradient)"></path>
<path d="M 20 150 C 80 80, 150 180, 220 120 S 350 20, 380 50"
stroke="url(#lineGradient)" stroke-width="3" fill="none" filter="url(#glow)"></path>
<circle cx="20" cy="150" r="6" fill="#10b981" filter="url(#glow)">
<animate attributeName="r" values="6;8;6" dur="2s" repeatCount="indefinite" />
</circle>
<circle cx="220" cy="120" r="6" fill="#22d3ee" filter="url(#glow)">
<animate attributeName="r" values="6;8;6" dur="2s" begin="0.5s"
repeatCount="indefinite" />
</circle>
<circle cx="380" cy="50" r="6" fill="#3b82f6" filter="url(#glow)">
<animate attributeName="r" values="6;8;6" dur="2s" begin="1s"
repeatCount="indefinite" />
</circle>
<line x1="0" y1="180" x2="400" y2="180" stroke="#4b5563" stroke-width="1"></line>
<text x="20" y="195" fill="#9ca3af" font-size="12" font-weight="500">Mon</text>
<text x="120" y="195" fill="#9ca3af" font-size="12" font-weight="500">Tue</text>
<text x="220" y="195" fill="#9ca3af" font-size="12" font-weight="500">Wed</text>
<text x="320" y="195" fill="#9ca3af" font-size="12" font-weight="500">Thu</text>
</svg>
</div>
</div>
</div>
<!-- Memory Graph -->
<div data-aos="fade-left" data-aos-delay="200">
<h3 class="font-semibold text-lg mb-4">Memory Graph (D3.js)</h3>
<div class="grid grid-cols-12 gap-6">
<div class="col-span-12 lg:col-span-8">
<div class="graph-container" style="height: 420px;">
<svg class="graph-svg">
<defs>
<linearGradient id="linkGradient1" x1="0%" y1="0%" x2="100%" y2="0%">
<stop offset="0%" style="stop-color:#3b82f6;stop-opacity:0.6" />
<stop offset="100%" style="stop-color:#8b5cf6;stop-opacity:0.6" />
</linearGradient>
<filter id="nodeGlow">
<feGaussianBlur stdDeviation="3" result="coloredBlur" />
<feMerge>
<feMergeNode in="coloredBlur" />
<feMergeNode in="SourceGraphic" />
</feMerge>
</filter>
</defs>
<line class="graph-link" x1="100" y1="150" x2="200" y2="100"
stroke="url(#linkGradient1)" stroke-width="3"></line>
<line class="graph-link" x1="200" y1="100" x2="300" y2="150" stroke="#10b981"
stroke-width="3" opacity="0.6"></line>
<line class="graph-link" x1="200" y1="100" x2="250" y2="250" stroke="#8b5cf6"
stroke-width="3" opacity="0.6"></line>
<line class="graph-link" x1="100" y1="150" x2="250" y2="250" stroke="#3b82f6"
stroke-width="3" opacity="0.6"></line>
<circle class="graph-node selected" cx="200" cy="100" r="18" fill="#f59e0b"
stroke="#d97706" filter="url(#nodeGlow)"></circle>
<text class="graph-text" x="200" y="105" font-size="10" font-weight="700">W</text>
<text class="graph-text" x="200" y="130">Working</text>
<circle class="graph-node" cx="100" cy="150" r="15" fill="#3b82f6" stroke="#1d4ed8"
filter="url(#nodeGlow)"></circle>
<text class="graph-text" x="100" y="155" font-size="10" font-weight="700">E</text>
<text class="graph-text" x="100" y="180">Episodic</text>
<circle class="graph-node" cx="300" cy="150" r="13" fill="#10b981" stroke="#059669"
filter="url(#nodeGlow)"></circle>
<text class="graph-text" x="300" y="155" font-size="10" font-weight="700">S</text>
<text class="graph-text" x="300" y="180">Semantic</text>
<circle class="graph-node" cx="250" cy="250" r="15" fill="#8b5cf6" stroke="#7c3aed"
filter="url(#nodeGlow)"></circle>
<text class="graph-text" x="250" y="255" font-size="10" font-weight="700">P</text>
<text class="graph-text" x="250" y="280">Procedural</text>
</svg>
<div class="graph-legend">
<div class="text-sm font-semibold mb-3 gradient-text">Legend</div>
<div class="legend-item">
<div class="legend-color"
style="background: linear-gradient(135deg, #f59e0b, #d97706);"></div>
<span>Working</span>
</div>
<div class="legend-item">
<div class="legend-color"
style="background: linear-gradient(135deg, #3b82f6, #1d4ed8);"></div>
<span>Episodic</span>
</div>
<div class="legend-item">
<div class="legend-color"
style="background: linear-gradient(135deg, #10b981, #059669);"></div>
<span>Semantic</span>
</div>
<div class="legend-item">
<div class="legend-color"
style="background: linear-gradient(135deg, #8b5cf6, #7c3aed);"></div>
<span>Procedural</span>
</div>
</div>
</div>
</div>
<div class="col-span-12 lg:col-span-4">
<div class="graph-sidebar h-[420px]">
<h4 class="font-semibold gradient-text text-lg">Memory Details</h4>
<div class="space-y-4 mt-6">
<div>
<div class="text-xs text-gray-400 mb-2 uppercase tracking-wide">Type & Level
</div>
<div class="flex items-center space-x-2">
<span
class="px-3 py-1 text-xs rounded-full bg-gradient-to-r from-yellow-500/20 to-amber-500/20 text-yellow-400 border border-yellow-500/20 font-medium">working</span>
<span class="text-sm text-gray-300">thought</span>
</div>
</div>
<div>
<div class="text-xs text-gray-400 mb-2 uppercase tracking-wide">Content</div>
<div
class="text-sm bg-gradient-to-br from-white/10 to-white/5 p-4 rounded-xl max-h-32 overflow-y-auto border border-white/10">
<p>Initial thought process for market analysis workflow...</p>
</div>
</div>
<div class="grid grid-cols-2 gap-4">
<div class="text-center p-3 rounded-xl bg-white/5 border border-white/10">
<div class="text-xs text-gray-400 mb-1">Importance</div>
<div class="text-2xl font-bold gradient-text">9/10</div>
</div>
<div class="text-center p-3 rounded-xl bg-white/5 border border-white/10">
<div class="text-xs text-gray-400 mb-1">Confidence</div>
<div class="text-2xl font-bold gradient-text">98%</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- Thought Chains -->
<div data-aos="fade-up" data-aos-delay="300">
<h3 class="font-semibold text-lg mb-4">Thought Chains (Mermaid.js)</h3>
<div class="thought-chain-container" style="min-height: 550px;">
<div class="mermaid-container">
<svg width="100%" height="300" viewBox="0 0 500 250" xmlns="http://www.w3.org/2000/svg">
<defs>
<linearGradient id="nodeGradient1" x1="0%" y1="0%" x2="100%" y2="100%">
<stop offset="0%" style="stop-color:#3b82f6;stop-opacity:1" />
<stop offset="100%" style="stop-color:#1d4ed8;stop-opacity:1" />
</linearGradient>
<linearGradient id="nodeGradient2" x1="0%" y1="0%" x2="100%" y2="100%">
<stop offset="0%" style="stop-color:#8b5cf6;stop-opacity:1" />
<stop offset="100%" style="stop-color:#6b21a8;stop-opacity:1" />
</linearGradient>
<filter id="softGlow">
<feGaussianBlur stdDeviation="4" result="coloredBlur" />
<feMerge>
<feMergeNode in="coloredBlur" />
<feMergeNode in="SourceGraphic" />
</feMerge>
</filter>
</defs>
<style>
/* Static styling for the mock Mermaid-style flowchart SVG. */
.label {
    font-family: 'Space Grotesk', sans-serif;
    color: #e5e7eb;
}
.label text {
    fill: #e5e7eb;
    font-weight: 600;
}
/* Base node box. The original sheet declared `.node rect` twice
   (rounded corners/glow in one rule, fill/stroke in another); the
   two rules are merged here into a single declaration block. */
.node rect {
    rx: 12;
    ry: 12;
    fill: #1f2937;
    stroke: #3b82f6;
    stroke-width: 2px;
    filter: url(#softGlow);
}
/* Highlighted node: wins over the base fill on specificity
   (.node.active beats .node), independent of source order. */
.node.active rect {
    fill: url(#nodeGradient1);
}
/* Edge strokes between flowchart nodes. */
.flowchart-link {
    stroke: rgba(139, 92, 246, 0.4);
    stroke-width: 3;
    fill: none;
}
</style>
<g>
<g class="node active" transform="translate(75, 50)">
<rect x="-60" y="-25" width="120" height="50"></rect>
<text x="0" y="0" dominant-baseline="central" text-anchor="middle"
font-size="14">
<tspan>Start Goal</tspan>
</text>
</g>
<g class="node" transform="translate(250, 50)">
<rect x="-70" y="-25" width="140" height="50"></rect>
<text x="0" y="0" dominant-baseline="central" text-anchor="middle"
font-size="14">
<tspan>Analyze Data</tspan>
</text>
</g>
<g class="node" transform="translate(425, 50)">
<rect x="-60" y="-25" width="120" height="50"></rect>
<text x="0" y="0" dominant-baseline="central" text-anchor="middle"
font-size="14">
<tspan>Find Insight</tspan>
</text>
</g>
<g class="node" transform="translate(250, 150)">
<rect x="-80" y="-25" width="160" height="50" fill="url(#nodeGradient2)"></rect>
<text x="0" y="0" dominant-baseline="central" text-anchor="middle"
font-size="14">
<tspan>Branch: Get more data</tspan>
</text>
</g>
<g class="node" transform="translate(425, 200)">
<rect x="-70" y="-25" width="140" height="50"></rect>
<text x="0" y="0" dominant-baseline="central" text-anchor="middle"
font-size="14">
<tspan>Conclusion</tspan>
</text>
</g>
<path d="M 135 50 L 180 50" class="flowchart-link">
<animate attributeName="stroke-dasharray" values="0 150;150 0" dur="2s"
repeatCount="indefinite" />
</path>
<path d="M 320 50 L 365 50" class="flowchart-link">
<animate attributeName="stroke-dasharray" values="0 150;150 0" dur="2s"
begin="0.5s" repeatCount="indefinite" />
</path>
<path d="M 250 75 L 250 125" class="flowchart-link">
<animate attributeName="stroke-dasharray" values="0 150;150 0" dur="2s"
begin="1s" repeatCount="indefinite" />
</path>
<path d="M 425 75 L 425 175" class="flowchart-link">
<animate attributeName="stroke-dasharray" values="0 150;150 0" dur="2s"
begin="1.5s" repeatCount="indefinite" />
</path>
</g>
</svg>
</div>
<div class="timeline-scrubber">
<div class="playback-controls">
<button class="playback-btn secondary"><i data-lucide="skip-back"
class="w-6 h-6"></i></button>
<button class="playback-btn"><i data-lucide="play" class="w-7 h-7"></i></button>
<button class="playback-btn secondary"><i data-lucide="skip-forward"
class="w-6 h-6"></i></button>
<div class="speed-control">
<span class="text-xs text-gray-400 font-medium">Speed:</span>
<button class="speed-btn">0.5x</button>
<button class="speed-btn active">1x</button>
<button class="speed-btn">2x</button>
</div>
</div>
<input type="range" min="0" max="100" value="30" class="timeline-slider">
</div>
</div>
</div>
<!-- Artifact Gallery -->
<div data-aos="fade-up" data-aos-delay="400">
<h3 class="font-semibold text-lg mb-4">Artifact Gallery (Masonry Grid)</h3>
<div class="masonry-grid">
<div class="masonry-item enhanced-card rounded-xl overflow-hidden cursor-pointer group"
data-tilt data-tilt-max="5" data-tilt-speed="300">
<div class="relative overflow-hidden bg-gray-800 h-48">
<img loading="lazy" decoding="async"
src="https://images.unsplash.com/photo-1618005182384-a83a8bd57fbe?ixlib=rb-4.0.3&q=85&fm=jpg&crop=entropy&cs=srgb&w=600"
alt="Abstract render" width="600" height="400"
class="w-full h-full object-cover transition-transform duration-500 group-hover:scale-110">
<div
class="absolute inset-0 bg-gradient-to-t from-black/70 to-transparent opacity-0 group-hover:opacity-100 transition-opacity duration-300">
</div>
</div>
<div class="p-5">
<h3 class="font-semibold text-white line-clamp-1 text-lg">Abstract Wave Render</h3>
<p class="text-sm text-gray-400 line-clamp-2 mb-4 mt-2">Generated via a prompt about
fluid dynamics and color theory.</p>
<div class="flex flex-wrap gap-2">
<span
class="px-3 py-1 bg-gradient-to-r from-pink-500/20 to-rose-500/20 text-pink-300 rounded-full text-xs font-medium border border-pink-500/20">3D-render</span>
<span
class="px-3 py-1 bg-gradient-to-r from-pink-500/20 to-rose-500/20 text-pink-300 rounded-full text-xs font-medium border border-pink-500/20">abstract</span>
</div>
</div>
</div>
<div class="masonry-item enhanced-card rounded-xl overflow-hidden cursor-pointer group"
data-tilt data-tilt-max="5" data-tilt-speed="300">
<div class="relative overflow-hidden bg-gradient-to-br from-gray-900 to-gray-800 h-40 p-5">
<div class="flex items-center mb-3">
<i data-lucide="code" class="w-5 h-5 mr-2 text-green-400"></i>
<span class="text-sm text-green-400 font-medium">Python</span>
</div>
<pre class="text-xs text-gray-300 overflow-hidden font-mono"><code>def analyze_data(df):
"""Analyzes the dataframe."""
summary = df.describe()
corr = df.corr()
return summary, corr</code></pre>
</div>
<div class="p-5">
<h3 class="font-semibold text-white line-clamp-1 text-lg">Data Analysis Snippet</h3>
<p class="text-sm text-gray-400 line-clamp-2 mb-3 mt-2">Core function for pandas
dataframe analysis.</p>
</div>
</div>
<div class="masonry-item enhanced-card rounded-xl overflow-hidden cursor-pointer group"
data-tilt data-tilt-max="5" data-tilt-speed="300">
<div
class="relative overflow-hidden bg-gradient-to-br from-gray-800 to-gray-900 h-32 flex items-center justify-center">
<div class="text-center group-hover:scale-105 transition-transform">
<i data-lucide="file-text" class="w-16 h-16 mx-auto mb-3 text-blue-400"></i>
<div class="text-sm font-semibold text-gray-300">Q2_Report.md</div>
</div>
</div>
<div class="p-5">
<h3 class="font-semibold text-white line-clamp-1 text-lg">Q2 Report Draft</h3>
<p class="text-sm text-gray-400 line-clamp-2 mb-3 mt-2">Initial draft of the quarterly
financial report in Markdown format.</p>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- Forms & Inputs -->
<div class="demo-section" data-aos="fade-up" data-aos-delay="100">
<h2 class="demo-section-title">Forms & Inputs</h2>
<p class="demo-section-description">A collection of input controls, including advanced search, filters, and
forms used for creating and editing data.</p>
<div class="grid grid-cols-1 lg:grid-cols-2 gap-8">
<!-- Advanced Search & Filters -->
<div data-aos="fade-right" data-aos-delay="200">
<h3 class="font-semibold text-lg mb-4">Search & Filters</h3>
<div class="space-y-4">
<!-- Global Search with Suggestions -->
<div class="relative">
<input type="text" value="analyze" placeholder="Search everything..."
class="w-full pl-12 pr-4 py-3 bg-white/10 backdrop-blur-md border border-white/20 rounded-xl text-white placeholder-gray-400 focus:outline-none focus:border-blue-400 focus:bg-white/15 transition-all">
<i data-lucide="search"
class="absolute left-4 top-1/2 transform -translate-y-1/2 w-5 h-5 text-gray-400"></i>
<div
class="absolute top-full mt-2 w-full p-2 rounded-xl bg-gradient-to-br from-slate-800/95 to-slate-900/95 backdrop-blur-xl border border-white/20 z-10 shadow-2xl">
<div
class="p-3 hover:bg-gradient-to-r hover:from-blue-500/20 hover:to-purple-500/20 rounded-lg cursor-pointer transition-all group">
<div class="flex items-center">
<i data-lucide="git-branch"
class="w-5 h-5 mr-3 text-gray-400 group-hover:text-purple-400 transition-colors"></i>
<div>
<div class="text-sm font-semibold group-hover:text-white transition-colors">
Analyze Market Trends</div>
<div class="text-xs text-gray-500">Workflow</div>
</div>
</div>
</div>
<div
class="p-3 hover:bg-gradient-to-r hover:from-blue-500/20 hover:to-purple-500/20 rounded-lg cursor-pointer transition-all group">
<div class="flex items-center">
<i data-lucide="brain"
class="w-5 h-5 mr-3 text-gray-400 group-hover:text-blue-400 transition-colors"></i>
<div>
<div class="text-sm font-semibold group-hover:text-white transition-colors">
Analysis of the new user cohort...</div>
<div class="text-xs text-gray-500">Memory</div>
</div>
</div>
</div>
</div>
</div>
<!-- Filter Controls -->
<div class="flex items-center space-x-3">
<select
class="bg-white/10 border border-white/20 rounded-lg px-4 py-2.5 text-sm focus:outline-none focus:border-blue-400 transition-all cursor-pointer">
<option>All Status</option>
<option>Active</option>
<option>Completed</option>
<option>Failed</option>
</select>
<select
class="bg-white/10 border border-white/20 rounded-lg px-4 py-2.5 text-sm focus:outline-none focus:border-blue-400 transition-all cursor-pointer">
<option>All Types</option>
<option>Tool Use</option>
<option>Observation</option>
<option>Reasoning</option>
</select>
<input type="text" placeholder="Search actions..."
class="bg-white/10 border border-white/20 rounded-lg px-4 py-2.5 text-sm w-64 focus:outline-none focus:border-blue-400 transition-all">
</div>
</div>
</div>
<!-- Goal Creation Form -->
<div data-aos="fade-left" data-aos-delay="200">
<h3 class="font-semibold text-lg mb-4">Creation Form</h3>
<div class="enhanced-card rounded-2xl p-8">
<form class="space-y-5">
<div>
<label class="block text-sm font-semibold mb-2 text-gray-300">Title *</label>
<input type="text" required
class="w-full px-4 py-3 bg-white/10 border border-white/20 rounded-xl focus:outline-none focus:border-yellow-400 focus:bg-white/15 transition-all"
placeholder="Enter goal title">
</div>
<div class="grid grid-cols-2 gap-4">
<div>
<label class="block text-sm font-semibold mb-2 text-gray-300">Priority</label>
<select
class="w-full px-4 py-3 bg-white/10 border border-white/20 rounded-xl focus:outline-none focus:border-yellow-400 transition-all cursor-pointer">
<option>Low</option>
<option selected>Medium</option>
<option>High</option>
<option>Critical</option>
</select>
</div>
<div>
<label class="block text-sm font-semibold mb-2 text-gray-300">Status</label>
<select
class="w-full px-4 py-3 bg-white/10 border border-white/20 rounded-xl focus:outline-none focus:border-yellow-400 transition-all cursor-pointer">
<option>Pending</option>
<option>In Progress</option>
<option>Completed</option>
</select>
</div>
</div>
<div class="flex items-center justify-end space-x-3 pt-6 border-t border-white/10">
<button type="button"
class="px-6 py-2.5 text-gray-400 hover:text-white transition-colors font-medium">Cancel</button>
<button type="submit"
class="px-8 py-2.5 bg-gradient-to-r from-yellow-500 to-orange-500 hover:from-yellow-400 hover:to-orange-400 rounded-xl font-semibold shadow-lg hover:shadow-xl transform hover:scale-105 transition-all">Create
Goal</button>
</div>
</form>
</div>
</div>
</div>
</div>
<!-- Markdown Content -->
<div class="demo-section" data-aos="fade-up" data-aos-delay="100">
<h2 class="demo-section-title">Styled Content</h2>
<p class="demo-section-description">How standard content, like Markdown, is styled within the application
for readability and consistency.</p>
<div
class="markdown-content bg-gradient-to-br from-white/10 to-white/5 p-8 rounded-2xl border border-white/10 backdrop-blur-md">
<h1>This is a H1 Heading</h1>
<p>This is a paragraph of text. It can contain <strong>bold text</strong>, <em>italic text</em>, and
<code>inline code snippets</code>. Links are also styled, like this <a href="#"
class="text-blue-400 hover:text-blue-300 underline decoration-blue-400/30 hover:decoration-blue-300 transition-all">link
to another page</a>.</p>
<h2>This is a H2 Heading</h2>
<ul class="list-disc list-inside space-y-2 my-4 ml-4">
<li>An item in an unordered list.</li>
<li>Another item.
<ol class="list-decimal list-inside space-y-1 mt-2 ml-4">
<li>A sub-item in an ordered list.</li>
<li>Another sub-item.</li>
</ol>
</li>
</ul>
<h3>This is a H3 Heading</h3>
<pre><code class="language-python"># This is a Python code block
import numpy as np
def hello_world():
print("Hello from the UMS Explorer!")
return np.random.rand(5)
</code></pre>
</div>
</div>
</main>
<!-- Floating Action Button -->
<button class="fab" title="Open Command Palette">
<i data-lucide="command" class="w-7 h-7"></i>
</button>
<script>
    // Page bootstrap script. Everything runs in a single 'load' handler so that
    // all deferred vendor scripts (Lucide, AOS, Tippy, VanillaTilt, particles.js,
    // GSAP) are guaranteed to be present. Each integration is feature-detected
    // via `typeof X !== 'undefined'` so a failed CDN load degrades gracefully
    // instead of throwing and aborting the rest of the initialization.
    // Wait for all deferred scripts to load
    window.addEventListener('load', () => {
        // Initialize Lucide Icons (replaces <i data-lucide="..."> with inline SVG)
        if (typeof lucide !== 'undefined') {
            lucide.createIcons();
        }
        // Initialize AOS (scroll-triggered entrance animations; one-shot per element)
        if (typeof AOS !== 'undefined') {
            AOS.init({
                duration: 800,
                easing: 'ease-out-cubic',
                once: true,
                offset: 50
            });
        }
        // Initialize Tippy tooltips on dashboard elements
        if (typeof tippy !== 'undefined') {
            tippy('.dashboard-tooltip', {
                theme: 'custom',
                animation: 'scale',
                duration: [300, 200],   // [show, hide] ms
                delay: [500, 0],        // delay show to avoid flicker on casual hover
                interactive: false,
                placement: 'top',
                maxWidth: 380,
                allowHTML: true,
            });
        }
        // Initialize VanillaTilt with lazy loading: tilt is only attached when a
        // card scrolls near the viewport (200px margin), then the observer lets go.
        if (typeof VanillaTilt !== 'undefined') {
            const lazyTilt = (entries, observer) => {
                entries.forEach(entry => {
                    if (entry.isIntersecting) {
                        VanillaTilt.init(entry.target, {
                            max: 15,
                            speed: 400,
                            glare: true,
                            "max-glare": 0.2,
                        });
                        observer.unobserve(entry.target);
                    }
                });
            };
            const tiltObserver = new IntersectionObserver(lazyTilt, { rootMargin: "200px" });
            document.querySelectorAll('[data-tilt]').forEach(el => tiltObserver.observe(el));
        }
        // Initialize Particles background; canvas setup is deferred to browser
        // idle time (with a setTimeout fallback) so it never blocks first paint.
        if (typeof particlesJS !== 'undefined') {
            const initParticles = () => particlesJS('particles-js', {
                "particles": {
                    "number": {
                        "value": 50,
                        "density": {
                            "enable": true,
                            "value_area": 800
                        }
                    },
                    "color": {
                        "value": ["#3b82f6", "#8b5cf6", "#ec4899"]
                    },
                    "shape": {
                        "type": "circle"
                    },
                    "opacity": {
                        "value": 0.3,
                        "random": true,
                        "anim": {
                            "enable": true,
                            "speed": 1,
                            "opacity_min": 0.1,
                            "sync": false
                        }
                    },
                    "size": {
                        "value": 3,
                        "random": true,
                        "anim": {
                            "enable": true,
                            "speed": 2,
                            "size_min": 0.1,
                            "sync": false
                        }
                    },
                    "line_linked": {
                        "enable": true,
                        "distance": 150,
                        "color": "#8b5cf6",
                        "opacity": 0.2,
                        "width": 1
                    },
                    "move": {
                        "enable": true,
                        "speed": 0.5,
                        "direction": "none",
                        "random": false,
                        "straight": false,
                        "out_mode": "out",
                        "bounce": false,
                        "attract": {
                            "enable": false,
                            "rotateX": 600,
                            "rotateY": 1200
                        }
                    }
                },
                "interactivity": {
                    "detect_on": "canvas",
                    "events": {
                        "onhover": {
                            "enable": true,
                            "mode": "grab"
                        },
                        "onclick": {
                            "enable": true,
                            "mode": "push"
                        },
                        "resize": true
                    },
                    "modes": {
                        "grab": {
                            "distance": 140,
                            "line_linked": {
                                "opacity": 0.5
                            }
                        },
                        "push": {
                            "particles_nb": 4
                        }
                    }
                },
                "retina_detect": true
            });
            if ('requestIdleCallback' in window) {
                requestIdleCallback(initParticles);
            } else {
                setTimeout(initParticles, 0);
            }
        }
        // Initialize GSAP animations: slow continuous FAB rotation plus
        // hover scale in/out tweens on the floating action button.
        if (typeof gsap !== 'undefined') {
            gsap.to(".fab", {
                rotation: 360,
                duration: 20,
                repeat: -1,
                ease: "none"
            });
            const fab = document.querySelector('.fab');
            if (fab) {
                fab.addEventListener('mouseenter', function () {
                    gsap.to(this, {
                        scale: 1.2,
                        duration: 0.3,
                        ease: "power2.out"
                    });
                });
                fab.addEventListener('mouseleave', function () {
                    gsap.to(this, {
                        scale: 1,
                        duration: 0.3,
                        ease: "power2.out"
                    });
                });
            }
        }
        // Optimized scroll handler: rAF-throttled via the `ticking` flag so the
        // header's 'scrolled' class toggles at most once per frame.
        const headerEl = document.querySelector('header');
        let ticking = false;
        window.addEventListener('scroll', () => {
            if (!ticking) {
                ticking = true;
                requestAnimationFrame(() => {
                    headerEl.classList.toggle('scrolled', window.scrollY > 50);
                    ticking = false;
                });
            }
        }, { passive: true });
        // Add interactive hover effects to cards (inline transform overrides CSS)
        document.querySelectorAll('.enhanced-card').forEach(card => {
            card.addEventListener('mouseenter', function () {
                requestAnimationFrame(() => {
                    this.style.transform = 'translate3d(0, -4px, 0) scale(1.02)';
                });
            });
            card.addEventListener('mouseleave', function () {
                requestAnimationFrame(() => {
                    this.style.transform = 'translate3d(0, 0, 0) scale(1)';
                });
            });
        });
        // Add keyboard shortcuts: Cmd/Ctrl+K focuses the first text input
        // (presumably the global search box — first in DOM order; verify).
        document.addEventListener('keydown', (e) => {
            if ((e.metaKey || e.ctrlKey) && e.key === 'k') {
                e.preventDefault();
                const searchInput = document.querySelector('input[type="text"]');
                if (searchInput) searchInput.focus();
            }
        }, { passive: false });
    });
</script>
</body>
</html>
```
--------------------------------------------------------------------------------
/ultimate_mcp_server/core/server.py:
--------------------------------------------------------------------------------
```python
"""Main server implementation for Ultimate MCP Server."""
import asyncio
import logging
import logging.config
import os
import sys
import time
from contextlib import asynccontextmanager
from dataclasses import dataclass
from functools import wraps
from typing import Any, Dict, List, Optional
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastmcp import Context, FastMCP
import ultimate_mcp_server
# Import core specifically to set the global instance
import ultimate_mcp_server.core
from ultimate_mcp_server.config import get_config, load_config
from ultimate_mcp_server.constants import Provider
from ultimate_mcp_server.core.state_store import StateStore
# Import UMS API utilities and database functions
from ultimate_mcp_server.core.ums_api import (
setup_ums_api,
)
from ultimate_mcp_server.graceful_shutdown import (
create_quiet_server,
enable_quiet_shutdown,
register_shutdown_handler,
)
from ultimate_mcp_server.tools.smart_browser import (
_ensure_initialized as smart_browser_ensure_initialized,
)
from ultimate_mcp_server.tools.smart_browser import (
shutdown as smart_browser_shutdown,
)
from ultimate_mcp_server.tools.sql_databases import initialize_sql_tools, shutdown_sql_tools
# --- Import the trigger function directly instead of the whole module---
from ultimate_mcp_server.utils import get_logger
from ultimate_mcp_server.utils.logging import logger
# --- Define Logging Configuration Dictionary ---
# Rotating main log file; the containing directory is created eagerly below so
# that handler construction during dictConfig cannot fail on a missing path.
LOG_FILE_PATH = "logs/ultimate_mcp_server.log"
# Ensure log directory exists before config is used
log_dir = os.path.dirname(LOG_FILE_PATH)
if log_dir:
    os.makedirs(log_dir, exist_ok=True)

# logging.config.dictConfig-compatible schema. Console output is routed to
# stderr (stdout must stay clean for MCP stdio transport), with a Rich handler
# for the application namespace and plain uvicorn formatters for server logs.
LOGGING_CONFIG = {
    "version": 1,
    "disable_existing_loggers": False,  # Let Uvicorn's loggers pass through if needed
    "formatters": {
        "default": {
            "()": "uvicorn.logging.DefaultFormatter",
            "fmt": "%(levelprefix)s %(message)s",
            "use_colors": None,
        },
        "access": {
            "()": "uvicorn.logging.AccessFormatter",
            "fmt": '%(levelprefix)s %(client_addr)s - "%(request_line)s" %(status_code)s',
        },
        "file": {  # Formatter for file output
            "format": "%(asctime)s - %(name)s:%(lineno)d - %(levelname)s - %(message)s",
            "datefmt": "%Y-%m-%d %H:%M:%S",
        },
    },
    "handlers": {
        "default": {  # Console handler - redirect to stderr
            "formatter": "default",
            "class": "logging.StreamHandler",
            "stream": "ext://sys.stderr",  # Changed from stdout to stderr
        },
        "access": {  # Access log handler - redirect to stderr
            "formatter": "access",
            "class": "logging.StreamHandler",
            "stream": "ext://sys.stderr",  # Changed from stdout to stderr
        },
        "rich_console": {  # Rich console handler
            "()": "ultimate_mcp_server.utils.logging.formatter.create_rich_console_handler",
            "stderr": True,  # Add this parameter to use stderr
        },
        "file": {  # File handler
            "formatter": "file",
            "class": "logging.handlers.RotatingFileHandler",
            "filename": LOG_FILE_PATH,
            "maxBytes": 2 * 1024 * 1024,  # 2 MB
            "backupCount": 5,
            "encoding": "utf-8",
        },
        "tools_file": {  # Tools log file handler
            "formatter": "file",
            "class": "logging.FileHandler",
            "filename": "logs/direct_tools.log",
            "encoding": "utf-8",
        },
        "completions_file": {  # Completions log file handler
            "formatter": "file",
            "class": "logging.FileHandler",
            "filename": "logs/direct_completions.log",
            "encoding": "utf-8",
        },
    },
    "loggers": {
        "uvicorn": {"handlers": ["rich_console"], "level": "INFO", "propagate": False},
        "uvicorn.error": {"level": "INFO", "propagate": True},  # Propagate errors to root
        "uvicorn.access": {"handlers": ["access", "file"], "level": "INFO", "propagate": False},
        "ultimate_mcp_server": {  # Our application's logger namespace
            "handlers": ["rich_console", "file"],
            "level": "DEBUG",
            "propagate": False,
        },
        "ultimate_mcp_server.tools": {  # Tools-specific logger
            "handlers": ["tools_file"],
            "level": "DEBUG",
            "propagate": True,  # Propagate to parent for console display
        },
        "ultimate_mcp_server.completions": {  # Completions-specific logger
            "handlers": ["completions_file"],
            "level": "DEBUG",
            "propagate": True,  # Propagate to parent for console display
        },
    },
    "root": {  # Root logger configuration
        "level": "INFO",
        "handlers": ["rich_console", "file"],  # Root catches logs not handled by specific loggers
    },
}
# DO NOT apply the config here - it will be applied by Uvicorn through log_config parameter

# Global server instance
# NOTE(review): these module-level singletons appear to be set elsewhere in
# this file (outside the visible chunk) — confirm before relying on them.
_server_app = None
_gateway_instance = None

# Get loggers
# Dedicated namespace loggers; per LOGGING_CONFIG above, each also writes to
# its own file handler in addition to propagating to the parent console logger.
tools_logger = get_logger("ultimate_mcp_server.tools")
completions_logger = get_logger("ultimate_mcp_server.completions")
@dataclass
class ProviderStatus:
    """
    Structured representation of an LLM provider's configuration and availability status.

    This dataclass encapsulates all essential status information about a language model
    provider in the Ultimate MCP Server. It's used to track the state of each provider,
    including whether it's properly configured, successfully initialized, and what models
    it offers. This information is vital for:

    1. Displaying provider status to clients via API endpoints
    2. Making runtime decisions about provider availability
    3. Debugging provider configuration and connectivity issues
    4. Resource listings and capability discovery

    The status is typically maintained in the Gateway's provider_status dictionary,
    with provider names as keys and ProviderStatus instances as values.

    Attributes:
        enabled: Whether the provider is enabled in the configuration.
            This reflects the user's intent, not actual availability.
        available: Whether the provider is successfully initialized and ready for use.
            This is determined by runtime checks during server initialization.
        api_key_configured: Whether a valid API key was found for this provider.
            A provider might be enabled but have no API key configured.
        models: List of available models from this provider, with each model represented
            as a dictionary containing model ID, name, and capabilities.
        error: Error message explaining why a provider is unavailable, or None if
            the provider initialized successfully or hasn't been initialized yet.
    """

    enabled: bool  # config intent (from settings), not runtime availability
    available: bool  # True only after successful runtime initialization
    api_key_configured: bool  # whether a key was found; independent of `enabled`
    models: List[Dict[str, Any]]  # model descriptors reported by the provider
    error: Optional[str] = None  # populated only when initialization failed
class Gateway:
"""
Main Ultimate MCP Server implementation and central orchestrator.
The Gateway class serves as the core of the Ultimate MCP Server, providing a unified
interface to multiple LLM providers (OpenAI, Anthropic, etc.) and implementing the
Model Control Protocol (MCP). It manages provider connections, tool registration,
state persistence, and request handling.
Key responsibilities:
- Initializing and managing connections to LLM providers
- Registering and exposing tools for model interaction
- Providing consistent error handling and logging
- Managing state persistence across requests
- Exposing resources (guides, examples, reference info) for models
- Implementing the MCP protocol for standardized model interaction
The Gateway is designed to be instantiated once per server instance and serves
as the central hub for all model interactions. It can be accessed globally through
the ultimate_mcp_server.core._gateway_instance reference.
"""
def __init__(
self,
name: str = "main",
register_tools: bool = True,
provider_exclusions: List[str] = None,
load_all_tools: bool = False, # Remove result_serialization_mode
):
"""
Initialize the MCP Gateway with configured providers and tools.
This constructor sets up the complete MCP Gateway environment, including:
- Loading configuration from environment variables and config files
- Setting up logging infrastructure
- Initializing the MCP server framework
- Creating a state store for persistence
- Registering tools and resources based on configuration
The initialization process is designed to be flexible, allowing for customization
through the provided parameters and the configuration system. Provider initialization
is deferred until server startup to ensure proper async handling.
Args:
name: Server instance name, used for logging and identification purposes.
Default is "main".
register_tools: Whether to register standard MCP tools with the server.
If False, only the minimal core functionality will be available.
Default is True.
provider_exclusions: List of provider names to exclude from initialization.
This allows selectively disabling specific providers
regardless of their configuration status.
Default is None (no exclusions).
load_all_tools: If True, load all available tools. If False (default),
load only the defined 'Base Toolset'.
"""
self.name = name
self.providers = {}
self.provider_status = {}
self.logger = get_logger(f"ultimate_mcp_server.{name}")
self.event_handlers = {}
self.provider_exclusions = provider_exclusions or []
self.api_meta_tool = None # Initialize api_meta_tool attribute
self.load_all_tools = load_all_tools # Store the flag
# Load configuration if not already loaded
if get_config() is None:
self.logger.info("Initializing Gateway: Loading configuration...")
load_config()
# Initialize logger
self.logger.info(f"Initializing {self.name}...")
# Set MCP protocol version to 2025-03-25
import os
os.environ["MCP_PROTOCOL_VERSION"] = "2025-03-25"
# Create MCP server with modern FastMCP constructor
self.mcp = FastMCP(
name=self.name,
lifespan=self._server_lifespan,
instructions=self.system_instructions,
)
# Initialize the state store
persistence_dir = None
if (
get_config()
and hasattr(get_config(), "state_persistence")
and hasattr(get_config().state_persistence, "dir")
):
persistence_dir = get_config().state_persistence.dir
self.state_store = StateStore(persistence_dir)
# Register tools if requested
if register_tools:
self._register_tools(load_all=self.load_all_tools)
self._register_resources()
self.logger.info(f"Ultimate MCP Server '{self.name}' initialized")
def log_tool_calls(self, func):
"""
Decorator to log MCP tool calls with detailed timing and result information.
This decorator wraps MCP tool functions to provide consistent logging of:
- Tool name and parameters at invocation time
- Execution time for performance tracking
- Success or failure status
- Summarized results or error information
The decorator ensures that all tool calls are logged to a dedicated tools logger,
which helps with diagnostics, debugging, and monitoring of tool usage patterns.
Successful calls include timing information and a brief summary of the result,
while failed calls include exception details.
Args:
func: The async function to wrap with logging. This should be a tool function
registered with the MCP server that will be called by models.
Returns:
A wrapped async function that performs the same operations as the original
but with added logging before and after execution.
Note:
This decorator is automatically applied to all functions registered as tools
via the @mcp.tool() decorator in the _register_tools method, so it doesn't
need to be applied manually in most cases.
"""
@wraps(func)
async def wrapper(*args, **kwargs):
start_time = time.time()
tool_name = func.__name__
# Format parameters for logging
args_str = ", ".join([repr(arg) for arg in args[1:] if arg is not None])
kwargs_str = ", ".join([f"{k}={repr(v)}" for k, v in kwargs.items() if k != "ctx"])
params_str = ", ".join(filter(None, [args_str, kwargs_str]))
# Log the request - only through tools_logger
tools_logger.info(f"TOOL CALL: {tool_name}({params_str})")
try:
result = await func(*args, **kwargs)
processing_time = time.time() - start_time
# Format result for logging
if isinstance(result, dict):
result_keys = list(result.keys())
result_summary = f"dict with keys: {result_keys}"
else:
result_str = str(result)
result_summary = (
(result_str[:100] + "...") if len(result_str) > 100 else result_str
)
# Log successful completion - only through tools_logger
tools_logger.info(
f"TOOL SUCCESS: {tool_name} completed in {processing_time:.2f}s - Result: {result_summary}"
)
return result
except Exception as e:
processing_time = time.time() - start_time
tools_logger.error(
f"TOOL ERROR: {tool_name} failed after {processing_time:.2f}s: {str(e)}",
exc_info=True,
)
raise
return wrapper
    @asynccontextmanager
    async def _server_lifespan(self, server: FastMCP):
        """
        Async context manager managing the server lifecycle during startup and shutdown.

        This method implements the lifespan protocol used by FastMCP (based on ASGI) to:
        1. Perform startup initialization before the server begins accepting requests
        2. Clean up resources when the server is shutting down
        3. Make shared context available to request handlers during the server's lifetime

        During startup, this method:
        - Initializes all configured LLM providers
        - Initializes SQL tools state and pre-warms Smart Browser (both best-effort)
        - Triggers dynamic docstring generation for the Marqo tool
        - Sets the global Gateway instance for access from other components
        - Attaches the StateStore to the server instance
        - Prepares a shared context dictionary for use by request handlers

        During shutdown, it:
        - Shuts down SQL tools and Smart Browser (errors logged, not raised)
        - Clears the global Gateway instance reference

        Args:
            server: The FastMCP server instance that's starting up, which provides
                the framework context for the lifespan.

        Yields:
            Dict containing initialized resources that will be available to all
            request handlers during the server's lifetime.

        Note:
            This method is called automatically by the FastMCP framework during
            server startup and is not intended to be called directly.
        """
        self.logger.info(f"Starting Ultimate MCP Server '{self.name}'")

        # Add a flag to track if this is an SSE instance (set externally before startup)
        is_sse_mode = getattr(self, '_sse_mode', False)
        if is_sse_mode:
            self.logger.info("SSE mode detected - using persistent lifespan management")

        # Initialize providers (errors are recorded in provider_status, not raised)
        await self._initialize_providers()

        # Best-effort: SQL tools init failure is logged but does not abort startup.
        try:
            await initialize_sql_tools()
            self.logger.info("SQL tools state initialized.")
        except Exception as e:
            self.logger.error(f"Failed to initialize SQL tools state: {e}", exc_info=True)

        # --- OPTIONAL: Pre-initialize SmartBrowser ---
        try:
            self.logger.info("Pre-initializing Smart Browser components...")
            # Call the imported initialization function
            await smart_browser_ensure_initialized()
            self.logger.info("Smart Browser successfully pre-initialized.")
        except Exception as e:
            # Log warning but don't stop server startup if pre-init fails
            self.logger.warning(f"Could not pre-initialize Smart Browser: {e}", exc_info=True)
        # ---------------------------------------------------------------------

        # --- Trigger Dynamic Docstring Generation ---
        # This should run after config is loaded but before the server is fully ready
        # It checks cache and potentially calls an LLM.
        self.logger.info("Initiating dynamic docstring generation for Marqo tool...")
        try:
            # Import the function here to avoid circular imports
            from ultimate_mcp_server.tools.marqo_fused_search import (
                trigger_dynamic_docstring_generation,
            )

            await trigger_dynamic_docstring_generation()
            self.logger.info("Dynamic docstring generation/loading complete.")
        except Exception as e:
            self.logger.error(
                f"Error during dynamic docstring generation startup task: {e}", exc_info=True
            )
        # ---------------------------------------------

        # --- Set the global instance variable ---
        # Make the fully initialized instance accessible globally AFTER init
        ultimate_mcp_server.core._gateway_instance = self
        self.logger.info("Global gateway instance set.")
        # ----------------------------------------

        # --- Attach StateStore to application state ---
        # This makes the StateStore available to all tools via ctx.fastmcp._state_store
        # Note: In FastMCP 2.0+, we store the state_store directly on the server instance
        # Tools can access it via the with_state_management decorator
        server._state_store = self.state_store
        self.logger.info("StateStore attached to server instance.")
        # -----------------------------------------------

        # Create lifespan context (still useful for framework calls)
        context = {
            "providers": self.providers,
            "provider_status": self.provider_status,
        }

        self.logger.info("Lifespan context initialized, MCP server ready to handle requests")

        try:
            # Import and call trigger_dynamic_docstring_generation again
            # NOTE(review): this repeats the generation call from the startup
            # section above — presumably idempotent (it "checks cache"), but
            # confirm whether the second invocation is intentional or leftover.
            from ultimate_mcp_server.tools.marqo_fused_search import (
                trigger_dynamic_docstring_generation,
            )

            await trigger_dynamic_docstring_generation()
            # NOTE(review): uses the module-level `logger` here rather than
            # self.logger as elsewhere — verify this inconsistency is intended.
            logger.info("Dynamic docstring generation/loading complete.")

            if is_sse_mode:
                # For SSE mode, create a persistent context that doesn't shutdown easily
                self.logger.info("Creating persistent SSE lifespan context")

                # Add a keepalive task for SSE mode
                async def sse_lifespan_keepalive():
                    """Keepalive task to maintain SSE server lifespan."""
                    while True:
                        await asyncio.sleep(60)  # Keep alive every minute
                        # This task existing keeps the lifespan active

                # Start the keepalive task
                keepalive_task = asyncio.create_task(sse_lifespan_keepalive())

                try:
                    yield context
                finally:
                    # Cancel the keepalive task during shutdown
                    keepalive_task.cancel()
                    try:
                        await keepalive_task
                    except asyncio.CancelledError:
                        pass
            else:
                yield context
        finally:
            # Shutdown path: each step is wrapped so one failure cannot skip the rest.
            if is_sse_mode:
                self.logger.info("SSE mode shutdown initiated")

            try:
                # --- Shutdown SQL Tools State ---
                await shutdown_sql_tools()
                self.logger.info("SQL tools state shut down.")
            except Exception as e:
                self.logger.error(f"Failed to shut down SQL tools state: {e}", exc_info=True)

            # 2. Shutdown Smart Browser explicitly
            try:
                self.logger.info("Initiating explicit Smart Browser shutdown...")
                await smart_browser_shutdown()  # Call the imported function
                self.logger.info("Smart Browser shutdown completed successfully.")
            except Exception as e:
                logger.error(f"Error during explicit Smart Browser shutdown: {e}", exc_info=True)

            # --- Clear the global instance on shutdown ---
            ultimate_mcp_server.core._gateway_instance = None
            self.logger.info("Global gateway instance cleared.")
            # -------------------------------------------

            self.logger.info(f"Shutting down Ultimate MCP Server '{self.name}'")
async def _initialize_providers(self):
    """
    Discover and concurrently initialize every enabled LLM provider.

    Walks the full Provider enum and partitions candidates into three cases:

    - excluded providers (from the exclusion list given at Gateway creation)
      are skipped outright;
    - Ollama is accepted whenever it is enabled, since a local runtime needs
      no API key;
    - every other provider must be both enabled and carry an API key in the
      loaded configuration, otherwise it is logged and skipped.

    All accepted providers are initialized in parallel via
    ``_initialize_provider`` tasks, after which a one-line availability
    summary is logged.

    Never raises: per-provider failures are captured inside
    ``_initialize_provider`` and surface only through ``self.provider_status``.
    """
    self.logger.info("Initializing LLM providers")
    cfg = get_config()

    selected = []
    for name in (member.value for member in Provider):
        # Honor the exclusion list supplied at Gateway construction time.
        if name in self.provider_exclusions:
            self.logger.debug(f"Skipping provider {name} (excluded)")
            continue

        settings = getattr(cfg.providers, name, None)
        if settings is None or not settings.enabled:
            # Not present in config, or explicitly disabled - nothing to do.
            continue

        if name == Provider.OLLAMA.value:
            # Ollama runs locally and never requires an API key.
            self.logger.debug(
                f"Found configured and enabled provider: {name} (API key not required)"
            )
            selected.append(name)
        elif settings.api_key:
            self.logger.debug(f"Found configured and enabled provider: {name}")
            selected.append(name)
        else:
            self.logger.warning(
                f"Provider {name} is enabled but missing API key in config. Skipping."
            )

    # Fan out all provider initializations concurrently.
    init_tasks = [
        asyncio.create_task(self._initialize_provider(name), name=f"init-{name}")
        for name in selected
    ]
    if init_tasks:
        await asyncio.gather(*init_tasks)

    ready = [name for name, status in self.provider_status.items() if status.available]
    self.logger.info(
        f"Providers initialized: {len(ready)}/{len(selected)} available"
    )
async def _initialize_provider(self, provider_name: str) -> None:
    """
    Initialize a single LLM provider with its API key and configuration.

    This method is responsible for initializing an individual provider by:

    1. Retrieving the provider's configuration and API key
    2. Importing the appropriate provider class
    3. Instantiating the provider with the configured API key
    4. Calling the provider's initialize method to establish connectivity
    5. Recording the provider's status (including available models)

    The method handles errors gracefully, ensuring that exceptions during any
    stage of initialization are caught, logged, and reflected in the provider's
    status rather than propagated up the call stack.

    Args:
        provider_name: Name of the provider to initialize, matching a value
            in the Provider enum (e.g., "openai", "anthropic").

    Returns:
        None. Results are stored in the Gateway's providers and provider_status
        dictionaries rather than returned directly.

    Note:
        This method is called by _initialize_providers during server startup
        and is not intended to be called directly by users of the Gateway class.
    """
    # Initialized up-front so the except-block at the bottom can still record
    # an accurate status snapshot no matter where a failure occurs.
    api_key = None
    api_key_configured = False
    provider_config = None
    try:
        cfg = get_config()
        provider_config = getattr(cfg.providers, provider_name, None)
        # Get API key ONLY from the loaded config object
        if provider_config and provider_config.api_key:
            api_key = provider_config.api_key
            api_key_configured = True
        # Special case for Ollama: doesn't require an API key
        elif provider_name == Provider.OLLAMA.value and provider_config:
            api_key = None
            api_key_configured = True
            self.logger.debug("Initializing Ollama provider without API key (not required)")
        else:
            # This case should ideally not be reached if checks in _initialize_providers are correct,
            # but handle defensively.
            self.logger.warning(
                f"Attempted to initialize {provider_name}, but API key not found in loaded config."
            )
            api_key_configured = False
        if not api_key_configured:
            # Record status for providers found in config but without a key
            if provider_config:
                self.provider_status[provider_name] = ProviderStatus(
                    enabled=provider_config.enabled,  # Reflects config setting
                    available=False,
                    api_key_configured=False,
                    models=[],
                    error="API key not found in loaded configuration",
                )
            # Do not log the warning here again, just return
            return
        # --- API Key is configured, proceed with initialization ---
        self.logger.debug(f"Initializing provider {provider_name} with key from config.")
        # Import PROVIDER_REGISTRY to use centralized provider registry
        # (imported lazily here, presumably to avoid a circular import - TODO confirm)
        from ultimate_mcp_server.core.providers import PROVIDER_REGISTRY
        # Use the registry instead of hardcoded providers dictionary
        provider_class = PROVIDER_REGISTRY.get(provider_name)
        if not provider_class:
            raise ValueError(f"Invalid provider name mapping: {provider_name}")
        # Instantiate provider with the API key retrieved from the config (via decouple)
        # Ensure provider classes' __init__ expect 'api_key' as a keyword argument
        provider = provider_class(api_key=api_key)
        # Initialize provider (which should use the config passed)
        available = await provider.initialize()
        # Update status based on initialization result
        if available:
            models = await provider.list_models()
            self.providers[provider_name] = provider
            self.provider_status[provider_name] = ProviderStatus(
                enabled=provider_config.enabled,
                available=True,
                api_key_configured=True,
                models=models,
            )
            self.logger.success(
                f"Provider {provider_name} initialized successfully with {len(models)} models",
                emoji_key="provider",
            )
        else:
            self.provider_status[provider_name] = ProviderStatus(
                enabled=provider_config.enabled,
                available=False,
                api_key_configured=True,  # Key was found, but init failed
                models=[],
                error="Initialization failed (check provider API status or logs)",
            )
            self.logger.error(
                f"Provider {provider_name} initialization failed", emoji_key="error"
            )
    except Exception as e:
        # Handle unexpected errors during initialization
        error_msg = f"Error initializing provider {provider_name}: {str(e)}"
        self.logger.error(error_msg, exc_info=True)
        # Ensure status is updated even on exceptions
        enabled_status = provider_config.enabled if provider_config else False  # Best guess
        self.provider_status[provider_name] = ProviderStatus(
            enabled=enabled_status,
            available=False,
            api_key_configured=api_key_configured,  # Reflects if key was found before error
            models=[],
            error=error_msg,
        )
@property
def system_instructions(self) -> str:
    """
    Return comprehensive system-level instructions for LLMs on how to use the gateway.

    This property generates detailed instructions that are injected into the system prompt
    for LLMs using the Gateway. These instructions serve as a guide for LLMs to effectively
    utilize the available tools and capabilities, helping them understand:

    - The categories of available tools and their purposes
    - Best practices for provider and model selection
    - Error handling strategies and patterns
    - Recommendations for efficient and appropriate tool usage
    - Guidelines for choosing the right tool for specific tasks

    The instructions are designed to be clear and actionable, helping LLMs make
    informed decisions about when and how to use different components of the
    Ultimate MCP Server. They're structured in a hierarchical format with sections
    covering core categories, best practices, and additional resources.

    Returns:
        A formatted string containing detailed instructions for LLMs on how to
        effectively use the Gateway's tools and capabilities. These instructions
        are automatically included in the system prompt for all LLM interactions.
    """
    # Tool loading message can be adjusted based on self.load_all_tools if needed
    tool_loading_info = "all available tools" if self.load_all_tools else "the Base Toolset"
    # NOTE: the double braces ({{provider_name}}) below are f-string escapes, so the
    # rendered text shows the literal URI template "provider://{provider_name}".
    return f"""
# Ultimate MCP Server Tool Usage Instructions
You have access to the Ultimate MCP Server, which provides unified access to multiple language model
providers (OpenAI, Anthropic, etc.) through a standardized interface. This server instance has loaded {tool_loading_info}.
Follow these instructions to effectively use the available tools.
## Core Tool Categories
1. **Provider Tools**: Use these to discover available providers and models
- `get_provider_status`: Check which providers are available
- `list_models`: List models available from a specific provider
2. **Completion Tools**: Use these for text generation
- `generate_completion`: Single-prompt text generation (non-streaming)
- `chat_completion`: Multi-turn conversation with message history
- `multi_completion`: Compare outputs from multiple providers/models
3. **Tournament Tools**: Use these to run competitions between models
- `create_tournament`: Create and start a new tournament
- `get_tournament_status`: Check tournament progress
- `get_tournament_results`: Get detailed tournament results
- `list_tournaments`: List all tournaments
- `cancel_tournament`: Cancel a running tournament
## Best Practices
1. **Provider Selection**:
- Always check provider availability with `get_provider_status` before use
- Verify model availability with `list_models` before using specific models
2. **Error Handling**:
- All tools include error handling in their responses
- Check for the presence of an "error" field in responses
- If an error occurs, adapt your approach based on the error message
3. **Efficient Usage**:
- Use cached tools when repeatedly calling the same function with identical parameters
- For long-running operations like tournaments, poll status periodically
4. **Tool Selection Guidelines**:
- For single-turn text generation → `generate_completion`
- For conversation-based interactions → `chat_completion`
- For comparing outputs across models → `multi_completion`
- For evaluating model performance → Tournament tools
## Additional Resources
For more detailed information and examples, access these MCP resources:
- `info://server`: Basic server information
- `info://tools`: Overview of available tools
- `provider://{{provider_name}}`: Details about a specific provider
- `guide://llm`: Comprehensive usage guide for LLMs
- `guide://error-handling`: Detailed error handling guidance
- `examples://workflows`: Detailed examples of common workflows
- `examples://completions`: Examples of different completion types
- `examples://tournaments`: Guidance on tournament configuration and analysis
Remember to use appropriate error handling and follow the documented parameter formats
for each tool. All providers may not be available at all times, so always check status
first and be prepared to adapt to available providers.
"""
def _register_tools(self, load_all: bool = False) -> None:
    """
    Register all MCP tools with the server instance.

    This internal method sets up all available tools in the Ultimate MCP Server,
    making them accessible to LLMs through the MCP protocol. It handles:

    1. Setting up the basic echo tool for connectivity testing
    2. Conditionally calling the register_all_tools function to set up either
       the 'Base Toolset' or all specialized tools based on the `load_all` flag.

    The registration process wraps each tool function with logging functionality
    via the log_tool_calls decorator, ensuring consistent logging behavior across
    all tools. This provides valuable diagnostic information during tool execution.

    All registered tools become available through the MCP interface and can be
    discovered and used by LLMs interacting with the server.

    Args:
        load_all: If True, register all tools. If False, register only the base set.

    Note:
        This method is called automatically during Gateway initialization when
        register_tools=True (the default) and is not intended to be called directly.
    """
    # Import here to avoid circular dependency
    from ultimate_mcp_server.tools import register_all_tools

    self.logger.info("Registering core tools...")

    # Echo tool - define the function first, then register it
    @self.log_tool_calls
    async def echo(message: str, ctx: Context = None) -> Dict[str, Any]:
        """
        Echo back the message for testing MCP connectivity.

        Args:
            message: The message to echo back

        Returns:
            Dictionary containing the echoed message
        """
        self.logger.info(f"Echo tool called with message: {message}")
        return {"message": message}

    # Now register the decorated function with mcp.tool
    self.mcp.tool(echo)

    # Define our base toolset - use function names not module names
    base_toolset = [
        # Completion tools
        "generate_completion",
        "chat_completion",
        "multi_completion",
        # "stream_completion", # Not that useful for MCP
        # Provider tools
        "get_provider_status",
        "list_models",
        # Filesystem tools
        "read_file",
        "read_multiple_files",
        "write_file",
        "edit_file",
        "create_directory",
        "list_directory",
        "directory_tree",
        "move_file",
        "search_files",
        "get_file_info",
        "list_allowed_directories",
        "get_unique_filepath",
        # Optimization tools
        "estimate_cost",
        "compare_models",
        "recommend_model",
        # Local text tools
        "run_ripgrep",
        "run_awk",
        "run_sed",
        "run_jq",
        # Search tools
        "marqo_fused_search",
        # SmartBrowser class methods
        "search",
        "download",
        "download_site_pdfs",
        "collect_documentation",
        "run_macro",
        "autopilot",
        # SQL class methods
        "manage_database",
        "execute_sql",
        "explore_database",
        "access_audit_log",
        # Document processing class methods
        "convert_document",
        "chunk_document",
        "clean_and_format_text_as_markdown",
        "batch_format_texts",
        "optimize_markdown_formatting",
        "generate_qa_pairs",
        "summarize_document",
        "ocr_image",
        "enhance_ocr_text",
        "analyze_pdf_structure",
        "extract_tables",
        "process_document_batch",
        # Python sandbox class methods
        "execute_python",
        "repl_python",
    ]

    # Conditionally register tools based on load_all flag
    if load_all:
        self.logger.info("Calling register_all_tools to register ALL available tools...")
        register_all_tools(self.mcp)
    else:
        self.logger.info("Calling register_all_tools to register only the BASE toolset...")
        # Check if tool_registration filter is enabled in config
        cfg = get_config()
        if cfg.tool_registration.filter_enabled:
            # If filtering is already enabled, respect that configuration
            self.logger.info("Tool filtering is enabled - using config filter settings")
            register_all_tools(self.mcp)
        else:
            # Otherwise, set up filtering for base toolset.
            # NOTE(review): this mutates the shared config object returned by
            # get_config(); later readers of tool_registration will observe
            # these values - confirm that is intended.
            cfg.tool_registration.filter_enabled = True
            cfg.tool_registration.included_tools = base_toolset
            self.logger.info(f"Registering base toolset: {', '.join(base_toolset)}")
            register_all_tools(self.mcp)

    # After tools are registered, save the tool names to a file for the tools estimator script
    try:
        import json

        from ultimate_mcp_server.tools import STANDALONE_TOOL_FUNCTIONS

        # Get tools from STANDALONE_TOOL_FUNCTIONS plus class-based tools
        # NOTE(review): only standalone functions (plus "echo") are actually
        # enumerated below; class-based tool names are not collected despite
        # the comment above - TODO confirm whether that is intended.
        all_tool_names = []

        # Add standalone tool function names
        for tool_func in STANDALONE_TOOL_FUNCTIONS:
            if hasattr(tool_func, "__name__"):
                all_tool_names.append(tool_func.__name__)

        # Add echo tool
        all_tool_names.append("echo")

        # Write to file (relative to the process's current working directory)
        with open("tools_list.json", "w") as f:
            json.dump(all_tool_names, f, indent=2)
        self.logger.info(
            f"Wrote {len(all_tool_names)} tool names to tools_list.json for context estimator"
        )
    except Exception as e:
        # Best-effort only: a failure to write the helper file must not
        # prevent tool registration from completing.
        self.logger.warning(f"Failed to write tool names to file: {str(e)}")
def _register_resources(self):
"""
Register all MCP resources with the server instance.
This internal method registers standard MCP resources that provide static
information and guidance to LLMs using the Ultimate MCP Server. Resources differ
from tools in that they:
1. Provide static reference information rather than interactive functionality
2. Are accessed via URI-like identifiers (e.g., "info://server", "guide://llm")
3. Don't require API calls or external services to generate their responses
Registered resources include:
- Server and tool information (info:// resources)
- Provider details (provider:// resources)
- Usage guides and tutorials (guide:// resources)
- Example workflows and usage patterns (examples:// resources)
These resources serve as a knowledge base for LLMs to better understand how to
effectively use the available tools and follow best practices. They help reduce
the need for extensive contextual information in prompts by making reference
material available on-demand through the MCP protocol.
Note:
This method is called automatically during Gateway initialization when
register_tools=True (the default) and is not intended to be called directly.
"""
@self.mcp.resource("info://server")
def get_server_info() -> Dict[str, Any]:
    """
    Describe this Ultimate MCP Server instance.

    Resource URI: info://server

    Returns:
        Dictionary with the server's name, version string, a short
        description of its functionality, and the list of supported LLM
        provider names (every Provider enum value, regardless of current
        availability).

    Usage:
        Clients can read this resource to verify server identity and check
        compatibility. For live provider status, use the get_provider_status
        tool instead.
    """
    info: Dict[str, Any] = {
        "name": self.name,
        "version": "0.1.0",
        "description": "MCP server for accessing multiple LLM providers",
    }
    info["providers"] = [provider.value for provider in Provider]
    return info
@self.mcp.resource("info://tools")
def get_tools_info() -> Dict[str, Any]:
    """
    Summarize the Ultimate MCP Server tools, grouped by category.

    Resource URI: info://tools

    Returns:
        Dictionary keyed by category name ("provider_tools",
        "completion_tools", "tournament_tools", "document_tools"); each
        entry holds a human-readable "description" plus the list of tool
        names ("tools") belonging to that category.

    Usage:
        Read this resource to discover how tools are organized. For full
        parameter details on a specific tool, use the MCP list_tools method.
    """
    def category(description: str, tool_names: list) -> Dict[str, Any]:
        # Keep every category entry uniformly shaped.
        return {"description": description, "tools": tool_names}

    return {
        "provider_tools": category(
            "Tools for accessing and managing LLM providers",
            ["get_provider_status", "list_models"],
        ),
        "completion_tools": category(
            "Tools for text generation and completion",
            ["generate_completion", "chat_completion", "multi_completion"],
        ),
        "tournament_tools": category(
            "Tools for running and managing model tournaments",
            [
                "create_tournament",
                "list_tournaments",
                "get_tournament_status",
                "get_tournament_results",
                "cancel_tournament",
            ],
        ),
        "document_tools": category(
            "Tools for document processing (placeholder for future implementation)",
            [],
        ),
    }
@self.mcp.resource("guide://llm")
def get_llm_guide() -> str:
    """
    Usage guide for LLMs using the Ultimate MCP Server.

    This resource provides structured guidance specifically designed for LLMs to
    effectively use the tools and resources provided by the Ultimate MCP Server. It includes
    recommended tool selection strategies, common usage patterns, and examples.

    Resource URI: guide://llm

    Returns:
        A detailed text guide with sections on tool selection, usage patterns,
        and example workflows.

    Usage:
        This resource is primarily intended to be included in context for LLMs
        that will be using the gateway tools, to help them understand how to
        effectively use the available capabilities.
    """
    # Static reference text - no live server state is consulted here.
    return """
# Ultimate MCP Server Usage Guide for Language Models
## Overview
The Ultimate MCP Server provides a set of tools for accessing multiple language model providers
(OpenAI, Anthropic, etc.) through a unified interface. This guide will help you understand
how to effectively use these tools.
## Tool Selection Guidelines
### For Text Generation:
1. For single-prompt text generation:
- Use `generate_completion` with a specific provider and model
2. For multi-turn conversations:
- Use `chat_completion` with a list of message dictionaries
3. For streaming responses (real-time text output):
- Use streaming tools in the CompletionTools class
4. For comparing outputs across providers:
- Use `multi_completion` with a list of provider configurations
### For Provider Management:
1. To check available providers:
- Use `get_provider_status` to see which providers are available
2. To list available models:
- Use `list_models` to view models from all providers or a specific provider
### For Running Tournaments:
1. To create a new tournament:
- Use `create_tournament` with a prompt and list of model IDs
2. To check tournament status:
- Use `get_tournament_status` with a tournament ID
3. To get detailed tournament results:
- Use `get_tournament_results` with a tournament ID
## Common Workflows
### Provider Selection Workflow:
```
1. Call get_provider_status() to see available providers
2. Call list_models(provider="openai") to see available models
3. Call generate_completion(prompt="...", provider="openai", model="gpt-4o")
```
### Multi-Provider Comparison Workflow:
```
1. Call multi_completion(
prompt="...",
providers=[
{"provider": "openai", "model": "gpt-4o"},
{"provider": "anthropic", "model": "claude-3-opus-20240229"}
]
)
2. Compare results from each provider
```
### Tournament Workflow:
```
1. Call create_tournament(name="...", prompt="...", model_ids=["openai/gpt-4o", "anthropic/claude-3-opus"])
2. Store the tournament_id from the response
3. Call get_tournament_status(tournament_id="...") to monitor progress
4. Once status is "COMPLETED", call get_tournament_results(tournament_id="...")
```
## Error Handling Best Practices
1. Always check for "error" fields in tool responses
2. Verify provider availability before attempting to use specific models
3. For tournament tools, handle potential 404 errors for invalid tournament IDs
## Performance Considerations
1. Most completion tools include token usage and cost metrics in their responses
2. Use caching decorators for repetitive requests to save costs
3. Consider using stream=True for long completions to improve user experience
"""
# BUGFIX: the template must use SINGLE braces. This decorator argument is a
# plain string (not an f-string), so "{{provider_name}}" registered a literal
# double-braced URI that MCP resource-template matching (which uses {param}
# placeholders) could never match against e.g. "provider://openai".
@self.mcp.resource("provider://{provider_name}")
def get_provider_info(provider_name: str) -> Dict[str, Any]:
    """
    Get detailed information about a specific LLM provider.

    This resource provides comprehensive information about a specific provider,
    including its capabilities, available models, and configuration status.

    Resource URI template: provider://{provider_name}

    Args:
        provider_name: Name of the provider to retrieve information for
            (e.g., "openai", "anthropic", "gemini")

    Returns:
        Dictionary containing detailed provider information:
        - name: Provider name
        - status: Current status (enabled, available, etc.)
        - capabilities: List of supported capabilities
        - models: List of available models and their details
        - config: Current configuration settings (with sensitive info redacted)

    Error Handling:
        If the provider doesn't exist or isn't configured, returns an appropriate
        error message in the response instead of raising.

    Usage:
        Use this resource to get detailed information about a specific provider
        before using its models for completions or other operations.
    """
    # Check if provider exists in status dictionary
    provider_status = self.provider_status.get(provider_name)
    if not provider_status:
        return {
            "name": provider_name,
            "error": f"Provider '{provider_name}' not found or not configured",
            "status": {"enabled": False, "available": False, "api_key_configured": False},
            "models": [],
        }

    # Get provider instance if available
    provider_instance = self.providers.get(provider_name)

    # Build capability list based on provider name
    capabilities = []
    if provider_name in [
        Provider.OPENAI.value,
        Provider.ANTHROPIC.value,
        Provider.GEMINI.value,
    ]:
        capabilities = ["chat", "completion"]
    if provider_name == Provider.OPENAI.value:
        capabilities.extend(["embeddings", "vision", "image_generation"])
    elif provider_name == Provider.ANTHROPIC.value:
        capabilities.extend(["vision"])

    # Return provider details
    return {
        "name": provider_name,
        "status": {
            "enabled": provider_status.enabled,
            "available": provider_status.available,
            "api_key_configured": provider_status.api_key_configured,
            "error": provider_status.error,
        },
        "capabilities": capabilities,
        "models": provider_status.models,
        "config": {
            # Include non-sensitive config info only; API keys are never exposed.
            "default_model": provider_instance.default_model if provider_instance else None,
            "timeout_seconds": 30,  # Example default, not read from live config
        },
    }
@self.mcp.resource("guide://error-handling")
def get_error_handling_guide() -> Dict[str, Any]:
    """
    Get comprehensive guidance on handling errors from Ultimate MCP Server tools.

    This resource provides detailed information about common error patterns,
    error handling strategies, and recovery approaches for each tool in the
    Ultimate MCP Server. It helps LLMs understand how to gracefully handle and recover
    from various error conditions.

    Resource URI: guide://error-handling

    Returns:
        Dictionary containing error handling guidance organized by tool type:
        - general_principles: Cross-cutting detection and recovery advice
        - provider_tools: Error handling for provider-related tools
        - completion_tools: Error handling for completion tools
        - tournament_tools: Error handling for tournament tools
        - error_pattern_examples: Illustrative code snippets (as strings)

    Usage:
        This resource helps LLMs implement robust error handling when using
        the Ultimate MCP Server tools, improving the resilience of their interactions.
    """
    # Static reference content - no live server state is consulted here.
    return {
        "general_principles": {
            "error_detection": {
                "description": "How to detect errors in tool responses",
                "patterns": [
                    "Check for an 'error' field in the response dictionary",
                    "Look for status codes in error messages (e.g., 404, 500)",
                    "Check for empty or null results where data is expected",
                    "Look for 'warning' fields that may indicate partial success",
                ],
            },
            "error_recovery": {
                "description": "General strategies for recovering from errors",
                "strategies": [
                    "Retry with different parameters when appropriate",
                    "Fallback to alternative tools or providers",
                    "Gracefully degrade functionality when optimal path is unavailable",
                    "Clearly communicate errors to users with context and suggestions",
                ],
            },
        },
        "provider_tools": {
            "get_provider_status": {
                "common_errors": [
                    {
                        "error": "Server context not available",
                        "cause": "The server may not be fully initialized",
                        "handling": "Wait and retry or report server initialization issue",
                    },
                    {
                        "error": "No providers are currently configured",
                        "cause": "No LLM providers are enabled or initialization is incomplete",
                        "handling": "Proceed with caution and check if specific providers are required",
                    },
                ],
                "recovery_strategies": [
                    "If no providers are available, clearly inform the user of limited capabilities",
                    "If specific providers are unavailable, suggest alternatives based on task requirements",
                ],
            },
            "list_models": {
                "common_errors": [
                    {
                        "error": "Invalid provider",
                        "cause": "Specified provider name doesn't exist or isn't configured",
                        "handling": "Use valid providers from the error message's 'valid_providers' field",
                    },
                    {
                        "warning": "Provider is configured but not available",
                        "cause": "Provider API key issues or service connectivity problems",
                        "handling": "Use an alternative provider or inform user of limited options",
                    },
                ],
                "recovery_strategies": [
                    "When provider is invalid, fall back to listing all available providers",
                    "When models list is empty, suggest using the default model or another provider",
                ],
            },
        },
        "completion_tools": {
            "generate_completion": {
                "common_errors": [
                    {
                        "error": "Provider not available",
                        "cause": "Specified provider doesn't exist or isn't configured",
                        "handling": "Switch to an available provider (check with get_provider_status)",
                    },
                    {
                        "error": "Failed to initialize provider",
                        "cause": "API key configuration or network issues",
                        "handling": "Try another provider or check provider status",
                    },
                    {
                        "error": "Completion generation failed",
                        "cause": "Provider API errors, rate limits, or invalid parameters",
                        "handling": "Retry with different parameters or use another provider",
                    },
                ],
                "recovery_strategies": [
                    "Use multi_completion to try multiple providers simultaneously",
                    "Progressively reduce complexity (max_tokens, simplify prompt) if facing limits",
                    "Fall back to more reliable models if specialized ones are unavailable",
                ],
            },
            "multi_completion": {
                "common_errors": [
                    {
                        "error": "Invalid providers format",
                        "cause": "Providers parameter is not a list of provider configurations",
                        "handling": "Correct the format to a list of dictionaries with provider info",
                    },
                    {
                        "partial_failure": "Some providers failed",
                        "cause": "Indicated by successful_count < total_providers",
                        "handling": "Use the successful results and analyze error fields for failed ones",
                    },
                ],
                "recovery_strategies": [
                    "Focus on successful completions even if some providers failed",
                    "Check each provider's 'success' field to identify which ones worked",
                    "If timeout occurs, consider increasing the timeout parameter or reducing providers",
                ],
            },
        },
        "tournament_tools": {
            "create_tournament": {
                "common_errors": [
                    {
                        "error": "Invalid input",
                        "cause": "Missing required fields or validation errors",
                        "handling": "Check all required parameters are provided with valid values",
                    },
                    {
                        "error": "Failed to start tournament execution",
                        "cause": "Server resource constraints or initialization errors",
                        "handling": "Retry with fewer rounds or models, or try again later",
                    },
                ],
                "recovery_strategies": [
                    "Verify model IDs are valid before creating tournament",
                    "Start with simple tournaments to validate functionality before complex ones",
                    "Use error message details to correct specific input problems",
                ],
            },
            "get_tournament_status": {
                "common_errors": [
                    {
                        "error": "Tournament not found",
                        "cause": "Invalid tournament ID or tournament was deleted",
                        "handling": "Verify tournament ID or use list_tournaments to see available tournaments",
                    },
                    {
                        "error": "Invalid tournament ID format",
                        "cause": "Tournament ID is not a string or is empty",
                        "handling": "Ensure tournament ID is a valid string matching the expected format",
                    },
                ],
                "recovery_strategies": [
                    "When tournament not found, list all tournaments to find valid ones",
                    "If tournament status is FAILED, check error_message for details",
                    "Implement polling with backoff for monitoring long-running tournaments",
                ],
            },
        },
        # The "example" values below are code snippets shipped as plain strings
        # for the reading LLM; they are never executed by this server.
        "error_pattern_examples": {
            "retry_with_fallback": {
                "description": "Retry with fallback to another provider",
                "example": """
# Try primary provider
result = generate_completion(prompt="...", provider="openai", model="gpt-4o")
# Check for errors and fall back if needed
if "error" in result:
logger.warning(f"Primary provider failed: {result['error']}")
# Fall back to alternative provider
result = generate_completion(prompt="...", provider="anthropic", model="claude-3-opus-20240229")
""",
            },
            "validation_before_call": {
                "description": "Validate parameters before making tool calls",
                "example": """
# Get available providers first
provider_status = get_provider_status()
# Check if requested provider is available
requested_provider = "openai"
if requested_provider not in provider_status["providers"] or not provider_status["providers"][requested_provider]["available"]:
# Fall back to any available provider
available_providers = [p for p, status in provider_status["providers"].items() if status["available"]]
if available_providers:
requested_provider = available_providers[0]
else:
return {"error": "No LLM providers are available"}
""",
            },
        },
    }
@self.mcp.resource("examples://workflows")
def get_workflow_examples() -> Dict[str, Any]:
    """
    Get comprehensive examples of multi-tool workflows.

    This resource provides detailed, executable examples showing how to combine
    multiple tools into common workflows. These examples demonstrate best practices
    for tool sequencing, error handling, and result processing.

    Resource URI: examples://workflows

    Returns:
        Dictionary containing workflow examples organized by scenario:
        - basic_provider_selection: Example of selecting a provider and model
        - model_comparison: Example of comparing outputs across providers
        - tournaments: Example of creating and monitoring a tournament
        - advanced_chat: Example of a multi-turn conversation with system prompts

    Usage:
        These examples are designed to be used as reference by LLMs to understand
        how to combine multiple tools in the Ultimate MCP Server to accomplish common tasks.
        Each example includes expected outputs to help understand the flow.
    """
    # Purely static reference data: reading this resource invokes no tools and
    # has no side effects. The "example_output" values are illustrative only.
    return {
        "basic_provider_selection": {
            "description": "Selecting a provider and model for text generation",
            "steps": [
                {
                    "step": 1,
                    "tool": "get_provider_status",
                    "parameters": {},
                    "purpose": "Check which providers are available",
                    "example_output": {
                        "providers": {
                            "openai": {"available": True, "models_count": 12},
                            "anthropic": {"available": True, "models_count": 6},
                        }
                    },
                },
                {
                    "step": 2,
                    "tool": "list_models",
                    "parameters": {"provider": "openai"},
                    "purpose": "Get available models for the selected provider",
                    "example_output": {
                        "models": {
                            "openai": [
                                {
                                    "id": "gpt-4o",
                                    "name": "GPT-4o",
                                    "features": ["chat", "completion"],
                                }
                            ]
                        }
                    },
                },
                {
                    "step": 3,
                    "tool": "generate_completion",
                    "parameters": {
                        "prompt": "Explain quantum computing in simple terms",
                        "provider": "openai",
                        "model": "gpt-4o",
                        "temperature": 0.7,
                    },
                    "purpose": "Generate text with the selected provider and model",
                    "example_output": {
                        "text": "Quantum computing is like...",
                        "model": "gpt-4o",
                        "provider": "openai",
                        "tokens": {"input": 8, "output": 150, "total": 158},
                        "cost": 0.000123,
                    },
                },
            ],
            "error_handling": [
                "If get_provider_status shows provider unavailable, try a different provider",
                "If list_models returns empty list, select a different provider",
                "If generate_completion returns an error, check the error message for guidance",
            ],
        },
        "model_comparison": {
            "description": "Comparing multiple models on the same task",
            "steps": [
                {
                    "step": 1,
                    "tool": "multi_completion",
                    "parameters": {
                        "prompt": "Write a haiku about programming",
                        "providers": [
                            {"provider": "openai", "model": "gpt-4o"},
                            {"provider": "anthropic", "model": "claude-3-opus-20240229"},
                        ],
                        "temperature": 0.7,
                    },
                    "purpose": "Generate completions from multiple providers simultaneously",
                    "example_output": {
                        "results": {
                            "openai/gpt-4o": {
                                "success": True,
                                "text": "Code flows like water\nBugs emerge from the depths\nPatience brings order",
                                "model": "gpt-4o",
                            },
                            "anthropic/claude-3-opus-20240229": {
                                "success": True,
                                "text": "Fingers dance on keys\nLogic blooms in silent thought\nPrograms come alive",
                                "model": "claude-3-opus-20240229",
                            },
                        },
                        "successful_count": 2,
                        "total_providers": 2,
                    },
                },
                {
                    "step": 2,
                    "suggestion": "Compare the results for quality, style, and adherence to the haiku format",
                },
            ],
            "error_handling": [
                "Check successful_count vs total_providers to see if all providers succeeded",
                "For each provider, check the success field to determine if it completed successfully",
                "If a provider failed, look at its error field for details",
            ],
        },
        "tournaments": {
            "description": "Creating and monitoring a multi-model tournament",
            "steps": [
                {
                    "step": 1,
                    "tool": "create_tournament",
                    "parameters": {
                        "name": "Sorting Algorithm Tournament",
                        "prompt": "Implement a quicksort algorithm in Python that handles duplicates efficiently",
                        "model_ids": ["openai/gpt-4o", "anthropic/claude-3-opus-20240229"],
                        "rounds": 3,
                        "tournament_type": "code",
                    },
                    "purpose": "Create a new tournament comparing multiple models",
                    "example_output": {
                        "tournament_id": "tour_abc123xyz789",
                        "status": "PENDING",
                    },
                },
                {
                    "step": 2,
                    "tool": "get_tournament_status",
                    "parameters": {"tournament_id": "tour_abc123xyz789"},
                    "purpose": "Check if the tournament has started running",
                    "example_output": {
                        "tournament_id": "tour_abc123xyz789",
                        "status": "RUNNING",
                        "current_round": 1,
                        "total_rounds": 3,
                    },
                },
                {
                    "step": 3,
                    "suggestion": "Wait for the tournament to complete",
                    "purpose": "Tournaments run asynchronously and may take time to complete",
                },
                {
                    "step": 4,
                    "tool": "get_tournament_results",
                    "parameters": {"tournament_id": "tour_abc123xyz789"},
                    "purpose": "Retrieve full results once the tournament is complete",
                    "example_output": {
                        "tournament_id": "tour_abc123xyz789",
                        "status": "COMPLETED",
                        "rounds_data": [
                            {
                                "round_number": 1,
                                "model_outputs": {
                                    "openai/gpt-4o": "def quicksort(arr): ...",
                                    "anthropic/claude-3-opus-20240229": "def quicksort(arr): ...",
                                },
                                "scores": {
                                    "openai/gpt-4o": 0.85,
                                    "anthropic/claude-3-opus-20240229": 0.92,
                                },
                            }
                            # Additional rounds would be here in a real response
                        ],
                    },
                },
            ],
            "error_handling": [
                "If create_tournament fails, check the error message for missing or invalid parameters",
                "If get_tournament_status returns an error, verify the tournament_id is correct",
                "If tournament status is FAILED, check the error_message field for details",
            ],
        },
        "advanced_chat": {
            "description": "Multi-turn conversation with system prompt and context",
            "steps": [
                {
                    "step": 1,
                    "tool": "chat_completion",
                    "parameters": {
                        "messages": [
                            {
                                "role": "user",
                                "content": "Hello, can you help me with Python?",
                            }
                        ],
                        "provider": "anthropic",
                        "model": "claude-3-opus-20240229",
                        "system_prompt": "You are an expert Python tutor. Provide concise, helpful answers with code examples when appropriate.",
                        "temperature": 0.5,
                    },
                    "purpose": "Start a conversation with a system prompt for context",
                    "example_output": {
                        "text": "Hello! I'd be happy to help you with Python. What specific aspect are you interested in learning about?",
                        "model": "claude-3-opus-20240229",
                        "provider": "anthropic",
                    },
                },
                {
                    "step": 2,
                    "tool": "chat_completion",
                    "parameters": {
                        # Note: chat_completion is stateless, so the full prior
                        # exchange must be resent on every turn.
                        "messages": [
                            {
                                "role": "user",
                                "content": "Hello, can you help me with Python?",
                            },
                            {
                                "role": "assistant",
                                "content": "Hello! I'd be happy to help you with Python. What specific aspect are you interested in learning about?",
                            },
                            {
                                "role": "user",
                                "content": "How do I write a function that checks if a string is a palindrome?",
                            },
                        ],
                        "provider": "anthropic",
                        "model": "claude-3-opus-20240229",
                        "system_prompt": "You are an expert Python tutor. Provide concise, helpful answers with code examples when appropriate.",
                        "temperature": 0.5,
                    },
                    "purpose": "Continue the conversation by including the full message history",
                    "example_output": {
                        "text": "Here's a simple function to check if a string is a palindrome in Python:\n\n```python\ndef is_palindrome(s):\n    # Remove spaces and convert to lowercase for more flexible matching\n    s = s.lower().replace(' ', '')\n    # Compare the string with its reverse\n    return s == s[::-1]\n\n# Examples\nprint(is_palindrome('racecar'))  # True\nprint(is_palindrome('hello'))  # False\nprint(is_palindrome('A man a plan a canal Panama'))  # True\n```\n\nThis function works by:\n1. Converting the string to lowercase and removing spaces\n2. Checking if the processed string equals its reverse (using slice notation `[::-1]`)\n\nIs there anything specific about this solution you'd like me to explain further?",
                        "model": "claude-3-opus-20240229",
                        "provider": "anthropic",
                    },
                },
            ],
            "error_handling": [
                "Always include the full conversation history in the messages array",
                "Ensure each message has both 'role' and 'content' fields",
                "If using system_prompt, ensure it's appropriate for the provider",
            ],
        },
    }
@self.mcp.resource("examples://completions")
def get_completion_examples() -> Dict[str, Any]:
    """
    Get examples of different completion types and when to use them.

    This resource provides detailed examples of different completion tools available
    in the Ultimate MCP Server, along with guidance on when to use each type. It helps with
    selecting the most appropriate completion tool for different scenarios.

    Resource URI: examples://completions

    Returns:
        Dictionary containing completion examples organized by type:
        - standard_completion: When to use generate_completion
        - chat_completion: When to use chat_completion
        - streaming_completion: When to use stream_completion
        - multi_provider: When to use multi_completion

    Usage:
        This resource helps LLMs understand the appropriate completion tool
        to use for different scenarios, with concrete examples and use cases.
    """
    # Purely static reference data: reading this resource invokes no tools.
    # Token counts, costs, and timings in the examples are illustrative only.
    return {
        "standard_completion": {
            "tool": "generate_completion",
            "description": "Single-turn text generation without streaming",
            "best_for": [
                "Simple, one-off text generation tasks",
                "When you need a complete response at once",
                "When you don't need conversation history",
            ],
            "example": {
                "request": {
                    "prompt": "Explain the concept of quantum entanglement in simple terms",
                    "provider": "openai",
                    "model": "gpt-4o",
                    "temperature": 0.7,
                },
                "response": {
                    "text": "Quantum entanglement is like having two magic coins...",
                    "model": "gpt-4o",
                    "provider": "openai",
                    "tokens": {"input": 10, "output": 150, "total": 160},
                    "cost": 0.00032,
                    "processing_time": 2.1,
                },
            },
        },
        "chat_completion": {
            "tool": "chat_completion",
            "description": "Multi-turn conversation with message history",
            "best_for": [
                "Maintaining conversation context across multiple turns",
                "When dialogue history matters for the response",
                "When using system prompts to guide assistant behavior",
            ],
            "example": {
                "request": {
                    "messages": [
                        {"role": "user", "content": "What's the capital of France?"},
                        {"role": "assistant", "content": "The capital of France is Paris."},
                        {"role": "user", "content": "And what's its population?"},
                    ],
                    "provider": "anthropic",
                    "model": "claude-3-opus-20240229",
                    "system_prompt": "You are a helpful geography assistant.",
                },
                "response": {
                    "text": "The population of Paris is approximately 2.1 million people in the city proper...",
                    "model": "claude-3-opus-20240229",
                    "provider": "anthropic",
                    "tokens": {"input": 62, "output": 48, "total": 110},
                    "cost": 0.00055,
                    "processing_time": 1.8,
                },
            },
        },
        "streaming_completion": {
            "tool": "stream_completion",
            "description": "Generates text in smaller chunks as a stream",
            "best_for": [
                "When you need to show incremental progress to users",
                "For real-time display of model outputs",
                "Long-form content generation where waiting for the full response would be too long",
            ],
            "example": {
                "request": {
                    "prompt": "Write a short story about a robot learning to paint",
                    "provider": "openai",
                    "model": "gpt-4o",
                },
                # The final chunk carries finished=True plus the aggregated
                # full_text and total processing_time.
                "response_chunks": [
                    {
                        "text": "In the year 2150, ",
                        "chunk_index": 1,
                        "provider": "openai",
                        "model": "gpt-4o",
                        "finished": False,
                    },
                    {
                        "text": "a maintenance robot named ARIA-7 was assigned to",
                        "chunk_index": 2,
                        "provider": "openai",
                        "model": "gpt-4o",
                        "finished": False,
                    },
                    {
                        "text": "",
                        "chunk_index": 25,
                        "provider": "openai",
                        "full_text": "In the year 2150, a maintenance robot named ARIA-7 was assigned to...",
                        "processing_time": 8.2,
                        "finished": True,
                    },
                ],
            },
        },
        "multi_provider": {
            "tool": "multi_completion",
            "description": "Get completions from multiple providers simultaneously",
            "best_for": [
                "Comparing outputs from different models",
                "Finding consensus among multiple models",
                "Fallback scenarios where one provider might fail",
                "Benchmarking different providers on the same task",
            ],
            "example": {
                "request": {
                    "prompt": "Provide three tips for sustainable gardening",
                    "providers": [
                        {"provider": "openai", "model": "gpt-4o"},
                        {"provider": "anthropic", "model": "claude-3-opus-20240229"},
                    ],
                },
                "response": {
                    "results": {
                        "openai/gpt-4o": {
                            "provider_key": "openai/gpt-4o",
                            "success": True,
                            "text": "1. Use compost instead of chemical fertilizers...",
                            "model": "gpt-4o",
                        },
                        "anthropic/claude-3-opus-20240229": {
                            "provider_key": "anthropic/claude-3-opus-20240229",
                            "success": True,
                            "text": "1. Implement water conservation techniques...",
                            "model": "claude-3-opus-20240229",
                        },
                    },
                    "successful_count": 2,
                    "total_providers": 2,
                    "processing_time": 3.5,
                },
            },
        },
    }
@self.mcp.resource("examples://tournaments")
def get_tournament_examples() -> Dict[str, Any]:
    """
    Get detailed examples and guidance for running LLM tournaments.

    This resource provides comprehensive examples and guidance for creating,
    monitoring, and analyzing LLM tournaments. It includes detailed information
    about tournament configuration, interpreting results, and best practices.

    Resource URI: examples://tournaments

    Returns:
        Dictionary containing tournament examples and guidance:
        - tournament_types: Different types of tournaments and their uses
        - configuration_guide: Guidance on how to configure tournaments
        - analysis_guide: How to interpret tournament results
        - example_tournaments: Complete examples of different tournament configurations

    Usage:
        This resource helps LLMs understand how to effectively use the tournament
        tools, with guidance on configuration, execution, and analysis.
    """
    # Purely static reference data: reading this resource invokes no tools.
    return {
        "tournament_types": {
            "code": {
                "description": "Tournaments where models compete on coding tasks",
                "ideal_for": [
                    "Algorithm implementation challenges",
                    "Debugging exercises",
                    "Code optimization problems",
                    "Comparing models' coding abilities",
                ],
                "evaluation_criteria": [
                    "Code correctness",
                    "Efficiency",
                    "Readability",
                    "Error handling",
                ],
            },
            # Other tournament types could be added in the future
        },
        "configuration_guide": {
            "model_selection": {
                "description": "Guidelines for selecting models to include in tournaments",
                "recommendations": [
                    "Include models from different providers for diverse approaches",
                    "Compare models within the same family (e.g., different Claude versions)",
                    "Consider including both specialized and general models",
                    "Ensure all models can handle the task complexity",
                ],
            },
            "rounds": {
                "description": "How to determine the appropriate number of rounds",
                "recommendations": [
                    "Start with 3 rounds for most tournaments",
                    "Use more rounds (5+) for more complex or nuanced tasks",
                    "Consider that each round increases total runtime and cost",
                    "Each round gives models a chance to refine their solutions",
                ],
            },
            "prompt_design": {
                "description": "Best practices for tournament prompt design",
                "recommendations": [
                    "Be specific about the problem requirements",
                    "Clearly define evaluation criteria",
                    "Specify output format expectations",
                    "Consider including test cases",
                    "Avoid ambiguous or underspecified requirements",
                ],
            },
        },
        "analysis_guide": {
            "score_interpretation": {
                "description": "How to interpret model scores in tournament results",
                "guidance": [
                    "Scores are normalized to a 0-1 scale (1 being perfect)",
                    "Consider relative scores between models rather than absolute values",
                    "Look for consistency across rounds",
                    "Consider output quality even when scores are similar",
                ],
            },
            "output_analysis": {
                "description": "How to analyze model outputs from tournaments",
                "guidance": [
                    "Compare approaches used by different models",
                    "Look for patterns in errors or limitations",
                    "Identify unique strengths of different providers",
                    "Consider both the score and actual output quality",
                ],
            },
        },
        "example_tournaments": {
            "algorithm_implementation": {
                "name": "Binary Search Algorithm",
                "prompt": "Implement a binary search algorithm in Python that can search for an element in a sorted array. Include proper error handling, documentation, and test cases.",
                "model_ids": ["openai/gpt-4o", "anthropic/claude-3-opus-20240229"],
                "rounds": 3,
                "tournament_type": "code",
                "explanation": "This tournament tests the models' ability to implement a standard algorithm with proper error handling and testing.",
            },
            "code_optimization": {
                "name": "String Processing Optimization",
                "prompt": "Optimize the following Python function to process large strings more efficiently: def find_substring_occurrences(text, pattern): return [i for i in range(len(text)) if text[i:i+len(pattern)] == pattern]",
                "model_ids": [
                    "openai/gpt-4o",
                    "anthropic/claude-3-opus-20240229",
                    "anthropic/claude-3-sonnet-20240229",
                ],
                "rounds": 4,
                "tournament_type": "code",
                "explanation": "This tournament compares models' ability to recognize and implement optimization opportunities in existing code.",
            },
        },
        "workflow_examples": {
            "basic_tournament": {
                "description": "A simple tournament workflow from creation to result analysis",
                "steps": [
                    {
                        "step": 1,
                        "description": "Create the tournament",
                        "code": "tournament_id = create_tournament(name='Sorting Algorithm Challenge', prompt='Implement an efficient sorting algorithm...', model_ids=['openai/gpt-4o', 'anthropic/claude-3-opus-20240229'], rounds=3, tournament_type='code')",
                    },
                    {
                        "step": 2,
                        "description": "Poll for tournament status",
                        "code": "status = get_tournament_status(tournament_id)['status']\nwhile status in ['PENDING', 'RUNNING']:\n    time.sleep(30)  # Check every 30 seconds\n    status = get_tournament_status(tournament_id)['status']",
                    },
                    {
                        "step": 3,
                        "description": "Retrieve and analyze results",
                        "code": "results = get_tournament_results(tournament_id)\nwinner = max(results['final_scores'].items(), key=lambda x: x[1])[0]\noutputs = {model_id: results['rounds_data'][-1]['model_outputs'][model_id] for model_id in results['config']['model_ids']}",
                    },
                ],
            }
        },
    }
def start_server(
    host: Optional[str] = None,
    port: Optional[int] = None,
    workers: Optional[int] = None,
    log_level: Optional[str] = None,
    reload: bool = False,
    transport_mode: str = "streamable-http",
    include_tools: Optional[List[str]] = None,
    exclude_tools: Optional[List[str]] = None,
    load_all_tools: bool = False,  # Flag to control tool loading
) -> None:
    """
    Start the Ultimate MCP Server with configurable settings.

    This function serves as the main entry point for starting the Ultimate MCP Server
    in SSE (HTTP server), streamable-http (HTTP server), or stdio (direct process
    communication) mode. It handles complete server initialization including:

    1. Configuration loading and parameter validation
    2. Logging setup with proper levels and formatting
    3. Gateway instantiation with tool registration
    4. Transport mode selection and server startup

    The function provides flexibility in server configuration through parameters that
    override settings from the configuration file, allowing for quick adjustments without
    modifying configuration files. It also supports tool filtering, enabling selective
    registration of specific tools.

    Args:
        host: Hostname or IP address to bind the server to (e.g., "localhost", "0.0.0.0").
            If None, uses the value from the configuration file.
        port: TCP port for the server to listen on when in HTTP mode.
            If None, uses the value from the configuration file.
        workers: Number of worker processes. If None, uses the value from the
            configuration file. NOTE: currently only reported at startup; it is
            not passed through to uvicorn (see note in the body).
        log_level: Logging verbosity level. One of "DEBUG", "INFO", "WARNING",
            "ERROR", "CRITICAL". If None, defaults to "INFO" (the configuration
            file is NOT consulted for this setting).
        reload: Whether to automatically reload the server when code changes are detected.
            Useful during development but not recommended for production.
        transport_mode: Communication mode for the server. Options:
            - "stdio": Run using standard input/output for direct process communication
            - "sse": Run as an HTTP server with Server-Sent Events for streaming
            - "streamable-http": Run as an HTTP server with streaming request/response
              bodies (default; recommended for HTTP clients)
        include_tools: Optional list of specific tool names to include in registration.
            If provided, only these tools will be registered unless they are
            also in exclude_tools. If None, all tools are included by default.
        exclude_tools: Optional list of tool names to exclude from registration.
            These tools will not be registered even if they are also in include_tools.
        load_all_tools: If True, load all available tools. If False (default), load
            only the base set.

    Raises:
        ValueError: If transport_mode is not one of the valid options.
        ConfigurationError: If there are critical errors in the server configuration.

    Note:
        This function does not return until the server event loop exits (e.g., on
        SIGINT). In HTTP modes it starts a Uvicorn server; in stdio mode it runs
        the FastMCP stdio handler.
    """
    # Validate transport_mode FIRST, before mutating any configuration state,
    # so an invalid value cannot leave the config partially updated.
    if transport_mode not in ["sse", "stdio", "streamable-http"]:
        raise ValueError(
            f"Invalid transport_mode: {transport_mode}. Must be 'sse', 'stdio', or 'streamable-http'"
        )

    # Load the configuration once and reuse it (the original code called
    # get_config() four separate times for host/port/workers/filtering).
    cfg = get_config()
    server_host = host or cfg.server.host
    server_port = port or cfg.server.port
    server_workers = workers or cfg.server.workers

    # Apply tool-filtering overrides to the loaded configuration.
    if include_tools or exclude_tools:
        cfg.tool_registration.filter_enabled = True
    if include_tools:
        cfg.tool_registration.included_tools = include_tools
    if exclude_tools:
        cfg.tool_registration.excluded_tools = exclude_tools

    # Determine final log level from the provided parameter or fall back to INFO.
    final_log_level = (log_level or "INFO").upper()

    # Update LOGGING_CONFIG with the final level before applying it.
    LOGGING_CONFIG["root"]["level"] = final_log_level
    LOGGING_CONFIG["loggers"]["ultimate_mcp_server"]["level"] = final_log_level
    LOGGING_CONFIG["loggers"]["ultimate_mcp_server.tools"]["level"] = final_log_level
    LOGGING_CONFIG["loggers"]["ultimate_mcp_server.completions"]["level"] = final_log_level
    # Set Uvicorn access level based on final level.
    LOGGING_CONFIG["loggers"]["uvicorn.access"]["level"] = (
        final_log_level if final_log_level != "CRITICAL" else "CRITICAL"
    )
    # Ensure Uvicorn base/error logs are at least INFO unless final level is DEBUG.
    uvicorn_base_level = "INFO" if final_log_level not in ["DEBUG"] else "DEBUG"
    LOGGING_CONFIG["loggers"]["uvicorn"]["level"] = uvicorn_base_level
    LOGGING_CONFIG["loggers"]["uvicorn.error"]["level"] = uvicorn_base_level

    # Configure logging.
    logging.config.dictConfig(LOGGING_CONFIG)

    # Bind the logger up front so BOTH transport branches can use it. The
    # original assigned `logger` only inside the HTTP branch, leaving the
    # stdio branch (and the final "Server loop exited" log) dependent on a
    # module-level name being in scope.
    logger = logging.getLogger("ultimate_mcp_server.server")

    # Initialize the gateway if not already created.
    global _gateway_instance
    if not _gateway_instance:
        _gateway_instance = Gateway(
            name=cfg.server.name,
            register_tools=True,
            load_all_tools=load_all_tools,  # Pass the flag to Gateway
        )

    # Log startup info to stderr instead of using logging directly.
    print("Starting Ultimate MCP Server server", file=sys.stderr)
    print(f"Host: {server_host}", file=sys.stderr)
    print(f"Port: {server_port}", file=sys.stderr)
    # NOTE(review): server_workers is reported here but never passed to
    # uvicorn.Config below, so the HTTP server always runs single-process --
    # confirm whether multi-worker support is intended.
    print(f"Workers: {server_workers}", file=sys.stderr)
    print(f"Log level: {final_log_level}", file=sys.stderr)
    print(f"Transport mode: {transport_mode}", file=sys.stderr)
    if transport_mode == "streamable-http":
        print(
            "Note: streamable-http is the recommended transport for HTTP-based MCP clients",
            file=sys.stderr,
        )

    # Log tool loading strategy.
    if load_all_tools:
        print("Tool Loading: ALL available tools", file=sys.stderr)
    else:
        print("Tool Loading: Base Toolset Only", file=sys.stderr)
        base_toolset = [
            "completion",
            "filesystem",
            "optimization",
            "provider",
            "local_text",
            "search",
        ]
        print(f" (Includes: {', '.join(base_toolset)})", file=sys.stderr)

    # Log tool filtering info if enabled.
    if cfg.tool_registration.filter_enabled:
        if cfg.tool_registration.included_tools:
            print(
                f"Including tools: {', '.join(cfg.tool_registration.included_tools)}",
                file=sys.stderr,
            )
        if cfg.tool_registration.excluded_tools:
            print(
                f"Excluding tools: {', '.join(cfg.tool_registration.excluded_tools)}",
                file=sys.stderr,
            )

    async def cleanup_resources() -> None:
        """Perform cleanup for various components during shutdown.

        Defined once and shared by both transport branches (the original
        duplicated this coroutine verbatim in the HTTP and stdio paths).
        Each step is bounded by a timeout and has errors suppressed so one
        failing component cannot stall or break the rest of the shutdown.
        """
        print("Cleaning up Gateway instance and associated resources...", file=sys.stderr)
        # Shutdown SQL Tools with timeout.
        try:
            await asyncio.wait_for(shutdown_sql_tools(), timeout=3.0)
        except (asyncio.TimeoutError, Exception):
            pass  # Suppress errors during shutdown
        # Shutdown Connection Manager with timeout.
        try:
            from ultimate_mcp_server.tools.sql_databases import _connection_manager

            await asyncio.wait_for(_connection_manager.shutdown(), timeout=2.0)
        except (asyncio.TimeoutError, Exception):
            pass  # Suppress errors during shutdown
        # Shutdown Smart Browser with timeout.
        try:
            await asyncio.wait_for(smart_browser_shutdown(), timeout=5.0)
        except (asyncio.TimeoutError, Exception):
            pass  # Suppress errors during shutdown

    if transport_mode in ["sse", "streamable-http"]:
        # --- HTTP Mode Execution (unified handling for SSE and streamable-http) ---
        import os
        import subprocess
        import threading
        import time

        import uvicorn

        print(f"Running in {transport_mode} mode...", file=sys.stderr)

        def run_tool_context_estimator():
            """Run mcp_tool_context_estimator.py against the live server and report to stderr."""
            # Wait a bit for the server to start up.
            time.sleep(5)
            try:
                # Warn (but still run) if tool registration did not produce tools_list.json.
                if not os.path.exists("tools_list.json"):
                    print("\n--- Tool Context Window Analysis ---", file=sys.stderr)
                    print(
                        "Error: tools_list.json not found. Tool registration may have failed.",
                        file=sys.stderr,
                    )
                    print(
                        "The tool context estimator will run with limited functionality.",
                        file=sys.stderr,
                    )
                    print("-" * 40, file=sys.stderr)
                # Run the tool context estimator script with the appropriate transport.
                cmd = ["python", "-m", "mcp_tool_context_estimator", "--quiet"]
                # Pass transport mode for both HTTP transports (sse and streamable-http).
                if transport_mode in ["sse", "streamable-http"]:
                    cmd.extend(["--transport", transport_mode])
                result = subprocess.run(cmd, capture_output=True, text=True)
                # Output the results to stderr.
                if result.stdout:
                    print("\n--- Tool Context Window Analysis ---", file=sys.stderr)
                    print(result.stdout, file=sys.stderr)
                    print("-" * 40, file=sys.stderr)
                # Check if there was an error.
                if result.returncode != 0:
                    print("\n--- Tool Context Estimator Error ---", file=sys.stderr)
                    print(
                        "Failed to run mcp_tool_context_estimator.py - likely due to an error.",
                        file=sys.stderr,
                    )
                    print("Error output:", file=sys.stderr)
                    print(result.stderr, file=sys.stderr)
                    print("-" * 40, file=sys.stderr)
            except Exception as e:
                print(f"\nError running tool context estimator: {str(e)}", file=sys.stderr)
                print(
                    "Check if mcp_tool_context_estimator.py exists and is executable.",
                    file=sys.stderr,
                )

        # Skip the tool-context estimator for SSE transport because it causes the server
        # to shut down when the estimator disconnects after completing its analysis.
        # SSE servers shut down when all clients disconnect, and the estimator is treated
        # as a client. Run it for streamable-http mode where this isn't an issue.
        if transport_mode == "streamable-http" and os.path.exists("mcp_tool_context_estimator.py"):
            threading.Thread(target=run_tool_context_estimator, daemon=True).start()

        # Configure graceful shutdown with error suppression.
        enable_quiet_shutdown()
        # Register the shared cleanup function with the graceful shutdown system.
        register_shutdown_handler(cleanup_resources)

        # Create FastMCP app with proper path configuration.
        if transport_mode == "sse":
            # Mark the gateway instance as SSE mode for lifespan management.
            _gateway_instance._sse_mode = True
            mcp_app = _gateway_instance.mcp.http_app(transport="sse", path="/sse")
            print("Note: Running in legacy SSE mode.", file=sys.stderr)

            # Add SSE keepalive mechanism to prevent automatic shutdown.
            def sse_keepalive():
                """Keepalive thread to prevent SSE server from shutting down when no clients are connected."""
                while True:
                    time.sleep(30)  # Send keepalive every 30 seconds
                    try:
                        # This simple presence keeps the server alive.
                        # The actual SSE connections handle their own keepalive.
                        pass
                    except Exception:
                        # If there's any error, just continue.
                        pass

            # Start the keepalive thread as a daemon so it doesn't prevent shutdown.
            keepalive_thread = threading.Thread(
                target=sse_keepalive, daemon=True, name="SSE-Keepalive"
            )
            keepalive_thread.start()
            print("SSE keepalive thread started to prevent automatic shutdown.", file=sys.stderr)
        else:  # This path is for streamable-http
            mcp_app = _gateway_instance.mcp.http_app(path="/mcp")
            print(f"Running in {transport_mode} mode...", file=sys.stderr)
            print(f"[DEBUG] {transport_mode} app type: {type(mcp_app)}", file=sys.stderr)

        # === BEGIN NEW SPLIT-APP ARCHITECTURE ===
        from starlette.applications import Starlette
        from starlette.routing import Mount

        # 1) PRISTINE FastMCP wrapper -- **NO** extra routes.
        mcp_starlette = Starlette(
            routes=[Mount("/", mcp_app)],
            lifespan=mcp_app.lifespan,
        )

        # 2) FastAPI application for rich REST APIs & automatic docs.
        api_app = FastAPI(
            title="Ultimate MCP Server API",
            description="REST API endpoints for the Ultimate MCP Server",
            version="1.0.0",
            docs_url="/docs",
            redoc_url="/redoc",
            openapi_url="/openapi.json",
        )
        # Add CORS middleware (FastAPI uses Starlette under the hood).
        api_app.add_middleware(
            CORSMiddleware,
            allow_origins=["*"],
            allow_methods=["*"],
            allow_headers=["*"],
            allow_credentials=True,
        )

        endpoint_path = "/sse" if transport_mode == "sse" else "/mcp"

        # Setup all UMS API endpoints.
        setup_ums_api(api_app)

        # 3) Combined application -- avoid overlapping mounts.
        final_app = Starlette(
            routes=[
                Mount(endpoint_path, mcp_starlette),  # /mcp or /sse
                Mount("/api", api_app),  # REST API under /api
            ],
            lifespan=mcp_app.lifespan,
        )

        # Logging of endpoints for clarity.
        print(
            f"{transport_mode.upper()} endpoint available at: http://{server_host}:{server_port}{endpoint_path}",
            file=sys.stderr,
        )
        print(
            f"API endpoints available at: http://{server_host}:{server_port}/api/*",
            file=sys.stderr,
        )
        print(
            f"UMS Explorer available at: http://{server_host}:{server_port}/api/ums-explorer",
            file=sys.stderr,
        )
        print(
            f"Swagger UI available at: http://{server_host}:{server_port}/api/docs",
            file=sys.stderr,
        )
        print(
            f"ReDoc available at: http://{server_host}:{server_port}/api/redoc",
            file=sys.stderr,
        )
        print(
            f"OpenAPI spec available at: http://{server_host}:{server_port}/api/openapi.json",
            file=sys.stderr,
        )
        print(
            f"Discovery endpoint available at: http://{server_host}:{server_port}/",
            file=sys.stderr,
        )
        # === END NEW SPLIT-APP ARCHITECTURE ===

        # Use our custom quiet Uvicorn server for silent shutdown.
        config = uvicorn.Config(
            final_app,
            host=server_host,
            port=server_port,
            log_config=LOGGING_CONFIG,
            log_level=final_log_level.lower(),
            lifespan="on",  # This tells uvicorn to look for and use the app's lifespan
        )
        server = create_quiet_server(config)
        server.run()
    else:
        # --- Stdio Mode Execution ---
        logger.info("Running in stdio mode...")
        # Configure graceful shutdown with error suppression.
        enable_quiet_shutdown()
        # Register the same shared cleanup function for stdio mode.
        register_shutdown_handler(cleanup_resources)
        try:
            # Run the FastMCP stdio loop - this will block until interrupted.
            _gateway_instance.mcp.run()
        except (KeyboardInterrupt, SystemExit):
            # Normal shutdown - handled by graceful shutdown system.
            pass
        except Exception:
            # Any other error - also handled by graceful shutdown.
            pass
        # --- End Stdio Mode ---

    # --- Post-Server Exit ---
    logger.info("Server loop exited.")
```