This is page 17 of 45. Use http://codebase.md/dicklesworthstone/llm_gateway_mcp_server?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .cursorignore
├── .env.example
├── .envrc
├── .gitignore
├── additional_features.md
├── check_api_keys.py
├── completion_support.py
├── comprehensive_test.py
├── docker-compose.yml
├── Dockerfile
├── empirically_measured_model_speeds.json
├── error_handling.py
├── example_structured_tool.py
├── examples
│   ├── __init__.py
│   ├── advanced_agent_flows_using_unified_memory_system_demo.py
│   ├── advanced_extraction_demo.py
│   ├── advanced_unified_memory_system_demo.py
│   ├── advanced_vector_search_demo.py
│   ├── analytics_reporting_demo.py
│   ├── audio_transcription_demo.py
│   ├── basic_completion_demo.py
│   ├── cache_demo.py
│   ├── claude_integration_demo.py
│   ├── compare_synthesize_demo.py
│   ├── cost_optimization.py
│   ├── data
│   │   ├── sample_event.txt
│   │   ├── Steve_Jobs_Introducing_The_iPhone_compressed.md
│   │   └── Steve_Jobs_Introducing_The_iPhone_compressed.mp3
│   ├── docstring_refiner_demo.py
│   ├── document_conversion_and_processing_demo.py
│   ├── entity_relation_graph_demo.py
│   ├── filesystem_operations_demo.py
│   ├── grok_integration_demo.py
│   ├── local_text_tools_demo.py
│   ├── marqo_fused_search_demo.py
│   ├── measure_model_speeds.py
│   ├── meta_api_demo.py
│   ├── multi_provider_demo.py
│   ├── ollama_integration_demo.py
│   ├── prompt_templates_demo.py
│   ├── python_sandbox_demo.py
│   ├── rag_example.py
│   ├── research_workflow_demo.py
│   ├── sample
│   │   ├── article.txt
│   │   ├── backprop_paper.pdf
│   │   ├── buffett.pdf
│   │   ├── contract_link.txt
│   │   ├── legal_contract.txt
│   │   ├── medical_case.txt
│   │   ├── northwind.db
│   │   ├── research_paper.txt
│   │   ├── sample_data.json
│   │   └── text_classification_samples
│   │       ├── email_classification.txt
│   │       ├── news_samples.txt
│   │       ├── product_reviews.txt
│   │       └── support_tickets.txt
│   ├── sample_docs
│   │   └── downloaded
│   │       └── attention_is_all_you_need.pdf
│   ├── sentiment_analysis_demo.py
│   ├── simple_completion_demo.py
│   ├── single_shot_synthesis_demo.py
│   ├── smart_browser_demo.py
│   ├── sql_database_demo.py
│   ├── sse_client_demo.py
│   ├── test_code_extraction.py
│   ├── test_content_detection.py
│   ├── test_ollama.py
│   ├── text_classification_demo.py
│   ├── text_redline_demo.py
│   ├── tool_composition_examples.py
│   ├── tournament_code_demo.py
│   ├── tournament_text_demo.py
│   ├── unified_memory_system_demo.py
│   ├── vector_search_demo.py
│   ├── web_automation_instruction_packs.py
│   └── workflow_delegation_demo.py
├── LICENSE
├── list_models.py
├── marqo_index_config.json.example
├── mcp_protocol_schema_2025-03-25_version.json
├── mcp_python_lib_docs.md
├── mcp_tool_context_estimator.py
├── model_preferences.py
├── pyproject.toml
├── quick_test.py
├── README.md
├── resource_annotations.py
├── run_all_demo_scripts_and_check_for_errors.py
├── storage
│   └── smart_browser_internal
│       ├── locator_cache.db
│       ├── readability.js
│       └── storage_state.enc
├── test_client.py
├── test_connection.py
├── TEST_README.md
├── test_sse_client.py
├── test_stdio_client.py
├── tests
│   ├── __init__.py
│   ├── conftest.py
│   ├── integration
│   │   ├── __init__.py
│   │   └── test_server.py
│   ├── manual
│   │   ├── test_extraction_advanced.py
│   │   └── test_extraction.py
│   └── unit
│       ├── __init__.py
│       ├── test_cache.py
│       ├── test_providers.py
│       └── test_tools.py
├── TODO.md
├── tool_annotations.py
├── tools_list.json
├── ultimate_mcp_banner.webp
├── ultimate_mcp_logo.webp
├── ultimate_mcp_server
│   ├── __init__.py
│   ├── __main__.py
│   ├── cli
│   │   ├── __init__.py
│   │   ├── __main__.py
│   │   ├── commands.py
│   │   ├── helpers.py
│   │   └── typer_cli.py
│   ├── clients
│   │   ├── __init__.py
│   │   ├── completion_client.py
│   │   └── rag_client.py
│   ├── config
│   │   └── examples
│   │       └── filesystem_config.yaml
│   ├── config.py
│   ├── constants.py
│   ├── core
│   │   ├── __init__.py
│   │   ├── evaluation
│   │   │   ├── base.py
│   │   │   └── evaluators.py
│   │   ├── providers
│   │   │   ├── __init__.py
│   │   │   ├── anthropic.py
│   │   │   ├── base.py
│   │   │   ├── deepseek.py
│   │   │   ├── gemini.py
│   │   │   ├── grok.py
│   │   │   ├── ollama.py
│   │   │   ├── openai.py
│   │   │   └── openrouter.py
│   │   ├── server.py
│   │   ├── state_store.py
│   │   ├── tournaments
│   │   │   ├── manager.py
│   │   │   ├── tasks.py
│   │   │   └── utils.py
│   │   └── ums_api
│   │       ├── __init__.py
│   │       ├── ums_database.py
│   │       ├── ums_endpoints.py
│   │       ├── ums_models.py
│   │       └── ums_services.py
│   ├── exceptions.py
│   ├── graceful_shutdown.py
│   ├── services
│   │   ├── __init__.py
│   │   ├── analytics
│   │   │   ├── __init__.py
│   │   │   ├── metrics.py
│   │   │   └── reporting.py
│   │   ├── cache
│   │   │   ├── __init__.py
│   │   │   ├── cache_service.py
│   │   │   ├── persistence.py
│   │   │   ├── strategies.py
│   │   │   └── utils.py
│   │   ├── cache.py
│   │   ├── document.py
│   │   ├── knowledge_base
│   │   │   ├── __init__.py
│   │   │   ├── feedback.py
│   │   │   ├── manager.py
│   │   │   ├── rag_engine.py
│   │   │   ├── retriever.py
│   │   │   └── utils.py
│   │   ├── prompts
│   │   │   ├── __init__.py
│   │   │   ├── repository.py
│   │   │   └── templates.py
│   │   ├── prompts.py
│   │   └── vector
│   │       ├── __init__.py
│   │       ├── embeddings.py
│   │       └── vector_service.py
│   ├── tool_token_counter.py
│   ├── tools
│   │   ├── __init__.py
│   │   ├── audio_transcription.py
│   │   ├── base.py
│   │   ├── completion.py
│   │   ├── docstring_refiner.py
│   │   ├── document_conversion_and_processing.py
│   │   ├── enhanced-ums-lookbook.html
│   │   ├── entity_relation_graph.py
│   │   ├── excel_spreadsheet_automation.py
│   │   ├── extraction.py
│   │   ├── filesystem.py
│   │   ├── html_to_markdown.py
│   │   ├── local_text_tools.py
│   │   ├── marqo_fused_search.py
│   │   ├── meta_api_tool.py
│   │   ├── ocr_tools.py
│   │   ├── optimization.py
│   │   ├── provider.py
│   │   ├── pyodide_boot_template.html
│   │   ├── python_sandbox.py
│   │   ├── rag.py
│   │   ├── redline-compiled.css
│   │   ├── sentiment_analysis.py
│   │   ├── single_shot_synthesis.py
│   │   ├── smart_browser.py
│   │   ├── sql_databases.py
│   │   ├── text_classification.py
│   │   ├── text_redline_tools.py
│   │   ├── tournament.py
│   │   ├── ums_explorer.html
│   │   └── unified_memory_system.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── async_utils.py
│   │   ├── display.py
│   │   ├── logging
│   │   │   ├── __init__.py
│   │   │   ├── console.py
│   │   │   ├── emojis.py
│   │   │   ├── formatter.py
│   │   │   ├── logger.py
│   │   │   ├── panels.py
│   │   │   ├── progress.py
│   │   │   └── themes.py
│   │   ├── parse_yaml.py
│   │   ├── parsing.py
│   │   ├── security.py
│   │   └── text.py
│   └── working_memory_api.py
├── unified_memory_system_technical_analysis.md
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/examples/entity_relation_graph_demo.py:
--------------------------------------------------------------------------------

```python
   1 | #!/usr/bin/env python
   2 | # -*- coding: utf-8 -*-
   3 | """Entity relationship graph extraction and visualization demo using Ultimate MCP Server (New Version)."""
   4 | 
   5 | import asyncio
   6 | import json
   7 | import os
   8 | import sys
   9 | import time
  10 | from enum import Enum
  11 | from pathlib import Path
  12 | from typing import Any, Dict, Optional
  13 | 
  14 | import networkx as nx
  15 | from rich import box
  16 | from rich.markup import escape
  17 | from rich.panel import Panel
  18 | from rich.progress import Progress, SpinnerColumn, TextColumn
  19 | from rich.rule import Rule
  20 | from rich.syntax import Syntax
  21 | from rich.table import Table
  22 | from rich.tree import Tree
  23 | 
  24 | try:
  25 |     project_root = Path(__file__).resolve().parent.parent
  26 |     sys.path.insert(0, str(project_root))
  27 |     from ultimate_mcp_server.constants import Provider
  28 |     from ultimate_mcp_server.tools.entity_relation_graph import (
  29 |         COMMON_SCHEMAS,
  30 |         HAS_NETWORKX,
  31 |         HAS_VISUALIZATION_LIBS,
  32 |         GraphStrategy,
  33 |         OutputFormat,
  34 |         VisualizationFormat,
  35 |         extract_entity_graph,
  36 |     )
  37 |     from ultimate_mcp_server.utils import get_logger
  38 |     from ultimate_mcp_server.utils.logging.console import console
  39 | except ImportError as e:
  40 |     print(f"Error importing Ultimate MCP Server modules: {e}")
  41 |     print("Please ensure the script is run from the correct directory or the project path is set correctly.")
  42 |     sys.exit(1)
  43 | 
  44 | # Initialize logger
  45 | logger = get_logger("example.entity_graph") # Updated logger name
  46 | 
  47 | # Setup Directories
  48 | SCRIPT_DIR = Path(__file__).resolve().parent
  49 | SAMPLE_DIR = SCRIPT_DIR / "sample"
  50 | OUTPUT_DIR = SCRIPT_DIR / "output"
  51 | os.makedirs(OUTPUT_DIR, exist_ok=True)
  52 | 
  53 | class TextDomain(Enum):
  54 |     """Domain types for demonstration examples."""
  55 |     BUSINESS = "business"
  56 |     ACADEMIC = "academic"
  57 |     LEGAL = "legal"
  58 |     MEDICAL = "medical"
  59 |     GENERAL = "general" # Added for cases without a specific domain schema
  60 | 
  61 | # Console instances
  62 | main_console = console
  63 | # Keep detail_console if needed; it wasn't used in the original script
  64 | # detail_console = Console(width=100, highlight=True)
  65 | 
  66 | def display_header(title: str) -> None:
  67 |     """Display a section header."""
  68 |     main_console.print()
  69 |     main_console.print(Rule(f"[bold blue]{title}[/bold blue]"))
  70 |     main_console.print()
  71 | 
  72 | def display_dataset_info(dataset_path: Path, title: str) -> None:
  73 |     """Display information about a dataset."""
  74 |     if not dataset_path.exists():
  75 |         main_console.print(f"[bold red]Error:[/bold red] Dataset file not found: {dataset_path}")
  76 |         return
  77 | 
  78 |     try:
  79 |         with open(dataset_path, "r", encoding="utf-8") as f:
  80 |             content = f.read()
  81 | 
  82 |         # Count entities/characters for display
  83 |         char_count = len(content)
  84 |         word_count = len(content.split())
  85 |         # Simple sentence count (approximate)
  86 |         sentence_count = content.count('.') + content.count('?') + content.count('!')
  87 |         if sentence_count == 0 and char_count > 0:
  88 |             sentence_count = 1 # At least one sentence if there's text
  89 | 
  90 |         # Preview of the content (first 300 chars)
  91 |         preview = escape(content[:300] + "..." if len(content) > 300 else content)
  92 | 
  93 |         main_console.print(Panel(
  94 |             f"[bold cyan]Dataset:[/bold cyan] {dataset_path.name}\n"
  95 |             f"[bold cyan]Size:[/bold cyan] {char_count:,} characters | {word_count:,} words | ~{sentence_count:,} sentences\n\n"
  96 |             f"[bold cyan]Preview:[/bold cyan]\n{preview}",
  97 |             title=title,
  98 |             border_style="cyan",
  99 |             expand=False
 100 |         ))
 101 |     except Exception as e:
 102 |         main_console.print(f"[bold red]Error reading dataset file {dataset_path.name}:[/bold red] {e}")
 103 | 
 104 | 
 105 | def display_extraction_params(params: Dict[str, Any]) -> None:
 106 |     """Display extraction parameters passed to the tool."""
 107 |     param_table = Table(title="Extraction Parameters", box=box.ROUNDED, show_header=True, header_style="bold magenta")
 108 |     param_table.add_column("Parameter", style="cyan", no_wrap=True)
 109 |     param_table.add_column("Value", style="green")
 110 | 
 111 |     # Filter parameters to display relevant ones
 112 |     display_keys = [
 113 |         "provider", "model", "strategy", "domain", "output_format", "visualization_format",
 114 |         "include_evidence", "include_attributes", "include_positions", "include_temporal_info",
 115 |         "normalize_entities", "max_entities", "max_relations", "min_confidence", "enable_reasoning",
 116 |         "language" # Added language if used
 117 |     ]
 118 | 
 119 |     for key in display_keys:
 120 |         if key in params:
 121 |             value = params[key]
 122 |             # Format enums and lists nicely
 123 |             if isinstance(value, Enum):
 124 |                 value_str = value.value
 125 |             elif isinstance(value, list):
 126 |                 value_str = escape(", ".join(str(v) for v in value)) if value else "[dim italic]Empty List[/dim italic]"
 127 |             elif isinstance(value, bool):
 128 |                 value_str = "[green]Yes[/green]" if value else "[red]No[/red]"
 129 |             elif value is None:
 130 |                 value_str = "[dim italic]None[/dim italic]"
 131 |             else:
 132 |                 value_str = escape(str(value))
 133 | 
 134 |             param_table.add_row(key, value_str)
 135 | 
 136 |     main_console.print(param_table)
 137 | 
 138 | def display_entity_stats(result: Dict[str, Any]) -> None:
 139 |     """Display statistics about extracted entities and relationships."""
 140 |     metadata = result.get("metadata", {})
 141 |     entities = result.get("entities", []) # Get entities directly for type counting if metadata missing
 142 |     relationships = result.get("relationships", [])
 143 | 
 144 |     entity_count = metadata.get("entity_count", len(entities))
 145 |     relationship_count = metadata.get("relationship_count", len(relationships))
 146 | 
 147 |     if entity_count == 0:
 148 |         main_console.print("[yellow]No entities found in extraction result.[/yellow]")
 149 |         return
 150 | 
 151 |     # Use metadata if available, otherwise count manually
 152 |     entity_types_meta = metadata.get("entity_types")
 153 |     rel_types_meta = metadata.get("relation_types")
 154 | 
 155 |     entity_type_counts = {}
 156 |     if entity_types_meta:
 157 |         for etype in entity_types_meta:
 158 |              # Count occurrences in the actual entity list for accuracy
 159 |              entity_type_counts[etype] = sum(1 for e in entities if e.get("type") == etype)
 160 |     else: # Fallback if metadata key is missing
 161 |         for entity in entities:
 162 |             ent_type = entity.get("type", "Unknown")
 163 |             entity_type_counts[ent_type] = entity_type_counts.get(ent_type, 0) + 1
 164 | 
 165 |     rel_type_counts = {}
 166 |     if rel_types_meta:
 167 |         for rtype in rel_types_meta:
 168 |              rel_type_counts[rtype] = sum(1 for r in relationships if r.get("type") == rtype)
 169 |     else: # Fallback
 170 |         for rel in relationships:
 171 |             rel_type = rel.get("type", "Unknown")
 172 |             rel_type_counts[rel_type] = rel_type_counts.get(rel_type, 0) + 1
 173 | 
 174 |     # Create entity stats table
 175 |     stats_table = Table(title="Extraction Statistics", box=box.ROUNDED, show_header=True, header_style="bold blue")
 176 |     stats_table.add_column("Metric", style="cyan")
 177 |     stats_table.add_column("Count", style="green", justify="right")
 178 | 
 179 |     stats_table.add_row("Total Entities", str(entity_count))
 180 |     stats_table.add_row("Total Relationships", str(relationship_count))
 181 | 
 182 |     # Add entity type counts
 183 |     stats_table.add_section()
 184 |     for ent_type, count in sorted(entity_type_counts.items(), key=lambda x: x[1], reverse=True):
 185 |         stats_table.add_row(f"Entity Type: [italic]{escape(ent_type)}[/italic]", str(count))
 186 | 
 187 |     # Add relationship type counts (top 5)
 188 |     if rel_type_counts:
 189 |         stats_table.add_section()
 190 |         for rel_type, count in sorted(rel_type_counts.items(), key=lambda x: x[1], reverse=True)[:5]:
 191 |             stats_table.add_row(f"Relationship Type: [italic]{escape(rel_type)}[/italic]", str(count))
 192 |         if len(rel_type_counts) > 5:
 193 |              stats_table.add_row("[dim]... (other relationship types)[/dim]", "")
 194 | 
 195 | 
 196 |     main_console.print(stats_table)
 197 | 
 198 | 
 199 | def display_graph_metrics(result: Dict[str, Any]) -> None:
 200 |     """Display graph metrics if available in metadata."""
 201 |     # Metrics are now nested under metadata
 202 |     metrics = result.get("metadata", {}).get("metrics", {})
 203 |     if not metrics:
 204 |         # Check the top level as a fallback for older structure compatibility if needed
 205 |         metrics = result.get("metrics", {})
 206 |         if not metrics:
 207 |             main_console.print("[dim]No graph metrics calculated (requires networkx).[/dim]")
 208 |             return
 209 | 
 210 |     metrics_table = Table(title="Graph Metrics", box=box.ROUNDED, show_header=True, header_style="bold blue")
 211 |     metrics_table.add_column("Metric", style="cyan")
 212 |     metrics_table.add_column("Value", style="green", justify="right")
 213 | 
 214 |     # Add metrics to table
 215 |     for key, value in metrics.items():
 216 |         if isinstance(value, (int, float)):
 217 |             # Improved formatting
 218 |             if isinstance(value, float):
 219 |                 if 0.0001 < abs(value) < 10000:
 220 |                     formatted_value = f"{value:.4f}"
 221 |                 else:
 222 |                     formatted_value = f"{value:.3e}" # Scientific notation for very small/large
 223 |             else:
 224 |                  formatted_value = f"{value:,}" # Add commas for integers
 225 |             metrics_table.add_row(key.replace("_", " ").title(), formatted_value)
 226 |         elif value is not None:
 227 |              metrics_table.add_row(key.replace("_", " ").title(), escape(str(value)))
 228 | 
 229 | 
 230 |     main_console.print(metrics_table)
 231 | 
 232 | 
 233 | def display_entities_table(result: Dict[str, Any], limit: int = 10) -> None:
 234 |     """Display extracted entities in a table, sorted appropriately."""
 235 |     entities = result.get("entities", [])
 236 |     if not entities:
 237 |         main_console.print("[yellow]No entities found to display.[/yellow]")
 238 |         return
 239 | 
 240 |     # Sorting based on available metrics from _add_graph_metrics
 241 |     # The new tool adds 'degree' and 'centrality'
 242 |     sort_key = "name" # Default sort
 243 |     if entities and isinstance(entities[0], dict):
 244 |         if "centrality" in entities[0] and any(e.get("centrality", 0) > 0 for e in entities):
 245 |             entities.sort(key=lambda x: x.get("centrality", 0.0), reverse=True)
 246 |             sort_key = "centrality"
 247 |         elif "degree" in entities[0] and any(e.get("degree", 0) > 0 for e in entities):
 248 |              entities.sort(key=lambda x: x.get("degree", 0.0), reverse=True)
 249 |              sort_key = "degree"
 250 |         elif "mentions" in entities[0] and any(e.get("mentions") for e in entities):
 251 |             entities.sort(key=lambda x: len(x.get("mentions", [])), reverse=True)
 252 |             sort_key = "mentions"
 253 |         else:
 254 |              entities.sort(key=lambda x: x.get("name", "").lower()) # Fallback to name
 255 | 
 256 | 
 257 |     # Limit to top entities
 258 |     display_entities = entities[:limit]
 259 | 
 260 |     title = f"Top {limit} Entities"
 261 |     if sort_key != "name":
 262 |         title += f" (Sorted by {sort_key.capitalize()})"
 263 | 
 264 |     entity_table = Table(title=title, box=box.ROUNDED, show_header=True, header_style="bold blue")
 265 |     entity_table.add_column("ID", style="dim", width=8)
 266 |     entity_table.add_column("Name", style="cyan", max_width=40)
 267 |     entity_table.add_column("Type", style="green", max_width=20)
 268 | 
 269 |     # Add columns for additional information if available
 270 |     has_degree = any(e.get("degree", 0) > 0 for e in display_entities)
 271 |     has_centrality = any(e.get("centrality", 0) > 0 for e in display_entities)
 272 |     has_mentions = any(e.get("mentions") for e in display_entities)
 273 |     has_attributes = any(e.get("attributes") for e in display_entities)
 274 | 
 275 |     if has_degree:
 276 |         entity_table.add_column("Degree", style="magenta", justify="right", width=8)
 277 |     if has_centrality:
 278 |         entity_table.add_column("Centrality", style="magenta", justify="right", width=10)
 279 |     if has_mentions:
 280 |         entity_table.add_column("Mentions", style="yellow", justify="right", width=8)
 281 |     if has_attributes:
 282 |         entity_table.add_column("Attributes", style="blue", max_width=50)
 283 | 
 284 |     # Add rows for each entity
 285 |     for entity in display_entities:
 286 |         row = [
 287 |             escape(entity.get("id", "")),
 288 |             escape(entity.get("name", "")),
 289 |             escape(entity.get("type", "Unknown"))
 290 |         ]
 291 | 
 292 |         if has_degree:
 293 |             degree = entity.get("degree", 0.0)
 294 |             row.append(f"{degree:.3f}")
 295 |         if has_centrality:
 296 |             centrality = entity.get("centrality", 0.0)
 297 |             row.append(f"{centrality:.4f}")
 298 |         if has_mentions:
 299 |             mentions_count = len(entity.get("mentions", []))
 300 |             row.append(str(mentions_count))
 301 |         if has_attributes:
 302 |             attributes = entity.get("attributes", {})
 303 |             # Format attributes more readably
 304 |             attr_str = "; ".join(f"{k}={v}" for k, v in attributes.items() if v) # Ignore empty values
 305 |             row.append(escape(attr_str[:45] + ("..." if len(attr_str) > 45 else "")))
 306 | 
 307 |         entity_table.add_row(*row)
 308 | 
 309 |     main_console.print(entity_table)
 310 | 
 311 |     if len(entities) > limit:
 312 |         main_console.print(f"[dim italic]...and {len(entities) - limit} more entities[/dim italic]")
 313 | 
 314 | 
 315 | def display_relationships_table(result: Dict[str, Any], limit: int = 10) -> None:
 316 |     """Display extracted relationships in a table, sorted by confidence."""
 317 |     relationships = result.get("relationships", [])
 318 |     # Create entity map for quick name lookups
 319 |     entity_map = {entity["id"]: entity for entity in result.get("entities", []) if isinstance(entity, dict) and "id" in entity}
 320 | 
 321 |     if not relationships:
 322 |         main_console.print("[yellow]No relationships found to display.[/yellow]")
 323 |         return
 324 | 
 325 |     # Sort relationships by confidence (new tool ensures confidence exists)
 326 |     relationships.sort(key=lambda x: x.get("confidence", 0.0), reverse=True)
 327 | 
 328 |     # Limit to top relationships
 329 |     display_relationships = relationships[:limit]
 330 | 
 331 |     rel_table = Table(title=f"Top {limit} Relationships (Sorted by Confidence)", box=box.ROUNDED, show_header=True, header_style="bold blue")
 332 |     rel_table.add_column("Source", style="cyan", max_width=30)
 333 |     rel_table.add_column("Type", style="green", max_width=25)
 334 |     rel_table.add_column("Target", style="cyan", max_width=30)
 335 |     rel_table.add_column("Conf.", style="magenta", justify="right", width=6)
 336 | 
 337 |     # Check if we have evidence or temporal info
 338 |     has_evidence = any(r.get("evidence") for r in display_relationships)
 339 |     has_temporal = any(r.get("temporal") for r in display_relationships)
 340 | 
 341 |     if has_evidence:
 342 |         rel_table.add_column("Evidence", style="yellow", max_width=40)
 343 |     if has_temporal:
 344 |          rel_table.add_column("Temporal", style="blue", max_width=20)
 345 | 
 346 | 
 347 |     # Add rows for each relationship
 348 |     for rel in display_relationships:
 349 |         source_id = rel.get("source", "")
 350 |         target_id = rel.get("target", "")
 351 | 
 352 |         # Get entity names if available, fallback to ID
 353 |         source_name = entity_map.get(source_id, {}).get("name", source_id)
 354 |         target_name = entity_map.get(target_id, {}).get("name", target_id)
 355 | 
 356 |         row = [
 357 |             escape(source_name),
 358 |             escape(rel.get("type", "Unknown")),
 359 |             escape(target_name),
 360 |             f"{rel.get('confidence', 0.0):.2f}",
 361 |         ]
 362 | 
 363 |         if has_evidence:
 364 |             evidence = rel.get("evidence", "")
 365 |             row.append(escape(evidence[:35] + ("..." if len(evidence) > 35 else "")))
 366 |         if has_temporal:
 367 |              temporal = rel.get("temporal", {})
 368 |              temp_str = "; ".join(f"{k}={v}" for k, v in temporal.items())
 369 |              row.append(escape(temp_str[:18] + ("..." if len(temp_str) > 18 else "")))
 370 | 
 371 | 
 372 |         rel_table.add_row(*row)
 373 | 
 374 |     main_console.print(rel_table)
 375 | 
 376 |     if len(relationships) > limit:
 377 |         main_console.print(f"[dim italic]...and {len(relationships) - limit} more relationships[/dim italic]")
 378 | 
 379 | 
 380 | def display_entity_graph_tree(result: Dict[str, Any], max_depth: int = 2, max_children: int = 5) -> None:
 381 |     """Display a tree representation of the entity graph, starting from the most central node."""
 382 |     entities = result.get("entities", [])
 383 |     relationships = result.get("relationships", [])
 384 |     entity_map = {entity["id"]: entity for entity in entities if isinstance(entity, dict) and "id" in entity}
 385 | 
 386 | 
 387 |     if not entities or not relationships or not HAS_NETWORKX: # Tree view less useful without sorting/structure
 388 |         if not HAS_NETWORKX:
 389 |              main_console.print("[yellow]Cannot display graph tree: NetworkX library not available for centrality sorting.[/yellow]")
 390 |         else:
 391 |              main_console.print("[yellow]Cannot display graph tree: insufficient data.[/yellow]")
 392 |         return
 393 | 
 394 |     # Sort entities by centrality (assuming metrics were calculated)
 395 |     entities.sort(key=lambda x: x.get("centrality", 0.0), reverse=True)
 396 | 
 397 |     # Get most central entity as root
 398 |     if not entities:
 399 |         return # Should not happen if the check above passed; kept as a safeguard
 400 |     root_entity = entities[0]
 401 |     root_id = root_entity["id"]
 402 | 
 403 |     # Create rich Tree
 404 |     tree = Tree(
 405 |         f"[bold cyan]{escape(root_entity.get('name', root_id))}[/bold cyan] "
 406 |         f"([dim italic]ID: {escape(root_id)}, Type: {escape(root_entity.get('type', 'Unknown'))}[/dim italic])"
 407 |     )
 408 | 
 409 |     # Keep track of edges explored to represent the tree structure
 410 |     explored_edges = set()
 411 | 
 412 |     # Recursively build tree using BFS approach for levels
 413 |     queue = [(root_id, tree, 0)] # (entity_id, parent_tree_node, current_depth)
 414 |     visited_nodes_in_tree = {root_id} # Prevent cycles *within this tree rendering*
 415 | 
 416 |     while queue:
 417 |         current_id, parent_node, depth = queue.pop(0)
 418 | 
 419 |         if depth >= max_depth:
 420 |             continue
 421 | 
 422 |         # Find outgoing relationships
 423 |         outgoing_rels = [
 424 |             r for r in relationships
 425 |             if r.get("source") == current_id
 426 |             and (current_id, r.get("target"), r.get("type")) not in explored_edges
 427 |         ]
 428 |         outgoing_rels.sort(key=lambda x: x.get("confidence", 0.0), reverse=True)
 429 | 
 430 |         # Find incoming relationships (consumed only by the optional incoming-edge block below)
 431 |         incoming_rels = [
 432 |              r for r in relationships
 433 |              if r.get("target") == current_id
 434 |              and (r.get("source"), current_id, r.get("type")) not in explored_edges
 435 |         ]
 436 |         incoming_rels.sort(key=lambda x: x.get("confidence", 0.0), reverse=True)
 437 | 
 438 | 
 439 |         # Add outgoing children
 440 |         children_count = 0
 441 |         for rel in outgoing_rels:
 442 |             if children_count >= max_children:
 443 |                 parent_node.add("[dim italic]... (more outgoing)[/dim]")
 444 |                 break
 445 | 
 446 |             target_id = rel.get("target")
 447 |             if target_id and target_id not in visited_nodes_in_tree: # Avoid cycles in display
 448 |                  target_entity = entity_map.get(target_id)
 449 |                  if target_entity:
 450 |                      edge_sig = (current_id, target_id, rel.get("type"))
 451 |                      explored_edges.add(edge_sig)
 452 |                      visited_nodes_in_tree.add(target_id)
 453 | 
 454 |                      rel_type = escape(rel.get("type", "related to"))
 455 |                      conf = rel.get("confidence", 0.0)
 456 |                      target_name = escape(target_entity.get("name", target_id))
 457 |                      target_type = escape(target_entity.get("type", "Unknown"))
 458 | 
 459 |                      branch_text = (
 460 |                          f"-[[green]{rel_type}[/green] ({conf:.1f})]-> "
 461 |                          f"[cyan]{target_name}[/cyan] ([dim italic]{target_type}[/dim italic])"
 462 |                      )
 463 |                      branch = parent_node.add(branch_text)
 464 |                      queue.append((target_id, branch, depth + 1))
 465 |                      children_count += 1
 466 | 
 467 | 
 468 |         # Add incoming children (optional, can make tree busy)
 469 |         # Uncomment this block to also include incoming relationships in the tree
 470 |         # children_count = 0
 471 |         # for rel in incoming_rels:
 472 |         #     if children_count >= max_children // 2: # Show fewer incoming
 473 |         #         parent_node.add("[dim italic]... (more incoming)[/dim]")
 474 |         #         break
 475 |         #     source_id = rel.get("source")
 476 |         #     if source_id and source_id not in visited_nodes_in_tree:
 477 |         #          source_entity = entity_map.get(source_id)
 478 |         #          if source_entity:
 479 |         #              edge_sig = (source_id, current_id, rel.get("type"))
 480 |         #              explored_edges.add(edge_sig)
 481 |         #              visited_nodes_in_tree.add(source_id)
 482 |         #
 483 |         #              rel_type = escape(rel.get("type", "related to"))
 484 |         #              conf = rel.get("confidence", 0.0)
 485 |         #              source_name = escape(source_entity.get("name", source_id))
 486 |         #              source_type = escape(source_entity.get("type", "Unknown"))
 487 |         #
 488 |         #              branch_text = (
 489 |         #                  f"<-[[red]{rel_type}[/red] ({conf:.1f})]- "
 490 |         #                  f"[magenta]{source_name}[/magenta] ([dim italic]{source_type}[/dim italic])"
 491 |         #              )
 492 |         #              branch = parent_node.add(branch_text)
 493 |         #              queue.append((source_id, branch, depth + 1))
 494 |         #              children_count += 1
 495 | 
 496 | 
 497 |     main_console.print(Panel(tree, title=f"Entity Graph Tree View (Root: {escape(root_entity.get('name', ''))})", border_style="blue"))
 498 | 
 499 | 
 500 | def display_extraction_summary(result: Dict[str, Any]) -> None:
 501 |     """Display a summary of the extraction performance and cost."""
 502 |     metadata = result.get("metadata", {})
 503 |     provider = result.get("provider", "Unknown")
 504 |     model = result.get("model", "Unknown")
 505 |     tokens = result.get("tokens", {})
 506 |     cost = result.get("cost", 0.0) # Cost is now float
 507 |     processing_time = result.get("processing_time", 0.0) # Time is now float
 508 |     strategy = metadata.get("processing_strategy", "Unknown")
 509 |     schema_used = metadata.get("schema_used", "Unknown")
 510 | 
 511 | 
 512 |     summary_table = Table(box=box.ROUNDED, show_header=False, title="Extraction Summary")
 513 |     summary_table.add_column("Metric", style="cyan", no_wrap=True)
 514 |     summary_table.add_column("Value", style="green")
 515 | 
 516 |     summary_table.add_row("Provider", escape(provider))
 517 |     summary_table.add_row("Model", escape(model))
 518 |     summary_table.add_row("Strategy", escape(strategy))
 519 |     summary_table.add_row("Schema Used", escape(schema_used))
 520 |     summary_table.add_row("Input Tokens", f"{tokens.get('input', 0):,}")
 521 |     summary_table.add_row("Output Tokens", f"{tokens.get('output', 0):,}")
 522 |     summary_table.add_row("Total Tokens", f"{tokens.get('total', 0):,}")
 523 |     summary_table.add_row("Cost", f"${cost:.6f}")
 524 |     summary_table.add_row("Processing Time", f"{processing_time:.2f} seconds")
 525 | 
 526 |     main_console.print(summary_table)
 527 | 
 528 | 
 529 | def save_visualization(result: Dict[str, Any], domain: str, strategy: str, output_dir: Path) -> Optional[str]:
 530 |     """Save visualization file based on the format present in the result."""
 531 |     visualization = result.get("visualization") # Visualization data is now under this key
 532 |     if not visualization or not isinstance(visualization, dict):
 533 |         main_console.print("[dim]No visualization data found in the result.[/dim]")
 534 |         return None
 535 | 
 536 |     content = None
 537 |     extension = None
 538 |     file_path = None
 539 | 
 540 |     # Check for different visualization formats
 541 |     if "html" in visualization:
 542 |         content = visualization["html"]
 543 |         extension = "html"
 544 |     elif "svg" in visualization:
 545 |         content = visualization["svg"]
 546 |         extension = "svg"
 547 |     elif "png_url" in visualization: # Assuming PNG might save file directly and return URL
 548 |         file_path = visualization["png_url"].replace("file://", "")
 549 |         extension = "png"
 550 |     elif "dot" in visualization:
 551 |         content = visualization["dot"]
 552 |         extension = "dot"
 553 | 
 554 |     if file_path: # If path was returned directly (like maybe for PNG)
 555 |          if Path(file_path).exists():
 556 |              return file_path
 557 |          else:
 558 |              main_console.print(f"[red]Visualization file path provided but not found: {file_path}[/red]")
 559 |              return None
 560 | 
 561 |     if content and extension:
 562 |         timestamp = int(time.time())
 563 |         # Sanitize domain and strategy for filename
 564 |         safe_domain = domain.replace(" ", "_").lower()
 565 |         safe_strategy = strategy.replace(" ", "_").lower()
 566 |         output_path = output_dir / f"graph_{safe_domain}_{safe_strategy}_{timestamp}.{extension}"
 567 |         try:
 568 |             with open(output_path, "w", encoding="utf-8") as f:
 569 |                 f.write(content)
 570 |             return str(output_path)
 571 |         except Exception as e:
 572 |             main_console.print(f"[bold red]Error saving visualization file {output_path}:[/bold red] {e}")
 573 |             return None
 574 |     elif "error" in visualization:
 575 |          main_console.print(f"[yellow]Visualization generation failed:[/yellow] {visualization['error']}")
 576 |          return None
 577 |     else:
 578 |         main_console.print("[dim]Unsupported or missing visualization format in result.[/dim]")
 579 |         return None
 580 | 
 581 | 
 582 | async def run_entity_extraction(
 583 |     text: str,
 584 |     domain: TextDomain,
 585 |     strategy: GraphStrategy,
 586 |     model: str,
 587 |     output_format: OutputFormat = OutputFormat.JSON,
 588 |     visualization_format: VisualizationFormat = VisualizationFormat.HTML, # Keep HTML for demo vis
 589 |     provider: str = Provider.ANTHROPIC.value # Example provider
 590 | ) -> Optional[Dict[str, Any]]:
 591 |     """Run entity graph extraction with progress indicator and display params."""
 592 |     # Setup extraction parameters
 593 |     params = {
 594 |         "text": text,
 595 |         "provider": provider, # Pass provider name string
 596 |         "model": model,
 597 |         "strategy": strategy, # Pass enum directly
 598 |         "output_format": output_format, # Pass enum directly
 599 |         "visualization_format": visualization_format, # Pass enum directly
 600 |         # --- Include Flags (consider new defaults) ---
 601 |         "include_evidence": True, # Explicitly keep True for demo
 602 |         "include_attributes": True, # Explicitly keep True for demo
 603 |         "include_positions": False, # Change to False to match new default (saves tokens)
 604 |         "include_temporal_info": True, # Explicitly keep True for demo
 605 |         # --- Control Flags ---
 606 |         "normalize_entities": True, # Keep True (new default)
 607 |         "enable_reasoning": False, # Keep False for demo speed (can be enabled)
 608 |         # --- Limits ---
 609 |         "max_entities": 75,  # Adjusted limits for demo
 610 |         "max_relations": 150,
 611 |         "min_confidence": 0.55, # Slightly higher confidence
 612 |         # --- Optional ---
 613 |         "language": None, # Specify if needed, e.g., "Spanish"
 614 |         "domain": None, # Set below if applicable
 615 |         #"custom_prompt": None, # Add if testing custom prompts
 616 |         #"system_prompt": None, # Add if testing system prompts
 617 |         #"additional_params": {"temperature": 0.2} # Example
 618 |     }
 619 | 
 620 |     # Add domain value string if applicable and not GENERAL
 621 |     if domain != TextDomain.GENERAL and domain.value in COMMON_SCHEMAS:
 622 |         params["domain"] = domain.value
 623 | 
 624 |     # Display parameters being used
 625 |     display_extraction_params(params)
 626 | 
 627 |     # Run extraction with progress spinner
 628 |     result = None
 629 |     with Progress(
 630 |         SpinnerColumn(),
 631 |         TextColumn("[progress.description]{task.description}"),
 632 |         console=main_console,
 633 |         transient=False # Keep progress visible after completion
 634 |     ) as progress:
 635 |         task_desc = f"Extracting graph ({domain.value}/{strategy.value}/{model})..."
 636 |         task = progress.add_task(task_desc, total=None)
 637 | 
 638 |         try:
 639 |             start_time = time.monotonic()
 640 |             # Pass enums directly now, the tool handles conversion if needed
 641 |             result = await extract_entity_graph(**params) # type: ignore
 642 |             end_time = time.monotonic()
 643 |             duration = end_time - start_time
 644 |             progress.update(task, completed=100, description=f"[green]Extraction complete ({duration:.2f}s)[/green]")
 645 |             return result
 646 |         except Exception as e:
 647 |             logger.error(f"Extraction failed during run_entity_extraction: {e}", exc_info=True)
 648 |             progress.update(task, completed=100, description=f"[bold red]Extraction failed: {escape(str(e))}[/bold red]")
 649 |             # Optionally re-raise or just return None
 650 |             # raise # Uncomment to stop the demo on failure
 651 |             return None # Allow demo to continue
 652 | 
 653 | 
 654 | # --- Demonstration Functions (Updated calls to run_entity_extraction) ---
 655 | 
 656 | async def demonstrate_domain_extraction(
 657 |         domain: TextDomain,
 658 |         sample_file: str,
 659 |         strategy: GraphStrategy,
 660 |         model: str = "claude-3-5-haiku-20241022", # Default model for demos
 661 |         provider: str = Provider.ANTHROPIC.value
 662 |     ):
 663 |     """Helper function to demonstrate extraction for a specific domain."""
 664 |     domain_name = domain.value.capitalize()
 665 |     display_header(f"{domain_name} Domain Entity Graph Extraction ({strategy.value} strategy)")
 666 | 
 667 |     sample_path = SAMPLE_DIR / sample_file
 668 |     display_dataset_info(sample_path, f"{domain_name} Sample Text")
 669 | 
 670 |     if not sample_path.exists():
 671 |         return # Skip if file missing
 672 | 
 673 |     with open(sample_path, "r", encoding="utf-8") as f:
 674 |         text_content = f.read()
 675 | 
 676 |     try:
 677 |         result = await run_entity_extraction(
 678 |             text=text_content,
 679 |             domain=domain,
 680 |             strategy=strategy,
 681 |             model=model,
 682 |             provider=provider,
 683 |             visualization_format=VisualizationFormat.HTML # Request HTML for viewing
 684 |         )
 685 | 
 686 |         if result and result.get("success", False):
 687 |             # Display results
 688 |             display_entity_stats(result)
 689 |             if HAS_NETWORKX: # Only display metrics if networkx is installed
 690 |                  display_graph_metrics(result)
 691 |             display_entities_table(result)
 692 |             display_relationships_table(result)
 693 |             if HAS_NETWORKX: # Tree view also requires networkx
 694 |                  display_entity_graph_tree(result)
 695 | 
 696 |             # Save visualization if available
 697 |             vis_path = save_visualization(result, domain.value, strategy.value, OUTPUT_DIR)
 698 |             if vis_path:
 699 |                 main_console.print(f"\n[green]✓[/green] Visualization saved to: [blue link=file://{vis_path}]{vis_path}[/blue]")
 700 |             elif "visualization" in result and "error" in result["visualization"]:
 701 |                  main_console.print(f"[yellow]Visualization generation failed: {result['visualization']['error']}[/yellow]")
 702 | 
 703 | 
 704 |             # Display summary
 705 |             display_extraction_summary(result)
 706 |         elif result:
 707 |              main_console.print(f"[bold red]Extraction reported failure:[/bold red] {result.get('error', 'Unknown error')}")
 708 |         # If result is None, run_entity_extraction already printed the error
 709 | 
 710 |     except Exception as e:
 711 |         # Catch errors not caught by run_entity_extraction's try/except
 712 |         main_console.print(f"[bold red]Error during {domain_name} demonstration:[/bold red] {escape(str(e))}")
 713 |         logger.error(f"Unhandled error in {domain_name} demo: {e}", exc_info=True)
 714 | 
 715 | 
 716 | async def demonstrate_strategy_comparison():
 717 |     """Compare different extraction strategies on the same text."""
 718 |     display_header("Strategy Comparison")
 719 | 
 720 |     # Load business article for comparison
 721 |     comparison_file = "article.txt"
 722 |     comparison_path = SAMPLE_DIR / comparison_file
 723 |     display_dataset_info(comparison_path, f"{comparison_file} (For Strategy Comparison)")
 724 | 
 725 |     if not comparison_path.exists():
 726 |         return
 727 | 
 728 |     with open(comparison_path, "r", encoding="utf-8") as f:
 729 |         comparison_text = f.read()
 730 | 
 731 |     # Define strategies to compare
 732 |     strategies_to_compare = [
 733 |         (GraphStrategy.STANDARD, "Standard"),
 734 |         (GraphStrategy.MULTISTAGE, "Multistage"),
 735 |         (GraphStrategy.CHUNKED, "Chunked"), # Will process full text if short, or chunk if long
 736 |         (GraphStrategy.STRUCTURED, "Structured"), # Needs examples from domain
 737 |         (GraphStrategy.STRICT_SCHEMA, "Strict Schema") # Needs domain
 738 |     ]
 739 | 
 740 |     # Setup comparison table
 741 |     comparison_table = Table(title="Strategy Comparison Results", box=box.ROUNDED, show_header=True, header_style="bold magenta")
 742 |     comparison_table.add_column("Strategy", style="cyan")
 743 |     comparison_table.add_column("Entities", style="green", justify="right")
 744 |     comparison_table.add_column("Rels", style="green", justify="right")
 745 |     comparison_table.add_column("Time (s)", style="yellow", justify="right")
 746 |     comparison_table.add_column("Tokens", style="magenta", justify="right")
 747 |     comparison_table.add_column("Cost ($)", style="blue", justify="right")
 748 | 
 749 |     # Use a slightly smaller model for faster comparison if needed
 750 |     comparison_model = "gpt-4.1-mini"
 751 |     comparison_provider = Provider.OPENAI.value
 752 |     comparison_domain = TextDomain.BUSINESS
 753 | 
 754 |     # Compare each strategy
 755 |     for strategy, desc in strategies_to_compare:
 756 |         main_console.print(f"\n[bold underline]Running {desc} Strategy[/bold underline]")
 757 | 
 758 |         # Use the full text for every strategy so the chunking effect is visible;
 759 |         # an excerpt would be faster but would hide the strategy differences
 760 |         text_to_use = comparison_text
 761 | 
 762 |         result = None # Define result outside try block
 763 |         try:
 764 |             result = await run_entity_extraction(
 765 |                 text=text_to_use,
 766 |                 domain=comparison_domain, # Business domain has examples/schema
 767 |                 strategy=strategy,
 768 |                 model=comparison_model,
 769 |                 provider=comparison_provider,
 770 |                 visualization_format=VisualizationFormat.NONE # Skip visualization for comparison
 771 |             )
 772 | 
 773 |             if result and result.get("success", False):
 774 |                 # Extract metrics for comparison
 775 |                 entity_count = result.get("metadata", {}).get("entity_count", 0)
 776 |                 rel_count = result.get("metadata", {}).get("relationship_count", 0)
 777 |                 processing_time = result.get("processing_time", 0.0)
 778 |                 token_count = result.get("tokens", {}).get("total", 0)
 779 |                 cost = result.get("cost", 0.0)
 780 | 
 781 |                 # Add to comparison table
 782 |                 comparison_table.add_row(
 783 |                     desc,
 784 |                     str(entity_count),
 785 |                     str(rel_count),
 786 |                     f"{processing_time:.2f}",
 787 |                     f"{token_count:,}",
 788 |                     f"{cost:.6f}"
 789 |                 )
 790 |                 # Display brief stats for this strategy
 791 |                 display_entity_stats(result)
 792 |             else:
 793 |                  error_msg = result.get("error", "Extraction failed") if result else "Extraction returned None"
 794 |                  main_console.print(f"[bold red]Error with {desc} strategy:[/bold red] {escape(error_msg)}")
 795 |                  comparison_table.add_row(desc, "[red]ERR[/red]", "[red]ERR[/red]", "N/A", "N/A", "N/A")
 796 | 
 797 | 
 798 |         except Exception as e:
 799 |             logger.error(f"Unhandled error comparing strategy {desc}: {e}", exc_info=True)
 800 |             main_console.print(f"[bold red]Unhandled Error with {desc} strategy:[/bold red] {escape(str(e))}")
 801 |             comparison_table.add_row(desc, "[red]CRASH[/red]", "[red]CRASH[/red]", "N/A", "N/A", "N/A")
 802 | 
 803 | 
 804 |     # Display final comparison table
 805 |     main_console.print(Rule("Comparison Summary"))
 806 |     main_console.print(comparison_table)
 807 | 
 808 | 
 809 | async def demonstrate_output_formats():
 810 |     """Demonstrate different output formats using a sample text."""
 811 |     display_header("Output Format Demonstration")
 812 | 
 813 |     # Load academic paper for output format demo
 814 |     format_file = "research_paper.txt"
 815 |     format_path = SAMPLE_DIR / format_file
 816 |     display_dataset_info(format_path, f"{format_file} (For Output Formats)")
 817 | 
 818 |     if not format_path.exists():
 819 |         return
 820 | 
 821 |     with open(format_path, "r", encoding="utf-8") as f:
 822 |         format_text = f.read()
 823 | 
 824 |     # Define output formats to demonstrate
 825 |     # Exclude NetworkX if library not installed
 826 |     formats_to_demonstrate = [
 827 |         (OutputFormat.JSON, "Standard JSON"),
 828 |         (OutputFormat.CYTOSCAPE, "Cytoscape.js"),
 829 |         (OutputFormat.NEO4J, "Neo4j Cypher"),
 830 |         (OutputFormat.RDF, "RDF Triples"),
 831 |         (OutputFormat.D3, "D3.js nodes/links"),
 832 |     ]
 833 |     if HAS_NETWORKX:
 834 |         formats_to_demonstrate.insert(1, (OutputFormat.NETWORKX, "NetworkX Object"))
 835 | 
 836 | 
 837 |     main_console.print("[bold yellow]Note:[/bold yellow] This demonstrates how extracted data can be formatted.")
 838 | 
 839 |     # Use a short excerpt for speed
 840 |     text_excerpt = format_text[:2000]
 841 |     base_model = "gpt-4.1-mini" # Faster model for formats
 842 |     base_provider = Provider.OPENAI.value
 843 |     base_domain = TextDomain.ACADEMIC
 844 | 
 845 |     # Extract with each output format
 846 |     for fmt, desc in formats_to_demonstrate:
 847 |         main_console.print(f"\n[bold underline]Demonstrating {desc} Output Format[/bold underline]")
 848 | 
 849 |         result = None # Define outside try block
 850 |         try:
 851 |             result = await run_entity_extraction(
 852 |                 text=text_excerpt,
 853 |                 domain=base_domain,
 854 |                 strategy=GraphStrategy.STANDARD, # Use standard strategy
 855 |                 model=base_model,
 856 |                 provider=base_provider,
 857 |                 output_format=fmt, # Specify the output format
 858 |                 visualization_format=VisualizationFormat.NONE # No viz needed here
 859 |             )
 860 | 
 861 |             if not result or not result.get("success", False):
 862 |                  error_msg = result.get("error", "Extraction failed") if result else "Extraction returned None"
 863 |                  main_console.print(f"[bold red]Error extracting data for {desc} format:[/bold red] {escape(error_msg)}")
 864 |                  continue # Skip displaying output for this format
 865 | 
 866 | 
 867 |             # Display format-specific output key
 868 |             output_key = fmt.value # Default key matches enum value
 869 |             if fmt == OutputFormat.NETWORKX: 
 870 |                 output_key = "graph"
 871 |             if fmt == OutputFormat.NEO4J: 
 872 |                 output_key = "neo4j_queries"
 873 |             if fmt == OutputFormat.RDF: 
 874 |                 output_key = "rdf_triples"
 875 |             if fmt == OutputFormat.D3: 
 876 |                 output_key = "d3"
 877 |             if fmt == OutputFormat.CYTOSCAPE: 
 878 |                 output_key = "cytoscape"
 879 | 
 880 |             if output_key in result:
 881 |                 data_to_display = result[output_key]
 882 |                 display_title = f"Sample of {desc} Output (`{output_key}` key)"
 883 | 
 884 |                 if fmt == OutputFormat.JSON:
 885 |                     # Display a subset of the standard JSON keys
 886 |                     json_subset = {
 887 |                         "entities": result.get("entities", [])[:2],
 888 |                         "relationships": result.get("relationships", [])[:2],
 889 |                         "metadata": {k:v for k,v in result.get("metadata",{}).items() if k in ["entity_count", "relationship_count","processing_strategy"]}
 890 |                     }
 891 |                     output_content = Syntax(json.dumps(json_subset, indent=2), "json", theme="default", line_numbers=True, word_wrap=True)
 892 |                 elif fmt == OutputFormat.NETWORKX:
 893 |                     graph_obj = result["graph"]
 894 |                     info = (
 895 |                          f"[green]✓[/green] NetworkX graph object created: {isinstance(graph_obj, nx.DiGraph)}\n"
 896 |                          f"Nodes: {graph_obj.number_of_nodes()}, Edges: {graph_obj.number_of_edges()}\n\n"
 897 |                          "[italic]Allows graph algorithms (centrality, paths, etc.)[/italic]"
 898 |                     )
 899 |                     output_content = info # Simple text panel
 900 |                 elif fmt == OutputFormat.CYTOSCAPE:
 901 |                      sample = {
 902 |                          "nodes": data_to_display.get("nodes", [])[:2],
 903 |                          "edges": data_to_display.get("edges", [])[:2]
 904 |                      }
 905 |                      output_content = Syntax(json.dumps(sample, indent=2), "json", theme="default", line_numbers=True, word_wrap=True)
 906 |                 elif fmt == OutputFormat.NEO4J:
 907 |                     queries = data_to_display
 908 |                     sample_queries = queries[:3] # Show first few queries
 909 |                     output_content = Syntax("\n\n".join(sample_queries) + "\n...", "cypher", theme="default", line_numbers=True, word_wrap=True)
 910 |                 elif fmt == OutputFormat.RDF:
 911 |                      triples = data_to_display
 912 |                      sample_triples = ['("{}", "{}", "{}")'.format(*t) for t in triples[:5]] # Format first few
 913 |                      output_content = Syntax("\n".join(sample_triples) + "\n...", "turtle", theme="default", line_numbers=True, word_wrap=True) # Turtle lexer is approximate but renders readably
 914 |                 elif fmt == OutputFormat.D3:
 915 |                      sample = {
 916 |                          "nodes": data_to_display.get("nodes", [])[:2],
 917 |                          "links": data_to_display.get("links", [])[:2]
 918 |                      }
 919 |                      output_content = Syntax(json.dumps(sample, indent=2), "json", theme="default", line_numbers=True, word_wrap=True)
 920 |                 else:
 921 |                      # Fallback for unexpected formats
 922 |                      output_content = escape(str(data_to_display)[:500] + "...")
 923 | 
 924 | 
 925 |                 main_console.print(Panel(
 926 |                     output_content,
 927 |                     title=display_title,
 928 |                     border_style="green",
 929 |                     expand=False
 930 |                 ))
 931 | 
 932 |             else:
 933 |                  main_console.print(f"[yellow]Output key '{output_key}' not found in result for {desc} format.[/yellow]")
 934 | 
 935 | 
 936 |         except Exception as e:
 937 |             logger.error(f"Unhandled error demonstrating format {desc}: {e}", exc_info=True)
 938 |             main_console.print(f"[bold red]Unhandled Error with {desc} format:[/bold red] {escape(str(e))}")
 939 | 
 940 | 
 941 | async def main():
 942 |     """Run entity relation graph extraction demonstrations."""
 943 |     try:
 944 |         # Display welcome message
 945 |         main_console.print(Rule("[bold magenta]Entity Relationship Graph Extraction Demo (v2 Tool)[/bold magenta]"))
 946 |         main_console.print(
 947 |             "[bold]This demonstrates the refactored `entity_graph` tool for extracting and visualizing "
 948 |             "knowledge graphs from text across different domains and using various strategies.[/bold]\n"
 949 |         )
 950 | 
 951 |         # Check for dependencies needed by the demo display itself
 952 |         if not HAS_VISUALIZATION_LIBS:
 953 |              main_console.print("[yellow]Warning:[/yellow] `networkx`, `pyvis`, or `matplotlib` not installed.")
 954 |              main_console.print("Graph metrics, tree view, and some visualizations may be unavailable.")
 955 |         if not HAS_NETWORKX:
 956 |              main_console.print("[yellow]Warning:[/yellow] `networkx` not installed. Graph metrics and tree view disabled.")
 957 | 
 958 | 
 959 |         # Initialize the Gateway (optional, depends if Gateway context is needed for config/logging)
 960 |         # If the tool functions standalone, this might not be strictly necessary for the demo.
 961 |         # gateway = Gateway("entity-graph-demo", register_tools=True)
 962 |         # logger.info("Ultimate MCP Server initialized (optional for demo).")
 963 | 
 964 | 
 965 |         # Check if sample directory exists
 966 |         if not SAMPLE_DIR.exists() or not any(SAMPLE_DIR.iterdir()):
 967 |             main_console.print(f"[bold red]Error:[/bold red] Sample directory '{SAMPLE_DIR}' not found or is empty!")
 968 |             main_console.print("Please create the 'sample' directory next to this script and add text files (e.g., article.txt).")
 969 |             return 1
 970 | 
 971 |         # --- Run Demonstrations ---
 972 |         # Define the models to use; these could be selected based on availability or speed
 973 |         # Using gpt-4.1-mini via OpenAI for all domain and format examples
 974 |         default_model = "gpt-4.1-mini"
 975 |         default_provider = Provider.OPENAI.value  # Provider enum value as a string, e.g. "openai"
 976 | 
 977 |         # 1. Domain Examples (using appropriate strategies)
 978 |         await demonstrate_domain_extraction(TextDomain.BUSINESS, "article.txt", GraphStrategy.STANDARD, model=default_model, provider=default_provider)
 979 |         await demonstrate_domain_extraction(TextDomain.ACADEMIC, "research_paper.txt", GraphStrategy.MULTISTAGE, model=default_model, provider=default_provider)
 980 |         await demonstrate_domain_extraction(TextDomain.LEGAL, "legal_contract.txt", GraphStrategy.STRUCTURED, model=default_model, provider=default_provider)
 981 |         await demonstrate_domain_extraction(TextDomain.MEDICAL, "medical_case.txt", GraphStrategy.STRICT_SCHEMA, model=default_model, provider=default_provider)
 982 | 
 983 |         # 2. Strategy Comparison
 984 |         await demonstrate_strategy_comparison()
 985 | 
 986 |         # 3. Output Format Demonstration
 987 |         await demonstrate_output_formats()
 988 | 
 989 |         main_console.print(Rule("[bold green]Entity Relationship Graph Extraction Demo Complete[/bold green]"))
 990 |         # Split the print into two simpler statements to avoid rich markup issues
 991 |         main_console.print(f"\n[bold]Visualizations and outputs have been saved to:[/bold] {OUTPUT_DIR}")
 992 |         main_console.print("Open any HTML files in a web browser to explore interactive graphs.")
 993 | 
 994 |         return 0
 995 | 
 996 |     except Exception as e:
 997 |         # Catch-all for unexpected errors during setup or top-level execution
 998 |         logger.critical(f"Demo failed catastrophically: {e}", exc_info=True)
 999 |         main_console.print(f"[bold red]Critical Demo Error:[/bold red] {escape(str(e))}")
1000 |         return 1
1001 | 
1002 | if __name__ == "__main__":
1003 |     exit_code = asyncio.run(main())
1004 |     sys.exit(exit_code)
```
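
For reference, the domain demos above can be driven individually without running the full `main()` sequence. A minimal sketch, assuming the module is importable as `examples.entity_relation_graph_demo` and that `Provider` resolves at the import path shown below (both assumptions, not verified here):

```python
import asyncio

# Hypothetical one-off run of a single domain demo, reusing the objects
# defined in entity_relation_graph_demo.py above.
from examples.entity_relation_graph_demo import (
    GraphStrategy,
    TextDomain,
    demonstrate_domain_extraction,
)
from ultimate_mcp_server.constants import Provider  # assumed import path


async def run_one() -> None:
    # Same call shape as in main(): domain, sample filename, strategy,
    # plus the model/provider pair used throughout the demo.
    await demonstrate_domain_extraction(
        TextDomain.BUSINESS,
        "article.txt",
        GraphStrategy.STANDARD,
        model="gpt-4.1-mini",
        provider=Provider.OPENAI.value,
    )


if __name__ == "__main__":
    asyncio.run(run_one())
```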

--------------------------------------------------------------------------------
/ultimate_mcp_server/core/ums_api/ums_models.py:
--------------------------------------------------------------------------------

```python
   1 | """Pydantic models for UMS API endpoints."""
   2 | 
   3 | from datetime import datetime
   4 | from typing import Any, Dict, List, Optional
   5 | 
   6 | from pydantic import BaseModel, Field
   7 | 
   8 | 
   9 | # ---------- Cognitive States Models ----------
  10 | 
  11 | class CognitiveState(BaseModel):
  12 |     state_id: str
  13 |     timestamp: float
  14 |     formatted_timestamp: str
  15 |     state_type: str
  16 |     description: Optional[str] = None
  17 |     workflow_id: Optional[str] = None
  18 |     workflow_title: Optional[str] = None
  19 |     complexity_score: float
  20 |     change_magnitude: float
  21 |     age_minutes: float
  22 |     memory_count: int
  23 |     action_count: int
  24 |     state_data: Dict[str, Any] = {}
  25 | 
  26 | 
  27 | class CognitiveStatesResponse(BaseModel):
  28 |     states: List[CognitiveState]
  29 |     total: int
  30 |     has_more: bool
  31 | 
  32 | 
  33 | class TimelineState(BaseModel):
  34 |     state_id: str
  35 |     timestamp: float
  36 |     formatted_time: str
  37 |     state_type: str
  38 |     workflow_id: Optional[str] = None
  39 |     description: Optional[str] = None
  40 |     sequence_number: int
  41 |     complexity_score: float
  42 |     change_magnitude: float
  43 | 
  44 | 
  45 | class TimelineSummaryStats(BaseModel):
  46 |     avg_complexity: float
  47 |     total_transitions: int
  48 |     max_change_magnitude: float
  49 | 
  50 | 
  51 | class CognitiveTimelineResponse(BaseModel):
  52 |     timeline_data: List[TimelineState]
  53 |     total_states: int
  54 |     time_range_hours: int
  55 |     granularity: str
  56 |     summary_stats: TimelineSummaryStats
  57 | 
  58 | 
  59 | class Memory(BaseModel):
  60 |     memory_id: str
  61 |     memory_type: str
  62 |     content: str
  63 |     importance: float
  64 |     created_at: float
  65 | 
  66 | 
  67 | class Action(BaseModel):
  68 |     action_id: str
  69 |     action_type: str
  70 |     tool_name: str
  71 |     status: str
  72 |     started_at: float
  73 | 
  74 | 
  75 | class DetailedCognitiveState(BaseModel):
  76 |     state_id: str
  77 |     timestamp: float
  78 |     formatted_timestamp: str
  79 |     state_type: str
  80 |     description: Optional[str] = None
  81 |     workflow_id: Optional[str] = None
  82 |     workflow_title: Optional[str] = None
  83 |     workflow_goal: Optional[str] = None
  84 |     state_data: Dict[str, Any]
  85 |     complexity_score: float
  86 |     memories: List[Memory] = []
  87 |     actions: List[Action] = []
  88 | 
  89 | 
  90 | class Pattern(BaseModel):
  91 |     type: str
  92 |     length: int
  93 |     similarity: float
  94 |     occurrences: int
  95 |     first_occurrence: float
  96 |     pattern_description: str
  97 | 
  98 | 
  99 | class Transition(BaseModel):
 100 |     transition: str
 101 |     count: int
 102 |     percentage: float
 103 | 
 104 | 
 105 | class Anomaly(BaseModel):
 106 |     state_id: str
 107 |     timestamp: float
 108 |     anomaly_type: str
 109 |     z_score: float
 110 |     description: str
 111 |     severity: str
 112 | 
 113 | 
 114 | class PatternSummary(BaseModel):
 115 |     pattern_count: int
 116 |     most_common_transition: Optional[Transition] = None
 117 |     anomaly_count: int
 118 | 
 119 | 
 120 | class CognitivePatternAnalysis(BaseModel):
 121 |     total_states: int
 122 |     time_range_hours: int
 123 |     patterns: List[Pattern] = []
 124 |     transitions: List[Transition] = []
 125 |     anomalies: List[Anomaly] = []
 126 |     summary: PatternSummary
 127 | 
 128 | 
 129 | class StateComparisonInfo(BaseModel):
 130 |     state_id: str
 131 |     timestamp: float
 132 |     formatted_timestamp: str
 133 | 
 134 | 
 135 | class StateDiff(BaseModel):
 136 |     added: Dict[str, Any] = {}
 137 |     removed: Dict[str, Any] = {}
 138 |     modified: Dict[str, Dict[str, Any]] = {}
 139 |     magnitude: float
 140 | 
 141 | 
 142 | class StateComparisonRequest(BaseModel):
 143 |     state_id_1: str = Field(
 144 |         ...,
 145 |         description="First cognitive state ID for comparison",
 146 |         example="state_abc123"
 147 |     )
 148 |     state_id_2: str = Field(
 149 |         ...,
 150 |         description="Second cognitive state ID for comparison", 
 151 |         example="state_xyz789"
 152 |     )
 153 | 
 154 | 
 155 | class StateComparisonResponse(BaseModel):
 156 |     state_1: StateComparisonInfo
 157 |     state_2: StateComparisonInfo
 158 |     time_diff_minutes: float
 159 |     diff: StateDiff
 160 | 
 161 | 
 162 | # ---------- Action Monitor Models ----------
 163 | 
 164 | class StatusIndicator(BaseModel):
 165 |     """Action status indicator with visual cues"""
 166 |     color: str = Field(..., description="Color for visual representation")
 167 |     icon: str = Field(..., description="Icon name for the status")
 168 |     label: str = Field(..., description="Human-readable status label")
 169 |     urgency: str = Field(..., description="Urgency level: low, medium, high")
 170 | 
 171 | 
 172 | class ResourceUsage(BaseModel):
 173 |     """Resource usage metrics for an action"""
 174 |     cpu_usage: float = Field(..., description="CPU usage percentage")
 175 |     memory_usage: float = Field(..., description="Memory usage percentage")
 176 |     network_io: float = Field(..., description="Network I/O in KB/s")
 177 |     disk_io: float = Field(..., description="Disk I/O in KB/s")
 178 | 
 179 | 
 180 | class RunningAction(BaseModel):
 181 |     """Model for a currently running action"""
 182 |     action_id: str = Field(..., description="Unique action identifier")
 183 |     workflow_id: Optional[str] = Field(None, description="Associated workflow ID")
 184 |     workflow_title: Optional[str] = Field(None, description="Workflow title")
 185 |     tool_name: str = Field(..., description="Name of the tool being executed")
 186 |     status: str = Field(..., description="Current execution status")
 187 |     started_at: float = Field(..., description="Start timestamp")
 188 |     formatted_start_time: str = Field(..., description="ISO formatted start time")
 189 |     execution_time_seconds: float = Field(
 190 |         ..., description="Current execution duration in seconds"
 191 |     )
 192 |     estimated_duration: Optional[float] = Field(
 193 |         None, description="Estimated duration in seconds"
 194 |     )
 195 |     progress_percentage: float = Field(..., description="Estimated progress percentage")
 196 |     status_indicator: StatusIndicator = Field(..., description="Visual status indicator")
 197 |     performance_category: str = Field(..., description="Performance categorization")
 198 |     resource_usage: ResourceUsage = Field(..., description="Current resource usage")
 199 |     tool_data: Dict[str, Any] = Field(
 200 |         default_factory=dict, description="Tool-specific data"
 201 |     )
 202 | 
 203 | 
 204 | class RunningActionsResponse(BaseModel):
 205 |     """Response for currently running actions"""
 206 |     running_actions: List[RunningAction] = Field(
 207 |         ..., description="List of currently executing actions"
 208 |     )
 209 |     total_running: int = Field(..., description="Total number of running actions")
 210 |     avg_execution_time: float = Field(
 211 |         ..., description="Average execution time of running actions"
 212 |     )
 213 |     timestamp: str = Field(..., description="Response timestamp")
 214 | 
 215 | 
 216 | class QueuedAction(BaseModel):
 217 |     """Model for a queued action"""
 218 |     action_id: str = Field(..., description="Unique action identifier")
 219 |     workflow_id: Optional[str] = Field(None, description="Associated workflow ID")
 220 |     workflow_title: Optional[str] = Field(None, description="Workflow title")
 221 |     tool_name: str = Field(..., description="Name of the tool to be executed")
 222 |     status: str = Field(..., description="Queue status")
 223 |     created_at: float = Field(..., description="Creation timestamp")
 224 |     formatted_queue_time: str = Field(..., description="ISO formatted queue time")
 225 |     queue_position: int = Field(..., description="Position in the queue (1-based)")
 226 |     queue_time_seconds: float = Field(..., description="Time spent in queue")
 227 |     estimated_wait_time: float = Field(..., description="Estimated wait time in seconds")
 228 |     priority: int = Field(..., description="Numeric priority value")
 229 |     priority_label: str = Field(..., description="Human-readable priority label")
 230 |     tool_data: Dict[str, Any] = Field(
 231 |         default_factory=dict, description="Tool-specific data"
 232 |     )
 233 | 
 234 | 
 235 | class ActionQueueResponse(BaseModel):
 236 |     """Response for action queue status"""
 237 |     queued_actions: List[QueuedAction] = Field(..., description="List of queued actions")
 238 |     total_queued: int = Field(..., description="Total number of queued actions")
 239 |     avg_queue_time: float = Field(..., description="Average time in queue")
 240 |     next_action: Optional[QueuedAction] = Field(
 241 |         None, description="Next action to be executed"
 242 |     )
 243 |     timestamp: str = Field(..., description="Response timestamp")
 244 | 
 245 | 
 246 | class ActionHistoryItem(BaseModel):
 247 |     """Model for a single action in history"""
 248 |     action_id: str = Field(..., description="Unique action identifier")
 249 |     workflow_id: Optional[str] = Field(None, description="Associated workflow ID")
 250 |     workflow_title: Optional[str] = Field(None, description="Associated workflow title")
 251 |     tool_name: str = Field(..., description="Name of the tool executed")
 252 |     action_type: Optional[str] = Field(None, description="Type of action")
 253 |     status: str = Field(..., description="Action completion status")
 254 |     started_at: float = Field(..., description="Unix timestamp when action started")
 255 |     completed_at: Optional[float] = Field(
 256 |         None, description="Unix timestamp when action completed"
 257 |     )
 258 |     execution_duration_seconds: float = Field(
 259 |         ..., description="Total execution time in seconds"
 260 |     )
 261 |     performance_score: float = Field(
 262 |         ..., description="Calculated performance score (0-100)"
 263 |     )
 264 |     efficiency_rating: str = Field(
 265 |         ..., description="Efficiency rating based on time and output"
 266 |     )
 267 |     success_rate_impact: int = Field(..., description="Impact on success rate (1 or 0)")
 268 |     formatted_start_time: str = Field(..., description="ISO formatted start time")
 269 |     formatted_completion_time: Optional[str] = Field(
 270 |         None, description="ISO formatted completion time"
 271 |     )
 272 |     tool_data: Dict[str, Any] = Field(
 273 |         default_factory=dict, description="Tool-specific data"
 274 |     )
 275 |     result_data: Dict[str, Any] = Field(
 276 |         default_factory=dict, description="Action result data"
 277 |     )
 278 |     result_size: int = Field(0, description="Size of the result data")
 279 | 
 280 | 
 281 | class PerformanceSummary(BaseModel):
 282 |     """Performance summary statistics"""
 283 |     avg_score: float = Field(..., description="Average performance score")
 284 |     top_performer: Optional[Dict[str, Any]] = Field(
 285 |         None, description="Best performing tool"
 286 |     )
 287 |     worst_performer: Optional[Dict[str, Any]] = Field(
 288 |         None, description="Worst performing tool"
 289 |     )
 290 |     efficiency_distribution: Dict[str, int] = Field(
 291 |         ..., description="Distribution of efficiency ratings"
 292 |     )
 293 | 
 294 | 
 295 | class ActionHistoryResponse(BaseModel):
 296 |     """Response model for action history"""
 297 |     action_history: List[ActionHistoryItem] = Field(
 298 |         ..., description="List of completed actions"
 299 |     )
 300 |     total_actions: int = Field(
 301 |         ..., description="Total number of actions in the time period"
 302 |     )
 303 |     success_rate: float = Field(..., description="Overall success rate percentage")
 304 |     avg_execution_time: float = Field(..., description="Average execution time in seconds")
 305 |     performance_summary: PerformanceSummary = Field(
 306 |         ..., description="Performance summary statistics"
 307 |     )
 308 |     timestamp: str = Field(..., description="Response timestamp")
 309 | 
 310 | 
 311 | class OverallMetrics(BaseModel):
 312 |     """Overall action execution metrics"""
 313 |     total_actions: int = Field(..., description="Total number of actions executed")
 314 |     successful_actions: int = Field(
 315 |         ..., description="Number of successfully completed actions"
 316 |     )
 317 |     failed_actions: int = Field(..., description="Number of failed actions")
 318 |     avg_duration: Optional[float] = Field(
 319 |         None, description="Average execution duration in seconds"
 320 |     )
 321 |     success_rate_percentage: float = Field(
 322 |         ..., description="Overall success rate as percentage"
 323 |     )
 324 |     failure_rate_percentage: float = Field(
 325 |         ..., description="Overall failure rate as percentage"
 326 |     )
 327 |     avg_duration_seconds: float = Field(..., description="Average duration in seconds")
 328 | 
 329 | 
 330 | class ToolUsageStat(BaseModel):
 331 |     """Statistics for a single tool"""
 332 |     tool_name: str = Field(..., description="Name of the tool")
 333 |     usage_count: int = Field(..., description="Number of times the tool was used")
 334 |     success_count: int = Field(..., description="Number of successful executions")
 335 |     avg_duration: Optional[float] = Field(
 336 |         None, description="Average execution time in seconds"
 337 |     )
 338 | 
 339 | 
 340 | class HourlyMetric(BaseModel):
 341 |     """Hourly performance metrics"""
 342 |     hour: str = Field(..., description="Hour of the day (0-23)")
 343 |     action_count: int = Field(..., description="Number of actions in this hour")
 344 |     avg_duration: Optional[float] = Field(
 345 |         None, description="Average duration for this hour"
 346 |     )
 347 |     success_count: int = Field(..., description="Number of successful actions")
 348 | 
 349 | 
 350 | class PerformanceInsight(BaseModel):
 351 |     """Performance insight or recommendation"""
 352 |     type: str = Field(..., description="Type of insight (warning, info, etc.)")
 353 |     title: str = Field(..., description="Title of the insight")
 354 |     message: str = Field(..., description="Detailed message")
 355 |     severity: str = Field(..., description="Severity level (high, medium, low)")
 356 | 
 357 | 
 358 | class ActionMetricsResponse(BaseModel):
 359 |     """Response model for action metrics"""
 360 |     overall_metrics: OverallMetrics = Field(..., description="Overall execution metrics")
 361 |     tool_usage_stats: List[ToolUsageStat] = Field(
 362 |         ..., description="Per-tool usage statistics"
 363 |     )
 364 |     hourly_performance: List[HourlyMetric] = Field(
 365 |         ..., description="Hourly performance breakdown"
 366 |     )
 367 |     performance_insights: List[PerformanceInsight] = Field(
 368 |         ..., description="Actionable insights and recommendations"
 369 |     )
 370 |     timestamp: str = Field(..., description="Response timestamp")
 371 | 
 372 | 
 373 | # ---------- Artifacts Models ----------
 374 | 
 375 | class Artifact(BaseModel):
 376 |     """Model for a single artifact"""
 377 |     artifact_id: str = Field(..., description="Unique artifact identifier")
 378 |     name: str = Field(..., description="Name of the artifact")
 379 |     artifact_type: str = Field(
 380 |         ..., description="Type of artifact (document, image, code, etc.)"
 381 |     )
 382 |     description: Optional[str] = Field(None, description="Description of the artifact")
 383 |     file_path: Optional[str] = Field(None, description="File system path to the artifact")
 384 |     workflow_id: Optional[str] = Field(None, description="Associated workflow ID")
 385 |     workflow_title: Optional[str] = Field(None, description="Title of associated workflow")
 386 |     created_at: float = Field(..., description="Creation timestamp")
 387 |     updated_at: float = Field(..., description="Last update timestamp")
 388 |     file_size: int = Field(..., description="File size in bytes")
 389 |     file_size_human: str = Field(..., description="Human-readable file size")
 390 |     importance: Optional[float] = Field(None, description="Importance score (1-10)")
 391 |     access_count: int = Field(0, description="Number of times accessed")
 392 |     tags: List[str] = Field(default_factory=list, description="Associated tags")
 393 |     metadata: Dict[str, Any] = Field(
 394 |         default_factory=dict, description="Additional metadata"
 395 |     )
 396 |     relationship_count: int = Field(0, description="Number of related artifacts")
 397 |     version_count: int = Field(0, description="Number of versions")
 398 |     formatted_created_at: str = Field(..., description="ISO formatted creation date")
 399 |     formatted_updated_at: str = Field(..., description="ISO formatted update date")
 400 |     age_days: float = Field(..., description="Age of artifact in days")
 401 | 
 402 | 
 403 | class ArtifactsFilter(BaseModel):
 404 |     """Filter parameters used in the request"""
 405 |     artifact_type: Optional[str] = Field(None, description="Type filter applied")
 406 |     workflow_id: Optional[str] = Field(None, description="Workflow filter applied")
 407 |     tags: Optional[str] = Field(None, description="Tags filter applied")
 408 |     search: Optional[str] = Field(None, description="Search query applied")
 409 |     sort_by: str = Field(..., description="Sort field used")
 410 |     sort_order: str = Field(..., description="Sort order used")
 411 | 
 412 | 
 413 | class ArtifactsResponse(BaseModel):
 414 |     """Response model for artifacts listing"""
 415 |     artifacts: List[Artifact] = Field(..., description="List of artifacts")
 416 |     total: int = Field(..., description="Total number of artifacts matching query")
 417 |     has_more: bool = Field(..., description="Whether there are more artifacts available")
 418 |     filters: ArtifactsFilter = Field(..., description="Filters that were applied")
 419 | 
 420 | 
 421 | class ArtifactTypeStats(BaseModel):
 422 |     """Statistics for a specific artifact type"""
 423 |     artifact_type: str = Field(..., description="Type of artifact")
 424 |     count: int = Field(..., description="Number of artifacts of this type")
 425 |     avg_importance: Optional[float] = Field(None, description="Average importance score")
 426 |     total_size: int = Field(..., description="Total size of all artifacts of this type")
 427 |     max_access_count: int = Field(..., description="Maximum access count for this type")
 428 | 
 429 | 
 430 | class ArtifactOverallStats(BaseModel):
 431 |     """Overall artifact statistics"""
 432 |     total_artifacts: int = Field(..., description="Total number of artifacts")
 433 |     unique_types: int = Field(..., description="Number of unique artifact types")
 434 |     unique_workflows: int = Field(..., description="Number of unique workflows")
 435 |     total_size: int = Field(..., description="Total size of all artifacts in bytes")
 436 |     total_size_human: str = Field(..., description="Human-readable total size")
 437 |     avg_size: float = Field(..., description="Average artifact size in bytes")
 438 |     latest_created: Optional[float] = Field(
 439 |         None, description="Timestamp of most recent artifact"
 440 |     )
 441 |     earliest_created: Optional[float] = Field(
 442 |         None, description="Timestamp of oldest artifact"
 443 |     )
 444 | 
 445 | 
 446 | class ArtifactStatsResponse(BaseModel):
 447 |     """Response model for artifact statistics"""
 448 |     overall: ArtifactOverallStats = Field(..., description="Overall statistics")
 449 |     by_type: List[ArtifactTypeStats] = Field(
 450 |         ..., description="Statistics broken down by type"
 451 |     )
 452 | 
 453 | 
 454 | # ---------- Memory Quality Models ----------
 455 | 
 456 | class MemoryDetail(BaseModel):
 457 |     """Detailed information about a memory"""
 458 |     memory_id: str = Field(..., description="Unique memory identifier")
 459 |     workflow_id: Optional[str] = Field(None, description="Associated workflow ID")
 460 |     memory_type: str = Field(..., description="Type of memory")
 461 |     importance: float = Field(..., description="Importance score")
 462 |     created_at: float = Field(..., description="Creation timestamp")
 463 | 
 464 | 
 465 | class DuplicateGroup(BaseModel):
 466 |     """Group of duplicate memories"""
 467 |     cluster_id: str = Field(..., description="Unique identifier for this duplicate cluster")
 468 |     content_preview: str = Field(..., description="Preview of the duplicated content")
 469 |     duplicate_count: int = Field(..., description="Number of duplicates in this group")
 470 |     memory_ids: List[str] = Field(..., description="List of all memory IDs in this group")
 471 |     primary_memory_id: str = Field(..., description="Suggested primary memory to keep")
 472 |     memory_details: List[MemoryDetail] = Field(..., description="Detailed info for each memory")
 473 |     first_created: float = Field(..., description="Timestamp of earliest duplicate")
 474 |     last_created: float = Field(..., description="Timestamp of latest duplicate")
 475 |     avg_importance: float = Field(..., description="Average importance across duplicates")
 476 |     recommendation: str = Field(..., description="Recommended action (merge/review)")
 477 | 
 478 | 
 479 | class DuplicatesResponse(BaseModel):
 480 |     """Response model for duplicate analysis"""
 481 |     success: bool = Field(..., description="Whether analysis completed successfully")
 482 |     clusters: List[DuplicateGroup] = Field(..., description="List of duplicate groups")
 483 |     duplicate_groups: List[DuplicateGroup] = Field(..., description="Alias for clusters (backward compatibility)")
 484 |     total_groups: int = Field(..., description="Total number of duplicate groups found")
 485 |     total_duplicates: int = Field(..., description="Total number of duplicate memories")
 486 | 
 487 | 
 488 | class OrphanedMemory(BaseModel):
 489 |     """Model for an orphaned memory"""
 490 |     memory_id: str = Field(..., description="Unique memory identifier")
 491 |     content: str = Field(..., description="Memory content")
 492 |     memory_type: str = Field(..., description="Type of memory")
 493 |     importance: float = Field(..., description="Importance score")
 494 |     created_at: float = Field(..., description="Creation timestamp")
 495 | 
 496 | 
 497 | class OrphanedMemoriesResponse(BaseModel):
 498 |     """Response model for orphaned memories"""
 499 |     success: bool = Field(..., description="Whether query completed successfully")
 500 |     orphaned_memories: List[OrphanedMemory] = Field(..., description="List of orphaned memories")
 501 |     total_orphaned: int = Field(..., description="Total count of orphaned memories")
 502 |     recommendation: str = Field(..., description="Recommended action for orphaned memories")
 503 | 
 504 | 
 505 | class BulkOperationRequest(BaseModel):
 506 |     """Request model for bulk operations"""
 507 |     operation_type: str = Field(
 508 |         ...,
 509 |         description="Type of bulk operation to perform",
 510 |         pattern="^(delete|archive|merge)$"  # Pydantic v2: 'pattern' replaces the removed 'regex' kwarg
 511 |     )
 512 |     memory_ids: List[str] = Field(
 513 |         ...,
 514 |         description="List of memory IDs to operate on",
 515 |         min_length=1  # Pydantic v2: 'min_length' replaces the deprecated 'min_items'
 516 |     )
 517 |     target_memory_id: Optional[str] = Field(
 518 |         None,
 519 |         description="Target memory ID for merge operations"
 520 |     )
 521 | 
 522 | 
 523 | class BulkOperationResponse(BaseModel):
 524 |     """Response model for bulk operations"""
 525 |     success: bool = Field(..., description="Whether operation completed successfully")
 526 |     operation_type: str = Field(..., description="Type of operation performed")
 527 |     memory_ids: List[str] = Field(..., description="Memory IDs that were operated on")
 528 |     success_count: int = Field(..., description="Number of successful operations")
 529 |     error_count: int = Field(..., description="Number of failed operations")
 530 |     message: str = Field(..., description="Summary message of the operation")
 531 |     errors: List[str] = Field(default_factory=list, description="List of error messages")
 532 |     merged_into: Optional[str] = Field(None, description="Target memory ID for merge operations")
 533 | 
 534 | 
 535 | class PreviewMemory(BaseModel):
 536 |     """Memory preview for bulk operations"""
 537 |     memory_id: str = Field(..., description="Memory ID")
 538 |     content: str = Field(..., description="Memory content")
 539 |     memory_type: str = Field(..., description="Type of memory")
 540 |     importance: float = Field(..., description="Importance score")
 541 |     workflow_id: Optional[str] = Field(None, description="Associated workflow")
 542 | 
 543 | 
 544 | class BulkOperationPreview(BaseModel):
 545 |     """Preview of bulk operation effects"""
 546 |     operation_type: str = Field(..., description="Type of operation to be performed")
 547 |     total_affected: int = Field(..., description="Total memories that will be affected")
 548 |     preview_description: str = Field(..., description="Description of what will happen")
 549 |     affected_memories: List[PreviewMemory] = Field(..., description="Details of affected memories")
 550 |     merge_target: Optional[PreviewMemory] = Field(None, description="Target memory for merge")
 551 |     will_be_deleted: Optional[List[PreviewMemory]] = Field(None, description="Memories to be deleted in merge")
 552 | 
 553 | 
 554 | class BulkPreviewResponse(BaseModel):
 555 |     """Response model for bulk operation preview"""
 556 |     success: bool = Field(..., description="Whether preview generated successfully")
 557 |     operation: BulkOperationPreview = Field(..., description="Preview of the operation")
 558 | 
 559 | 
 560 | # ---------- Working Memory Models ----------
 561 | 
 562 | class FocusMode(BaseModel):
 563 |     """Focus mode configuration"""
 564 |     enabled: bool = Field(..., description="Whether focus mode is enabled")
 565 |     focus_keywords: List[str] = Field(default_factory=list, description="Keywords for focus filtering")
 566 | 
 567 | 
 568 | class PerformanceMetrics(BaseModel):
 569 |     """Working memory performance metrics"""
 570 |     avg_relevance_score: float = Field(..., description="Average relevance score across all memories")
 571 |     optimization_suggestions: int = Field(..., description="Number of optimization suggestions")
 572 | 
 573 | 
 574 | class WorkingMemoryStatus(BaseModel):
 575 |     """Complete working memory system status"""
 576 |     initialized: bool = Field(..., description="Whether the system is initialized")
 577 |     total_capacity: int = Field(..., description="Maximum memory capacity")
 578 |     current_size: int = Field(..., description="Current number of memories in pool")
 579 |     utilization_percentage: float = Field(..., description="Percentage of capacity used")
 580 |     focus_mode: FocusMode = Field(..., description="Focus mode configuration")
 581 |     performance_metrics: PerformanceMetrics = Field(..., description="Performance metrics")
 582 |     category_distribution: Dict[str, int] = Field(default_factory=dict, description="Memory count by category")
 583 |     last_optimization: str = Field(..., description="ISO timestamp of last optimization")
 584 |     optimization_count: int = Field(..., description="Total number of optimizations performed")
 585 | 
 586 | 
 587 | class InitializeRequest(BaseModel):
 588 |     """Request model for initializing working memory"""
 589 |     capacity: int = Field(
 590 |         100,
 591 |         ge=10,
 592 |         le=1000,
 593 |         description="Maximum number of memories in working pool"
 594 |     )
 595 |     focus_threshold: float = Field(
 596 |         0.7,
 597 |         ge=0.0,
 598 |         le=1.0,
 599 |         description="Relevance threshold for focus mode"
 600 |     )
 601 | 
 602 | 
 603 | class InitializeResponse(BaseModel):
 604 |     """Response model for initialization"""
 605 |     success: bool = Field(..., description="Whether initialization was successful")
 606 |     message: str = Field(..., description="Status message")
 607 |     configuration: Dict[str, Any] = Field(..., description="Applied configuration")
 608 | 
 609 | 
 610 | class MemoryItem(BaseModel):
 611 |     """Model for a memory in the working pool"""
 612 |     memory_id: str = Field(..., description="Unique memory identifier")
 613 |     content: str = Field(..., description="Memory content")
 614 |     category: str = Field(..., description="Memory category")
 615 |     importance: float = Field(..., description="Importance score (0-10)")
 616 |     relevance_score: float = Field(..., description="Current relevance score (0-1)")
 617 |     added_at: float = Field(..., description="Timestamp when added to working memory")
 618 |     last_accessed: float = Field(..., description="Timestamp of last access")
 619 |     access_count: int = Field(..., description="Number of times accessed")
 620 | 
 621 | 
 622 | class ActiveMemoriesResponse(BaseModel):
 623 |     """Response for active memories query"""
 624 |     memories: List[MemoryItem] = Field(..., description="List of active memories sorted by relevance")
 625 |     total_count: int = Field(..., description="Total number of memories matching criteria")
 626 |     focus_active: bool = Field(..., description="Whether focus mode filtering is active")
 627 | 
 628 | 
 629 | class SetFocusModeRequest(BaseModel):
 630 |     """Request to set focus mode"""
 631 |     enabled: bool = Field(..., description="Enable or disable focus mode")
 632 |     keywords: List[str] = Field(default_factory=list, description="Keywords for focus filtering", max_length=20)  # Pydantic v2: 'max_length' replaces 'max_items'
 633 | 
 634 | 
 635 | class OptimizeResponse(BaseModel):
 636 |     """Response for optimization operation"""
 637 |     success: bool = Field(..., description="Whether optimization was successful")
 638 |     removed_count: int = Field(..., description="Number of memories removed")
 639 |     message: str = Field(..., description="Optimization result message")
 640 | 
 641 | 
 642 | # ---------- Performance Profiler Models ----------
 643 | 
 644 | class PerformanceOverviewStats(BaseModel):
 645 |     """Overall performance statistics"""
 646 |     total_actions: int = Field(..., description="Total number of actions executed")
 647 |     active_workflows: int = Field(..., description="Number of unique workflows")
 648 |     avg_execution_time: float = Field(..., description="Average execution time in seconds")
 649 |     min_execution_time: Optional[float] = Field(None, description="Minimum execution time")
 650 |     max_execution_time: Optional[float] = Field(None, description="Maximum execution time")
 651 |     successful_actions: int = Field(..., description="Number of successful actions")
 652 |     failed_actions: int = Field(..., description="Number of failed actions")
 653 |     tools_used: int = Field(..., description="Number of distinct tools used")
 654 |     success_rate_percentage: float = Field(..., description="Success rate as percentage")
 655 |     throughput_per_hour: float = Field(..., description="Actions processed per hour")
 656 |     error_rate_percentage: float = Field(..., description="Error rate as percentage")
 657 |     avg_workflow_size: float = Field(..., description="Average actions per workflow")
 658 | 
 659 | 
 660 | class TimelineBucket(BaseModel):
 661 |     """Performance metrics for a time bucket"""
 662 |     time_bucket: str = Field(..., description="Time bucket identifier")
 663 |     action_count: int = Field(..., description="Number of actions in this bucket")
 664 |     avg_duration: Optional[float] = Field(None, description="Average duration in seconds")
 665 |     successful_count: int = Field(..., description="Number of successful actions")
 666 |     failed_count: int = Field(..., description="Number of failed actions")
 667 |     workflow_count: int = Field(..., description="Number of unique workflows")
 668 | 
 669 | 
 670 | class ToolUtilization(BaseModel):
 671 |     """Tool utilization metrics"""
 672 |     tool_name: str = Field(..., description="Name of the tool")
 673 |     usage_count: int = Field(..., description="Number of times used")
 674 |     avg_duration: Optional[float] = Field(None, description="Average execution duration")
 675 |     success_count: int = Field(..., description="Number of successful executions")
 676 |     max_duration: Optional[float] = Field(None, description="Maximum execution duration")
 677 | 
 678 | 
 679 | class Bottleneck(BaseModel):
 680 |     """Performance bottleneck information"""
 681 |     tool_name: str = Field(..., description="Tool causing the bottleneck")
 682 |     workflow_id: Optional[str] = Field(None, description="Associated workflow")
 683 |     action_id: str = Field(..., description="Action identifier")
 684 |     started_at: float = Field(..., description="Start timestamp")
 685 |     completed_at: Optional[float] = Field(None, description="Completion timestamp")
 686 |     duration: float = Field(..., description="Duration in seconds")
 687 |     status: str = Field(..., description="Action status")
 688 |     reasoning: Optional[str] = Field(None, description="Action reasoning")
 689 | 
 690 | 
 691 | class PerformanceOverviewResponse(BaseModel):
 692 |     """Response model for performance overview"""
 693 |     overview: PerformanceOverviewStats
 694 |     timeline: List[TimelineBucket]
 695 |     tool_utilization: List[ToolUtilization]
 696 |     bottlenecks: List[Bottleneck]
 697 |     analysis_period: Dict[str, Any] = Field(..., description="Analysis period information")
 698 |     timestamp: str = Field(..., description="Response generation timestamp")
 699 | 
 700 | 
 701 | class ToolBottleneck(BaseModel):
 702 |     """Tool performance bottleneck analysis"""
 703 |     tool_name: str = Field(..., description="Name of the tool")
 704 |     total_calls: int = Field(..., description="Total number of calls")
 705 |     avg_duration: float = Field(..., description="Average execution duration")
 706 |     max_duration: float = Field(..., description="Maximum execution duration")
 707 |     min_duration: float = Field(..., description="Minimum execution duration")
 708 |     p95_duration: float = Field(..., description="95th percentile duration")
 709 |     p99_duration: float = Field(..., description="99th percentile duration")
 710 |     failure_count: int = Field(..., description="Number of failures")
 711 |     total_time_spent: float = Field(..., description="Total time spent in seconds")
 712 | 
 713 | 
 714 | class WorkflowBottleneck(BaseModel):
 715 |     """Workflow performance bottleneck"""
 716 |     workflow_id: str = Field(..., description="Workflow identifier")
 717 |     title: Optional[str] = Field(None, description="Workflow title")
 718 |     action_count: int = Field(..., description="Number of actions")
 719 |     avg_action_duration: float = Field(..., description="Average action duration")
 720 |     max_action_duration: float = Field(..., description="Maximum action duration")
 721 |     total_workflow_time: float = Field(..., description="Total workflow execution time")
 722 |     workflow_start: float = Field(..., description="Workflow start timestamp")
 723 |     workflow_end: float = Field(..., description="Workflow end timestamp")
 724 |     total_elapsed_time: float = Field(..., description="Total elapsed wall-clock time")
 725 | 
 726 | 
 727 | class ParallelizationOpportunity(BaseModel):
 728 |     """Workflow parallelization opportunity"""
 729 |     workflow_id: str = Field(..., description="Workflow identifier")
 730 |     sequential_actions: int = Field(..., description="Number of sequential actions")
 731 |     total_sequential_time: float = Field(..., description="Total sequential execution time")
 732 |     actual_elapsed_time: float = Field(..., description="Actual elapsed time")
 733 |     potential_time_savings: float = Field(..., description="Potential time savings in seconds")
 734 |     parallelization_efficiency: float = Field(..., description="Current parallelization efficiency percentage")
 735 |     optimization_score: float = Field(..., description="Optimization potential score (0-10)")
 736 | 
 737 | 
 738 | class ResourceContention(BaseModel):
 739 |     """Resource contention analysis"""
 740 |     tool_name: str = Field(..., description="Tool name")
 741 |     concurrent_usage: int = Field(..., description="Number of concurrent usages")
 742 |     avg_duration_under_contention: float = Field(..., description="Average duration when contended")
 743 | 
 744 | 
 745 | class OptimizationRecommendation(BaseModel):
 746 |     """Performance optimization recommendation"""
 747 |     type: str = Field(..., description="Type of optimization")
 748 |     priority: str = Field(..., description="Priority level (high, medium, low)")
 749 |     title: str = Field(..., description="Recommendation title")
 750 |     description: str = Field(..., description="Detailed description")
 751 |     impact: str = Field(..., description="Expected impact description")
 752 |     actions: List[str] = Field(..., description="Recommended actions to take")
 753 | 
 754 | 
 755 | class BottleneckAnalysisResponse(BaseModel):
 756 |     """Response model for bottleneck analysis"""
 757 |     tool_bottlenecks: List[ToolBottleneck]
 758 |     workflow_bottlenecks: List[WorkflowBottleneck]
 759 |     parallelization_opportunities: List[ParallelizationOpportunity]
 760 |     resource_contention: List[ResourceContention]
 761 |     recommendations: List[OptimizationRecommendation]
 762 |     analysis_summary: Dict[str, Any]
 763 |     timestamp: str
 764 | 
 765 | 
 766 | class FlameGraphNode(BaseModel):
 767 |     """Model for a flame graph node"""
 768 |     name: str = Field(..., description="Name of the node (workflow, tool, or action)")
 769 |     value: float = Field(..., description="Duration in seconds")
 770 |     children: List['FlameGraphNode'] = Field(default_factory=list, description="Child nodes")
 771 |     action_id: Optional[str] = Field(None, description="Action ID if this is an action node")
 772 |     status: Optional[str] = Field(None, description="Execution status")
 773 |     reasoning: Optional[str] = Field(None, description="Reasoning for the action")
 774 |     started_at: Optional[float] = Field(None, description="Start timestamp")
 775 |     completed_at: Optional[float] = Field(None, description="Completion timestamp")
 776 | 
 777 | 
 778 | FlameGraphNode.model_rebuild()  # Needed for recursive model
 779 | 
 780 | 
 781 | class CriticalPathAction(BaseModel):
 782 |     """Model for a critical path action"""
 783 |     action_id: str = Field(..., description="Action identifier")
 784 |     tool_name: str = Field(..., description="Tool used for the action")
 785 |     duration: float = Field(..., description="Duration in seconds")
 786 |     start_time: float = Field(..., description="Start timestamp")
 787 |     end_time: float = Field(..., description="End timestamp")
 788 | 
 789 | 
 790 | class WorkflowMetrics(BaseModel):
 791 |     """Workflow performance metrics"""
 792 |     total_actions: int = Field(..., description="Total number of actions in workflow")
 793 |     total_cpu_time: float = Field(..., description="Total CPU time (sum of all action durations)")
 794 |     wall_clock_time: float = Field(..., description="Total wall clock time from start to end")
 795 |     parallelization_efficiency: float = Field(..., description="Efficiency percentage (0-100)")
 796 |     avg_action_duration: float = Field(..., description="Average duration per action")
 797 |     workflow_start: float = Field(..., description="Workflow start timestamp")
 798 |     workflow_end: float = Field(..., description="Workflow end timestamp")
 799 | 
 800 | 
 801 | class WorkflowAnalysis(BaseModel):
 802 |     """Analysis results for workflow optimization"""
 803 |     bottleneck_tool: Optional[str] = Field(None, description="Tool causing the main bottleneck")
 804 |     parallelization_potential: float = Field(..., description="Potential time savings through parallelization")
 805 |     optimization_score: float = Field(..., description="Overall optimization score (0-10)")
 806 | 
 807 | 
 808 | class FlameGraphResponse(BaseModel):
 809 |     """Response model for flame graph generation"""
 810 |     flame_graph: FlameGraphNode = Field(..., description="Hierarchical flame graph data")
 811 |     metrics: WorkflowMetrics = Field(..., description="Workflow performance metrics")
 812 |     critical_path: List[CriticalPathAction] = Field(..., description="Critical path through the workflow")
 813 |     analysis: WorkflowAnalysis = Field(..., description="Workflow optimization analysis")
 814 |     timestamp: str = Field(..., description="Response generation timestamp")
 815 | 
 816 | 
 817 | class DailyTrend(BaseModel):
 818 |     """Model for daily performance metrics"""
 819 |     date: str = Field(..., description="Date in YYYY-MM-DD format")
 820 |     action_count: int = Field(..., description="Number of actions executed")
 821 |     avg_duration: Optional[float] = Field(None, description="Average action duration in seconds")
 822 |     success_rate: float = Field(..., description="Success rate percentage (0-100)")
 823 |     throughput: float = Field(..., description="Actions per hour")
 824 |     error_rate: float = Field(..., description="Error rate percentage (0-100)")
 825 |     successful_actions: int = Field(..., description="Number of successful actions")
 826 |     failed_actions: int = Field(..., description="Number of failed actions")
 827 |     workflow_count: int = Field(..., description="Number of unique workflows")
 828 |     tool_count: int = Field(..., description="Number of unique tools used")
 829 | 
 830 | 
 831 | class ToolTrend(BaseModel):
 832 |     """Model for tool-specific performance trends"""
 833 |     tool_name: str = Field(..., description="Name of the tool")
 834 |     date: str = Field(..., description="Date in YYYY-MM-DD format")
 835 |     usage_count: int = Field(..., description="Number of times used")
 836 |     avg_duration: Optional[float] = Field(None, description="Average execution duration")
 837 |     success_count: int = Field(..., description="Number of successful executions")
 838 | 
 839 | 
 840 | class WorkflowComplexityTrend(BaseModel):
 841 |     """Model for workflow complexity trends"""
 842 |     date: str = Field(..., description="Date in YYYY-MM-DD format")
 843 |     workflow_id: str = Field(..., description="Workflow identifier")
 844 |     action_count: int = Field(..., description="Number of actions in workflow")
 845 |     total_duration: Optional[float] = Field(None, description="Total workflow duration")
 846 |     elapsed_time: Optional[float] = Field(None, description="Wall clock time")
 847 | 
 848 | 
 849 | class TrendAnalysis(BaseModel):
 850 |     """Trend analysis results"""
 851 |     performance_trend: str = Field(..., description="Overall performance trend (improving/degrading/stable/insufficient_data)")
 852 |     success_trend: str = Field(..., description="Success rate trend (improving/degrading/stable/insufficient_data)")
 853 |     data_points: int = Field(..., description="Number of data points analyzed")
 854 |     analysis_period_days: int = Field(..., description="Analysis period in days")
 855 | 
 856 | 
 857 | class InsightMetrics(BaseModel):
 858 |     """Performance insight metrics"""
 859 |     best_performing_day: Optional[DailyTrend] = Field(None, description="Day with best performance")
 860 |     worst_performing_day: Optional[DailyTrend] = Field(None, description="Day with worst performance")
 861 |     peak_throughput_day: Optional[DailyTrend] = Field(None, description="Day with highest throughput")
 862 |     avg_daily_actions: float = Field(..., description="Average actions per day")
 863 | 
 864 | 
 865 | class PerformanceTrendsResponse(BaseModel):
 866 |     """Response model for performance trends analysis"""
 867 |     daily_trends: List[DailyTrend] = Field(..., description="Daily performance metrics")
 868 |     tool_trends: List[ToolTrend] = Field(..., description="Tool-specific performance trends")
 869 |     workflow_complexity: List[WorkflowComplexityTrend] = Field(..., description="Workflow complexity trends")
 870 |     trend_analysis: TrendAnalysis = Field(..., description="Overall trend analysis")
 871 |     patterns: List["PerformancePattern"] = Field(..., description="Detected performance patterns")  # Forward reference; class is defined below
 872 |     insights: InsightMetrics = Field(..., description="Key performance insights")
 873 |     timestamp: str = Field(..., description="Response generation timestamp")
 874 | 
 875 | 
 876 | class ImpactEstimate(BaseModel):
 877 |     """Model for recommendation impact estimates"""
 878 |     time_savings_potential: float = Field(..., description="Estimated time savings in seconds")
 879 |     affected_actions: int = Field(..., description="Number of actions that would benefit")
 880 |     cost_benefit_ratio: float = Field(..., description="Ratio of benefit to implementation cost")
 881 |     affected_workflows: Optional[int] = Field(None, description="Number of affected workflows")
 882 |     efficiency_improvement: Optional[float] = Field(None, description="Percentage efficiency improvement")
 883 |     reliability_improvement: Optional[float] = Field(None, description="Percentage reliability improvement")
 884 |     user_experience_impact: Optional[str] = Field(None, description="Impact on user experience (high/medium/low)")
 885 | 
 886 | 
 887 | class PerformanceRecommendation(BaseModel):
 888 |     """Model for a single performance recommendation"""
 889 |     id: str = Field(..., description="Unique identifier for the recommendation")
 890 |     type: str = Field(..., description="Type of recommendation (tool_optimization, parallelization, reliability_improvement)")
 891 |     priority: str = Field(..., description="Priority level (high, medium, low)")
 892 |     title: str = Field(..., description="Brief title of the recommendation")
 893 |     description: str = Field(..., description="Detailed description of the issue and recommendation")
 894 |     impact_estimate: ImpactEstimate = Field(..., description="Estimated impact of implementing this recommendation")
 895 |     implementation_steps: List[str] = Field(..., description="Step-by-step implementation guide")
 896 |     estimated_effort: str = Field(..., description="Estimated implementation effort (low, medium, high)")
 897 |     prerequisites: List[str] = Field(..., description="Prerequisites for implementation")
 898 |     metrics_to_track: List[str] = Field(..., description="Metrics to track after implementation")
 899 | 
 900 | 
 901 | class RecommendationSummary(BaseModel):
 902 |     """Summary statistics for recommendations"""
 903 |     total_recommendations: int = Field(..., description="Total number of recommendations generated")
 904 |     high_priority: int = Field(..., description="Number of high priority recommendations")
 905 |     medium_priority: int = Field(..., description="Number of medium priority recommendations")
 906 |     low_priority: int = Field(..., description="Number of low priority recommendations")
 907 |     estimated_total_savings: float = Field(..., description="Total estimated time savings in seconds")
 908 |     analysis_period_hours: int = Field(..., description="Hours of data analyzed")
 909 | 
 910 | 
 911 | class ImplementationRoadmap(BaseModel):
 912 |     """Categorized implementation roadmap"""
 913 |     quick_wins: List[PerformanceRecommendation] = Field(..., description="Low effort, high impact recommendations")
 914 |     major_improvements: List[PerformanceRecommendation] = Field(..., description="High effort, high impact recommendations")
 915 |     maintenance_tasks: List[PerformanceRecommendation] = Field(..., description="Low priority maintenance recommendations")
 916 | 
 917 | 
 918 | class PerformanceRecommendationsResponse(BaseModel):
 919 |     """Response model for performance recommendations"""
 920 |     recommendations: List[PerformanceRecommendation] = Field(..., description="List of actionable recommendations")
 921 |     summary: RecommendationSummary = Field(..., description="Summary statistics")
 922 |     implementation_roadmap: ImplementationRoadmap = Field(..., description="Recommendations organized by implementation strategy")
 923 |     timestamp: str = Field(..., description="ISO timestamp of analysis")
 924 | 
 925 | 
 926 | # ---------- Workflow Management Models ----------
 927 | 
 928 | class WorkflowScheduleRequest(BaseModel):
 929 |     """Request model for scheduling a workflow"""
 930 |     scheduled_at: datetime = Field(
 931 |         ...,
 932 |         description="ISO timestamp for when to execute the workflow",
 933 |         example="2024-01-01T12:00:00Z"
 934 |     )
 935 |     priority: int = Field(
 936 |         default=5,
 937 |         ge=1,
 938 |         le=10,
 939 |         description="Execution priority (1=highest, 10=lowest)",
 940 |         example=3
 941 |     )
 942 | 
 943 | 
 944 | class ScheduleData(BaseModel):
 945 |     """Schedule data for the workflow"""
 946 |     workflow_id: str = Field(..., description="ID of the scheduled workflow")
 947 |     scheduled_at: str = Field(..., description="Scheduled execution time")
 948 |     priority: int = Field(..., description="Execution priority")
 949 |     status: str = Field(..., description="Schedule status")
 950 |     created_at: str = Field(..., description="When the schedule was created")
 951 | 
 952 | 
 953 | class WorkflowScheduleResponse(BaseModel):
 954 |     """Response model for workflow scheduling"""
 955 |     success: bool = Field(..., description="Whether scheduling was successful")
 956 |     schedule_id: str = Field(..., description="Unique identifier for this schedule")
 957 |     message: str = Field(..., description="Success or error message")
 958 |     schedule_data: ScheduleData = Field(..., description="Details of the created schedule")
 959 | 
 960 | 
 961 | class RestoreStateRequest(BaseModel):
 962 |     """Request model for restoring a cognitive state"""
 963 |     restore_mode: str = Field(
 964 |         default="full",
 965 |         pattern="^(full|partial|snapshot)$",  # Pydantic v2: 'pattern' replaces the removed 'regex' kwarg
 966 |         description="Type of restoration to perform",
 967 |         example="full"
 968 |     )
 969 | 
 970 | 
 971 | class RestoreData(BaseModel):
 972 |     """Restoration data"""
 973 |     state_id: str = Field(..., description="ID of the state being restored")
 974 |     restore_mode: str = Field(..., description="Restoration mode used")
 975 |     restored_at: str = Field(..., description="When the restoration occurred")
 976 |     original_timestamp: Optional[float] = Field(None, description="Original state timestamp")
 977 | 
 978 | 
 979 | class RestoreStateResponse(BaseModel):
 980 |     """Response model for state restoration"""
 981 |     success: bool = Field(..., description="Whether restoration was successful")
 982 |     message: str = Field(..., description="Success or error message")
 983 |     restore_data: RestoreData = Field(..., description="Details of the restoration")
 984 | 
 985 | 
 986 | # ---------- Health Check Models ----------
 987 | 
 988 | class HealthResponse(BaseModel):
 989 |     """Health check response"""
 990 |     status: str = Field(..., description="Health status indicator", example="ok")
 991 |     version: str = Field(..., description="Server version string", example="0.1.0")
 992 | 
 993 | 
 994 | # ---------- Performance Trends Models ----------
 995 | 
 996 | class PerformancePattern(BaseModel):
 997 |     """Detected performance pattern"""
 998 |     type: str = Field(..., description="Type of pattern detected")
 999 |     description: str = Field(..., description="Description of the pattern")
1000 |     impact: str = Field(..., description="Impact level (high/medium/low)")
1001 |     recommendation: str = Field(..., description="Recommended action")
1002 |     date: Optional[str] = Field(None, description="Date of occurrence for anomalies")
1003 | 
1004 | 
1005 | # Resolve the forward reference in PerformanceTrendsResponse now that
1006 | # PerformancePattern is defined.
1007 | PerformanceTrendsResponse.model_rebuild()
```
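
The models above are plain Pydantic v2 classes, so payloads can be validated and serialized without a running UMS server. A minimal sketch (field values are illustrative, not real UMS data; the import path mirrors the file location shown above):

```python
from pydantic import ValidationError

from ultimate_mcp_server.core.ums_api.ums_models import (
    BulkOperationRequest,
    FlameGraphNode,
)

# Constraint checking: operation_type must match ^(delete|archive|merge)$,
# and memory_ids must contain at least one entry.
req = BulkOperationRequest.model_validate(
    {"operation_type": "merge", "memory_ids": ["mem_1", "mem_2"], "target_memory_id": "mem_1"}
)
print(req.model_dump_json())

try:
    BulkOperationRequest.model_validate({"operation_type": "purge", "memory_ids": []})
except ValidationError as exc:
    print(exc.error_count(), "validation error(s)")  # both fields fail here

# FlameGraphNode nests arbitrarily deep once model_rebuild() has resolved
# its self-reference.
root = FlameGraphNode(
    name="workflow",
    value=12.5,
    children=[FlameGraphNode(name="tool_call", value=4.2)],
)
print(root.children[0].name)
```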

--------------------------------------------------------------------------------
/run_all_demo_scripts_and_check_for_errors.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Runs all demo scripts in the 'examples' folder sequentially and checks for errors.
  4 | 
  5 | Uses rich for progress tracking and a summary report.
  6 | Incorporates specific knowledge about expected outcomes for individual scripts.
  7 | """
  8 | 
  9 | import asyncio
  10 | import re  # Regex matching for allowed stderr/stdout patterns
 11 | import sys
 12 | from pathlib import Path
 13 | from typing import Any, Dict, List, Tuple
 14 | 
 15 | from rich import box
 16 | from rich.console import Console
 17 | from rich.live import Live
 18 | from rich.markup import escape
 19 | from rich.panel import Panel
 20 | from rich.progress import BarColumn, Progress, SpinnerColumn, TextColumn
 21 | from rich.rule import Rule
 22 | from rich.table import Table
 23 | 
 24 | # --- Configuration ---
 25 | EXAMPLES_DIR = Path(__file__).parent / "examples"
 26 | PYTHON_EXECUTABLE = sys.executable # Use the same Python interpreter that runs this script
 27 | OUTPUT_LOG_FILE = Path(__file__).parent / "all_demo_script_console_output_log.txt"
 28 | 
 29 | # Scripts to skip (not actual demo scripts or special cases)
 30 | SCRIPTS_TO_SKIP = ["sse_client_demo.py", "web_automation_instruction_packs.py", "__init__.py"]
 31 | 
 32 | # Strings indicating a critical error in the output (used if no specific allowed patterns)
 33 | DEFAULT_ERROR_INDICATORS = ["Traceback (most recent call last):", "CRITICAL"]
 34 | 
 35 | # --- Individual Demo Expectations ---
 36 | # Define expected outcomes for specific scripts.
 37 | # - expected_exit_code: The code the script should exit with (default: 0)
 38 | # - allowed_stderr_patterns: List of regex patterns for stderr messages that are OK for this script.
 39 | #                            If this list exists, DEFAULT_ERROR_INDICATORS are ignored for stderr.
 40 | # - allowed_stdout_patterns: List of regex patterns for stdout messages that are OK (less common).
 41 | #                            If this list exists, DEFAULT_ERROR_INDICATORS are ignored for stdout.
 42 | DEMO_EXPECTATIONS: Dict[str, Dict[str, Any]] = {
 43 |     # --- Scripts with specific known patterns ---
 44 |     "text_redline_demo.py": {
 45 |         "expected_exit_code": 0,
 46 |         "allowed_stderr_patterns": [
 47 |             # Provider availability issues - expected when API keys aren't configured
 48 |             r"Provider '(openai|anthropic|google)' not available or initialized", 
 49 |             r"Failed to get provider: No valid OpenAI key found",
 50 |             # Standard setup messages - not errors
 51 |             r"Configuration not yet loaded\. Loading now\.\.\.",
 52 |             r"Configuration loaded and environment variables applied via decouple\.",
 53 |             # UI formatting patterns - not errors
 54 |             r"─+.*─+", # Section dividers
 55 |         ]
 56 |     },
 57 |     "filesystem_operations_demo.py": {
 58 |         "expected_exit_code": 0,
 59 |         "allowed_stderr_patterns": [
 60 |             # SPECIFIC intentional demo patterns - these test protection features
 61 |             r"Protection Triggered! Deletion of \d+ files blocked", # Specific deletion protection test
 62 |             r"Could not set utime for.*?: \[Errno \d+\]", # Specific file timestamp issue with exact error format
 63 |             # Configuration verification messages - specific to demo setup
 64 |             r"WARNING: No allowed directories loaded in filesystem configuration", # Specific verification message
 65 |             r"WARNING: Temporary directory .* not found in loaded allowed dirs:", # Specific verification message
 66 |             # OS-specific limitations - with specific reasons
 67 |             r"WARNING: Symlink creation might not be supported or permitted on this system", # Windows-specific limitation
 68 |             r"WARNING: Could not create symlink \(.*\): \[Errno \d+\]", # OS-specific permission error with exact format
 69 |             # Standard setup messages - not errors
 70 |             r"Configuration not yet loaded\. Loading now\.\.\.",
 71 |             r"Configuration loaded and environment variables applied via decouple\.",
 72 |             r"Forcing configuration reload due to GATEWAY_FORCE_CONFIG_RELOAD=true\.",
 73 |             # UI formatting patterns - not errors
 74 |             r"─+.*─+", # Section dividers
 75 |         ],
 76 |         "allowed_stdout_patterns": [
 77 |             # Specific allowed stdout patterns that aren't errors
 78 |             r"WARNING: .*", # Warning messages in stdout
 79 |             r"ERROR: .*", # Error messages in stdout (these are demo outputs, not actual errors)
 80 |         ]
 81 |     },
 82 |     "sql_database_interactions_demo.py": {
 83 |         "expected_exit_code": 0,
 84 |         "allowed_stderr_patterns": [
 85 |             # SPECIFIC column statistics computation issues - known data type limitation
 86 |             r"Could not compute statistics for column customers\.signup_date: 'str' object has no attribute 'isoformat'", # Specific data type issue
 87 |             r"Could not compute statistics for column orders\.order_date: 'str' object has no attribute 'isoformat'", # Specific data type issue
 88 |             # Demo-specific database connection scenarios - intentional examples
 89 |             r"Connection failed: \(sqlite3\.OperationalError\) unable to open database file", # Specific SQLite error format
 90 |             r"Failed to connect to database \(sqlite:///.*\): \(sqlite3\.OperationalError\) unable to open database file", # Specific connection error format
 91 |             # Standard setup messages - not errors
 92 |             r"Configuration not yet loaded\. Loading now\.\.\.",
 93 |             r"Configuration loaded and environment variables applied via decouple\.",
 94 |         ]
 95 |     },
 96 |     "rag_example.py": {
 97 |         "expected_exit_code": 0,
 98 |         "allowed_stderr_patterns": [
 99 |             # SPECIFIC cleanup messages with reasons - intentional error handling
100 |             r"Could not delete collection 'demo_.*?': Collection '.*?' does not exist", # Non-existent collection during cleanup
101 |             r"Error deleting knowledge base 'demo-kb': Knowledge base 'demo-kb' not found", # Non-existent KB during cleanup
102 |             r"Error directly deleting vector collection 'demo_.*?': Collection '.*?' does not exist", # Non-existent collection
103 |             # SPECIFIC provider availability issues - expected when API keys aren't configured
104 |             r"Provider '(openai|anthropic|google)' not available or initialized", # Missing specific provider
105 |             r"No suitable provider found for embedding generation", # No embedding provider available
106 |             r"OpenAIError: No API key provided.", # Specific API key error
107 |             # Standard setup messages - not errors
108 |             r"Configuration not yet loaded\. Loading now\.\.\.",
109 |             r"Configuration loaded and environment variables applied via decouple\.",
110 |             r"Initializing Gateway: Loading configuration\.\.\.",
111 |             # UI formatting patterns - not errors
112 |             r"─+.*─+", # Section dividers
113 |         ]
114 |     },
115 |     "marqo_fused_search_demo.py": {
116 |         "expected_exit_code": 0,
117 |         "allowed_stderr_patterns": [
118 |             # SPECIFIC setup/config issues - expected on systems without Marqo
119 |             r"Marqo config file not found at path: .*config/marqo\.json", # Specific config file path
120 |             r"Error decoding Marqo config file: No JSON object could be decoded", # Specific JSON parsing error
121 |             r"Exiting demo as Marqo config could not be loaded\.", # Specific exit message
122 |             # SPECIFIC connection issues - expected on systems without Marqo service
123 |             r"Connection refused: \[Errno 111\] Connection refused", # Specific connection error with errno
124 |             # SPECIFIC skipping behavior - expected for incomplete setup
125 |             r"Skipping Example \d+: No suitable .* field found in dataset", # Specific reason for skipping
126 |         ]
127 |     },
128 |     "advanced_vector_search_demo.py": {
129 |         "expected_exit_code": 0,
130 |         "allowed_stderr_patterns": [
131 |             # Provider availability issues
132 |             r"Provider '(openai|anthropic|google)' not available or initialized",
133 |             r"No suitable provider found for embedding generation",
134 |             # Standard setup messages - not errors
135 |             r"Configuration not yet loaded\. Loading now\.\.\.",
136 |             r"Configuration loaded and environment variables applied via decouple\.",
137 |             # UI formatting patterns - not errors
138 |             r"─+.*─+", # Section dividers
139 |         ]
140 |     },
141 |     "vector_search_demo.py": {
142 |         "expected_exit_code": 0,
143 |         "allowed_stderr_patterns": [
144 |             # SPECIFIC cleanup messages with reasons - intentional cleanup operations
145 |             r"Could not delete collection 'demo_.*?': Collection '.*?' does not exist", # Non-existent collection during cleanup
146 |             # SPECIFIC provider availability issues - expected when API keys aren't configured
147 |             r"Failed to initialize provider '(openai|anthropic|google)': .*API key.*", # Specific provider with API key issue
148 |             r"No suitable provider found for embedding generation", # Specific embedding provider error
149 |             # SPECIFIC demo workflow messages - expected for educational examples
150 |             r"Skipping RAG demo - embedding provider not available", # Specific reason for skipping demo
151 |             # Standard setup messages - not errors
152 |             r"Configuration not yet loaded\. Loading now\.\.\.",
153 |             r"Configuration loaded and environment variables applied via decouple\.",
154 |             # UI formatting patterns - not errors
155 |             r"─+.*─+", # Section dividers
156 |         ]
157 |     },
158 |     "prompt_templates_demo.py": {
159 |         "expected_exit_code": 0,
160 |         "allowed_stderr_patterns": [
161 |             # SPECIFIC intentional template demo cases - expected behavior demonstrations
162 |             r"Template 'non_existent_template\.txt' not found in .*/templates/", # Specific non-existent template
163 |             r"Could not render with missing variables: \['variable_name'\]", # Specific missing variable demonstration
164 |             # SPECIFIC provider availability messages - expected when API keys aren't configured
165 |             r"No providers available for completion with template", # Specific provider availability message
166 |             # Standard setup messages - not errors
167 |             r"Initializing Gateway: Loading configuration\.\.\.", 
168 |             r"Configuration loaded and environment variables applied via decouple\.",
169 |             r"Ultimate MCP Server .* initialized", 
170 |             r"Initializing LLM providers",
171 |             r"Configuration not yet loaded\. Loading now\.\.\.",
172 |             # UI formatting patterns - not errors
173 |             r"─+.*─+", # Section dividers
174 |         ]
175 |     },
176 |     "tournament_code_demo.py": {
177 |         "expected_exit_code": 0,
178 |         "allowed_stderr_patterns": [
179 |             # Intentional demo cases (clean slate testing)
180 |             r"Error reading state file directly", r"State file not found at", # First run of tournament
181 |             r"No functions found in the code", # Test for empty code
182 |             # Known state handling messages
183 |             r"Cleanup error:", # Non-critical cleanup issues
184 |             # Provider availability (expected if not configured)
185 |             r"Failed to initialize providers", # Expected when API keys not configured
186 |             # Initialization logging (not errors)
187 |             r"Gateway initialized",
188 |             r"Initializing Gateway.*",
189 |             # Common setup/config messages (not errors)
190 |             r"Configuration not yet loaded\. Loading now\.\.\.",
191 |             r"Configuration loaded and environment variables applied via decouple\.",
192 |             # Formatting patterns (not errors)
193 |             r"─+.*─+", # Section dividers
194 |             r"INFO.*", # INFO level log messages
195 |         ]
196 |     },
197 |     "tournament_text_demo.py": {
198 |         "expected_exit_code": 0,
199 |         "allowed_stderr_patterns": [
200 |             # Intentional demo cases (clean slate testing)
201 |             r"Error reading state file directly", r"State file not found at", # First run of tournament
202 |             # Provider availability (expected if not configured)
203 |             r"Provider .* not available for evaluation", # Expected when API keys missing
204 |             r"Failed to initialize providers", # Expected when API keys missing
205 |             # Timeout handling (acceptable on slow CI)
206 |             r"Evaluation with .* timed out", # Long-running ops may timeout
207 |             # Common setup/config messages (not errors)
208 |             r"Gateway initialized",
209 |             r"Initializing Gateway.*",
210 |             r"Configuration not yet loaded\. Loading now\.\.\.",
211 |             r"Configuration loaded and environment variables applied via decouple\.",
212 |             # Formatting patterns (not errors)
213 |             r"─+.*─+", # Section dividers
214 |             r"INFO.*", # INFO level log messages
215 |         ]
216 |     },
217 |     "test_code_extraction.py": {
218 |         "expected_exit_code": 0,
219 |         "allowed_stderr_patterns": [
220 |             # Intentional demo cases (clean slate testing)
221 |             r"Error loading tournament state: .*No such file or directory", # First run
222 |             r"Failed to load tournament state", # First run
223 |             r"No round results found", # Expected for empty state
224 |             # Provider availability (expected if not configured)
225 |             r"Failed to initialize providers", # Expected if API keys not present
226 |             # Common setup/config messages (not errors)
227 |             r"Initializing Gateway", r"Configuration loaded", 
228 |             r"Ultimate MCP Server .* initialized", r"Initializing LLM providers",
229 |             r"Configuration not yet loaded\. Loading now\.\.\.",
230 |             r"Configuration loaded and environment variables applied via decouple\.",
231 |             # Formatting patterns (not errors)
232 |             r"─+.*─+", # Section dividers
233 |             r"INFO.*", # INFO level log messages
234 |             r"WARNING.*", # WARNING level log messages
235 |         ]
236 |     },
237 |     "advanced_extraction_demo.py": {
238 |         "expected_exit_code": 0, 
239 |         "allowed_stderr_patterns": [
240 |             # Provider availability (expected if not configured)
241 |             r"Failed to get OpenAI provider", # Expected if API key not present
242 |             r"Failed to initialize OpenAI provider", # Expected if API key not present
243 |             # Common setup/config messages (not errors)
244 |             r"Configuration not yet loaded\. Loading now\.\.\.",
245 |             r"Configuration loaded and environment variables applied via decouple\.",
246 |             # Formatting patterns (not errors)
247 |             r"─+.*─+", # Section dividers
248 |         ], 
249 |         # Allow the skip message in stdout
250 |         "allowed_stdout_patterns": [r"Skipping .* demo - no provider available", r"Raw Model Output \(JSON parsing failed\)"]
251 |     },
252 |     "analytics_reporting_demo.py": {
253 |         "expected_exit_code": 0,
254 |         "allowed_stderr_patterns": [
255 |             # SPECIFIC provider availability issues - expected when API keys aren't configured
256 |             r"Failed to get/initialize provider '(openai|anthropic|google)': .*", # Specific provider with reason
257 |             r"No providers could be initialized for this demonstration", # Specific provider initialization message
258 |             r"No default model found for provider '(openai|anthropic|google)'", # Specific model availability issue
259 |             # Standard setup messages - not errors
260 |             r"Configuration not yet loaded\. Loading now\.\.\.",
261 |             r"Configuration loaded and environment variables applied via decouple\.",
262 |             # UI formatting patterns - not errors
263 |             r"─+.*─+", # Section dividers
264 |             # Logging patterns - not errors
265 |             r"INFO \d{2}/\d{2}/\d{2} \d{2}:\d{2}:\d{2}.*", # Timestamped INFO logs
266 |             # Initialization messages - not errors
267 |             r"Simulating usage with \d+ providers\." # Specific simulation statement
268 |         ]
269 |     },
270 |     "basic_completion_demo.py": {
271 |         "expected_exit_code": 0,
272 |         "allowed_stderr_patterns": [
273 |             # SPECIFIC provider availability issues - expected when API keys aren't configured
274 |             r"Provider '(openai|anthropic|google)' not available or initialized", # Specific missing provider
275 |             r"All providers failed: No providers available for completion", # Specific provider failure
276 |             # SPECIFIC demo features - expected component testing
277 |             r"Error with cached completion demo: Cache is disabled", # Specific cache demo error
278 |             # Standard setup and logging messages - not errors
279 |             r"Initializing Gateway: Loading configuration\.\.\.",
280 |             r"Configuration not yet loaded\. Loading now\.\.\.",
281 |             r"Configuration loaded and environment variables applied via decouple\.",
282 |             r"INFO \d{2}/\d{2}/\d{2} \d{2}:\d{2}:\d{2}.*", # Timestamped INFO logs
283 |             r"Ultimate MCP Server 'basic-completion-demo' initialized", # Specific initialization message
284 |         ] 
285 |     },
286 |     "browser_automation_demo.py": {
287 |         "expected_exit_code": 0,
288 |         "allowed_stderr_patterns": [
289 |             # Browser automation issues - expected during demos
290 |             r"Could not find search input element with selectors: .*", # Element not found error
291 |             r"playwright\._impl\._api_types\.TimeoutError: Timeout \d+ms exceeded", # Timeout error
292 |             r"net::ERR_CONNECTION_REFUSED at .*", # Connection error
293 |             r"Navigation failed: net::ERR_CONNECTION_REFUSED at .*", # Navigation error
294 |             r"Execution error in.*: .*", # General execution errors 
295 |             r"Traceback \(most recent call last\):.*", # Tracebacks from browser automation
296 |             # Provider availability issues
297 |             r"Provider '(openai|anthropic|google)' not available or initialized",
298 |             # Standard setup messages - not errors
299 |             r"Configuration not yet loaded\. Loading now\.\.\.",
300 |             r"Configuration loaded and environment variables applied via decouple\.",
301 |             # UI formatting patterns - not errors
302 |             r"─+.*─+", # Section dividers
303 |         ]
304 |     },
305 |     "claude_integration_demo.py": {
306 |         "expected_exit_code": 0,
307 |         "allowed_stderr_patterns": [
308 |             # SPECIFIC provider availability issues - expected when API keys aren't configured
309 |             r"Provider 'anthropic' not available or initialized", # Specific Claude provider missing
310 |             r"No suitable Claude model found in available models: \[\]", # Specific Claude model selection issue
311 |             r"Selected models not found: \['claude-3-opus-20240229', 'claude-3-sonnet-20240229'\]", # Specific model availability issue
312 |             r"Model 'claude-3-opus-20240229' not available, falling back to default\.", # Specific fallback behavior
313 |             # Standard setup messages - not errors
314 |             r"Initializing Gateway: Loading configuration\.\.\.",
315 |             r"Configuration loaded and environment variables applied via decouple\.",
316 |             r"Ultimate MCP Server 'claude-demo' initialized", 
317 |             r"Initializing LLM providers",
318 |             r"Configuration not yet loaded\. Loading now\.\.\.",
319 |             # Logging patterns - not errors
320 |             r"INFO \d{2}/\d{2}/\d{2} \d{2}:\d{2}:\d{2}.*", # Timestamped INFO logs
321 |             # UI formatting patterns - not errors
322 |             r"─+.*─+", # Section dividers
323 |             ]
324 |     },
325 |     "compare_synthesize_demo.py": {
326 |         "expected_exit_code": 0,
327 |         "allowed_stderr_patterns": [
328 |             # SPECIFIC provider availability issues - expected when API keys aren't configured
329 |             r"Failed to initialize providers: No providers available", # Specific provider initialization message
330 |             # SPECIFIC tool registration messages - expected behavior for specialized tools
331 |             r"compare_and_synthesize tool FAILED to register: Tool 'compare_and_synthesize' requires 2\+ providers", # Specific registration failure reason
332 |             # Standard setup messages - not errors
333 |             r"Initializing Gateway: Loading configuration\.\.\.",
334 |             r"Configuration not yet loaded\. Loading now\.\.\.",
335 |             r"Configuration loaded and environment variables applied via decouple\.",
336 |             r"INFO \d{2}/\d{2}/\d{2} \d{2}:\d{2}:\d{2}.*", # Timestamped INFO logs
337 |             r"Ultimate MCP Server 'compare-synthesize-demo-v2' initialized", # Specific initialization message
338 |         ] 
339 |     },
340 |     "cost_optimization.py": {
341 |         "expected_exit_code": 0,
342 |         "allowed_stderr_patterns": [
343 |             # SPECIFIC provider availability issues - expected when API keys aren't configured
344 |             r"API key for provider '(openai|anthropic|google)' not found", # Specific API key missing message
345 |             r"Could not determine provider for model '.*?'", # Specific model-provider mapping issue
346 |             r"No models met criteria: max_cost=\$\d+\.\d+, .*", # Specific criteria filtering result
347 |             # Standard setup messages - not errors
348 |             r"Configuration not yet loaded\. Loading now\.\.\.",
349 |             r"Configuration loaded and environment variables applied via decouple\.",
350 |             # UI formatting patterns - not errors
351 |             r"─+.*─+", # Section dividers
352 |             # Logging patterns - not errors
353 |             r"INFO \d{2}/\d{2}/\d{2} \d{2}:\d{2}:\d{2}.*", # Timestamped INFO logs
354 |         ] 
355 |     },
356 |     "document_processing.py": {
357 |         "expected_exit_code": 0,
358 |         "allowed_stderr_patterns": [
359 |             # SPECIFIC initialization messages - expected setup steps
360 |             r"Clearing cache before demonstration\.\.\.", # Specific cache operation
361 |             # Standard setup messages - not errors
362 |             r"Configuration not yet loaded\. Loading now\.\.\.",
363 |             r"Configuration loaded and environment variables applied via decouple\.",
364 |             # UI formatting patterns - not errors
365 |             r"─+.*─+", # Section dividers
366 |             # Logging patterns - not errors
367 |             r"INFO \d{2}/\d{2}/\d{2} \d{2}:\d{2}:\d{2}.*", # Timestamped INFO logs
368 |         ] 
369 |     },
370 |     "multi_provider_demo.py": {
371 |         "expected_exit_code": 0,
372 |         "allowed_stderr_patterns": [
373 |             # SPECIFIC provider availability issues - expected when API keys aren't configured
374 |             r"Provider '(openai|anthropic|google)' not available or initialized", # Specific provider not available
375 |             r"All providers failed: \['(openai|anthropic|google)'.*?\]", # Specific list of failed providers
376 |             # Standard setup messages - not errors
377 |             r"Initializing Gateway: Loading configuration\.\.\.",
378 |             r"Configuration loaded and environment variables applied via decouple\.",
379 |             r"Ultimate MCP Server 'multi-provider-demo' initialized", 
380 |             r"Initializing LLM providers",
381 |             r"Configuration not yet loaded\. Loading now\.\.\.",
382 |             # UI formatting patterns - not errors
383 |             r"─+.*─+", # Section dividers
384 |             # Logging patterns - not errors
385 |             r"INFO \d{2}/\d{2}/\d{2} \d{2}:\d{2}:\d{2}.*", # Timestamped INFO logs
386 |         ]
387 |     },
388 |     "simple_completion_demo.py": {
389 |         "expected_exit_code": 0,
390 |         "allowed_stderr_patterns": [
391 |             # SPECIFIC provider availability issues - expected when API keys aren't configured
392 |             r"Provider '(openai|anthropic|google)' not available", # Specific provider not available
393 |             # Standard setup messages - not errors
394 |             r"Initializing Gateway: Loading configuration\.\.\.",
395 |             r"Configuration loaded and environment variables applied via decouple\.",
396 |             r"Ultimate MCP Server 'simple-demo' initialized", 
397 |             r"Initializing LLM providers",
398 |             r"Configuration not yet loaded\. Loading now\.\.\.",
399 |             # UI formatting patterns - not errors
400 |             r"─+.*─+", # Section dividers
401 |             # Logging patterns - not errors
402 |             r"INFO \d{2}/\d{2}/\d{2} \d{2}:\d{2}:\d{2}.*", # Timestamped INFO logs
403 |         ]
404 |     },
405 |     "workflow_delegation_demo.py": {
406 |         "expected_exit_code": 0,
407 |         "allowed_stderr_patterns": [
408 |             # SPECIFIC provider availability messages - expected initialization info
409 |             r"Some API keys missing: \['(openai|anthropic|google)'.*?\]", # Specific API keys warning
410 |             r"Provider '(openai|anthropic|google)' not available", # Specific provider not available
411 |             r"Failed to initialize provider: Invalid API key or provider configuration", # Specific initialization error
412 |             # SPECIFIC initialization messages - expected setup steps
413 |             r"Initializing required providers for delegation demo", # Specific initialization message
414 |             r"All required API keys seem to be present", # Specific configuration check
415 |             # Standard setup messages - not errors
416 |             r"Configuration not yet loaded\. Loading now\.\.\.",
417 |             r"Configuration loaded and environment variables applied via decouple\.",
418 |             # UI formatting patterns - not errors
419 |             r"─+.*─+", # Section dividers
420 |             # Logging patterns - not errors
421 |             r"INFO \d{2}/\d{2}/\d{2} \d{2}:\d{2}:\d{2}.*", # Timestamped INFO logs
422 |         ]
423 |     },
424 |     "cache_demo.py": {
425 |         "expected_exit_code": 0,
426 |         "allowed_stderr_patterns": [
427 |             # SPECIFIC operational messages - expected configuration info
428 |             r"Cache is disabled \(GATEWAY__CACHE__ENABLED=false\)", # Specific cache configuration message
429 |             # Standard setup messages - not errors
430 |             r"Configuration not yet loaded\. Loading now\.\.\.",
431 |             r"Configuration loaded and environment variables applied via decouple\.",
432 |             # UI formatting patterns - not errors
433 |             r"─+.*─+", # Section dividers
434 |         ]
435 |     },
436 |     "audio_transcription_demo.py": {
437 |         "expected_exit_code": 0,
438 |         "allowed_stderr_patterns": [
439 |             # Provider availability issues
440 |             r"Provider '(openai|anthropic|google)' not available or initialized",
441 |             r"Failed to initialize OpenAI provider: Invalid API key", 
442 |             # Standard setup messages - not errors
443 |             r"Configuration not yet loaded\. Loading now\.\.\.",
444 |             r"Configuration loaded and environment variables applied via decouple\.",
445 |             # UI formatting patterns - not errors
446 |             r"─+.*─+", # Section dividers
447 |         ]
448 |     },
449 |     "entity_relation_graph_demo.py": {
450 |         "expected_exit_code": 0,
451 |         "allowed_stderr_patterns": [
452 |             # Provider availability issues
453 |             r"Provider '(openai|anthropic|google)' not available or initialized",
454 |             r"Skipping provider initialization as no API keys are available",
455 |             # Standard setup messages - not errors
456 |             r"Configuration not yet loaded\. Loading now\.\.\.",
457 |             r"Configuration loaded and environment variables applied via decouple\.",
458 |             # UI formatting patterns - not errors
459 |             r"─+.*─+", # Section dividers
460 |         ]
461 |     },
462 |     "grok_integration_demo.py": {
463 |         "expected_exit_code": 0,
464 |         "allowed_stderr_patterns": [
465 |             # Provider availability issues
466 |             r"Provider 'grok' not available or initialized",
467 |             r"No API key found for Grok",
468 |             # Standard setup messages - not errors
469 |             r"Configuration not yet loaded\. Loading now\.\.\.",
470 |             r"Configuration loaded and environment variables applied via decouple\.",
471 |             # UI formatting patterns - not errors
472 |             r"─+.*─+", # Section dividers
473 |         ]
474 |     },
475 |     "html_to_markdown_demo.py": {
476 |         "expected_exit_code": 0,
477 |         "allowed_stderr_patterns": [
478 |             # Provider availability issues
479 |             r"Provider '(openai|anthropic|google)' not available or initialized",
480 |             # Standard setup messages - not errors
481 |             r"Configuration not yet loaded\. Loading now\.\.\.",
482 |             r"Configuration loaded and environment variables applied via decouple\.",
483 |             # UI formatting patterns - not errors
484 |             r"─+.*─+", # Section dividers
485 |         ]
486 |     },
487 |     "measure_model_speeds.py": {
488 |         "expected_exit_code": 0,
489 |         "allowed_stderr_patterns": [
490 |             # Provider availability issues
491 |             r"Provider '(openai|anthropic|google|grok|meta)' not available or initialized",
492 |             r"No providers could be initialized",
493 |             # Standard setup messages - not errors
494 |             r"Configuration not yet loaded\. Loading now\.\.\.",
495 |             r"Configuration loaded and environment variables applied via decouple\.",
496 |             # UI formatting patterns - not errors
497 |             r"─+.*─+", # Section dividers
498 |         ]
499 |     },
500 |     "meta_api_demo.py": {
501 |         "expected_exit_code": 0,
502 |         "allowed_stderr_patterns": [
503 |             # Provider availability issues
504 |             r"Provider 'meta' not available or initialized",
505 |             r"No API key found for Meta",
506 |             # Standard setup messages - not errors
507 |             r"Configuration not yet loaded\. Loading now\.\.\.",
508 |             r"Configuration loaded and environment variables applied via decouple\.",
509 |             # UI formatting patterns - not errors
510 |             r"─+.*─+", # Section dividers
511 |         ]
512 |     },
513 |     "research_workflow_demo.py": {
514 |         "expected_exit_code": 0,
515 |         "allowed_stderr_patterns": [
516 |             # Provider availability issues
517 |             r"Provider '(openai|anthropic|google)' not available or initialized",
518 |             # Search and web access related messages
519 |             r"Failed to perform web search: .*",
520 |             r"Web search failed: .*",
521 |             # Standard setup messages - not errors
522 |             r"Configuration not yet loaded\. Loading now\.\.\.",
523 |             r"Configuration loaded and environment variables applied via decouple\.",
524 |             # UI formatting patterns - not errors
525 |             r"─+.*─+", # Section dividers
526 |         ]
527 |     },
528 |     "text_classification_demo.py": {
529 |         "expected_exit_code": 0,
530 |         "allowed_stderr_patterns": [
531 |             # Provider availability issues
532 |             r"Provider '(openai|anthropic|google)' not available or initialized",
533 |             # Standard setup messages - not errors
534 |             r"Configuration not yet loaded\. Loading now\.\.\.",
535 |             r"Configuration loaded and environment variables applied via decouple\.",
536 |             # UI formatting patterns - not errors
537 |             r"─+.*─+", # Section dividers
538 |         ]
539 |     },
540 | }
541 | 
542 | console = Console()
543 | 
544 | def find_demo_scripts() -> List[Path]:
545 |     """Find all Python demo scripts in the examples directory."""
546 |     if not EXAMPLES_DIR.is_dir():
547 |         console.print(f"[bold red]Error:[/bold red] Examples directory not found at '{EXAMPLES_DIR}'")
548 |         return []
549 |     
550 |     scripts = sorted([
551 |         p for p in EXAMPLES_DIR.glob("*.py") 
552 |         if p.is_file() and p.name not in SCRIPTS_TO_SKIP
553 |     ])
554 |     return scripts
555 | 
556 | async def run_script(script_path: Path) -> Tuple[int, str, str]:
557 |     """
558 |     Run a single Python script as a subprocess and capture its output.
559 |     
560 |     This async function executes a Python script in a separate process using the same
561 |     Python interpreter that's running this script. It captures both standard output
562 |     and standard error streams, as well as the exit code of the process.
563 |     
564 |     The function uses asyncio.create_subprocess_exec for non-blocking execution,
565 |     allowing multiple scripts to be run concurrently if needed, although the current
566 |     implementation runs them sequentially.
567 |     
568 |     Args:
569 |         script_path (Path): The path to the Python script to be executed.
570 |             This should be a fully resolved path object pointing to a valid Python file.
571 |     
572 |     Returns:
573 |         Tuple[int, str, str]: A tuple containing:
574 |             - exit_code (int): The return code of the process (0 typically means success)
575 |             - stdout (str): The captured standard output as a string, with encoding errors ignored
576 |             - stderr (str): The captured standard error as a string, with encoding errors ignored
577 |     
578 |     Note:
579 |         - The function waits for the script to complete before returning
580 |         - Any encoding errors in stdout/stderr are ignored during decoding
581 |         - The script is executed with the same Python interpreter as the parent process
582 |         - No extra arguments are passed; the script inherits the parent process environment
583 |     """
584 |     command = [PYTHON_EXECUTABLE, str(script_path)]
585 |     
586 |     process = await asyncio.create_subprocess_exec(
587 |         *command,
588 |         stdout=asyncio.subprocess.PIPE,
589 |         stderr=asyncio.subprocess.PIPE
590 |     )
591 |     
592 |     stdout, stderr = await process.communicate()
593 |     exit_code = process.returncode
594 |     
595 |     return exit_code, stdout.decode(errors='ignore'), stderr.decode(errors='ignore')
596 | 
597 | def check_for_errors(script_name: str, exit_code: int, stdout: str, stderr: str) -> Tuple[bool, str]:
598 |     """
599 |     Check script output against predefined expectations to determine success or failure.
600 |     
601 |     This function analyzes the execution results of a demo script and determines if it
602 |     succeeded or failed based on:
603 |     1. Comparing the actual exit code against the expected exit code for the script
604 |     2. Checking for unexpected error messages in stdout and stderr
605 |     3. Applying script-specific patterns for allowed errors and warnings
606 |     
607 |     The function uses the DEMO_EXPECTATIONS dictionary to get script-specific expectations
608 |     including allowed error patterns. For scripts without specific expectations defined,
609 |     it applies default success criteria (exit code 0 and no critical error indicators).
610 |     
611 |     The function handles two types of patterns for allowed output:
612 |     - allowed_stderr_patterns: Regex patterns for permitted messages in stderr
613 |     - allowed_stdout_patterns: Regex patterns for permitted messages in stdout
614 |     
615 |     Args:
616 |         script_name (str): Name of the script being checked (used to lookup expectations)
617 |         exit_code (int): The actual exit code returned by the script
618 |         stdout (str): The captured standard output from the script
619 |         stderr (str): The captured standard error from the script
620 |     
621 |     Returns:
622 |         Tuple[bool, str]: A tuple containing:
623 |             - success (bool): True if the script execution meets all success criteria
624 |             - reason (str): A descriptive message explaining the result
625 |                             "Success" for successful executions
626 |                             Error details for failed executions
627 |     
628 |     Note:
629 |         - Lines formatted as INFO, DEBUG, or WARNING log messages are filtered
630 |           out even when they do not match an allowed pattern
631 |         - Script-specific allowed patterns take precedence over default error indicators
632 |         - If no script-specific expectations exist, only the DEFAULT_ERROR_INDICATORS
633 |           are used to check for problems
634 |     """
635 |     
636 |     expectations = DEMO_EXPECTATIONS.get(script_name, {})
637 |     expected_exit_code = expectations.get("expected_exit_code", 0)
638 |     allowed_stderr_patterns = expectations.get("allowed_stderr_patterns", [])
639 |     allowed_stdout_patterns = expectations.get("allowed_stdout_patterns", [])
640 | 
641 |     # 1. Check Exit Code
642 |     if exit_code != expected_exit_code:
643 |         return False, f"Exited with code {exit_code} (expected {expected_exit_code})"
644 | 
645 |     # --- Refined Error Log Checking --- 
646 |     
647 |     def find_unexpected_lines(output: str, allowed_patterns: List[str], default_indicators: List[str]) -> List[str]:
648 |         """
649 |         Find lines in script output that indicate errors or unexpected behavior.
650 |         
651 |         This function analyzes the output of a script (either stdout or stderr) and
652 |         identifies lines that may indicate an error or unexpected behavior. It handles
653 |         two different checking modes:
654 |         
655 |         1. With allowed_patterns: All lines that don't match at least one of the allowed
656 |            patterns are considered unexpected.
657 |         2. Without allowed_patterns: Only lines containing any of the default_indicators
658 |            are considered unexpected.
659 |         
660 |         The first mode is more restrictive (whitelist approach) while the second is
661 |         more permissive (blacklist approach). The function chooses the appropriate mode
662 |         based on whether allowed_patterns is provided.
663 |         
664 |         Args:
665 |             output (str): The script output to analyze (either stdout or stderr)
666 |             allowed_patterns (List[str]): List of regex patterns for allowed output lines.
667 |                 If provided, any line not matching at least one pattern is unexpected.
668 |             default_indicators (List[str]): List of string indicators of critical errors.
669 |                 Only used when allowed_patterns is empty, to identify error lines.
670 |         
671 |         Returns:
672 |             List[str]: A list of lines from the output that are considered unexpected or
673 |                       indicative of errors. Empty list means no unexpected lines found.
674 |         
675 |         Note:
676 |             - Empty lines are always ignored
677 |             - When allowed_patterns is provided, the function uses a whitelist approach
678 |             - When allowed_patterns is empty, the function uses a blacklist approach
679 |             - Regex matching is used for allowed_patterns, simple substring matching for default_indicators
680 |         """
681 |         lines = output.strip().splitlines()
682 |         unexpected_lines = []
683 |         for line in lines:
684 |             line_content = line.strip()
685 |             if not line_content: # Skip blank lines
686 |                 continue
687 |                 
688 |             is_allowed = False
689 |             # Check against specific allowed patterns for this script
690 |             if allowed_patterns:
691 |                 for pattern in allowed_patterns:
692 |                     if re.search(pattern, line_content):
693 |                         is_allowed = True
694 |                         break
695 |             
696 |             # If specific patterns were defined and line wasn't allowed, it's unexpected
697 |             if allowed_patterns and not is_allowed:
698 |                  unexpected_lines.append(line)
699 |             # If no specific patterns were defined, check against default critical indicators only
700 |             elif not allowed_patterns:
701 |                 for indicator in default_indicators:
702 |                      if indicator in line_content: # Use 'in' for default indicators for simplicity
703 |                          unexpected_lines.append(line)
704 |                          break # Found a default indicator, no need to check others for this line
705 |                          
706 |         return unexpected_lines
707 |         
708 |     unexpected_stderr = find_unexpected_lines(stderr, allowed_stderr_patterns, DEFAULT_ERROR_INDICATORS)
709 |     unexpected_stdout = find_unexpected_lines(stdout, allowed_stdout_patterns, DEFAULT_ERROR_INDICATORS)
710 |     
711 |     # Filter out lines that are just INFO/DEBUG/WARNING level logs unless they are explicitly disallowed
712 |     # (This assumes the default log format: [MM/DD/YY HH:MM:SS] LEVEL ... or a bare LEVEL prefix)
713 |     def is_ignorable_log(line: str) -> bool:
714 |         """
715 |         Determine if a log line can be safely ignored for error detection.
716 |         
717 |         This function identifies standard INFO, DEBUG, and WARNING level log messages
718 |         that should typically be ignored when checking for errors, unless they are
719 |         explicitly flagged as problematic by other patterns.
720 |         
721 |         The function recognizes common log line formats:
722 |         - Standard timestamp-prefixed format: [MM/DD/YY HH:MM:SS] LEVEL message
723 |         - Simple level-prefixed format: LEVEL message
724 |         
725 |         Args:
726 |             line (str): The log line to analyze
727 |             
728 |         Returns:
729 |             bool: True if the line appears to be a standard INFO, DEBUG, or WARNING
730 |                  log message that can be safely ignored. False otherwise.
731 |                  
732 |         Note:
733 |             This function only identifies the format of standard log lines;
734 |             it doesn't analyze the content of the messages themselves.
735 |         """
736 |         stripped = line.strip()
737 |         return bool(
738 |             re.match(r"^\[\d{2}/\d{2}/\d{2} \d{2}:\d{2}:\d{2}\]\s+(INFO|DEBUG|WARNING)\s+", stripped) or
739 |             re.match(r"^\s*(INFO|DEBUG|WARNING)\s+", stripped)
740 |         )
741 | 
742 |     actual_stderr_errors = [line for line in unexpected_stderr if not is_ignorable_log(line)]
743 |     actual_stdout_errors = [line for line in unexpected_stdout if not is_ignorable_log(line)]
744 |     
745 |     if actual_stderr_errors:
746 |          return False, f"Unexpected errors found in stderr: ...{escape(actual_stderr_errors[0])}..."
747 |          
748 |     if actual_stdout_errors:
749 |          return False, f"Unexpected errors found in stdout: ...{escape(actual_stdout_errors[0])}..."
750 |     # --- End Refined Error Log Checking ---
751 | 
752 |     # If exit code matches and no unexpected critical errors found
753 |     return True, "Success"
754 | 
755 | def write_script_output_to_log(script_name: str, exit_code: int, stdout: str, stderr: str, is_success: bool):
756 |     """
757 |     Write the complete output of a script run to the consolidated log file.
758 |     
759 |     This function appends the execution results of a single script to a consolidated
760 |     log file for record-keeping and debugging purposes. The log includes:
761 |     - A header with the script name, exit code, and success/failure status
762 |     - The complete stdout output captured during execution
763 |     - The complete stderr output captured during execution
764 |     
765 |     Each script's log entry is clearly separated with delimiters for easy navigation
766 |     and searching within the log file.
767 |     
768 |     Args:
769 |         script_name (str): Name of the script that was executed
770 |         exit_code (int): The exit code returned by the script
771 |         stdout (str): The complete standard output captured during execution
772 |         stderr (str): The complete standard error captured during execution
773 |         is_success (bool): Whether the script execution was considered successful
774 |                          according to the check_for_errors criteria
775 |     
776 |     Returns:
777 |         None: The function writes to the log file specified by OUTPUT_LOG_FILE
778 |              but doesn't return any value
779 |     
780 |     Note:
781 |         - The function appends to the log file, preserving previous entries
782 |         - If stdout or stderr is empty, a placeholder message is logged
783 |         - No limit is placed on the size of the logged output
784 |     """
785 |     with open(OUTPUT_LOG_FILE, "a", encoding="utf-8") as log_file:
786 |         # Write script header with result
787 |         log_file.write(f"\n{'=' * 80}\n")
788 |         status = "SUCCESS" if is_success else "FAILURE"
789 |         log_file.write(f"SCRIPT: {script_name} - EXIT CODE: {exit_code} - STATUS: {status}\n")
790 |         log_file.write(f"{'-' * 80}\n\n")
791 |         
792 |         # Write stdout
793 |         log_file.write("STDOUT:\n")
794 |         log_file.write(stdout if stdout.strip() else "(No stdout)\n")
795 |         log_file.write("\n")
796 |         
797 |         # Write stderr
798 |         log_file.write("STDERR:\n")
799 |         log_file.write(stderr if stderr.strip() else "(No stderr)\n")
800 |         log_file.write("\n")
801 | 
802 | async def main():
803 |     """
804 |     Main function to run all demo scripts and generate a comprehensive report.
805 |     
806 |     This async function coordinates the entire process of:
807 |     1. Finding all demo scripts in the examples directory
808 |     2. Running each script sequentially and capturing its output
809 |     3. Checking each script's result against expected behavior
810 |     4. Logging detailed output to a consolidated log file
811 |     5. Generating a rich, interactive summary report in the console
812 |     
813 |     The function implements a progress bar display using rich.progress to provide
814 |     real-time feedback during execution. After all scripts have run, it displays
815 |     a detailed table summarizing the results of each script, including status,
816 |     exit code, and relevant output snippets.
817 |     
818 |     The function follows these specific steps:
819 |     - Locate Python scripts in the examples directory (skipping certain files)
820 |     - Initialize/clear the consolidated log file
821 |     - Run each script in sequence, updating the progress bar
822 |     - Check each script's output against expectations
823 |     - Write detailed output for each script to the log file
824 |     - Generate and display a summary table with success/failure indicators
825 |     - Display final counts of succeeded and failed scripts
826 |     
827 |     Returns:
828 |         int: Exit code for the parent process:
829 |             - 0 if all scripts succeed
830 |             - 1 if any script fails or if no scripts are found
831 |     
832 |     Note:
833 |         - Scripts listed in SCRIPTS_TO_SKIP are excluded from execution
834 |         - The function creates a new consolidated log file each time it runs
835 |         - Progress information is displayed using a rich progress bar
836 |         - The summary table highlights both successful and failed scripts
837 |     """
838 |     console.print(Rule("[bold blue]Running All Example Scripts[/bold blue]"))
839 |     
840 |     scripts = find_demo_scripts()
841 |     if not scripts:
842 |         console.print("[yellow]No demo scripts found to run.[/yellow]")
843 |         return 1
844 |         
845 |     console.print(f"Found {len(scripts)} demo scripts in '{EXAMPLES_DIR}'.")
846 |     
847 |     # Initialize/clear the output log file
848 |     with open(OUTPUT_LOG_FILE, "w", encoding="utf-8") as log_file:
849 |         log_file.write("DEMO SCRIPT CONSOLE OUTPUT LOG\n")
850 |         log_file.write(f"Generated by {Path(__file__).name}\n")
851 |         log_file.write(f"{'=' * 80}\n\n")
852 |     
853 |     results = []
854 |     success_count = 0
855 |     fail_count = 0
856 |     
857 |     # --- Progress Bar Setup ---
858 |     progress = Progress(
859 |         SpinnerColumn(),
860 |         TextColumn("[progress.description]{task.description}", justify="right"),
861 |         BarColumn(bar_width=None),
862 |         TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
863 |         TextColumn("({task.completed}/{task.total})"),
864 |         console=console,
865 |         transient=False # Keep progress bar visible after completion
866 |     )
867 | 
868 |     task_id = progress.add_task("[cyan]Running scripts...", total=len(scripts))
869 | 
870 |     with Live(progress, console=console, vertical_overflow="visible"):
871 |         for script in scripts:
872 |             script_name = script.name
873 |             progress.update(task_id, description=f"[cyan]Running {script_name}...")
874 | 
875 |             exit_code, stdout, stderr = await run_script(script)
876 |             is_success, reason = check_for_errors(script_name, exit_code, stdout, stderr)
877 |             
878 |             # Log all output to the consolidated log file
879 |             write_script_output_to_log(script_name, exit_code, stdout, stderr, is_success)
880 |             
881 |             results.append({
882 |                 "script": script_name,
883 |                 "success": is_success,
884 |                 "reason": reason,
885 |                 "exit_code": exit_code,
886 |                 "stdout": stdout,
887 |                 "stderr": stderr
888 |             })
889 | 
890 |             if is_success:
891 |                 success_count += 1
892 |             else:
893 |                 fail_count += 1
894 |             
895 |             progress.update(task_id, advance=1)
896 |         
897 |         progress.update(task_id, description="[bold green]All scripts finished![/bold green]")
898 |         await asyncio.sleep(0.5) # Allow final update to render
899 | 
900 |     # --- Summary Report ---
901 |     console.print(Rule("[bold blue]Demo Run Summary[/bold blue]"))
902 |     
903 |     summary_table = Table(title="Script Execution Results", box=box.ROUNDED, show_header=True, header_style="bold magenta")
904 |     summary_table.add_column("Script Name", style="cyan", no_wrap=True)
905 |     summary_table.add_column("Status", style="white")
906 |     summary_table.add_column("Exit Code", style="yellow", justify="right")
907 |     summary_table.add_column("Reason / Output Snippet", style="white")
908 |     
909 |     for result in results:
910 |         status_icon = "[green]✅ SUCCESS[/green]" if result["success"] else "[bold red]❌ FAILURE[/bold red]"
911 |         reason_or_output = result["reason"]
912 |         
913 |         # --- Enhanced Snippet Logic ---
914 |         # Prioritize showing snippet related to the failure reason
915 |         if not result["success"]:
916 |             output_to_search = result["stderr"] + result["stdout"] # Combined output
917 |             snippet = ""
918 |             
919 |             # If failure is due to unexpected error message
920 |             if "Unexpected errors found" in reason_or_output:
921 |                 # Extract the specific error shown in the reason
922 |                 match = re.search(r"Unexpected errors found in (stdout|stderr): \.\.\.(.*)\.\.\.\"?", reason_or_output)
923 |                 if match:
924 |                     error_snippet_text = match.group(2).strip()
925 |                     # Try to find this snippet in the actual output
926 |                     start_idx = output_to_search.find(error_snippet_text)
927 |                     if start_idx != -1:
928 |                         # Find the start of the line containing the snippet
929 |                         line_start_idx = output_to_search.rfind('\n', 0, start_idx) + 1
930 |                         lines_around_error = output_to_search[line_start_idx:].splitlines()
931 |                         snippet = "\n".join(lines_around_error[:5]) # Show 5 lines from error
932 |                         if len(lines_around_error) > 5:
933 |                             snippet += "\n..."
934 |            
935 |             # If failure is due to exit code, show end of stderr/stdout
936 |             elif "Exited with code" in reason_or_output:
937 |                 if result["stderr"].strip():
938 |                      lines = result["stderr"].strip().splitlines()
939 |                      snippet = "\n".join(lines[-5:]) # Last 5 lines of stderr
940 |                 elif result["stdout"].strip():
941 |                      lines = result["stdout"].strip().splitlines()
942 |                      snippet = "\n".join(lines[-5:]) # Last 5 lines of stdout
943 |            
944 |             # Fallback if no specific snippet found yet for failure
945 |             if not snippet:
946 |                  lines = output_to_search.strip().splitlines()
947 |                  snippet = "\n".join(lines[-5:]) # Last 5 lines overall
948 | 
949 |             if snippet:
950 |                  reason_or_output += f"\n---\n[dim]{escape(snippet)}[/dim]"
951 | 
952 |         elif result["success"]:
953 |              # Show last few lines of stdout for successful runs
954 |              lines = result["stdout"].strip().splitlines()
955 |              if lines:
956 |                  snippet = "\n".join(lines[-3:]) # Show last 3 lines
957 |                  reason_or_output += f"\n---\n[dim]{escape(snippet)}[/dim]"
958 |              else: # Handle case with no stdout
959 |                   reason_or_output += "\n---\n[dim](No stdout produced)[/dim]"
960 |         # --- End Enhanced Snippet Logic ---
961 | 
962 |         summary_table.add_row(
963 |             result["script"],
964 |             status_icon,
965 |             str(result["exit_code"]),
966 |             reason_or_output
967 |         )
968 |         
969 |     console.print(summary_table)
970 |     
971 |     # --- Final Count ---
972 |     console.print(Rule())
973 |     total_scripts = len(scripts)
974 |     final_message = f"[bold green]{success_count}[/bold green] succeeded, [bold red]{fail_count}[/bold red] failed out of {total_scripts} scripts."
975 |     final_color = "green" if fail_count == 0 else "red"
976 |     console.print(Panel(final_message, border_style=final_color, expand=False))
977 |     
978 |     console.print(f"\nComplete output log saved to: [cyan]{OUTPUT_LOG_FILE}[/cyan]")
979 |     
980 |     return 1 if fail_count > 0 else 0
981 | 
982 | if __name__ == "__main__":
983 |     exit_code = asyncio.run(main())
984 |     sys.exit(exit_code) 
```
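
The heart of `check_for_errors` is the nested `find_unexpected_lines` helper, which switches between a whitelist and a blacklist mode. The standalone sketch below (hypothetical, not part of the repo; `classify_output` is an illustrative name) condenses that logic so the two modes are easy to compare.

```python
import re
from typing import List, Tuple

def classify_output(
    output: str,
    allowed_patterns: List[str],
    default_indicators: List[str],
) -> Tuple[bool, List[str]]:
    """Condensed model of the runner's two matching modes: with
    allowed_patterns, any non-matching line is flagged (whitelist);
    without them, only lines containing a default indicator are flagged."""
    flagged: List[str] = []
    for line in (ln.strip() for ln in output.splitlines()):
        if not line:
            continue
        if allowed_patterns:
            if not any(re.search(p, line) for p in allowed_patterns):
                flagged.append(line)
        elif any(indicator in line for indicator in default_indicators):
            flagged.append(line)
    return (not flagged, flagged)

# Whitelist mode: configuration chatter is permitted, anything else is flagged.
ok, bad = classify_output(
    "Configuration not yet loaded. Loading now...\nsomething unexpected broke",
    allowed_patterns=[r"Configuration not yet loaded"],
    default_indicators=["Traceback (most recent call last):", "CRITICAL"],
)
print(ok, bad)  # False ['something unexpected broke']

# Blacklist mode: with no allowed patterns, only default indicators count.
ok, bad = classify_output(
    "INFO everything is fine",
    allowed_patterns=[],
    default_indicators=["Traceback (most recent call last):", "CRITICAL"],
)
print(ok, bad)  # True []
```

The whitelist mode is what makes the per-script expectations safe to maintain: adding a pattern suppresses one specific, known-benign message rather than a whole class of errors.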
Page 17/45