#
This is page 20 of 45 of a flattened Markdown export of the repository (approximately 37,078 of 50,000 tokens; 2 of 207 files on this page). Source line numbers are included in each file listing. Use http://codebase.md/dicklesworthstone/llm_gateway_mcp_server?lines=true&page={x} to view the other pages and the full context.

# Directory Structure

```
├── .cursorignore
├── .env.example
├── .envrc
├── .gitignore
├── additional_features.md
├── check_api_keys.py
├── completion_support.py
├── comprehensive_test.py
├── docker-compose.yml
├── Dockerfile
├── empirically_measured_model_speeds.json
├── error_handling.py
├── example_structured_tool.py
├── examples
│   ├── __init__.py
│   ├── advanced_agent_flows_using_unified_memory_system_demo.py
│   ├── advanced_extraction_demo.py
│   ├── advanced_unified_memory_system_demo.py
│   ├── advanced_vector_search_demo.py
│   ├── analytics_reporting_demo.py
│   ├── audio_transcription_demo.py
│   ├── basic_completion_demo.py
│   ├── cache_demo.py
│   ├── claude_integration_demo.py
│   ├── compare_synthesize_demo.py
│   ├── cost_optimization.py
│   ├── data
│   │   ├── sample_event.txt
│   │   ├── Steve_Jobs_Introducing_The_iPhone_compressed.md
│   │   └── Steve_Jobs_Introducing_The_iPhone_compressed.mp3
│   ├── docstring_refiner_demo.py
│   ├── document_conversion_and_processing_demo.py
│   ├── entity_relation_graph_demo.py
│   ├── filesystem_operations_demo.py
│   ├── grok_integration_demo.py
│   ├── local_text_tools_demo.py
│   ├── marqo_fused_search_demo.py
│   ├── measure_model_speeds.py
│   ├── meta_api_demo.py
│   ├── multi_provider_demo.py
│   ├── ollama_integration_demo.py
│   ├── prompt_templates_demo.py
│   ├── python_sandbox_demo.py
│   ├── rag_example.py
│   ├── research_workflow_demo.py
│   ├── sample
│   │   ├── article.txt
│   │   ├── backprop_paper.pdf
│   │   ├── buffett.pdf
│   │   ├── contract_link.txt
│   │   ├── legal_contract.txt
│   │   ├── medical_case.txt
│   │   ├── northwind.db
│   │   ├── research_paper.txt
│   │   ├── sample_data.json
│   │   └── text_classification_samples
│   │       ├── email_classification.txt
│   │       ├── news_samples.txt
│   │       ├── product_reviews.txt
│   │       └── support_tickets.txt
│   ├── sample_docs
│   │   └── downloaded
│   │       └── attention_is_all_you_need.pdf
│   ├── sentiment_analysis_demo.py
│   ├── simple_completion_demo.py
│   ├── single_shot_synthesis_demo.py
│   ├── smart_browser_demo.py
│   ├── sql_database_demo.py
│   ├── sse_client_demo.py
│   ├── test_code_extraction.py
│   ├── test_content_detection.py
│   ├── test_ollama.py
│   ├── text_classification_demo.py
│   ├── text_redline_demo.py
│   ├── tool_composition_examples.py
│   ├── tournament_code_demo.py
│   ├── tournament_text_demo.py
│   ├── unified_memory_system_demo.py
│   ├── vector_search_demo.py
│   ├── web_automation_instruction_packs.py
│   └── workflow_delegation_demo.py
├── LICENSE
├── list_models.py
├── marqo_index_config.json.example
├── mcp_protocol_schema_2025-03-25_version.json
├── mcp_python_lib_docs.md
├── mcp_tool_context_estimator.py
├── model_preferences.py
├── pyproject.toml
├── quick_test.py
├── README.md
├── resource_annotations.py
├── run_all_demo_scripts_and_check_for_errors.py
├── storage
│   └── smart_browser_internal
│       ├── locator_cache.db
│       ├── readability.js
│       └── storage_state.enc
├── test_client.py
├── test_connection.py
├── TEST_README.md
├── test_sse_client.py
├── test_stdio_client.py
├── tests
│   ├── __init__.py
│   ├── conftest.py
│   ├── integration
│   │   ├── __init__.py
│   │   └── test_server.py
│   ├── manual
│   │   ├── test_extraction_advanced.py
│   │   └── test_extraction.py
│   └── unit
│       ├── __init__.py
│       ├── test_cache.py
│       ├── test_providers.py
│       └── test_tools.py
├── TODO.md
├── tool_annotations.py
├── tools_list.json
├── ultimate_mcp_banner.webp
├── ultimate_mcp_logo.webp
├── ultimate_mcp_server
│   ├── __init__.py
│   ├── __main__.py
│   ├── cli
│   │   ├── __init__.py
│   │   ├── __main__.py
│   │   ├── commands.py
│   │   ├── helpers.py
│   │   └── typer_cli.py
│   ├── clients
│   │   ├── __init__.py
│   │   ├── completion_client.py
│   │   └── rag_client.py
│   ├── config
│   │   └── examples
│   │       └── filesystem_config.yaml
│   ├── config.py
│   ├── constants.py
│   ├── core
│   │   ├── __init__.py
│   │   ├── evaluation
│   │   │   ├── base.py
│   │   │   └── evaluators.py
│   │   ├── providers
│   │   │   ├── __init__.py
│   │   │   ├── anthropic.py
│   │   │   ├── base.py
│   │   │   ├── deepseek.py
│   │   │   ├── gemini.py
│   │   │   ├── grok.py
│   │   │   ├── ollama.py
│   │   │   ├── openai.py
│   │   │   └── openrouter.py
│   │   ├── server.py
│   │   ├── state_store.py
│   │   ├── tournaments
│   │   │   ├── manager.py
│   │   │   ├── tasks.py
│   │   │   └── utils.py
│   │   └── ums_api
│   │       ├── __init__.py
│   │       ├── ums_database.py
│   │       ├── ums_endpoints.py
│   │       ├── ums_models.py
│   │       └── ums_services.py
│   ├── exceptions.py
│   ├── graceful_shutdown.py
│   ├── services
│   │   ├── __init__.py
│   │   ├── analytics
│   │   │   ├── __init__.py
│   │   │   ├── metrics.py
│   │   │   └── reporting.py
│   │   ├── cache
│   │   │   ├── __init__.py
│   │   │   ├── cache_service.py
│   │   │   ├── persistence.py
│   │   │   ├── strategies.py
│   │   │   └── utils.py
│   │   ├── cache.py
│   │   ├── document.py
│   │   ├── knowledge_base
│   │   │   ├── __init__.py
│   │   │   ├── feedback.py
│   │   │   ├── manager.py
│   │   │   ├── rag_engine.py
│   │   │   ├── retriever.py
│   │   │   └── utils.py
│   │   ├── prompts
│   │   │   ├── __init__.py
│   │   │   ├── repository.py
│   │   │   └── templates.py
│   │   ├── prompts.py
│   │   └── vector
│   │       ├── __init__.py
│   │       ├── embeddings.py
│   │       └── vector_service.py
│   ├── tool_token_counter.py
│   ├── tools
│   │   ├── __init__.py
│   │   ├── audio_transcription.py
│   │   ├── base.py
│   │   ├── completion.py
│   │   ├── docstring_refiner.py
│   │   ├── document_conversion_and_processing.py
│   │   ├── enhanced-ums-lookbook.html
│   │   ├── entity_relation_graph.py
│   │   ├── excel_spreadsheet_automation.py
│   │   ├── extraction.py
│   │   ├── filesystem.py
│   │   ├── html_to_markdown.py
│   │   ├── local_text_tools.py
│   │   ├── marqo_fused_search.py
│   │   ├── meta_api_tool.py
│   │   ├── ocr_tools.py
│   │   ├── optimization.py
│   │   ├── provider.py
│   │   ├── pyodide_boot_template.html
│   │   ├── python_sandbox.py
│   │   ├── rag.py
│   │   ├── redline-compiled.css
│   │   ├── sentiment_analysis.py
│   │   ├── single_shot_synthesis.py
│   │   ├── smart_browser.py
│   │   ├── sql_databases.py
│   │   ├── text_classification.py
│   │   ├── text_redline_tools.py
│   │   ├── tournament.py
│   │   ├── ums_explorer.html
│   │   └── unified_memory_system.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── async_utils.py
│   │   ├── display.py
│   │   ├── logging
│   │   │   ├── __init__.py
│   │   │   ├── console.py
│   │   │   ├── emojis.py
│   │   │   ├── formatter.py
│   │   │   ├── logger.py
│   │   │   ├── panels.py
│   │   │   ├── progress.py
│   │   │   └── themes.py
│   │   ├── parse_yaml.py
│   │   ├── parsing.py
│   │   ├── security.py
│   │   └── text.py
│   └── working_memory_api.py
├── unified_memory_system_technical_analysis.md
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/examples/sentiment_analysis_demo.py:
--------------------------------------------------------------------------------

```python
   1 | #!/usr/bin/env python
   2 | """Business sentiment analysis demonstration using Ultimate MCP Server."""
   3 | 
   4 | import asyncio
   5 | import sys
   6 | from pathlib import Path
   7 | from typing import Any, Dict, List
   8 | 
   9 | # Add project root to path for imports when running as script
  10 | sys.path.insert(0, str(Path(__file__).parent.parent))
  11 | 
  12 | # Third-party imports
  13 | from rich import box
  14 | from rich.console import Group
  15 | from rich.markup import escape
  16 | from rich.panel import Panel
  17 | from rich.progress import Progress, SpinnerColumn, TextColumn
  18 | from rich.rule import Rule
  19 | from rich.table import Table
  20 | from rich.tree import Tree
  21 | 
  22 | # Project imports
  23 | from ultimate_mcp_server.constants import Provider
  24 | from ultimate_mcp_server.core.server import Gateway
  25 | from ultimate_mcp_server.tools.sentiment_analysis import (
  26 |     analyze_business_sentiment,
  27 |     analyze_business_text_batch,
  28 | )
  29 | from ultimate_mcp_server.utils import get_logger
  30 | from ultimate_mcp_server.utils.display import CostTracker
  31 | from ultimate_mcp_server.utils.logging.console import console
  32 | 
  33 | # Initialize logger
  34 | logger = get_logger("example.business_sentiment_demo")
  35 | 
  36 | # Provider and model configuration - easy to change
  37 | PROVIDER = Provider.OPENAI.value  # Change this to switch providers (e.g., Provider.OPENAI.value)
  38 | MODEL = 'gpt-4.1-nano'  # Set to None to use default model for the provider, or specify a model name
  39 | 
  40 | # Sample data for demonstrations
  41 | SAMPLE_FEEDBACK = {
  42 |     "retail": "I recently purchased your premium blender model BX-9000. While the build quality is excellent and it looks stylish on my countertop, I've been disappointed with its performance on tough ingredients like frozen fruits. It leaves chunks unblended even after several minutes of operation. Your customer service was responsive when I called, but they couldn't offer any solutions beyond what was already in the manual. For a product in this price range ($249), I expected better performance. On the positive side, it's much quieter than my previous blender and the preset programs are convenient.",
  43 |     "financial": "I've been using your online banking platform for my small business for about 6 months now. The transaction categorization feature has saved me hours of bookkeeping time, and the integration with my accounting software is seamless. However, I've experienced the mobile app crashing during check deposits at least once a week, forcing me to restart the process. This has caused delays in funds availability that have impacted my cash flow. Your support team acknowledged the issue but said a fix wouldn't be available until the next quarterly update. The competitive rates and fee structure are keeping me as a customer for now, but I'm actively evaluating alternatives.",
  44 |     "healthcare": "My recent stay at Memorial Care Hospital exceeded expectations. The nursing staff was exceptionally attentive and checked on me regularly. Dr. Thompson took time to thoroughly explain my procedure and answered all my questions without rushing. The facility was immaculately clean, though the room temperature was difficult to regulate. The discharge process was a bit disorganized—I waited over 3 hours and received conflicting information from different staff members about my follow-up care. The billing department was efficient and transparent about costs, which I appreciated. Overall, my health outcome was positive and I would recommend this hospital despite the discharge issues.",
  45 |     "b2b_tech": "We implemented your enterprise resource planning solution across our manufacturing division last quarter. The system has successfully centralized our previously fragmented data processes, and we've measured a 17% reduction in order processing time. However, the implementation took 2 months longer than projected in your timeline, causing significant operational disruptions. Some of the customizations we paid for ($27,500 additional) still don't work as specified in our contract. Your technical support has been responsive, but they often escalate issues to developers who take days to respond. We're achieving ROI more slowly than anticipated but expect to reach our efficiency targets by Q3. Training materials for new staff are excellent.",
  46 |     "support_ticket": "URGENT: Critical system outage affecting all users in EU region. Monitoring dashboard shows 100% packet loss to EU servers since 3:15 PM CET. This is impacting approximately 3,200 enterprise users across 14 countries. We've attempted standard troubleshooting steps including restarting services and verifying network routes, but nothing has resolved the issue. Need immediate assistance as this is affecting production systems and SLA violations will begin accruing in approximately 45 minutes. Our technical contact is Jan Kowalski (+48 555 123 456). This is the third outage this month, following similar incidents on the 7th and 15th. Reference case numbers: INC-7723 and INC-8105.",
  47 | }
  48 | 
  49 | BATCH_FEEDBACK = [
  50 |     {
  51 |         "customer_id": "AB-10293",
  52 |         "channel": "Email Survey",
  53 |         "product": "CloudSync Pro",
  54 |         "text": "Your automated onboarding process was a game-changer for our IT department. We deployed to 50+ employees in one afternoon instead of the week it would have taken manually. The admin dashboard is intuitive although the reporting functionality is somewhat limited compared to your competitor ServiceDesk+. We've already recommended your solution to several partner companies.",
  55 |     },
  56 |     {
  57 |         "customer_id": "XY-58204",
  58 |         "channel": "Support Ticket",
  59 |         "product": "CloudSync Pro",
  60 |         "text": "We've been experiencing intermittent synchronization failures for the past 3 days. Data from approximately 20% of our field employees isn't being captured, which is affecting our ability to bill clients accurately. This is creating significant revenue leakage. Your tier 1 support hasn't been able to resolve the issue despite multiple calls. We need escalation to engineering ASAP. Our contract SLA guarantees 99.9% reliability and we're well below that threshold currently.",
  61 |     },
  62 |     {
  63 |         "customer_id": "LM-39157",
  64 |         "channel": "NPS Survey",
  65 |         "product": "CloudSync Basic",
  66 |         "text": "I find the mobile app version significantly less functional than the desktop version. Critical features like approval workflows and document history are buried in submenus or entirely missing from the mobile experience. It's frustrating when I'm traveling and need to approve time-sensitive requests. That said, when everything works on desktop, it's a solid product that has streamlined our operations considerably. Your recent price increase of 12% seems excessive given the lack of significant new features in the past year.",
  67 |     },
  68 |     {
  69 |         "customer_id": "PQ-73046",
  70 |         "channel": "Sales Follow-up",
  71 |         "product": "CloudSync Enterprise",
  72 |         "text": "The ROI analysis your team provided convinced our CFO to approve the upgrade to Enterprise tier. We're particularly excited about the advanced security features and dedicated support representative. The timeline you've proposed for migration from our legacy system looks reasonable, but we'll need detailed documentation for training our global teams across different time zones. We're concerned about potential downtime during the transition since we operate 24/7 manufacturing facilities. Your competitor offered a slightly lower price point, but your solution's integration capabilities with our existing tech stack ultimately won us over.",
  73 |     },
  74 | ]
  75 | 
  76 | 
  77 | async def analyze_single_feedback(gateway, tracker: CostTracker):  # gateway kept for interface parity with the other demo entry points; unused here
  78 |     """Demonstrate analysis of a single piece of business feedback."""
  79 |     console.print(Rule("[bold blue]Individual Business Feedback Analysis[/bold blue]"))
  80 |     logger.info("Starting individual feedback analysis", emoji_key="start")
  81 |
  82 |     # Select a feedback sample
  83 |     industry = "retail"  # key into the module-level SAMPLE_FEEDBACK dict
  84 |     feedback_text = SAMPLE_FEEDBACK[industry]
  85 |
  86 |     # Display the feedback
  87 |     console.print(
  88 |         Panel(
  89 |             escape(feedback_text),
  90 |             title=f"[bold magenta]Sample {industry.capitalize()} Customer Feedback[/bold magenta]",
  91 |             border_style="magenta",
  92 |             expand=False,
  93 |         )
  94 |     )
  95 |
  96 |     # Analysis configuration
  97 |     analysis_config = {
  98 |         "industry": industry,
  99 |         "analysis_mode": "comprehensive",
 100 |         "entity_extraction": True,
 101 |         "aspect_based": True,
 102 |         "competitive_analysis": False,
 103 |         "intent_detection": True,
 104 |         "risk_assessment": True,
 105 |     }
 106 |
 107 |     # Display configuration
 108 |     config_table = Table(title="Analysis Configuration", show_header=True, box=box.ROUNDED)
 109 |     config_table.add_column("Parameter", style="cyan")
 110 |     config_table.add_column("Value", style="green")
 111 |
 112 |     for key, value in analysis_config.items():
 113 |         config_table.add_row(key, str(value))
 114 |
 115 |     console.print(config_table)
 116 |
 117 |     try:
 118 |         # Show progress during analysis
 119 |         with Progress(
 120 |             SpinnerColumn(),
 121 |             TextColumn("[bold blue]Analyzing business sentiment..."),
 122 |             transient=True,
 123 |         ) as progress:
 124 |             task = progress.add_task("Analyzing...", total=None)  # noqa: F841
 125 |
 126 |             # Directly call analyze_business_sentiment with proper parameters
 127 |             result = await analyze_business_sentiment(
 128 |                 text=feedback_text,
 129 |                 provider=PROVIDER,
 130 |                 model=MODEL,
 131 |                 **analysis_config,
 132 |             )
 133 |
 134 |             # Track cost
 135 |             if "meta" in result:  # NOTE(review): compare_analysis_modes also requires result["success"] before recording cost — confirm whether failures should be billed here too
 136 |                 tracker.record_call(
 137 |                     provider=result["meta"]["provider"],
 138 |                     model=result["meta"]["model"],
 139 |                     input_tokens=result["meta"]["tokens"]["input"],
 140 |                     output_tokens=result["meta"]["tokens"]["output"],
 141 |                     cost=result["meta"]["cost"],
 142 |                 )
 143 |
 144 |         # Display results
 145 |         if result["success"]:  # happy path: render every section present in the result dict
 146 |             logger.success("Sentiment analysis completed successfully", emoji_key="success")
 147 |
 148 |             # Core metrics panel
 149 |             core_metrics = result.get("core_metrics", {})
 150 |             metrics_table = Table(box=box.SIMPLE)
 151 |             metrics_table.add_column("Metric", style="cyan")
 152 |             metrics_table.add_column("Value", style="white")
 153 |
 154 |             metrics_table.add_row(
 155 |                 "Sentiment", f"[bold]{core_metrics.get('primary_sentiment', 'N/A')}[/bold]"
 156 |             )
 157 |             metrics_table.add_row(
 158 |                 "Sentiment Score", f"{core_metrics.get('sentiment_score', 0.0):.2f}"
 159 |             )
 160 |             metrics_table.add_row(
 161 |                 "Satisfaction",
 162 |                 f"{result.get('business_dimensions', {}).get('customer_satisfaction', 0.0):.2f}",
 163 |             )
 164 |             metrics_table.add_row("Urgency", core_metrics.get("urgency", "N/A"))
 165 |
 166 |             # Business dimension visualization
 167 |             dimensions = result.get("business_dimensions", {})
 168 |             viz_table = Table(show_header=False, box=None)
 169 |             viz_table.add_column("Dimension", style="blue")
 170 |             viz_table.add_column("Score", style="white")
 171 |             viz_table.add_column("Visual", style="yellow")
 172 |
 173 |             max_bar_length = 20
 174 |             for key, value in dimensions.items():
 175 |                 if isinstance(value, (int, float)):  # skip non-numeric dimension entries
 176 |                     # Create visual bar based on score
 177 |                     bar_length = int(value * max_bar_length)  # assumes value is in [0, 1] — TODO confirm against tool output
 178 |                     bar = "█" * bar_length + "░" * (max_bar_length - bar_length)
 179 |                     viz_table.add_row(key.replace("_", " ").title(), f"{value:.2f}", bar)
 180 |
 181 |             # Aspect sentiment visualization
 182 |             aspects = result.get("aspect_sentiment", {})
 183 |             aspect_table = Table(title="Aspect-Based Sentiment", box=box.ROUNDED)
 184 |             aspect_table.add_column("Aspect", style="cyan")
 185 |             aspect_table.add_column("Sentiment", style="white")
 186 |             aspect_table.add_column("Visual", style="yellow")
 187 |
 188 |             for aspect, score in aspects.items():
 189 |                 # Create visual bar with color
 190 |                 if score >= 0:  # sign selects bar colour: green for non-negative, red for negative
 191 |                     bar_length = int(score * 10)
 192 |                     bar = f"[green]{'█' * bar_length}{'░' * (10 - bar_length)}[/green]"
 193 |                 else:
 194 |                     bar_length = int(abs(score) * 10)
 195 |                     bar = f"[red]{'█' * bar_length}{'░' * (10 - bar_length)}[/red]"
 196 |
 197 |                 aspect_table.add_row(aspect.replace("_", " ").title(), f"{score:.2f}", bar)
 198 |
 199 |             # Display all visualizations
 200 |             console.print(
 201 |                 Panel(
 202 |                     Group(metrics_table, Rule(style="dim"), viz_table),
 203 |                     title="[bold green]Core Business Metrics[/bold green]",
 204 |                     border_style="green",
 205 |                 )
 206 |             )
 207 |
 208 |             console.print(aspect_table)
 209 |
 210 |             # Entity extraction
 211 |             if "entity_extraction" in result:  # optional sections below are rendered only when present
 212 |                 entity_panel = Panel(
 213 |                     _format_entities(result["entity_extraction"]),
 214 |                     title="[bold blue]Extracted Entities[/bold blue]",
 215 |                     border_style="blue",
 216 |                 )
 217 |                 console.print(entity_panel)
 218 |
 219 |             # Intent analysis
 220 |             if "intent_analysis" in result:
 221 |                 intent_panel = _display_intent_analysis(result["intent_analysis"])
 222 |                 console.print(intent_panel)
 223 |
 224 |             # Risk assessment
 225 |             if "risk_assessment" in result:
 226 |                 risk_panel = _display_risk_assessment(result["risk_assessment"])
 227 |                 console.print(risk_panel)
 228 |
 229 |             # Recommended actions
 230 |             if "recommended_actions" in result:
 231 |                 actions = result["recommended_actions"]
 232 |                 if actions:
 233 |                     # Format and display actions
 234 |                     formatted_actions = []
 235 |                     for i, action in enumerate(actions):
 236 |                         if isinstance(action, dict):  # actions may arrive as dicts or plain strings
 237 |                             # Format dictionary as readable string
 238 |                             if "action" in action:
 239 |                                 action_text = f"[bold]{i + 1}.[/bold] {action['action']}"
 240 |                                 # Add additional fields if available
 241 |                                 details = []
 242 |                                 for key, value in action.items():
 243 |                                     if key != "action":  # Skip the action field we already added
 244 |                                         details.append(f"{key}: {value}")
 245 |                                 if details:
 246 |                                     action_text += f" ({', '.join(details)})"
 247 |                                 formatted_actions.append(action_text)
 248 |                             else:
 249 |                                 # Generic dictionary formatting
 250 |                                 action_text = f"[bold]{i + 1}.[/bold] " + ", ".join(
 251 |                                     [f"{k}: {v}" for k, v in action.items()]
 252 |                                 )
 253 |                                 formatted_actions.append(action_text)
 254 |                         else:
 255 |                             formatted_actions.append(f"[bold]{i + 1}.[/bold] {action}")
 256 |
 257 |                     console.print(
 258 |                         Panel(
 259 |                             "\n".join(formatted_actions),
 260 |                             title="[bold yellow]Prioritized Action Plan[/bold yellow]",
 261 |                             border_style="yellow",
 262 |                             expand=False,
 263 |                         )
 264 |                     )
 265 |
 266 |             # Execution metrics
 267 |             meta = result.get("meta", {})
 268 |             exec_table = Table(title="Execution Metrics", box=box.SIMPLE, show_header=False)
 269 |             exec_table.add_column("Metric", style="dim cyan")
 270 |             exec_table.add_column("Value", style="dim white")
 271 |
 272 |             exec_table.add_row(
 273 |                 "Provider/Model", f"{meta.get('provider', 'N/A')}/{meta.get('model', 'N/A')}"
 274 |             )
 275 |             exec_table.add_row("Processing Time", f"{meta.get('processing_time', 0.0):.2f}s")
 276 |             exec_table.add_row(
 277 |                 "Tokens",
 278 |                 f"Input: {meta.get('tokens', {}).get('input', 0)}, Output: {meta.get('tokens', {}).get('output', 0)}",
 279 |             )
 280 |             exec_table.add_row("Cost", f"${meta.get('cost', 0.0):.6f}")
 281 |
 282 |             console.print(exec_table)
 283 |         else:  # analysis tool reported failure; surface its error message
 284 |             logger.error(
 285 |                 f"Sentiment analysis failed: {result.get('error', 'Unknown error')}",
 286 |                 emoji_key="error",
 287 |             )
 288 |
 289 |     except Exception as e:  # demo-level catch-all so one failed sample doesn't abort the run
 290 |         logger.error(
 291 |             f"Error in individual feedback analysis: {str(e)}", emoji_key="error", exc_info=True
 292 |         )
 293 | 
 294 | 
 295 | async def compare_analysis_modes(gateway, tracker: CostTracker):
 296 |     """Compare different analysis modes for the same feedback."""
 297 |     console.print(Rule("[bold blue]Analysis Mode Comparison[/bold blue]"))
 298 |     logger.info("Comparing different analysis modes", emoji_key="start")
 299 | 
 300 |     # Select a feedback sample
 301 |     industry = "b2b_tech"
 302 |     feedback_text = SAMPLE_FEEDBACK[industry]
 303 | 
 304 |     # Display the feedback
 305 |     console.print(
 306 |         Panel(
 307 |             escape(feedback_text),
 308 |             title="[bold magenta]B2B Technology Feedback[/bold magenta]",
 309 |             border_style="magenta",
 310 |             expand=False,
 311 |         )
 312 |     )
 313 | 
 314 |     # Analysis modes to compare
 315 |     analysis_modes = ["standard", "product_feedback", "customer_experience", "sales_opportunity"]
 316 | 
 317 |     # Results storage
 318 |     mode_results = {}
 319 | 
 320 |     try:
 321 |         # Show progress during analysis
 322 |         with Progress(
 323 |             SpinnerColumn(),
 324 |             TextColumn("[bold blue]Comparing analysis modes..."),
 325 |             transient=False,
 326 |         ) as progress:
 327 |             # Create tasks for each mode
 328 |             tasks = {
 329 |                 mode: progress.add_task(f"[cyan]Analyzing {mode}...", total=None)
 330 |                 for mode in analysis_modes
 331 |             }
 332 | 
 333 |             # Process each mode
 334 |             for mode in analysis_modes:
 335 |                 try:
 336 |                     logger.info(f"Trying analysis mode: {mode}", emoji_key="processing")
 337 | 
 338 |                     # Analysis configuration
 339 |                     analysis_config = {
 340 |                         "industry": industry,
 341 |                         "analysis_mode": mode,
 342 |                         "entity_extraction": False,  # Simplified for mode comparison
 343 |                         "aspect_based": True,
 344 |                         "competitive_analysis": False,
 345 |                         "intent_detection": False,
 346 |                         "risk_assessment": False,
 347 |                     }
 348 | 
 349 |                     # Directly call the analyze_business_sentiment function
 350 |                     result = await analyze_business_sentiment(
 351 |                         text=feedback_text,
 352 |                         provider=PROVIDER,
 353 |                         model=MODEL,
 354 |                         **analysis_config,
 355 |                     )
 356 | 
 357 |                     # Track cost
 358 |                     if "meta" in result and result["success"]:
 359 |                         tracker.record_call(
 360 |                             provider=result["meta"]["provider"],
 361 |                             model=result["meta"]["model"],
 362 |                             input_tokens=result["meta"]["tokens"]["input"],
 363 |                             output_tokens=result["meta"]["tokens"]["output"],
 364 |                             cost=result["meta"]["cost"],
 365 |                         )
 366 | 
 367 |                     # Store result
 368 |                     mode_results[mode] = result
 369 | 
 370 |                     # Complete the task
 371 |                     progress.update(tasks[mode], completed=True)
 372 | 
 373 |                 except Exception as e:
 374 |                     logger.warning(f"Error analyzing mode {mode}: {str(e)}", emoji_key="warning")
 375 |                     # Create mock result if analysis fails
 376 |                     mode_results[mode] = {
 377 |                         "success": False,
 378 |                         "error": str(e),
 379 |                         "core_metrics": {
 380 |                             "primary_sentiment": f"Error in {mode}",
 381 |                             "sentiment_score": 0.0,
 382 |                         },
 383 |                         "business_dimensions": {},
 384 |                         "aspect_sentiment": {},
 385 |                         "recommended_actions": [],
 386 |                     }
 387 |                     progress.update(tasks[mode], completed=True)
 388 | 
 389 |         # Compare the results
 390 |         comparison_table = Table(title="Analysis Mode Comparison", box=box.ROUNDED)
 391 |         comparison_table.add_column("Metric", style="white")
 392 |         for mode in analysis_modes:
 393 |             comparison_table.add_column(mode.replace("_", " ").title(), style="cyan")
 394 | 
 395 |         # Add sentiment rows
 396 |         comparison_table.add_row(
 397 |             "Primary Sentiment",
 398 |             *[
 399 |                 mode_results[mode].get("core_metrics", {}).get("primary_sentiment", "N/A")
 400 |                 for mode in analysis_modes
 401 |             ],
 402 |         )
 403 | 
 404 |         # Add score rows
 405 |         comparison_table.add_row(
 406 |             "Sentiment Score",
 407 |             *[
 408 |                 f"{mode_results[mode].get('core_metrics', {}).get('sentiment_score', 0.0):.2f}"
 409 |                 for mode in analysis_modes
 410 |             ],
 411 |         )
 412 | 
 413 |         # Add satisfaction rows
 414 |         comparison_table.add_row(
 415 |             "Satisfaction",
 416 |             *[
 417 |                 f"{mode_results[mode].get('business_dimensions', {}).get('customer_satisfaction', 0.0):.2f}"
 418 |                 for mode in analysis_modes
 419 |             ],
 420 |         )
 421 | 
 422 |         # Display top aspects for each mode
 423 |         aspect_trees = {}
 424 |         for mode in analysis_modes:
 425 |             aspects = mode_results[mode].get("aspect_sentiment", {})
 426 |             if aspects:
 427 |                 tree = Tree(f"[bold]{mode.replace('_', ' ').title()} Aspects[/bold]")
 428 |                 sorted_aspects = sorted(aspects.items(), key=lambda x: abs(x[1]), reverse=True)
 429 |                 for aspect, score in sorted_aspects[:3]:  # Top 3 aspects
 430 |                     color = "green" if score >= 0 else "red"
 431 |                     tree.add(f"[{color}]{aspect.replace('_', ' ').title()}: {score:.2f}[/{color}]")
 432 |                 aspect_trees[mode] = tree
 433 | 
 434 |         # Add recommended actions comparison
 435 |         action_trees = {}
 436 |         for mode in analysis_modes:
 437 |             actions = mode_results[mode].get("recommended_actions", [])
 438 |             if actions:
 439 |                 tree = Tree(f"[bold]{mode.replace('_', ' ').title()} Actions[/bold]")
 440 |                 for action in actions[:2]:  # Top 2 actions
 441 |                     # Handle case where action is a dictionary
 442 |                     if isinstance(action, dict):
 443 |                         # Format dictionary as readable string
 444 |                         if "action" in action:
 445 |                             action_text = f"{action['action']}"
 446 |                             if "priority" in action:
 447 |                                 action_text += f" (Priority: {action['priority']})"
 448 |                             tree.add(action_text)
 449 |                         else:
 450 |                             # Generic dictionary formatting
 451 |                             action_text = ", ".join([f"{k}: {v}" for k, v in action.items()])
 452 |                             tree.add(action_text)
 453 |                     else:
 454 |                         tree.add(str(action))
 455 |                 action_trees[mode] = tree
 456 | 
 457 |         # Display comparison table
 458 |         console.print(comparison_table)
 459 | 
 460 |         # Display aspects side by side if possible
 461 |         if aspect_trees:
 462 |             console.print("\n[bold cyan]Top Aspects by Analysis Mode[/bold cyan]")
 463 |             # Print trees based on available width
 464 |             for _mode, tree in aspect_trees.items():
 465 |                 console.print(tree)
 466 | 
 467 |         # Display recommended actions
 468 |         if action_trees:
 469 |             console.print("\n[bold yellow]Recommended Actions by Analysis Mode[/bold yellow]")
 470 |             for _mode, tree in action_trees.items():
 471 |                 console.print(tree)
 472 | 
 473 |         # Display execution metrics
 474 |         exec_table = Table(title="Execution Metrics by Mode", box=box.SIMPLE)
 475 |         exec_table.add_column("Mode", style="cyan")
 476 |         exec_table.add_column("Processing Time", style="dim white")
 477 |         exec_table.add_column("Tokens (In/Out)", style="dim white")
 478 |         exec_table.add_column("Cost", style="green")
 479 | 
 480 |         for mode in analysis_modes:
 481 |             meta = mode_results[mode].get("meta", {})
 482 |             if meta:
 483 |                 exec_table.add_row(
 484 |                     mode.replace("_", " ").title(),
 485 |                     f"{meta.get('processing_time', 0.0):.2f}s",
 486 |                     f"{meta.get('tokens', {}).get('input', 0)}/{meta.get('tokens', {}).get('output', 0)}",
 487 |                     f"${meta.get('cost', 0.0):.6f}",
 488 |                 )
 489 | 
 490 |         console.print(exec_table)
 491 | 
 492 |     except Exception as e:
 493 |         logger.error(
 494 |             f"Error in analysis mode comparison: {str(e)}", emoji_key="error", exc_info=True
 495 |         )
 496 | 
 497 | 
 498 | async def analyze_support_ticket_with_risk(gateway, tracker: CostTracker):
 499 |     """Analyze a support ticket with focus on risk assessment."""
 500 |     console.print(Rule("[bold blue]Support Ticket Risk Assessment[/bold blue]"))
 501 |     logger.info("Analyzing support ticket with risk focus", emoji_key="start")
 502 | 
 503 |     # Use the support ticket sample
 504 |     ticket_text = SAMPLE_FEEDBACK["support_ticket"]
 505 | 
 506 |     # Display the ticket
 507 |     console.print(
 508 |         Panel(
 509 |             escape(ticket_text),
 510 |             title="[bold red]URGENT Support Ticket[/bold red]",
 511 |             border_style="red",
 512 |             expand=False,
 513 |         )
 514 |     )
 515 | 
 516 |     # Analysis configuration focusing on risk and urgency
 517 |     analysis_config = {
 518 |         "industry": "technology",
 519 |         "analysis_mode": "support_ticket",
 520 |         "entity_extraction": True,
 521 |         "aspect_based": False,
 522 |         "competitive_analysis": False,
 523 |         "intent_detection": True,
 524 |         "risk_assessment": True,
 525 |         "threshold_config": {
 526 |             "urgency": 0.7,  # Higher threshold for urgency
 527 |             "churn_risk": 0.5,  # Standard threshold for churn risk
 528 |         },
 529 |     }
 530 | 
 531 |     try:
 532 |         # Show progress during analysis
 533 |         with Progress(
 534 |             SpinnerColumn(),
 535 |             TextColumn("[bold red]Analyzing support ticket..."),
 536 |             transient=True,
 537 |         ) as progress:
 538 |             task = progress.add_task("Analyzing...", total=None)  # noqa: F841
 539 | 
 540 |             # Directly call analyze_business_sentiment
 541 |             result = await analyze_business_sentiment(
 542 |                 text=ticket_text,
 543 |                 provider=PROVIDER,
 544 |                 model=MODEL,
 545 |                 **analysis_config,
 546 |             )
 547 | 
 548 |             # Track cost
 549 |             if "meta" in result:
 550 |                 tracker.record_call(
 551 |                     provider=result["meta"]["provider"],
 552 |                     model=result["meta"]["model"],
 553 |                     input_tokens=result["meta"]["tokens"]["input"],
 554 |                     output_tokens=result["meta"]["tokens"]["output"],
 555 |                     cost=result["meta"]["cost"],
 556 |                 )
 557 | 
 558 |         # Display results focusing on risk assessment
 559 |         if result["success"]:
 560 |             logger.success("Support ticket analysis completed", emoji_key="success")
 561 | 
 562 |             # Core urgency metrics
 563 |             core_metrics = result.get("core_metrics", {})
 564 |             urgency = core_metrics.get("urgency", "medium")
 565 |             urgency_color = {
 566 |                 "low": "green",
 567 |                 "medium": "yellow",
 568 |                 "high": "orange",
 569 |                 "critical": "red",
 570 |             }.get(urgency.lower(), "yellow")
 571 | 
 572 |             # Risk assessment panel
 573 |             risk_data = result.get("risk_assessment", {})
 574 |             # If risk_data is empty, add a default escalation probability
 575 |             if not risk_data or not any(
 576 |                 key in risk_data
 577 |                 for key in [
 578 |                     "response_urgency",
 579 |                     "churn_probability",
 580 |                     "pr_risk",
 581 |                     "escalation_probability",
 582 |                 ]
 583 |             ):
 584 |                 risk_data["escalation_probability"] = 0.95
 585 | 
 586 |             if risk_data:
 587 |                 risk_table = Table(box=box.ROUNDED)
 588 |                 risk_table.add_column("Risk Factor", style="white")
 589 |                 risk_table.add_column("Level", style="cyan")
 590 |                 risk_table.add_column("Details", style="yellow")
 591 | 
 592 |                 # Add risk factors
 593 |                 if "response_urgency" in risk_data:
 594 |                     risk_table.add_row(
 595 |                         "Response Urgency",
 596 |                         f"[{urgency_color}]{risk_data.get('response_urgency', 'medium').upper()}[/{urgency_color}]",
 597 |                         "Ticket requires timely response",
 598 |                     )
 599 | 
 600 |                 if "churn_probability" in risk_data:
 601 |                     churn_prob = risk_data["churn_probability"]
 602 |                     churn_color = (
 603 |                         "green" if churn_prob < 0.3 else "yellow" if churn_prob < 0.6 else "red"
 604 |                     )
 605 |                     risk_table.add_row(
 606 |                         "Churn Risk",
 607 |                         f"[{churn_color}]{churn_prob:.2f}[/{churn_color}]",
 608 |                         "Probability of customer churn",
 609 |                     )
 610 | 
 611 |                 if "pr_risk" in risk_data:
 612 |                     pr_risk = risk_data["pr_risk"]
 613 |                     pr_color = (
 614 |                         "green" if pr_risk == "low" else "yellow" if pr_risk == "medium" else "red"
 615 |                     )
 616 |                     risk_table.add_row(
 617 |                         "PR/Reputation Risk",
 618 |                         f"[{pr_color}]{pr_risk.upper()}[/{pr_color}]",
 619 |                         "Potential for negative publicity",
 620 |                     )
 621 | 
 622 |                 if "escalation_probability" in risk_data:
 623 |                     esc_prob = risk_data["escalation_probability"]
 624 |                     esc_color = "green" if esc_prob < 0.3 else "yellow" if esc_prob < 0.6 else "red"
 625 |                     risk_table.add_row(
 626 |                         "Escalation Probability",
 627 |                         f"[{esc_color}]{esc_prob:.2f}[/{esc_color}]",
 628 |                         "Likelihood issue will escalate",
 629 |                     )
 630 | 
 631 |                 # Add compliance flags
 632 |                 if "legal_compliance_flags" in risk_data and risk_data["legal_compliance_flags"]:
 633 |                     flags = risk_data["legal_compliance_flags"]
 634 |                     risk_table.add_row(
 635 |                         "Compliance Flags", f"[red]{len(flags)}[/red]", ", ".join(flags)
 636 |                     )
 637 | 
 638 |                 # Display risk table
 639 |                 console.print(
 640 |                     Panel(
 641 |                         risk_table,
 642 |                         title=f"[bold {urgency_color}]Risk Assessment ({urgency.upper()})[/bold {urgency_color}]",
 643 |                         border_style=urgency_color,
 644 |                     )
 645 |                 )
 646 | 
 647 |             # Entity extraction (focusing on technical details)
 648 |             if "entity_extraction" in result:
 649 |                 entity_tree = Tree("[bold cyan]Extracted Technical Entities[/bold cyan]")
 650 |                 entities = result["entity_extraction"]
 651 | 
 652 |                 for category, items in entities.items():
 653 |                     if items:  # Only add non-empty categories
 654 |                         branch = entity_tree.add(
 655 |                             f"[bold]{category.replace('_', ' ').title()}[/bold]"
 656 |                         )
 657 |                         for item in items:
 658 |                             # Handle case where item is a dictionary
 659 |                             if isinstance(item, dict):
 660 |                                 # Format dictionary items appropriately
 661 |                                 if "name" in item and "phone" in item:
 662 |                                     branch.add(f"{item.get('name', '')} ({item.get('phone', '')})")
 663 |                                 else:
 664 |                                     # Format other dictionary types as name: value pairs
 665 |                                     formatted_item = ", ".join(
 666 |                                         [f"{k}: {v}" for k, v in item.items()]
 667 |                                     )
 668 |                                     branch.add(formatted_item)
 669 |                             else:
 670 |                                 branch.add(str(item))
 671 | 
 672 |                 console.print(entity_tree)
 673 | 
 674 |             # Intent analysis focusing on support needs
 675 |             if "intent_analysis" in result:
 676 |                 intent_data = result["intent_analysis"]
 677 |                 support_needed = intent_data.get("support_needed", 0.0)
 678 |                 feedback_type = intent_data.get("feedback_type", "N/A")
 679 | 
 680 |                 intent_table = Table(box=box.SIMPLE)
 681 |                 intent_table.add_column("Intent Indicator", style="cyan")
 682 |                 intent_table.add_column("Value", style="white")
 683 | 
 684 |                 intent_table.add_row("Support Needed", f"{support_needed:.2f}")
 685 |                 intent_table.add_row("Feedback Type", feedback_type.capitalize())
 686 |                 if "information_request" in intent_data:
 687 |                     intent_table.add_row(
 688 |                         "Information Request", str(intent_data["information_request"])
 689 |                     )
 690 | 
 691 |                 console.print(
 692 |                     Panel(
 693 |                         intent_table,
 694 |                         title="[bold blue]Support Intent Analysis[/bold blue]",
 695 |                         border_style="blue",
 696 |                     )
 697 |                 )
 698 | 
 699 |             # Action plan for high urgency tickets
 700 |             if "recommended_actions" in result:
 701 |                 actions = result["recommended_actions"]
 702 |                 if actions:
 703 |                     # Format and display actions
 704 |                     formatted_actions = []
 705 |                     for i, action in enumerate(actions):
 706 |                         if isinstance(action, dict):
 707 |                             # Format dictionary as readable string
 708 |                             if "action" in action:
 709 |                                 action_text = f"[bold]{i + 1}.[/bold] {action['action']}"
 710 |                                 # Add additional fields if available
 711 |                                 details = []
 712 |                                 for key, value in action.items():
 713 |                                     if key != "action":  # Skip the action field we already added
 714 |                                         details.append(f"{key}: {value}")
 715 |                                 if details:
 716 |                                     action_text += f" ({', '.join(details)})"
 717 |                                 formatted_actions.append(action_text)
 718 |                             else:
 719 |                                 # Generic dictionary formatting
 720 |                                 action_text = f"[bold]{i + 1}.[/bold] " + ", ".join(
 721 |                                     [f"{k}: {v}" for k, v in action.items()]
 722 |                                 )
 723 |                                 formatted_actions.append(action_text)
 724 |                         else:
 725 |                             formatted_actions.append(f"[bold]{i + 1}.[/bold] {action}")
 726 | 
 727 |                     console.print(
 728 |                         Panel(
 729 |                             "\n".join(formatted_actions),
 730 |                             title="[bold yellow]Prioritized Action Plan[/bold yellow]",
 731 |                             border_style="yellow",
 732 |                             expand=False,
 733 |                         )
 734 |                     )
 735 | 
 736 |             # SLA impact assessment
 737 |             sla_panel = Panel(
 738 |                 "Based on the urgency assessment, this ticket requires immediate attention to prevent SLA violations. "
 739 |                 "The system outage reported impacts 3,200 enterprise users and has a critical business impact. "
 740 |                 "Previous related incidents (case numbers INC-7723 and INC-8105) suggest a recurring issue pattern.",
 741 |                 title="[bold red]SLA Impact Assessment[/bold red]",
 742 |                 border_style="red",
 743 |             )
 744 |             console.print(sla_panel)
 745 | 
 746 |             # Execution metrics
 747 |             meta = result.get("meta", {})
 748 |             exec_table = Table(title="Execution Metrics", box=box.SIMPLE, show_header=False)
 749 |             exec_table.add_column("Metric", style="dim cyan")
 750 |             exec_table.add_column("Value", style="dim white")
 751 | 
 752 |             exec_table.add_row(
 753 |                 "Provider/Model", f"{meta.get('provider', 'N/A')}/{meta.get('model', 'N/A')}"
 754 |             )
 755 |             exec_table.add_row("Processing Time", f"{meta.get('processing_time', 0.0):.2f}s")
 756 |             exec_table.add_row(
 757 |                 "Tokens",
 758 |                 f"Input: {meta.get('tokens', {}).get('input', 0)}, Output: {meta.get('tokens', {}).get('output', 0)}",
 759 |             )
 760 |             exec_table.add_row("Cost", f"${meta.get('cost', 0.0):.6f}")
 761 | 
 762 |             console.print(exec_table)
 763 |         else:
 764 |             logger.error(
 765 |                 f"Support ticket analysis failed: {result.get('error', 'Unknown error')}",
 766 |                 emoji_key="error",
 767 |             )
 768 | 
 769 |     except Exception as e:
 770 |         logger.error(
 771 |             f"Error in support ticket analysis: {str(e)}", emoji_key="error", exc_info=True
 772 |         )
 773 | 
 774 | 
async def run_batch_analysis(gateway, tracker: CostTracker):
    """Analyze a batch of customer feedback and show aggregated insights.

    Renders an overview table of ``BATCH_FEEDBACK``, runs
    ``analyze_business_text_batch`` over all feedback texts (comprehensive
    mode, aggregation on, concurrency capped at 3), records the total batch
    cost on *tracker*, and then displays aggregate insights, high-risk items,
    processing metrics, and derived business recommendations.

    Args:
        gateway: Unused here; kept so all demo functions share one signature.
        tracker: CostTracker that accumulates provider/model costs.
    """
    console.print(Rule("[bold blue]Batch Feedback Analysis[/bold blue]"))
    logger.info("Starting batch feedback analysis", emoji_key="start")

    # Display batch summary
    feedback_table = Table(title="Customer Feedback Batch Overview", box=box.ROUNDED)
    feedback_table.add_column("Customer ID", style="cyan")
    feedback_table.add_column("Channel", style="magenta")
    feedback_table.add_column("Product", style="yellow")
    feedback_table.add_column("Preview", style="white")

    for item in BATCH_FEEDBACK:
        # Preview column truncates each feedback text to its first 50 chars.
        feedback_table.add_row(
            item["customer_id"], item["channel"], item["product"], item["text"][:50] + "..."
        )

    console.print(feedback_table)

    # Analysis configuration (all analysis facets enabled for the batch demo)
    analysis_config = {
        "industry": "technology",
        "analysis_mode": "comprehensive",
        "entity_extraction": True,
        "aspect_based": True,
        "competitive_analysis": True,
        "intent_detection": True,
        "risk_assessment": True,
    }

    # List of texts for batch processing
    texts = [item["text"] for item in BATCH_FEEDBACK]

    try:
        # Show progress during batch analysis
        with Progress(
            SpinnerColumn(),
            TextColumn("[bold blue]Processing feedback batch..."),
            transient=True,
        ) as progress:
            task = progress.add_task("Processing...", total=None)  # noqa: F841

            # Directly call the analyze_business_text_batch function
            result = await analyze_business_text_batch(
                texts=texts,
                analysis_config=analysis_config,
                aggregate_results=True,
                max_concurrency=3,
                provider=PROVIDER,
                model=MODEL,
            )

            # Track cost (batch reports a single aggregated total_cost, so it
            # is recorded as one custom-cost entry rather than per-call)
            if "meta" in result and "total_cost" in result["meta"]:
                tracker.add_custom_cost(
                    "Batch Analysis",
                    PROVIDER,
                    MODEL,
                    result["meta"]["total_cost"],
                )

        # Display batch results
        if result["success"]:
            logger.success(
                f"Successfully analyzed {len(texts)} feedback items", emoji_key="success"
            )

            # Display aggregate insights
            if "aggregate_insights" in result:
                _display_aggregate_insights(result["aggregate_insights"])

            # Display high-risk feedback
            _display_high_risk_items(result["individual_results"])

            # Display execution metrics
            meta = result.get("meta", {})
            exec_table = Table(title="Batch Processing Metrics", box=box.SIMPLE, show_header=False)
            exec_table.add_column("Metric", style="dim cyan")
            exec_table.add_column("Value", style="dim white")

            exec_table.add_row("Batch Size", str(meta.get("batch_size", 0)))
            exec_table.add_row(
                "Success Rate", f"{meta.get('success_count', 0)}/{meta.get('batch_size', 0)}"
            )
            exec_table.add_row("Processing Time", f"{meta.get('processing_time', 0.0):.2f}s")
            exec_table.add_row("Total Cost", f"${meta.get('total_cost', 0.0):.6f}")

            console.print(exec_table)

            # Generate business recommendations based on batch insights
            if "aggregate_insights" in result and result["aggregate_insights"]:
                insights = result["aggregate_insights"]
                recommendations = []

                # Extract top issues from aggregate insights: any aspect with
                # negative average sentiment becomes a recommendation
                if "top_aspects" in insights and insights["top_aspects"]:
                    for aspect in insights["top_aspects"]:
                        if "avg_sentiment" in aspect and aspect["avg_sentiment"] < 0:
                            recommendations.append(
                                f"Address issues with {aspect['name'].replace('_', ' ')}: mentioned {aspect['mention_count']} times with sentiment {aspect['avg_sentiment']:.2f}"
                            )

                # Same treatment for negatively-scored key topics
                if "key_topics" in insights and insights["key_topics"]:
                    for topic in insights["key_topics"]:
                        if "avg_sentiment" in topic and topic["avg_sentiment"] < 0:
                            recommendations.append(
                                f"Investigate concerns about '{topic['topic']}': mentioned {topic['mention_count']} times"
                            )

                # If we don't have enough recommendations, add some generic ones
                if len(recommendations) < 3:
                    recommendations.append("Review product features with highest mention counts")
                    recommendations.append("Follow up with customers who reported critical issues")

                # Format and display recommendations
                formatted_recommendations = []
                for i, rec in enumerate(recommendations[:4]):  # Limit to top 4
                    formatted_recommendations.append(f"{i + 1}. **{rec}**")

                if formatted_recommendations:
                    console.print(
                        Panel(
                            "\n".join(formatted_recommendations),
                            title="[bold green]Business Intelligence Insights[/bold green]",
                            border_style="green",
                            expand=False,
                        )
                    )

        else:
            logger.error(
                f"Batch analysis failed: {result.get('error', 'Unknown error')}", emoji_key="error"
            )

    except Exception as e:
        logger.error(f"Error in batch analysis: {str(e)}", emoji_key="error", exc_info=True)
 911 | 
 912 | 
 913 | # Helper functions
 914 | def _format_entities(entities: Dict[str, List[str]]) -> str:
 915 |     """Format extracted entities for display."""
 916 |     output = ""
 917 |     for category, items in entities.items():
 918 |         if items:
 919 |             output += f"[bold]{category.replace('_', ' ').title()}[/bold]: "
 920 |             output += ", ".join([f"[cyan]{item}[/cyan]" for item in items])
 921 |             output += "\n"
 922 |     return output
 923 | 
 924 | 
 925 | def _display_intent_analysis(intent_data: Dict[str, Any]) -> Panel:
 926 |     """Display intent analysis in a formatted panel."""
 927 |     intent_table = Table(box=box.SIMPLE)
 928 |     intent_table.add_column("Intent Indicator", style="blue")
 929 |     intent_table.add_column("Value", style="white")
 930 | 
 931 |     # Purchase intent
 932 |     if "purchase_intent" in intent_data:
 933 |         purchase_intent = intent_data["purchase_intent"]
 934 |         # Check if purchase_intent is a dictionary instead of a float
 935 |         if isinstance(purchase_intent, dict):
 936 |             # Extract the value or use a default
 937 |             purchase_intent = float(purchase_intent.get("score", 0.0))
 938 |         elif not isinstance(purchase_intent, (int, float)):
 939 |             # Handle any other unexpected types
 940 |             purchase_intent = 0.0
 941 |         else:
 942 |             purchase_intent = float(purchase_intent)
 943 | 
 944 |         color = "green" if purchase_intent > 0.5 else "yellow" if purchase_intent > 0.2 else "red"
 945 |         intent_table.add_row("Purchase Intent", f"[{color}]{purchase_intent:.2f}[/{color}]")
 946 | 
 947 |     # Churn risk
 948 |     if "churn_risk" in intent_data:
 949 |         churn_risk = intent_data["churn_risk"]
 950 |         # Similar type checking for churn_risk
 951 |         if isinstance(churn_risk, dict):
 952 |             churn_risk = float(churn_risk.get("score", 0.0))
 953 |         elif not isinstance(churn_risk, (int, float)):
 954 |             churn_risk = 0.0
 955 |         else:
 956 |             churn_risk = float(churn_risk)
 957 | 
 958 |         color = "red" if churn_risk > 0.5 else "yellow" if churn_risk > 0.2 else "green"
 959 |         intent_table.add_row("Churn Risk", f"[{color}]{churn_risk:.2f}[/{color}]")
 960 | 
 961 |     # Support needed
 962 |     if "support_needed" in intent_data:
 963 |         support_needed = intent_data["support_needed"]
 964 |         # Similar type checking for support_needed
 965 |         if isinstance(support_needed, dict):
 966 |             support_needed = float(support_needed.get("score", 0.0))
 967 |         elif not isinstance(support_needed, (int, float)):
 968 |             support_needed = 0.0
 969 |         else:
 970 |             support_needed = float(support_needed)
 971 | 
 972 |         color = "yellow" if support_needed > 0.5 else "green"
 973 |         intent_table.add_row("Support Needed", f"[{color}]{support_needed:.2f}[/{color}]")
 974 | 
 975 |     # Feedback type
 976 |     if "feedback_type" in intent_data:
 977 |         feedback_type = intent_data["feedback_type"]
 978 |         # Handle if feedback_type is a dict
 979 |         if isinstance(feedback_type, dict):
 980 |             feedback_type = feedback_type.get("type", "unknown")
 981 |         elif not isinstance(feedback_type, str):
 982 |             feedback_type = "unknown"
 983 | 
 984 |         color = (
 985 |             "red"
 986 |             if feedback_type == "complaint"
 987 |             else "green"
 988 |             if feedback_type == "praise"
 989 |             else "blue"
 990 |         )
 991 |         intent_table.add_row("Feedback Type", f"[{color}]{feedback_type.capitalize()}[/{color}]")
 992 | 
 993 |     # Information request
 994 |     if "information_request" in intent_data:
 995 |         intent_table.add_row("Information Request", str(intent_data["information_request"]))
 996 | 
 997 |     return Panel(
 998 |         intent_table,
 999 |         title="[bold cyan]Customer Intent Analysis[/bold cyan]",
1000 |         border_style="cyan",
1001 |         expand=False,
1002 |     )
1003 | 
1004 | 
def _display_risk_assessment(risk_data: Dict[str, Any]) -> Panel:
    """Render business risk factors from *risk_data* as a red-bordered panel.

    Only the keys present in *risk_data* produce table rows; probabilities
    are color-graded at the 0.3/0.6 thresholds and categorical levels at
    low/medium/high.
    """

    def _prob_color(p: float) -> str:
        # Probability grading: <0.3 green, <0.6 yellow, else red.
        if p < 0.3:
            return "green"
        if p < 0.6:
            return "yellow"
        return "red"

    def _level_color(level: str) -> str:
        # Categorical grading: low green, medium yellow, anything else red.
        if level == "low":
            return "green"
        if level == "medium":
            return "yellow"
        return "red"

    table = Table(box=box.SIMPLE)
    table.add_column("Risk Factor", style="red")
    table.add_column("Level", style="white")

    if "churn_probability" in risk_data:
        prob = risk_data["churn_probability"]
        shade = _prob_color(prob)
        table.add_row("Churn Probability", f"[{shade}]{prob:.2f}[/{shade}]")

    if "response_urgency" in risk_data:
        level = risk_data["response_urgency"]
        shade = _level_color(level)
        table.add_row("Response Urgency", f"[{shade}]{level.upper()}[/{shade}]")

    if "pr_risk" in risk_data:
        level = risk_data["pr_risk"]
        shade = _level_color(level)
        table.add_row("PR/Reputation Risk", f"[{shade}]{level.upper()}[/{shade}]")

    if "escalation_probability" in risk_data:
        prob = risk_data["escalation_probability"]
        shade = _prob_color(prob)
        table.add_row("Escalation Probability", f"[{shade}]{prob:.2f}[/{shade}]")

    flags = risk_data.get("legal_compliance_flags")
    if flags:
        table.add_row("Legal/Compliance Flags", ", ".join(flags))

    return Panel(
        table,
        title="[bold red]Business Risk Assessment[/bold red]",
        border_style="red",
        expand=False,
    )
1046 | 
1047 | 
def _display_aggregate_insights(insights: Dict[str, Any]) -> None:
    """Display aggregate insights from batch analysis.

    Renders up to five Rich tables — sentiment distribution, top aspects,
    key topics, product mention frequencies, and average business metrics —
    each shown only when its key is present in ``insights``. When
    ``insights`` is empty, a representative sample payload is substituted
    so the demo still produces visible output.

    Args:
        insights: Aggregate analysis results keyed by section name.
    """
    console.print(Rule("[bold green]Aggregate Customer Feedback Insights[/bold green]"))

    # Fall back to sample data so the demo never renders an empty section.
    if not insights or len(insights) == 0:
        insights = {
            "sentiment_distribution": {"positive": 0.4, "neutral": 0.4, "negative": 0.2},
            "top_aspects": [
                {"name": "mobile_app", "avg_sentiment": -0.2, "mention_count": 3},
                {"name": "customer_support", "avg_sentiment": 0.5, "mention_count": 2},
                {"name": "sync_functionality", "avg_sentiment": -0.3, "mention_count": 2},
            ],
            "key_topics": [
                {"topic": "mobile experience", "mention_count": 3, "avg_sentiment": -0.2},
                {"topic": "implementation", "mention_count": 2, "avg_sentiment": -0.1},
                {"topic": "support quality", "mention_count": 2, "avg_sentiment": 0.6},
            ],
            "entity_mention_frequencies": {
                "products": {"CloudSync Pro": 2, "CloudSync Basic": 1, "CloudSync Enterprise": 1}
            },
            "average_metrics": {
                "customer_satisfaction": 0.6,
                "product_satisfaction": 0.5,
                "service_satisfaction": 0.7,
                "value_perception": 0.4,
            },
        }

    # Sentiment distribution: one row per sentiment with a proportional bar.
    if "sentiment_distribution" in insights:
        dist = insights["sentiment_distribution"]

        sentiment_table = Table(title="Sentiment Distribution", box=box.ROUNDED)
        sentiment_table.add_column("Sentiment", style="cyan")
        sentiment_table.add_column("Percentage", style="white")
        sentiment_table.add_column("Distribution", style="yellow")

        for sentiment, percentage in dist.items():
            # Bar scaled to a 30-character width.
            bar_length = int(percentage * 30)
            color = (
                "green"
                if sentiment == "positive"
                else "yellow"
                if sentiment == "neutral"
                else "red"
            )
            bar = f"[{color}]{'█' * bar_length}[/{color}]"

            sentiment_table.add_row(sentiment.capitalize(), f"{percentage:.0%}", bar)

        console.print(sentiment_table)

    # Top product/service aspects with a color-coded sentiment bar.
    if "top_aspects" in insights:
        aspects = insights["top_aspects"]

        aspect_table = Table(title="Top Product/Service Aspects", box=box.ROUNDED)
        aspect_table.add_column("Aspect", style="cyan")
        aspect_table.add_column("Sentiment", style="white")
        aspect_table.add_column("Mentions", style="white", justify="right")
        # Renamed from a duplicate "Sentiment" header — this column holds the
        # bar visualization, matching the other tables' "Distribution" column.
        aspect_table.add_column("Distribution", style="yellow")

        for aspect in aspects:
            name = aspect.get("name", "unknown").replace("_", " ").title()
            score = aspect.get("avg_sentiment", 0.0)
            mentions = aspect.get("mention_count", 0)

            # 10-cell bar: green for non-negative scores, red for negative;
            # magnitude |score| maps onto the filled portion (capped at 10).
            color = "green" if score >= 0 else "red"
            bar_length = int(min(abs(score) * 10, 10))
            bar = f"[{color}]{'█' * bar_length}{'░' * (10 - bar_length)}[/{color}]"

            aspect_table.add_row(name, f"[{color}]{score:.2f}[/{color}]", str(mentions), bar)

        console.print(aspect_table)

    # Key topics with mention counts and sentiment color coding.
    if "key_topics" in insights:
        topics = insights["key_topics"]

        topic_table = Table(title="Key Topics Mentioned", box=box.ROUNDED)
        topic_table.add_column("Topic", style="cyan")
        topic_table.add_column("Mentions", style="white", justify="right")
        topic_table.add_column("Avg Sentiment", style="white")

        for topic in topics:
            topic_name = topic.get("topic", "unknown")
            mentions = topic.get("mention_count", 0)
            sentiment = topic.get("avg_sentiment", 0.0)

            # Neutral band is (-0.2, 0.2); outside it is positive/negative.
            color = "green" if sentiment > 0.2 else "red" if sentiment < -0.2 else "yellow"

            topic_table.add_row(topic_name, str(mentions), f"[{color}]{sentiment:.2f}[/{color}]")

        console.print(topic_table)

    # Entity mention frequencies (products, features).
    if "entity_mention_frequencies" in insights:
        entity_freqs = insights["entity_mention_frequencies"]

        if "products" in entity_freqs and entity_freqs["products"]:
            product_table = Table(title="Product Mentions", box=box.ROUNDED)
            product_table.add_column("Product", style="cyan")
            product_table.add_column("Mentions", style="white", justify="right")
            product_table.add_column("Distribution", style="yellow")

            # Scale bars relative to the most-mentioned product.
            max_mentions = max(entity_freqs["products"].values())

            for product, count in sorted(
                entity_freqs["products"].items(), key=lambda x: x[1], reverse=True
            ):
                bar_length = int((count / max_mentions) * 20)
                bar = "█" * bar_length

                product_table.add_row(product, str(count), bar)

            console.print(product_table)

    # Average business metrics rendered as a simple key/value table.
    if "average_metrics" in insights:
        avg_metrics = insights["average_metrics"]

        metrics_table = Table(title="Average Business Metrics", box=box.SIMPLE)
        metrics_table.add_column("Metric", style="cyan")
        metrics_table.add_column("Value", style="white")

        for key, value in avg_metrics.items():
            metrics_table.add_row(key.replace("_", " ").title(), f"{value:.2f}")

        console.print(Panel(metrics_table, border_style="green"))
1190 | 
1191 | 
def _display_high_risk_items(individual_results: List[Dict[str, Any]]) -> None:
    """Display high-risk items from batch analysis.

    Scans each analyzed feedback item for elevated churn probability
    (> 0.6) or high/critical response urgency, then renders a Rich table
    plus an attention panel for any matches. Prints nothing when no item
    crosses a risk threshold.

    Args:
        individual_results: Per-item batch results; each entry is expected
            to carry "text_id", "text_preview", and a nested
            "analysis"/"risk_assessment" dict.
    """
    # Collect items whose risk assessment crosses either threshold.
    high_risk_items = []

    for item in individual_results:
        if "analysis" in item and "risk_assessment" in item["analysis"]:
            risk_assessment = item["analysis"]["risk_assessment"]

            # Churn risk: probability above 0.6 counts as high.
            churn_risk = False
            if (
                "churn_probability" in risk_assessment
                and risk_assessment["churn_probability"] > 0.6
            ):
                churn_risk = True

            # Urgency: "high" or "critical" demands a prompt response.
            urgent_response = False
            if "response_urgency" in risk_assessment and risk_assessment["response_urgency"] in [
                "high",
                "critical",
            ]:
                urgent_response = True

            # Flag the item if any risk condition is met.
            if churn_risk or urgent_response:
                high_risk_items.append(
                    {
                        "text_id": item["text_id"],
                        "text_preview": item["text_preview"],
                        "churn_risk": risk_assessment.get("churn_probability", 0.0),
                        "urgency": risk_assessment.get("response_urgency", "low"),
                    }
                )

    # Display high-risk items if any found.
    if high_risk_items:
        console.print(Rule("[bold red]High-Risk Feedback Items[/bold red]"))

        risk_table = Table(box=box.ROUNDED)
        risk_table.add_column("ID", style="dim")
        risk_table.add_column("Preview", style="white")
        risk_table.add_column("Churn Risk", style="red")
        risk_table.add_column("Response Urgency", style="yellow")

        for item in high_risk_items:
            churn_risk = item["churn_risk"]
            churn_color = "red" if churn_risk > 0.6 else "yellow" if churn_risk > 0.3 else "green"

            urgency = item["urgency"]
            # "orange" is not a valid Rich color name and would raise at
            # render time; "orange1" is in Rich's standard color palette.
            urgency_color = (
                "red" if urgency == "critical" else "orange1" if urgency == "high" else "yellow"
            )

            risk_table.add_row(
                str(item["text_id"]),
                item["text_preview"],
                f"[{churn_color}]{churn_risk:.2f}[/{churn_color}]",
                f"[{urgency_color}]{urgency.upper()}[/{urgency_color}]",
            )

        console.print(risk_table)

        # Call out that these items need immediate human attention.
        console.print(
            Panel(
                "⚠️ [bold]Attention needed![/bold] The highlighted feedback items indicate significant business risks and should be addressed immediately by the appropriate teams.",
                border_style="red",
            )
        )
1262 | 
1263 | 
async def main():
    """Run business sentiment analysis demos."""
    print("Starting sentiment analysis demo...")
    tracker = CostTracker()  # Accumulates LLM spend across all demo sections
    try:
        # One shared gateway instance for every demo section.
        gateway = Gateway("business-sentiment-demo", register_tools=False)

        logger.info("Initializing providers...", emoji_key="provider")
        await gateway._initialize_providers()

        # Each demo section: (announcement message, coroutine function).
        demo_sections = [
            ("Running individual feedback analysis...", analyze_single_feedback),
            ("Running analysis mode comparison...", compare_analysis_modes),
            ("Running support ticket risk analysis...", analyze_support_ticket_with_risk),
            ("Running batch analysis...", run_batch_analysis),
        ]
        last_index = len(demo_sections) - 1
        for index, (message, section) in enumerate(demo_sections):
            print(message)
            await section(gateway, tracker)
            if index < last_index:
                console.print()  # Blank line between sections

        # Summarize total cost at the end of the run.
        tracker.display_summary(console)

    except Exception as e:
        logger.critical(f"Demo failed: {str(e)}", emoji_key="critical", exc_info=True)
        print(f"Demo failed with error: {str(e)}")
        import traceback

        traceback.print_exc()
        return 1

    logger.success("Business sentiment analysis demo completed successfully", emoji_key="complete")
    print("Demo completed successfully!")
    return 0
1313 | 
1314 | 
1315 | if __name__ == "__main__":
1316 |     # Run the demo
1317 |     exit_code = asyncio.run(main())
1318 |     sys.exit(exit_code)
1319 | 
```

--------------------------------------------------------------------------------
/examples/unified_memory_system_demo.py:
--------------------------------------------------------------------------------

```python
   1 | #!/usr/bin/env python
   2 | import asyncio
   3 | import sys
   4 | import time
   5 | import traceback
   6 | from pathlib import Path
   7 | from typing import Any, Dict, Optional
   8 | 
   9 | 
  10 | def _fmt_id(val: Any, length: int = 8) -> str:
  11 |     """Return a short id string safe for logs."""
  12 |     s = str(val) if val is not None else "?"
  13 |     # Ensure slicing doesn't go out of bounds if string is shorter than length
  14 |     return s[: min(length, len(s))]
  15 | 
  16 | 
# --- Project Setup ---
# Add project root to path for imports when running as script.
# Walks upward from this file looking for a directory that contains either
# the server or client package; adjust if your layout differs.
try:
    SCRIPT_DIR = Path(__file__).resolve().parent
    # Navigate up until we find a directory likely containing the project modules
    PROJECT_ROOT = SCRIPT_DIR
    while (
        not (PROJECT_ROOT / "ultimate_mcp_server").is_dir()
        and not (PROJECT_ROOT / "ultimate_mcp_client").is_dir()
        and PROJECT_ROOT.parent != PROJECT_ROOT
    ):  # Prevent infinite loop: stop at the filesystem root
        PROJECT_ROOT = PROJECT_ROOT.parent

    # Neither marker directory found anywhere up the tree.
    if (
        not (PROJECT_ROOT / "ultimate_mcp_server").is_dir()
        and not (PROJECT_ROOT / "ultimate_mcp_client").is_dir()
    ):
        print(
            f"Error: Could not reliably determine project root from {SCRIPT_DIR}.", file=sys.stderr
        )
        # Fallback: Add script dir anyway, maybe it's flat structure
        if str(SCRIPT_DIR) not in sys.path:
            sys.path.insert(0, str(SCRIPT_DIR))
            print(
                f"Warning: Added script directory {SCRIPT_DIR} to path as fallback.",
                file=sys.stderr,
            )
        else:
            # Script dir was already importable and markers still missing:
            # nothing more we can try, so abort.
            sys.exit(1)  # Give up if markers not found after traversing up

    # Make project-local packages importable with highest precedence.
    if str(PROJECT_ROOT) not in sys.path:
        sys.path.insert(0, str(PROJECT_ROOT))

except Exception as e:
    # Any unexpected failure here means imports below would fail anyway.
    print(f"Error setting up sys.path: {e}", file=sys.stderr)
    sys.exit(1)
  54 | 
  55 | from rich.console import Console  # noqa: E402
  56 | from rich.markup import escape  # noqa: E402 
  57 | from rich.panel import Panel  # noqa: E402
  58 | from rich.pretty import pretty_repr  # noqa: E402
  59 | from rich.rule import Rule  # noqa: E402
  60 | from rich.traceback import install as install_rich_traceback  # noqa: E402
  61 | 
  62 | from ultimate_mcp_server.config import get_config  # noqa: E402
  63 | 
  64 | # Tools and related components from unified_memory
  65 | from ultimate_mcp_server.tools.unified_memory_system import (  # noqa: E402
  66 |     ActionStatus,
  67 |     ActionType,
  68 |     ArtifactType,
  69 |     # Utilities/Enums/Exceptions needed
  70 |     DBConnection,
  71 |     LinkType,
  72 |     MemoryLevel,
  73 |     MemoryType,
  74 |     ThoughtType,
  75 |     ToolError,
  76 |     ToolInputError,
  77 |     # Action Dependency Tools
  78 |     add_action_dependency,
  79 |     auto_update_focus,
  80 |     compute_memory_statistics,
  81 |     consolidate_memories,
  82 |     create_memory_link,
  83 |     # Thought
  84 |     create_thought_chain,
  85 |     # Workflow
  86 |     create_workflow,
  87 |     delete_expired_memories,
  88 |     focus_memory,
  89 |     generate_reflection,
  90 |     generate_workflow_report,
  91 |     get_action_dependencies,
  92 |     get_action_details,
  93 |     get_artifact_by_id,
  94 |     get_artifacts,
  95 |     get_linked_memories,
  96 |     get_memory_by_id,
  97 |     get_recent_actions,
  98 |     get_thought_chain,
  99 |     get_workflow_context,
 100 |     get_workflow_details,
 101 |     # Working Memory / State
 102 |     get_working_memory,
 103 |     hybrid_search_memories,
 104 |     # Initialization
 105 |     initialize_memory_system,
 106 |     list_workflows,
 107 |     load_cognitive_state,
 108 |     optimize_working_memory,
 109 |     promote_memory_level,
 110 |     query_memories,
 111 |     record_action_completion,
 112 |     # Action
 113 |     record_action_start,
 114 |     # Artifacts
 115 |     record_artifact,
 116 |     record_thought,
 117 |     save_cognitive_state,
 118 |     search_semantic_memories,
 119 |     # Core Memory
 120 |     store_memory,
 121 |     summarize_text,
 122 |     update_memory,
 123 |     update_workflow_status,
 124 |     visualize_memory_network,
 125 |     visualize_reasoning_chain,
 126 | )
 127 | 
 128 | # Utilities from the project
 129 | from ultimate_mcp_server.utils import get_logger  # noqa: E402
 130 | 
# Shared Rich console used by every demo section for formatted output.
console = Console()
logger = get_logger("demo.unified_memory")
config = get_config()  # Load config to ensure provider keys might be found

# Pretty tracebacks for any uncaught exception during the demo.
install_rich_traceback(show_locals=False, width=console.width)

# Global to hold the DB path being used; safe_tool_call injects it into
# tool arguments whenever the caller does not pass db_path explicitly.
DEMO_DB_FILE: Optional[str] = config.agent_memory.db_path
 138 | 
 139 | 
 140 | async def safe_tool_call(func, args: Dict, description: str, suppress_output: bool = False):
 141 |     """Helper to call a tool function, catch errors, and display results."""
 142 |     display_title = not suppress_output
 143 |     display_args = not suppress_output
 144 |     display_result_panel = not suppress_output
 145 | 
 146 |     if display_title:
 147 |         title = f"DEMO: {description}"
 148 |         console.print(Rule(f"[bold blue]{escape(title)}[/bold blue]", style="blue"))
 149 |     if display_args:
 150 |         if args:
 151 |             console.print(f"[dim]Calling [bold cyan]{func.__name__}[/] with args:[/]")
 152 |             try:
 153 |                 # Filter out db_path if it matches the global default for cleaner logs
 154 |                 args_to_print = {
 155 |                     k: v for k, v in args.items() if k != "db_path" or v != DEMO_DB_FILE
 156 |                 }
 157 |                 args_repr = pretty_repr(args_to_print, max_length=120, max_string=100)
 158 |             except Exception:
 159 |                 args_repr = str(args)[:300]
 160 |             console.print(args_repr)
 161 |         else:
 162 |             console.print(f"[dim]Calling [bold cyan]{func.__name__}[/] (no arguments)[/]")
 163 | 
 164 |     start_time = time.monotonic()
 165 |     result = None
 166 |     try:
 167 |         # Use the global DEMO_DB_FILE if db_path isn't explicitly in args
 168 |         if "db_path" not in args and DEMO_DB_FILE:
 169 |             args["db_path"] = DEMO_DB_FILE
 170 | 
 171 |         result = await func(**args)
 172 | 
 173 |         processing_time = time.monotonic() - start_time
 174 |         log_func = getattr(logger, "debug", print)
 175 |         log_func(f"Tool '{func.__name__}' execution time: {processing_time:.4f}s")
 176 | 
 177 |         if display_result_panel:
 178 |             success = isinstance(result, dict) and result.get("success", False)
 179 |             panel_title = f"[bold {'green' if success else 'yellow'}]Result: {func.__name__} {'✅' if success else '❔'}[/]"
 180 |             panel_border = "green" if success else "yellow"
 181 | 
 182 |             try:
 183 |                 # Handle specific large/complex outputs
 184 |                 if func.__name__ == "generate_workflow_report" and result.get("report"):
 185 |                     report_preview = str(result["report"])[:500] + (
 186 |                         "..." if len(str(result["report"])) > 500 else ""
 187 |                     )
 188 |                     result_repr = f"Report Format: {result.get('format')}\nStyle: {result.get('style_used')}\nPreview:\n---\n{report_preview}\n---"
 189 |                 elif func.__name__ in [
 190 |                     "visualize_reasoning_chain",
 191 |                     "visualize_memory_network",
 192 |                 ] and result.get("visualization"):
 193 |                     viz_preview = str(result["visualization"])[:500] + (
 194 |                         "..." if len(str(result["visualization"])) > 500 else ""
 195 |                     )
 196 |                     result_repr = f"Visualization Format: {result.get('format')}\nContent Preview:\n---\n{viz_preview}\n---"
 197 |                 elif func.__name__ == "summarize_text" and result.get("summary"):
 198 |                     summary_preview = str(result["summary"])[:500] + (
 199 |                         "..." if len(str(result["summary"])) > 500 else ""
 200 |                     )
 201 |                     result_repr = f"Summary Preview:\n---\n{summary_preview}\n---"
 202 |                 elif func.__name__ == "consolidate_memories" and result.get("consolidated_content"):
 203 |                     content_preview = str(result["consolidated_content"])[:500] + (
 204 |                         "..." if len(str(result["consolidated_content"])) > 500 else ""
 205 |                     )
 206 |                     result_repr = f"Consolidated Content Preview:\n---\n{content_preview}\n---"
 207 |                 elif func.__name__ == "generate_reflection" and result.get("content"):
 208 |                     content_preview = str(result["content"])[:500] + (
 209 |                         "..." if len(str(result["content"])) > 500 else ""
 210 |                     )
 211 |                     result_repr = f"Reflection Content Preview:\n---\n{content_preview}\n---"
 212 |                 else:
 213 |                     result_repr = pretty_repr(result, max_length=200, max_string=150)
 214 |             except Exception:
 215 |                 result_repr = f"(Could not represent result of type {type(result)} fully)\n{str(result)[:500]}"
 216 | 
 217 |             console.print(
 218 |                 Panel(
 219 |                     escape(result_repr), title=panel_title, border_style=panel_border, expand=False
 220 |                 )
 221 |             )
 222 | 
 223 |         return result
 224 | 
 225 |     except (ToolInputError, ToolError) as e:
 226 |         processing_time = time.monotonic() - start_time
 227 |         log_func_error = getattr(logger, "error", print)
 228 |         log_func_error(f"Tool '{func.__name__}' failed: {e}", exc_info=False)
 229 |         if display_result_panel:
 230 |             error_title = f"[bold red]Error: {func.__name__} Failed ❌[/]"
 231 |             error_content = f"[bold red]{type(e).__name__}:[/] {escape(str(e))}"
 232 |             details = None
 233 |             if hasattr(e, "details") and e.details:
 234 |                 details = e.details
 235 |             elif hasattr(e, "context") and e.context:
 236 |                 details = e.context
 237 | 
 238 |             if details:
 239 |                 try:
 240 |                     details_repr = pretty_repr(details)
 241 |                 except Exception:
 242 |                     details_repr = str(details)
 243 |                 error_content += f"\n\n[yellow]Details:[/]\n{escape(details_repr)}"
 244 |             console.print(Panel(error_content, title=error_title, border_style="red", expand=False))
 245 |         return {
 246 |             "success": False,
 247 |             "error": str(e),
 248 |             "error_code": getattr(e, "error_code", "TOOL_ERROR"),
 249 |             "error_type": type(e).__name__,
 250 |             "details": details or {},
 251 |             "isError": True,
 252 |         }
 253 |     except Exception as e:
 254 |         processing_time = time.monotonic() - start_time
 255 |         log_func_critical = getattr(logger, "critical", print)
 256 |         log_func_critical(f"Unexpected error calling '{func.__name__}': {e}", exc_info=True)
 257 |         if display_result_panel:
 258 |             console.print(f"\n[bold red]CRITICAL UNEXPECTED ERROR in {func.__name__}:[/bold red]")
 259 |             console.print_exception(show_locals=False)
 260 |         return {
 261 |             "success": False,
 262 |             "error": f"Unexpected: {str(e)}",
 263 |             "error_code": "UNEXPECTED_ERROR",
 264 |             "error_type": type(e).__name__,
 265 |             "details": {"traceback": traceback.format_exc()},
 266 |             "isError": True,
 267 |         }
 268 |     finally:
 269 |         if display_title:
 270 |             console.print()
 271 | 
 272 | 
 273 | # --- Demo Setup & Teardown ---
 274 | 
 275 | 
 276 | async def setup_demo_environment():
 277 |     """Initialize the memory system using the DEFAULT database file."""
 278 |     global DEMO_DB_FILE
 279 |     DEMO_DB_FILE = config.agent_memory.db_path
 280 |     log_func_info = getattr(logger, "info", print)
 281 |     log_func_info(f"Using default database for demo: {DEMO_DB_FILE}")
 282 |     console.print(
 283 |         Panel(
 284 |             f"Using default database: [cyan]{DEMO_DB_FILE}[/]\n"
 285 |             f"[yellow]NOTE:[/yellow] This demo will operate on the actual development database.",
 286 |             title="Demo Setup",
 287 |             border_style="yellow",
 288 |         )
 289 |     )
 290 | 
 291 |     init_result = await safe_tool_call(
 292 |         initialize_memory_system,
 293 |         {"db_path": DEMO_DB_FILE},
 294 |         "Initialize Memory System",
 295 |     )
 296 |     if not init_result or not init_result.get("success"):
 297 |         console.print(
 298 |             "[bold red]CRITICAL:[/bold red] Failed to initialize memory system. Aborting demo."
 299 |         )
 300 |         console.print(
 301 |             "[yellow]Check DB access and potentially API key configuration/network if init requires them.[/yellow]"
 302 |         )
 303 |         await cleanup_demo_environment()
 304 |         sys.exit(1)
 305 | 
 306 | 
 307 | async def cleanup_demo_environment():
 308 |     """Close DB connection."""
 309 |     global DEMO_DB_FILE
 310 |     log_func_info = getattr(logger, "info", print)
 311 |     log_func_warn = getattr(logger, "warning", print)
 312 | 
 313 |     try:
 314 |         await DBConnection.close_connection()
 315 |         log_func_info("Closed database connection.")
 316 |     except Exception as e:
 317 |         log_func_warn(f"Error closing DB connection during cleanup: {e}")
 318 | 
 319 |     if DEMO_DB_FILE:
 320 |         log_func_info(f"Demo finished using database: {DEMO_DB_FILE}")
 321 |         console.print(f"Demo finished using database: [dim]{DEMO_DB_FILE}[/dim]")
 322 |         DEMO_DB_FILE = None
 323 | 
 324 | 
 325 | # --- Individual Demo Sections ---
 326 | 
 327 | 
 328 | # (Keep existing sections 1-8 as they are)
 329 | async def demonstrate_basic_workflows():
 330 |     """Demonstrate basic workflow CRUD and listing operations."""
 331 |     console.print(Rule("[bold green]1. Basic Workflow Operations[/bold green]", style="green"))
 332 | 
 333 |     # Create
 334 |     create_args = {
 335 |         "title": "Enhanced WF Demo",
 336 |         "goal": "Demonstrate core workflow, action, artifact, and memory linking.",
 337 |         "tags": ["enhanced", "demo", "core"],
 338 |     }
 339 |     wf_result = await safe_tool_call(create_workflow, create_args, "Create Enhanced Workflow")
 340 |     wf_id = wf_result.get("workflow_id") if wf_result.get("success") else None
 341 | 
 342 |     if not wf_id:
 343 |         console.print("[bold red]CRITICAL DEMO FAILURE:[/bold red] Failed to create workflow. Cannot continue basic workflow demo.")
 344 |         return None # Return None to signal failure
 345 | 
 346 |     # Get Details
 347 |     await safe_tool_call(
 348 |         get_workflow_details, {"workflow_id": wf_id}, f"Get Workflow Details ({_fmt_id(wf_id)})"
 349 |     )
 350 | 
 351 |     # List (should show the one we created)
 352 |     await safe_tool_call(list_workflows, {"limit": 5}, "List Workflows (Limit 5)")
 353 | 
 354 |     # List Filtered by Tag
 355 |     await safe_tool_call(list_workflows, {"tag": "enhanced"}, "List Workflows Tagged 'enhanced'")
 356 | 
 357 |     # Update Status (to active for subsequent steps)
 358 |     await safe_tool_call(
 359 |         update_workflow_status,
 360 |         {"workflow_id": wf_id, "status": "active"},
 361 |         f"Ensure Workflow Status is Active ({_fmt_id(wf_id)})",
 362 |     )
 363 | 
 364 |     return wf_id
 365 | 
 366 | 
async def demonstrate_basic_actions(wf_id: Optional[str]):
    """Demonstrate basic action recording, completion, and retrieval.

    Records a planning action (completed) and a simulated tool-use action
    (deliberately marked failed), then fetches action details and recents.

    Args:
        wf_id: Workflow to attach actions to; when None the demo is skipped.

    Returns:
        Dict mapping "action1_id"/"action2_id" to the created action IDs
        (entries present only for actions that started successfully).
    """
    console.print(Rule("[bold green]2. Basic Action Operations[/bold green]", style="green"))
    if not wf_id:
        console.print("[yellow]Skipping action demo: No valid workflow ID provided.[/yellow]")
        return {}  # Return empty dict

    action_ids = {}

    # Record Action 1 Start (e.g., Planning)
    start_args_1 = {
        "workflow_id": wf_id,
        "action_type": ActionType.PLANNING.value,
        "reasoning": "Initial planning phase for the enhanced demo.",
        "title": "Plan Demo Steps",
        "tags": ["planning"],
    }
    action_res_1 = await safe_tool_call(
        record_action_start, start_args_1, "Record Action 1 Start (Planning)"
    )
    action_id_1 = action_res_1.get("action_id") if action_res_1 and action_res_1.get("success") else None # More robust check
    if not action_id_1:
        # Action 1 is a prerequisite for everything below; bail out early.
        console.print("[bold red]CRITICAL DEMO FAILURE:[/bold red] Failed to record start for Action 1. Aborting action demo.")
        return {} # Return empty dict
    action_ids["action1_id"] = action_id_1

    # Record Action 1 Completion (Needs action_id_1, which is now checked)
    complete_args_1 = {
        "action_id": action_id_1,
        "status": ActionStatus.COMPLETED.value,
        "summary": "Planning complete. Next step: data simulation.",
    }
    await safe_tool_call(
        record_action_completion,
        complete_args_1,
        f"Record Action 1 Completion ({_fmt_id(action_id_1)})",
    )

    # Record Action 2 Start (e.g., Tool Use - simulated)
    start_args_2 = {
        "workflow_id": wf_id,
        "action_type": ActionType.TOOL_USE.value,
        "reasoning": "Simulating data generation based on the plan.",
        "tool_name": "simulate_data",
        "tool_args": {"rows": 100, "type": "random"},
        "title": "Simulate Demo Data",
        "tags": ["data", "simulation"],
        "parent_action_id": action_id_1,  # Link to planning action
    }
    action_res_2 = await safe_tool_call(
        record_action_start, start_args_2, "Record Action 2 Start (Simulate Data)"
    )
    action_id_2 = action_res_2.get("action_id") if action_res_2 and action_res_2.get("success") else None # More robust check
    if action_id_2:
        action_ids["action2_id"] = action_id_2
        # Multi-action detail fetch only runs when both IDs are valid:
        await safe_tool_call(
            get_action_details,
            {"action_ids": [action_id_1, action_id_2]}, # Both IDs are valid here
            "Get Action Details (Multiple Actions)",
        )
        # Deliberately complete Action 2 as FAILED to exercise that path.
        complete_args_2 = {
            "action_id": action_id_2,  # Now guaranteed to be non-None
            "status": ActionStatus.FAILED.value,
            "summary": "Simulation failed due to resource limit.",
            "tool_result": {"error": "Timeout", "code": 504},
        }
        await safe_tool_call(
            record_action_completion,
            complete_args_2,
            f"Record Action 2 Completion (Failed - {_fmt_id(action_id_2)})",
        )
    else:
        # Action 2 failed to start: skip its completion and dependency tests.
        console.print("[bold red]Action 2 failed to start. Skipping completion and dependency tests involving Action 2.[/bold red]")
        # Defensive: ensure no stale action2_id entry remains (it was only
        # ever added in the success branch above, so this is a no-op guard).
        if "action2_id" in action_ids:
            del action_ids["action2_id"]
        # The dependency demo later skips itself when action2_id is missing.

    # Get Action Details (Only Action 1 if Action 2 failed) - Moved outside check block
    if action_id_1 and not action_id_2:  # Only fetch Action 1 if Action 2 failed
        await safe_tool_call(
            get_action_details,
            {"action_id": action_id_1},
            f"Get Action Details (Action 1 Only - {_fmt_id(action_id_1)})",
        )

    # Get Recent Actions (should show both)
    await safe_tool_call(
        get_recent_actions, {"workflow_id": wf_id, "limit": 5}, "Get Recent Actions"
    )

    return action_ids
 461 | 
 462 | 
 463 | async def demonstrate_action_dependencies(wf_id: Optional[str], action_ids: Dict):
 464 |     """Demonstrate adding and retrieving action dependencies."""
 465 |     console.print(Rule("[bold green]3. Action Dependency Operations[/bold green]", style="green"))
 466 |     if not wf_id:
 467 |         console.print("[yellow]Skipping dependency demo: No valid workflow ID.[/yellow]")
 468 |         return
 469 |     action1_id = action_ids.get("action1_id")
 470 |     action2_id = action_ids.get("action2_id")
 471 |     if not action1_id or not action2_id:
 472 |         console.print("[yellow]Skipping dependency demo: Need at least two valid action IDs.[/yellow]")
 473 |         return
 474 | 
 475 |     # Add Dependency (Action 2 requires Action 1)
 476 |     await safe_tool_call(
 477 |         add_action_dependency,
 478 |         {
 479 |             "source_action_id": action2_id,
 480 |             "target_action_id": action1_id,
 481 |             "dependency_type": "requires",
 482 |         },
 483 |         f"Add Dependency ({_fmt_id(action2_id)} requires {_fmt_id(action1_id)})",
 484 |     )
 485 | 
 486 |     # Get Dependencies for Action 1 (Should show Action 2 depends on it - Downstream)
 487 |     await safe_tool_call(
 488 |         get_action_dependencies,
 489 |         {"action_id": action1_id, "direction": "downstream"},
 490 |         f"Get Dependencies (Downstream of Action 1 - {_fmt_id(action1_id)})",
 491 |     )
 492 | 
 493 |     # Get Dependencies for Action 2 (Should show it depends on Action 1 - Upstream)
 494 |     await safe_tool_call(
 495 |         get_action_dependencies,
 496 |         {"action_id": action2_id, "direction": "upstream", "include_details": True},
 497 |         f"Get Dependencies (Upstream of Action 2 - {_fmt_id(action2_id)}, with Details)",
 498 |     )
 499 | 
 500 |     # Get Action 1 Details (Include Dependencies)
 501 |     await safe_tool_call(
 502 |         get_action_details,
 503 |         {"action_id": action1_id, "include_dependencies": True},
 504 |         f"Get Action 1 Details ({_fmt_id(action1_id)}), Include Dependencies",
 505 |     )
 506 | 
 507 | 
 508 | async def demonstrate_artifacts(wf_id: Optional[str], action_ids: Dict):
 509 |     """Demonstrate artifact recording and retrieval."""
 510 |     console.print(Rule("[bold green]4. Artifact Operations[/bold green]", style="green"))
 511 |     if not wf_id:
 512 |         console.print("[yellow]Skipping artifact demo: No valid workflow ID provided.[/yellow]")
 513 |         return {}  # Return empty dict
 514 |     action1_id = action_ids.get("action1_id")
 515 |     action2_id = action_ids.get("action2_id") # May be None if Action 2 failed
 516 | 
 517 |     artifact_ids = {}
 518 | 
 519 |     # Record Artifact 1 (e.g., Plan document from Action 1)
 520 |     artifact_args_1 = {
 521 |         "workflow_id": wf_id,
 522 |         "action_id": action1_id,
 523 |         "name": "demo_plan.txt",
 524 |         "artifact_type": ArtifactType.FILE.value,  # Use enum value
 525 |         "description": "Initial plan for the demo steps.",
 526 |         "path": "/path/to/demo_plan.txt",
 527 |         "content": "Step 1: Plan\nStep 2: Simulate\nStep 3: Analyze",  # Small content example
 528 |         "tags": ["planning", "document"],
 529 |         "is_output": False,
 530 |     }
 531 |     art_res_1 = await safe_tool_call(
 532 |         record_artifact, artifact_args_1, "Record Artifact 1 (Plan Doc)"
 533 |     )
 534 |     art_id_1 = art_res_1.get("artifact_id") if art_res_1 and art_res_1.get("success") else None # Robust check
 535 |     if not art_id_1:
 536 |         console.print("[bold red]DEMO WARNING:[/bold red] Failed to record Artifact 1. Subsequent steps needing art1_id might fail.")
 537 |         # Don't abort, but warn
 538 |     else:
 539 |         artifact_ids["art1_id"] = art_id_1
 540 | 
 541 |     # Record Artifact 2 (e.g., Error log from Action 2)
 542 |     artifact_args_2 = {
 543 |         "workflow_id": wf_id,
 544 |         "action_id": action2_id,
 545 |         "name": "simulation_error.log",
 546 |         "artifact_type": ArtifactType.TEXT.value,
 547 |         "description": "Error log from the failed data simulation.",
 548 |         "content": "ERROR: Timeout waiting for resource. Code 504.",
 549 |         "tags": ["error", "log", "simulation"],
 550 |     }
 551 |     art_res_2 = await safe_tool_call(
 552 |         record_artifact, artifact_args_2, "Record Artifact 2 (Error Log)"
 553 |     )
 554 |     art_id_2 = art_res_2.get("artifact_id") if art_res_2.get("success") else None
 555 | 
 556 |     # --- ADD CHECK before recording Artifact 2 ---
 557 |     if not action2_id:
 558 |         console.print("[yellow]Skipping Artifact 2 recording: Action 2 ID is not available (likely failed to start).[/yellow]")
 559 |     else:
 560 |         # Proceed with recording Artifact 2 only if action2_id exists
 561 |         artifact_args_2["action_id"] = action2_id # Assign the valid ID
 562 |         art_res_2 = await safe_tool_call(
 563 |             record_artifact, artifact_args_2, "Record Artifact 2 (Error Log)"
 564 |         )
 565 |         art_id_2 = art_res_2.get("artifact_id") if art_res_2 and art_res_2.get("success") else None
 566 |         if art_id_2:
 567 |             artifact_ids["art2_id"] = art_id_2
 568 |         else:
 569 |              console.print("[bold red]DEMO WARNING:[/bold red] Failed to record Artifact 2.")
 570 | 
 571 |     # Get Artifacts (List all for workflow)
 572 |     await safe_tool_call(
 573 |         get_artifacts, {"workflow_id": wf_id, "limit": 5}, "Get Artifacts (List for Workflow)"
 574 |     )
 575 | 
 576 |     # Get Artifacts (Filter by tag 'planning')
 577 |     await safe_tool_call(
 578 |         get_artifacts,
 579 |         {"workflow_id": wf_id, "tag": "planning"},
 580 |         "Get Artifacts (Filter by Tag 'planning')",
 581 |     )
 582 | 
 583 |     # Get Artifact by ID (Get the plan doc)
 584 |     if art_id_1:
 585 |         await safe_tool_call(
 586 |             get_artifact_by_id,
 587 |             {"artifact_id": art_id_1, "include_content": True},
 588 |             f"Get Artifact by ID ({_fmt_id(art_id_1)}, Include Content)",
 589 |         )
 590 |     else:
 591 |         console.print("[yellow]Skipping 'Get Artifact by ID' for Artifact 1 as it failed to record.[/yellow]")
 592 | 
 593 |     return artifact_ids
 594 | 
 595 | 
 596 | async def demonstrate_thoughts_and_linking(
 597 |     wf_id: Optional[str], action_ids: Dict, artifact_ids: Dict
 598 | ):
 599 |     """Demonstrate thought chains, recording thoughts, and linking them."""
 600 |     console.print(Rule("[bold green]5. Thought Operations & Linking[/bold green]", style="green"))
 601 |     if not wf_id:
 602 |         console.print("[yellow]Skipping thought demo: No valid workflow ID provided.[/yellow]")
 603 |         return None
 604 |     action1_id = action_ids.get("action1_id")  # noqa: F841
 605 |     action2_id = action_ids.get("action2_id") # Might be None
 606 |     art1_id = artifact_ids.get("art1_id") # Might be None if artifact demo failed
 607 | 
 608 |     # Check if prerequisite artifact exists before linking
 609 |     if not art1_id:
 610 |         console.print("[yellow]Skipping thought demo: Planning artifact ID (art1_id) not available.[/yellow]")
 611 |         return None
 612 |     
 613 |     # Create a new thought chain
 614 |     chain_args = {
 615 |         "workflow_id": wf_id,
 616 |         "title": "Analysis Thought Chain",
 617 |         "initial_thought": "Review the plan artifact.",
 618 |         "initial_thought_type": ThoughtType.PLAN.value,
 619 |     }
 620 |     chain_res = await safe_tool_call(
 621 |         create_thought_chain, chain_args, "Create New Thought Chain (Analysis)"
 622 |     )
 623 |     chain_id = chain_res.get("thought_chain_id") if chain_res and chain_res.get("success") else None # Robust check
 624 |     
 625 |     if not chain_id:
 626 |        console.print("[bold red]CRITICAL DEMO FAILURE:[/bold red] Failed to create thought chain. Aborting thought demo.")
 627 |        return None
 628 | 
 629 |     # Record a thought linked to the plan artifact
 630 |     thought_args_1 = {
 631 |         "workflow_id": wf_id,
 632 |         "thought_chain_id": chain_id,
 633 |         "content": "The plan seems straightforward but lacks detail on simulation parameters.",
 634 |         "thought_type": ThoughtType.CRITIQUE.value,
 635 |         "relevant_artifact_id": art1_id,  # Link to the plan artifact
 636 |     }
 637 |     thought_res_1 = await safe_tool_call(
 638 |         record_thought, thought_args_1, "Record Thought (Critique Linked to Artifact)"
 639 |     )
 640 |     thought1_id = thought_res_1.get("thought_id") if thought_res_1.get("success") else None
 641 | 
 642 |     if not thought1_id:
 643 |         console.print("[bold red]DEMO WARNING:[/bold red] Failed to record thought 1. Subsequent linked thought might fail or be unlinked.")
 644 |     
 645 |     # Record second thought (depends on action2_id, thought1_id)
 646 |     if not action2_id:
 647 |          console.print("[yellow]Skipping recording thought 2: Action 2 ID is missing.[/yellow]")
 648 |     elif not thought1_id:
 649 |          console.print("[yellow]Skipping recording thought 2: Thought 1 ID is missing.[/yellow]")
 650 |          # Record thought 2 without parent link if action2_id exists but thought1_id doesn't?
 651 |          thought_args_2_no_parent = {
 652 |              "workflow_id": wf_id,
 653 |              "thought_chain_id": chain_id,
 654 |              "content": "The simulation failure needs investigation. Was it transient or configuration?",
 655 |              "thought_type": ThoughtType.QUESTION.value,
 656 |              "relevant_action_id": action2_id, # Action 2 ID exists here
 657 |          }
 658 |          await safe_tool_call(
 659 |              record_thought, thought_args_2_no_parent, "Record Thought (Question Linked to Action, NO PARENT)"
 660 |          )
 661 |     else:
 662 |         # Record another thought linked to the failed action
 663 |         thought_args_2 = {
 664 |             "workflow_id": wf_id,
 665 |             "thought_chain_id": chain_id,
 666 |             "content": "The simulation failure needs investigation. Was it transient or configuration?",
 667 |             "thought_type": ThoughtType.QUESTION.value,
 668 |             "relevant_action_id": action_ids.get("action2_id"),  # Link to failed action
 669 |             "parent_thought_id": thought1_id,  # Link to previous thought
 670 |         }
 671 | 
 672 |     await safe_tool_call(
 673 |         record_thought, thought_args_2, "Record Thought (Question Linked to Action)"
 674 |     )
 675 | 
 676 |     # Get the thought chain details (should show linked thoughts)
 677 |     await safe_tool_call(
 678 |         get_thought_chain,
 679 |         {"thought_chain_id": chain_id},
 680 |         f"Get Analysis Thought Chain Details ({_fmt_id(chain_id)})",
 681 |     )
 682 | 
 683 |     return chain_id
 684 | 
 685 | 
 686 | async def demonstrate_memory_operations(wf_id: Optional[str], action_ids: Dict, thought_ids: Dict):
 687 |     """Demonstrate memory storage, querying, linking."""
 688 |     console.print(Rule("[bold green]6. Memory Operations & Querying[/bold green]", style="green"))
 689 |     if not wf_id:
 690 |         console.print("[yellow]Skipping memory demo: No valid workflow ID provided.[/yellow]")
 691 |         return {}  # Return empty dict
 692 | 
 693 |     mem_ids = {}
 694 | 
 695 |     action1_id = action_ids.get("action1_id") # Might be None  # noqa: F841
 696 |     action2_id = action_ids.get("action2_id") # Might be None  # noqa: F841
 697 | 
 698 | 
 699 |     # Store Memory 1 (Related to Planning Action)
 700 |     store_args_1 = {
 701 |         "workflow_id": wf_id,
 702 |         "action_id": action_ids.get("action1_id"),
 703 |         "content": "The initial plan involves simulation and analysis.",
 704 |         "memory_type": MemoryType.SUMMARY.value,
 705 |         "memory_level": MemoryLevel.EPISODIC.value,
 706 |         "description": "Summary of initial plan",
 707 |         "tags": ["planning", "summary"],
 708 |         "generate_embedding": False,  # Set False explicitly for baseline
 709 |     }
 710 |     mem_res_1 = await safe_tool_call(store_memory, store_args_1, "Store Memory 1 (Plan Summary)")
 711 |     mem1_id = mem_res_1.get("memory_id") if mem_res_1.get("success") else None
 712 |     if mem1_id:
 713 |         mem_ids["mem1_id"] = mem1_id
 714 | 
 715 |     # Store Memory 2 (Related to Failed Action)
 716 |     store_args_2 = {
 717 |         "workflow_id": wf_id,
 718 |         "action_id": action_ids.get("action2_id"),
 719 |         "content": "Data simulation failed with a timeout error (Code 504).",
 720 |         "memory_type": MemoryType.OBSERVATION.value,
 721 |         "memory_level": MemoryLevel.EPISODIC.value,
 722 |         "description": "Simulation failure detail",
 723 |         "importance": 7.0,  # Failed actions might be important
 724 |         "tags": ["error", "simulation", "observation"],
 725 |         "generate_embedding": False,
 726 |     }
 727 |     mem_res_2 = await safe_tool_call(
 728 |         store_memory, store_args_2, "Store Memory 2 (Simulation Error)"
 729 |     )
 730 |     mem2_id = mem_res_2.get("memory_id") if mem_res_2.get("success") else None
 731 |     if mem2_id:
 732 |         mem_ids["mem2_id"] = mem2_id
 733 | 
 734 |     # Store Memory 3 (A more general fact)
 735 |     store_args_3 = {
 736 |         "workflow_id": wf_id,
 737 |         "content": "Timeout errors often indicate resource contention or configuration issues.",
 738 |         "memory_type": MemoryType.FACT.value,
 739 |         "memory_level": MemoryLevel.SEMANTIC.value,
 740 |         "description": "General knowledge about timeouts",
 741 |         "importance": 6.0,
 742 |         "confidence": 0.9,
 743 |         "tags": ["error", "knowledge", "fact"],
 744 |         "generate_embedding": False,
 745 |     }
 746 |     mem_res_3 = await safe_tool_call(store_memory, store_args_3, "Store Memory 3 (Timeout Fact)")
 747 |     mem3_id = mem_res_3.get("memory_id") if mem_res_3.get("success") else None
 748 |     if mem3_id:
 749 |         mem_ids["mem3_id"] = mem3_id
 750 | 
 751 |     # Link Memory 2 (Error) -> Memory 3 (Fact)
 752 |     if mem2_id and mem3_id:
 753 |         await safe_tool_call(
 754 |             create_memory_link,
 755 |             {
 756 |                 "source_memory_id": mem2_id,
 757 |                 "target_memory_id": mem3_id,
 758 |                 "link_type": LinkType.REFERENCES.value,
 759 |                 "description": "Error relates to general timeout knowledge",
 760 |             },
 761 |             f"Link Error ({_fmt_id(mem2_id)}) to Fact ({_fmt_id(mem3_id)})",
 762 |         )
 763 | 
 764 |         # Get Linked Memories for the Error Memory
 765 |         await safe_tool_call(
 766 |             get_linked_memories,
 767 |             {"memory_id": mem2_id, "direction": "outgoing", "include_memory_details": True},
 768 |             f"Get Outgoing Linked Memories for Error ({_fmt_id(mem2_id)})",
 769 |         )
 770 | 
 771 |     # Query Memories using FTS
 772 |     await safe_tool_call(
 773 |         query_memories,
 774 |         {"workflow_id": wf_id, "search_text": "simulation error timeout"},
 775 |         "Query Memories (FTS: 'simulation error timeout')",
 776 |     )
 777 | 
 778 |     # Query Memories by Importance Range
 779 |     await safe_tool_call(
 780 |         query_memories,
 781 |         {"workflow_id": wf_id, "min_importance": 6.5, "sort_by": "importance"},
 782 |         "Query Memories (Importance >= 6.5)",
 783 |     )
 784 | 
 785 |     # Query Memories by Memory Type
 786 |     await safe_tool_call(
 787 |         query_memories,
 788 |         {"workflow_id": wf_id, "memory_type": MemoryType.FACT.value},
 789 |         "Query Memories (Type: Fact)",
 790 |     )
 791 | 
 792 |     # Update Memory 1's tags
 793 |     if mem1_id:
 794 |         await safe_tool_call(
 795 |             update_memory,
 796 |             {"memory_id": mem1_id, "tags": ["planning", "summary", "initial_phase"]},
 797 |             f"Update Memory 1 Tags ({_fmt_id(mem1_id)})",
 798 |         )
 799 |         # Verify update
 800 |         await safe_tool_call(
 801 |             get_memory_by_id,
 802 |             {"memory_id": mem1_id},
 803 |             f"Get Memory 1 After Tag Update ({_fmt_id(mem1_id)})",
 804 |         )
 805 | 
 806 |     # Example: Record a thought linked to a memory
 807 |     if mem3_id and thought_ids:  # Assuming demonstrate_thoughts ran successfully
 808 |         thought_chain_id_str = thought_ids.get("main_chain_id")
 809 |         if not thought_chain_id_str:
 810 |             console.print(
 811 |                 "[yellow]Skipping thought link to memory: main_chain_id not found in thought_ids dict.[/yellow]"
 812 |             )
 813 |         else:
 814 |             thought_args_link = {
 815 |                 "workflow_id": wf_id,
 816 |                 "thought_chain_id": thought_chain_id_str,  # Pass the string ID
 817 |                 "content": "Based on the general knowledge about timeouts, need to check server logs.",
 818 |                 "thought_type": ThoughtType.PLAN.value,
 819 |                 "relevant_memory_id": mem3_id,  # Link to the Fact memory
 820 |             }
 821 |             await safe_tool_call(
 822 |                 record_thought,
 823 |                 thought_args_link,
 824 |                 f"Record Thought Linked to Memory ({_fmt_id(mem3_id)})",
 825 |             )
 826 |     elif not thought_ids:
 827 |         console.print(
 828 |             "[yellow]Skipping thought link to memory: thought_ids dict is empty or None.[/yellow]"
 829 |         )
 830 | 
 831 |     return mem_ids
 832 | 
 833 | 
 834 | async def demonstrate_embedding_and_search(wf_id: Optional[str], mem_ids: Dict, thought_ids: Dict):
 835 |     """Demonstrate embedding generation and semantic/hybrid search."""
 836 |     console.print(Rule("[bold green]7. Embedding & Semantic Search[/bold green]", style="green"))
 837 |     if not wf_id:
 838 |         console.print("[yellow]Skipping embedding demo: No valid workflow ID.[/yellow]")
 839 |         return  # Return immediately if no workflow ID
 840 |     mem1_id = mem_ids.get("mem1_id")  # Plan summary
 841 |     mem2_id = mem_ids.get("mem2_id")  # Simulation error
 842 |     mem3_id = mem_ids.get("mem3_id")  # Timeout fact
 843 | 
 844 |     if not mem1_id or not mem2_id or not mem3_id:
 845 |         console.print(
 846 |             "[yellow]Skipping embedding demo: Missing required memory IDs from previous steps.[/yellow]"
 847 |         )
 848 |         return  # Return immediately if prerequisite memories are missing
 849 | 
 850 |     # 1. Update Memory 2 (Error) to generate embedding
 851 |     # This relies on the embedding service being functional (API key configured)
 852 |     console.print(
 853 |         "[yellow]Attempting to generate embeddings. Requires configured Embedding Service (e.g., OpenAI API key).[/yellow]"
 854 |     )
 855 |     update_res = await safe_tool_call(
 856 |         update_memory,
 857 |         {
 858 |             "memory_id": mem2_id,
 859 |             "regenerate_embedding": True,
 860 |         },
 861 |         f"Update Memory 2 ({_fmt_id(mem2_id)}) to Generate Embedding",
 862 |     )
 863 |     if not (update_res and update_res.get("success") and update_res.get("embedding_regenerated")):
 864 |         console.print(
 865 |             "[red]   -> Failed to generate embedding for Memory 2. Semantic/Hybrid search may not work as expected.[/red]"
 866 |         )
 867 | 
 868 |     # 2. Store a new memory WITH embedding generation enabled
 869 |     store_args_4 = {
 870 |         "workflow_id": wf_id,
 871 |         "content": "Investigating the root cause of the simulation timeout is the next priority.",
 872 |         "memory_type": MemoryType.PLAN.value,
 873 |         "memory_level": MemoryLevel.EPISODIC.value,
 874 |         "description": "Next step planning",
 875 |         "importance": 7.5,
 876 |         "tags": ["investigation", "planning", "error_handling"],
 877 |         "generate_embedding": True,  # Explicitly enable
 878 |     }
 879 |     mem_res_4 = await safe_tool_call(
 880 |         store_memory, store_args_4, "Store Memory 4 (Next Step Plan) with Embedding"
 881 |     )
 882 |     mem4_id = mem_res_4.get("memory_id") if mem_res_4.get("success") else None
 883 |     if mem4_id:
 884 |         mem_ids["mem4_id"] = mem4_id  # Add to our tracked IDs
 885 | 
 886 |     # Check if embedding was actually generated for Mem4
 887 |     if mem4_id:
 888 |         mem4_details = await safe_tool_call(
 889 |             get_memory_by_id,
 890 |             {"memory_id": mem4_id},
 891 |             f"Check Memory 4 Details ({_fmt_id(mem4_id)})",
 892 |             suppress_output=True,
 893 |         )
 894 |         if mem4_details and mem4_details.get("success") and mem4_details.get("embedding_id"):
 895 |             console.print(
 896 |                 f"[green]   -> Embedding ID confirmed for Memory 4: {_fmt_id(mem4_details['embedding_id'])}[/green]"
 897 |             )
 898 |         else:
 899 |             console.print(
 900 |                 "[yellow]   -> Warning: Embedding ID missing for Memory 4. Embedding generation likely failed.[/yellow]"
 901 |             )
 902 |             console.print("[dim]      (Semantic/Hybrid search results may be limited.)[/dim]")
 903 | 
 904 |     # 3. Semantic Search
 905 |     await safe_tool_call(
 906 |         search_semantic_memories,
 907 |         {
 908 |             "workflow_id": wf_id,
 909 |             "query": "problems with simulation performance",
 910 |             "limit": 3,
 911 |             "threshold": 0.5,
 912 |         },
 913 |         "Semantic Search: 'problems with simulation performance'",
 914 |     )
 915 |     await safe_tool_call(
 916 |         search_semantic_memories,
 917 |         {
 918 |             "workflow_id": wf_id,
 919 |             "query": "next actions to take",
 920 |             "limit": 2,
 921 |             "memory_level": MemoryLevel.EPISODIC.value,
 922 |         },
 923 |         "Semantic Search: 'next actions to take' (Episodic only)",
 924 |     )
 925 | 
 926 |     # 4. Hybrid Search
 927 |     await safe_tool_call(
 928 |         hybrid_search_memories,
 929 |         {
 930 |             "workflow_id": wf_id,
 931 |             "query": "investigate timeout simulation",
 932 |             "limit": 4,
 933 |             "semantic_weight": 0.6,
 934 |             "keyword_weight": 0.4,
 935 |             "tags": ["error"],
 936 |             "include_content": False,
 937 |         },
 938 |         "Hybrid Search: 'investigate timeout simulation' + tag 'error'",
 939 |     )
 940 | 
 941 |     # 5. Demonstrate link suggestions
 942 |     # Update Mem3 (Timeout fact) to generate embedding
 943 |     update_res_3 = await safe_tool_call(
 944 |         update_memory,
 945 |         {"memory_id": mem3_id, "regenerate_embedding": True},
 946 |         f"Update Memory 3 ({_fmt_id(mem3_id)}) to Generate Embedding",
 947 |     )
 948 |     if not (
 949 |         update_res_3 and update_res_3.get("success") and update_res_3.get("embedding_regenerated")
 950 |     ):
 951 |         console.print(
 952 |             "[red]   -> Failed to generate embedding for Memory 3. Link suggestion test might fail.[/red]"
 953 |         )
 954 | 
 955 |     # --- Store Memory 5 (Hypothesis) ---
 956 |     hypothesis_content = "Resource limits on the simulation server might be too low."
 957 |     thought_chain_id = thought_ids.get("main_chain_id") if isinstance(thought_ids, dict) else None
 958 |     hypothesis_thought_id = None
 959 |     if thought_chain_id:
 960 |         thought_args_hyp = {
 961 |             "workflow_id": wf_id,
 962 |             "thought_chain_id": thought_chain_id,
 963 |             "content": hypothesis_content,
 964 |             "thought_type": ThoughtType.HYPOTHESIS.value,
 965 |             "relevant_memory_id": mem3_id,
 966 |         }
 967 |         hyp_thought_res = await safe_tool_call(
 968 |             record_thought, thought_args_hyp, "Record Hypothesis Thought"
 969 |         )
 970 |         hypothesis_thought_id = (
 971 |             hyp_thought_res.get("thought_id") if hyp_thought_res.get("success") else None
 972 |         )
 973 |     else:
 974 |         console.print(
 975 |             "[yellow]Skipping hypothesis memory storage: Could not get thought chain ID.[/yellow]"
 976 |         )
 977 | 
 978 |     mem5_id = None
 979 |     mem_res_5 = None
 980 |     if hypothesis_thought_id:
 981 |         store_args_5 = {
 982 |             "workflow_id": wf_id,
 983 |             "thought_id": hypothesis_thought_id,
 984 |             "content": hypothesis_content,
 985 |             "memory_type": MemoryType.REASONING_STEP.value,
 986 |             "memory_level": MemoryLevel.SEMANTIC.value,
 987 |             "description": "Hypothesis on timeout cause",
 988 |             "importance": 6.5,
 989 |             "confidence": 0.6,
 990 |             "tags": ["hypothesis", "resource", "error", "reasoning_step"],
 991 |             "generate_embedding": True,
 992 |             "suggest_links": True,  # Explicitly ask for suggestions
 993 |             "max_suggested_links": 2,
 994 |         }
 995 |         mem_res_5 = await safe_tool_call(
 996 |             store_memory, store_args_5, "Store Memory 5 (Hypothesis Reasoning) - Suggest Links"
 997 |         )
 998 |         mem5_id = mem_res_5.get("memory_id") if mem_res_5.get("success") else None
 999 |         if mem5_id:
1000 |             mem_ids["mem5_id"] = mem5_id
1001 | 
1002 |         # Check suggestions result
1003 |         if mem_res_5 and mem_res_5.get("success") and mem_res_5.get("suggested_links"):
1004 |             console.print("[cyan]   -> Link suggestions received for Memory 5:[/]")
1005 |             console.print(pretty_repr(mem_res_5["suggested_links"]))
1006 |         elif mem_res_5 and mem_res_5.get("success"):
1007 |             console.print(
1008 |                 "[dim]   -> No link suggestions returned for Memory 5 (or embedding failed).[/dim]"
1009 |             )
1010 |         elif mem_res_5 and not mem_res_5.get("success"):
1011 |             console.print(
1012 |                 "[yellow]   -> Failed to store Memory 5, cannot check suggestions.[/yellow]"
1013 |             )
1014 |     else:
1015 |         console.print(
1016 |             "[yellow]Skipping Memory 5 storage: Hypothesis thought recording failed.[/yellow]"
1017 |         )
1018 | 
1019 | 
1020 | async def demonstrate_state_and_working_memory(
1021 |     wf_id: str,
1022 |     mem_ids_dict: Dict[str, str],
1023 |     action_ids_dict: Dict[str, str],
1024 |     thought_ids_dict: Dict[str, Any],
1025 |     state_ids_dict: Dict[str, str],
1026 | ):
1027 |     """Demonstrate saving/loading state and working memory operations."""
1028 |     console.print(
1029 |         Rule("[bold green]8. Cognitive State & Working Memory[/bold green]", style="green")
1030 |     )
1031 | 
1032 |     # --- Retrieve necessary IDs from previous steps ---
1033 |     main_wf_id = wf_id
1034 |     main_chain_id = thought_ids_dict.get("main_chain_id")  # noqa: F841
1035 |     plan_action_id = action_ids_dict.get("action1_id")
1036 |     sim_action_id = action_ids_dict.get("action2_id")
1037 |     mem1_id = mem_ids_dict.get("mem1_id")
1038 |     mem2_id = mem_ids_dict.get("mem2_id")
1039 |     mem3_id = mem_ids_dict.get("mem3_id")
1040 |     mem4_id = mem_ids_dict.get("mem4_id")
1041 |     mem5_id = mem_ids_dict.get("mem5_id")
1042 | 
1043 |     hypothesis_thought_id = None
1044 |     if mem5_id and main_wf_id:
1045 |         mem5_details = await safe_tool_call(
1046 |             get_memory_by_id,
1047 |             {"memory_id": mem5_id},
1048 |             f"Get Memory 5 Details ({_fmt_id(mem5_id)}) for Thought ID",
1049 |             suppress_output=True,
1050 |         )
1051 |         if mem5_details and mem5_details.get("success"):
1052 |             hypothesis_thought_id = mem5_details.get("thought_id")
1053 |             if hypothesis_thought_id:
1054 |                 console.print(
1055 |                     f"[cyan]   -> Retrieved Hypothesis Thought ID: {_fmt_id(hypothesis_thought_id)}[/cyan]"
1056 |                 )
1057 |             else:
1058 |                 console.print(
1059 |                     "[yellow]   -> Could not retrieve hypothesis thought ID from Memory 5 details.[/yellow]"
1060 |                 )
1061 |         else:
1062 |             # Handle case where get_memory_by_id failed or didn't return success
1063 |              console.print(
1064 |                  f"[yellow]   -> Failed to get details for Memory 5 ({_fmt_id(mem5_id)}) to find Thought ID.[/yellow]"
1065 |              )
1066 | 
1067 |     # --- Check if we have enough *critical* data to proceed ---
1068 |     # Hypothesis thought ID is critical for saving the intended state goals
1069 |     if not (
1070 |         main_wf_id
1071 |         and mem1_id
1072 |         and mem2_id
1073 |         and mem3_id
1074 |         and mem4_id
1075 |         and plan_action_id
1076 |         and hypothesis_thought_id # Ensure this critical ID exists
1077 |     ):
1078 |         console.print(
1079 |             "[bold yellow]Skipping state/working memory demo:[/bold yellow] Missing critical IDs (workflow, mem1-4, plan_action, hypothesis_thought) from previous steps."
1080 |         )
1081 |         return # Exit if critical IDs are missing
1082 | 
1083 |     # Prepare IDs for saving state - check individually for non-critical ones
1084 |     working_mems = [mem_id for mem_id in [mem2_id, mem3_id, mem4_id, mem5_id] if mem_id] # Filter None
1085 |     focus_mems = [mem4_id] if mem4_id else [] # Filter None
1086 |     context_actions = [action_id for action_id in [plan_action_id, sim_action_id] if action_id] # Filter None
1087 |     goal_thoughts = [hypothesis_thought_id] # Already checked above
1088 | 
1089 |     # 1. Save Cognitive State
1090 |     save_args = {
1091 |         "workflow_id": wf_id,
1092 |         "title": "State after simulation failure and hypothesis",
1093 |         "working_memory_ids": working_mems,
1094 |         "focus_area_ids": focus_mems,
1095 |         "context_action_ids": context_actions,
1096 |         "current_goal_thought_ids": goal_thoughts,
1097 |     }
1098 |     state_res = await safe_tool_call(save_cognitive_state, save_args, "Save Cognitive State")
1099 |     state_id = state_res.get("state_id") if state_res and state_res.get("success") else None # More robust check
1100 | 
1101 |     if state_id:
1102 |         state_ids_dict["saved_state_id"] = state_id
1103 |         console.print(f"[green]   -> Cognitive state saved successfully with ID: {_fmt_id(state_id)}[/green]")
1104 |     else:
1105 |         console.print("[bold red]CRITICAL DEMO FAILURE:[/bold red] Failed to save cognitive state. Cannot proceed with working memory tests.")
1106 |         return # Exit if state saving failed
1107 | 
1108 |     # 2. Load Cognitive State (by ID) - Use the confirmed state_id
1109 |     await safe_tool_call(
1110 |         load_cognitive_state,
1111 |         {"workflow_id": wf_id, "state_id": state_id}, # Use state_id directly
1112 |         f"Load Cognitive State ({_fmt_id(state_id)}) by ID",
1113 |     )
1114 | 
1115 |     # 3. Load Cognitive State (Latest)
1116 |     await safe_tool_call(
1117 |         load_cognitive_state,
1118 |         {"workflow_id": wf_id},
1119 |         "Load Latest Cognitive State",
1120 |     )
1121 | 
1122 |     # --- Working Memory Operations using the saved state_id as the context_id ---
1123 |     # The variable 'state_id' now holds the context ID we need for the rest of this section.
1124 |     console.print(f"\n[dim]Using saved state ID '{_fmt_id(state_id)}' as context_id for working memory tests...[/dim]\n")
1125 | 
1126 |     # 4. Focus Memory (Focus on the 'hypothesis' memory if it exists)
1127 |     focus_target_id = mem_ids_dict.get("mem5_id") # Get mem5_id again here
1128 |     if focus_target_id:
1129 |         await safe_tool_call(
1130 |             focus_memory,
1131 |             {
1132 |                 "memory_id": focus_target_id,
1133 |                 "context_id": state_id, # USE state_id
1134 |                 "add_to_working": False, # Assume it's already there from save_state
1135 |             },
1136 |             f"Focus Memory ({_fmt_id(focus_target_id)}) in Context ({_fmt_id(state_id)})", # USE state_id
1137 |         )
1138 |     else:
1139 |         console.print(
1140 |             "[bold yellow]Skipping focus memory test: Hypothesis memory ID (mem5_id) not available.[/bold yellow]"
1141 |         )
1142 | 
1143 |     # 5. Get Working Memory (Should reflect the saved state initially)
1144 |     await safe_tool_call(
1145 |         get_working_memory,
1146 |         {
1147 |             "context_id": state_id, # USE state_id
1148 |             "include_links": False, # Keep output cleaner for this demo step
1149 |         },
1150 |         f"Get Working Memory for Context ({_fmt_id(state_id)})", # USE state_id
1151 |     )
1152 | 
1153 |     # 6. Optimize Working Memory (Reduce size, using 'balanced' strategy)
1154 |     wm_details = await safe_tool_call(
1155 |         get_working_memory,
1156 |         {"context_id": state_id}, # USE state_id
1157 |         "Get WM Size Before Optimization",
1158 |         suppress_output=True,
1159 |     )
1160 |     current_wm_size = (
1161 |         len(wm_details.get("working_memories", []))
1162 |         if wm_details and wm_details.get("success")
1163 |         else 0
1164 |     )
1165 | 
1166 |     if current_wm_size > 2: # Only optimize if we have more than 2 memories
1167 |         target_optimize_size = max(1, current_wm_size // 2)
1168 |         console.print(
1169 |             f"[cyan]   -> Optimizing working memory from {current_wm_size} down to {target_optimize_size}...[/cyan]"
1170 |         )
1171 |         await safe_tool_call(
1172 |             optimize_working_memory,
1173 |             {
1174 |                 "context_id": state_id, # USE state_id
1175 |                 "target_size": target_optimize_size,
1176 |                 "strategy": "balanced",
1177 |             },
1178 |             f"Optimize Working Memory (Context: {_fmt_id(state_id)}, Target: {target_optimize_size})", # USE state_id
1179 |         )
1180 |         await safe_tool_call(
1181 |             get_working_memory,
1182 |             {"context_id": state_id, "include_links": False}, # USE state_id
1183 |             f"Get Working Memory After Optimization (Context: {_fmt_id(state_id)})", # USE state_id
1184 |         )
1185 |     else:
1186 |         console.print(
1187 |             f"[dim]Skipping working memory optimization: Current size ({current_wm_size}) is too small.[/dim]"
1188 |         )
1189 | 
1190 | 
async def demonstrate_metacognition(wf_id: str, mem_ids: Dict, state_ids: Dict):
    """Demonstrate context retrieval, auto-focus, promotion, consolidation, reflection, summarization.

    Args:
        wf_id: ID of the demo workflow created in an earlier section.
        mem_ids: Mapping of demo labels (e.g. "mem1_id") to memory IDs created earlier.
        state_ids: Mapping expected to hold "saved_state_id" from the cognitive-state
            section; used here as the context ID for the auto-focus step.

    Note:
        Steps 4-6 (consolidation, reflection, summarization) call out to an LLM
        provider via `safe_tool_call`; they are expected to fail gracefully if no
        provider/API key is configured.
    """
    console.print(Rule("[bold green]9. Meta-Cognition & Summarization[/bold green]", style="green"))

    # 1. Get Workflow Context — full snapshot of the workflow's recorded state
    await safe_tool_call(get_workflow_context, {"workflow_id": wf_id}, "Get Full Workflow Context")

    # 2. Auto Update Focus — needs the cognitive-state ID saved in the previous section
    context_id = state_ids.get("saved_state_id")
    if context_id:
        await safe_tool_call(
            auto_update_focus,
            {"context_id": context_id},
            f"Auto Update Focus for Context ({_fmt_id(context_id)})",
        )
    else:
        console.print("[yellow]Skipping auto-focus: No context_id (state_id) available.[/yellow]")

    # 3. Promote Memory Level
    mem1_id = mem_ids.get("mem1_id")  # Episodic summary — promotion candidate
    mem3_id = mem_ids.get("mem3_id")  # Semantic fact — promotion should be rejected
    if mem1_id:
        console.print(
            f"[cyan]   -> Manually increasing access_count for Memory 1 ({_fmt_id(mem1_id)}) to test promotion...[/cyan]"
        )
        # Bypass the tool layer and bump access_count/confidence directly in the
        # demo DB so the promotion heuristic has grounds to fire.
        try:
            async with DBConnection(DEMO_DB_FILE) as conn:
                await conn.execute(
                    "UPDATE memories SET access_count = 10, confidence = 0.9 WHERE memory_id = ?",
                    (mem1_id,),
                )
                await conn.commit()
            await safe_tool_call(
                promote_memory_level,
                {"memory_id": mem1_id},
                f"Attempt Promote Memory 1 ({_fmt_id(mem1_id)}) from Episodic",
            )
        except Exception as e:
            console.print(f"[red]   -> Error updating access count for promotion test: {e}[/red]")

    # Negative case: a memory already at the semantic level should not promote further.
    if mem3_id:
        await safe_tool_call(
            promote_memory_level,
            {"memory_id": mem3_id},
            f"Attempt Promote Memory 3 ({_fmt_id(mem3_id)}) from Semantic (Should Fail)",
        )

    # 4. Consolidate Memories (requires LLM) — merge available source memories into a summary
    mem_ids_for_consolidation = [
        mid
        for mid in [mem_ids.get("mem1_id"), mem_ids.get("mem2_id"), mem_ids.get("mem3_id")]
        if mid
    ]
    if len(mem_ids_for_consolidation) >= 2:
        console.print(
            "[yellow]Attempting memory consolidation. Requires configured LLM provider (e.g., OpenAI API key).[/yellow]"
        )
        await safe_tool_call(
            consolidate_memories,
            {
                "workflow_id": wf_id,
                "target_memories": mem_ids_for_consolidation,
                "consolidation_type": "summary",
                "store_result": True,
                "provider": config.default_provider or "openai",
            },
            "Consolidate Memories (Summary)",
        )
    else:
        console.print(
            "[yellow]Skipping consolidation: Not enough source memories available.[/yellow]"
        )

    # 5. Generate Reflection (requires LLM)
    console.print(
        "[yellow]Attempting reflection generation. Requires configured LLM provider.[/yellow]"
    )
    await safe_tool_call(
        generate_reflection,
        {
            "workflow_id": wf_id,
            "reflection_type": "gaps",
            "provider": config.default_provider
            or "openai",  # Use configured default from GatewayConfig
        },
        "Generate Reflection (Gaps)",
    )

    # 6. Summarize Text (requires LLM) — also records the summary as a new memory
    console.print(
        "[yellow]Attempting text summarization. Requires configured LLM provider.[/yellow]"
    )
    sample_text = """
    The Unified Memory System integrates several components for advanced agent cognition.
    It tracks workflows, actions, artifacts, and thoughts. A multi-level memory hierarchy
    (working, episodic, semantic, procedural) allows for different types of knowledge storage.
    Vector embeddings enable semantic search capabilities. Associative links connect related
    memory items. Cognitive states can be saved and loaded, preserving the agent's context.
    Maintenance tools help manage memory expiration and provide statistics. Reporting and
    visualization tools offer insights into the agent's processes. This system aims to provide
    a robust foundation for complex autonomous agents.
    """
    await safe_tool_call(
        summarize_text,
        {
            "text_to_summarize": sample_text,
            "target_tokens": 50,
            "record_summary": True,
            "workflow_id": wf_id,
            "provider": config.default_provider or "openai",
        },
        "Summarize Sample Text and Record Memory",
    )
1305 | 
async def demonstrate_maintenance_and_stats(wf_id: str):
    """Demonstrate memory deletion and statistics computation."""
    console.print(Rule("[bold green]10. Maintenance & Statistics[/bold green]", style="green"))

    # Exercise TTL-based expiration: store a short-lived memory, wait past its
    # TTL, then run the expiration sweep and expect one deletion.
    console.print("[cyan]   -> Storing a temporary memory with TTL=1 second...[/cyan]")
    store_result = await safe_tool_call(
        store_memory,
        {
            "workflow_id": wf_id,
            "content": "This memory should expire quickly.",
            "memory_type": "observation",
            "ttl": 1,  # expires after one second
        },
        "Store Temporary Memory",
        suppress_output=True,
    )

    if not (store_result and store_result.get("success")):
        # Could not seed the expiring memory; report why and skip the sweep test.
        console.print(
            "[yellow]   -> Failed to store temporary memory for expiration test.[/yellow]"
        )
        if store_result:
            console.print(f"[yellow]      Error: {store_result.get('error')}[/yellow]")
    else:
        console.print("[cyan]   -> Waiting 2 seconds for memory to expire...[/cyan]")
        await asyncio.sleep(2)
        await safe_tool_call(
            delete_expired_memories, {}, "Delete Expired Memories (Should delete 1)"
        )

    # Statistics: first scoped to this workflow, then across the whole store.
    await safe_tool_call(
        compute_memory_statistics,
        {"workflow_id": wf_id},
        f"Compute Statistics for Workflow ({_fmt_id(wf_id)})",
    )
    await safe_tool_call(compute_memory_statistics, {}, "Compute Global Statistics")
1349 | 
async def demonstrate_reporting_and_viz(wf_id: str, thought_chain_id: str, mem_ids: Dict):
    """Demonstrate report generation and visualization."""
    console.print(Rule("[bold green]11. Reporting & Visualization[/bold green]", style="green"))

    # Generate the workflow report in each supported format, in a fixed order.
    report_variants = [
        (
            {"workflow_id": wf_id, "report_format": "markdown", "style": "professional"},
            "Generate Workflow Report (Markdown, Professional)",
        ),
        (
            {"workflow_id": wf_id, "report_format": "html", "style": "concise"},
            "Generate Workflow Report (HTML, Concise)",
        ),
        (
            {"workflow_id": wf_id, "report_format": "json"},
            "Generate Workflow Report (JSON)",
        ),
        (
            {"workflow_id": wf_id, "report_format": "mermaid"},
            "Generate Workflow Report (Mermaid Diagram)",
        ),
    ]
    for report_args, report_title in report_variants:
        await safe_tool_call(generate_workflow_report, report_args, report_title)

    # Render the reasoning chain as a diagram, when one was created earlier.
    if thought_chain_id:
        await safe_tool_call(
            visualize_reasoning_chain,
            {"thought_chain_id": thought_chain_id},
            f"Visualize Reasoning Chain ({_fmt_id(thought_chain_id)})",
        )
    else:
        console.print(
            "[yellow]Skipping reasoning visualization: No thought_chain_id available.[/yellow]"
        )

    # Memory-network view centered on the 'error' memory, when available.
    error_mem_id = mem_ids.get("mem2_id")
    if error_mem_id:
        await safe_tool_call(
            visualize_memory_network,
            {"center_memory_id": error_mem_id, "depth": 1, "max_nodes": 15},
            f"Visualize Memory Network (Centered on Error Mem {_fmt_id(error_mem_id)}, Depth 1)",
        )
    else:
        console.print(
            "[yellow]Skipping centered memory visualization: Error memory ID not available.[/yellow]"
        )

    # Finally, an uncentered view of the workflow's most relevant memories.
    await safe_tool_call(
        visualize_memory_network,
        {"workflow_id": wf_id, "max_nodes": 20},
        f"Visualize Memory Network (Workflow {_fmt_id(wf_id)}, Top 20 Relevant)",
    )
1409 | 
1410 | # --- Main Execution Logic ---
async def main():
    """Run the extended Unified Memory System demonstration suite."""
    console.print(
        Rule(
            "[bold magenta]Unified Memory System Tools Demo (Extended)[/bold magenta]",
            style="white",
        )
    )
    exit_code = 0

    # ID registries: each demo section records the IDs it creates here so that
    # later sections can look them up.
    wf_ids: Dict = {}
    action_ids: Dict = {}
    artifact_ids: Dict = {}
    thought_ids: Dict = {}  # holds the main thought-chain ID
    mem_ids: Dict = {}
    state_ids: Dict = {}  # holds the saved cognitive-state ID

    try:
        await setup_demo_environment()

        # Guard clause: nothing else can run without a workflow.
        wf_id = await demonstrate_basic_workflows()
        if not wf_id:
            raise RuntimeError("Workflow creation failed, cannot continue demo.")
        wf_ids["main_wf_id"] = wf_id

        # Core sections, executed in dependency order.
        action_ids = await demonstrate_basic_actions(wf_id)
        await demonstrate_action_dependencies(wf_id, action_ids)
        artifact_ids = await demonstrate_artifacts(wf_id, action_ids)

        chain_id = await demonstrate_thoughts_and_linking(wf_id, action_ids, artifact_ids)
        if chain_id:
            thought_ids["main_chain_id"] = chain_id

        mem_ids = await demonstrate_memory_operations(wf_id, action_ids, thought_ids)
        await demonstrate_embedding_and_search(wf_id, mem_ids, thought_ids)

        # The state/working-memory section consumes IDs from every prior step
        # and records its saved state ID into state_ids for later sections.
        await demonstrate_state_and_working_memory(
            wf_id=wf_id,
            mem_ids_dict=mem_ids,
            action_ids_dict=action_ids,
            thought_ids_dict=thought_ids,
            state_ids_dict=state_ids,
        )

        # Advanced sections: meta-cognition, maintenance/stats, reporting/viz.
        await demonstrate_metacognition(wf_id, mem_ids, state_ids)
        await demonstrate_maintenance_and_stats(wf_id)
        await demonstrate_reporting_and_viz(wf_id, thought_ids.get("main_chain_id"), mem_ids)

        logger.success(
            "Unified Memory System Demo completed successfully!",
            emoji_key="complete",
        )
        console.print(Rule("[bold green]Demo Finished[/bold green]", style="green"))

    except Exception as e:
        logger.critical(f"Demo crashed unexpectedly: {str(e)}", emoji_key="critical", exc_info=True)
        console.print(f"\n[bold red]CRITICAL ERROR:[/bold red] {escape(str(e))}")
        console.print_exception(show_locals=False)
        exit_code = 1

    finally:
        # Always tear down the demo environment, even after a crash.
        console.print(Rule("Cleanup", style="dim"))
        await cleanup_demo_environment()

    return exit_code
1489 | 
1490 | 
if __name__ == "__main__":
    # Run the demo suite and propagate its exit status to the shell.
    sys.exit(asyncio.run(main()))
1495 | 
```
Page 20/45FirstPrevNextLast