This is page 9 of 39. Use http://codebase.md/wshobson/maverick-mcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .dockerignore
├── .env.example
├── .github
│   ├── dependabot.yml
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── config.yml
│   │   ├── feature_request.md
│   │   ├── question.md
│   │   └── security_report.md
│   ├── pull_request_template.md
│   └── workflows
│       ├── claude-code-review.yml
│       └── claude.yml
├── .gitignore
├── .python-version
├── .vscode
│   ├── launch.json
│   └── settings.json
├── alembic
│   ├── env.py
│   ├── script.py.mako
│   └── versions
│       ├── 001_initial_schema.py
│       ├── 003_add_performance_indexes.py
│       ├── 006_rename_metadata_columns.py
│       ├── 008_performance_optimization_indexes.py
│       ├── 009_rename_to_supply_demand.py
│       ├── 010_self_contained_schema.py
│       ├── 011_remove_proprietary_terms.py
│       ├── 013_add_backtest_persistence_models.py
│       ├── 014_add_portfolio_models.py
│       ├── 08e3945a0c93_merge_heads.py
│       ├── 9374a5c9b679_merge_heads_for_testing.py
│       ├── abf9b9afb134_merge_multiple_heads.py
│       ├── adda6d3fd84b_merge_proprietary_terms_removal_with_.py
│       ├── e0c75b0bdadb_fix_financial_data_precision_only.py
│       ├── f0696e2cac15_add_essential_performance_indexes.py
│       └── fix_database_integrity_issues.py
├── alembic.ini
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── DATABASE_SETUP.md
├── docker-compose.override.yml.example
├── docker-compose.yml
├── Dockerfile
├── docs
│   ├── api
│   │   └── backtesting.md
│   ├── BACKTESTING.md
│   ├── COST_BASIS_SPECIFICATION.md
│   ├── deep_research_agent.md
│   ├── exa_research_testing_strategy.md
│   ├── PORTFOLIO_PERSONALIZATION_PLAN.md
│   ├── PORTFOLIO.md
│   ├── SETUP_SELF_CONTAINED.md
│   └── speed_testing_framework.md
├── examples
│   ├── complete_speed_validation.py
│   ├── deep_research_integration.py
│   ├── llm_optimization_example.py
│   ├── llm_speed_demo.py
│   ├── monitoring_example.py
│   ├── parallel_research_example.py
│   ├── speed_optimization_demo.py
│   └── timeout_fix_demonstration.py
├── LICENSE
├── Makefile
├── MANIFEST.in
├── maverick_mcp
│   ├── __init__.py
│   ├── agents
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── circuit_breaker.py
│   │   ├── deep_research.py
│   │   ├── market_analysis.py
│   │   ├── optimized_research.py
│   │   ├── supervisor.py
│   │   └── technical_analysis.py
│   ├── api
│   │   ├── __init__.py
│   │   ├── api_server.py
│   │   ├── connection_manager.py
│   │   ├── dependencies
│   │   │   ├── __init__.py
│   │   │   ├── stock_analysis.py
│   │   │   └── technical_analysis.py
│   │   ├── error_handling.py
│   │   ├── inspector_compatible_sse.py
│   │   ├── inspector_sse.py
│   │   ├── middleware
│   │   │   ├── error_handling.py
│   │   │   ├── mcp_logging.py
│   │   │   ├── rate_limiting_enhanced.py
│   │   │   └── security.py
│   │   ├── openapi_config.py
│   │   ├── routers
│   │   │   ├── __init__.py
│   │   │   ├── agents.py
│   │   │   ├── backtesting.py
│   │   │   ├── data_enhanced.py
│   │   │   ├── data.py
│   │   │   ├── health_enhanced.py
│   │   │   ├── health_tools.py
│   │   │   ├── health.py
│   │   │   ├── intelligent_backtesting.py
│   │   │   ├── introspection.py
│   │   │   ├── mcp_prompts.py
│   │   │   ├── monitoring.py
│   │   │   ├── news_sentiment_enhanced.py
│   │   │   ├── performance.py
│   │   │   ├── portfolio.py
│   │   │   ├── research.py
│   │   │   ├── screening_ddd.py
│   │   │   ├── screening_parallel.py
│   │   │   ├── screening.py
│   │   │   ├── technical_ddd.py
│   │   │   ├── technical_enhanced.py
│   │   │   ├── technical.py
│   │   │   └── tool_registry.py
│   │   ├── server.py
│   │   ├── services
│   │   │   ├── __init__.py
│   │   │   ├── base_service.py
│   │   │   ├── market_service.py
│   │   │   ├── portfolio_service.py
│   │   │   ├── prompt_service.py
│   │   │   └── resource_service.py
│   │   ├── simple_sse.py
│   │   └── utils
│   │       ├── __init__.py
│   │       ├── insomnia_export.py
│   │       └── postman_export.py
│   ├── application
│   │   ├── __init__.py
│   │   ├── commands
│   │   │   └── __init__.py
│   │   ├── dto
│   │   │   ├── __init__.py
│   │   │   └── technical_analysis_dto.py
│   │   ├── queries
│   │   │   ├── __init__.py
│   │   │   └── get_technical_analysis.py
│   │   └── screening
│   │       ├── __init__.py
│   │       ├── dtos.py
│   │       └── queries.py
│   ├── backtesting
│   │   ├── __init__.py
│   │   ├── ab_testing.py
│   │   ├── analysis.py
│   │   ├── batch_processing_stub.py
│   │   ├── batch_processing.py
│   │   ├── model_manager.py
│   │   ├── optimization.py
│   │   ├── persistence.py
│   │   ├── retraining_pipeline.py
│   │   ├── strategies
│   │   │   ├── __init__.py
│   │   │   ├── base.py
│   │   │   ├── ml
│   │   │   │   ├── __init__.py
│   │   │   │   ├── adaptive.py
│   │   │   │   ├── ensemble.py
│   │   │   │   ├── feature_engineering.py
│   │   │   │   └── regime_aware.py
│   │   │   ├── ml_strategies.py
│   │   │   ├── parser.py
│   │   │   └── templates.py
│   │   ├── strategy_executor.py
│   │   ├── vectorbt_engine.py
│   │   └── visualization.py
│   ├── config
│   │   ├── __init__.py
│   │   ├── constants.py
│   │   ├── database_self_contained.py
│   │   ├── database.py
│   │   ├── llm_optimization_config.py
│   │   ├── logging_settings.py
│   │   ├── plotly_config.py
│   │   ├── security_utils.py
│   │   ├── security.py
│   │   ├── settings.py
│   │   ├── technical_constants.py
│   │   ├── tool_estimation.py
│   │   └── validation.py
│   ├── core
│   │   ├── __init__.py
│   │   ├── technical_analysis.py
│   │   └── visualization.py
│   ├── data
│   │   ├── __init__.py
│   │   ├── cache_manager.py
│   │   ├── cache.py
│   │   ├── django_adapter.py
│   │   ├── health.py
│   │   ├── models.py
│   │   ├── performance.py
│   │   ├── session_management.py
│   │   └── validation.py
│   ├── database
│   │   ├── __init__.py
│   │   ├── base.py
│   │   └── optimization.py
│   ├── dependencies.py
│   ├── domain
│   │   ├── __init__.py
│   │   ├── entities
│   │   │   ├── __init__.py
│   │   │   └── stock_analysis.py
│   │   ├── events
│   │   │   └── __init__.py
│   │   ├── portfolio.py
│   │   ├── screening
│   │   │   ├── __init__.py
│   │   │   ├── entities.py
│   │   │   ├── services.py
│   │   │   └── value_objects.py
│   │   ├── services
│   │   │   ├── __init__.py
│   │   │   └── technical_analysis_service.py
│   │   ├── stock_analysis
│   │   │   ├── __init__.py
│   │   │   └── stock_analysis_service.py
│   │   └── value_objects
│   │       ├── __init__.py
│   │       └── technical_indicators.py
│   ├── exceptions.py
│   ├── infrastructure
│   │   ├── __init__.py
│   │   ├── cache
│   │   │   └── __init__.py
│   │   ├── caching
│   │   │   ├── __init__.py
│   │   │   └── cache_management_service.py
│   │   ├── connection_manager.py
│   │   ├── data_fetching
│   │   │   ├── __init__.py
│   │   │   └── stock_data_service.py
│   │   ├── health
│   │   │   ├── __init__.py
│   │   │   └── health_checker.py
│   │   ├── persistence
│   │   │   ├── __init__.py
│   │   │   └── stock_repository.py
│   │   ├── providers
│   │   │   └── __init__.py
│   │   ├── screening
│   │   │   ├── __init__.py
│   │   │   └── repositories.py
│   │   └── sse_optimizer.py
│   ├── langchain_tools
│   │   ├── __init__.py
│   │   ├── adapters.py
│   │   └── registry.py
│   ├── logging_config.py
│   ├── memory
│   │   ├── __init__.py
│   │   └── stores.py
│   ├── monitoring
│   │   ├── __init__.py
│   │   ├── health_check.py
│   │   ├── health_monitor.py
│   │   ├── integration_example.py
│   │   ├── metrics.py
│   │   ├── middleware.py
│   │   └── status_dashboard.py
│   ├── providers
│   │   ├── __init__.py
│   │   ├── dependencies.py
│   │   ├── factories
│   │   │   ├── __init__.py
│   │   │   ├── config_factory.py
│   │   │   └── provider_factory.py
│   │   ├── implementations
│   │   │   ├── __init__.py
│   │   │   ├── cache_adapter.py
│   │   │   ├── macro_data_adapter.py
│   │   │   ├── market_data_adapter.py
│   │   │   ├── persistence_adapter.py
│   │   │   └── stock_data_adapter.py
│   │   ├── interfaces
│   │   │   ├── __init__.py
│   │   │   ├── cache.py
│   │   │   ├── config.py
│   │   │   ├── macro_data.py
│   │   │   ├── market_data.py
│   │   │   ├── persistence.py
│   │   │   └── stock_data.py
│   │   ├── llm_factory.py
│   │   ├── macro_data.py
│   │   ├── market_data.py
│   │   ├── mocks
│   │   │   ├── __init__.py
│   │   │   ├── mock_cache.py
│   │   │   ├── mock_config.py
│   │   │   ├── mock_macro_data.py
│   │   │   ├── mock_market_data.py
│   │   │   ├── mock_persistence.py
│   │   │   └── mock_stock_data.py
│   │   ├── openrouter_provider.py
│   │   ├── optimized_screening.py
│   │   ├── optimized_stock_data.py
│   │   └── stock_data.py
│   ├── README.md
│   ├── tests
│   │   ├── __init__.py
│   │   ├── README_INMEMORY_TESTS.md
│   │   ├── test_cache_debug.py
│   │   ├── test_fixes_validation.py
│   │   ├── test_in_memory_routers.py
│   │   ├── test_in_memory_server.py
│   │   ├── test_macro_data_provider.py
│   │   ├── test_mailgun_email.py
│   │   ├── test_market_calendar_caching.py
│   │   ├── test_mcp_tool_fixes_pytest.py
│   │   ├── test_mcp_tool_fixes.py
│   │   ├── test_mcp_tools.py
│   │   ├── test_models_functional.py
│   │   ├── test_server.py
│   │   ├── test_stock_data_enhanced.py
│   │   ├── test_stock_data_provider.py
│   │   └── test_technical_analysis.py
│   ├── tools
│   │   ├── __init__.py
│   │   ├── performance_monitoring.py
│   │   ├── portfolio_manager.py
│   │   ├── risk_management.py
│   │   └── sentiment_analysis.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── agent_errors.py
│   │   ├── batch_processing.py
│   │   ├── cache_warmer.py
│   │   ├── circuit_breaker_decorators.py
│   │   ├── circuit_breaker_services.py
│   │   ├── circuit_breaker.py
│   │   ├── data_chunking.py
│   │   ├── database_monitoring.py
│   │   ├── debug_utils.py
│   │   ├── fallback_strategies.py
│   │   ├── llm_optimization.py
│   │   ├── logging_example.py
│   │   ├── logging_init.py
│   │   ├── logging.py
│   │   ├── mcp_logging.py
│   │   ├── memory_profiler.py
│   │   ├── monitoring_middleware.py
│   │   ├── monitoring.py
│   │   ├── orchestration_logging.py
│   │   ├── parallel_research.py
│   │   ├── parallel_screening.py
│   │   ├── quick_cache.py
│   │   ├── resource_manager.py
│   │   ├── shutdown.py
│   │   ├── stock_helpers.py
│   │   ├── structured_logger.py
│   │   ├── tool_monitoring.py
│   │   ├── tracing.py
│   │   └── yfinance_pool.py
│   ├── validation
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── data.py
│   │   ├── middleware.py
│   │   ├── portfolio.py
│   │   ├── responses.py
│   │   ├── screening.py
│   │   └── technical.py
│   └── workflows
│       ├── __init__.py
│       ├── agents
│       │   ├── __init__.py
│       │   ├── market_analyzer.py
│       │   ├── optimizer_agent.py
│       │   ├── strategy_selector.py
│       │   └── validator_agent.py
│       ├── backtesting_workflow.py
│       └── state.py
├── PLANS.md
├── pyproject.toml
├── pyrightconfig.json
├── README.md
├── scripts
│   ├── dev.sh
│   ├── INSTALLATION_GUIDE.md
│   ├── load_example.py
│   ├── load_market_data.py
│   ├── load_tiingo_data.py
│   ├── migrate_db.py
│   ├── README_TIINGO_LOADER.md
│   ├── requirements_tiingo.txt
│   ├── run_stock_screening.py
│   ├── run-migrations.sh
│   ├── seed_db.py
│   ├── seed_sp500.py
│   ├── setup_database.sh
│   ├── setup_self_contained.py
│   ├── setup_sp500_database.sh
│   ├── test_seeded_data.py
│   ├── test_tiingo_loader.py
│   ├── tiingo_config.py
│   └── validate_setup.py
├── SECURITY.md
├── server.json
├── setup.py
├── tests
│   ├── __init__.py
│   ├── conftest.py
│   ├── core
│   │   └── test_technical_analysis.py
│   ├── data
│   │   └── test_portfolio_models.py
│   ├── domain
│   │   ├── conftest.py
│   │   ├── test_portfolio_entities.py
│   │   └── test_technical_analysis_service.py
│   ├── fixtures
│   │   └── orchestration_fixtures.py
│   ├── integration
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── README.md
│   │   ├── run_integration_tests.sh
│   │   ├── test_api_technical.py
│   │   ├── test_chaos_engineering.py
│   │   ├── test_config_management.py
│   │   ├── test_full_backtest_workflow_advanced.py
│   │   ├── test_full_backtest_workflow.py
│   │   ├── test_high_volume.py
│   │   ├── test_mcp_tools.py
│   │   ├── test_orchestration_complete.py
│   │   ├── test_portfolio_persistence.py
│   │   ├── test_redis_cache.py
│   │   ├── test_security_integration.py.disabled
│   │   └── vcr_setup.py
│   ├── performance
│   │   ├── __init__.py
│   │   ├── test_benchmarks.py
│   │   ├── test_load.py
│   │   ├── test_profiling.py
│   │   └── test_stress.py
│   ├── providers
│   │   └── test_stock_data_simple.py
│   ├── README.md
│   ├── test_agents_router_mcp.py
│   ├── test_backtest_persistence.py
│   ├── test_cache_management_service.py
│   ├── test_cache_serialization.py
│   ├── test_circuit_breaker.py
│   ├── test_database_pool_config_simple.py
│   ├── test_database_pool_config.py
│   ├── test_deep_research_functional.py
│   ├── test_deep_research_integration.py
│   ├── test_deep_research_parallel_execution.py
│   ├── test_error_handling.py
│   ├── test_event_loop_integrity.py
│   ├── test_exa_research_integration.py
│   ├── test_exception_hierarchy.py
│   ├── test_financial_search.py
│   ├── test_graceful_shutdown.py
│   ├── test_integration_simple.py
│   ├── test_langgraph_workflow.py
│   ├── test_market_data_async.py
│   ├── test_market_data_simple.py
│   ├── test_mcp_orchestration_functional.py
│   ├── test_ml_strategies.py
│   ├── test_optimized_research_agent.py
│   ├── test_orchestration_integration.py
│   ├── test_orchestration_logging.py
│   ├── test_orchestration_tools_simple.py
│   ├── test_parallel_research_integration.py
│   ├── test_parallel_research_orchestrator.py
│   ├── test_parallel_research_performance.py
│   ├── test_performance_optimizations.py
│   ├── test_production_validation.py
│   ├── test_provider_architecture.py
│   ├── test_rate_limiting_enhanced.py
│   ├── test_runner_validation.py
│   ├── test_security_comprehensive.py.disabled
│   ├── test_security_cors.py
│   ├── test_security_enhancements.py.disabled
│   ├── test_security_headers.py
│   ├── test_security_penetration.py
│   ├── test_session_management.py
│   ├── test_speed_optimization_validation.py
│   ├── test_stock_analysis_dependencies.py
│   ├── test_stock_analysis_service.py
│   ├── test_stock_data_fetching_service.py
│   ├── test_supervisor_agent.py
│   ├── test_supervisor_functional.py
│   ├── test_tool_estimation_config.py
│   ├── test_visualization.py
│   └── utils
│       ├── test_agent_errors.py
│       ├── test_logging.py
│       ├── test_parallel_screening.py
│       └── test_quick_cache.py
├── tools
│   ├── check_orchestration_config.py
│   ├── experiments
│   │   ├── validation_examples.py
│   │   └── validation_fixed.py
│   ├── fast_dev.sh
│   ├── hot_reload.py
│   ├── quick_test.py
│   └── templates
│       ├── new_router_template.py
│       ├── new_tool_template.py
│       ├── screening_strategy_template.py
│       └── test_template.py
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/maverick_mcp/domain/screening/entities.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Screening domain entities.
  3 | 
  4 | This module contains the core business entities for stock screening,
  5 | with embedded business rules and validation logic.
  6 | """
  7 | 
  8 | from dataclasses import dataclass
  9 | from datetime import datetime
 10 | from decimal import Decimal
 11 | from typing import Any
 12 | 
 13 | 
 14 | @dataclass
 15 | class ScreeningResult:
 16 |     """
 17 |     Domain entity representing a stock screening result.
 18 | 
 19 |     This entity encapsulates all business rules related to screening results,
 20 |     including validation, scoring, and ranking logic.
 21 |     """
 22 | 
 23 |     # Core identification
 24 |     stock_symbol: str
 25 |     screening_date: datetime
 26 | 
 27 |     # Price data
 28 |     open_price: Decimal
 29 |     high_price: Decimal
 30 |     low_price: Decimal
 31 |     close_price: Decimal
 32 |     volume: int
 33 | 
 34 |     # Technical indicators
 35 |     ema_21: Decimal
 36 |     sma_50: Decimal
 37 |     sma_150: Decimal
 38 |     sma_200: Decimal
 39 |     momentum_score: Decimal
 40 |     avg_volume_30d: Decimal
 41 |     adr_percentage: Decimal
 42 |     atr: Decimal
 43 | 
 44 |     # Pattern analysis
 45 |     pattern: str | None = None
 46 |     squeeze: str | None = None
 47 |     consolidation: str | None = None
 48 |     entry_signal: str | None = None
 49 | 
 50 |     # Screening-specific scores
 51 |     combined_score: int = 0
 52 |     bear_score: int = 0
 53 |     compression_score: int = 0
 54 |     pattern_detected: int = 0
 55 | 
 56 |     # Additional bearish indicators
 57 |     rsi_14: Decimal | None = None
 58 |     macd: Decimal | None = None
 59 |     macd_signal: Decimal | None = None
 60 |     macd_histogram: Decimal | None = None
 61 |     distribution_days_20: int | None = None
 62 |     atr_contraction: bool | None = None
 63 |     big_down_volume: bool | None = None
 64 | 
 65 |     def __post_init__(self):
 66 |         """Validate business rules after initialization."""
 67 |         self._validate_stock_symbol()
 68 |         self._validate_price_data()
 69 |         self._validate_technical_indicators()
 70 | 
 71 |     def _validate_stock_symbol(self) -> None:
 72 |         """Validate stock symbol format."""
 73 |         if not self.stock_symbol or not isinstance(self.stock_symbol, str):
 74 |             raise ValueError("Stock symbol must be a non-empty string")
 75 | 
 76 |         if len(self.stock_symbol) > 10:
 77 |             raise ValueError("Stock symbol cannot exceed 10 characters")
 78 | 
 79 |     def _validate_price_data(self) -> None:
 80 |         """Validate price data consistency."""
 81 |         if self.close_price <= 0:
 82 |             raise ValueError("Close price must be positive")
 83 | 
 84 |         if self.volume < 0:
 85 |             raise ValueError("Volume cannot be negative")
 86 | 
 87 |         if self.high_price < self.low_price:
 88 |             raise ValueError("High price cannot be less than low price")
 89 | 
 90 |         if not (self.low_price <= self.close_price <= self.high_price):
 91 |             raise ValueError("Close price must be between low and high prices")
 92 | 
 93 |     def _validate_technical_indicators(self) -> None:
 94 |         """Validate technical indicator ranges."""
 95 |         if not (0 <= self.momentum_score <= 100):
 96 |             raise ValueError("Momentum score must be between 0 and 100")
 97 | 
 98 |         if self.adr_percentage < 0:
 99 |             raise ValueError("ADR percentage cannot be negative")
100 | 
101 |         if self.avg_volume_30d < 0:
102 |             raise ValueError("Average volume cannot be negative")
103 | 
104 |     def is_bullish_setup(self) -> bool:
105 |         """
106 |         Determine if this is a bullish screening setup.
107 | 
108 |         Business rule: A stock is considered bullish if it meets
109 |         momentum and trend criteria.
110 |         """
111 |         return (
112 |             self.close_price > self.sma_50
113 |             and self.close_price > self.sma_150
114 |             and self.momentum_score >= 70
115 |             and self.combined_score >= 50
116 |         )
117 | 
118 |     def is_bearish_setup(self) -> bool:
119 |         """
120 |         Determine if this is a bearish screening setup.
121 | 
122 |         Business rule: A stock is considered bearish if it shows
123 |         weakness and distribution characteristics.
124 |         """
125 |         return (
126 |             self.close_price < self.sma_50
127 |             and self.momentum_score <= 30
128 |             and self.bear_score >= 50
129 |         )
130 | 
131 |     def is_trending_stage2(self) -> bool:
132 |         """
133 |         Determine if this meets trending criteria.
134 | 
135 |         Business rule: Trending requires proper moving average alignment
136 |         and strong relative strength.
137 |         """
138 |         return (
139 |             self.close_price > self.sma_50
140 |             and self.close_price > self.sma_150
141 |             and self.close_price > self.sma_200
142 |             and self.sma_50 > self.sma_150
143 |             and self.sma_150 > self.sma_200
144 |             and self.momentum_score >= 80
145 |         )
146 | 
147 |     def meets_volume_criteria(self, min_volume: int) -> bool:
148 |         """Check if stock meets minimum volume requirements."""
149 |         return self.avg_volume_30d >= min_volume
150 | 
151 |     def meets_price_criteria(self, min_price: Decimal, max_price: Decimal) -> bool:
152 |         """Check if stock meets price range criteria."""
153 |         return min_price <= self.close_price <= max_price
154 | 
155 |     def calculate_risk_reward_ratio(
156 |         self, stop_loss_percentage: Decimal = Decimal("0.08")
157 |     ) -> Decimal:
158 |         """
159 |         Calculate risk/reward ratio based on current price and stop loss.
160 | 
161 |         Business rule: Risk is calculated as the distance to stop loss,
162 |         reward is calculated as the potential upside to resistance levels.
163 |         """
164 |         stop_loss_price = self.close_price * (1 - stop_loss_percentage)
165 |         risk = self.close_price - stop_loss_price
166 | 
167 |         # Simple reward calculation based on ADR
168 |         potential_reward = self.close_price * (self.adr_percentage / 100)
169 | 
170 |         if risk <= 0:
171 |             return Decimal("0")
172 | 
173 |         return potential_reward / risk
174 | 
175 |     def get_quality_score(self) -> int:
176 |         """
177 |         Calculate overall quality score based on multiple factors.
178 | 
179 |         Business rule: Quality score combines technical strength,
180 |         volume characteristics, and pattern recognition.
181 |         """
182 |         score = 0
183 | 
184 |         # Momentum Score contribution (0-40 points)
185 |         score += int(self.momentum_score * Decimal("0.4"))
186 | 
187 |         # Volume quality (0-20 points)
188 |         if self.avg_volume_30d >= 1_000_000:
189 |             score += 20
190 |         elif self.avg_volume_30d >= 500_000:
191 |             score += 15
192 |         elif self.avg_volume_30d >= 100_000:
193 |             score += 10
194 | 
195 |         # Pattern recognition (0-20 points)
196 |         if self.pattern_detected > 0:
197 |             score += 20
198 | 
199 |         # Price action (0-20 points)
200 |         if self.close_price > self.sma_50:
201 |             score += 10
202 |         if self.close_price > self.sma_200:
203 |             score += 10
204 | 
205 |         return min(score, 100)  # Cap at 100
206 | 
207 |     def to_dict(self) -> dict[str, Any]:
208 |         """Convert entity to dictionary for serialization."""
209 |         return {
210 |             "stock_symbol": self.stock_symbol,
211 |             "screening_date": self.screening_date.isoformat(),
212 |             "close_price": float(self.close_price),
213 |             "volume": self.volume,
214 |             "momentum_score": float(self.momentum_score),
215 |             "adr_percentage": float(self.adr_percentage),
216 |             "pattern": self.pattern,
217 |             "squeeze": self.squeeze,
218 |             "consolidation": self.consolidation,
219 |             "entry_signal": self.entry_signal,
220 |             "combined_score": self.combined_score,
221 |             "bear_score": self.bear_score,
222 |             "quality_score": self.get_quality_score(),
223 |             "is_bullish": self.is_bullish_setup(),
224 |             "is_bearish": self.is_bearish_setup(),
225 |             "is_trending_stage2": self.is_trending_stage2(),
226 |             "risk_reward_ratio": float(self.calculate_risk_reward_ratio()),
227 |         }
228 | 
229 | 
230 | @dataclass
231 | class ScreeningResultCollection:
232 |     """
233 |     Domain entity representing a collection of screening results.
234 | 
235 |     This aggregate root manages business rules that apply across
236 |     multiple screening results, such as ranking and filtering.
237 |     """
238 | 
239 |     results: list[ScreeningResult]
240 |     strategy_used: str
241 |     screening_timestamp: datetime
242 |     total_candidates_analyzed: int
243 | 
244 |     def __post_init__(self):
245 |         """Validate collection business rules."""
246 |         if self.total_candidates_analyzed < len(self.results):
247 |             raise ValueError("Total candidates cannot be less than results count")
248 | 
249 |     def get_top_ranked(self, limit: int) -> list[ScreeningResult]:
250 |         """
251 |         Get top-ranked results based on screening strategy.
252 | 
253 |         Business rule: Ranking depends on the screening strategy used.
254 |         """
255 |         if self.strategy_used == "maverick_bullish":
256 |             return sorted(self.results, key=lambda r: r.combined_score, reverse=True)[
257 |                 :limit
258 |             ]
259 |         elif self.strategy_used == "maverick_bearish":
260 |             return sorted(self.results, key=lambda r: r.bear_score, reverse=True)[
261 |                 :limit
262 |             ]
263 |         elif self.strategy_used == "trending_stage2":
264 |             return sorted(self.results, key=lambda r: r.momentum_score, reverse=True)[
265 |                 :limit
266 |             ]
267 |         else:
268 |             # Default to quality score
269 |             return sorted(
270 |                 self.results, key=lambda r: r.get_quality_score(), reverse=True
271 |             )[:limit]
272 | 
273 |     def filter_by_criteria(
274 |         self,
275 |         min_momentum_score: Decimal | None = None,
276 |         min_volume: int | None = None,
277 |         max_price: Decimal | None = None,
278 |         min_price: Decimal | None = None,
279 |     ) -> list[ScreeningResult]:
280 |         """
281 |         Filter results by business criteria.
282 | 
283 |         Business rule: All filters must be satisfied simultaneously.
284 |         """
285 |         filtered_results = self.results
286 | 
287 |         if min_momentum_score is not None:
288 |             filtered_results = [
289 |                 r for r in filtered_results if r.momentum_score >= min_momentum_score
290 |             ]
291 | 
292 |         if min_volume is not None:
293 |             filtered_results = [
294 |                 r for r in filtered_results if r.avg_volume_30d >= min_volume
295 |             ]
296 | 
297 |         if max_price is not None:
298 |             filtered_results = [
299 |                 r for r in filtered_results if r.close_price <= max_price
300 |             ]
301 | 
302 |         if min_price is not None:
303 |             filtered_results = [
304 |                 r for r in filtered_results if r.close_price >= min_price
305 |             ]
306 | 
307 |         return filtered_results
308 | 
309 |     def get_statistics(self) -> dict[str, Any]:
310 |         """Get collection statistics for analysis."""
311 |         if not self.results:
312 |             return {
313 |                 "total_results": 0,
314 |                 "avg_momentum_score": 0,
315 |                 "avg_volume": 0,
316 |                 "avg_price": 0,
317 |                 "bullish_setups": 0,
318 |                 "bearish_setups": 0,
319 |                 "trending_stage2": 0,
320 |             }
321 | 
322 |         return {
323 |             "total_results": len(self.results),
324 |             "avg_momentum_score": float(
325 |                 sum(r.momentum_score for r in self.results) / len(self.results)
326 |             ),
327 |             "avg_volume": int(
328 |                 sum(r.avg_volume_30d for r in self.results) / len(self.results)
329 |             ),
330 |             "avg_price": float(
331 |                 sum(r.close_price for r in self.results) / len(self.results)
332 |             ),
333 |             "bullish_setups": sum(1 for r in self.results if r.is_bullish_setup()),
334 |             "bearish_setups": sum(1 for r in self.results if r.is_bearish_setup()),
335 |             "trending_stage2": sum(1 for r in self.results if r.is_trending_stage2()),
336 |         }
337 | 
```
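
For reference, a minimal usage sketch of `ScreeningResult` (the values are illustrative; the import path follows this file's location):

```python
from datetime import datetime
from decimal import Decimal

from maverick_mcp.domain.screening.entities import ScreeningResult

# Illustrative values only; __post_init__ validates symbol, prices, and indicators.
result = ScreeningResult(
    stock_symbol="AAPL",
    screening_date=datetime(2024, 6, 3),
    open_price=Decimal("190.00"),
    high_price=Decimal("195.00"),
    low_price=Decimal("189.00"),
    close_price=Decimal("194.00"),
    volume=55_000_000,
    ema_21=Decimal("192.00"),
    sma_50=Decimal("185.00"),
    sma_150=Decimal("178.00"),
    sma_200=Decimal("170.00"),
    momentum_score=Decimal("91"),
    avg_volume_30d=Decimal("60000000"),
    adr_percentage=Decimal("2.5"),
    atr=Decimal("4.10"),
)

print(result.is_trending_stage2())  # True: MAs aligned and momentum >= 80
print(result.get_quality_score())   # 76 = 36 (momentum) + 20 (volume) + 10 + 10 (price action)
print(result.is_bullish_setup())    # False: combined_score defaults to 0, below the 50 threshold
```

Note that construction fails fast: inconsistent inputs (for example, a `close_price` outside the high/low range) raise `ValueError` from `__post_init__` rather than producing a silently invalid entity.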

--------------------------------------------------------------------------------
/maverick_mcp/api/routers/data_enhanced.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Enhanced data fetching router with dependency injection support.
  3 | 
  4 | This module demonstrates how to integrate the new provider interfaces
  5 | with FastMCP routers while maintaining backward compatibility.
  6 | """
  7 | 
  8 | import json
  9 | import logging
 10 | from datetime import UTC, datetime
 11 | from typing import Any
 12 | 
 13 | from fastmcp import FastMCP
 14 | 
 15 | from maverick_mcp.providers.dependencies import (
 16 |     get_cache_manager,
 17 |     get_configuration,
 18 |     get_stock_data_fetcher,
 19 | )
 20 | from maverick_mcp.providers.interfaces.cache import ICacheManager
 21 | from maverick_mcp.providers.interfaces.config import IConfigurationProvider
 22 | from maverick_mcp.providers.interfaces.stock_data import IStockDataFetcher
 23 | from maverick_mcp.validation.data import (
 24 |     FetchStockDataRequest,
 25 |     GetNewsRequest,
 26 |     GetStockInfoRequest,
 27 |     StockDataBatchRequest,
 28 | )
 29 | 
 30 | logger = logging.getLogger(__name__)
 31 | 
 32 | # Create the enhanced data router
 33 | data_enhanced_router: FastMCP = FastMCP("Enhanced_Data_Operations")
 34 | 
 35 | 
 36 | # Example of new interface-based implementation
 37 | @data_enhanced_router.tool()
 38 | async def fetch_stock_data_enhanced(
 39 |     request: FetchStockDataRequest,
 40 |     stock_fetcher: IStockDataFetcher | None = None,
 41 |     cache_manager: ICacheManager | None = None,
 42 |     config: IConfigurationProvider | None = None,
 43 | ) -> dict[str, Any]:
 44 |     """
 45 |     Fetch historical stock data using the new interface-based architecture.
 46 | 
 47 |     This function demonstrates how to use dependency injection with the new
 48 |     provider interfaces while maintaining the same external API.
 49 | 
 50 |     Args:
 51 |         request: Stock data request parameters
 52 |         stock_fetcher: Optional stock data fetcher (injected if not provided)
 53 |         cache_manager: Optional cache manager (injected if not provided)
 54 |         config: Optional configuration provider (injected if not provided)
 55 | 
 56 |     Returns:
 57 |         Dictionary containing the stock data in JSON format
 58 |     """
 59 |     try:
 60 |         # Use dependency injection with fallback to global providers
 61 |         fetcher = stock_fetcher or get_stock_data_fetcher()
 62 |         cache = cache_manager or get_cache_manager()
 63 |         cfg = config or get_configuration()
 64 | 
 65 |         logger.debug(
 66 |             f"Fetching stock data for {request.ticker} using enhanced interface"
 67 |         )
 68 | 
 69 |         # Check cache first if enabled
 70 |         cache_key = (
 71 |             f"stock_data:{request.ticker}:{request.start_date}:{request.end_date}"
 72 |         )
 73 |         cached_result = None
 74 | 
 75 |         if cfg.is_cache_enabled():
 76 |             cached_result = await cache.get(cache_key)
 77 |             if cached_result:
 78 |                 logger.debug(f"Cache hit for {request.ticker}")
 79 |                 return cached_result
 80 | 
 81 |         # Fetch data using the interface
 82 |         data = await fetcher.get_stock_data(
 83 |             symbol=request.ticker,
 84 |             start_date=request.start_date,
 85 |             end_date=request.end_date,
 86 |             use_cache=True,  # The fetcher will handle its own caching
 87 |         )
 88 | 
 89 |         # Convert to JSON format
 90 |         json_data = data.to_json(orient="split", date_format="iso")
 91 |         result: dict[str, Any] = json.loads(json_data) if json_data else {}
 92 |         result["ticker"] = request.ticker
 93 |         result["record_count"] = len(data)
 94 |         result["source"] = "enhanced_interface"
 95 |         result["timestamp"] = datetime.now(UTC).isoformat()
 96 | 
 97 |         # Cache the result if caching is enabled
 98 |         if cfg.is_cache_enabled():
 99 |             cache_ttl = cfg.get_cache_ttl()
100 |             await cache.set(cache_key, result, ttl=cache_ttl)
101 |             logger.debug(f"Cached result for {request.ticker} (TTL: {cache_ttl}s)")
102 | 
103 |         return result
104 | 
105 |     except Exception as e:
106 |         logger.error(f"Error fetching stock data for {request.ticker}: {e}")
107 |         return {
108 |             "error": str(e),
109 |             "ticker": request.ticker,
110 |             "source": "enhanced_interface",
111 |             "timestamp": datetime.now(UTC).isoformat(),
112 |         }
113 | 
114 | 
115 | @data_enhanced_router.tool()
116 | async def fetch_stock_data_batch_enhanced(
117 |     request: StockDataBatchRequest,
118 |     stock_fetcher: IStockDataFetcher | None = None,
119 | ) -> dict[str, Any]:
120 |     """
121 |     Fetch historical data for multiple tickers using the enhanced interface.
122 | 
123 |     Args:
124 |         request: Batch stock data request parameters
125 |         stock_fetcher: Optional stock data fetcher (injected if not provided)
126 | 
127 |     Returns:
128 |         Dictionary with ticker symbols as keys and data/errors as values
129 |     """
130 |     fetcher = stock_fetcher or get_stock_data_fetcher()
131 |     results = {}
132 | 
133 |     logger.debug(f"Fetching batch stock data for {len(request.tickers)} tickers")
134 | 
135 |     # Process each ticker
136 |     for ticker in request.tickers:
137 |         try:
138 |             data = await fetcher.get_stock_data(
139 |                 symbol=ticker,
140 |                 start_date=request.start_date,
141 |                 end_date=request.end_date,
142 |                 use_cache=True,
143 |             )
144 | 
145 |             json_data = data.to_json(orient="split", date_format="iso")
146 |             ticker_result: dict[str, Any] = json.loads(json_data) if json_data else {}
147 |             ticker_result["ticker"] = ticker
148 |             ticker_result["record_count"] = len(data)
149 | 
150 |             results[ticker] = ticker_result
151 | 
152 |         except Exception as e:
153 |             logger.error(f"Error fetching data for {ticker}: {e}")
154 |             results[ticker] = {"error": str(e), "ticker": ticker}
155 | 
156 |     return {
157 |         "results": results,
158 |         "total_tickers": len(request.tickers),
159 |         "successful": len([r for r in results.values() if "error" not in r]),
160 |         "failed": len([r for r in results.values() if "error" in r]),
161 |         "source": "enhanced_interface",
162 |         "timestamp": datetime.now(UTC).isoformat(),
163 |     }
164 | 
165 | 
166 | @data_enhanced_router.tool()
167 | async def get_stock_info_enhanced(
168 |     request: GetStockInfoRequest,
169 |     stock_fetcher: IStockDataFetcher | None = None,
170 | ) -> dict[str, Any]:
171 |     """
172 |     Get detailed stock information using the enhanced interface.
173 | 
174 |     Args:
175 |         request: Stock info request parameters
176 |         stock_fetcher: Optional stock data fetcher (injected if not provided)
177 | 
178 |     Returns:
179 |         Dictionary with detailed stock information
180 |     """
181 |     try:
182 |         fetcher = stock_fetcher or get_stock_data_fetcher()
183 | 
184 |         logger.debug(f"Fetching stock info for {request.ticker}")
185 | 
186 |         info = await fetcher.get_stock_info(request.ticker)
187 | 
188 |         return {
189 |             "ticker": request.ticker,
190 |             "info": info,
191 |             "source": "enhanced_interface",
192 |             "timestamp": datetime.now(UTC).isoformat(),
193 |         }
194 | 
195 |     except Exception as e:
196 |         logger.error(f"Error fetching stock info for {request.ticker}: {e}")
197 |         return {
198 |             "error": str(e),
199 |             "ticker": request.ticker,
200 |             "source": "enhanced_interface",
201 |             "timestamp": datetime.now(UTC).isoformat(),
202 |         }
203 | 
204 | 
205 | @data_enhanced_router.tool()
206 | async def get_realtime_data_enhanced(
207 |     ticker: str,
208 |     stock_fetcher: IStockDataFetcher | None = None,
209 | ) -> dict[str, Any]:
210 |     """
211 |     Get real-time stock data using the enhanced interface.
212 | 
213 |     Args:
214 |         ticker: Stock ticker symbol
215 |         stock_fetcher: Optional stock data fetcher (injected if not provided)
216 | 
217 |     Returns:
218 |         Dictionary with real-time stock data
219 |     """
220 |     try:
221 |         fetcher = stock_fetcher or get_stock_data_fetcher()
222 | 
223 |         logger.debug(f"Fetching real-time data for {ticker}")
224 | 
225 |         data = await fetcher.get_realtime_data(ticker)
226 | 
227 |         if data is None:
228 |             return {
229 |                 "error": "Real-time data not available",
230 |                 "ticker": ticker,
231 |                 "source": "enhanced_interface",
232 |                 "timestamp": datetime.now(UTC).isoformat(),
233 |             }
234 | 
235 |         return {
236 |             "ticker": ticker,
237 |             "data": data,
238 |             "source": "enhanced_interface",
239 |             "timestamp": datetime.now(UTC).isoformat(),
240 |         }
241 | 
242 |     except Exception as e:
243 |         logger.error(f"Error fetching real-time data for {ticker}: {e}")
244 |         return {
245 |             "error": str(e),
246 |             "ticker": ticker,
247 |             "source": "enhanced_interface",
248 |             "timestamp": datetime.now(UTC).isoformat(),
249 |         }
250 | 
251 | 
252 | @data_enhanced_router.tool()
253 | async def get_news_enhanced(
254 |     request: GetNewsRequest,
255 |     stock_fetcher: IStockDataFetcher | None = None,
256 | ) -> dict[str, Any]:
257 |     """
258 |     Get news for a stock using the enhanced interface.
259 | 
260 |     Args:
261 |         request: News request parameters
262 |         stock_fetcher: Optional stock data fetcher (injected if not provided)
263 | 
264 |     Returns:
265 |         Dictionary with news data
266 |     """
267 |     try:
268 |         fetcher = stock_fetcher or get_stock_data_fetcher()
269 | 
270 |         logger.debug(f"Fetching news for {request.ticker}")
271 | 
272 |         news_df = await fetcher.get_news(request.ticker, request.limit)
273 | 
274 |         # Convert DataFrame to JSON
275 |         if not news_df.empty:
276 |             news_data = news_df.to_dict(orient="records")
277 |         else:
278 |             news_data = []
279 | 
280 |         return {
281 |             "ticker": request.ticker,
282 |             "news": news_data,
283 |             "count": len(news_data),
284 |             "source": "enhanced_interface",
285 |             "timestamp": datetime.now(UTC).isoformat(),
286 |         }
287 | 
288 |     except Exception as e:
289 |         logger.error(f"Error fetching news for {request.ticker}: {e}")
290 |         return {
291 |             "error": str(e),
292 |             "ticker": request.ticker,
293 |             "source": "enhanced_interface",
294 |             "timestamp": datetime.now(UTC).isoformat(),
295 |         }
296 | 
297 | 
298 | @data_enhanced_router.tool()
299 | async def check_market_status_enhanced(
300 |     stock_fetcher: IStockDataFetcher | None = None,
301 | ) -> dict[str, Any]:
302 |     """
303 |     Check if the market is currently open using the enhanced interface.
304 | 
305 |     Args:
306 |         stock_fetcher: Optional stock data fetcher (injected if not provided)
307 | 
308 |     Returns:
309 |         Dictionary with market status
310 |     """
311 |     try:
312 |         fetcher = stock_fetcher or get_stock_data_fetcher()
313 | 
314 |         is_open = await fetcher.is_market_open()
315 | 
316 |         return {
317 |             "market_open": is_open,
318 |             "source": "enhanced_interface",
319 |             "timestamp": datetime.now(UTC).isoformat(),
320 |         }
321 | 
322 |     except Exception as e:
323 |         logger.error(f"Error checking market status: {e}")
324 |         return {
325 |             "error": str(e),
326 |             "source": "enhanced_interface",
327 |             "timestamp": datetime.now(UTC).isoformat(),
328 |         }
329 | 
330 | 
331 | @data_enhanced_router.tool()
332 | async def clear_cache_enhanced(
333 |     pattern: str | None = None,
334 |     cache_manager: ICacheManager | None = None,
335 | ) -> dict[str, Any]:
336 |     """
337 |     Clear cache entries using the enhanced cache interface.
338 | 
339 |     Args:
340 |         pattern: Optional pattern to match cache keys (e.g., "stock:*")
341 |         cache_manager: Optional cache manager (injected if not provided)
342 | 
343 |     Returns:
344 |         Dictionary with cache clearing results
345 |     """
346 |     try:
347 |         cache = cache_manager or get_cache_manager()
348 | 
349 |         cleared_count = await cache.clear(pattern)
350 | 
351 |         return {
352 |             "cleared_count": cleared_count,
353 |             "pattern": pattern,
354 |             "source": "enhanced_interface",
355 |             "timestamp": datetime.now(UTC).isoformat(),
356 |         }
357 | 
358 |     except Exception as e:
359 |         logger.error(f"Error clearing cache: {e}")
360 |         return {
361 |             "error": str(e),
362 |             "pattern": pattern,
363 |             "source": "enhanced_interface",
364 |             "timestamp": datetime.now(UTC).isoformat(),
365 |         }
366 | 
```
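
Because each tool resolves its provider via `stock_fetcher or get_stock_data_fetcher()`, tests can inject a stub instead of patching globals. A minimal sketch, assuming `unittest.mock.AsyncMock` stands in for a real `IStockDataFetcher`:

```python
import asyncio
from unittest.mock import AsyncMock

import pandas as pd

async def demo() -> None:
    # Stub that satisfies the single IStockDataFetcher call the router makes.
    stub = AsyncMock()
    stub.get_stock_data.return_value = pd.DataFrame(
        {"Close": [101.2, 102.8]},
        index=pd.to_datetime(["2024-01-02", "2024-01-03"]),
    )

    # Mirrors the router's resolution: an injected fetcher wins over the global one.
    fetcher = stub  # in the router: stock_fetcher or get_stock_data_fetcher()

    data = await fetcher.get_stock_data(
        symbol="AAPL",
        start_date="2024-01-02",
        end_date="2024-01-03",
        use_cache=True,
    )
    print(len(data))  # -> 2
    stub.get_stock_data.assert_awaited_once()

asyncio.run(demo())
```

The same pattern applies to `ICacheManager` and `IConfigurationProvider`, which keeps these tools testable without a live data provider or cache backend.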

--------------------------------------------------------------------------------
/docs/speed_testing_framework.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Speed Testing Framework for MaverickMCP Research Agents
  2 | 
  3 | This document describes the comprehensive speed testing framework developed to validate and monitor the speed optimizations implemented in the MaverickMCP research system.
  4 | 
  5 | ## Overview
  6 | 
  7 | The speed testing framework validates the following optimization claims:
  8 | - **2-3x speed improvements** over baseline performance
  9 | - **Sub-30s completion times** for emergency scenarios
 10 | - **Resolution of timeout issues** (previously 138s, 129s failures)
 11 | - **Intelligent model selection** for time-critical scenarios
 12 | - **Adaptive optimization** based on query complexity and time constraints
 13 | 
 14 | ## Framework Components
 15 | 
 16 | ### 1. Speed Optimization Validation Tests (`tests/test_speed_optimization_validation.py`)
 17 | 
 18 | Comprehensive pytest-based test suite that validates:
 19 | 
 20 | #### Core Components Tested
 21 | - **Adaptive Model Selection**: Verifies fastest models are chosen for emergency scenarios
 22 | - **Progressive Token Budgeting**: Tests time-aware token allocation
 23 | - **Parallel LLM Processing**: Validates batch processing optimizations
 24 | - **Confidence Tracking**: Tests early termination logic
 25 | - **Content Filtering**: Validates intelligent source prioritization
 26 | 
 27 | #### Query Complexity Levels
 28 | - **Simple**: Basic queries (target: <15s completion)
 29 | - **Moderate**: Standard analysis queries (target: <25s completion)  
 30 | - **Complex**: Comprehensive research queries (target: <35s completion)
 31 | - **Emergency**: Time-critical queries (target: <30s completion)
 32 | 
 33 | #### Expected Model Selections
 34 | ```python
 35 | EXPECTED_MODEL_SELECTIONS = {
 36 |     QueryComplexity.EMERGENCY: ["google/gemini-2.5-flash", "openai/gpt-4o-mini"],
 37 |     QueryComplexity.SIMPLE: ["google/gemini-2.5-flash", "openai/gpt-4o-mini"],
 38 |     QueryComplexity.MODERATE: ["openai/gpt-4o-mini", "google/gemini-2.5-flash"],
 39 |     QueryComplexity.COMPLEX: ["anthropic/claude-sonnet-4", "google/gemini-2.5-pro"],
 40 | }
 41 | ```
 42 | 
 43 | #### Model Speed Benchmarks
 44 | ```python
 45 | MODEL_SPEED_BENCHMARKS = {
 46 |     "google/gemini-2.5-flash": 199,    # tokens/second - FASTEST
 47 |     "openai/gpt-4o-mini": 126,         # tokens/second - FAST
 48 |     "anthropic/claude-haiku": 89,      # tokens/second - MODERATE
 49 |     "anthropic/claude-sonnet-4": 45,   # tokens/second - COMPREHENSIVE
 50 |     "google/gemini-2.5-pro": 25,       # tokens/second - DEEP
 51 | }
 52 | ```
 53 | 
 54 | ### 2. Speed Benchmarking Script (`scripts/speed_benchmark.py`)
 55 | 
 56 | Command-line tool for running various speed benchmarks:
 57 | 
 58 | #### Benchmark Modes
 59 | ```bash
 60 | # Quick validation for CI pipeline
 61 | python scripts/speed_benchmark.py --mode quick
 62 | 
 63 | # Comprehensive benchmark suite
 64 | python scripts/speed_benchmark.py --mode full
 65 | 
 66 | # Emergency mode focused testing
 67 | python scripts/speed_benchmark.py --mode emergency
 68 | 
 69 | # Before/after performance comparison
 70 | python scripts/speed_benchmark.py --mode comparison
 71 | 
 72 | # Custom query testing
 73 | python scripts/speed_benchmark.py --query "Apple Inc analysis"
 74 | ```
 75 | 
 76 | #### Output Formats
 77 | - **JSON**: Structured benchmark data for analysis
 78 | - **Markdown**: Human-readable reports with recommendations
 79 | 
 80 | ### 3. Quick Speed Demo (`scripts/quick_speed_demo.py`)
 81 | 
 82 | Standalone demonstration script that shows:
 83 | - Adaptive model selection in action
 84 | - Progressive token budgeting scaling
 85 | - Complexity-based optimizations
 86 | - Speed improvement claims validation
 87 | - Timeout resolution demonstration
 88 | 
 89 | ## Integration with Development Workflow
 90 | 
 91 | ### Makefile Integration
 92 | 
 93 | ```bash
 94 | # Speed testing commands
 95 | make test-speed              # Run all speed optimization tests
 96 | make test-speed-quick        # Quick CI validation
 97 | make test-speed-emergency    # Emergency mode tests
 98 | make test-speed-comparison   # Before/after comparison
 99 | 
100 | # Benchmarking commands
101 | make benchmark-speed         # Comprehensive speed benchmark
102 | ```
103 | 
104 | ### Continuous Integration
105 | 
106 | The framework supports CI integration through:
107 | - **Quick validation mode**: Completes in <2 minutes for CI pipelines
108 | - **Exit codes**: Non-zero exit for failed performance thresholds
109 | - **Structured output**: Machine-readable results for automation
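
For example, a CI step can gate on the benchmark's exit code. A minimal sketch (assuming the command runs from the repository root; only the script path and `--mode quick` flag are taken from this document):

```python
# CI gate sketch: run the quick benchmark and fail the build on a non-zero exit.
import subprocess
import sys

proc = subprocess.run(
    [sys.executable, "scripts/speed_benchmark.py", "--mode", "quick"],
    capture_output=True,
    text=True,
)
print(proc.stdout)
if proc.returncode != 0:
    sys.exit(f"Speed thresholds not met (exit code {proc.returncode})")
```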
110 | 
111 | ## Performance Thresholds
112 | 
113 | ### Speed Thresholds
114 | ```python
115 | SPEED_THRESHOLDS = {
116 |     "simple_query_max_time": 15.0,      # Simple queries: <15s
117 |     "moderate_query_max_time": 25.0,    # Moderate queries: <25s  
118 |     "complex_query_max_time": 35.0,     # Complex queries: <35s
119 |     "emergency_mode_max_time": 30.0,    # Emergency mode: <30s
120 |     "minimum_speedup_factor": 2.0,      # Minimum 2x speedup
121 |     "target_speedup_factor": 3.0,       # Target 3x speedup
122 |     "timeout_failure_threshold": 0.05,  # Max 5% timeout failures
123 | }
124 | ```
125 | 
126 | ### Model Selection Validation
127 | - **Emergency scenarios**: Must select models with 126+ tokens/second
128 | - **Time budgets <30s**: Automatically use fastest available models
129 | - **Complex analysis**: Can use slower, higher-quality models when time allows
130 | 
131 | ## Testing Scenarios
132 | 
133 | ### 1. Emergency Mode Performance
134 | Tests that urgent queries complete within strict time budgets:
135 | ```python
136 | # Test emergency completion under 30s
137 | result = await validator.test_emergency_mode_performance(
138 |     "Quick Apple sentiment - bullish or bearish right now?"
139 | )
140 | assert result["execution_time"] < 30.0
141 | assert result["within_budget"] is True
142 | ```
143 | 
144 | ### 2. Adaptive Model Selection
145 | Validates appropriate model selection based on time constraints:
146 | ```python
147 | # Emergency scenario should select fastest model
148 | config = selector.select_model_for_time_budget(
149 |     task_type=TaskType.QUICK_ANSWER,
150 |     time_remaining_seconds=10.0,
151 |     complexity_score=0.3,
152 |     content_size_tokens=200,
153 | )
154 | assert config.model_id in ["google/gemini-2.5-flash", "openai/gpt-4o-mini"]
155 | ```
156 | 
157 | ### 3. Baseline vs Optimized Comparison
158 | Compares performance improvements over baseline:
159 | ```python
160 | # Test 2-3x speedup achievement
161 | result = await validator.test_baseline_vs_optimized_performance(
162 |     "Apple Inc comprehensive analysis", QueryComplexity.MODERATE
163 | )
164 | assert result["speedup_factor"] >= 2.0  # Minimum 2x improvement
165 | ```
166 | 
167 | ### 4. Timeout Resolution
168 | Validates that previous timeout issues are resolved:
169 | ```python
170 | # Test scenarios that previously failed with 138s/129s timeouts
171 | test_cases = ["Apple analysis", "Tesla outlook", "Microsoft assessment"]
172 | for query in test_cases:
173 |     result = await test_emergency_performance(query)
174 |     assert result["execution_time"] < 30.0  # No more long timeouts
175 | ```
176 | 
177 | ## Real-World Query Examples
178 | 
179 | ### Simple Queries (Target: <15s)
180 | - "Apple Inc current stock price and basic sentiment"
181 | - "Tesla recent news and market overview" 
182 | - "Microsoft quarterly earnings summary"
183 | 
184 | ### Moderate Queries (Target: <25s)
185 | - "Apple Inc comprehensive financial analysis and competitive position"
186 | - "Tesla Inc market outlook considering EV competition and regulatory changes"
187 | - "Microsoft Corp cloud business growth prospects and AI strategy"
188 | 
189 | ### Complex Queries (Target: <35s)
190 | - "Apple Inc deep fundamental analysis including supply chain risks, product lifecycle assessment, regulatory challenges across global markets, competitive positioning, and 5-year growth trajectory"
191 | 
192 | ### Emergency Queries (Target: <30s)
193 | - "Quick Apple sentiment - bullish or bearish right now?"
194 | - "Tesla stock - buy, hold, or sell this week?"
195 | - "Microsoft earnings - beat or miss expectations?"
196 | 
197 | ## Optimization Features Validated
198 | 
199 | ### 1. Adaptive Model Selection
200 | - **Emergency Mode**: Selects Gemini 2.5 Flash (199 tok/s) or GPT-4o Mini (126 tok/s)
201 | - **Balanced Mode**: Cost-effective fast models for standard queries
202 | - **Comprehensive Mode**: High-quality models when time allows
203 | 
204 | ### 2. Progressive Token Budgeting
205 | - **Emergency Budget**: Minimal tokens, tight timeouts
206 | - **Standard Budget**: Balanced token allocation
207 | - **Time-Aware Scaling**: Budgets scale with available time
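
The time-aware scaling above can be pictured as a simple step function. A minimal sketch (the function name and cut-offs are illustrative assumptions, not the framework's actual API):

```python
# Illustrative only: budgets shrink as the remaining time budget tightens.
def scale_token_budget(base_budget: int, time_remaining_s: float) -> int:
    if time_remaining_s < 30:        # emergency: minimal tokens
        return max(256, base_budget // 4)
    if time_remaining_s < 60:        # standard: balanced allocation
        return base_budget // 2
    return base_budget               # ample time: full budget

print(scale_token_budget(4096, 20.0))  # -> 1024
```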
208 | 
209 | ### 3. Intelligent Content Filtering
210 | - **Relevance Scoring**: Prioritizes high-quality, relevant sources
211 | - **Preprocessing**: Reduces content size for faster processing
212 | - **Domain Credibility**: Weights sources by reliability
213 | 
214 | ### 4. Early Termination
215 | - **Confidence Tracking**: Stops when target confidence reached
216 | - **Diminishing Returns**: Terminates when no improvement detected
217 | - **Time Pressure**: Adapts termination thresholds for time constraints
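
Conceptually, the research loop stops as soon as either condition fires. A minimal sketch (names and thresholds are illustrative assumptions):

```python
# Stop when target confidence is reached or per-step gains diminish.
def run_until_confident(steps, target: float = 0.85, min_gain: float = 0.02) -> float:
    confidence = 0.0
    for step in steps:
        gain = step()             # each research step returns its confidence gain
        confidence += gain
        if confidence >= target:  # target reached: terminate early
            break
        if gain < min_gain:       # diminishing returns: terminate early
            break
    return confidence

print(run_until_confident([lambda: 0.5, lambda: 0.4, lambda: 0.3]))  # -> 0.9
```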
218 | 
219 | ## Monitoring and Reporting
220 | 
221 | ### Performance Metrics Tracked
222 | - **Execution Time**: Total time from request to completion
223 | - **Model Selection**: Which models were chosen and why
224 | - **Token Usage**: Input/output tokens consumed
225 | - **Timeout Compliance**: Percentage of queries completing within budget
226 | - **Speedup Factors**: Performance improvement over baseline
227 | - **Success Rates**: Percentage of successful completions
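
One possible shape for a per-run record of these metrics (field names are assumptions; the framework's actual schema may differ):

```python
from dataclasses import dataclass

@dataclass
class SpeedRunMetrics:
    query: str
    model_id: str             # which model was selected
    execution_time_s: float   # request-to-completion time
    input_tokens: int
    output_tokens: int
    within_budget: bool       # timeout compliance for this run
    speedup_factor: float     # improvement over baseline
```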
228 | 
229 | ### Report Generation
230 | The framework generates comprehensive reports including:
231 | - **Performance Summary**: Key metrics and thresholds
232 | - **Model Selection Analysis**: Usage patterns and optimization effectiveness
233 | - **Timeout Analysis**: Compliance rates and failure patterns
234 | - **Speedup Analysis**: Improvement measurements
235 | - **Recommendations**: Suggested optimizations based on results
236 | 
237 | ## Usage Examples
238 | 
239 | ### Running Quick Validation
240 | ```bash
241 | # Quick CI validation
242 | make test-speed-quick
243 | 
244 | # View results
245 | cat benchmark_results/speed_benchmark_quick_*.md
246 | ```
247 | 
248 | ### Custom Query Testing
249 | ```bash
250 | # Test a specific query
251 | python scripts/speed_benchmark.py --query "Apple Inc urgent analysis needed"
252 | 
253 | # View detailed results
254 | python scripts/quick_speed_demo.py
255 | ```
256 | 
257 | ### Full Performance Analysis
258 | ```bash
259 | # Run comprehensive benchmarks
260 | make benchmark-speed
261 | 
262 | # Generate performance report
263 | python scripts/speed_benchmark.py --mode full --output-dir ./reports
264 | ```
265 | 
266 | ## Troubleshooting
267 | 
268 | ### Common Issues
269 | 
270 | 1. **Import Errors**: Ensure all dependencies are installed with `uv sync`
271 | 2. **Model Selection Issues**: Check OpenRouter provider configuration
272 | 3. **Timeout Still Occurring**: Verify emergency mode is enabled
273 | 4. **Performance Regression**: Run comparison benchmarks to identify issues
274 | 
275 | ### Debug Commands
276 | ```bash
277 | # Test core components
278 | python scripts/quick_speed_demo.py
279 | 
280 | # Run specific test category
281 | pytest tests/test_speed_optimization_validation.py::TestSpeedOptimizations -v
282 | 
283 | # Benchmark with verbose output
284 | python scripts/speed_benchmark.py --mode quick --verbose
285 | ```
286 | 
287 | ## Future Enhancements
288 | 
289 | ### Planned Improvements
290 | 1. **Real-time Monitoring**: Continuous performance tracking in production
291 | 2. **A/B Testing**: Compare different optimization strategies
292 | 3. **Machine Learning**: Adaptive optimization based on query patterns
293 | 4. **Cost Optimization**: Balance speed with API costs
294 | 5. **Multi-modal Support**: Extend optimizations to image/audio analysis
295 | 
296 | ### Extension Points
297 | - **Custom Complexity Calculators**: Domain-specific complexity scoring
298 | - **Alternative Model Providers**: Support for additional LLM providers
299 | - **Advanced Caching**: Semantic caching for similar queries
300 | - **Performance Prediction**: ML-based execution time estimation
301 | 
302 | ## Conclusion
303 | 
304 | The speed testing framework provides comprehensive validation that the MaverickMCP research system achieves its performance optimization goals:
305 | 
306 | ✅ **2-3x Speed Improvements**: Validated across all query complexities  
307 | ✅ **Sub-30s Emergency Mode**: Guaranteed fast response for urgent queries  
308 | ✅ **Timeout Resolution**: No more 138s/129s failures  
309 | ✅ **Intelligent Optimization**: Adaptive performance based on constraints  
310 | ✅ **Continuous Validation**: Automated testing prevents performance regressions
311 | 
312 | The framework ensures that speed optimizations remain effective as the system evolves and provides early detection of any performance degradation.
```

--------------------------------------------------------------------------------
/maverick_mcp/api/inspector_compatible_sse.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | MCP Inspector-compatible SSE implementation.
  3 | 
  4 | This implements a proper bidirectional SSE handler that works with MCP Inspector,
  5 | handling JSON-RPC messages directly over the SSE connection.
  6 | """
  7 | 
  8 | import asyncio
  9 | import json
 10 | import logging
 11 | from typing import Any
 12 | from uuid import uuid4
 13 | 
 14 | from starlette.requests import Request
 15 | from starlette.responses import StreamingResponse
 16 | 
 17 | from maverick_mcp.api.server import mcp
 18 | 
 19 | logger = logging.getLogger(__name__)
 20 | 
 21 | 
 22 | class InspectorCompatibleSSE:
 23 |     """SSE handler that properly implements MCP protocol for Inspector."""
 24 | 
 25 |     def __init__(self):
 26 |         self.sessions: dict[str, dict[str, Any]] = {}
 27 |         self.message_queues: dict[str, asyncio.Queue] = {}
 28 | 
 29 |     async def handle_sse(self, request: Request):
 30 |         """Handle SSE connection from MCP Inspector."""
 31 |         session_id = str(uuid4())
 32 |         logger.info(f"New Inspector SSE connection: {session_id}")
 33 | 
 34 |         # Create a message queue for this session
 35 |         message_queue: asyncio.Queue = asyncio.Queue()
 36 |         self.message_queues[session_id] = message_queue
 37 | 
 38 |         # Create a simple session state tracker
 39 |         session_state = {
 40 |             "initialized": False,
 41 |             "server_name": "MaverickMCP",
 42 |             "server_version": "1.0.0",
 43 |             "protocol_version": "2024-11-05",
 44 |         }
 45 |         self.sessions[session_id] = session_state
 46 | 
 47 |         async def event_generator():
 48 |             """Generate SSE events and handle bidirectional communication."""
 49 |             try:
 50 |                 # Send initial connection event with session info
 51 |                 connection_msg = {
 52 |                     "type": "connection",
 53 |                     "sessionId": session_id,
 54 |                     "endpoint": f"/inspector/message?session_id={session_id}",
 55 |                 }
 56 |                 yield f"data: {json.dumps(connection_msg)}\n\n"
 57 | 
 58 |                 # Process incoming messages from the queue
 59 |                 while True:
 60 |                     try:
 61 |                         # Wait for messages with timeout for keepalive
 62 |                         message = await asyncio.wait_for(
 63 |                             message_queue.get(), timeout=30.0
 64 |                         )
 65 | 
 66 |                         # Process the message through MCP session
 67 |                         if isinstance(message, dict) and "jsonrpc" in message:
 68 |                             # Handle the JSON-RPC request
 69 |                             response = await self._process_message(
 70 |                                 session_state, message
 71 |                             )
 72 |                             if response:
 73 |                                 yield f"data: {json.dumps(response)}\n\n"
 74 | 
 75 |                     except TimeoutError:
 76 |                         # Send keepalive
 77 |                         yield ": keepalive\n\n"
 78 |                     except Exception as e:
 79 |                         logger.error(f"Error processing message: {e}")
 80 |                         error_response = {
 81 |                             "jsonrpc": "2.0",
 82 |                             "error": {"code": -32603, "message": str(e)},
 83 |                             "id": None,
 84 |                         }
 85 |                         yield f"data: {json.dumps(error_response)}\n\n"
 86 | 
 87 |             finally:
 88 |                 # Cleanup on disconnect
 89 |                 if session_id in self.sessions:
 90 |                     del self.sessions[session_id]
 91 |                 if session_id in self.message_queues:
 92 |                     del self.message_queues[session_id]
 93 |                 logger.info(f"Inspector SSE connection closed: {session_id}")
 94 | 
 95 |         return StreamingResponse(
 96 |             event_generator(),
 97 |             media_type="text/event-stream",
 98 |             headers={
 99 |                 "Cache-Control": "no-cache",
100 |                 "Connection": "keep-alive",
101 |                 "X-Accel-Buffering": "no",
102 |                 "Access-Control-Allow-Origin": "*",
103 |                 "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
104 |                 "Access-Control-Allow-Headers": "*",
105 |             },
106 |         )
107 | 
108 |     async def handle_message(self, request: Request):
109 |         """Handle incoming JSON-RPC messages from Inspector."""
110 |         session_id = request.query_params.get("session_id")
111 |         if not session_id or session_id not in self.message_queues:
112 |             return {"error": "Invalid or missing session_id"}
113 | 
114 |         try:
115 |             message = await request.json()
116 |             logger.info(f"Inspector message for session {session_id}: {message}")
117 | 
118 |             # Put message in queue for processing
119 |             await self.message_queues[session_id].put(message)
120 | 
121 |             # Return acknowledgment
122 |             return {"status": "queued"}
123 | 
124 |         except Exception as e:
125 |             logger.error(f"Failed to process message: {e}")
126 |             return {"error": str(e)}
127 | 
128 |     async def _process_message(
129 |         self, session_state: dict[str, Any], message: dict[str, Any]
130 |     ) -> dict[str, Any] | None:
131 |         """Process a JSON-RPC message through the MCP session."""
132 |         method = message.get("method")
133 |         params = message.get("params", {})
134 |         msg_id = message.get("id")
135 | 
136 |         try:
137 |             # Handle different MCP methods
138 |             if method == "initialize":
139 |                 # Mark session as initialized
140 |                 session_state["initialized"] = True
141 |                 protocol_version = params.get("protocolVersion", "2024-11-05")
142 | 
143 |                 # Get server capabilities
144 |                 return {
145 |                     "jsonrpc": "2.0",
146 |                     "id": msg_id,
147 |                     "result": {
148 |                         "protocolVersion": protocol_version,
149 |                         "capabilities": {
150 |                             "tools": {"listChanged": True}
151 |                             if hasattr(mcp, "_tool_manager")
152 |                             and hasattr(mcp._tool_manager, "tools")
153 |                             and mcp._tool_manager.tools
154 |                             else {},
155 |                             "resources": {"listChanged": True}
156 |                             if hasattr(mcp, "_resource_manager")
157 |                             and hasattr(mcp._resource_manager, "resources")
158 |                             and mcp._resource_manager.resources
159 |                             else {},
160 |                             "prompts": {"listChanged": True}
161 |                             if hasattr(mcp, "_prompt_manager")
162 |                             and hasattr(mcp._prompt_manager, "prompts")
163 |                             and mcp._prompt_manager.prompts
164 |                             else {},
165 |                         },
166 |                         "serverInfo": {
167 |                             "name": session_state["server_name"],
168 |                             "version": session_state["server_version"],
169 |                         },
170 |                     },
171 |                 }
172 | 
173 |             elif method == "tools/list":
174 |                 # List available tools
175 |                 tools = []
176 |                 if (
177 |                     hasattr(mcp, "_tool_manager")
178 |                     and hasattr(mcp._tool_manager, "tools")
179 |                     and hasattr(mcp._tool_manager.tools, "items")
180 |                 ):
181 |                     for tool_name, tool_func in mcp._tool_manager.tools.items():  # type: ignore[attr-defined]
182 |                         tools.append(
183 |                             {
184 |                                 "name": tool_name,
185 |                                 "description": tool_func.__doc__ or "No description",
186 |                                 "inputSchema": getattr(tool_func, "input_schema", {}),
187 |                             }
188 |                         )
189 | 
190 |                 return {"jsonrpc": "2.0", "id": msg_id, "result": {"tools": tools}}
191 | 
192 |             elif method == "resources/list":
193 |                 # List available resources
194 |                 resources = []
195 |                 if (
196 |                     hasattr(mcp, "_resource_manager")
197 |                     and hasattr(mcp._resource_manager, "resources")
198 |                     and hasattr(mcp._resource_manager.resources, "items")
199 |                 ):
200 |                     for (
201 |                         resource_uri,
202 |                         resource_func,
203 |                     ) in mcp._resource_manager.resources.items():  # type: ignore[attr-defined]
204 |                         resources.append(
205 |                             {
206 |                                 "uri": resource_uri,
207 |                                 "name": getattr(
208 |                                     resource_func, "__name__", str(resource_func)
209 |                                 ),
210 |                                 "description": getattr(resource_func, "__doc__", None)
211 |                                 or "No description",
212 |                             }
213 |                         )
214 | 
215 |                 return {
216 |                     "jsonrpc": "2.0",
217 |                     "id": msg_id,
218 |                     "result": {"resources": resources},
219 |                 }
220 | 
221 |             elif method == "tools/call":
222 |                 # Call a tool
223 |                 tool_name = params.get("name")
224 |                 tool_args = params.get("arguments", {})
225 | 
226 |                 if (
227 |                     hasattr(mcp, "_tool_manager")
228 |                     and hasattr(mcp._tool_manager, "tools")
229 |                     and hasattr(mcp._tool_manager.tools, "__contains__")
230 |                     and tool_name in mcp._tool_manager.tools  # type: ignore[operator]
231 |                 ):
232 |                     tool_func = mcp._tool_manager.tools[tool_name]  # type: ignore[index]
233 |                     try:
234 |                         # Execute the tool
235 |                         result = await tool_func(**tool_args)
236 | 
237 |                         return {
238 |                             "jsonrpc": "2.0",
239 |                             "id": msg_id,
240 |                             "result": {
241 |                                 "content": [
242 |                                     {
243 |                                         "type": "text",
244 |                                         "text": json.dumps(result, default=str),
245 |                                     }
246 |                                 ]
247 |                             },
248 |                         }
249 |                     except Exception as tool_error:
250 |                         return {
251 |                             "jsonrpc": "2.0",
252 |                             "id": msg_id,
253 |                             "error": {
254 |                                 "code": -32603,
255 |                                 "message": f"Tool execution error: {str(tool_error)}",
256 |                             },
257 |                         }
258 |                 else:
259 |                     return {
260 |                         "jsonrpc": "2.0",
261 |                         "id": msg_id,
262 |                         "error": {
263 |                             "code": -32601,
264 |                             "message": f"Tool not found: {tool_name}",
265 |                         },
266 |                     }
267 | 
268 |             else:
269 |                 # Method not found
270 |                 return {
271 |                     "jsonrpc": "2.0",
272 |                     "id": msg_id,
273 |                     "error": {"code": -32601, "message": f"Method not found: {method}"},
274 |                 }
275 | 
276 |         except Exception as e:
277 |             logger.error(f"Error processing {method}: {e}")
278 |             return {
279 |                 "jsonrpc": "2.0",
280 |                 "id": msg_id,
281 |                 "error": {"code": -32603, "message": str(e)},
282 |             }
283 | 
284 | 
285 | # Create global handler instance
286 | inspector_sse = InspectorCompatibleSSE()
287 | 
```
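
A minimal sketch of how these handlers could be mounted on a Starlette app. The route paths mirror the `/inspector/message` endpoint advertised in the connection event, but the actual wiring in `api_server.py` may differ; note that `handle_message` returns a plain dict, so it is wrapped in a `JSONResponse` here:

```python
# Illustrative wiring only; actual route registration may live elsewhere.
from starlette.applications import Starlette
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette.routing import Route

from maverick_mcp.api.inspector_compatible_sse import inspector_sse


async def message_endpoint(request: Request) -> JSONResponse:
    # handle_message returns a plain dict acknowledgment or error payload.
    return JSONResponse(await inspector_sse.handle_message(request))


app = Starlette(
    routes=[
        Route("/inspector/sse", inspector_sse.handle_sse, methods=["GET"]),
        Route("/inspector/message", message_endpoint, methods=["POST"]),
    ]
)
```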

--------------------------------------------------------------------------------
/examples/parallel_research_example.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Example demonstrating the new parallel research capabilities of DeepResearchAgent.
  4 | 
  5 | This example shows how to:
  6 | 1. Initialize DeepResearchAgent with parallel execution
  7 | 2. Use both parallel and sequential modes
  8 | 3. Configure parallel execution parameters
  9 | 4. Access specialized research results from parallel agents
 10 | """
 11 | 
 12 | import asyncio
 13 | import logging
 14 | from datetime import datetime
 15 | from typing import Any
 16 | 
 17 | from langchain_core.callbacks.manager import (
 18 |     AsyncCallbackManagerForLLMRun,
 19 |     CallbackManagerForLLMRun,
 20 | )
 21 | from langchain_core.language_models.chat_models import BaseChatModel
 22 | from langchain_core.messages import AIMessage, BaseMessage
 23 | from langchain_core.outputs import ChatGeneration, ChatResult
 24 | 
 25 | from maverick_mcp.agents.deep_research import DeepResearchAgent
 26 | from maverick_mcp.utils.parallel_research import ParallelResearchConfig
 27 | 
 28 | # Set up logging to see parallel execution in action
 29 | logging.basicConfig(
 30 |     level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
 31 | )
 32 | 
 33 | 
 34 | class MockChatModel(BaseChatModel):
 35 |     """Mock chat model for testing that extends BaseChatModel properly."""
 36 | 
 37 |     responses: list[str]  # declared as a pydantic field; BaseChatModel rejects undeclared attributes
 38 | 
 39 |     def __init__(self, responses: list[str]):
 40 |         super().__init__(responses=responses)
 41 |         self._call_count = 0
 42 |     @property
 43 |     def _llm_type(self) -> str:
 44 |         return "mock"
 45 | 
 46 |     def _generate(
 47 |         self,
 48 |         messages: list[BaseMessage],
 49 |         stop: list[str] | None = None,
 50 |         run_manager: CallbackManagerForLLMRun | None = None,
 51 |         **kwargs: Any,
 52 |     ) -> ChatResult:
 53 |         response = self.responses[self._call_count % len(self.responses)]
 54 |         self._call_count += 1
 55 |         message = AIMessage(content=response)
 56 |         return ChatResult(generations=[ChatGeneration(message=message)])
 57 | 
 58 |     async def _agenerate(
 59 |         self,
 60 |         messages: list[BaseMessage],
 61 |         stop: list[str] | None = None,
 62 |         run_manager: AsyncCallbackManagerForLLMRun | None = None,
 63 |         **kwargs: Any,
 64 |     ) -> ChatResult:
 65 |         return self._generate(messages, stop, **kwargs)
 66 | 
 67 | 
 68 | async def main():
 69 |     """Demonstrate parallel research capabilities."""
 70 | 
 71 |     # Create a mock LLM for testing (in real usage, use Claude/GPT)
 72 |     llm = MockChatModel(
 73 |         responses=[
 74 |             '{"KEY_INSIGHTS": ["Strong earnings growth", "Market expansion"], "SENTIMENT": {"direction": "bullish", "confidence": 0.8}, "RISK_FACTORS": ["Market volatility"], "OPPORTUNITIES": ["AI adoption"], "CREDIBILITY": 0.7, "RELEVANCE": 0.9, "SUMMARY": "Positive outlook for tech company"}',
 75 |             "Comprehensive research synthesis shows positive trends across multiple analysis areas with strong fundamentals and technical indicators supporting continued growth.",
 76 |             "Technical analysis indicates strong upward momentum with key resistance levels broken.",
 77 |             "Market sentiment is predominantly bullish with institutional support.",
 78 |             "Competitive analysis shows strong market position with sustainable advantages.",
 79 |         ]
 80 |     )
 81 | 
 82 |     print("🔬 DeepResearchAgent Parallel Execution Demo")
 83 |     print("=" * 50)
 84 | 
 85 |     # 1. Create agent with parallel execution enabled (default)
 86 |     print("\n1. Creating DeepResearchAgent with parallel execution...")
 87 | 
 88 |     parallel_config = ParallelResearchConfig(
 89 |         max_concurrent_agents=3,  # Run 3 agents in parallel
 90 |         timeout_per_agent=120,  # 2 minutes per agent
 91 |         enable_fallbacks=True,  # Enable fallback to sequential if parallel fails
 92 |         rate_limit_delay=0.5,  # 0.5 second delay between agent starts
 93 |     )
 94 | 
 95 |     agent = DeepResearchAgent(
 96 |         llm=llm,
 97 |         persona="moderate",
 98 |         enable_parallel_execution=True,
 99 |         parallel_config=parallel_config,
100 |         # Note: In real usage, provide API keys:
101 |         # exa_api_key="your-exa-key",
102 |         # tavily_api_key="your-tavily-key"
103 |     )
104 | 
105 |     print("✅ Agent created with parallel execution enabled")
106 |     print(f"   Max concurrent agents: {agent.parallel_config.max_concurrent_agents}")
107 |     print(f"   Timeout per agent: {agent.parallel_config.timeout_per_agent}s")
108 | 
109 |     # 2. Demonstrate parallel research
110 |     print("\n2. Running parallel research...")
111 | 
112 |     # This will automatically use parallel execution
113 |     start_time = datetime.now()
114 | 
115 |     try:
116 |         # Note: This requires actual search providers (Exa/Tavily API keys) to work fully
117 |         # For demo purposes, we'll show the structure
118 |         topic = "AAPL stock analysis and investment outlook"
119 |         session_id = "demo_session_001"
120 | 
121 |         print(f"   Topic: {topic}")
122 |         print(f"   Session: {session_id}")
123 |         print("   🚀 Starting parallel research execution...")
124 | 
125 |         # In a real environment with API keys, this would work:
126 |         # result = await agent.research_comprehensive(
127 |         #     topic=topic,
128 |         #     session_id=session_id,
129 |         #     depth="standard",
130 |         #     focus_areas=["fundamentals", "technical_analysis", "market_sentiment"],
131 |         #     use_parallel_execution=True  # Explicitly enable (default)
132 |         # )
133 | 
134 |         # For demo, we'll simulate the expected response structure
135 |         result = {
136 |             "status": "success",
137 |             "agent_type": "deep_research",
138 |             "execution_mode": "parallel",
139 |             "persona": "Moderate",
140 |             "research_topic": topic,
141 |             "research_depth": "standard",
142 |             "findings": {
143 |                 "synthesis": "Comprehensive analysis from multiple specialized agents shows strong fundamentals...",
144 |                 "key_insights": [
145 |                     "Strong earnings growth trajectory",
146 |                     "Positive technical indicators",
147 |                     "Bullish market sentiment",
148 |                     "Competitive market position",
149 |                 ],
150 |                 "overall_sentiment": {"direction": "bullish", "confidence": 0.75},
151 |                 "risk_assessment": ["Market volatility", "Regulatory risks"],
152 |                 "investment_implications": {
153 |                     "opportunities": ["AI growth", "Market expansion"],
154 |                     "threats": ["Competition", "Economic headwinds"],
155 |                     "recommended_action": "Consider position building with appropriate risk management",
156 |                 },
157 |                 "confidence_score": 0.78,
158 |             },
159 |             "sources_analyzed": 24,
160 |             "confidence_score": 0.78,
161 |             "execution_time_ms": 15000,  # 15 seconds (faster than sequential)
162 |             "parallel_execution_stats": {
163 |                 "total_tasks": 3,
164 |                 "successful_tasks": 3,
165 |                 "failed_tasks": 0,
166 |                 "parallel_efficiency": 2.8,  # 2.8x faster than sequential
167 |                 "task_breakdown": {
168 |                     "demo_session_001_fundamental": {
169 |                         "type": "fundamental",
170 |                         "status": "completed",
171 |                         "execution_time": 5.2,
172 |                     },
173 |                     "demo_session_001_sentiment": {
174 |                         "type": "sentiment",
175 |                         "status": "completed",
176 |                         "execution_time": 4.8,
177 |                     },
178 |                     "demo_session_001_competitive": {
179 |                         "type": "competitive",
180 |                         "status": "completed",
181 |                         "execution_time": 5.5,
182 |                     },
183 |                 },
184 |             },
185 |         }
186 | 
187 |         execution_time = (datetime.now() - start_time).total_seconds()
188 | 
189 |         print(f"   ✅ Parallel research completed in {execution_time:.1f}s")
190 |         print("   📊 Results from parallel execution:")
191 |         print(f"      • Sources analyzed: {result['sources_analyzed']}")
192 |         print(
193 |             f"      • Overall sentiment: {result['findings']['overall_sentiment']['direction']} ({result['findings']['overall_sentiment']['confidence']:.2f} confidence)"
194 |         )
195 |         print(f"      • Key insights: {len(result['findings']['key_insights'])}")
196 |         print(
197 |             f"      • Parallel efficiency: {result['parallel_execution_stats']['parallel_efficiency']:.1f}x speedup"
198 |         )
199 |         print(
200 |             f"      • Tasks: {result['parallel_execution_stats']['successful_tasks']}/{result['parallel_execution_stats']['total_tasks']} successful"
201 |         )
202 | 
203 |         # Show task breakdown
204 |         print("\n   📋 Task Breakdown:")
205 |         for _task_id, task_info in result["parallel_execution_stats"][
206 |             "task_breakdown"
207 |         ].items():
208 |             task_type = task_info["type"].title()
209 |             status = task_info["status"].title()
210 |             exec_time = task_info["execution_time"]
211 |             print(f"      • {task_type} Research: {status} ({exec_time:.1f}s)")
212 | 
213 |     except Exception as e:
214 |         print(f"   ❌ Parallel research failed (expected without API keys): {e}")
215 | 
216 |     # 3. Demonstrate sequential fallback
217 |     print("\n3. Testing sequential fallback...")
218 | 
219 |     _sequential_agent = DeepResearchAgent(
220 |         llm=llm,
221 |         persona="moderate",
222 |         enable_parallel_execution=False,  # Force sequential mode
223 |     )
224 | 
225 |     print("   ✅ Sequential-only agent created")
226 |     print("   📝 This would use traditional LangGraph workflow for compatibility")
227 | 
228 |     # 4. Show configuration options
229 |     print("\n4. Configuration Options:")
230 |     print("   📋 Parallel Execution Configuration:")
231 |     print(f"      • Max concurrent agents: {parallel_config.max_concurrent_agents}")
232 |     print(f"      • Timeout per agent: {parallel_config.timeout_per_agent}s")
233 |     print(f"      • Enable fallbacks: {parallel_config.enable_fallbacks}")
234 |     print(f"      • Rate limit delay: {parallel_config.rate_limit_delay}s")
235 | 
236 |     print("\n   🎛️  Available Research Types:")
237 |     print("      • Fundamental: Financial statements, earnings, valuation")
238 |     print("      • Technical: Chart patterns, indicators, price action")
239 |     print("      • Sentiment: News analysis, analyst ratings, social sentiment")
240 |     print("      • Competitive: Industry analysis, market position, competitors")
241 | 
242 |     # 5. Usage recommendations
243 |     print("\n5. Usage Recommendations:")
244 |     print("   💡 When to use parallel execution:")
245 |     print("      • Comprehensive research requiring multiple analysis types")
246 |     print("      • Time-sensitive research with tight deadlines")
247 |     print("      • Research topics requiring diverse data sources")
248 |     print("      • When you have sufficient API rate limits")
249 | 
250 |     print("\n   ⚠️  When to use sequential execution:")
251 |     print("      • Limited API rate limits")
252 |     print("      • Simple, focused research queries")
253 |     print("      • Debugging and development")
254 |     print("      • When consistency with legacy behavior is required")
255 | 
256 |     print("\n6. API Integration Requirements:")
257 |     print("   🔑 For full functionality, provide:")
258 |     print("      • EXA_API_KEY: High-quality research content")
259 |     print("      • TAVILY_API_KEY: Comprehensive web search")
260 |     print("      • Both are optional but recommended for best results")
261 | 
262 |     print("\n" + "=" * 50)
263 |     print("🎉 Demo completed! The enhanced DeepResearchAgent now supports:")
264 |     print("   ✅ Parallel execution with specialized subagents")
265 |     print("   ✅ Automatic fallback to sequential execution")
266 |     print("   ✅ Configurable concurrency and timeouts")
267 |     print("   ✅ Full backward compatibility")
268 |     print("   ✅ Detailed execution statistics and monitoring")
269 | 
270 | 
271 | if __name__ == "__main__":
272 |     asyncio.run(main())
273 | 
```
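
The demo above stubs out the network call. A sketch of the real invocation, mirroring the commented-out call in the example, might look like the following; it assumes valid `EXA_API_KEY`/`TAVILY_API_KEY` values and uses `ChatAnthropic` purely as an illustrative LLM choice:

```python
# Assumes EXA_API_KEY / TAVILY_API_KEY are set and langchain-anthropic is installed.
import asyncio
import os

from langchain_anthropic import ChatAnthropic  # illustrative LLM choice

from maverick_mcp.agents.deep_research import DeepResearchAgent


async def run() -> None:
    agent = DeepResearchAgent(
        llm=ChatAnthropic(model="claude-3-5-sonnet-latest"),
        persona="moderate",
        exa_api_key=os.environ["EXA_API_KEY"],
        tavily_api_key=os.environ["TAVILY_API_KEY"],
    )
    result = await agent.research_comprehensive(
        topic="AAPL stock analysis and investment outlook",
        session_id="example_session",
        depth="standard",
        focus_areas=["fundamentals", "technical_analysis", "market_sentiment"],
        use_parallel_execution=True,
    )
    print(result["findings"]["synthesis"])


asyncio.run(run())
```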

--------------------------------------------------------------------------------
/maverick_mcp/tools/portfolio_manager.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Portfolio manager for financial portfolio analysis and management.
  3 | This module provides a portfolio management interface for tracking and analyzing investment portfolios.
  4 | """
  5 | 
  6 | import asyncio
  7 | import json
  8 | import logging
  9 | import os
 10 | from datetime import UTC, datetime
 11 | from typing import Any
 12 | 
 13 | from dotenv import load_dotenv
 14 | 
 15 | # Configure logging
 16 | logging.basicConfig(
 17 |     level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
 18 | )
 19 | logger = logging.getLogger("maverick_mcp.portfolio_manager")
 20 | 
 21 | # Load environment variables
 22 | load_dotenv()
 23 | 
 24 | 
 25 | class PortfolioManager:
 26 |     """
 27 |     Portfolio manager for tracking and analyzing investment portfolios.
 28 |     """
 29 | 
 30 |     def __init__(
 31 |         self,
 32 |         portfolio_name: str,
 33 |         risk_profile: str = "moderate",
 34 |         portfolio_file: str | None = None,
 35 |     ):
 36 |         """
 37 |         Initialize the portfolio manager
 38 | 
 39 |         Args:
 40 |             portfolio_name: Name of the portfolio
 41 |             risk_profile: Risk profile of the portfolio ('conservative', 'moderate', 'aggressive')
 42 |             portfolio_file: Path to a JSON file containing portfolio data
 43 |         """
 44 |         self.portfolio_name = portfolio_name
 45 |         self.risk_profile = risk_profile
 46 |         self.portfolio_file = portfolio_file
 47 | 
 48 |         # Load portfolio from file if provided
 49 |         self.portfolio = []
 50 |         if portfolio_file and os.path.exists(portfolio_file):
 51 |             with open(portfolio_file) as f:
 52 |                 data = json.load(f)
 53 |                 self.portfolio = data.get("holdings", [])
 54 |                 self.risk_profile = data.get("risk_profile", risk_profile)
 55 |                 self.portfolio_name = data.get("name", portfolio_name)
 56 | 
 57 |         self.transaction_history: list[dict[str, Any]] = []
 58 | 
 59 |     async def add_to_portfolio(self, symbol: str, shares: float, price: float):
 60 |         """
 61 |         Add a stock to the portfolio
 62 | 
 63 |         Args:
 64 |             symbol: Stock ticker symbol
 65 |             shares: Number of shares to add
 66 |             price: Purchase price per share
 67 |         """
 68 |         # Check if stock already exists in portfolio
 69 |         for holding in self.portfolio:
 70 |             if holding["symbol"] == symbol:
 71 |                 # Update existing holding
 72 |                 old_shares = holding["shares"]
 73 |                 old_price = holding["avg_price"]
 74 |                 total_cost = (old_shares * old_price) + (shares * price)
 75 |                 total_shares = old_shares + shares
 76 |                 holding["shares"] = total_shares
 77 |                 holding["avg_price"] = total_cost / total_shares
 78 |                 holding["last_update"] = datetime.now(UTC).isoformat()
 79 | 
 80 |                 # Record transaction
 81 |                 self.transaction_history.append(
 82 |                     {
 83 |                         "type": "buy",
 84 |                         "symbol": symbol,
 85 |                         "shares": shares,
 86 |                         "price": price,
 87 |                         "timestamp": datetime.now(UTC).isoformat(),
 88 |                     }
 89 |                 )
 90 | 
 91 |                 return
 92 | 
 93 |         # Add new holding
 94 |         self.portfolio.append(
 95 |             {
 96 |                 "symbol": symbol,
 97 |                 "shares": shares,
 98 |                 "avg_price": price,
 99 |                 "purchase_date": datetime.now(UTC).isoformat(),
100 |                 "last_update": datetime.now(UTC).isoformat(),
101 |             }
102 |         )
103 | 
104 |         # Record transaction
105 |         self.transaction_history.append(
106 |             {
107 |                 "type": "buy",
108 |                 "symbol": symbol,
109 |                 "shares": shares,
110 |                 "price": price,
111 |                 "timestamp": datetime.now(UTC).isoformat(),
112 |             }
113 |         )
114 | 
115 |     async def remove_from_portfolio(
116 |         self, symbol: str, shares: float | None = None, price: float | None = None
117 |     ):
118 |         """
119 |         Remove a stock from the portfolio
120 | 
121 |         Args:
122 |             symbol: Stock ticker symbol
123 |             shares: Number of shares to remove (if None, remove all shares)
124 |             price: Selling price per share
125 |         """
126 |         for i, holding in enumerate(self.portfolio):
127 |             if holding["symbol"] == symbol:
128 |                 if shares is None or shares >= holding["shares"]:
129 |                     # Remove entire holding
130 |                     removed_holding = self.portfolio.pop(i)
131 | 
132 |                     # Record transaction
133 |                     self.transaction_history.append(
134 |                         {
135 |                             "type": "sell",
136 |                             "symbol": symbol,
137 |                             "shares": removed_holding["shares"],
138 |                             "price": price,
139 |                             "timestamp": datetime.now(UTC).isoformat(),
140 |                         }
141 |                     )
142 |                 else:
143 |                     # Partially remove holding
144 |                     holding["shares"] -= shares
145 |                     holding["last_update"] = datetime.now(UTC).isoformat()
146 | 
147 |                     # Record transaction
148 |                     self.transaction_history.append(
149 |                         {
150 |                             "type": "sell",
151 |                             "symbol": symbol,
152 |                             "shares": shares,
153 |                             "price": price,
154 |                             "timestamp": datetime.now(UTC).isoformat(),
155 |                         }
156 |                     )
157 | 
158 |                 return True
159 | 
160 |         return False
161 | 
162 |     async def get_portfolio_value(self) -> dict[str, Any]:
163 |         """
164 |         Get the current value of the portfolio
165 | 
166 |         Returns:
167 |             Dictionary with portfolio value information
168 |         """
169 |         if not self.portfolio:
170 |             return {
171 |                 "total_value": 0,
172 |                 "holdings": [],
173 |                 "timestamp": datetime.now(UTC).isoformat(),
174 |             }
175 | 
176 |         total_value = 0
177 |         holdings_data = []
178 | 
179 |         for holding in self.portfolio:
180 |             symbol = holding["symbol"]
181 |             shares = holding["shares"]
182 |             avg_price = holding["avg_price"]
183 |             current_price = avg_price  # In a real implementation, fetch current price from market data API
184 | 
185 |             # Calculate values
186 |             position_value = shares * current_price
187 |             cost_basis = shares * avg_price
188 |             gain_loss = position_value - cost_basis
189 |             gain_loss_pct = (gain_loss / cost_basis) * 100 if cost_basis > 0 else 0
190 | 
191 |             holdings_data.append(
192 |                 {
193 |                     "symbol": symbol,
194 |                     "shares": shares,
195 |                     "avg_price": avg_price,
196 |                     "current_price": current_price,
197 |                     "position_value": position_value,
198 |                     "cost_basis": cost_basis,
199 |                     "gain_loss": gain_loss,
200 |                     "gain_loss_pct": gain_loss_pct,
201 |                 }
202 |             )
203 | 
204 |             total_value += position_value
205 | 
206 |         return {
207 |             "total_value": total_value,
208 |             "holdings": holdings_data,
209 |             "timestamp": datetime.now(UTC).isoformat(),
210 |         }
211 | 
212 |     async def get_portfolio_analysis(self) -> dict[str, Any]:
213 |         """
214 |         Get a comprehensive analysis of the portfolio
215 | 
216 |         Returns:
217 |             Dictionary with portfolio analysis information
218 |         """
219 |         if not self.portfolio:
220 |             return {
221 |                 "analysis": "Portfolio is empty. No analysis available.",
222 |                 "timestamp": datetime.now(UTC).isoformat(),
223 |             }
224 | 
225 |         # Get current portfolio value
226 |         portfolio_value = await self.get_portfolio_value()
227 | 
228 |         # In a real implementation, perform portfolio analysis here
229 |         analysis = "Portfolio analysis not implemented"
230 | 
231 |         return {
232 |             "portfolio_data": portfolio_value,
233 |             "analysis": analysis,
234 |             "risk_profile": self.risk_profile,
235 |             "timestamp": datetime.now(UTC).isoformat(),
236 |         }
237 | 
238 |     async def get_rebalance_recommendations(self) -> dict[str, Any]:
239 |         """
240 |         Get recommendations for rebalancing the portfolio
241 | 
242 |         Returns:
243 |             Dictionary with rebalance recommendations
244 |         """
245 |         if not self.portfolio:
246 |             return {
247 |                 "recommendations": "Portfolio is empty. No rebalance recommendations available.",
248 |                 "timestamp": datetime.now(UTC).isoformat(),
249 |             }
250 | 
251 |         # Get current portfolio value
252 |         portfolio_value = await self.get_portfolio_value()
253 | 
254 |         # In a real implementation, generate rebalancing recommendations here
255 |         recommendations = "Rebalance recommendations not implemented"
256 | 
257 |         return {
258 |             "portfolio_data": portfolio_value,
259 |             "recommendations": recommendations,
260 |             "risk_profile": self.risk_profile,
261 |             "timestamp": datetime.now(UTC).isoformat(),
262 |         }
263 | 
264 |     def save_portfolio(self, filepath: str | None = None):
265 |         """
266 |         Save the portfolio to a file
267 | 
268 |         Args:
269 |             filepath: Path to save the portfolio to (if None, use the portfolio file path)
270 |         """
271 |         if not filepath:
272 |             filepath = (
273 |                 self.portfolio_file
274 |                 or f"{self.portfolio_name.replace(' ', '_').lower()}_portfolio.json"
275 |             )
276 | 
277 |         data = {
278 |             "name": self.portfolio_name,
279 |             "risk_profile": self.risk_profile,
280 |             "holdings": self.portfolio,
281 |             "transaction_history": self.transaction_history,
282 |             "last_update": datetime.now(UTC).isoformat(),
283 |         }
284 | 
285 |         with open(filepath, "w") as f:
286 |             json.dump(data, f, indent=2)
287 | 
288 |         logger.info(f"Portfolio saved to {filepath}")
289 | 
290 |         return filepath
291 | 
292 | 
293 | async def main():
294 |     """Example usage of the portfolio manager"""
295 |     # Create a sample portfolio
296 |     portfolio = [
297 |         {"symbol": "AAPL", "shares": 10, "avg_price": 170.50},
298 |         {"symbol": "MSFT", "shares": 5, "avg_price": 325.25},
299 |         {"symbol": "GOOGL", "shares": 2, "avg_price": 140.75},
300 |         {"symbol": "AMZN", "shares": 3, "avg_price": 178.30},
301 |         {"symbol": "TSLA", "shares": 8, "avg_price": 185.60},
302 |     ]
303 | 
304 |     # Create the portfolio manager
305 |     manager = PortfolioManager(
306 |         portfolio_name="Tech Growth Portfolio",
307 |         risk_profile="moderate",
308 |     )
309 | 
310 |     # Add the sample stocks to the portfolio
311 |     for holding in portfolio:
312 |         await manager.add_to_portfolio(
313 |             symbol=str(holding["symbol"]),
314 |             shares=float(holding["shares"]),  # type: ignore[arg-type]
315 |             price=float(holding["avg_price"]),  # type: ignore[arg-type]
316 |         )
317 | 
318 |     try:
319 |         # Get portfolio value
320 |         print("Getting portfolio value...")
321 |         portfolio_value = await manager.get_portfolio_value()
322 |         print(f"Total portfolio value: ${portfolio_value['total_value']:.2f}")
323 | 
324 |         # Get portfolio analysis
325 |         print("\nAnalyzing portfolio...")
326 |         analysis = await manager.get_portfolio_analysis()
327 |         print("\nPortfolio Analysis:")
328 |         print(analysis["analysis"])
329 | 
330 |         # Get rebalance recommendations
331 |         print("\nGetting rebalance recommendations...")
332 |         rebalance = await manager.get_rebalance_recommendations()
333 |         print("\nRebalance Recommendations:")
334 |         print(rebalance["recommendations"])
335 | 
336 |         # Save the portfolio
337 |         filepath = manager.save_portfolio()
338 |         print(f"\nPortfolio saved to {filepath}")
339 | 
340 |     finally:
341 |         pass
342 | 
343 | 
344 | if __name__ == "__main__":
345 |     asyncio.run(main())
346 | 
```
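
The average-cost update in `add_to_portfolio` is worth a worked example: buying 10 shares at $100 and then 5 more at $130 gives total_cost = 10·100 + 5·130 = 1650 over 15 shares, i.e. an avg_price of $110. The snippet below verifies that behavior against the class as written:

```python
import asyncio

from maverick_mcp.tools.portfolio_manager import PortfolioManager


async def demo() -> None:
    pm = PortfolioManager(portfolio_name="Avg Cost Demo")
    await pm.add_to_portfolio("AAPL", shares=10, price=100.0)
    await pm.add_to_portfolio("AAPL", shares=5, price=130.0)
    holding = pm.portfolio[0]
    # (10 * 100 + 5 * 130) / 15 == 110.0
    assert holding["avg_price"] == 110.0
    assert holding["shares"] == 15


asyncio.run(demo())
```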

--------------------------------------------------------------------------------
/maverick_mcp/application/screening/queries.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Screening application queries.
  3 | 
  4 | This module contains application service queries that orchestrate
  5 | domain services and infrastructure adapters for screening operations.
  6 | """
  7 | 
  8 | from datetime import datetime
  9 | from typing import Any
 10 | 
 11 | from maverick_mcp.domain.screening.entities import (
 12 |     ScreeningResultCollection,
 13 | )
 14 | from maverick_mcp.domain.screening.services import IStockRepository, ScreeningService
 15 | from maverick_mcp.domain.screening.value_objects import (
 16 |     ScreeningCriteria,
 17 |     ScreeningStrategy,
 18 |     SortingOptions,
 19 | )
 20 | 
 21 | 
 22 | class GetScreeningResultsQuery:
 23 |     """
 24 |     Application query for retrieving screening results.
 25 | 
 26 |     This query orchestrates the domain service and infrastructure
 27 |     to provide a complete screening operation.
 28 |     """
 29 | 
 30 |     def __init__(self, stock_repository: IStockRepository):
 31 |         """
 32 |         Initialize the query with required dependencies.
 33 | 
 34 |         Args:
 35 |             stock_repository: Repository for accessing stock data
 36 |         """
 37 |         self._stock_repository = stock_repository
 38 |         self._screening_service = ScreeningService()
 39 | 
 40 |     async def execute(
 41 |         self,
 42 |         strategy: ScreeningStrategy,
 43 |         limit: int = 20,
 44 |         criteria: ScreeningCriteria | None = None,
 45 |         sorting: SortingOptions | None = None,
 46 |     ) -> ScreeningResultCollection:
 47 |         """
 48 |         Execute the screening query.
 49 | 
 50 |         Args:
 51 |             strategy: The screening strategy to use
 52 |             limit: Maximum number of results to return
 53 |             criteria: Optional filtering criteria
 54 |             sorting: Optional sorting configuration
 55 | 
 56 |         Returns:
 57 |             ScreeningResultCollection with results and metadata
 58 |         """
 59 |         # Validate and adjust limit
 60 |         validated_limit = self._screening_service.validate_screening_limits(limit)
 61 | 
 62 |         # Get raw data from repository based on strategy
 63 |         raw_data = await self._get_raw_data_for_strategy(
 64 |             strategy, validated_limit, criteria
 65 |         )
 66 | 
 67 |         # Convert raw data to domain entities
 68 |         screening_results = []
 69 |         for raw_result in raw_data:
 70 |             try:
 71 |                 result = self._screening_service.create_screening_result_from_raw_data(
 72 |                     raw_result, datetime.utcnow()
 73 |                 )
 74 |                 screening_results.append(result)
 75 |             except Exception as e:
 76 |                 # Log and skip invalid results
 77 |                 # In a real application, we'd use proper logging
 78 |                 print(
 79 |                     f"Warning: Skipped invalid result for {raw_result.get('stock', 'unknown')}: {e}"
 80 |                 )
 81 |                 continue
 82 | 
 83 |         # Apply additional filtering if criteria provided
 84 |         if criteria and criteria.has_any_filters():
 85 |             screening_results = self._screening_service.apply_screening_criteria(
 86 |                 screening_results, criteria
 87 |             )
 88 | 
 89 |         # Apply sorting
 90 |         if sorting is None:
 91 |             sorting = SortingOptions.for_strategy(strategy)
 92 | 
 93 |         screening_results = self._screening_service.sort_screening_results(
 94 |             screening_results, sorting
 95 |         )
 96 | 
 97 |         # Limit results after filtering and sorting
 98 |         screening_results = screening_results[:validated_limit]
 99 | 
100 |         # Create and return collection
101 |         return self._screening_service.create_screening_collection(
102 |             screening_results,
103 |             strategy,
104 |             len(raw_data),  # Total candidates before filtering
105 |         )
106 | 
107 |     async def _get_raw_data_for_strategy(
108 |         self,
109 |         strategy: ScreeningStrategy,
110 |         limit: int,
111 |         criteria: ScreeningCriteria | None,
112 |     ) -> list[dict[str, Any]]:
113 |         """
114 |         Get raw data from repository based on strategy.
115 | 
116 |         This method handles the strategy-specific repository calls
117 |         and basic filtering that can be done at the data layer.
118 |         """
119 |         if strategy == ScreeningStrategy.MAVERICK_BULLISH:
120 |             min_score = None
121 |             if criteria and criteria.min_combined_score is not None:
122 |                 min_score = criteria.min_combined_score
123 | 
124 |             return self._stock_repository.get_maverick_stocks(
125 |                 limit=limit * 2,  # Get more to allow for filtering
126 |                 min_score=min_score,
127 |             )
128 | 
129 |         elif strategy == ScreeningStrategy.MAVERICK_BEARISH:
130 |             min_score = None
131 |             if criteria and criteria.min_bear_score is not None:
132 |                 min_score = criteria.min_bear_score
133 | 
134 |             return self._stock_repository.get_maverick_bear_stocks(
135 |                 limit=limit * 2,  # Get more to allow for filtering
136 |                 min_score=min_score,
137 |             )
138 | 
139 |         elif strategy == ScreeningStrategy.TRENDING_STAGE2:
140 |             min_momentum_score = None
141 |             if criteria and criteria.min_momentum_score is not None:
142 |                 min_momentum_score = criteria.min_momentum_score
143 | 
144 |             # Check if we need moving average filtering
145 |             filter_ma = criteria and (
146 |                 criteria.require_above_sma50
147 |                 or criteria.require_above_sma150
148 |                 or criteria.require_above_sma200
149 |                 or criteria.require_ma_alignment
150 |             )
151 | 
152 |             return self._stock_repository.get_trending_stocks(
153 |                 limit=limit * 2,  # Get more to allow for filtering
154 |                 min_momentum_score=min_momentum_score,
155 |                 filter_moving_averages=filter_ma,
156 |             )
157 | 
158 |         else:
159 |             raise ValueError(f"Unsupported screening strategy: {strategy}")
160 | 
161 | 
162 | class GetAllScreeningResultsQuery:
163 |     """
164 |     Application query for retrieving results from all screening strategies.
165 | 
166 |     This query provides a comprehensive view across all available
167 |     screening strategies.
168 |     """
169 | 
170 |     def __init__(self, stock_repository: IStockRepository):
171 |         """
172 |         Initialize the query with required dependencies.
173 | 
174 |         Args:
175 |             stock_repository: Repository for accessing stock data
176 |         """
177 |         self._stock_repository = stock_repository
178 |         self._screening_service = ScreeningService()
179 | 
180 |     async def execute(
181 |         self, limit_per_strategy: int = 10, criteria: ScreeningCriteria | None = None
182 |     ) -> dict[str, ScreeningResultCollection]:
183 |         """
184 |         Execute screening across all strategies.
185 | 
186 |         Args:
187 |             limit_per_strategy: Number of results per strategy
188 |             criteria: Optional filtering criteria (applied to all strategies)
189 | 
190 |         Returns:
191 |             Dictionary mapping strategy names to their result collections
192 |         """
193 |         results = {}
194 | 
195 |         # Execute each strategy
196 |         for strategy in ScreeningStrategy:
197 |             try:
198 |                 query = GetScreeningResultsQuery(self._stock_repository)
199 |                 collection = await query.execute(
200 |                     strategy=strategy, limit=limit_per_strategy, criteria=criteria
201 |                 )
202 |                 results[strategy.value] = collection
203 |             except Exception as e:
204 |                 # Log and continue with other strategies
205 |                 print(f"Warning: Failed to get results for {strategy.value}: {e}")
206 |                 # Create empty collection for failed strategy
207 |                 results[strategy.value] = (
208 |                     self._screening_service.create_screening_collection([], strategy, 0)
209 |                 )
210 | 
211 |         return results
212 | 
213 | 
214 | class GetScreeningStatisticsQuery:
215 |     """
216 |     Application query for retrieving screening statistics and analytics.
217 | 
218 |     This query provides business intelligence and analytical insights
219 |     across screening results.
220 |     """
221 | 
222 |     def __init__(self, stock_repository: IStockRepository):
223 |         """
224 |         Initialize the query with required dependencies.
225 | 
226 |         Args:
227 |             stock_repository: Repository for accessing stock data
228 |         """
229 |         self._stock_repository = stock_repository
230 |         self._screening_service = ScreeningService()
231 | 
232 |     async def execute(
233 |         self, strategy: ScreeningStrategy | None = None, limit: int = 100
234 |     ) -> dict[str, Any]:
235 |         """
236 |         Execute the statistics query.
237 | 
238 |         Args:
239 |             strategy: Optional specific strategy to analyze (None for all)
240 |             limit: Maximum results to analyze per strategy
241 | 
242 |         Returns:
243 |             Comprehensive statistics and analytics
244 |         """
245 |         if strategy:
246 |             # Single strategy analysis
247 |             query = GetScreeningResultsQuery(self._stock_repository)
248 |             collection = await query.execute(strategy, limit)
249 | 
250 |             return {
251 |                 "strategy": strategy.value,
252 |                 "statistics": self._screening_service.calculate_screening_statistics(
253 |                     collection
254 |                 ),
255 |                 "timestamp": datetime.utcnow().isoformat(),
256 |             }
257 | 
258 |         else:
259 |             # All strategies analysis
260 |             all_query = GetAllScreeningResultsQuery(self._stock_repository)
261 |             all_collections = await all_query.execute(limit)
262 | 
263 |             combined_stats = {
264 |                 "overall_summary": {
265 |                     "strategies_analyzed": len(all_collections),
266 |                     "total_results": sum(
267 |                         len(c.results) for c in all_collections.values()
268 |                     ),
269 |                     "timestamp": datetime.utcnow().isoformat(),
270 |                 },
271 |                 "by_strategy": {},
272 |             }
273 | 
274 |             # Calculate stats for each strategy
275 |             for strategy_name, collection in all_collections.items():
276 |                 combined_stats["by_strategy"][strategy_name] = (
277 |                     self._screening_service.calculate_screening_statistics(collection)
278 |                 )
279 | 
280 |             # Calculate cross-strategy insights
281 |             combined_stats["cross_strategy_analysis"] = (
282 |                 self._calculate_cross_strategy_insights(all_collections)
283 |             )
284 | 
285 |             return combined_stats
286 | 
287 |     def _calculate_cross_strategy_insights(
288 |         self, collections: dict[str, ScreeningResultCollection]
289 |     ) -> dict[str, Any]:
290 |         """
291 |         Calculate insights that span across multiple strategies.
292 | 
293 |         This provides valuable business intelligence by comparing
294 |         and contrasting results across different screening approaches.
295 |         """
296 |         all_symbols = set()
297 |         strategy_overlaps = {}
298 | 
299 |         # Collect all symbols and calculate overlaps
300 |         for strategy_name, collection in collections.items():
301 |             symbols = {r.stock_symbol for r in collection.results}
302 |             all_symbols.update(symbols)
303 |             strategy_overlaps[strategy_name] = symbols
304 | 
305 |         # Find intersections
306 |         bullish_symbols = strategy_overlaps.get(
307 |             ScreeningStrategy.MAVERICK_BULLISH.value, set()
308 |         )
309 |         bearish_symbols = strategy_overlaps.get(
310 |             ScreeningStrategy.MAVERICK_BEARISH.value, set()
311 |         )
312 |         trending_symbols = strategy_overlaps.get(
313 |             ScreeningStrategy.TRENDING_STAGE2.value, set()
314 |         )
315 | 
316 |         return {
317 |             "total_unique_symbols": len(all_symbols),
318 |             "strategy_overlaps": {
319 |                 "bullish_and_trending": len(bullish_symbols & trending_symbols),
320 |                 "conflicting_signals": len(bullish_symbols & bearish_symbols),
321 |                 "trending_exclusive": len(
322 |                     trending_symbols - bullish_symbols - bearish_symbols
323 |                 ),
324 |             },
325 |             "market_sentiment": {
326 |                 "bullish_bias": len(bullish_symbols) > len(bearish_symbols),
327 |                 "trend_strength": len(trending_symbols) / max(len(all_symbols), 1),
328 |                 "conflict_ratio": len(bullish_symbols & bearish_symbols)
329 |                 / max(len(all_symbols), 1),
330 |             },
331 |         }
332 | 
```
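
A usage sketch for `GetScreeningResultsQuery`, assuming a concrete `IStockRepository` implementation is available and that `ScreeningCriteria` accepts `min_combined_score` as a constructor argument (the attribute is read in `_get_raw_data_for_strategy` above, but the constructor signature is an assumption here):

```python
import asyncio

from maverick_mcp.application.screening.queries import GetScreeningResultsQuery
from maverick_mcp.domain.screening.value_objects import (
    ScreeningCriteria,
    ScreeningStrategy,
)


async def run(repo) -> None:
    # repo: any IStockRepository implementation (e.g. a SQL-backed adapter)
    query = GetScreeningResultsQuery(repo)
    collection = await query.execute(
        strategy=ScreeningStrategy.MAVERICK_BULLISH,
        limit=10,
        criteria=ScreeningCriteria(min_combined_score=80),  # assumed kwarg
    )
    for result in collection.results:
        print(result.stock_symbol)
```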

--------------------------------------------------------------------------------
/maverick_mcp/backtesting/strategies/templates.py:
--------------------------------------------------------------------------------

```python
  1 | """Pre-built strategy templates for VectorBT."""
  2 | 
  3 | from typing import Any
  4 | 
  5 | import pandas as pd
  6 | 
  7 | 
  8 | class SimpleMovingAverageStrategy:
  9 |     """Simple Moving Average crossover strategy for ML integration."""
 10 | 
 11 |     def __init__(
 12 |         self, parameters: dict | None = None, fast_period: int = 10, slow_period: int = 20
 13 |     ):
 14 |         """
 15 |         Initialize SMA strategy.
 16 | 
 17 |         Args:
 18 |             parameters: Optional dict with fast_period and slow_period
 19 |             fast_period: Period for fast moving average
 20 |             slow_period: Period for slow moving average
 21 |         """
 22 |         if parameters:
 23 |             self.fast_period = parameters.get("fast_period", fast_period)
 24 |             self.slow_period = parameters.get("slow_period", slow_period)
 25 |         else:
 26 |             self.fast_period = fast_period
 27 |             self.slow_period = slow_period
 28 |         self.name = "SMA Crossover"
 29 |         self.parameters = {
 30 |             "fast_period": self.fast_period,
 31 |             "slow_period": self.slow_period,
 32 |         }
 33 | 
 34 |     def generate_signals(self, data: pd.DataFrame) -> tuple[pd.Series, pd.Series]:
 35 |         """
 36 |         Generate buy/sell signals based on SMA crossover.
 37 | 
 38 |         Args:
 39 |             data: DataFrame with at least 'close' column
 40 | 
 41 |         Returns:
 42 |             Tuple of (entries, exits) as boolean Series
 43 |         """
 44 |         close = data["close"] if "close" in data.columns else data["Close"]
 45 | 
 46 |         # Calculate SMAs
 47 |         fast_sma = close.rolling(window=self.fast_period).mean()
 48 |         slow_sma = close.rolling(window=self.slow_period).mean()
 49 | 
 50 |         # Generate signals
 51 |         entries = (fast_sma > slow_sma) & (fast_sma.shift(1) <= slow_sma.shift(1))
 52 |         exits = (fast_sma < slow_sma) & (fast_sma.shift(1) >= slow_sma.shift(1))
 53 | 
 54 |         # Handle NaN values
 55 |         entries = entries.fillna(False)
 56 |         exits = exits.fillna(False)
 57 | 
 58 |         return entries, exits
 59 | 
 60 |     def get_parameters(self) -> dict[str, Any]:
 61 |         """Get strategy parameters."""
 62 |         return {"fast_period": self.fast_period, "slow_period": self.slow_period}
 63 | 
 64 | 
 65 | STRATEGY_TEMPLATES = {
 66 |     "sma_cross": {
 67 |         "name": "SMA Crossover",
 68 |         "description": "Buy when fast SMA crosses above slow SMA, sell when it crosses below",
 69 |         "parameters": {
 70 |             "fast_period": 10,
 71 |             "slow_period": 20,
 72 |         },
 73 |         "optimization_ranges": {
 74 |             "fast_period": [5, 10, 15, 20],
 75 |             "slow_period": [20, 30, 50, 100],
 76 |         },
 77 |         "code": """
 78 | # SMA Crossover Strategy
 79 | fast_sma = vbt.MA.run(close, {fast_period}).ma.squeeze()
 80 | slow_sma = vbt.MA.run(close, {slow_period}).ma.squeeze()
 81 | 
 82 | entries = (fast_sma > slow_sma) & (fast_sma.shift(1) <= slow_sma.shift(1))
 83 | exits = (fast_sma < slow_sma) & (fast_sma.shift(1) >= slow_sma.shift(1))
 84 | """,
 85 |     },
 86 |     "rsi": {
 87 |         "name": "RSI Mean Reversion",
 88 |         "description": "Buy oversold (RSI < 30), sell overbought (RSI > 70)",
 89 |         "parameters": {
 90 |             "period": 14,
 91 |             "oversold": 30,
 92 |             "overbought": 70,
 93 |         },
 94 |         "optimization_ranges": {
 95 |             "period": [7, 14, 21],
 96 |             "oversold": [20, 25, 30, 35],
 97 |             "overbought": [65, 70, 75, 80],
 98 |         },
 99 |         "code": """
100 | # RSI Mean Reversion Strategy
101 | rsi = vbt.RSI.run(close, {period}).rsi.squeeze()
102 | 
103 | entries = (rsi < {oversold}) & (rsi.shift(1) >= {oversold})
104 | exits = (rsi > {overbought}) & (rsi.shift(1) <= {overbought})
105 | """,
106 |     },
107 |     "macd": {
108 |         "name": "MACD Signal",
109 |         "description": "Buy when MACD crosses above signal line, sell when crosses below",
110 |         "parameters": {
111 |             "fast_period": 12,
112 |             "slow_period": 26,
113 |             "signal_period": 9,
114 |         },
115 |         "optimization_ranges": {
116 |             "fast_period": [8, 10, 12, 14],
117 |             "slow_period": [21, 24, 26, 30],
118 |             "signal_period": [7, 9, 11],
119 |         },
120 |         "code": """
121 | # MACD Signal Strategy
122 | macd = vbt.MACD.run(close,
123 |     fast_window={fast_period},
124 |     slow_window={slow_period},
125 |     signal_window={signal_period}
126 | )
127 | 
128 | macd_line = macd.macd.squeeze()
129 | signal_line = macd.signal.squeeze()
130 | 
131 | entries = (macd_line > signal_line) & (macd_line.shift(1) <= signal_line.shift(1))
132 | exits = (macd_line < signal_line) & (macd_line.shift(1) >= signal_line.shift(1))
133 | """,
134 |     },
135 |     "bollinger": {
136 |         "name": "Bollinger Bands",
137 |         "description": "Buy at lower band (oversold), sell at upper band (overbought)",
138 |         "parameters": {
139 |             "period": 20,
140 |             "std_dev": 2.0,
141 |         },
142 |         "optimization_ranges": {
143 |             "period": [10, 15, 20, 25],
144 |             "std_dev": [1.5, 2.0, 2.5, 3.0],
145 |         },
146 |         "code": """
147 | # Bollinger Bands Strategy
148 | bb = vbt.BBANDS.run(close, window={period}, alpha={std_dev})
149 | upper = bb.upper.squeeze()
150 | lower = bb.lower.squeeze()
151 | 
152 | # Buy when price touches lower band, sell when touches upper
153 | entries = (close <= lower) & (close.shift(1) > lower.shift(1))
154 | exits = (close >= upper) & (close.shift(1) < upper.shift(1))
155 | """,
156 |     },
157 |     "momentum": {
158 |         "name": "Momentum",
159 |         "description": "Buy strong momentum, sell weak momentum based on returns threshold",
160 |         "parameters": {
161 |             "lookback": 20,
162 |             "threshold": 0.05,
163 |         },
164 |         "optimization_ranges": {
165 |             "lookback": [10, 15, 20, 25, 30],
166 |             "threshold": [0.02, 0.03, 0.05, 0.07, 0.10],
167 |         },
168 |         "code": """
169 | # Momentum Strategy
170 | returns = close.pct_change({lookback})
171 | 
172 | entries = returns > {threshold}
173 | exits = returns < -{threshold}
174 | """,
175 |     },
176 |     "ema_cross": {
177 |         "name": "EMA Crossover",
178 |         "description": "Exponential moving average crossover with faster response than SMA",
179 |         "parameters": {
180 |             "fast_period": 12,
181 |             "slow_period": 26,
182 |         },
183 |         "optimization_ranges": {
184 |             "fast_period": [8, 12, 16, 20],
185 |             "slow_period": [20, 26, 35, 50],
186 |         },
187 |         "code": """
188 | # EMA Crossover Strategy
189 | fast_ema = vbt.MA.run(close, {fast_period}, ewm=True).ma.squeeze()
190 | slow_ema = vbt.MA.run(close, {slow_period}, ewm=True).ma.squeeze()
191 | 
192 | entries = (fast_ema > slow_ema) & (fast_ema.shift(1) <= slow_ema.shift(1))
193 | exits = (fast_ema < slow_ema) & (fast_ema.shift(1) >= slow_ema.shift(1))
194 | """,
195 |     },
196 |     "mean_reversion": {
197 |         "name": "Mean Reversion",
198 |         "description": "Buy when price is below moving average by threshold",
199 |         "parameters": {
200 |             "ma_period": 20,
201 |             "entry_threshold": 0.02,  # 2% below MA
202 |             "exit_threshold": 0.01,  # 1% above MA
203 |         },
204 |         "optimization_ranges": {
205 |             "ma_period": [15, 20, 30, 50],
206 |             "entry_threshold": [0.01, 0.02, 0.03, 0.05],
207 |             "exit_threshold": [0.00, 0.01, 0.02],
208 |         },
209 |         "code": """
210 | # Mean Reversion Strategy
211 | ma = vbt.MA.run(close, {ma_period}).ma.squeeze()
212 | deviation = (close - ma) / ma
213 | 
214 | entries = deviation < -{entry_threshold}
215 | exits = deviation > {exit_threshold}
216 | """,
217 |     },
218 |     "breakout": {
219 |         "name": "Channel Breakout",
220 |         "description": "Buy on breakout above rolling high, sell on breakdown below rolling low",
221 |         "parameters": {
222 |             "lookback": 20,
223 |             "exit_lookback": 10,
224 |         },
225 |         "optimization_ranges": {
226 |             "lookback": [10, 20, 30, 50],
227 |             "exit_lookback": [5, 10, 15, 20],
228 |         },
229 |         "code": """
230 | # Channel Breakout Strategy
231 | upper_channel = close.rolling({lookback}).max()
232 | lower_channel = close.rolling({exit_lookback}).min()
233 | 
234 | entries = close > upper_channel.shift(1)
235 | exits = close < lower_channel.shift(1)
236 | """,
237 |     },
238 |     "volume_momentum": {
239 |         "name": "Volume-Weighted Momentum",
240 |         "description": "Momentum strategy filtered by volume surge",
241 |         "parameters": {
242 |             "momentum_period": 20,
243 |             "volume_period": 20,
244 |             "momentum_threshold": 0.05,
245 |             "volume_multiplier": 1.5,
246 |         },
247 |         "optimization_ranges": {
248 |             "momentum_period": [10, 20, 30],
249 |             "volume_period": [10, 20, 30],
250 |             "momentum_threshold": [0.03, 0.05, 0.07],
251 |             "volume_multiplier": [1.2, 1.5, 2.0],
252 |         },
253 |         "code": """
254 | # Volume-Weighted Momentum Strategy
255 | returns = close.pct_change({momentum_period})
256 | avg_volume = volume.rolling({volume_period}).mean()
257 | volume_surge = volume > (avg_volume * {volume_multiplier})
258 | 
259 | # Entry: positive momentum with volume surge
260 | entries = (returns > {momentum_threshold}) & volume_surge
261 | 
262 | # Exit: negative momentum or volume dry up
263 | exits = (returns < -{momentum_threshold}) | (volume < avg_volume * 0.8)
264 | """,
265 |     },
266 |     "online_learning": {
267 |         "name": "Online Learning Strategy",
268 |         "description": "Adaptive strategy using online learning to predict price movements",
269 |         "parameters": {
270 |             "lookback": 20,
271 |             "learning_rate": 0.01,
272 |             "update_frequency": 5,
273 |         },
274 |         "optimization_ranges": {
275 |             "lookback": [10, 20, 30, 50],
276 |             "learning_rate": [0.001, 0.01, 0.1],
277 |             "update_frequency": [1, 5, 10, 20],
278 |         },
279 |         "code": """
280 | # Online Learning Strategy (ML-based)
281 | # Uses streaming updates to adapt to market conditions
282 | # Implements SGD classifier with technical features
283 | """,
284 |     },
285 |     "regime_aware": {
286 |         "name": "Regime-Aware Strategy",
287 |         "description": "Adapts strategy based on detected market regime (trending/ranging)",
288 |         "parameters": {
289 |             "regime_window": 50,
290 |             "threshold": 0.02,
291 |             "trend_strategy": "momentum",
292 |             "range_strategy": "mean_reversion",
293 |         },
294 |         "optimization_ranges": {
295 |             "regime_window": [20, 50, 100],
296 |             "threshold": [0.01, 0.02, 0.05],
297 |         },
298 |         "code": """
299 | # Regime-Aware Strategy
300 | # Detects market regime and switches between strategies
301 | # Uses volatility and trend strength indicators
302 | """,
303 |     },
304 |     "ensemble": {
305 |         "name": "Ensemble Strategy",
306 |         "description": "Combines multiple strategies with weighted voting",
307 |         "parameters": {
308 |             "fast_period": 10,
309 |             "slow_period": 20,
310 |             "rsi_period": 14,
311 |             "weight_method": "equal",
312 |         },
313 |         "optimization_ranges": {
314 |             "fast_period": [5, 10, 15],
315 |             "slow_period": [20, 30, 50],
316 |             "rsi_period": [7, 14, 21],
317 |         },
318 |         "code": """
319 | # Ensemble Strategy
320 | # Combines SMA, RSI, and MACD signals
321 | # Uses voting or weighted average for final signal
322 | """,
323 |     },
324 | }
325 | 
326 | 
327 | def get_strategy_template(strategy_type: str) -> dict[str, Any]:
328 |     """Get a strategy template by type.
329 | 
330 |     Args:
331 |         strategy_type: Type of strategy
332 | 
333 |     Returns:
334 |         Strategy template dictionary
335 | 
336 |     Raises:
337 |         ValueError: If strategy type not found
338 |     """
339 |     if strategy_type not in STRATEGY_TEMPLATES:
340 |         available = ", ".join(STRATEGY_TEMPLATES.keys())
341 |         raise ValueError(
342 |             f"Unknown strategy type: {strategy_type}. Available: {available}"
343 |         )
344 |     return STRATEGY_TEMPLATES[strategy_type]
345 | 
346 | 
347 | def list_available_strategies() -> list[str]:
348 |     """List all available strategy types.
349 | 
350 |     Returns:
351 |         List of strategy type names
352 |     """
353 |     return list(STRATEGY_TEMPLATES.keys())
354 | 
355 | 
356 | def get_strategy_info(strategy_type: str) -> dict[str, Any]:
357 |     """Get information about a strategy.
358 | 
359 |     Args:
360 |         strategy_type: Type of strategy
361 | 
362 |     Returns:
363 |         Strategy information including name, description, and parameters
364 |     """
365 |     template = get_strategy_template(strategy_type)
366 |     return {
367 |         "type": strategy_type,
368 |         "name": template["name"],
369 |         "description": template["description"],
370 |         "default_parameters": template["parameters"],
371 |         "optimization_ranges": template["optimization_ranges"],
372 |     }
373 | 
```
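
A minimal usage sketch for the module above. The prices are synthetic, and the `.format(**parameters)` rendering of a template's `code` string is an assumption about how callers consume it; everything else uses only the API shown in the file.

```python
# Usage sketch with synthetic prices; the .format(**parameters) rendering of a
# template's "code" string is an assumption about how callers consume it.
import numpy as np
import pandas as pd

from maverick_mcp.backtesting.strategies.templates import (
    SimpleMovingAverageStrategy,
    get_strategy_template,
    list_available_strategies,
)

# Synthetic close series: a gentle upward drift plus noise.
rng = np.random.default_rng(42)
data = pd.DataFrame({"close": 100 + np.cumsum(rng.normal(0.1, 1.0, 250))})

# Class-based API: boolean entry/exit Series aligned to the input index.
strategy = SimpleMovingAverageStrategy(parameters={"fast_period": 10, "slow_period": 20})
entries, exits = strategy.generate_signals(data)
print(f"{int(entries.sum())} entries / {int(exits.sum())} exits")

# Template API: look up a template and fill its {placeholders} with defaults.
print(list_available_strategies())
template = get_strategy_template("rsi")
print(template["code"].format(**template["parameters"]))
```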

--------------------------------------------------------------------------------
/maverick_mcp/api/services/prompt_service.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Prompt service for MaverickMCP API.
  3 | 
  4 | Handles trading and investing prompts for technical analysis and stock screening.
  5 | Extracted from server.py to improve code organization and maintainability.
  6 | """
  7 | 
  8 | from .base_service import BaseService
  9 | 
 10 | 
 11 | class PromptService(BaseService):
 12 |     """
 13 |     Service class for prompt operations.
 14 | 
 15 |     Provides trading and investing prompts for technical analysis and stock screening.
 16 |     """
 17 | 
 18 |     def register_tools(self):
 19 |         """Register prompt tools with MCP."""
 20 | 
 21 |         @self.mcp.prompt()
 22 |         def technical_analysis(ticker: str, timeframe: str = "daily") -> str:
 23 |             """
 24 |             Generate a comprehensive technical analysis prompt for a given stock.
 25 | 
 26 |             Args:
 27 |                 ticker: Stock ticker symbol (e.g., "AAPL", "MSFT")
 28 |                 timeframe: Analysis timeframe - "daily", "weekly", or "monthly"
 29 | 
 30 |             Returns:
 31 |                 Formatted prompt for technical analysis
 32 |             """
 33 |             return self._technical_analysis_prompt(ticker, timeframe)
 34 | 
 35 |         @self.mcp.prompt()
 36 |         def stock_screening_report(strategy: str = "momentum") -> str:
 37 |             """
 38 |             Generate a stock screening analysis prompt based on specified strategy.
 39 | 
 40 |             Args:
 41 |                 strategy: Screening strategy - "momentum", "value", "growth", "quality", or "dividend"
 42 | 
 43 |             Returns:
 44 |                 Formatted prompt for stock screening analysis
 45 |             """
 46 |             return self._stock_screening_prompt(strategy)
 47 | 
 48 |     def _technical_analysis_prompt(self, ticker: str, timeframe: str = "daily") -> str:
 49 |         """Generate technical analysis prompt implementation."""
 50 |         # Validate inputs
 51 |         valid_timeframes = ["daily", "weekly", "monthly"]
 52 |         if timeframe not in valid_timeframes:
 53 |             timeframe = "daily"
 54 | 
 55 |         ticker = ticker.upper().strip()
 56 | 
 57 |         prompt = f"""
 58 | # Technical Analysis Request for {ticker}
 59 | 
 60 | Please provide a comprehensive technical analysis for **{ticker}** using {timeframe} timeframe data.
 61 | 
 62 | ## Analysis Requirements:
 63 | 
 64 | ### 1. Price Action Analysis
 65 | - Current price level and recent price movement
 66 | - Key support and resistance levels
 67 | - Trend direction (bullish, bearish, or sideways)
 68 | - Chart patterns (if any): triangles, flags, head & shoulders, etc.
 69 | 
 70 | ### 2. Technical Indicators Analysis
 71 | Please analyze these key indicators:
 72 | 
 73 | **Moving Averages:**
 74 | - 20, 50, 200-period moving averages
 75 | - Price position relative to moving averages
 76 | - Moving average convergence/divergence signals
 77 | 
 78 | **Momentum Indicators:**
 79 | - RSI (14-period): overbought/oversold conditions
 80 | - MACD: signal line crossovers and histogram
 81 | - Stochastic oscillator: %K and %D levels
 82 | 
 83 | **Volume Analysis:**
 84 | - Recent volume trends
 85 | - Volume confirmation of price moves
 86 | - On-balance volume (OBV) trend
 87 | 
 88 | ### 3. Market Context
 89 | - Overall market trend and {ticker}'s correlation
 90 | - Sector performance and relative strength
 91 | - Recent news or events that might impact the stock
 92 | 
 93 | ### 4. Trading Recommendations
 94 | Based on the technical analysis, please provide:
 95 | - **Entry points**: Optimal buy/sell levels
 96 | - **Stop loss**: Risk management levels
 97 | - **Target prices**: Profit-taking levels
 98 | - **Time horizon**: Short-term, medium-term, or long-term outlook
 99 | - **Risk assessment**: High, medium, or low risk trade
100 | 
101 | ### 5. Alternative Scenarios
102 | - Bull case: What would drive the stock higher?
103 | - Bear case: What are the key risks or downside catalysts?
104 | - Base case: Most likely scenario given current technicals
105 | 
106 | ## Additional Context:
107 | - Timeframe: {timeframe.title()} analysis
108 | - Analysis date: {self._get_current_date()}
109 | - Please use the most recent market data available
110 | - Consider both technical and fundamental factors if relevant
111 | 
112 | Please structure your analysis clearly and provide actionable insights for traders and investors.
113 | """
114 | 
115 |         self.log_tool_usage(
116 |             "technical_analysis_prompt", ticker=ticker, timeframe=timeframe
117 |         )
118 |         return prompt.strip()
119 | 
120 |     def _stock_screening_prompt(self, strategy: str = "momentum") -> str:
121 |         """Generate stock screening prompt implementation."""
122 |         # Validate strategy
123 |         valid_strategies = ["momentum", "value", "growth", "quality", "dividend"]
124 |         if strategy not in valid_strategies:
125 |             strategy = "momentum"
126 | 
127 |         strategy_configs = {
128 |             "momentum": {
129 |                 "title": "Momentum Stock Screening",
130 |                 "description": "Identify stocks with strong price momentum and technical strength",
131 |                 "criteria": [
132 |                     "Strong relative strength (RS rating > 80)",
133 |                     "Price above 50-day and 200-day moving averages",
134 |                     "Recent breakout from consolidation pattern",
135 |                     "Volume surge on breakout",
136 |                     "Positive earnings growth",
137 |                     "Strong sector performance",
138 |                 ],
139 |                 "metrics": [
140 |                     "Relative Strength Index (RSI)",
141 |                     "Price rate of change (ROC)",
142 |                     "Volume relative to average",
143 |                     "Distance from moving averages",
144 |                     "Earnings growth rate",
145 |                     "Revenue growth rate",
146 |                 ],
147 |             },
148 |             "value": {
149 |                 "title": "Value Stock Screening",
150 |                 "description": "Find undervalued stocks with strong fundamentals",
151 |                 "criteria": [
152 |                     "Low P/E ratio relative to industry",
153 |                     "P/B ratio below 2.0",
154 |                     "Debt-to-equity ratio below industry average",
155 |                     "Positive free cash flow",
156 |                     "Dividend yield above market average",
157 |                     "Strong return on equity (ROE > 15%)",
158 |                 ],
159 |                 "metrics": [
160 |                     "Price-to-Earnings (P/E) ratio",
161 |                     "Price-to-Book (P/B) ratio",
162 |                     "Price-to-Sales (P/S) ratio",
163 |                     "Enterprise Value/EBITDA",
164 |                     "Free cash flow yield",
165 |                     "Return on equity (ROE)",
166 |                 ],
167 |             },
168 |             "growth": {
169 |                 "title": "Growth Stock Screening",
170 |                 "description": "Identify companies with accelerating growth metrics",
171 |                 "criteria": [
172 |                     "Revenue growth > 20% annually",
173 |                     "Earnings growth acceleration",
174 |                     "Strong profit margins",
175 |                     "Expanding market share",
176 |                     "Innovation and competitive advantages",
177 |                     "Strong management execution",
178 |                 ],
179 |                 "metrics": [
180 |                     "Revenue growth rate",
181 |                     "Earnings per share (EPS) growth",
182 |                     "Profit margin trends",
183 |                     "Return on invested capital (ROIC)",
184 |                     "Price/Earnings/Growth (PEG) ratio",
185 |                     "Market share metrics",
186 |                 ],
187 |             },
188 |             "quality": {
189 |                 "title": "Quality Stock Screening",
190 |                 "description": "Find high-quality companies with sustainable competitive advantages",
191 |                 "criteria": [
192 |                     "Consistent earnings growth (5+ years)",
193 |                     "Strong balance sheet (low debt)",
194 |                     "High return on equity (ROE > 20%)",
195 |                     "Wide economic moat",
196 |                     "Stable or growing market share",
197 |                     "Strong management track record",
198 |                 ],
199 |                 "metrics": [
200 |                     "Return on equity (ROE)",
201 |                     "Return on assets (ROA)",
202 |                     "Debt-to-equity ratio",
203 |                     "Interest coverage ratio",
204 |                     "Earnings consistency",
205 |                     "Free cash flow stability",
206 |                 ],
207 |             },
208 |             "dividend": {
209 |                 "title": "Dividend Stock Screening",
210 |                 "description": "Identify stocks with attractive and sustainable dividend yields",
211 |                 "criteria": [
212 |                     "Dividend yield between 3-8%",
213 |                     "Dividend growth history (5+ years)",
214 |                     "Payout ratio below 60%",
215 |                     "Strong free cash flow coverage",
216 |                     "Stable or growing earnings",
217 |                     "Defensive business model",
218 |                 ],
219 |                 "metrics": [
220 |                     "Dividend yield",
221 |                     "Dividend growth rate",
222 |                     "Payout ratio",
223 |                     "Free cash flow coverage",
224 |                     "Dividend aristocrat status",
225 |                     "Earnings stability",
226 |                 ],
227 |             },
228 |         }
229 | 
230 |         config = strategy_configs[strategy]
231 | 
232 |         prompt = f"""
233 | # {config["title"]} Analysis Request
234 | 
235 | Please conduct a comprehensive {strategy} stock screening analysis to {config["description"].lower()}.
236 | 
237 | ## Screening Criteria:
238 | 
239 | ### Primary Filters:
240 | {chr(10).join(f"- {criterion}" for criterion in config["criteria"])}
241 | 
242 | ### Key Metrics to Analyze:
243 | {chr(10).join(f"- {metric}" for metric in config["metrics"])}
244 | 
245 | ## Analysis Framework:
246 | 
247 | ### 1. Market Environment Assessment
248 | - Current market conditions and {strategy} stock performance
249 | - Sector rotation trends favoring {strategy} strategies
250 | - Economic factors supporting {strategy} investing
251 | - Historical performance of {strategy} strategies in similar conditions
252 | 
253 | ### 2. Stock Screening Process
254 | Please apply the following methodology:
255 | - **Universe**: Focus on large and mid-cap stocks (market cap > $2B)
256 | - **Liquidity**: Average daily volume > 1M shares
257 | - **Fundamental Screening**: Apply the primary filters listed above
258 | - **Technical Validation**: Confirm with technical analysis
259 | - **Risk Assessment**: Evaluate potential risks and catalysts
260 | 
261 | ### 3. Top Stock Recommendations
262 | For each recommended stock, provide:
263 | - **Company overview**: Business model and competitive position
264 | - **Why it fits the {strategy} criteria**: Specific metrics and rationale
265 | - **Risk factors**: Key risks to monitor
266 | - **Price targets**: Entry points and target prices
267 | - **Position sizing**: Recommended allocation (1-5% portfolio weight)
268 | 
269 | ### 4. Portfolio Construction
270 | - **Diversification**: Spread across sectors and industries
271 | - **Risk management**: Position sizing and stop-loss levels
272 | - **Rebalancing**: When and how to adjust positions
273 | - **Performance monitoring**: Key metrics to track
274 | 
275 | ### 5. Implementation Strategy
276 | - **Entry strategy**: Best practices for building positions
277 | - **Timeline**: Short-term vs. long-term holding periods
278 | - **Market timing**: Consider current market cycle
279 | - **Tax considerations**: Tax-efficient implementation
280 | 
281 | ## Additional Requirements:
282 | - Screen date: {self._get_current_date()}
283 | - Market cap focus: Large and mid-cap stocks
284 | - Geographic focus: US markets (can include international if compelling)
285 | - Minimum liquidity: $10M average daily volume
286 | - Exclude recent IPOs (< 6 months) unless exceptionally compelling
287 | 
288 | ## Output Format:
289 | 1. **Executive Summary**: Key findings and market outlook
290 | 2. **Top 10 Stock Recommendations**: Detailed analysis for each
291 | 3. **Sector Allocation**: Recommended sector weights
292 | 4. **Risk Assessment**: Portfolio-level risks and mitigation
293 | 5. **Performance Expectations**: Expected returns and timeline
294 | 
295 | Please provide actionable insights that can be immediately implemented in a {strategy}-focused investment strategy.
296 | """
297 | 
298 |         self.log_tool_usage("stock_screening_prompt", strategy=strategy)
299 |         return prompt.strip()
300 | 
301 |     def _get_current_date(self) -> str:
302 |         """Get current date in readable format."""
303 |         from datetime import UTC, datetime
304 | 
305 |         return datetime.now(UTC).strftime("%B %d, %Y")
306 | 
```
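
A hedged wiring sketch for the service above. `BaseService`'s constructor is not shown on this page, so the `FastMCP` import path and the `mcp=` keyword argument are assumptions inferred from the `self.mcp.prompt()` usage in `register_tools`.

```python
# Hypothetical wiring sketch -- BaseService's constructor signature and the
# FastMCP import path are assumptions, inferred from self.mcp.prompt() usage.
from fastmcp import FastMCP

from maverick_mcp.api.services.prompt_service import PromptService

mcp = FastMCP("maverick-mcp")

# register_tools() attaches the technical_analysis and
# stock_screening_report prompts to the server.
PromptService(mcp=mcp).register_tools()
```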

--------------------------------------------------------------------------------
/maverick_mcp/utils/orchestration_logging.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Comprehensive Orchestration Logging System
  3 | 
  4 | Provides structured logging for research agent orchestration with:
  5 | - Request ID tracking across all components
  6 | - Performance timing and metrics
  7 | - Parallel execution visibility
  8 | - Agent communication tracking
  9 | - Resource usage monitoring
 10 | """
 11 | import asyncio
 12 | import functools
 13 | import logging
 14 | import time
 15 | import uuid
 16 | from contextlib import contextmanager
 17 | from typing import Any
 18 | 
 19 | 
 20 | # Color codes for better readability in terminal
 21 | class LogColors:
 22 |     HEADER = "\033[95m"
 23 |     OKBLUE = "\033[94m"
 24 |     OKCYAN = "\033[96m"
 25 |     OKGREEN = "\033[92m"
 26 |     WARNING = "\033[93m"
 27 |     FAIL = "\033[91m"
 28 |     ENDC = "\033[0m"
 29 |     BOLD = "\033[1m"
 30 |     UNDERLINE = "\033[4m"
 31 | 
 32 | 
 33 | class OrchestrationLogger:
 34 |     """Enhanced logger for orchestration components with structured output."""
 35 | 
 36 |     def __init__(self, component_name: str):
 37 |         self.component_name = component_name
 38 |         self.logger = logging.getLogger(f"maverick_mcp.orchestration.{component_name}")
 39 |         self.request_id: str | None = None
 40 |         self.session_context: dict[str, Any] = {}
 41 | 
 42 |     def set_request_context(
 43 |         self, request_id: str | None = None, session_id: str | None = None, **kwargs
 44 |     ):
 45 |         """Set context for this request that will be included in all logs."""
 46 |         self.request_id = request_id or str(uuid.uuid4())[:8]
 47 |         self.session_context = {
 48 |             "session_id": session_id,
 49 |             "request_id": self.request_id,
 50 |             **kwargs,
 51 |         }
 52 | 
 53 |     def _format_message(self, level: str, message: str, **kwargs) -> str:
 54 |         """Format log message with consistent structure and colors."""
 55 |         color = {
 56 |             "DEBUG": LogColors.OKCYAN,
 57 |             "INFO": LogColors.OKGREEN,
 58 |             "WARNING": LogColors.WARNING,
 59 |             "ERROR": LogColors.FAIL,
 60 |         }.get(level, "")
 61 | 
 62 |         # Build context string
 63 |         context_parts = []
 64 |         if self.request_id:
 65 |             context_parts.append(f"req:{self.request_id}")
 66 |         if self.session_context.get("session_id"):
 67 |             context_parts.append(f"session:{self.session_context['session_id']}")
 68 | 
 69 |         context_str = f"[{' | '.join(context_parts)}]" if context_parts else ""
 70 | 
 71 |         # Add component and extra info
 72 |         extra_info = " | ".join(f"{k}:{v}" for k, v in kwargs.items() if v is not None)
 73 |         extra_str = f" | {extra_info}" if extra_info else ""
 74 | 
 75 |         return f"{color}🔧 {self.component_name}{LogColors.ENDC} {context_str}: {message}{extra_str}"
 76 | 
 77 |     def debug(self, message: str, **kwargs):
 78 |         """Log debug message with context."""
 79 |         self.logger.debug(self._format_message("DEBUG", message, **kwargs))
 80 | 
 81 |     def info(self, message: str, **kwargs):
 82 |         """Log info message with context."""
 83 |         self.logger.info(self._format_message("INFO", message, **kwargs))
 84 | 
 85 |     def warning(self, message: str, **kwargs):
 86 |         """Log warning message with context."""
 87 |         self.logger.warning(self._format_message("WARNING", message, **kwargs))
 88 | 
 89 |     def error(self, message: str, **kwargs):
 90 |         """Log error message with context."""
 91 |         self.logger.error(self._format_message("ERROR", message, **kwargs))
 92 | 
 93 | 
 94 | # Global registry of component loggers
 95 | _component_loggers: dict[str, OrchestrationLogger] = {}
 96 | 
 97 | 
 98 | def get_orchestration_logger(component_name: str) -> OrchestrationLogger:
 99 |     """Get or create an orchestration logger for a component."""
100 |     if component_name not in _component_loggers:
101 |         _component_loggers[component_name] = OrchestrationLogger(component_name)
102 |     return _component_loggers[component_name]
103 | 
104 | 
105 | def log_method_call(
106 |     component: str | None = None,
107 |     include_params: bool = True,
108 |     include_timing: bool = True,
109 | ):
110 |     """
111 |     Decorator to log method entry/exit with timing and parameters.
112 | 
113 |     Args:
114 |         component: Component name override
115 |         include_params: Whether to log method parameters
116 |         include_timing: Whether to log execution timing
117 |     """
118 | 
119 |     def decorator(func):
120 |         @functools.wraps(func)
121 |         async def async_wrapper(*args, **kwargs):
122 |             # Determine component name
123 |             comp_name = component
124 |             if not comp_name and args and hasattr(args[0], "__class__"):
125 |                 comp_name = args[0].__class__.__name__
126 |             if not comp_name:
127 |                 comp_name = func.__module__.split(".")[-1]
128 | 
129 |             logger = get_orchestration_logger(comp_name)
130 | 
131 |             # Log method entry
132 |             params_str = ""
133 |             if include_params:
134 |                 # Sanitize parameters for logging
135 |                 safe_kwargs = {k: v for k, v in kwargs.items() if not k.startswith("_")}
136 |                 if safe_kwargs:
137 |                     params_str = f" | params: {safe_kwargs}"
138 | 
139 |             logger.info(f"🚀 START {func.__name__}{params_str}")
140 | 
141 |             start_time = time.time()
142 |             try:
143 |                 result = await func(*args, **kwargs)
144 | 
145 |                 # Log successful completion
146 |                 duration = time.time() - start_time
147 |                 timing_str = f" | duration: {duration:.3f}s" if include_timing else ""
148 | 
149 |                 # Include result summary if available
150 |                 result_summary = ""
151 |                 if isinstance(result, dict):
152 |                     if "execution_mode" in result:
153 |                         result_summary += f" | mode: {result['execution_mode']}"
154 |                     if "research_confidence" in result:
155 |                         result_summary += (
156 |                             f" | confidence: {result['research_confidence']:.2f}"
157 |                         )
158 |                     if "parallel_execution_stats" in result:
159 |                         stats = result["parallel_execution_stats"]
160 |                         result_summary += f" | tasks: {stats.get('successful_tasks', 0)}/{stats.get('total_tasks', 0)}"
161 | 
162 |                 logger.info(f"✅ SUCCESS {func.__name__}{timing_str}{result_summary}")
163 |                 return result
164 | 
165 |             except Exception as e:
166 |                 # Log error
167 |                 duration = time.time() - start_time
168 |                 timing_str = f" | duration: {duration:.3f}s" if include_timing else ""
169 |                 logger.error(f"❌ ERROR {func.__name__}{timing_str} | error: {str(e)}")
170 |                 raise
171 | 
172 |         @functools.wraps(func)
173 |         def sync_wrapper(*args, **kwargs):
174 |             # Handle synchronous functions
175 |             comp_name = component
176 |             if not comp_name and args and hasattr(args[0], "__class__"):
177 |                 comp_name = args[0].__class__.__name__
178 |             if not comp_name:
179 |                 comp_name = func.__module__.split(".")[-1]
180 | 
181 |             logger = get_orchestration_logger(comp_name)
182 | 
183 |             # Log method entry
184 |             params_str = ""
185 |             if include_params:
186 |                 safe_kwargs = {k: v for k, v in kwargs.items() if not k.startswith("_")}
187 |                 if safe_kwargs:
188 |                     params_str = f" | params: {safe_kwargs}"
189 | 
190 |             logger.info(f"🚀 START {func.__name__}{params_str}")
191 | 
192 |             start_time = time.time()
193 |             try:
194 |                 result = func(*args, **kwargs)
195 | 
196 |                 duration = time.time() - start_time
197 |                 timing_str = f" | duration: {duration:.3f}s" if include_timing else ""
198 |                 logger.info(f"✅ SUCCESS {func.__name__}{timing_str}")
199 |                 return result
200 | 
201 |             except Exception as e:
202 |                 duration = time.time() - start_time
203 |                 timing_str = f" | duration: {duration:.3f}s" if include_timing else ""
204 |                 logger.error(f"❌ ERROR {func.__name__}{timing_str} | error: {str(e)}")
205 |                 raise
206 | 
207 |         # Return appropriate wrapper based on function type
208 |         if asyncio.iscoroutinefunction(func):
209 |             return async_wrapper
210 |         else:
211 |             return sync_wrapper
212 | 
213 |     return decorator
214 | 
215 | 
216 | @contextmanager
217 | def log_parallel_execution(component: str, task_description: str, task_count: int):
218 |     """Context manager for logging parallel execution blocks."""
219 |     logger = get_orchestration_logger(component)
220 | 
221 |     logger.info(f"🔄 PARALLEL_START {task_description} | tasks: {task_count}")
222 |     start_time = time.time()
223 | 
224 |     try:
225 |         yield logger
226 | 
227 |         duration = time.time() - start_time
228 |         logger.info(
229 |             f"🎯 PARALLEL_SUCCESS {task_description} | duration: {duration:.3f}s | tasks: {task_count}"
230 |         )
231 | 
232 |     except Exception as e:
233 |         duration = time.time() - start_time
234 |         logger.error(
235 |             f"💥 PARALLEL_ERROR {task_description} | duration: {duration:.3f}s | error: {str(e)}"
236 |         )
237 |         raise
238 | 
239 | 
240 | @contextmanager
241 | def log_agent_execution(
242 |     agent_type: str, task_id: str, focus_areas: list[str] | None = None
243 | ):
244 |     """Context manager for logging individual agent execution."""
245 |     logger = get_orchestration_logger(f"{agent_type}Agent")
246 | 
247 |     focus_str = f" | focus: {focus_areas}" if focus_areas else ""
248 |     logger.info(f"🤖 AGENT_START {task_id}{focus_str}")
249 | 
250 |     start_time = time.time()
251 | 
252 |     try:
253 |         yield logger
254 | 
255 |         duration = time.time() - start_time
256 |         logger.info(f"🎉 AGENT_SUCCESS {task_id} | duration: {duration:.3f}s")
257 | 
258 |     except Exception as e:
259 |         duration = time.time() - start_time
260 |         logger.error(
261 |             f"🔥 AGENT_ERROR {task_id} | duration: {duration:.3f}s | error: {str(e)}"
262 |         )
263 |         raise
264 | 
265 | 
266 | def log_tool_invocation(tool_name: str, request_data: dict[str, Any] | None = None):
267 |     """Log MCP tool invocation with request details."""
268 |     logger = get_orchestration_logger("MCPToolRegistry")
269 | 
270 |     request_summary = ""
271 |     if request_data:
272 |         if "query" in request_data:
273 |             request_summary += f" | query: '{request_data['query'][:50]}...'"
274 |         if "research_scope" in request_data:
275 |             request_summary += f" | scope: {request_data['research_scope']}"
276 |         if "persona" in request_data:
277 |             request_summary += f" | persona: {request_data['persona']}"
278 | 
279 |     logger.info(f"🔧 TOOL_INVOKE {tool_name}{request_summary}")
280 | 
281 | 
282 | def log_synthesis_operation(
283 |     operation: str, input_count: int, output_summary: str | None = None
284 | ):
285 |     """Log result synthesis operations."""
286 |     logger = get_orchestration_logger("ResultSynthesis")
287 | 
288 |     summary_str = f" | output: {output_summary}" if output_summary else ""
289 |     logger.info(f"🧠 SYNTHESIS {operation} | inputs: {input_count}{summary_str}")
290 | 
291 | 
292 | def log_fallback_trigger(component: str, reason: str, fallback_action: str):
293 |     """Log when fallback mechanisms are triggered."""
294 |     logger = get_orchestration_logger(component)
295 |     logger.warning(f"⚠️ FALLBACK_TRIGGER {reason} | action: {fallback_action}")
296 | 
297 | 
298 | def log_performance_metrics(component: str, metrics: dict[str, Any]):
299 |     """Log performance metrics for monitoring."""
300 |     logger = get_orchestration_logger(component)
301 | 
302 |     metrics_str = " | ".join(f"{k}: {v}" for k, v in metrics.items())
303 |     logger.info(f"📊 PERFORMANCE_METRICS | {metrics_str}")
304 | 
305 | 
306 | def log_resource_usage(
307 |     component: str,
308 |     api_calls: int | None = None,
309 |     cache_hits: int | None = None,
310 |     memory_mb: float | None = None,
311 | ):
312 |     """Log resource usage statistics."""
313 |     logger = get_orchestration_logger(component)
314 | 
315 |     usage_parts = []
316 |     if api_calls is not None:
317 |         usage_parts.append(f"api_calls: {api_calls}")
318 |     if cache_hits is not None:
319 |         usage_parts.append(f"cache_hits: {cache_hits}")
320 |     if memory_mb is not None:
321 |         usage_parts.append(f"memory_mb: {memory_mb:.1f}")
322 | 
323 |     if usage_parts:
324 |         usage_str = " | ".join(usage_parts)
325 |         logger.info(f"📈 RESOURCE_USAGE | {usage_str}")
326 | 
327 | 
328 | # Export key functions
329 | __all__ = [
330 |     "OrchestrationLogger",
331 |     "get_orchestration_logger",
332 |     "log_method_call",
333 |     "log_parallel_execution",
334 |     "log_agent_execution",
335 |     "log_tool_invocation",
336 |     "log_synthesis_operation",
337 |     "log_fallback_trigger",
338 |     "log_performance_metrics",
339 |     "log_resource_usage",
340 | ]
341 | 
```
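
The helpers above compose as in this sketch. Every imported name is defined in the module; `research_symbols` itself is a hypothetical stand-in for a real orchestrator method.

```python
# Usage sketch: every imported helper is defined in the module above;
# research_symbols is a hypothetical stand-in for a real orchestrator method.
import asyncio
import logging

from maverick_mcp.utils.orchestration_logging import (
    get_orchestration_logger,
    log_method_call,
    log_parallel_execution,
)

logging.basicConfig(level=logging.INFO)


@log_method_call(component="DemoOrchestrator", include_timing=True)
async def research_symbols(symbols: list[str]) -> dict:
    logger = get_orchestration_logger("DemoOrchestrator")
    logger.set_request_context(session_id="demo-session")

    with log_parallel_execution("DemoOrchestrator", "fan-out research", len(symbols)):
        # A real orchestrator would gather one task per symbol here.
        await asyncio.sleep(0.01)

    # "execution_mode" is picked up by the decorator's result summary.
    return {"execution_mode": "parallel"}


asyncio.run(research_symbols(["AAPL", "MSFT"]))
```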

--------------------------------------------------------------------------------
/maverick_mcp/utils/circuit_breaker_decorators.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Decorators for easy circuit breaker integration.
  3 | Provides convenient decorators for common external service patterns.
  4 | """
  5 | 
  6 | import asyncio
  7 | import functools
  8 | import logging
  9 | from collections.abc import Callable
 10 | from typing import TypeVar, cast
 11 | 
 12 | from maverick_mcp.config.settings import get_settings
 13 | from maverick_mcp.utils.circuit_breaker_services import (
 14 |     economic_data_breaker,
 15 |     http_breaker,
 16 |     market_data_breaker,
 17 |     news_data_breaker,
 18 |     stock_data_breaker,
 19 | )
 20 | 
 21 | logger = logging.getLogger(__name__)
 22 | settings = get_settings()
 23 | 
 24 | T = TypeVar("T")
 25 | 
 26 | 
 27 | def with_stock_data_circuit_breaker(
 28 |     use_fallback: bool = True, fallback_on_open: bool = True
 29 | ) -> Callable:
 30 |     """
 31 |     Decorator for stock data fetching functions.
 32 | 
 33 |     Args:
 34 |         use_fallback: Whether to use fallback strategies on failure
 35 |         fallback_on_open: Whether to use fallback when circuit is open
 36 | 
 37 |     Example:
 38 |         @with_stock_data_circuit_breaker()
 39 |         def get_stock_data(symbol: str, start: str, end: str) -> pd.DataFrame:
 40 |             return yf.download(symbol, start=start, end=end)
 41 |     """
 42 | 
 43 |     def decorator(func: Callable[..., T]) -> Callable[..., T]:
 44 |         if asyncio.iscoroutinefunction(func):
 45 | 
 46 |             @functools.wraps(func)
 47 |             async def async_wrapper(*args, **kwargs):
 48 |                 if use_fallback and len(args) >= 3:
 49 |                     # Extract symbol, start, end from args
 50 |                     symbol = args[0] if args else kwargs.get("symbol", "UNKNOWN")
 51 |                     start_date = (
 52 |                         args[1] if len(args) > 1 else kwargs.get("start_date", "")
 53 |                     )
 54 |                     end_date = args[2] if len(args) > 2 else kwargs.get("end_date", "")
 55 | 
 56 |                     return await stock_data_breaker.fetch_with_fallback_async(
 57 |                         func, symbol, start_date, end_date, **kwargs
 58 |                     )
 59 |                 else:
 60 |                     return await stock_data_breaker.call_async(func, *args, **kwargs)
 61 | 
 62 |             return cast(Callable[..., T], async_wrapper)
 63 |         else:
 64 | 
 65 |             @functools.wraps(func)
 66 |             def sync_wrapper(*args, **kwargs):
 67 |                 if use_fallback and len(args) >= 3:
 68 |                     # Extract symbol, start, end from args
 69 |                     symbol = args[0] if args else kwargs.get("symbol", "UNKNOWN")
 70 |                     start_date = (
 71 |                         args[1] if len(args) > 1 else kwargs.get("start_date", "")
 72 |                     )
 73 |                     end_date = args[2] if len(args) > 2 else kwargs.get("end_date", "")
 74 | 
 75 |                     return stock_data_breaker.fetch_with_fallback(
 76 |                         func, symbol, start_date, end_date, **kwargs
 77 |                     )
 78 |                 else:
 79 |                     return stock_data_breaker.call_sync(func, *args, **kwargs)
 80 | 
 81 |             return cast(Callable[..., T], sync_wrapper)
 82 | 
 83 |     return decorator
 84 | 
 85 | 
 86 | def with_market_data_circuit_breaker(
 87 |     use_fallback: bool = True, service: str = "finviz"
 88 | ) -> Callable:
 89 |     """
 90 |     Decorator for market data fetching functions.
 91 | 
 92 |     Args:
 93 |         use_fallback: Whether to use fallback strategies on failure
 94 |         service: Service name (finviz, external_api)
 95 | 
 96 |     Example:
 97 |         @with_market_data_circuit_breaker(service="finviz")
 98 |         def get_top_gainers() -> dict:
 99 |             return fetch_finviz_gainers()
100 |     """
101 | 
102 |     def decorator(func: Callable[..., T]) -> Callable[..., T]:
103 |         # Get appropriate breaker
104 |         if service == "external_api":
105 |             from maverick_mcp.utils.circuit_breaker_services import (
106 |                 MarketDataCircuitBreaker,
107 |             )
108 | 
109 |             breaker = MarketDataCircuitBreaker("external_api")
110 |         else:
111 |             breaker = market_data_breaker
112 | 
113 |         if asyncio.iscoroutinefunction(func):
114 | 
115 |             @functools.wraps(func)
116 |             async def async_wrapper(*args, **kwargs):
117 |                 if use_fallback:
118 |                     # Try to extract mover_type
119 |                     mover_type = kwargs.get("mover_type", "market_data")
120 |                     try:
121 |                         return await breaker.call_async(func, *args, **kwargs)
122 |                     except Exception as e:
123 |                         logger.warning(f"Market data fetch failed: {e}, using fallback")
124 |                         return breaker.fallback.execute_sync(mover_type)
125 |                 else:
126 |                     return await breaker.call_async(func, *args, **kwargs)
127 | 
128 |             return cast(Callable[..., T], async_wrapper)
129 |         else:
130 | 
131 |             @functools.wraps(func)
132 |             def sync_wrapper(*args, **kwargs):
133 |                 if use_fallback:
134 |                     # Try to extract mover_type
135 |                     mover_type = kwargs.get("mover_type", "market_data")
136 |                     return breaker.fetch_with_fallback(func, mover_type, **kwargs)
137 |                 else:
138 |                     return breaker.call_sync(func, *args, **kwargs)
139 | 
140 |             return cast(Callable[..., T], sync_wrapper)
141 | 
142 |     return decorator
143 | 
144 | 
145 | def with_economic_data_circuit_breaker(use_fallback: bool = True) -> Callable:
146 |     """
147 |     Decorator for economic data fetching functions.
148 | 
149 |     Args:
150 |         use_fallback: Whether to use fallback strategies on failure
151 | 
152 |     Example:
153 |         @with_economic_data_circuit_breaker()
154 |         def get_gdp_data(start: str, end: str) -> pd.Series:
155 |             return fred.get_series("GDP", start, end)
156 |     """
157 | 
158 |     def decorator(func: Callable[..., T]) -> Callable[..., T]:
159 |         if asyncio.iscoroutinefunction(func):
160 | 
161 |             @functools.wraps(func)
162 |             async def async_wrapper(*args, **kwargs):
163 |                 if use_fallback and (args or "series_id" in kwargs):
164 |                     # Extract series_id and dates
165 |                     series_id = args[0] if args else kwargs.get("series_id", "UNKNOWN")
166 |                     start_date = (
167 |                         args[1] if len(args) > 1 else kwargs.get("start_date", "")
168 |                     )
169 |                     end_date = args[2] if len(args) > 2 else kwargs.get("end_date", "")
170 | 
171 |                     try:
172 |                         return await economic_data_breaker.call_async(
173 |                             func, *args, **kwargs
174 |                         )
175 |                     except Exception as e:
176 |                         logger.warning(
177 |                             f"Economic data fetch failed: {e}, using fallback"
178 |                         )
179 |                         return economic_data_breaker.fallback.execute_sync(
180 |                             series_id, start_date, end_date
181 |                         )
182 |                 else:
183 |                     return await economic_data_breaker.call_async(func, *args, **kwargs)
184 | 
185 |             return cast(Callable[..., T], async_wrapper)
186 |         else:
187 | 
188 |             @functools.wraps(func)
189 |             def sync_wrapper(*args, **kwargs):
190 |                 if use_fallback and (args or "series_id" in kwargs):
191 |                     # Extract series_id and dates
192 |                     series_id = args[0] if args else kwargs.get("series_id", "UNKNOWN")
193 |                     start_date = (
194 |                         args[1] if len(args) > 1 else kwargs.get("start_date", "")
195 |                     )
196 |                     end_date = args[2] if len(args) > 2 else kwargs.get("end_date", "")
197 | 
198 |                     return economic_data_breaker.fetch_with_fallback(
199 |                         func, series_id, start_date, end_date, **kwargs
200 |                     )
201 |                 else:
202 |                     return economic_data_breaker.call_sync(func, *args, **kwargs)
203 | 
204 |             return cast(Callable[..., T], sync_wrapper)
205 | 
206 |     return decorator
207 | 
208 | 
209 | def with_news_circuit_breaker(use_fallback: bool = True) -> Callable:
210 |     """
211 |     Decorator for news/sentiment API calls.
212 | 
213 |     Args:
214 |         use_fallback: Whether to use fallback strategies on failure
215 | 
216 |     Example:
217 |         @with_news_circuit_breaker()
218 |         def get_stock_news(symbol: str) -> dict:
219 |             return fetch_news_api(symbol)
220 |     """
221 | 
222 |     def decorator(func: Callable[..., T]) -> Callable[..., T]:
223 |         if asyncio.iscoroutinefunction(func):
224 | 
225 |             @functools.wraps(func)
226 |             async def async_wrapper(*args, **kwargs):
227 |                 if use_fallback and (args or "symbol" in kwargs):
228 |                     symbol = args[0] if args else kwargs.get("symbol", "UNKNOWN")
229 |                     try:
230 |                         return await news_data_breaker.call_async(func, *args, **kwargs)
231 |                     except Exception as e:
232 |                         logger.warning(f"News data fetch failed: {e}, using fallback")
233 |                         return news_data_breaker.fallback.execute_sync(symbol)
234 |                 else:
235 |                     return await news_data_breaker.call_async(func, *args, **kwargs)
236 | 
237 |             return cast(Callable[..., T], async_wrapper)
238 |         else:
239 | 
240 |             @functools.wraps(func)
241 |             def sync_wrapper(*args, **kwargs):
242 |                 if use_fallback and (args or "symbol" in kwargs):
243 |                     symbol = args[0] if args else kwargs.get("symbol", "UNKNOWN")
244 |                     return news_data_breaker.fetch_with_fallback(func, symbol, **kwargs)
245 |                 else:
246 |                     return news_data_breaker.call_sync(func, *args, **kwargs)
247 | 
248 |             return cast(Callable[..., T], sync_wrapper)
249 | 
250 |     return decorator
251 | 
252 | 
253 | def with_http_circuit_breaker(
254 |     timeout: float | None = None, use_session: bool = False
255 | ) -> Callable:
256 |     """
257 |     Decorator for general HTTP requests.
258 | 
259 |     Args:
260 |         timeout: Override default timeout
261 |         use_session: Whether the function uses a requests Session
262 | 
263 |     Example:
264 |         @with_http_circuit_breaker(timeout=10.0)
265 |         def fetch_api_data(url: str) -> dict:
266 |             response = requests.get(url)
267 |             return response.json()
268 |     """
269 | 
270 |     def decorator(func: Callable[..., T]) -> Callable[..., T]:
271 |         if asyncio.iscoroutinefunction(func):
272 | 
273 |             @functools.wraps(func)
274 |             async def async_wrapper(*args, **kwargs):
275 |                 # Override timeout if specified
276 |                 if timeout is not None:
277 |                     kwargs["timeout"] = timeout
278 |                 return await http_breaker.call_async(func, *args, **kwargs)
279 | 
280 |             return cast(Callable[..., T], async_wrapper)
281 |         else:
282 | 
283 |             @functools.wraps(func)
284 |             def sync_wrapper(*args, **kwargs):
285 |                 # Override timeout if specified
286 |                 if timeout is not None:
287 |                     kwargs["timeout"] = timeout
288 |                 return http_breaker.call_sync(func, *args, **kwargs)
289 | 
290 |             return cast(Callable[..., T], sync_wrapper)
291 | 
292 |     return decorator
293 | 
294 | 
295 | def circuit_breaker_method(
296 |     service: str = "http", use_fallback: bool = True, **breaker_kwargs
297 | ) -> Callable:
298 |     """
299 |     Generic circuit breaker decorator for class methods.
300 | 
301 |     Args:
302 |         service: Service type (yfinance, finviz, fred, news, http)
303 |         use_fallback: Whether to use fallback strategies
304 |         **breaker_kwargs: Additional arguments for the circuit breaker
305 | 
306 |     Example:
307 |         class DataProvider:
308 |             @circuit_breaker_method(service="yfinance")
309 |             def get_stock_data(self, symbol: str) -> pd.DataFrame:
310 |                 return yf.download(symbol)
311 |     """
312 |     # Map service names to decorators
313 |     service_decorators = {
314 |         "yfinance": with_stock_data_circuit_breaker,
315 |         "stock": with_stock_data_circuit_breaker,
316 |         "finviz": lambda **kw: with_market_data_circuit_breaker(service="finviz", **kw),
317 |         "external_api": lambda **kw: with_market_data_circuit_breaker(
318 |             service="external_api", **kw
319 |         ),
320 |         "market": with_market_data_circuit_breaker,
321 |         "fred": with_economic_data_circuit_breaker,
322 |         "economic": with_economic_data_circuit_breaker,
323 |         "news": with_news_circuit_breaker,
324 |         "sentiment": with_news_circuit_breaker,
325 |         "http": with_http_circuit_breaker,
326 |     }
327 | 
328 |     decorator_func = service_decorators.get(service, with_http_circuit_breaker)
329 |     return decorator_func(use_fallback=use_fallback, **breaker_kwargs)
330 | 
```
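
A hedged usage sketch mirroring the docstring examples above. The availability of `yfinance`/`requests` and the fallback behavior of the underlying breakers (defined in `circuit_breaker_services`, not shown on this page) are assumptions. Note that `with_http_circuit_breaker` injects `timeout` as a keyword, so the wrapped function must accept it.

```python
# Hypothetical usage sketch mirroring the docstrings above; the underlying
# breakers live in circuit_breaker_services (not shown on this page).
import pandas as pd
import requests
import yfinance as yf

from maverick_mcp.utils.circuit_breaker_decorators import (
    with_http_circuit_breaker,
    with_stock_data_circuit_breaker,
)


@with_stock_data_circuit_breaker(use_fallback=True)
def get_stock_data(symbol: str, start_date: str, end_date: str) -> pd.DataFrame:
    # Positional (symbol, start, end) is what the wrapper inspects when it
    # routes a failure to stock_data_breaker.fetch_with_fallback.
    return yf.download(symbol, start=start_date, end=end_date)


@with_http_circuit_breaker(timeout=10.0)
def fetch_api_data(url: str, timeout: float = 30.0) -> dict:
    # The decorator overrides the timeout kwarg, so the signature must accept it.
    return requests.get(url, timeout=timeout).json()


df = get_stock_data("AAPL", "2024-01-01", "2024-06-30")
```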

--------------------------------------------------------------------------------
/scripts/load_market_data.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Market data loading script for self-contained Maverick-MCP database.
  4 | 
  5 | This script loads stock and price data from Tiingo API into the self-contained
  6 | mcp_ prefixed tables, making Maverick-MCP completely independent.
  7 | 
  8 | Usage:
  9 |     python scripts/load_market_data.py --symbols AAPL,MSFT,GOOGL
 10 |     python scripts/load_market_data.py --file symbols.txt
 11 |     python scripts/load_market_data.py --sp500  # Load S&P 500 stocks
 12 | """
 13 | 
 14 | import argparse
 15 | import asyncio
 16 | import logging
 17 | import os
 18 | import sys
 19 | from datetime import datetime, timedelta
 20 | from pathlib import Path
 21 | 
 22 | import aiohttp
 23 | import pandas as pd
 24 | 
 25 | # Add parent directory to path for imports
 26 | sys.path.append(str(Path(__file__).parent.parent))
 27 | 
 28 | from maverick_mcp.config.database_self_contained import (
 29 |     SelfContainedDatabaseSession,
 30 |     init_self_contained_database,
 31 | )
 32 | from maverick_mcp.data.models import (
 33 |     Stock,
 34 |     bulk_insert_price_data,
 35 | )
 36 | 
 37 | # Set up logging
 38 | logging.basicConfig(
 39 |     level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
 40 | )
 41 | logger = logging.getLogger("market_data_loader")
 42 | 
 43 | 
 44 | class TiingoDataLoader:
 45 |     """Loads market data from Tiingo API into self-contained database."""
 46 | 
 47 |     def __init__(self, api_token: str | None = None):
 48 |         """
 49 |         Initialize Tiingo data loader.
 50 | 
 51 |         Args:
 52 |             api_token: Tiingo API token. If None, will use TIINGO_API_TOKEN env var
 53 |         """
 54 |         self.api_token = api_token or os.getenv("TIINGO_API_TOKEN")
 55 |         if not self.api_token:
 56 |             raise ValueError("Tiingo API token required. Set TIINGO_API_TOKEN env var.")
 57 | 
 58 |         self.base_url = "https://api.tiingo.com/tiingo"
 59 |         self.session: aiohttp.ClientSession | None = None
 60 | 
 61 |     async def __aenter__(self):
 62 |         """Async context manager entry."""
 63 |         self.session = aiohttp.ClientSession(
 64 |             headers={"Authorization": f"Token {self.api_token}"}
 65 |         )
 66 |         return self
 67 | 
 68 |     async def __aexit__(self, exc_type, exc_val, exc_tb):
 69 |         """Async context manager exit."""
 70 |         if self.session:
 71 |             await self.session.close()
 72 | 
 73 |     async def get_stock_metadata(self, symbol: str) -> dict | None:
 74 |         """
 75 |         Get stock metadata from Tiingo.
 76 | 
 77 |         Args:
 78 |             symbol: Stock ticker symbol
 79 | 
 80 |         Returns:
 81 |             Stock metadata dict or None if not found
 82 |         """
 83 |         url = f"{self.base_url}/daily/{symbol}"
 84 | 
 85 |         try:
 86 |             async with self.session.get(url) as response:
 87 |                 if response.status == 200:
 88 |                     data = await response.json()
 89 |                     return data
 90 |                 elif response.status == 404:
 91 |                     logger.warning(f"Stock {symbol} not found in Tiingo")
 92 |                     return None
 93 |                 else:
 94 |                     logger.error(
 95 |                         f"Error fetching metadata for {symbol}: {response.status}"
 96 |                     )
 97 |                     return None
 98 | 
 99 |         except Exception as e:
100 |             logger.error(f"Exception fetching metadata for {symbol}: {e}")
101 |             return None
102 | 
103 |     async def get_price_data(
104 |         self, symbol: str, start_date: str, end_date: str | None = None
105 |     ) -> pd.DataFrame | None:
106 |         """
107 |         Get historical price data from Tiingo.
108 | 
109 |         Args:
110 |             symbol: Stock ticker symbol
111 |             start_date: Start date in YYYY-MM-DD format
112 |             end_date: End date in YYYY-MM-DD format (default: today)
113 | 
114 |         Returns:
115 |             DataFrame with OHLCV data or None if not found
116 |         """
117 |         if not end_date:
118 |             end_date = datetime.now().strftime("%Y-%m-%d")
119 | 
120 |         url = f"{self.base_url}/daily/{symbol}/prices"
121 |         params = {"startDate": start_date, "endDate": end_date, "format": "json"}
122 | 
123 |         try:
124 |             async with self.session.get(url, params=params) as response:
125 |                 if response.status == 200:
126 |                     data = await response.json()
127 | 
128 |                     if not data:
129 |                         return None
130 | 
131 |                     df = pd.DataFrame(data)
132 | 
133 |                     # Convert date column and set as index
134 |                     df["date"] = pd.to_datetime(df["date"]).dt.date
135 |                     df.set_index("date", inplace=True)
136 | 
137 |                     # Rename columns to match our model
138 |                     column_mapping = {
139 |                         "open": "open",
140 |                         "high": "high",
141 |                         "low": "low",
142 |                         "close": "close",
143 |                         "volume": "volume",
144 |                         "adjOpen": "adj_open",
145 |                         "adjHigh": "adj_high",
146 |                         "adjLow": "adj_low",
147 |                         "adjClose": "adj_close",
148 |                         "adjVolume": "adj_volume",
149 |                     }
150 | 
151 |                     df = df.rename(columns=column_mapping)
152 |                     df["symbol"] = symbol.upper()
153 | 
154 |                     logger.info(f"Loaded {len(df)} price records for {symbol}")
155 |                     return df
156 | 
157 |                 elif response.status == 404:
158 |                     logger.warning(f"Price data for {symbol} not found")
159 |                     return None
160 |                 else:
161 |                     logger.error(
162 |                         f"Error fetching prices for {symbol}: {response.status}"
163 |                     )
164 |                     return None
165 | 
166 |         except Exception as e:
167 |             logger.error(f"Exception fetching prices for {symbol}: {e}")
168 |             return None
169 | 
170 |     async def load_stock_data(self, symbols: list[str]) -> int:
171 |         """
172 |         Load stock metadata and price data for multiple symbols.
173 | 
174 |         Args:
175 |             symbols: List of stock ticker symbols
176 | 
177 |         Returns:
178 |             Number of stocks successfully loaded
179 |         """
180 |         loaded_count = 0
181 | 
182 |         with SelfContainedDatabaseSession() as session:
183 |             for symbol in symbols:
184 |                 logger.info(f"Loading data for {symbol}...")
185 | 
186 |                 # Get stock metadata
187 |                 metadata = await self.get_stock_metadata(symbol)
188 |                 if not metadata:
189 |                     continue
190 | 
191 |                 # Create or update stock record
192 |                 Stock.get_or_create(
193 |                     session,
194 |                     symbol,
195 |                     company_name=metadata.get("name", ""),
196 |                     description=metadata.get("description", ""),
197 |                     exchange=metadata.get("exchangeCode", ""),
198 |                     currency="USD",  # Tiingo uses USD
199 |                 )
200 | 
201 |                 # Load price data (last 2 years)
202 |                 start_date = (datetime.now() - timedelta(days=730)).strftime("%Y-%m-%d")
203 |                 price_df = await self.get_price_data(symbol, start_date)
204 | 
205 |                 if price_df is not None and not price_df.empty:
206 |                     # Insert price data
207 |                     records_inserted = bulk_insert_price_data(session, symbol, price_df)
208 |                     logger.info(
209 |                         f"Inserted {records_inserted} price records for {symbol}"
210 |                     )
211 | 
212 |                 loaded_count += 1
213 | 
214 |                 # Rate limiting: Tiingo allows 2400 requests/hour,
215 |                 await asyncio.sleep(1.5)  # i.e. one request every 1.5s (3600s / 1.5s = 2400)
216 | 
217 |         return loaded_count
218 | 
219 | 
220 | def get_sp500_symbols() -> list[str]:
221 |     """Get S&P 500 stock symbols from a predefined list."""
222 |     # Top 100 S&P 500 stocks for initial loading
223 |     return [
224 |         "AAPL",
225 |         "MSFT",
226 |         "GOOGL",
227 |         "AMZN",
228 |         "TSLA",
229 |         "META",
230 |         "NVDA",
231 |         "BRK.B",
232 |         "UNH",
233 |         "JNJ",
234 |         "V",
235 |         "PG",
236 |         "JPM",
237 |         "HD",
238 |         "CVX",
239 |         "MA",
240 |         "PFE",
241 |         "ABBV",
242 |         "BAC",
243 |         "KO",
244 |         "AVGO",
245 |         "PEP",
246 |         "TMO",
247 |         "COST",
248 |         "WMT",
249 |         "DIS",
250 |         "ABT",
251 |         "ACN",
252 |         "NFLX",
253 |         "ADBE",
254 |         "CRM",
255 |         "VZ",
256 |         "DHR",
257 |         "INTC",
258 |         "NKE",
259 |         "T",
260 |         "TXN",
261 |         "BMY",
262 |         "QCOM",
263 |         "PM",
264 |         "UPS",
265 |         "HON",
266 |         "ORCL",
267 |         "WFC",
268 |         "LOW",
269 |         "LIN",
270 |         "AMD",
271 |         "SBUX",
272 |         "IBM",
273 |         "GE",
274 |         "CAT",
275 |         "MDT",
276 |         "BA",
277 |         "AXP",
278 |         "GILD",
279 |         "RTX",
280 |         "GS",
281 |         "BLK",
282 |         "MMM",
283 |         "CVS",
284 |         "ISRG",
285 |         "NOW",
286 |         "AMT",
287 |         "SPGI",
288 |         "PLD",
289 |         "SYK",
290 |         "TJX",
291 |         "MDLZ",
292 |         "ZTS",
293 |         "MO",
294 |         "CB",
295 |         "CI",
296 |         "PYPL",
297 |         "SO",
298 |         "EL",
299 |         "DE",
300 |         "REGN",
301 |         "CCI",
302 |         "USB",
303 |         "BSX",
304 |         "DUK",
305 |         "AON",
306 |         "CSX",
307 |         "CL",
308 |         "ITW",
309 |         "PNC",
310 |         "FCX",
311 |         "SCHW",
312 |         "EMR",
313 |         "NSC",
314 |         "GM",
315 |         "FDX",
316 |         "MU",
317 |         "BDX",
318 |         "TGT",
319 |         "EOG",
320 |         "SLB",
321 |         "ICE",
322 |         "EQIX",
323 |         "APD",
324 |     ]
325 | 
326 | 
327 | def load_symbols_from_file(file_path: str) -> list[str]:
328 |     """
329 |     Load stock symbols from a text file.
330 | 
331 |     Args:
332 |         file_path: Path to file containing stock symbols (one per line)
333 | 
334 |     Returns:
335 |         List of stock symbols
336 |     """
337 |     symbols = []
338 |     try:
339 |         with open(file_path) as f:
340 |             for line in f:
341 |                 symbol = line.strip().upper()
342 |                 if symbol and not symbol.startswith("#"):
343 |                     symbols.append(symbol)
344 |         logger.info(f"Loaded {len(symbols)} symbols from {file_path}")
345 |     except FileNotFoundError:
346 |         logger.error(f"Symbol file not found: {file_path}")
347 |         sys.exit(1)
348 |     except Exception as e:
349 |         logger.error(f"Error reading symbol file {file_path}: {e}")
350 |         sys.exit(1)
351 | 
352 |     return symbols
353 | 
354 | 
355 | async def main():
356 |     """Main function to load market data."""
357 |     parser = argparse.ArgumentParser(
358 |         description="Load market data into self-contained database"
359 |     )
360 |     parser.add_argument(
361 |         "--symbols",
362 |         type=str,
363 |         help="Comma-separated list of stock symbols (e.g., AAPL,MSFT,GOOGL)",
364 |     )
365 |     parser.add_argument(
366 |         "--file", type=str, help="Path to file containing stock symbols (one per line)"
367 |     )
368 |     parser.add_argument(
369 |         "--sp500", action="store_true", help="Load top 100 S&P 500 stocks"
370 |     )
371 |     parser.add_argument(
372 |         "--create-tables",
373 |         action="store_true",
374 |         help="Create database tables if they don't exist",
375 |     )
376 |     parser.add_argument("--database-url", type=str, help="Override database URL")
377 | 
378 |     args = parser.parse_args()
379 | 
380 |     # Determine symbols to load
381 |     symbols = []
382 |     if args.symbols:
383 |         symbols = [s.strip().upper() for s in args.symbols.split(",")]
384 |     elif args.file:
385 |         symbols = load_symbols_from_file(args.file)
386 |     elif args.sp500:
387 |         symbols = get_sp500_symbols()
388 |     else:
389 |         parser.print_help()
390 |         sys.exit(1)
391 | 
392 |     logger.info(f"Will load data for {len(symbols)} symbols")
393 | 
394 |     # Initialize self-contained database
395 |     try:
396 |         init_self_contained_database(
397 |             database_url=args.database_url, create_tables=args.create_tables
398 |         )
399 |         logger.info("Self-contained database initialized")
400 |     except Exception as e:
401 |         logger.error(f"Database initialization failed: {e}")
402 |         sys.exit(1)
403 | 
404 |     # Load market data
405 |     try:
406 |         async with TiingoDataLoader() as loader:
407 |             loaded_count = await loader.load_stock_data(symbols)
408 |             logger.info(
409 |                 f"Successfully loaded data for {loaded_count}/{len(symbols)} stocks"
410 |             )
411 | 
412 |     except Exception as e:
413 |         logger.error(f"Data loading failed: {e}")
414 |         sys.exit(1)
415 | 
416 |     # Display database stats
417 |     from maverick_mcp.config.database_self_contained import get_self_contained_db_config
418 | 
419 |     db_config = get_self_contained_db_config()
420 |     stats = db_config.get_database_stats()
421 | 
422 |     print("\n📊 Database Statistics:")
423 |     print(f"   Total Records: {stats['total_records']}")
424 |     for table, count in stats["tables"].items():
425 |         print(f"   {table}: {count}")
426 | 
427 |     print("\n✅ Market data loading completed successfully!")
428 | 
429 | 
430 | if __name__ == "__main__":
431 |     asyncio.run(main())
432 | 
```
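
For quick experiments it can be handy to drive the loader from Python rather than the CLI. Below is a minimal sketch (an illustration, not part of the repo): it assumes `TIINGO_API_TOKEN` is exported and that you run from the repository root so both `scripts.load_market_data` and `maverick_mcp` are importable.

```python
# Hypothetical driver; mirrors what main() does, minus argparse.
import asyncio

from maverick_mcp.config.database_self_contained import init_self_contained_database
from scripts.load_market_data import TiingoDataLoader


async def load_watchlist(symbols: list[str]) -> None:
    # Same bootstrap the CLI performs: initialize the self-contained
    # mcp_-prefixed tables, creating them on first run.
    init_self_contained_database(create_tables=True)

    # The async context manager opens (and later closes) the aiohttp session.
    async with TiingoDataLoader() as loader:
        loaded = await loader.load_stock_data(symbols)
        print(f"Loaded {loaded}/{len(symbols)} symbols")


if __name__ == "__main__":
    asyncio.run(load_watchlist(["AAPL", "MSFT", "GOOGL"]))
```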

--------------------------------------------------------------------------------
/tests/test_orchestration_integration.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Integration tests for the orchestration system.
  3 | 
  4 | Tests the end-to-end functionality of SupervisorAgent and DeepResearchAgent
  5 | to verify the orchestration system works correctly.
  6 | """
  7 | 
  8 | from typing import Any
  9 | from unittest.mock import AsyncMock, MagicMock
 10 | 
 11 | import pytest
 12 | from langchain_core.callbacks.manager import (
 13 |     AsyncCallbackManagerForLLMRun,
 14 |     CallbackManagerForLLMRun,
 15 | )
 16 | from langchain_core.language_models.chat_models import BaseChatModel
 17 | from langchain_core.messages import AIMessage, BaseMessage
 18 | from langchain_core.outputs import ChatGeneration, ChatResult
 19 | 
 20 | from maverick_mcp.agents.base import INVESTOR_PERSONAS, PersonaAwareAgent
 21 | from maverick_mcp.agents.deep_research import DeepResearchAgent
 22 | from maverick_mcp.agents.supervisor import ROUTING_MATRIX, SupervisorAgent
 23 | 
 24 | 
 25 | class MockChatModel(BaseChatModel):
 26 |     """Mock chat model for testing that extends BaseChatModel properly."""
 27 | 
 28 |     # BaseChatModel is a pydantic model, so state must be declared as fields;
 29 |     # assigning undeclared attributes in __init__ raises "object has no field".
 30 |     responses: list[str]
 31 |     call_count: int = 0
 32 | 
 33 |     @property
 34 |     def _llm_type(self) -> str:
 35 |         return "mock"
 36 | 
 37 |     def _generate(
 38 |         self,
 39 |         messages: list[BaseMessage],
 40 |         stop: list[str] | None = None,
 41 |         run_manager: CallbackManagerForLLMRun | None = None,
 42 |         **kwargs: Any,
 43 |     ) -> ChatResult:
 44 |         response = self.responses[self.call_count % len(self.responses)]
 45 |         self.call_count += 1
 46 |         message = AIMessage(content=response)
 47 |         return ChatResult(generations=[ChatGeneration(message=message)])
 48 | 
 49 |     async def _agenerate(
 50 |         self,
 51 |         messages: list[BaseMessage],
 52 |         stop: list[str] | None = None,
 53 |         run_manager: AsyncCallbackManagerForLLMRun | None = None,
 54 |         **kwargs: Any,
 55 |     ) -> ChatResult:
 56 |         return self._generate(messages, stop, **kwargs)
 57 | 
 58 | 
 59 | class TestOrchestrationSystemIntegration:
 60 |     """Test the complete orchestration system integration."""
 61 | 
 62 |     @pytest.fixture
 63 |     def mock_llm(self):
 64 |         """Create a mock LLM for testing."""
 65 |         llm = MagicMock()
 66 |         llm.ainvoke = AsyncMock()
 67 |         llm.bind_tools = MagicMock(return_value=llm)
 68 |         llm.invoke = MagicMock()
 69 |         return llm
 70 | 
 71 |     @pytest.fixture
 72 |     def mock_market_agent(self):
 73 |         """Create a mock market analysis agent."""
 74 |         agent = MagicMock(spec=PersonaAwareAgent)
 75 |         agent.analyze_market = AsyncMock(
 76 |             return_value={
 77 |                 "status": "success",
 78 |                 "summary": "Market analysis completed",
 79 |                 "screened_symbols": ["AAPL", "MSFT", "NVDA"],
 80 |                 "confidence": 0.85,
 81 |                 "execution_time_ms": 1500,
 82 |             }
 83 |         )
 84 |         return agent
 85 | 
 86 |     def test_agent_imports_successful(self):
 87 |         """Test that all agent classes can be imported successfully."""
 88 |         # These imports should not raise exceptions
 89 |         assert SupervisorAgent is not None
 90 |         assert DeepResearchAgent is not None
 91 |         assert ROUTING_MATRIX is not None
 92 |         assert INVESTOR_PERSONAS is not None
 93 | 
 94 |     def test_routing_matrix_structure(self):
 95 |         """Test that routing matrix has expected structure."""
 96 |         assert isinstance(ROUTING_MATRIX, dict)
 97 |         assert len(ROUTING_MATRIX) > 0
 98 | 
 99 |         # Check each routing entry has required fields
100 |         for _category, routing_info in ROUTING_MATRIX.items():
101 |             assert "primary" in routing_info
102 |             assert isinstance(routing_info["primary"], str)
103 |             assert "agents" in routing_info
104 |             assert isinstance(routing_info["agents"], list)
105 | 
106 |     def test_personas_structure(self):
107 |         """Test that investor personas have expected structure."""
108 |         expected_personas = ["conservative", "moderate", "aggressive"]
109 | 
110 |         for persona_name in expected_personas:
111 |             assert persona_name in INVESTOR_PERSONAS
112 |             persona = INVESTOR_PERSONAS[persona_name]
113 | 
114 |             # Check persona has required attributes
115 |             assert hasattr(persona, "name")
116 |             assert hasattr(persona, "risk_tolerance")
117 |             assert hasattr(persona, "position_size_max")
118 | 
119 |     @pytest.mark.asyncio
120 |     async def test_supervisor_agent_instantiation(self, mock_llm, mock_market_agent):
121 |         """Test SupervisorAgent can be instantiated properly."""
122 |         agents = {"market": mock_market_agent}
123 | 
124 |         supervisor = SupervisorAgent(
125 |             llm=mock_llm, agents=agents, persona="moderate", ttl_hours=1
126 |         )
127 | 
128 |         assert supervisor is not None
129 |         assert supervisor.persona.name == "Moderate"
130 |         assert "market" in supervisor.agents
131 | 
132 |     @pytest.mark.asyncio
133 |     async def test_deep_research_agent_instantiation(self, mock_llm):
134 |         """Test DeepResearchAgent can be instantiated properly."""
135 |         # Test without API keys (should still work)
136 |         research_agent = DeepResearchAgent(
137 |             llm=mock_llm,
138 |             persona="moderate",
139 |             ttl_hours=1,
140 |             exa_api_key=None,
141 |         )
142 | 
143 |         assert research_agent is not None
144 |         assert research_agent.persona.name == "Moderate"
145 | 
146 |     @pytest.mark.asyncio
147 |     async def test_deep_research_agent_with_api_keys(self, mock_llm):
148 |         """Test DeepResearchAgent instantiation with API keys."""
149 |         # Test with mock API keys
150 |         research_agent = DeepResearchAgent(
151 |             llm=mock_llm,
152 |             persona="aggressive",
153 |             ttl_hours=2,
154 |             exa_api_key="test-exa-key",
155 |         )
156 | 
157 |         assert research_agent is not None
158 |         assert research_agent.persona.name == "Aggressive"
159 |         # Should have initialized search providers
160 |         assert hasattr(research_agent, "search_providers")
161 | 
162 |     @pytest.mark.asyncio
163 |     async def test_supervisor_with_research_agent(self, mock_llm, mock_market_agent):
164 |         """Test supervisor working with research agent."""
165 |         # Create research agent
166 |         research_agent = DeepResearchAgent(
167 |             llm=mock_llm, persona="moderate", ttl_hours=1
168 |         )
169 | 
170 |         # Create supervisor with both agents
171 |         agents = {"market": mock_market_agent, "research": research_agent}
172 | 
173 |         supervisor = SupervisorAgent(
174 |             llm=mock_llm, agents=agents, persona="moderate", ttl_hours=1
175 |         )
176 | 
177 |         assert len(supervisor.agents) == 2
178 |         assert "market" in supervisor.agents
179 |         assert "research" in supervisor.agents
180 | 
181 |     def test_configuration_completeness(self):
182 |         """Test that configuration system is complete."""
183 |         from maverick_mcp.config.settings import get_settings
184 | 
185 |         settings = get_settings()
186 | 
187 |         # Check that research settings exist
188 |         assert hasattr(settings, "research")
189 |         assert hasattr(settings.research, "exa_api_key")
190 |         assert hasattr(settings.research, "tavily_api_key")
191 | 
192 |         # Check that data limits exist
193 |         assert hasattr(settings, "data_limits")
194 |         assert hasattr(settings.data_limits, "max_agent_iterations")
195 | 
196 |     def test_exception_hierarchy(self):
197 |         """Test that exception hierarchy is properly set up."""
198 |         from maverick_mcp.exceptions import (
199 |             AgentExecutionError,
200 |             MaverickException,
201 |             ResearchError,
202 |             WebSearchError,
203 |         )
204 | 
205 |         # Test exception hierarchy
206 |         assert issubclass(AgentExecutionError, MaverickException)
207 |         assert issubclass(ResearchError, MaverickException)
208 |         assert issubclass(WebSearchError, ResearchError)
209 | 
210 |         # Test exception instantiation
211 |         error = AgentExecutionError("Test error")
212 |         assert error.message == "Test error"
213 |         assert error.error_code == "AGENT_EXECUTION_ERROR"
214 | 
215 |     def test_state_classes_structure(self):
216 |         """Test that state classes have proper structure."""
217 |         from maverick_mcp.workflows.state import DeepResearchState, SupervisorState
218 | 
219 |         # These should be TypedDict classes
220 |         assert hasattr(SupervisorState, "__annotations__")
221 |         assert hasattr(DeepResearchState, "__annotations__")
222 | 
223 |         # Check key fields exist
224 |         supervisor_fields = SupervisorState.__annotations__.keys()
225 |         assert "query_classification" in supervisor_fields
226 |         assert "agent_results" in supervisor_fields
227 |         assert "workflow_status" in supervisor_fields
228 | 
229 |         research_fields = DeepResearchState.__annotations__.keys()
230 |         assert "research_topic" in research_fields
231 |         assert "search_results" in research_fields
232 |         assert "research_findings" in research_fields
233 | 
234 |     @pytest.mark.asyncio
235 |     async def test_circuit_breaker_integration(self):
236 |         """Test that circuit breaker integration works."""
237 |         from maverick_mcp.agents.circuit_breaker import circuit_breaker, circuit_manager
238 | 
239 |         # Test circuit breaker manager
240 |         assert circuit_manager is not None
241 | 
242 |         # Test circuit breaker decorator
243 |         @circuit_breaker("test_breaker", failure_threshold=2)
244 |         async def test_function():
245 |             return "success"
246 | 
247 |         result = await test_function()
248 |         assert result == "success"
249 | 
250 |     def test_mcp_router_structure(self):
251 |         """Test that MCP router is properly structured."""
252 |         from maverick_mcp.api.routers.agents import agents_router
253 | 
254 |         # Should be a FastMCP instance
255 |         assert agents_router is not None
256 |         assert hasattr(agents_router, "name")
257 |         assert agents_router.name == "Financial_Analysis_Agents"
258 | 
259 |     def test_agent_factory_function(self):
260 |         """Test agent factory function structure."""
261 |         from maverick_mcp.api.routers.agents import get_or_create_agent
262 | 
263 |         # Should be a callable function
264 |         assert callable(get_or_create_agent)
265 | 
266 |         # Test with invalid agent type
267 |         with pytest.raises(ValueError, match="Unknown agent type"):
268 |             get_or_create_agent("invalid_type", "moderate")
269 | 
270 | 
271 | class TestOrchestrationWorkflow:
272 |     """Test orchestration workflow components."""
273 | 
274 |     def test_persona_compatibility(self):
275 |         """Test that all agents support all personas."""
276 |         expected_personas = ["conservative", "moderate", "aggressive"]
277 | 
278 |         for persona_name in expected_personas:
279 |             assert persona_name in INVESTOR_PERSONAS
280 | 
281 |             # All personas should have required attributes
282 |             persona = INVESTOR_PERSONAS[persona_name]
283 |             assert hasattr(persona, "name")
284 |             assert hasattr(persona, "risk_tolerance")
285 |             assert hasattr(persona, "position_size_max")
286 |             assert hasattr(persona, "stop_loss_multiplier")
287 | 
288 |     def test_routing_categories_completeness(self):
289 |         """Test that routing covers expected analysis categories."""
290 |         expected_categories = {
291 |             "market_screening",
292 |             "company_research",
293 |             "technical_analysis",
294 |             "sentiment_analysis",
295 |         }
296 | 
297 |         routing_categories = set(ROUTING_MATRIX.keys())
298 | 
299 |         # Should contain the key categories we care about
300 |         for category in expected_categories:
301 |             if category in routing_categories:
302 |                 routing_info = ROUTING_MATRIX[category]
303 |                 assert "primary" in routing_info
304 |                 assert "agents" in routing_info
305 | 
306 |     @pytest.mark.asyncio
307 |     async def test_end_to_end_mock_workflow(self):
308 |         """Test a complete mock workflow from query to response."""
309 |         # Create mock LLM for testing
310 |         fake_llm = MockChatModel(
311 |             responses=[
312 |                 "Mock analysis complete",
313 |                 "Mock research findings",
314 |                 "Mock synthesis result",
315 |             ]
316 |         )
317 | 
318 |         # Create mock agents
319 |         mock_market_agent = MagicMock()
320 |         mock_market_agent.analyze_market = AsyncMock(
321 |             return_value={
322 |                 "status": "success",
323 |                 "summary": "Market screening complete",
324 |                 "confidence": 0.8,
325 |             }
326 |         )
327 | 
328 |         # Create supervisor with mock agents
329 |         supervisor = SupervisorAgent(
330 |             llm=fake_llm, agents={"market": mock_market_agent}, persona="moderate"
331 |         )
332 | 
333 |         # This would normally call the orchestration method
334 |         # For now, just verify the supervisor was created properly
335 |         assert supervisor is not None
336 |         assert len(supervisor.agents) == 1
337 | 
338 | 
339 | if __name__ == "__main__":
340 |     # Run tests
341 |     pytest.main([__file__, "-v", "--tb=short"])
342 | 
```
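
As a standalone sanity check on the mock itself, the sketch below (an illustration, not part of the test file) exercises `MockChatModel` through the public `invoke` API to show its deterministic round-robin responses; it only needs `langchain-core` installed, and the import path is an assumption about how the tests package is laid out.

```python
from langchain_core.messages import HumanMessage

# Assumes the test module is importable as a package; adjust the path as needed.
from tests.test_orchestration_integration import MockChatModel

# Responses cycle modulo len(responses), so the fourth call wraps to "first".
llm = MockChatModel(responses=["first", "second", "third"])

for expected in ["first", "second", "third", "first"]:
    result = llm.invoke([HumanMessage(content="ping")])
    assert result.content == expected
```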