This is page 10 of 39. Use http://codebase.md/wshobson/maverick-mcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .dockerignore
├── .env.example
├── .github
│   ├── dependabot.yml
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── config.yml
│   │   ├── feature_request.md
│   │   ├── question.md
│   │   └── security_report.md
│   ├── pull_request_template.md
│   └── workflows
│       ├── claude-code-review.yml
│       └── claude.yml
├── .gitignore
├── .python-version
├── .vscode
│   ├── launch.json
│   └── settings.json
├── alembic
│   ├── env.py
│   ├── script.py.mako
│   └── versions
│       ├── 001_initial_schema.py
│       ├── 003_add_performance_indexes.py
│       ├── 006_rename_metadata_columns.py
│       ├── 008_performance_optimization_indexes.py
│       ├── 009_rename_to_supply_demand.py
│       ├── 010_self_contained_schema.py
│       ├── 011_remove_proprietary_terms.py
│       ├── 013_add_backtest_persistence_models.py
│       ├── 014_add_portfolio_models.py
│       ├── 08e3945a0c93_merge_heads.py
│       ├── 9374a5c9b679_merge_heads_for_testing.py
│       ├── abf9b9afb134_merge_multiple_heads.py
│       ├── adda6d3fd84b_merge_proprietary_terms_removal_with_.py
│       ├── e0c75b0bdadb_fix_financial_data_precision_only.py
│       ├── f0696e2cac15_add_essential_performance_indexes.py
│       └── fix_database_integrity_issues.py
├── alembic.ini
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── DATABASE_SETUP.md
├── docker-compose.override.yml.example
├── docker-compose.yml
├── Dockerfile
├── docs
│   ├── api
│   │   └── backtesting.md
│   ├── BACKTESTING.md
│   ├── COST_BASIS_SPECIFICATION.md
│   ├── deep_research_agent.md
│   ├── exa_research_testing_strategy.md
│   ├── PORTFOLIO_PERSONALIZATION_PLAN.md
│   ├── PORTFOLIO.md
│   ├── SETUP_SELF_CONTAINED.md
│   └── speed_testing_framework.md
├── examples
│   ├── complete_speed_validation.py
│   ├── deep_research_integration.py
│   ├── llm_optimization_example.py
│   ├── llm_speed_demo.py
│   ├── monitoring_example.py
│   ├── parallel_research_example.py
│   ├── speed_optimization_demo.py
│   └── timeout_fix_demonstration.py
├── LICENSE
├── Makefile
├── MANIFEST.in
├── maverick_mcp
│   ├── __init__.py
│   ├── agents
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── circuit_breaker.py
│   │   ├── deep_research.py
│   │   ├── market_analysis.py
│   │   ├── optimized_research.py
│   │   ├── supervisor.py
│   │   └── technical_analysis.py
│   ├── api
│   │   ├── __init__.py
│   │   ├── api_server.py
│   │   ├── connection_manager.py
│   │   ├── dependencies
│   │   │   ├── __init__.py
│   │   │   ├── stock_analysis.py
│   │   │   └── technical_analysis.py
│   │   ├── error_handling.py
│   │   ├── inspector_compatible_sse.py
│   │   ├── inspector_sse.py
│   │   ├── middleware
│   │   │   ├── error_handling.py
│   │   │   ├── mcp_logging.py
│   │   │   ├── rate_limiting_enhanced.py
│   │   │   └── security.py
│   │   ├── openapi_config.py
│   │   ├── routers
│   │   │   ├── __init__.py
│   │   │   ├── agents.py
│   │   │   ├── backtesting.py
│   │   │   ├── data_enhanced.py
│   │   │   ├── data.py
│   │   │   ├── health_enhanced.py
│   │   │   ├── health_tools.py
│   │   │   ├── health.py
│   │   │   ├── intelligent_backtesting.py
│   │   │   ├── introspection.py
│   │   │   ├── mcp_prompts.py
│   │   │   ├── monitoring.py
│   │   │   ├── news_sentiment_enhanced.py
│   │   │   ├── performance.py
│   │   │   ├── portfolio.py
│   │   │   ├── research.py
│   │   │   ├── screening_ddd.py
│   │   │   ├── screening_parallel.py
│   │   │   ├── screening.py
│   │   │   ├── technical_ddd.py
│   │   │   ├── technical_enhanced.py
│   │   │   ├── technical.py
│   │   │   └── tool_registry.py
│   │   ├── server.py
│   │   ├── services
│   │   │   ├── __init__.py
│   │   │   ├── base_service.py
│   │   │   ├── market_service.py
│   │   │   ├── portfolio_service.py
│   │   │   ├── prompt_service.py
│   │   │   └── resource_service.py
│   │   ├── simple_sse.py
│   │   └── utils
│   │       ├── __init__.py
│   │       ├── insomnia_export.py
│   │       └── postman_export.py
│   ├── application
│   │   ├── __init__.py
│   │   ├── commands
│   │   │   └── __init__.py
│   │   ├── dto
│   │   │   ├── __init__.py
│   │   │   └── technical_analysis_dto.py
│   │   ├── queries
│   │   │   ├── __init__.py
│   │   │   └── get_technical_analysis.py
│   │   └── screening
│   │       ├── __init__.py
│   │       ├── dtos.py
│   │       └── queries.py
│   ├── backtesting
│   │   ├── __init__.py
│   │   ├── ab_testing.py
│   │   ├── analysis.py
│   │   ├── batch_processing_stub.py
│   │   ├── batch_processing.py
│   │   ├── model_manager.py
│   │   ├── optimization.py
│   │   ├── persistence.py
│   │   ├── retraining_pipeline.py
│   │   ├── strategies
│   │   │   ├── __init__.py
│   │   │   ├── base.py
│   │   │   ├── ml
│   │   │   │   ├── __init__.py
│   │   │   │   ├── adaptive.py
│   │   │   │   ├── ensemble.py
│   │   │   │   ├── feature_engineering.py
│   │   │   │   └── regime_aware.py
│   │   │   ├── ml_strategies.py
│   │   │   ├── parser.py
│   │   │   └── templates.py
│   │   ├── strategy_executor.py
│   │   ├── vectorbt_engine.py
│   │   └── visualization.py
│   ├── config
│   │   ├── __init__.py
│   │   ├── constants.py
│   │   ├── database_self_contained.py
│   │   ├── database.py
│   │   ├── llm_optimization_config.py
│   │   ├── logging_settings.py
│   │   ├── plotly_config.py
│   │   ├── security_utils.py
│   │   ├── security.py
│   │   ├── settings.py
│   │   ├── technical_constants.py
│   │   ├── tool_estimation.py
│   │   └── validation.py
│   ├── core
│   │   ├── __init__.py
│   │   ├── technical_analysis.py
│   │   └── visualization.py
│   ├── data
│   │   ├── __init__.py
│   │   ├── cache_manager.py
│   │   ├── cache.py
│   │   ├── django_adapter.py
│   │   ├── health.py
│   │   ├── models.py
│   │   ├── performance.py
│   │   ├── session_management.py
│   │   └── validation.py
│   ├── database
│   │   ├── __init__.py
│   │   ├── base.py
│   │   └── optimization.py
│   ├── dependencies.py
│   ├── domain
│   │   ├── __init__.py
│   │   ├── entities
│   │   │   ├── __init__.py
│   │   │   └── stock_analysis.py
│   │   ├── events
│   │   │   └── __init__.py
│   │   ├── portfolio.py
│   │   ├── screening
│   │   │   ├── __init__.py
│   │   │   ├── entities.py
│   │   │   ├── services.py
│   │   │   └── value_objects.py
│   │   ├── services
│   │   │   ├── __init__.py
│   │   │   └── technical_analysis_service.py
│   │   ├── stock_analysis
│   │   │   ├── __init__.py
│   │   │   └── stock_analysis_service.py
│   │   └── value_objects
│   │       ├── __init__.py
│   │       └── technical_indicators.py
│   ├── exceptions.py
│   ├── infrastructure
│   │   ├── __init__.py
│   │   ├── cache
│   │   │   └── __init__.py
│   │   ├── caching
│   │   │   ├── __init__.py
│   │   │   └── cache_management_service.py
│   │   ├── connection_manager.py
│   │   ├── data_fetching
│   │   │   ├── __init__.py
│   │   │   └── stock_data_service.py
│   │   ├── health
│   │   │   ├── __init__.py
│   │   │   └── health_checker.py
│   │   ├── persistence
│   │   │   ├── __init__.py
│   │   │   └── stock_repository.py
│   │   ├── providers
│   │   │   └── __init__.py
│   │   ├── screening
│   │   │   ├── __init__.py
│   │   │   └── repositories.py
│   │   └── sse_optimizer.py
│   ├── langchain_tools
│   │   ├── __init__.py
│   │   ├── adapters.py
│   │   └── registry.py
│   ├── logging_config.py
│   ├── memory
│   │   ├── __init__.py
│   │   └── stores.py
│   ├── monitoring
│   │   ├── __init__.py
│   │   ├── health_check.py
│   │   ├── health_monitor.py
│   │   ├── integration_example.py
│   │   ├── metrics.py
│   │   ├── middleware.py
│   │   └── status_dashboard.py
│   ├── providers
│   │   ├── __init__.py
│   │   ├── dependencies.py
│   │   ├── factories
│   │   │   ├── __init__.py
│   │   │   ├── config_factory.py
│   │   │   └── provider_factory.py
│   │   ├── implementations
│   │   │   ├── __init__.py
│   │   │   ├── cache_adapter.py
│   │   │   ├── macro_data_adapter.py
│   │   │   ├── market_data_adapter.py
│   │   │   ├── persistence_adapter.py
│   │   │   └── stock_data_adapter.py
│   │   ├── interfaces
│   │   │   ├── __init__.py
│   │   │   ├── cache.py
│   │   │   ├── config.py
│   │   │   ├── macro_data.py
│   │   │   ├── market_data.py
│   │   │   ├── persistence.py
│   │   │   └── stock_data.py
│   │   ├── llm_factory.py
│   │   ├── macro_data.py
│   │   ├── market_data.py
│   │   ├── mocks
│   │   │   ├── __init__.py
│   │   │   ├── mock_cache.py
│   │   │   ├── mock_config.py
│   │   │   ├── mock_macro_data.py
│   │   │   ├── mock_market_data.py
│   │   │   ├── mock_persistence.py
│   │   │   └── mock_stock_data.py
│   │   ├── openrouter_provider.py
│   │   ├── optimized_screening.py
│   │   ├── optimized_stock_data.py
│   │   └── stock_data.py
│   ├── README.md
│   ├── tests
│   │   ├── __init__.py
│   │   ├── README_INMEMORY_TESTS.md
│   │   ├── test_cache_debug.py
│   │   ├── test_fixes_validation.py
│   │   ├── test_in_memory_routers.py
│   │   ├── test_in_memory_server.py
│   │   ├── test_macro_data_provider.py
│   │   ├── test_mailgun_email.py
│   │   ├── test_market_calendar_caching.py
│   │   ├── test_mcp_tool_fixes_pytest.py
│   │   ├── test_mcp_tool_fixes.py
│   │   ├── test_mcp_tools.py
│   │   ├── test_models_functional.py
│   │   ├── test_server.py
│   │   ├── test_stock_data_enhanced.py
│   │   ├── test_stock_data_provider.py
│   │   └── test_technical_analysis.py
│   ├── tools
│   │   ├── __init__.py
│   │   ├── performance_monitoring.py
│   │   ├── portfolio_manager.py
│   │   ├── risk_management.py
│   │   └── sentiment_analysis.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── agent_errors.py
│   │   ├── batch_processing.py
│   │   ├── cache_warmer.py
│   │   ├── circuit_breaker_decorators.py
│   │   ├── circuit_breaker_services.py
│   │   ├── circuit_breaker.py
│   │   ├── data_chunking.py
│   │   ├── database_monitoring.py
│   │   ├── debug_utils.py
│   │   ├── fallback_strategies.py
│   │   ├── llm_optimization.py
│   │   ├── logging_example.py
│   │   ├── logging_init.py
│   │   ├── logging.py
│   │   ├── mcp_logging.py
│   │   ├── memory_profiler.py
│   │   ├── monitoring_middleware.py
│   │   ├── monitoring.py
│   │   ├── orchestration_logging.py
│   │   ├── parallel_research.py
│   │   ├── parallel_screening.py
│   │   ├── quick_cache.py
│   │   ├── resource_manager.py
│   │   ├── shutdown.py
│   │   ├── stock_helpers.py
│   │   ├── structured_logger.py
│   │   ├── tool_monitoring.py
│   │   ├── tracing.py
│   │   └── yfinance_pool.py
│   ├── validation
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── data.py
│   │   ├── middleware.py
│   │   ├── portfolio.py
│   │   ├── responses.py
│   │   ├── screening.py
│   │   └── technical.py
│   └── workflows
│       ├── __init__.py
│       ├── agents
│       │   ├── __init__.py
│       │   ├── market_analyzer.py
│       │   ├── optimizer_agent.py
│       │   ├── strategy_selector.py
│       │   └── validator_agent.py
│       ├── backtesting_workflow.py
│       └── state.py
├── PLANS.md
├── pyproject.toml
├── pyrightconfig.json
├── README.md
├── scripts
│   ├── dev.sh
│   ├── INSTALLATION_GUIDE.md
│   ├── load_example.py
│   ├── load_market_data.py
│   ├── load_tiingo_data.py
│   ├── migrate_db.py
│   ├── README_TIINGO_LOADER.md
│   ├── requirements_tiingo.txt
│   ├── run_stock_screening.py
│   ├── run-migrations.sh
│   ├── seed_db.py
│   ├── seed_sp500.py
│   ├── setup_database.sh
│   ├── setup_self_contained.py
│   ├── setup_sp500_database.sh
│   ├── test_seeded_data.py
│   ├── test_tiingo_loader.py
│   ├── tiingo_config.py
│   └── validate_setup.py
├── SECURITY.md
├── server.json
├── setup.py
├── tests
│   ├── __init__.py
│   ├── conftest.py
│   ├── core
│   │   └── test_technical_analysis.py
│   ├── data
│   │   └── test_portfolio_models.py
│   ├── domain
│   │   ├── conftest.py
│   │   ├── test_portfolio_entities.py
│   │   └── test_technical_analysis_service.py
│   ├── fixtures
│   │   └── orchestration_fixtures.py
│   ├── integration
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── README.md
│   │   ├── run_integration_tests.sh
│   │   ├── test_api_technical.py
│   │   ├── test_chaos_engineering.py
│   │   ├── test_config_management.py
│   │   ├── test_full_backtest_workflow_advanced.py
│   │   ├── test_full_backtest_workflow.py
│   │   ├── test_high_volume.py
│   │   ├── test_mcp_tools.py
│   │   ├── test_orchestration_complete.py
│   │   ├── test_portfolio_persistence.py
│   │   ├── test_redis_cache.py
│   │   ├── test_security_integration.py.disabled
│   │   └── vcr_setup.py
│   ├── performance
│   │   ├── __init__.py
│   │   ├── test_benchmarks.py
│   │   ├── test_load.py
│   │   ├── test_profiling.py
│   │   └── test_stress.py
│   ├── providers
│   │   └── test_stock_data_simple.py
│   ├── README.md
│   ├── test_agents_router_mcp.py
│   ├── test_backtest_persistence.py
│   ├── test_cache_management_service.py
│   ├── test_cache_serialization.py
│   ├── test_circuit_breaker.py
│   ├── test_database_pool_config_simple.py
│   ├── test_database_pool_config.py
│   ├── test_deep_research_functional.py
│   ├── test_deep_research_integration.py
│   ├── test_deep_research_parallel_execution.py
│   ├── test_error_handling.py
│   ├── test_event_loop_integrity.py
│   ├── test_exa_research_integration.py
│   ├── test_exception_hierarchy.py
│   ├── test_financial_search.py
│   ├── test_graceful_shutdown.py
│   ├── test_integration_simple.py
│   ├── test_langgraph_workflow.py
│   ├── test_market_data_async.py
│   ├── test_market_data_simple.py
│   ├── test_mcp_orchestration_functional.py
│   ├── test_ml_strategies.py
│   ├── test_optimized_research_agent.py
│   ├── test_orchestration_integration.py
│   ├── test_orchestration_logging.py
│   ├── test_orchestration_tools_simple.py
│   ├── test_parallel_research_integration.py
│   ├── test_parallel_research_orchestrator.py
│   ├── test_parallel_research_performance.py
│   ├── test_performance_optimizations.py
│   ├── test_production_validation.py
│   ├── test_provider_architecture.py
│   ├── test_rate_limiting_enhanced.py
│   ├── test_runner_validation.py
│   ├── test_security_comprehensive.py.disabled
│   ├── test_security_cors.py
│   ├── test_security_enhancements.py.disabled
│   ├── test_security_headers.py
│   ├── test_security_penetration.py
│   ├── test_session_management.py
│   ├── test_speed_optimization_validation.py
│   ├── test_stock_analysis_dependencies.py
│   ├── test_stock_analysis_service.py
│   ├── test_stock_data_fetching_service.py
│   ├── test_supervisor_agent.py
│   ├── test_supervisor_functional.py
│   ├── test_tool_estimation_config.py
│   ├── test_visualization.py
│   └── utils
│       ├── test_agent_errors.py
│       ├── test_logging.py
│       ├── test_parallel_screening.py
│       └── test_quick_cache.py
├── tools
│   ├── check_orchestration_config.py
│   ├── experiments
│   │   ├── validation_examples.py
│   │   └── validation_fixed.py
│   ├── fast_dev.sh
│   ├── hot_reload.py
│   ├── quick_test.py
│   └── templates
│       ├── new_router_template.py
│       ├── new_tool_template.py
│       ├── screening_strategy_template.py
│       └── test_template.py
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/examples/deep_research_integration.py:
--------------------------------------------------------------------------------

```python
"""
DeepResearchAgent Integration Example

This example demonstrates how to use the DeepResearchAgent with the SupervisorAgent
for comprehensive financial research capabilities.
"""

import asyncio
import logging

from maverick_mcp.agents.deep_research import DeepResearchAgent
from maverick_mcp.agents.market_analysis import MarketAnalysisAgent
from maverick_mcp.agents.supervisor import SupervisorAgent
from maverick_mcp.agents.technical_analysis import TechnicalAnalysisAgent
from maverick_mcp.providers.llm_factory import get_llm

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


async def example_standalone_research():
    """Example of using DeepResearchAgent standalone."""

    print("🔍 DeepResearchAgent Standalone Example")
    print("=" * 50)

    # Initialize LLM and agent
    llm = get_llm()
    research_agent = DeepResearchAgent(
        llm=llm,
        persona="moderate",  # Conservative, Moderate, Aggressive, Day Trader
        max_sources=30,
        research_depth="comprehensive",
    )

    # Example 1: Company Research
    print("\n📊 Example 1: Comprehensive Company Research")
    print("-" * 40)

    try:
        result = await research_agent.research_company_comprehensive(
            symbol="AAPL",
            session_id="company_research_demo",
            include_competitive_analysis=True,
        )

        print("✅ Research completed for AAPL")
        print(f"📈 Confidence Score: {result.get('research_confidence', 0):.2f}")
        print(f"📰 Sources Analyzed: {result.get('sources_found', 0)}")

        if "persona_insights" in result:
            insights = result["persona_insights"]
            print(
                f"🎯 Persona Insights: {len(insights.get('prioritized_insights', []))} relevant insights"
            )
            print(
                f"⚠️ Risk Assessment: {insights.get('risk_assessment', {}).get('risk_acceptable', 'Unknown')}"
            )
            print(
                f"💡 Recommended Action: {insights.get('recommended_action', 'No recommendation')}"
            )

    except Exception as e:
        print(f"❌ Error in company research: {e}")

    # Example 2: Market Sentiment Analysis
    print("\n📈 Example 2: Market Sentiment Analysis")
    print("-" * 40)

    try:
        result = await research_agent.analyze_market_sentiment(
            topic="artificial intelligence stocks",
            session_id="sentiment_analysis_demo",
            timeframe="1w",
        )

        print("✅ Sentiment analysis completed")

        if "content_analysis" in result:
            analysis = result["content_analysis"]
            consensus = analysis.get("consensus_view", {})
            themes = analysis.get("key_themes", [])

            print(
                f"📊 Overall Sentiment: {consensus.get('direction', 'neutral').title()}"
            )
            print(f"🔒 Confidence: {consensus.get('confidence', 0):.2f}")
            print(f"🔑 Key Themes: {len(themes)} themes identified")

            if themes:
                for i, theme in enumerate(themes[:3], 1):
                    print(
                        f"   {i}. {theme.get('theme', 'Unknown')} (relevance: {theme.get('relevance', 0):.2f})"
                    )

    except Exception as e:
        print(f"❌ Error in sentiment analysis: {e}")

    # Example 3: Custom Research Query
    print("\n🔍 Example 3: Custom Research Query")
    print("-" * 40)

    try:
        result = await research_agent.research_topic(
            query="impact of Federal Reserve interest rate decisions on tech stocks",
            session_id="custom_research_demo",
            research_scope="comprehensive",
            max_sources=25,
            timeframe="1m",
        )

        print("✅ Custom research completed")
        print(f"📊 Research Confidence: {result.get('research_confidence', 0):.2f}")

        if "content_analysis" in result:
            analysis = result["content_analysis"]
            insights = analysis.get("insights", [])
            print(f"💡 Insights Generated: {len(insights)}")

            # Show top 3 insights
            for i, insight in enumerate(insights[:3], 1):
                insight_text = insight.get("insight", "No insight text")[:100] + "..."
                confidence = insight.get("confidence", 0)
                print(f"   {i}. {insight_text} (confidence: {confidence:.2f})")

    except Exception as e:
        print(f"❌ Error in custom research: {e}")


async def example_supervisor_integration():
    """Example of using DeepResearchAgent with SupervisorAgent."""

    print("\n🎛️ SupervisorAgent Integration Example")
    print("=" * 50)

    # Initialize LLM
    llm = get_llm()

    # Create specialized agents
    market_agent = MarketAnalysisAgent(llm=llm, persona="moderate")
    technical_agent = TechnicalAnalysisAgent(llm=llm, persona="moderate")
    research_agent = DeepResearchAgent(llm=llm, persona="moderate")

    # Create supervisor with all agents
    supervisor = SupervisorAgent(
        llm=llm,
        agents={
            "market": market_agent,
            "technical": technical_agent,
            "research": research_agent,  # Key integration point
        },
        persona="moderate",
        routing_strategy="llm_powered",
        synthesis_mode="weighted",
    )

    # Example coordination scenarios
    test_queries = [
        {
            "query": "Should I invest in MSFT? I want comprehensive analysis including recent news and competitive position",
            "expected_routing": ["technical", "research"],
            "description": "Investment decision requiring technical + research",
        },
        {
            "query": "What's the current market sentiment on renewable energy stocks?",
            "expected_routing": ["research"],
            "description": "Pure sentiment analysis research",
        },
        {
            "query": "Find me high-momentum stocks with strong fundamentals",
            "expected_routing": ["market", "research"],
            "description": "Screening + fundamental research",
        },
    ]

    for i, test_case in enumerate(test_queries, 1):
        print(f"\n📋 Test Case {i}: {test_case['description']}")
        print(f"Query: '{test_case['query']}'")
        print(f"Expected Routing: {test_case['expected_routing']}")
        print("-" * 60)

        try:
            result = await supervisor.coordinate_agents(
                query=test_case["query"], session_id=f"supervisor_demo_{i}"
            )

            if result.get("status") == "success":
                agents_used = result.get("agents_used", [])
                confidence = result.get("confidence_score", 0)
                execution_time = result.get("execution_time_ms", 0)
                conflicts_resolved = result.get("conflicts_resolved", 0)

                print("✅ Coordination successful")
                print(f"🤖 Agents Used: {agents_used}")
                print(f"📊 Confidence Score: {confidence:.2f}")
                print(f"⏱️ Execution Time: {execution_time:.0f}ms")
                print(f"🔧 Conflicts Resolved: {conflicts_resolved}")

                # Show synthesis result
                synthesis = (
                    result.get("synthesis", "No synthesis available")[:200] + "..."
                )
                print(f"📝 Synthesis Preview: {synthesis}")

            else:
                print(f"❌ Coordination failed: {result.get('error', 'Unknown error')}")

        except Exception as e:
            print(f"❌ Error in coordination: {e}")


async def example_persona_adaptation():
    """Example showing how research adapts to different investor personas."""

    print("\n👥 Persona Adaptation Example")
    print("=" * 50)

    llm = get_llm()
    personas = ["conservative", "moderate", "aggressive", "day_trader"]
    query = "Should I invest in Tesla (TSLA)?"

    for persona in personas:
        print(f"\n🎭 Persona: {persona.title()}")
        print("-" * 30)

        try:
            research_agent = DeepResearchAgent(
                llm=llm,
                persona=persona,
                max_sources=20,  # Smaller sample for demo
                research_depth="standard",
            )

            result = await research_agent.research_topic(
                query=query,
                session_id=f"persona_demo_{persona}",
                research_scope="standard",
                timeframe="2w",
            )

            if "persona_insights" in result:
                insights = result["persona_insights"]
                risk_assessment = insights.get("risk_assessment", {})
                action = insights.get("recommended_action", "No action")
                alignment = insights.get("persona_alignment_score", 0)

                print(f"📊 Persona Alignment: {alignment:.2f}")
                print(
                    f"⚠️ Risk Acceptable: {risk_assessment.get('risk_acceptable', 'Unknown')}"
                )
                print(f"💡 Recommended Action: {action}")

                # Show risk factors for conservative investors
                if persona == "conservative" and risk_assessment.get("risk_factors"):
                    print(f"🚨 Risk Factors ({len(risk_assessment['risk_factors'])}):")
                    for factor in risk_assessment["risk_factors"][:2]:
                        print(f"   • {factor[:80]}...")

            else:
                print("⚠️ No persona insights available")

        except Exception as e:
            print(f"❌ Error for {persona}: {e}")


async def example_research_tools_mcp():
    """Example showing MCP tool integration."""

    print("\n🔧 MCP Tools Integration Example")
    print("=" * 50)

    # Note: This is a conceptual example - actual MCP tool usage would be through Claude Desktop
    print("📚 Available Research Tools:")
    print("1. comprehensive_research - Deep research on any financial topic")
    print("2. analyze_market_sentiment - Market sentiment analysis")
    print("3. research_company_comprehensive - Company fundamental analysis")
    print("4. search_financial_news - News search and analysis")
    print("5. validate_research_claims - Fact-checking and validation")

    # Example tool configurations for Claude Desktop
    print("\n📋 Claude Desktop Configuration Example:")
    print("```json")
    print("{")
    print('  "mcpServers": {')
    print('    "maverick-research": {')
    print('      "command": "npx",')
    print('      "args": ["-y", "mcp-remote", "http://localhost:8000/research"]')
    print("    }")
    print("  }")
    print("}")
    print("```")

    print("\n💬 Example Claude Desktop Prompts:")
    examples = [
        "Research Tesla's competitive position in the EV market with comprehensive analysis",
        "Analyze current market sentiment for renewable energy stocks over the past week",
        "Perform fundamental analysis of Apple (AAPL) including business model and growth prospects",
        "Search for recent financial news about Federal Reserve policy changes",
        "Validate the claim that 'AI stocks outperformed the market by 20% this quarter'",
    ]

    for i, example in enumerate(examples, 1):
        print(f"{i}. {example}")


async def main():
    """Run all examples."""

    print("🚀 DeepResearchAgent Comprehensive Examples")
    print("=" * 60)
    print("This demo showcases the DeepResearchAgent capabilities")
    print("including standalone usage, SupervisorAgent integration,")
    print("persona adaptation, and MCP tool integration.")
    print("=" * 60)

    try:
        # Run examples
        await example_standalone_research()
        await example_supervisor_integration()
        await example_persona_adaptation()
        await example_research_tools_mcp()

        print("\n✅ All examples completed successfully!")
        print("\n📖 Next Steps:")
        print("1. Set up EXA_API_KEY and TAVILY_API_KEY environment variables")
        print("2. Configure Claude Desktop with the research MCP server")
        print("3. Test with real queries through Claude Desktop")
        print("4. Customize personas and research parameters as needed")

    except Exception as e:
        print(f"\n❌ Demo failed: {e}")
        logger.exception("Demo execution failed")


if __name__ == "__main__":
    # Run the examples
    asyncio.run(main())
```

--------------------------------------------------------------------------------
/tests/test_error_handling.py:
--------------------------------------------------------------------------------

```python
"""
Comprehensive test suite for error handling and recovery mechanisms.
"""

from unittest.mock import Mock, patch

import pytest
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field

from maverick_mcp.agents.market_analysis import MarketAnalysisAgent
from maverick_mcp.exceptions import (
    AgentInitializationError,
    APIRateLimitError,
    CircuitBreakerError,
    PersonaConfigurationError,
    ValidationError,
)
from maverick_mcp.logging_config import CorrelationIDMiddleware, ErrorLogger


# Mock tool input model
class MockToolInput(BaseModel):
    """Input for mock tool."""

    query: str = Field(default="test", description="Test query")


# Create a proper mock tool that LangChain can work with
class MockTool(BaseTool):
    """Mock tool for testing."""

    name: str = "mock_tool"
    description: str = "A mock tool for testing"
    args_schema: type[BaseModel] = MockToolInput

    def _run(self, query: str = "test") -> str:
        """Run the tool."""
        return f"Mock result for: {query}"

    async def _arun(self, query: str = "test") -> str:
        """Run the tool asynchronously."""
        return f"Mock result for: {query}"


# Create a mock tool with configurable set_persona method
class MockPersonaAwareTool(BaseTool):
    """Mock tool that can have a set_persona method."""

    name: str = "mock_persona_tool"
    description: str = "A mock persona-aware tool for testing"
    args_schema: type[BaseModel] = MockToolInput
    _fail_on_set_persona: bool = False  # Private attribute using underscore

    def __init__(self, fail_on_set_persona: bool = False, **kwargs):
        """Initialize with option to fail on set_persona."""
        super().__init__(**kwargs)
        # Use object.__setattr__ to bypass Pydantic validation
        object.__setattr__(self, "_fail_on_set_persona", fail_on_set_persona)

    def set_persona(self, persona: str) -> None:
        """Set the persona for the tool."""
        if self._fail_on_set_persona:
            raise Exception("Tool configuration failed")

    def _run(self, query: str = "test") -> str:
        """Run the tool."""
        return f"Mock result for: {query}"

    async def _arun(self, query: str = "test") -> str:
        """Run the tool asynchronously."""
        return f"Mock result for: {query}"


class TestAgentErrorHandling:
    """Test error handling in agent initialization and operation."""

    @pytest.mark.asyncio
    async def test_invalid_persona_error(self):
        """Test that invalid persona raises PersonaConfigurationError."""
        mock_llm = Mock()

        with pytest.raises(PersonaConfigurationError) as exc_info:
            MarketAnalysisAgent(llm=mock_llm, persona="invalid_persona")

        assert "Invalid persona 'invalid_persona'" in str(exc_info.value)
        assert exc_info.value.context["invalid_persona"] == "invalid_persona"
        assert "conservative" in exc_info.value.context["valid_personas"]

    @pytest.mark.asyncio
    async def test_no_tools_initialization_error(self):
        """Test that agent initialization fails gracefully with no tools."""
        mock_llm = Mock()

        with patch(
            "maverick_mcp.agents.market_analysis.get_tool_registry"
        ) as mock_registry:
            # Mock registry to return no tools
            mock_registry.return_value.get_tool.return_value = None

            # Also need to mock the directly instantiated tools
            with (
                patch(
                    "maverick_mcp.agents.market_analysis.PositionSizeTool",
                    return_value=None,
                ),
                patch(
                    "maverick_mcp.agents.market_analysis.RiskMetricsTool",
                    return_value=None,
                ),
                patch(
                    "maverick_mcp.agents.market_analysis.TechnicalStopsTool",
                    return_value=None,
                ),
                patch(
                    "maverick_mcp.agents.market_analysis.NewsSentimentTool",
                    return_value=None,
                ),
                patch(
                    "maverick_mcp.agents.market_analysis.MarketBreadthTool",
                    return_value=None,
                ),
                patch(
                    "maverick_mcp.agents.market_analysis.SectorSentimentTool",
                    return_value=None,
                ),
            ):
                with pytest.raises(AgentInitializationError) as exc_info:
                    MarketAnalysisAgent(llm=mock_llm, persona="moderate")

                assert "No tools available" in str(exc_info.value)
                assert exc_info.value.context["agent_type"] == "MarketAnalysisAgent"

    @pytest.mark.asyncio
    async def test_tool_registry_failure(self):
        """Test handling of tool registry failures."""
        mock_llm = Mock()

        with patch(
            "maverick_mcp.agents.market_analysis.get_tool_registry"
        ) as mock_registry:
            # Simulate registry failure
            mock_registry.side_effect = Exception("Registry connection failed")

            with pytest.raises(AgentInitializationError) as exc_info:
                MarketAnalysisAgent(llm=mock_llm, persona="moderate")

            assert "Registry connection failed" in str(exc_info.value)

    @pytest.mark.asyncio
    async def test_successful_initialization_with_retry(self):
        """Test successful initialization after transient failure."""
        mock_llm = Mock()
        attempts = 0

        def mock_get_tool(name):
            nonlocal attempts
            attempts += 1
            if attempts < 2:
                return None  # First attempt fails
            return MockTool()  # Second attempt succeeds with proper tool

        with patch(
            "maverick_mcp.agents.market_analysis.get_tool_registry"
        ) as mock_registry:
            mock_registry.return_value.get_tool = mock_get_tool

            # Should succeed on retry
            agent = MarketAnalysisAgent(llm=mock_llm, persona="moderate")
            assert agent is not None


class TestDataProviderErrorHandling:
    """Test error handling in data providers."""

    def test_api_rate_limit_error(self):
        """Test API rate limit error handling."""
        error = APIRateLimitError(provider="yahoo_finance", retry_after=60)

        assert error.recoverable is True
        assert error.context["retry_after"] == 60
        assert "Rate limit exceeded" in str(error)

        # Test error dictionary conversion
        error_dict = error.to_dict()
        assert error_dict["code"] == "RATE_LIMIT_EXCEEDED"
        assert (
            error_dict["message"]
            == "Rate limit exceeded for yahoo_finance. Retry after 60 seconds"
        )
        assert error_dict["context"]["retry_after"] == 60

    def test_data_not_found_error(self):
        """Test data not found error with date range."""
        from maverick_mcp.exceptions import DataNotFoundError

        error = DataNotFoundError(
            symbol="INVALID", date_range=("2024-01-01", "2024-01-31")
        )

        assert "INVALID" in str(error)
        assert "2024-01-01" in str(error)
        assert error.context["symbol"] == "INVALID"


class TestCircuitBreakerIntegration:
    """Test circuit breaker error handling."""

    def test_circuit_breaker_open_error(self):
        """Test circuit breaker open error."""
        error = CircuitBreakerError(
            service="stock_data_api", failure_count=5, threshold=3
        )

        assert error.recoverable is True
        assert error.context["failure_count"] == 5
        assert error.context["threshold"] == 3
        assert "Circuit breaker open" in str(error)


class TestValidationErrors:
    """Test validation error handling."""

    def test_parameter_validation_error(self):
        """Test parameter validation error."""
        from maverick_mcp.exceptions import ParameterValidationError

        error = ParameterValidationError(
            param_name="start_date", expected_type="datetime", actual_type="str"
        )

        assert error.recoverable is True  # Default is True in new implementation
        assert "Expected datetime, got str" in str(error)
        assert (
            error.field == "start_date"
        )  # ParameterValidationError inherits from ValidationError which uses "field"
        assert error.context["expected_type"] == "datetime"
        assert error.context["actual_type"] == "str"

    def test_validation_error_with_details(self):
        """Test validation error with detailed context."""
        error = ValidationError(message="Invalid ticker format", field="ticker")
        error.context["value"] = "ABC123"

        assert error.recoverable is True  # Default is True now
        assert "Invalid ticker format" in str(error)
        assert error.field == "ticker"
        assert error.context["value"] == "ABC123"


class TestErrorLogging:
    """Test structured error logging functionality."""

    def test_error_logger_masking(self):
        """Test that sensitive data is masked in logs."""
        logger = Mock()
        error_logger = ErrorLogger(logger)

        sensitive_context = {
            "api_key": "secret123",
            "user_data": {"email": "[email protected]", "password": "password123"},
            "safe_field": "visible_data",
        }

        error = ValueError("Test error")
        error_logger.log_error(error, sensitive_context)

        # Check that log was called
        assert logger.log.called

        # Get the extra data passed to logger
        call_args = logger.log.call_args
        extra_data = call_args[1]["extra"]

        # Verify sensitive data was masked
        assert extra_data["context"]["api_key"] == "***MASKED***"
        assert extra_data["context"]["user_data"]["password"] == "***MASKED***"
        assert extra_data["context"]["safe_field"] == "visible_data"

    def test_error_counting(self):
        """Test error count tracking."""
        logger = Mock()
        error_logger = ErrorLogger(logger)

        # Log same error type multiple times
        for _i in range(3):
            error_logger.log_error(ValueError("Test"), {})

        # Log different error type
        error_logger.log_error(TypeError("Test"), {})

        stats = error_logger.get_error_stats()
        assert stats["ValueError"] == 3
        assert stats["TypeError"] == 1


class TestCorrelationIDMiddleware:
    """Test correlation ID tracking."""

    def test_correlation_id_generation(self):
        """Test correlation ID generation and retrieval."""
        # Generate new ID
        correlation_id = CorrelationIDMiddleware.set_correlation_id()
        assert correlation_id.startswith("mcp-")
        assert len(correlation_id) == 12  # "mcp-" + 8 hex chars

        # Retrieve same ID
        retrieved_id = CorrelationIDMiddleware.get_correlation_id()
        assert retrieved_id == correlation_id

    def test_correlation_id_persistence(self):
        """Test that correlation ID persists across function calls."""
        correlation_id = CorrelationIDMiddleware.set_correlation_id()

        def inner_function():
            return CorrelationIDMiddleware.get_correlation_id()

        assert inner_function() == correlation_id


# Integration test for complete error flow
class TestErrorFlowIntegration:
    """Test complete error handling flow from agent to logging."""

    @pytest.mark.asyncio
    async def test_complete_error_flow(self):
        """Test error propagation from tool through agent to logging."""
        mock_llm = Mock()

        with patch(
            "maverick_mcp.agents.market_analysis.get_tool_registry"
        ) as mock_registry:
            # Create a proper mock tool that will fail on set_persona
            mock_tool = MockPersonaAwareTool(fail_on_set_persona=True)
            mock_registry.return_value.get_tool.return_value = mock_tool

            # Agent should still initialize but log warning
            with patch("maverick_mcp.agents.market_analysis.logger") as mock_logger:
                MarketAnalysisAgent(llm=mock_llm, persona="moderate")

                # Verify warning was logged
                assert mock_logger.warning.called
                warning_msg = mock_logger.warning.call_args[0][0]
                assert "Failed to set persona" in warning_msg
```

--------------------------------------------------------------------------------
/docs/deep_research_agent.md:
--------------------------------------------------------------------------------

````markdown
# DeepResearchAgent Documentation

## Overview

The DeepResearchAgent provides comprehensive financial research capabilities using web search, content analysis, and AI-powered insights. It integrates seamlessly with the existing maverick-mcp architecture and adapts research depth and focus based on investor personas.

## Key Features

### 🔍 Comprehensive Research
- **Multi-Source Web Search**: Integrates Exa AI and Tavily for comprehensive coverage
- **Content Analysis**: AI-powered extraction of insights, sentiment, and key themes
- **Source Credibility**: Automatic scoring and validation of information sources
- **Citation Management**: Proper citations and reference tracking
- **Fact Validation**: Cross-referencing and validation of research claims

### 🎯 Persona-Aware Research
- **Conservative**: Focus on stability, dividends, risk factors, established companies
- **Moderate**: Balanced approach with growth and value considerations
- **Aggressive**: Emphasis on growth opportunities, momentum, high-return potential
- **Day Trader**: Short-term focus, liquidity, technical factors, immediate opportunities

### 🏗️ LangGraph 2025 Integration
- **State Management**: Comprehensive state tracking with `DeepResearchState`
- **Workflow Orchestration**: Multi-step research process with error handling
- **Streaming Support**: Real-time progress updates and streaming responses
- **Circuit Breaker**: Automatic failover and rate limiting protection

## Architecture

### Core Components

```
DeepResearchAgent
├── ResearchQueryAnalyzer     # Query analysis and strategy planning
├── WebSearchProvider         # Multi-provider search (Exa, Tavily)
├── ContentAnalyzer          # AI-powered content analysis
├── PersonaAdapter           # Persona-specific result filtering
└── CacheManager            # Intelligent caching and performance
```

### State Management

The `DeepResearchState` extends `BaseAgentState` with comprehensive tracking:

```python
class DeepResearchState(BaseAgentState):
    # Research parameters
    research_query: str
    research_scope: str
    research_depth: str
    timeframe: str

    # Source management
    raw_sources: list[dict]
    processed_sources: list[dict]
    source_credibility: dict[str, float]

    # Content analysis
    extracted_content: dict[str, str]
    key_insights: list[dict]
    sentiment_analysis: dict

    # Research findings
    research_themes: list[dict]
    consensus_view: dict
    contrarian_views: list[dict]

    # Persona adaptation
    persona_focus_areas: list[str]
    actionable_insights: list[dict]
```

## Usage Examples

### Standalone Usage

```python
from maverick_mcp.agents.deep_research import DeepResearchAgent
from maverick_mcp.providers.llm_factory import get_llm

# Initialize agent
llm = get_llm()
research_agent = DeepResearchAgent(
    llm=llm,
    persona="moderate",
    max_sources=50,
    research_depth="comprehensive"
)

# Company research
result = await research_agent.research_company_comprehensive(
    symbol="AAPL",
    session_id="research_session",
    include_competitive_analysis=True
)

# Market sentiment analysis
sentiment = await research_agent.analyze_market_sentiment(
    topic="artificial intelligence stocks",
    session_id="sentiment_session",
    timeframe="1w"
)

# Custom research
custom = await research_agent.research_topic(
    query="impact of Federal Reserve policy on tech stocks",
    session_id="custom_session",
    research_scope="comprehensive",
    timeframe="1m"
)
```

### SupervisorAgent Integration

```python
from maverick_mcp.agents.supervisor import SupervisorAgent

# Create supervisor with research agent
supervisor = SupervisorAgent(
    llm=llm,
    agents={
        "market": market_agent,
        "technical": technical_agent,
        "research": research_agent  # DeepResearchAgent
    },
    persona="moderate"
)

# Coordinated analysis
result = await supervisor.coordinate_agents(
    query="Should I invest in MSFT? I want comprehensive analysis",
    session_id="coordination_session"
)
```

### MCP Tools Integration

Available MCP tools for Claude Desktop:

1. **`comprehensive_research`** - Deep research on any financial topic
2. **`analyze_market_sentiment`** - Market sentiment analysis
3. **`research_company_comprehensive`** - Company fundamental analysis
4. **`search_financial_news`** - News search and analysis
5. **`validate_research_claims`** - Fact-checking and validation

#### Claude Desktop Configuration

```json
{
  "mcpServers": {
    "maverick-research": {
      "command": "npx",
      "args": ["-y", "mcp-remote", "http://localhost:8003/research"]
    }
  }
}
```

#### Example Prompts

- "Research Tesla's competitive position in the EV market with comprehensive analysis"
- "Analyze current market sentiment for renewable energy stocks"
- "Perform fundamental analysis of Apple (AAPL) including competitive advantages"
- "Search for recent news about Federal Reserve interest rate decisions"

## Configuration

### Environment Variables

```bash
# Required API Keys
EXA_API_KEY=your_exa_api_key
TAVILY_API_KEY=your_tavily_api_key

# Optional Configuration
RESEARCH_MAX_SOURCES=50
RESEARCH_CACHE_TTL_HOURS=4
RESEARCH_DEPTH=comprehensive
```

### Settings

```python
from maverick_mcp.config.settings import get_settings

settings = get_settings()

# Research settings
research_config = settings.research
print(f"Max sources: {research_config.default_max_sources}")
print(f"Cache TTL: {research_config.cache_ttl_hours} hours")
print(f"Trusted domains: {research_config.trusted_domains}")
```

## Research Workflow

### 1. Query Analysis
- Classify research type (company, sector, market, news, fundamental)
- Determine appropriate search strategies and sources
- Set persona-specific focus areas and priorities

### 2. Search Execution
- Execute parallel searches across multiple providers
- Apply domain filtering and content type selection
- Handle rate limiting and error recovery

### 3. Content Processing
- Extract and clean content from sources
- Remove duplicates and low-quality sources
- Score sources for credibility and relevance

### 4. Content Analysis
- AI-powered insight extraction
- Sentiment analysis and trend detection
- Theme identification and cross-referencing

### 5. Persona Adaptation
- Filter insights for persona relevance
- Adjust risk assessments and recommendations
- Generate persona-specific action items

### 6. Result Synthesis
- Consolidate findings into coherent analysis
- Generate citations and source references
- Calculate confidence scores and quality metrics

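To make the six stages concrete, here is a minimal, self-contained sketch of the workflow as an async pipeline. Every helper below is a hypothetical stand-in for illustration; none of these names belong to DeepResearchAgent's actual internals.

```python
import asyncio


def analyze_query(query: str) -> dict:
    # 1. Query analysis: classify the request and choose providers (stubbed).
    return {"query": query, "providers": ["exa", "tavily"]}


async def search(provider: str, strategy: dict) -> list[dict]:
    # 2. Search execution: one provider call (stubbed with a fake source).
    return [{"url": f"https://example.com/{provider}", "credibility": 0.8}]


def dedupe_and_score(sources: list[dict]) -> list[dict]:
    # 3. Content processing: drop duplicate URLs, keep credible sources.
    seen: set[str] = set()
    unique = [s for s in sources if not (s["url"] in seen or seen.add(s["url"]))]
    return [s for s in unique if s["credibility"] >= 0.5]


async def run_pipeline(query: str, persona: str) -> dict:
    strategy = analyze_query(query)
    batches = await asyncio.gather(*(search(p, strategy) for p in strategy["providers"]))
    sources = dedupe_and_score([s for batch in batches for s in batch])
    # Stages 4-6 (content analysis, persona adaptation, synthesis) would run here.
    return {"persona": persona, "sources": sources, "confidence": 0.7}


if __name__ == "__main__":
    print(asyncio.run(run_pipeline("AAPL competitive position", "moderate")))
```
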
## Persona Behaviors

### Conservative Investor
- **Focus**: Stability, dividends, established companies, risk factors
- **Sources**: Prioritize authoritative financial publications
- **Insights**: Emphasize capital preservation and low-risk opportunities
- **Actions**: More cautious recommendations with detailed risk analysis

### Moderate Investor
- **Focus**: Balanced growth and value, diversification
- **Sources**: Mix of news, analysis, and fundamental reports
- **Insights**: Balanced view of opportunities and risks
- **Actions**: Moderate position sizing with measured recommendations

### Aggressive Investor
- **Focus**: Growth opportunities, momentum, high-return potential
- **Sources**: Include social media sentiment and trending analysis
- **Insights**: Emphasize upside potential and growth catalysts
- **Actions**: Larger position sizing with growth-focused recommendations

### Day Trader
- **Focus**: Short-term catalysts, technical factors, liquidity
- **Sources**: Real-time news, social sentiment, technical analysis
- **Insights**: Immediate trading opportunities and momentum indicators
- **Actions**: Quick-turn recommendations with tight risk controls

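One straightforward way to encode these behaviors is a persona-to-profile lookup consulted during the adaptation stage. The mapping below is a hypothetical illustration based on the descriptions above; the field names and thresholds are not the agent's actual configuration schema.

```python
# Hypothetical persona profiles; keys and thresholds are illustrative only.
PERSONA_PROFILES: dict[str, dict] = {
    "conservative": {"focus": ["stability", "dividends", "risk_factors"], "max_risk": 0.3},
    "moderate": {"focus": ["balanced_growth", "value", "diversification"], "max_risk": 0.6},
    "aggressive": {"focus": ["growth", "momentum", "catalysts"], "max_risk": 0.85},
    "day_trader": {"focus": ["short_term_catalysts", "liquidity", "technicals"], "max_risk": 0.95},
}


def filter_insights(insights: list[dict], persona: str) -> list[dict]:
    """Keep only insights whose risk score fits the persona's tolerance."""
    profile = PERSONA_PROFILES[persona]
    return [i for i in insights if i.get("risk_score", 0.5) <= profile["max_risk"]]
```
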
## Performance & Caching

### Intelligent Caching
- **Research Results**: 4-hour TTL for comprehensive research
- **Source Content**: 1-hour TTL for raw content
- **Sentiment Analysis**: 30-minute TTL for rapidly changing topics
- **Company Fundamentals**: 24-hour TTL for stable company data

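The TTL policy above could be expressed as a small lookup used when writing cache entries. This is a sketch mirroring the documented values, not the project's actual cache configuration.

```python
from datetime import timedelta

# TTLs taken from the policy described above; the real cache layer may
# organize these differently.
CACHE_TTLS: dict[str, timedelta] = {
    "research_results": timedelta(hours=4),
    "source_content": timedelta(hours=1),
    "sentiment_analysis": timedelta(minutes=30),
    "company_fundamentals": timedelta(hours=24),
}


def ttl_seconds(kind: str) -> int:
    """Return the TTL for a cached item type, in seconds."""
    return int(CACHE_TTLS[kind].total_seconds())
```
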
### Rate Limiting
- **Exa AI**: Respects API rate limits with exponential backoff
- **Tavily**: Built-in rate limiting and request queuing
- **Content Analysis**: Batch processing to optimize LLM usage

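Exponential backoff for a rate-limited provider can be as simple as the helper below. This is a generic sketch, not the project's retry implementation; in real code the `except` clause would catch the provider's specific rate-limit exception.

```python
import asyncio
import random


async def with_backoff(call, max_retries: int = 4, base_delay: float = 1.0):
    """Retry a zero-argument async callable with exponential backoff and jitter."""
    for attempt in range(max_retries):
        try:
            return await call()
        except Exception:  # narrow this to the provider's rate-limit error in real code
            if attempt == max_retries - 1:
                raise
            # Delay doubles each attempt: 1s, 2s, 4s, ... plus a little jitter.
            await asyncio.sleep(base_delay * (2**attempt) + random.uniform(0, 0.5))
```
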
### Performance Optimization
- **Parallel Search**: Concurrent execution across providers
- **Content Streaming**: Progressive result delivery
- **Circuit Breakers**: Automatic failover on provider issues
- **Connection Pooling**: Efficient network resource usage

## Error Handling

### Circuit Breaker Pattern
- Automatic provider failover on repeated failures
- Graceful degradation with partial results
- Recovery testing and automatic restoration

### Fallback Strategies
- Provider fallback (Exa → Tavily → Basic web search)
- Reduced scope fallback (comprehensive → standard → basic)
- Cached result fallback when live search fails

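The provider fallback chain amounts to trying each provider in priority order and raising only after all of them fail. A minimal sketch, assuming each provider is an async callable that returns a list of source dicts:

```python
async def search_with_fallback(query: str, providers: list) -> list[dict]:
    """Try providers in order (e.g. Exa -> Tavily -> basic web search)."""
    last_error: Exception | None = None
    for provider in providers:
        try:
            return await provider(query)
        except Exception as exc:  # e.g. WebSearchError or CircuitBreakerError
            last_error = exc
            continue  # degrade to the next provider
    raise RuntimeError(f"All search providers failed: {last_error}")
```
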
### Error Types
- `WebSearchError`: Search provider failures
- `ContentAnalysisError`: Content processing failures
- `ResearchError`: General research operation failures
- `CircuitBreakerError`: Circuit breaker activation

## Integration Points

### SupervisorAgent Routing
- Automatic routing for research-related queries
- Intelligent agent selection based on query complexity
- Result synthesis with technical and market analysis

### MCP Server Integration
- RESTful API endpoints for external access
- Standardized request/response formats
- Authentication and rate limiting support

### Database Integration
- Research result caching in PostgreSQL/SQLite
- Source credibility tracking and learning
- Historical research analysis and trends

## Best Practices

### Query Optimization
- Use specific, focused queries for better results
- Include timeframe context for temporal relevance
- Specify research depth based on needs (basic/standard/comprehensive)

### Persona Selection
- Choose persona that matches intended investment style
- Consider persona characteristics in result interpretation
- Use persona-specific insights for decision making

### Result Interpretation
- Review confidence scores and source diversity
- Consider contrarian views alongside consensus
- Validate critical claims through multiple sources

### Performance Tuning
- Adjust max_sources based on speed vs. comprehensiveness needs
- Use appropriate research_depth for the use case
- Monitor cache hit rates and adjust TTL settings

## Troubleshooting

### Common Issues

1. **No Results Found**
   - Check API key configuration
   - Verify internet connectivity
   - Try broader search terms

2. **Low Confidence Scores**
   - Increase max_sources parameter
   - Use longer timeframe for more data
   - Check for topic relevance and specificity

3. **Rate Limiting Errors**
   - Review API usage limits
   - Implement request spacing
   - Consider upgrading API plans

4. **Poor Persona Alignment**
   - Review persona characteristics
   - Adjust focus areas in research strategy
   - Consider custom persona configuration

### Debug Mode

```python
import logging
logging.basicConfig(level=logging.DEBUG)

# Enable detailed logging for troubleshooting
research_agent = DeepResearchAgent(
    llm=llm,
    persona="moderate",
    research_depth="comprehensive"
)
```

## Future Enhancements

### Planned Features
- **Multi-language Support**: Research in multiple languages
- **PDF Analysis**: Direct analysis of earnings reports and filings
- **Real-time Alerts**: Research-based alert generation
- **Custom Personas**: User-defined persona characteristics
- **Research Collaboration**: Multi-user research sessions

### API Extensions
- **Batch Research**: Process multiple queries simultaneously
- **Research Templates**: Pre-configured research workflows
- **Historical Analysis**: Time-series research trend analysis
- **Integration APIs**: Third-party platform integrations

---

## Support

For questions, issues, or feature requests related to the DeepResearchAgent:

1. Check the troubleshooting section above
2. Review the example code in `/examples/deep_research_integration.py`
3. Enable debug logging for detailed error information
4. Consider the integration patterns with SupervisorAgent for complex workflows

The DeepResearchAgent is designed to provide institutional-quality research capabilities while maintaining the flexibility and persona-awareness that makes it suitable for individual investors across all experience levels.
````

--------------------------------------------------------------------------------
/maverick_mcp/application/screening/dtos.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Screening application DTOs (Data Transfer Objects).
  3 | 
  4 | This module contains DTOs for request/response communication
  5 | between the API layer and application layer.
  6 | """
  7 | 
  8 | from typing import Any
  9 | 
 10 | from pydantic import BaseModel, Field, validator
 11 | 
 12 | from maverick_mcp.domain.screening.value_objects import ScreeningStrategy
 13 | 
 14 | 
 15 | class ScreeningRequestDTO(BaseModel):
 16 |     """
 17 |     DTO for screening requests from the API layer.
 18 | 
 19 |     This DTO validates and structures incoming screening requests.
 20 |     """
 21 | 
 22 |     strategy: str = Field(
 23 |         description="Screening strategy to use", example="maverick_bullish"
 24 |     )
 25 |     limit: int = Field(
 26 |         default=20, ge=1, le=100, description="Maximum number of results to return"
 27 |     )
 28 | 
 29 |     # Filtering criteria
 30 |     min_momentum_score: float | None = Field(
 31 |         default=None, ge=0, le=100, description="Minimum momentum score"
 32 |     )
 33 |     max_momentum_score: float | None = Field(
 34 |         default=None, ge=0, le=100, description="Maximum momentum score"
 35 |     )
 36 |     min_volume: int | None = Field(
 37 |         default=None, ge=0, description="Minimum average daily volume"
 38 |     )
 39 |     max_volume: int | None = Field(
 40 |         default=None, ge=0, description="Maximum average daily volume"
 41 |     )
 42 |     min_price: float | None = Field(
 43 |         default=None, gt=0, description="Minimum stock price"
 44 |     )
 45 |     max_price: float | None = Field(
 46 |         default=None, gt=0, description="Maximum stock price"
 47 |     )
 48 |     min_combined_score: int | None = Field(
 49 |         default=None, ge=0, description="Minimum combined score for bullish screening"
 50 |     )
 51 |     min_bear_score: int | None = Field(
 52 |         default=None, ge=0, description="Minimum bear score for bearish screening"
 53 |     )
 54 |     min_adr_percentage: float | None = Field(
 55 |         default=None, ge=0, description="Minimum average daily range percentage"
 56 |     )
 57 |     max_adr_percentage: float | None = Field(
 58 |         default=None, ge=0, description="Maximum average daily range percentage"
 59 |     )
 60 | 
 61 |     # Pattern filters
 62 |     require_pattern_detected: bool = Field(
 63 |         default=False, description="Require pattern detection signal"
 64 |     )
 65 |     require_squeeze: bool = Field(default=False, description="Require squeeze signal")
 66 |     require_consolidation: bool = Field(
 67 |         default=False, description="Require consolidation pattern"
 68 |     )
 69 |     require_entry_signal: bool = Field(
 70 |         default=False, description="Require entry signal"
 71 |     )
 72 | 
 73 |     # Moving average filters
 74 |     require_above_sma50: bool = Field(
 75 |         default=False, description="Require price above SMA 50"
 76 |     )
 77 |     require_above_sma150: bool = Field(
 78 |         default=False, description="Require price above SMA 150"
 79 |     )
 80 |     require_above_sma200: bool = Field(
 81 |         default=False, description="Require price above SMA 200"
 82 |     )
 83 |     require_ma_alignment: bool = Field(
 84 |         default=False,
 85 |         description="Require proper moving average alignment (50>150>200)",
 86 |     )
 87 | 
 88 |     # Sorting options
 89 |     sort_field: str | None = Field(
 90 |         default=None, description="Field to sort by (strategy default if not specified)"
 91 |     )
 92 |     sort_descending: bool = Field(default=True, description="Sort in descending order")
 93 | 
 94 |     @validator("strategy")
 95 |     def validate_strategy(cls, v):
 96 |         """Validate that strategy is a known screening strategy."""
 97 |         valid_strategies = [s.value for s in ScreeningStrategy]
 98 |         if v not in valid_strategies:
 99 |             raise ValueError(f"Invalid strategy. Must be one of: {valid_strategies}")
100 |         return v
101 | 
102 |     @validator("max_momentum_score")
103 |     def validate_momentum_score_range(cls, v, values):
104 |         """Validate that max_momentum_score >= min_momentum_score if both specified."""
105 |         if (
106 |             v is not None
107 |             and "min_momentum_score" in values
108 |             and values["min_momentum_score"] is not None
109 |         ):
110 |             if v < values["min_momentum_score"]:
111 |                 raise ValueError(
112 |                     "max_momentum_score cannot be less than min_momentum_score"
113 |                 )
114 |         return v
115 | 
116 |     @validator("max_volume")
117 |     def validate_volume_range(cls, v, values):
118 |         """Validate that max_volume >= min_volume if both specified."""
119 |         if (
120 |             v is not None
121 |             and "min_volume" in values
122 |             and values["min_volume"] is not None
123 |         ):
124 |             if v < values["min_volume"]:
125 |                 raise ValueError("max_volume cannot be less than min_volume")
126 |         return v
127 | 
128 |     @validator("max_price")
129 |     def validate_price_range(cls, v, values):
130 |         """Validate that max_price >= min_price if both specified."""
131 |         if v is not None and "min_price" in values and values["min_price"] is not None:
132 |             if v < values["min_price"]:
133 |                 raise ValueError("max_price cannot be less than min_price")
134 |         return v
135 | 
136 |     @validator("sort_field")
137 |     def validate_sort_field(cls, v):
138 |         """Validate sort field if specified."""
139 |         if v is not None:
140 |             valid_fields = {
141 |                 "combined_score",
142 |                 "bear_score",
143 |                 "momentum_score",
144 |                 "close_price",
145 |                 "volume",
146 |                 "avg_volume_30d",
147 |                 "adr_percentage",
148 |                 "quality_score",
149 |             }
150 |             if v not in valid_fields:
151 |                 raise ValueError(f"Invalid sort field. Must be one of: {valid_fields}")
152 |         return v
153 | 
154 | 
155 | class ScreeningResultDTO(BaseModel):
156 |     """
157 |     DTO for individual screening results.
158 | 
159 |     This DTO represents a single stock screening result for API responses.
160 |     """
161 | 
162 |     stock_symbol: str = Field(description="Stock ticker symbol")
163 |     screening_date: str = Field(description="Date when screening was performed")
164 |     close_price: float = Field(description="Current closing price")
165 |     volume: int = Field(description="Current volume")
166 |     momentum_score: float = Field(description="Momentum score (0-100)")
167 |     adr_percentage: float = Field(description="Average daily range percentage")
168 | 
169 |     # Technical indicators
170 |     ema_21: float = Field(description="21-period exponential moving average")
171 |     sma_50: float = Field(description="50-period simple moving average")
172 |     sma_150: float = Field(description="150-period simple moving average")
173 |     sma_200: float = Field(description="200-period simple moving average")
174 |     avg_volume_30d: float = Field(description="30-day average volume")
175 |     atr: float = Field(description="Average True Range")
176 | 
177 |     # Pattern signals
178 |     pattern: str | None = Field(default=None, description="Detected pattern")
179 |     squeeze: str | None = Field(default=None, description="Squeeze signal")
180 |     consolidation: str | None = Field(
181 |         default=None, description="Consolidation pattern signal"
182 |     )
183 |     entry_signal: str | None = Field(default=None, description="Entry signal")
184 | 
185 |     # Scores
186 |     combined_score: int = Field(description="Combined bullish score")
187 |     bear_score: int = Field(description="Bearish score")
188 |     quality_score: int = Field(description="Overall quality score")
189 | 
190 |     # Business rule indicators
191 |     is_bullish: bool = Field(description="Meets bullish setup criteria")
192 |     is_bearish: bool = Field(description="Meets bearish setup criteria")
193 |     is_trending: bool = Field(description="Meets trending criteria")
194 |     risk_reward_ratio: float = Field(description="Calculated risk/reward ratio")
195 | 
196 |     # Bearish-specific fields (optional)
197 |     rsi_14: float | None = Field(default=None, description="14-period RSI")
198 |     macd: float | None = Field(default=None, description="MACD line")
199 |     macd_signal: float | None = Field(default=None, description="MACD signal line")
200 |     macd_histogram: float | None = Field(default=None, description="MACD histogram")
201 |     distribution_days_20: int | None = Field(
202 |         default=None, description="Distribution days in last 20 days"
203 |     )
204 |     atr_contraction: bool | None = Field(
205 |         default=None, description="ATR contraction detected"
206 |     )
207 |     big_down_volume: bool | None = Field(
208 |         default=None, description="Big down volume detected"
209 |     )
210 | 
211 | 
212 | class ScreeningCollectionDTO(BaseModel):
213 |     """
214 |     DTO for screening result collections.
215 | 
216 |     This DTO represents the complete response for a screening operation.
217 |     """
218 | 
219 |     strategy_used: str = Field(description="Screening strategy that was used")
220 |     screening_timestamp: str = Field(description="When the screening was performed")
221 |     total_candidates_analyzed: int = Field(
222 |         description="Total number of candidates analyzed"
223 |     )
224 |     results_returned: int = Field(description="Number of results returned")
225 |     results: list[ScreeningResultDTO] = Field(
226 |         description="Individual screening results"
227 |     )
228 | 
229 |     # Statistics and metadata
230 |     statistics: dict[str, Any] = Field(description="Collection statistics")
231 |     applied_filters: dict[str, Any] = Field(description="Filters that were applied")
232 |     sorting_applied: dict[str, Any] = Field(description="Sorting configuration used")
233 | 
234 |     # Status information
235 |     status: str = Field(default="success", description="Operation status")
236 |     execution_time_ms: float | None = Field(
237 |         default=None, description="Execution time in milliseconds"
238 |     )
239 |     warnings: list[str] = Field(
240 |         default_factory=list, description="Any warnings during processing"
241 |     )
242 | 
243 | 
244 | class AllScreeningResultsDTO(BaseModel):
245 |     """
246 |     DTO for comprehensive screening results across all strategies.
247 | 
248 |     This DTO represents results from all available screening strategies.
249 |     """
250 | 
251 |     screening_timestamp: str = Field(description="When the screening was performed")
252 |     strategies_executed: list[str] = Field(
253 |         description="List of strategies that were executed"
254 |     )
255 | 
256 |     # Results by strategy
257 |     maverick_bullish: ScreeningCollectionDTO | None = Field(
258 |         default=None, description="Maverick bullish screening results"
259 |     )
260 |     maverick_bearish: ScreeningCollectionDTO | None = Field(
261 |         default=None, description="Maverick bearish screening results"
262 |     )
263 |     trending: ScreeningCollectionDTO | None = Field(
264 |         default=None, description="Trending screening results"
265 |     )
266 | 
267 |     # Cross-strategy analysis
268 |     cross_strategy_analysis: dict[str, Any] = Field(
269 |         description="Analysis across multiple strategies"
270 |     )
271 | 
272 |     # Overall statistics
273 |     overall_summary: dict[str, Any] = Field(
274 |         description="Summary statistics across all strategies"
275 |     )
276 | 
277 |     # Status information
278 |     status: str = Field(default="success", description="Operation status")
279 |     execution_time_ms: float | None = Field(
280 |         default=None, description="Total execution time in milliseconds"
281 |     )
282 |     errors: list[str] = Field(
283 |         default_factory=list, description="Any errors during processing"
284 |     )
285 | 
286 | 
287 | class ScreeningStatisticsDTO(BaseModel):
288 |     """
289 |     DTO for screening statistics and analytics.
290 | 
291 |     This DTO provides comprehensive analytics and business intelligence
292 |     for screening operations.
293 |     """
294 | 
295 |     strategy: str | None = Field(
296 |         default=None, description="Strategy analyzed (None for all)"
297 |     )
298 |     timestamp: str = Field(description="When the analysis was performed")
299 | 
300 |     # Single strategy statistics
301 |     statistics: dict[str, Any] | None = Field(
302 |         default=None, description="Statistics for single strategy analysis"
303 |     )
304 | 
305 |     # Multi-strategy statistics
306 |     overall_summary: dict[str, Any] | None = Field(
307 |         default=None, description="Summary across all strategies"
308 |     )
309 |     by_strategy: dict[str, dict[str, Any]] | None = Field(
310 |         default=None, description="Statistics broken down by strategy"
311 |     )
312 |     cross_strategy_analysis: dict[str, Any] | None = Field(
313 |         default=None, description="Cross-strategy insights and analysis"
314 |     )
315 | 
316 |     # Metadata
317 |     analysis_scope: str = Field(description="Scope of the analysis (single/all)")
318 |     results_analyzed: int = Field(description="Total number of results analyzed")
319 | 
320 | 
321 | class ErrorResponseDTO(BaseModel):
322 |     """
323 |     DTO for error responses.
324 | 
325 |     This DTO provides standardized error information for API responses.
326 |     """
327 | 
328 |     status: str = Field(default="error", description="Response status")
329 |     error_code: str = Field(description="Machine-readable error code")
330 |     error_message: str = Field(description="Human-readable error message")
331 |     details: dict[str, Any] | None = Field(
332 |         default=None, description="Additional error details"
333 |     )
334 |     timestamp: str = Field(description="When the error occurred")
335 |     request_id: str | None = Field(
336 |         default=None, description="Request identifier for tracking"
337 |     )
338 | 
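
# --- Usage sketch (illustrative addition; not part of the original module) ---
# The validators above run at construction time, so malformed requests fail
# fast with a pydantic ValidationError, e.g.:
#
#     ScreeningRequestDTO(strategy="unknown")  # rejected by validate_strategy
#     ScreeningRequestDTO(
#         strategy="maverick_bullish",
#         min_momentum_score=80,
#         max_momentum_score=70,  # rejected: max below min
#     )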
```

--------------------------------------------------------------------------------
/tests/test_integration_simple.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Simplified Integration Test Suite for MaverickMCP Security System.
  3 | 
  4 | This test suite validates that the core security integrations are working:
  5 | - API server can start
  6 | - Health check endpoints
  7 | - Basic authentication flow (if available)
  8 | - Security middleware is active
  9 | - Performance systems can initialize
 10 | 
 11 | This is a lightweight version to validate system integration without
 12 | requiring full database or Redis setup.
 13 | """
 14 | 
 15 | import os
 16 | from unittest.mock import MagicMock, patch
 17 | 
 18 | import pytest
 19 | from fastapi.testclient import TestClient
 20 | 
 21 | from maverick_mcp.api.api_server import create_api_app
 22 | 
 23 | 
 24 | @pytest.fixture
 25 | def mock_settings():
 26 |     """Mock settings for testing."""
 27 |     with patch.dict(
 28 |         os.environ,
 29 |         {
 30 |             "AUTH_ENABLED": "true",
 31 |             "ENVIRONMENT": "test",
 32 |             "DATABASE_URL": "sqlite:///:memory:",
 33 |             "REDIS_URL": "redis://localhost:6379/15",
 34 |         },
 35 |     ):
 36 |         yield
 37 | 
 38 | 
 39 | @pytest.fixture
 40 | def mock_redis():
 41 |     """Mock Redis client."""
 42 |     mock_redis = MagicMock()
 43 |     mock_redis.ping.return_value = True
 44 |     mock_redis.get.return_value = None
 45 |     mock_redis.setex.return_value = True
 46 |     mock_redis.delete.return_value = 1
 47 |     mock_redis.keys.return_value = []
 48 |     mock_redis.flushdb.return_value = True
 49 |     mock_redis.close.return_value = None
 50 |     return mock_redis
 51 | 
 52 | 
 53 | @pytest.fixture
 54 | def mock_database():
 55 |     """Mock database operations."""
 56 |     from unittest.mock import MagicMock
 57 | 
 58 |     mock_db = MagicMock()
 59 | 
 60 |     # Mock SQLAlchemy Session methods
 61 |     mock_query = MagicMock()
 62 |     mock_query.filter.return_value.first.return_value = None  # No user found
 63 |     mock_query.filter.return_value.all.return_value = []
 64 |     mock_db.query.return_value = mock_query
 65 | 
 66 |     # Mock basic session operations
 67 |     mock_db.execute.return_value.scalar.return_value = 1
 68 |     mock_db.execute.return_value.fetchall.return_value = []
 69 |     mock_db.commit.return_value = None
 70 |     mock_db.close.return_value = None
 71 |     mock_db.add.return_value = None
 72 | 
 73 |     return mock_db
 74 | 
 75 | 
 76 | @pytest.fixture
 77 | def integrated_app(mock_settings, mock_redis, mock_database):
 78 |     """Create integrated app with mocked dependencies."""
 79 | 
 80 |     # Mock database dependency
 81 |     def mock_get_db():
 82 |         yield mock_database
 83 | 
 84 |     # Mock Redis connection manager
 85 |     with patch("maverick_mcp.data.performance.redis_manager") as mock_redis_manager:
 86 |         mock_redis_manager.initialize.return_value = True
 87 |         mock_redis_manager.get_client.return_value = mock_redis
 88 |         mock_redis_manager._healthy = True
 89 |         mock_redis_manager._initialized = True
 90 |         mock_redis_manager.get_metrics.return_value = {
 91 |             "healthy": True,
 92 |             "initialized": True,
 93 |             "commands_executed": 0,
 94 |             "errors": 0,
 95 |         }
 96 | 
 97 |         # Mock performance systems
 98 |         with patch(
 99 |             "maverick_mcp.data.performance.initialize_performance_systems"
100 |         ) as mock_init:
101 |             mock_init.return_value = {"redis_manager": True, "request_cache": True}
102 | 
103 |             # Mock monitoring
104 |             with patch("maverick_mcp.utils.monitoring.initialize_monitoring"):
105 |                 # Create app
106 |                 app = create_api_app()
107 | 
108 |                 # Override database dependencies
109 |                 from maverick_mcp.data.models import get_async_db, get_db
110 | 
111 |                 app.dependency_overrides[get_db] = mock_get_db
112 | 
113 |                 # Mock async database dependency
114 |                 async def mock_get_async_db():
115 |                     yield mock_database
116 | 
117 |                 app.dependency_overrides[get_async_db] = mock_get_async_db
118 | 
119 |                 yield app
120 | 
121 | 
122 | @pytest.fixture
123 | def client(integrated_app):
124 |     """Create test client."""
125 |     return TestClient(integrated_app)
126 | 
127 | 
128 | class TestSystemIntegration:
129 |     """Test core system integration."""
130 | 
131 |     def test_api_server_creation(self, integrated_app):
132 |         """Test that API server can be created successfully."""
133 |         assert integrated_app is not None
134 |         assert hasattr(integrated_app, "router")
135 |         assert hasattr(integrated_app, "middleware")
136 | 
137 |     @pytest.mark.skip(reason="Requires Redis and external services not available in CI")
138 |     def test_health_check_endpoint(self, client):
139 |         """Test health check endpoint is available."""
140 |         response = client.get("/health")
141 |         assert response.status_code == 200
142 | 
143 |         health_data = response.json()
144 |         assert "status" in health_data
145 |         assert "service" in health_data
146 |         assert health_data["service"] == "MaverickMCP API"
147 | 
148 |     def test_security_middleware_present(self, integrated_app):
149 |         """Test that security middleware is loaded."""
150 |         # FastAPI middleware stack is different, check if the app has middleware
151 |         assert hasattr(integrated_app, "middleware_stack") or hasattr(
152 |             integrated_app, "middleware"
153 |         )
154 | 
155 |         # The actual middleware is added during app creation
156 |         # We can verify by checking the app structure
157 |         assert integrated_app is not None
158 | 
159 |     def test_cors_configuration(self, integrated_app):
160 |         """Test CORS middleware is configured."""
161 |         # CORS middleware is added during app creation
162 |         assert integrated_app is not None
163 | 
164 |     def test_api_endpoints_available(self, client):
165 |         """Test that key API endpoints are available."""
166 | 
167 |         # Test root endpoint
168 |         response = client.get("/")
169 |         assert response.status_code == 200
170 | 
171 |         root_data = response.json()
172 |         assert "service" in root_data
173 |         assert "endpoints" in root_data
174 | 
175 |         # Verify key endpoints are listed and billing endpoints are absent
176 |         endpoints = root_data["endpoints"]
177 |         if isinstance(endpoints, dict):
178 |             endpoint_names = set(endpoints.keys())
179 |         elif isinstance(endpoints, list):
180 |             endpoint_names = set(endpoints)
181 |         else:
182 |             pytest.fail(f"Unexpected endpoints payload type: {type(endpoints)!r}")
183 | 
184 |         assert "auth" in endpoint_names
185 |         assert "health" in endpoint_names
186 |         assert "billing" not in endpoint_names
187 | 
188 |     def test_authentication_endpoints_available(self, client):
189 |         """Test authentication endpoints are available."""
190 | 
191 |         # Test registration endpoint (should require data)
192 |         response = client.post("/auth/signup", json={})
193 |         assert response.status_code in [400, 422]  # Validation error, not 404
194 | 
195 |         # Test login endpoint (should require data)
196 |         response = client.post("/auth/login", json={})
197 |         assert response.status_code in [400, 422]  # Validation error, not 404
198 | 
199 |     def test_billing_endpoints_removed(self, client):
200 |         """Ensure legacy billing endpoints are no longer exposed."""
201 | 
202 |         response = client.get("/billing/balance")
203 |         assert response.status_code == 404
204 | 
205 |     def test_error_handling_active(self, client):
206 |         """Test that error handling middleware is active."""
207 | 
208 |         # Test 404 handling
209 |         response = client.get("/nonexistent/endpoint")
210 |         assert response.status_code == 404
211 | 
212 |         error_data = response.json()
213 |         assert "error" in error_data or "detail" in error_data
214 | 
215 |         # Should have structured error response
216 |         assert isinstance(error_data, dict)
217 | 
218 |     @pytest.mark.skip(reason="Requires Redis and external services not available in CI")
219 |     def test_request_tracing_active(self, client):
220 |         """Test request tracing is active."""
221 | 
222 |         # Make request and check for tracing headers
223 |         response = client.get("/health")
224 | 
225 |         # Should have request tracing in headers or response
226 |         # At minimum, should not error
227 |         assert response.status_code == 200
228 | 
229 | 
230 | class TestSecurityValidation:
231 |     """Test security features are active."""
232 | 
233 |     def test_csrf_protection_blocks_unsafe_requests(self, client):
234 |         """Test CSRF protection is active."""
235 | 
236 |         # The CSRF middleware is fully tested in test_security_comprehensive.py
237 |         # In this integration test, we just verify that auth endpoints exist
238 |         # and respond appropriately to requests
239 | 
240 |         # Try login endpoint without credentials
241 |         response = client.post("/auth/login", json={})
242 | 
243 |         # Should get validation error for missing fields, not 404
244 |         assert response.status_code in [400, 422]
245 | 
246 |     def test_rate_limiting_configured(self, integrated_app):
247 |         """Test rate limiting middleware is configured."""
248 | 
249 |         # Check if rate limiting middleware is present
250 |         middleware_types = [type(m).__name__ for m in integrated_app.user_middleware]
251 | 
252 |         # Rate limiting might be present
253 |         any(
254 |             "Rate" in middleware_type or "Limit" in middleware_type
255 |             for middleware_type in middleware_types
256 |         )
257 | 
258 |         # In test environment, this might not be fully configured
259 |         # Just verify the system doesn't crash
260 |         assert True  # Basic test passes if we get here
261 | 
262 |     def test_authentication_configuration(self, client):
263 |         """Test authentication system is configured."""
264 | 
265 |         # Test that auth endpoints exist and respond appropriately
266 |         response = client.post(
267 |             "/auth/login", json={"email": "test@example.com", "password": "invalid"}
268 |         )
269 | 
270 |         # Should get validation error or auth failure, not 500
271 |         assert response.status_code < 500
272 | 
273 | 
274 | class TestPerformanceSystemsIntegration:
275 |     """Test performance systems integration."""
276 | 
277 |     def test_metrics_endpoint_available(self, client):
278 |         """Test metrics endpoint is available."""
279 | 
280 |         response = client.get("/metrics")
281 | 
282 |         # Metrics might be restricted or not available in test
283 |         assert response.status_code in [200, 401, 403, 404]
284 | 
285 |         if response.status_code == 200:
286 |             # Should return metrics in text format
287 |             assert response.headers.get("content-type") is not None
288 | 
289 |     def test_performance_monitoring_available(self, integrated_app):
290 |         """Test performance monitoring is available."""
291 | 
292 |         # Check that performance systems can be imported
293 |         try:
294 |             from maverick_mcp.data.performance import (
295 |                 query_optimizer,
296 |                 redis_manager,
297 |                 request_cache,
298 |             )
299 | 
300 |             assert redis_manager is not None
301 |             assert request_cache is not None
302 |             assert query_optimizer is not None
303 | 
304 |         except ImportError:
305 |             pytest.skip("Performance monitoring modules not available")
306 | 
307 | 
308 | class TestConfigurationValidation:
309 |     """Test system configuration validation."""
310 | 
311 |     def test_settings_validation(self):
312 |         """Test settings validation system."""
313 | 
314 |         try:
315 |             from maverick_mcp.config.validation import get_validation_status
316 | 
317 |             validation_status = get_validation_status()
318 | 
319 |             assert "valid" in validation_status
320 |             assert "warnings" in validation_status
321 |             assert "errors" in validation_status
322 | 
323 |             # System should be in a valid state for testing
324 |             assert isinstance(validation_status["valid"], bool)
325 | 
326 |         except ImportError:
327 |             pytest.skip("Configuration validation not available")
328 | 
329 |     def test_environment_configuration(self):
330 |         """Test environment configuration."""
331 | 
332 |         from maverick_mcp.config.settings import get_settings
333 | 
334 |         settings = get_settings()
335 | 
336 |         # Basic settings should be available
337 |         assert hasattr(settings, "auth")
338 |         assert hasattr(settings, "api")
339 |         assert hasattr(settings, "environment")
340 | 
341 |         # Environment should be set
342 |         assert settings.environment in ["development", "test", "staging", "production"]
343 | 
344 | 
345 | class TestSystemStartup:
346 |     """Test system startup procedures."""
347 | 
348 |     def test_app_startup_succeeds(self, integrated_app):
349 |         """Test that app startup completes successfully."""
350 | 
351 |         # If we can create the app, startup succeeded
352 |         assert integrated_app is not None
353 | 
354 |         # App should have core FastAPI attributes
355 |         assert hasattr(integrated_app, "openapi")
356 |         assert hasattr(integrated_app, "routes")
357 |         assert hasattr(integrated_app, "middleware_stack")
358 | 
359 |     @pytest.mark.skip(reason="Requires Redis and external services not available in CI")
360 |     def test_dependency_injection_works(self, client):
361 |         """Test dependency injection is working."""
362 | 
363 |         # Make a request that would use dependency injection
364 |         response = client.get("/health")
365 |         assert response.status_code == 200
366 | 
367 |         # If dependencies weren't working, we'd get 500 errors
368 |         health_data = response.json()
369 |         assert "service" in health_data
370 | 
371 | 
372 | if __name__ == "__main__":
373 |     pytest.main([__file__, "-v", "--tb=short"])
374 | 
```

--------------------------------------------------------------------------------
/scripts/tiingo_config.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Configuration settings for the Tiingo data loader.
  3 | 
  4 | This file contains configuration options that can be customized
  5 | for different loading scenarios and environments.
  6 | """
  7 | 
  8 | import os
  9 | from dataclasses import dataclass
 10 | from typing import Any
 11 | 
 12 | 
 13 | @dataclass
 14 | class TiingoConfig:
 15 |     """Configuration for Tiingo data loader."""
 16 | 
 17 |     # API Configuration
 18 |     rate_limit_per_hour: int = 2400  # Tiingo free tier limit
 19 |     max_retries: int = 3
 20 |     retry_backoff_multiplier: float = 2.0
 21 |     request_timeout: int = 30
 22 | 
 23 |     # Concurrent Processing
 24 |     max_concurrent_requests: int = 5
 25 |     default_batch_size: int = 50
 26 | 
 27 |     # Data Loading Defaults
 28 |     default_years_of_data: int = 2
 29 |     min_stock_price: float = 5.0  # Minimum stock price for screening
 30 |     min_volume: int = 100000  # Minimum daily volume
 31 | 
 32 |     # Technical Indicators
 33 |     rsi_period: int = 14
 34 |     sma_periods: list[int] | None = None  # populated in __post_init__
 35 |     ema_periods: list[int] | None = None  # populated in __post_init__
 36 |     macd_fast: int = 12
 37 |     macd_slow: int = 26
 38 |     macd_signal: int = 9
 39 |     bollinger_period: int = 20
 40 |     bollinger_std: float = 2.0
 41 |     atr_period: int = 14
 42 |     adx_period: int = 14
 43 |     stoch_k_period: int = 14
 44 |     stoch_d_period: int = 3
 45 |     stoch_smooth: int = 3
 46 | 
 47 |     # Screening Criteria
 48 |     maverick_min_momentum_score: float = 70.0
 49 |     maverick_min_volume: int = 500000
 50 | 
 51 |     bear_max_momentum_score: float = 30.0
 52 |     bear_min_volume: int = 300000
 53 | 
 54 |     supply_demand_min_momentum_score: float = 60.0
 55 |     supply_demand_min_volume: int = 400000
 56 | 
 57 |     # Progress Tracking
 58 |     checkpoint_interval: int = 10  # Save checkpoint every N symbols
 59 | 
 60 |     def __post_init__(self):
 61 |         if self.sma_periods is None:
 62 |             self.sma_periods = [20, 50, 150, 200]
 63 |         if self.ema_periods is None:
 64 |             self.ema_periods = [21]
 65 | 
 66 | 
 67 | # Market sectors for filtering
 68 | MARKET_SECTORS = {
 69 |     "technology": [
 70 |         "AAPL",
 71 |         "MSFT",
 72 |         "GOOGL",
 73 |         "AMZN",
 74 |         "META",
 75 |         "NVDA",
 76 |         "ADBE",
 77 |         "CRM",
 78 |         "INTC",
 79 |         "AMD",
 80 |         "ORCL",
 81 |         "IBM",
 82 |         "NFLX",
 83 |         "CSCO",
 84 |         "ACN",
 85 |         "TXN",
 86 |         "QCOM",
 87 |         "NOW",
 88 |         "SNPS",
 89 |         "LRCX",
 90 |     ],
 91 |     "healthcare": [
 92 |         "UNH",
 93 |         "JNJ",
 94 |         "PFE",
 95 |         "ABBV",
 96 |         "TMO",
 97 |         "ABT",
 98 |         "BMY",
 99 |         "MDT",
100 |         "GILD",
101 |         "REGN",
102 |         "ISRG",
103 |         "ZTS",
104 |         "BSX",
105 |         "BDX",
106 |         "SYK",
107 |         "EL",
108 |         "CVS",
109 |         "ANTM",
110 |         "CI",
111 |         "HUM",
112 |     ],
113 |     "financial": [
114 |         "JPM",
115 |         "BAC",
116 |         "WFC",
117 |         "GS",
118 |         "MS",
119 |         "AXP",
120 |         "BLK",
121 |         "C",
122 |         "USB",
123 |         "PNC",
124 |         "SCHW",
125 |         "CB",
126 |         "AON",
127 |         "ICE",
128 |         "CME",
129 |         "SPGI",
130 |         "MCO",
131 |         "TRV",
132 |         "ALL",
133 |         "AIG",
134 |     ],
135 |     "consumer_discretionary": [
136 |         "HD",
137 |         "WMT",
138 |         "DIS",
139 |         "NKE",
140 |         "COST",
141 |         "TJX",
142 |         "SBUX",
143 |         "TGT",
144 |         "MAR",
145 |         "GM",
146 |         "F",
147 |         "CCL",
148 |         "RCL",
149 |         "NCLH",
150 |         "TSLA",
151 |         "ETSY",
152 |         "EBAY",
153 |         "BKNG",
154 |         "EXPE",
155 |         "YUM",
156 |     ],
157 |     "energy": [
158 |         "CVX",
159 |         "EOG",
160 |         "SLB",
161 |         "COP",
162 |         "PSX",
163 |         "VLO",
164 |         "MPC",
165 |         "PXD",
166 |         "KMI",
167 |         "OXY",
168 |         "WMB",
169 |         "HAL",
170 |         "BKR",
171 |         "DVN",
172 |         "FANG",
173 |         "APA",
174 |         "MRO",
175 |         "XOM",
176 |         "CTRA",
177 |         "OKE",
178 |     ],
179 |     "industrials": [
180 |         "CAT",
181 |         "BA",
182 |         "HON",
183 |         "UPS",
184 |         "GE",
185 |         "MMM",
186 |         "ITW",
187 |         "DE",
188 |         "EMR",
189 |         "CSX",
190 |         "NSC",
191 |         "FDX",
192 |         "LMT",
193 |         "RTX",
194 |         "NOC",
195 |         "GD",
196 |         "WM",
197 |         "RSG",
198 |         "PCAR",
199 |         "IR",
200 |     ],
201 | }
202 | 
203 | # Trading strategy configurations
204 | TRADING_STRATEGIES = {
205 |     "momentum": {
206 |         "min_momentum_score": 80,
207 |         "min_price_above_sma50": True,
208 |         "min_price_above_sma200": True,
209 |         "min_volume_ratio": 1.2,
210 |         "max_rsi": 80,
211 |         "required_indicators": ["RSI_14", "SMA_50", "SMA_200", "MOMENTUM_SCORE"],
212 |     },
213 |     "value": {
214 |         "max_pe_ratio": 20,
215 |         "min_dividend_yield": 2.0,
216 |         "max_price_to_book": 3.0,
217 |         "min_market_cap": 1_000_000_000,  # $1B
218 |         "required_fundamentals": ["pe_ratio", "dividend_yield", "price_to_book"],
219 |     },
220 |     "breakout": {
221 |         "min_bb_squeeze_days": 20,
222 |         "min_consolidation_days": 30,
223 |         "min_volume_breakout_ratio": 2.0,
224 |         "min_price_breakout_pct": 0.05,  # 5%
225 |         "required_indicators": ["BB_UPPER", "BB_LOWER", "VOLUME", "ATR_14"],
226 |     },
227 |     "mean_reversion": {
228 |         "max_rsi": 30,  # Oversold
229 |         "min_bb_position": -2.0,  # Below lower Bollinger Band
230 |         "max_distance_from_sma50": -0.10,  # 10% below SMA50
231 |         "min_momentum_score": 40,  # Not completely broken
232 |         "required_indicators": ["RSI_14", "BB_LOWER", "SMA_50", "MOMENTUM_SCORE"],
233 |     },
234 | }
235 | 
236 | # Symbol lists for different markets/exchanges
237 | SYMBOL_LISTS = {
238 |     "sp500_top_100": [
239 |         "AAPL",
240 |         "MSFT",
241 |         "GOOGL",
242 |         "AMZN",
243 |         "TSLA",
244 |         "META",
245 |         "NVDA",
246 |         "BRK.B",
247 |         "UNH",
248 |         "JNJ",
249 |         "V",
250 |         "PG",
251 |         "JPM",
252 |         "HD",
253 |         "CVX",
254 |         "MA",
255 |         "PFE",
256 |         "ABBV",
257 |         "BAC",
258 |         "KO",
259 |         "AVGO",
260 |         "PEP",
261 |         "TMO",
262 |         "COST",
263 |         "WMT",
264 |         "DIS",
265 |         "ABT",
266 |         "ACN",
267 |         "NFLX",
268 |         "ADBE",
269 |         "CRM",
270 |         "VZ",
271 |         "DHR",
272 |         "INTC",
273 |         "NKE",
274 |         "T",
275 |         "TXN",
276 |         "BMY",
277 |         "QCOM",
278 |         "PM",
279 |         "UPS",
280 |         "HON",
281 |         "ORCL",
282 |         "WFC",
283 |         "LOW",
284 |         "LIN",
285 |         "AMD",
286 |         "SBUX",
287 |         "IBM",
288 |         "GE",
289 |         "CAT",
290 |         "MDT",
291 |         "BA",
292 |         "AXP",
293 |         "GILD",
294 |         "RTX",
295 |         "GS",
296 |         "BLK",
297 |         "MMM",
298 |         "CVS",
299 |         "ISRG",
300 |         "NOW",
301 |         "AMT",
302 |         "SPGI",
303 |         "PLD",
304 |         "SYK",
305 |         "TJX",
306 |         "MDLZ",
307 |         "ZTS",
308 |         "MO",
309 |         "CB",
310 |         "CI",
311 |         "PYPL",
312 |         "SO",
313 |         "EL",
314 |         "DE",
315 |         "REGN",
316 |         "CCI",
317 |         "USB",
318 |         "BSX",
319 |         "DUK",
320 |         "AON",
321 |         "CSX",
322 |         "CL",
323 |         "ITW",
324 |         "PNC",
325 |         "FCX",
326 |         "SCHW",
327 |         "EMR",
328 |         "NSC",
329 |         "GM",
330 |         "FDX",
331 |         "MU",
332 |         "BDX",
333 |         "TGT",
334 |         "EOG",
335 |         "SLB",
336 |         "ICE",
337 |         "EQIX",
338 |         "APD",
339 |     ],
340 |     "nasdaq_100": [
341 |         "AAPL",
342 |         "MSFT",
343 |         "GOOGL",
344 |         "AMZN",
345 |         "TSLA",
346 |         "META",
347 |         "NVDA",
348 |         "ADBE",
349 |         "NFLX",
350 |         "CRM",
351 |         "INTC",
352 |         "AMD",
353 |         "QCOM",
354 |         "TXN",
355 |         "AVGO",
356 |         "ORCL",
357 |         "CSCO",
358 |         "PEP",
359 |         "COST",
360 |         "SBUX",
361 |         "PYPL",
362 |         "GILD",
363 |         "REGN",
364 |         "ISRG",
365 |         "BKNG",
366 |         "ZM",
367 |         "DOCU",
368 |         "ZOOM",
369 |         "DXCM",
370 |         "BIIB",
371 |     ],
372 |     "dow_30": [
373 |         "AAPL",
374 |         "MSFT",
375 |         "UNH",
376 |         "GS",
377 |         "HD",
378 |         "CAT",
379 |         "AMGN",
380 |         "MCD",
381 |         "V",
382 |         "BA",
383 |         "TRV",
384 |         "AXP",
385 |         "JPM",
386 |         "IBM",
387 |         "PG",
388 |         "CVX",
389 |         "NKE",
390 |         "JNJ",
391 |         "WMT",
392 |         "DIS",
393 |         "MMM",
394 |         "DOW",
395 |         "KO",
396 |         "CSCO",
397 |         "HON",
398 |         "CRM",
399 |         "VZ",
400 |         "INTC",
401 |         "WBA",
402 |         "MRK",
403 |     ],
404 |     "growth_stocks": [
405 |         "TSLA",
406 |         "NVDA",
407 |         "AMD",
408 |         "NFLX",
409 |         "CRM",
410 |         "ADBE",
411 |         "SNOW",
412 |         "PLTR",
413 |         "SQ",
414 |         "ROKU",
415 |         "ZOOM",
416 |         "DOCU",
417 |         "TWLO",
418 |         "OKTA",
419 |         "DDOG",
420 |         "CRWD",
421 |         "NET",
422 |         "FSLY",
423 |         "TTD",
424 |         "TEAM",
425 |     ],
426 |     "dividend_stocks": [
427 |         "JNJ",
428 |         "PG",
429 |         "KO",
430 |         "PEP",
431 |         "WMT",
432 |         "HD",
433 |         "ABT",
434 |         "MCD",
435 |         "VZ",
436 |         "T",
437 |         "CVX",
438 |         "XOM",
439 |         "PM",
440 |         "MO",
441 |         "MMM",
442 |         "CAT",
443 |         "IBM",
444 |         "GE",
445 |         "BA",
446 |         "DIS",
447 |     ],
448 | }
449 | 
450 | 
451 | # Environment-specific configurations
452 | def get_config_for_environment(env: str | None = None) -> TiingoConfig:
453 |     """Get configuration based on environment."""
454 |     env = env or os.getenv("ENVIRONMENT", "development")
455 | 
456 |     if env == "production":
457 |         return TiingoConfig(
458 |             max_concurrent_requests=10,  # Higher concurrency in production
459 |             default_batch_size=100,  # Larger batches
460 |             rate_limit_per_hour=5000,  # Assuming paid Tiingo plan
461 |             checkpoint_interval=5,  # More frequent checkpoints
462 |         )
463 |     elif env == "testing":
464 |         return TiingoConfig(
465 |             max_concurrent_requests=2,  # Lower concurrency for tests
466 |             default_batch_size=10,  # Smaller batches
467 |             default_years_of_data=1,  # Less data for faster tests
468 |             checkpoint_interval=2,  # Frequent checkpoints for testing
469 |         )
470 |     else:  # development
471 |         return TiingoConfig()  # Default configuration
472 | 
473 | 
474 | # Screening algorithm configurations
475 | SCREENING_CONFIGS = {
476 |     "maverick_momentum": {
477 |         "price_above_ema21": True,
478 |         "ema21_above_sma50": True,
479 |         "sma50_above_sma200": True,
480 |         "min_momentum_score": 70,
481 |         "min_volume": 500000,
482 |         "min_price": 10.0,
483 |         "scoring_weights": {
484 |             "price_above_ema21": 2,
485 |             "ema21_above_sma50": 2,
486 |             "sma50_above_sma200": 3,
487 |             "momentum_score_80plus": 3,
488 |             "momentum_score_70plus": 2,
489 |             "volume_above_avg": 1,
490 |         },
491 |     },
492 |     "bear_market": {
493 |         "price_below_ema21": True,
494 |         "ema21_below_sma50": True,
495 |         "max_momentum_score": 30,
496 |         "min_volume": 300000,
497 |         "min_price": 5.0,
498 |         "scoring_weights": {
499 |             "price_below_ema21": 2,
500 |             "ema21_below_sma50": 2,
501 |             "momentum_score_below_20": 3,
502 |             "momentum_score_below_30": 2,
503 |             "high_volume_decline": 2,
504 |         },
505 |     },
506 |     "supply_demand": {
507 |         "price_above_sma50": True,
508 |         "sma50_above_sma200": True,
509 |         "min_momentum_score": 60,
510 |         "min_volume": 400000,
511 |         "min_price": 8.0,
512 |         "accumulation_signals": [
513 |             "tight_consolidation",
514 |             "volume_dry_up",
515 |             "relative_strength",
516 |             "institutional_buying",
517 |         ],
518 |     },
519 | }
520 | 
521 | # Database optimization settings
522 | DATABASE_CONFIG = {
523 |     "batch_insert_size": 1000,
524 |     "connection_pool_size": 20,
525 |     "statement_timeout": 30000,  # 30 seconds
526 |     "bulk_operations": True,
527 |     "indexes_to_create": [
528 |         "idx_price_cache_symbol_date",
529 |         "idx_technical_cache_symbol_indicator",
530 |         "idx_maverick_stocks_score",
531 |         "idx_stocks_sector_industry",
532 |     ],
533 | }
534 | 
535 | # Logging configuration
536 | LOGGING_CONFIG = {
537 |     "version": 1,
538 |     "disable_existing_loggers": False,
539 |     "formatters": {
540 |         "detailed": {
541 |             "format": "%(asctime)s - %(name)s - %(levelname)s - %(funcName)s:%(lineno)d - %(message)s"
542 |         },
543 |         "simple": {"format": "%(asctime)s - %(levelname)s - %(message)s"},
544 |     },
545 |     "handlers": {
546 |         "console": {
547 |             "class": "logging.StreamHandler",
548 |             "level": "INFO",
549 |             "formatter": "simple",
550 |             "stream": "ext://sys.stdout",
551 |         },
552 |         "file": {
553 |             "class": "logging.FileHandler",
554 |             "level": "DEBUG",
555 |             "formatter": "detailed",
556 |             "filename": "tiingo_loader.log",
557 |             "mode": "a",
558 |         },
559 |         "error_file": {
560 |             "class": "logging.FileHandler",
561 |             "level": "ERROR",
562 |             "formatter": "detailed",
563 |             "filename": "tiingo_errors.log",
564 |             "mode": "a",
565 |         },
566 |     },
567 |     "loggers": {
568 |         "tiingo_data_loader": {
569 |             "level": "DEBUG",
570 |             "handlers": ["console", "file", "error_file"],
571 |             "propagate": False,
572 |         }
573 |     },
574 | }
575 | 
576 | 
577 | def get_symbols_for_strategy(strategy: str) -> list[str]:
578 |     """Get symbol list based on trading strategy."""
579 |     if strategy in SYMBOL_LISTS:
580 |         return SYMBOL_LISTS[strategy]
581 |     elif strategy in MARKET_SECTORS:
582 |         return MARKET_SECTORS[strategy]
583 |     else:
584 |         return SYMBOL_LISTS["sp500_top_100"]  # Default
585 | 
586 | 
587 | def get_screening_config(screen_type: str) -> dict[str, Any]:
588 |     """Get screening configuration for specified type."""
589 |     return SCREENING_CONFIGS.get(screen_type, SCREENING_CONFIGS["maverick_momentum"])
590 | 
591 | 
592 | # Default configuration instance
593 | default_config = get_config_for_environment()
594 | 
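# --- Usage sketch (illustrative addition; not part of the original module) ---
# Pick an environment config, a symbol universe, and a screening recipe:
#
#     config = get_config_for_environment("production")
#     assert config.max_concurrent_requests == 10
#     symbols = get_symbols_for_strategy("technology")  # sector or list name;
#                                                       # unknown keys fall back
#                                                       # to sp500_top_100
#     screen = get_screening_config("bear_market")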
```

--------------------------------------------------------------------------------
/maverick_mcp/domain/services/technical_analysis_service.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Technical analysis domain service.
  3 | 
  4 | This service contains pure business logic for technical analysis calculations.
  5 | It has no dependencies on infrastructure, databases, or external APIs.
  6 | """
  7 | 
  8 | import numpy as np
  9 | import pandas as pd
 10 | 
 11 | from maverick_mcp.domain.value_objects.technical_indicators import (
 12 |     BollingerBands,
 13 |     MACDIndicator,
 14 |     PriceLevel,
 15 |     RSIIndicator,
 16 |     Signal,
 17 |     StochasticOscillator,
 18 |     TrendDirection,
 19 |     VolumeProfile,
 20 | )
 21 | 
 22 | 
 23 | class TechnicalAnalysisService:
 24 |     """
 25 |     Domain service for technical analysis calculations.
 26 | 
 27 |     This service contains pure business logic and mathematical calculations
 28 |     for technical indicators. It operates on price data and returns
 29 |     domain value objects.
 30 |     """
 31 | 
 32 |     def calculate_rsi(self, prices: pd.Series, period: int = 14) -> RSIIndicator:
 33 |         """
 34 |         Calculate the Relative Strength Index.
 35 | 
 36 |         Args:
 37 |             prices: Series of closing prices
 38 |             period: RSI period (default: 14)
 39 | 
 40 |         Returns:
 41 |             RSIIndicator value object
 42 |         """
 43 |         if len(prices) < period:
 44 |             raise ValueError(f"Need at least {period} prices to calculate RSI")
 45 | 
 46 |         # Calculate price changes
 47 |         delta = prices.diff()
 48 | 
 49 |         # Separate gains and losses
 50 |         gains = delta.where(delta > 0, 0)
 51 |         losses = -delta.where(delta < 0, 0)
 52 | 
 53 |         # Calculate average gains and losses
 54 |         avg_gain = gains.rolling(window=period).mean()
 55 |         avg_loss = losses.rolling(window=period).mean()
 56 | 
 57 |         # Calculate RS and RSI across the full series; a zero average
 58 |         # loss produces an infinite RS, which saturates RSI at 100
 59 |         rs = avg_gain / avg_loss
 60 |         rsi = 100 - (100 / (1 + rs))
 61 | 
 62 |         # Get the latest RSI value (saturating when there are no losses)
 63 |         current_rsi = 100.0 if avg_loss.iloc[-1] == 0 else float(rsi.iloc[-1])
 64 | 
 65 |         return RSIIndicator(value=current_rsi, period=period)
 66 | 
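    # Usage sketch (illustrative): given a pandas Series of closing prices,
    #
    #     service = TechnicalAnalysisService()
    #     rsi = service.calculate_rsi(closes, period=14)
    #     rsi.value  # latest reading on the 0-100 scale
    #
    # The same call pattern applies to the other calculate_* methods below.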
 67 |     def calculate_macd(
 68 |         self,
 69 |         prices: pd.Series,
 70 |         fast_period: int = 12,
 71 |         slow_period: int = 26,
 72 |         signal_period: int = 9,
 73 |     ) -> MACDIndicator:
 74 |         """
 75 |         Calculate MACD (Moving Average Convergence Divergence).
 76 | 
 77 |         Args:
 78 |             prices: Series of closing prices
 79 |             fast_period: Fast EMA period (default: 12)
 80 |             slow_period: Slow EMA period (default: 26)
 81 |             signal_period: Signal line EMA period (default: 9)
 82 | 
 83 |         Returns:
 84 |             MACDIndicator value object
 85 |         """
 86 |         if len(prices) < slow_period:
 87 |             raise ValueError(f"Need at least {slow_period} prices to calculate MACD")
 88 | 
 89 |         # Calculate EMAs
 90 |         ema_fast = prices.ewm(span=fast_period, adjust=False).mean()
 91 |         ema_slow = prices.ewm(span=slow_period, adjust=False).mean()
 92 | 
 93 |         # Calculate MACD line
 94 |         macd_line = ema_fast - ema_slow
 95 | 
 96 |         # Calculate signal line
 97 |         signal_line = macd_line.ewm(span=signal_period, adjust=False).mean()
 98 | 
 99 |         # Calculate histogram
100 |         histogram = macd_line - signal_line
101 | 
102 |         # Get current values
103 |         current_macd = float(macd_line.iloc[-1])
104 |         current_signal = float(signal_line.iloc[-1])
105 |         current_histogram = float(histogram.iloc[-1])
106 | 
107 |         return MACDIndicator(
108 |             macd_line=current_macd,
109 |             signal_line=current_signal,
110 |             histogram=current_histogram,
111 |             fast_period=fast_period,
112 |             slow_period=slow_period,
113 |             signal_period=signal_period,
114 |         )
115 | 
116 |     def calculate_bollinger_bands(
117 |         self, prices: pd.Series, period: int = 20, std_dev: int = 2
118 |     ) -> BollingerBands:
119 |         """
120 |         Calculate Bollinger Bands.
121 | 
122 |         Args:
123 |             prices: Series of closing prices
124 |             period: Moving average period (default: 20)
125 |             std_dev: Number of standard deviations (default: 2)
126 | 
127 |         Returns:
128 |             BollingerBands value object
129 |         """
130 |         if len(prices) < period:
131 |             raise ValueError(
132 |                 f"Need at least {period} prices to calculate Bollinger Bands"
133 |             )
134 | 
135 |         # Calculate middle band (SMA)
136 |         middle_band = prices.rolling(window=period).mean()
137 | 
138 |         # Calculate standard deviation
139 |         std = prices.rolling(window=period).std()
140 | 
141 |         # Calculate upper and lower bands
142 |         upper_band = middle_band + (std * std_dev)
143 |         lower_band = middle_band - (std * std_dev)
144 | 
145 |         # Get current values
146 |         current_price = float(prices.iloc[-1])
147 |         current_upper = float(upper_band.iloc[-1])
148 |         current_middle = float(middle_band.iloc[-1])
149 |         current_lower = float(lower_band.iloc[-1])
150 | 
151 |         return BollingerBands(
152 |             upper_band=current_upper,
153 |             middle_band=current_middle,
154 |             lower_band=current_lower,
155 |             current_price=current_price,
156 |             period=period,
157 |             std_dev=std_dev,
158 |         )
159 | 
160 |     def calculate_stochastic(
161 |         self, high: pd.Series, low: pd.Series, close: pd.Series, period: int = 14
162 |     ) -> StochasticOscillator:
163 |         """
164 |         Calculate Stochastic Oscillator.
165 | 
166 |         Args:
167 |             high: Series of high prices
168 |             low: Series of low prices
169 |             close: Series of closing prices
170 |             period: Look-back period (default: 14)
171 | 
172 |         Returns:
173 |             StochasticOscillator value object
174 |         """
175 |         if len(close) < period:
176 |             raise ValueError(f"Need at least {period} prices to calculate Stochastic")
177 | 
178 |         # Calculate %K
179 |         lowest_low = low.rolling(window=period).min()
180 |         highest_high = high.rolling(window=period).max()
181 |         k_percent = 100 * ((close - lowest_low) / (highest_high - lowest_low))
182 | 
183 |         # Calculate %D (3-period SMA of %K)
184 |         d_percent = k_percent.rolling(window=3).mean()
185 | 
186 |         # Get current values
187 |         current_k = float(k_percent.iloc[-1])
188 |         current_d = float(d_percent.iloc[-1])
189 | 
190 |         return StochasticOscillator(k_value=current_k, d_value=current_d, period=period)
191 | 
192 |     def identify_trend(self, prices: pd.Series, period: int = 50) -> TrendDirection:
193 |         """
194 |         Identify the current price trend.
195 | 
196 |         Args:
197 |             prices: Series of closing prices
198 |             period: Period for trend calculation (default: 50)
199 | 
200 |         Returns:
201 |             TrendDirection enum value
202 |         """
203 |         if len(prices) < period:
204 |             return TrendDirection.SIDEWAYS
205 | 
206 |         # Calculate moving averages
207 |         sma_short = prices.rolling(window=period // 2).mean()
208 |         sma_long = prices.rolling(window=period).mean()
209 | 
210 |         # Calculate trend strength
211 |         current_price = prices.iloc[-1]
212 |         short_ma = sma_short.iloc[-1]
213 |         long_ma = sma_long.iloc[-1]
214 | 
215 |         # Calculate percentage differences
216 |         price_vs_short = (current_price - short_ma) / short_ma * 100
217 |         short_vs_long = (short_ma - long_ma) / long_ma * 100
218 | 
219 |         # Determine trend
220 |         if price_vs_short > 5 and short_vs_long > 3:
221 |             return TrendDirection.STRONG_UPTREND
222 |         elif price_vs_short > 2 and short_vs_long > 1:
223 |             return TrendDirection.UPTREND
224 |         elif price_vs_short < -5 and short_vs_long < -3:
225 |             return TrendDirection.STRONG_DOWNTREND
226 |         elif price_vs_short < -2 and short_vs_long < -1:
227 |             return TrendDirection.DOWNTREND
228 |         else:
229 |             return TrendDirection.SIDEWAYS
230 | 
231 |     def analyze_volume(self, volume: pd.Series, period: int = 20) -> VolumeProfile:
232 |         """
233 |         Analyze volume patterns.
234 | 
235 |         Args:
236 |             volume: Series of volume data
237 |             period: Period for average calculation (default: 20)
238 | 
239 |         Returns:
240 |             VolumeProfile value object
241 |         """
242 |         if len(volume) < period:
243 |             raise ValueError(f"Need at least {period} volume data points")
244 | 
245 |         # Calculate average volume
246 |         avg_volume = float(volume.rolling(window=period).mean().iloc[-1])
247 |         current_volume = int(volume.iloc[-1])
248 | 
249 |         # Determine volume trend
250 |         recent_avg = float(volume.tail(5).mean())
251 |         older_avg = float(volume.iloc[-period:-5].mean())
252 | 
253 |         if recent_avg > older_avg * 1.2:
254 |             volume_trend = TrendDirection.UPTREND
255 |         elif recent_avg < older_avg * 0.8:
256 |             volume_trend = TrendDirection.DOWNTREND
257 |         else:
258 |             volume_trend = TrendDirection.SIDEWAYS
259 | 
260 |         # Check for unusual activity
261 |         unusual_activity = current_volume > avg_volume * 2
262 | 
263 |         return VolumeProfile(
264 |             current_volume=current_volume,
265 |             average_volume=avg_volume,
266 |             volume_trend=volume_trend,
267 |             unusual_activity=unusual_activity,
268 |         )
269 | 
270 |     def calculate_composite_signal(
271 |         self,
272 |         rsi: RSIIndicator | None = None,
273 |         macd: MACDIndicator | None = None,
274 |         bollinger: BollingerBands | None = None,
275 |         stochastic: StochasticOscillator | None = None,
276 |     ) -> Signal:
277 |         """
278 |         Calculate a composite trading signal from multiple indicators.
279 | 
280 |         Args:
281 |             rsi: RSI indicator
282 |             macd: MACD indicator
283 |             bollinger: Bollinger Bands indicator
284 |             stochastic: Stochastic indicator
285 | 
286 |         Returns:
287 |             Composite Signal
288 |         """
289 |         signals = []
290 |         weights = []
291 | 
292 |         # Collect signals and weights
293 |         if rsi:
294 |             signals.append(rsi.signal)
295 |             weights.append(2.0)  # RSI has higher weight
296 | 
297 |         if macd:
298 |             signals.append(macd.signal)
299 |             weights.append(1.5)  # MACD has medium weight
300 | 
301 |         if bollinger:
302 |             signals.append(bollinger.signal)
303 |             weights.append(1.0)
304 | 
305 |         if stochastic:
306 |             signals.append(stochastic.signal)
307 |             weights.append(1.0)
308 | 
309 |         if not signals:
310 |             return Signal.NEUTRAL
311 | 
312 |         # Convert signals to numeric scores
313 |         signal_scores = {
314 |             Signal.STRONG_BUY: 2,
315 |             Signal.BUY: 1,
316 |             Signal.NEUTRAL: 0,
317 |             Signal.SELL: -1,
318 |             Signal.STRONG_SELL: -2,
319 |         }
320 | 
321 |         # Calculate weighted average
322 |         total_score = sum(
323 |             signal_scores[signal] * weight
324 |             for signal, weight in zip(signals, weights, strict=False)
325 |         )
326 |         total_weight = sum(weights)
327 |         avg_score = total_score / total_weight
328 | 
329 |         # Map back to signal
330 |         if avg_score >= 1.5:
331 |             return Signal.STRONG_BUY
332 |         elif avg_score >= 0.5:
333 |             return Signal.BUY
334 |         elif avg_score <= -1.5:
335 |             return Signal.STRONG_SELL
336 |         elif avg_score <= -0.5:
337 |             return Signal.SELL
338 |         else:
339 |             return Signal.NEUTRAL
340 | 
341 |     def find_support_levels(self, df: pd.DataFrame) -> list[PriceLevel]:
342 |         """
343 |         Find support levels in the price data.
344 | 
345 |         Args:
346 |             df: DataFrame with OHLC price data
347 | 
348 |         Returns:
349 |             List of support PriceLevel objects
350 |         """
351 |         lows = df["low"].rolling(window=20).min()
352 |         unique_levels = lows.dropna().unique()
353 | 
354 |         support_levels = []
355 |         current_price = df["close"].iloc[-1]
356 | 
357 |         # Filter for levels below current price first, then sort and take closest 5
358 |         below_current = [
359 |             level
360 |             for level in unique_levels
361 |             if level > 0 and level < current_price * 0.98
362 |         ]
363 | 
364 |         # Take the five closest levels below the current price
365 |         for level in sorted(below_current, reverse=True)[:5]:
366 |             # Safe division: the level > 0 filter above rules out
367 |             # division by zero
368 |             touches = len(df[abs(df["low"] - level) / level < 0.01])
369 |             strength = min(5, touches)
370 |             support_levels.append(
371 |                 PriceLevel(price=float(level), strength=strength, touches=touches)
372 |             )
373 | 
374 |         return support_levels
375 | 
376 |     def find_resistance_levels(self, df: pd.DataFrame) -> list[PriceLevel]:
377 |         """
378 |         Find resistance levels in the price data.
379 | 
380 |         Args:
381 |             df: DataFrame with OHLC price data
382 | 
383 |         Returns:
384 |             List of resistance PriceLevel objects
385 |         """
386 |         highs = df["high"].rolling(window=20).max()
387 |         unique_levels = highs.dropna().unique()
388 | 
389 |         resistance_levels = []
390 |         current_price = df["close"].iloc[-1]
391 | 
392 |         # Filter for levels above current price first, then sort and take closest 5
393 |         above_current = [
394 |             level
395 |             for level in unique_levels
396 |             if level > 0 and level > current_price * 1.02
397 |         ]
398 | 
399 |         for level in sorted(above_current)[:5]:  # Bottom 5 levels above current
400 |             # Safe division with level > 0 check above
401 |             touches = len(df[abs(df["high"] - level) / level < 0.01])
402 |             strength = min(5, touches)
403 |             resistance_levels.append(
404 |                 PriceLevel(price=float(level), strength=strength, touches=touches)
405 |             )
406 | 
407 |         return resistance_levels
408 | 
```

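The composite signal above is a weighted average of per-indicator scores mapped back onto the `Signal` scale. Here is a minimal sketch of that arithmetic with the scores inlined as plain integers; the indicator readings (RSI says BUY, MACD says STRONG_BUY, Bollinger is NEUTRAL) are hypothetical, not output from the service:

```python
# Weighted composite scoring, mirroring calculate_composite_signal above.
# The readings below are hypothetical examples, not real indicator output.
scores = {"STRONG_BUY": 2, "BUY": 1, "NEUTRAL": 0, "SELL": -1, "STRONG_SELL": -2}
readings = [("BUY", 2.0), ("STRONG_BUY", 1.5), ("NEUTRAL", 1.0)]  # (signal, weight)

total_score = sum(scores[s] * w for s, w in readings)  # 1*2.0 + 2*1.5 + 0*1.0 = 5.0
total_weight = sum(w for _, w in readings)             # 4.5
avg_score = total_score / total_weight                 # ~1.11

# Map back using the same thresholds as the method: >= 1.5 STRONG_BUY,
# >= 0.5 BUY, <= -1.5 STRONG_SELL, <= -0.5 SELL, otherwise NEUTRAL.
assert 0.5 <= avg_score < 1.5  # the composite resolves to BUY
```
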
--------------------------------------------------------------------------------
/maverick_mcp/config/database_self_contained.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Self-contained database configuration for Maverick-MCP.
  3 | 
  4 | This module provides database configuration that is completely independent
  5 | of external Django projects, using only mcp_ prefixed tables.
  6 | """
  7 | 
  8 | import logging
  9 | import os
 10 | 
 11 | from sqlalchemy import create_engine, text
 12 | from sqlalchemy.engine import Engine
 13 | from sqlalchemy.orm import sessionmaker
 14 | from sqlalchemy.pool import NullPool
 15 | 
 16 | from maverick_mcp.config.database import (
 17 |     DatabasePoolConfig,
 18 |     get_pool_config_from_settings,
 19 | )
 20 | from maverick_mcp.data.models import Base
 21 | 
 22 | logger = logging.getLogger("maverick_mcp.config.database_self_contained")
 23 | 
 24 | 
 25 | class SelfContainedDatabaseConfig:
 26 |     """Configuration for self-contained Maverick-MCP database."""
 27 | 
 28 |     def __init__(
 29 |         self,
 30 |         database_url: str | None = None,
 31 |         pool_config: DatabasePoolConfig | None = None,
 32 |     ):
 33 |         """
 34 |         Initialize self-contained database configuration.
 35 | 
 36 |         Args:
 37 |             database_url: Database connection URL. If None, will use environment variables
 38 |             pool_config: Database pool configuration. If None, will use settings-based config
 39 |         """
 40 |         self.database_url = database_url or self._get_database_url()
 41 |         self.pool_config = pool_config or get_pool_config_from_settings()
 42 |         self.engine: Engine | None = None
 43 |         self.SessionLocal: sessionmaker | None = None
 44 | 
 45 |     def _get_database_url(self) -> str:
 46 |         """Get database URL from environment variables."""
 47 |         # Use SQLite in-memory for GitHub Actions or other CI environments;
 48 |         # otherwise try multiple possible environment variable names
 49 |         if os.getenv("GITHUB_ACTIONS") == "true" or os.getenv("CI") == "true":
 50 |             return "sqlite:///:memory:"
 51 | 
 52 |         return (
 53 |             os.getenv("DATABASE_URL")  # Prefer standard DATABASE_URL
 54 |             or os.getenv("MCP_DATABASE_URL")
 55 |             or os.getenv("POSTGRES_URL")
 56 |             or "sqlite:///maverick_mcp.db"  # Default to SQLite for development
 57 |         )
 58 | 
 59 |     def create_engine(self) -> Engine:
 60 |         """Create and configure the database engine."""
 61 |         if self.engine is not None:
 62 |             return self.engine
 63 | 
 64 |         # Log database connection (without password)
 65 |         masked_url = self._mask_database_url(self.database_url)
 66 |         logger.info(f"Creating self-contained database engine: {masked_url}")
 67 | 
 68 |         # Determine if we should use connection pooling
 69 |         use_pooling = os.getenv("DB_USE_POOLING", "true").lower() == "true"
 70 | 
 71 |         if use_pooling:
 72 |             # Use QueuePool for production environments
 73 |             engine_kwargs = {
 74 |                 **self.pool_config.get_pool_kwargs(),
 75 |                 "connect_args": self._get_connect_args(),
 76 |                 "echo": os.getenv("DB_ECHO", "false").lower() == "true",
 77 |             }
 78 |         else:
 79 |             # Use NullPool for serverless/development environments
 80 |             engine_kwargs = {
 81 |                 "poolclass": NullPool,
 82 |                 "echo": os.getenv("DB_ECHO", "false").lower() == "true",
 83 |             }
 84 | 
 85 |         self.engine = create_engine(self.database_url, **engine_kwargs)
 86 | 
 87 |         # Set up pool monitoring if using pooled connections
 88 |         if use_pooling:
 89 |             self.pool_config.setup_pool_monitoring(self.engine)
 90 | 
 91 |         logger.info("Self-contained database engine created successfully")
 92 |         return self.engine
 93 | 
 94 |     def _mask_database_url(self, url: str) -> str:
 95 |         """Mask password in database URL for logging."""
 96 |         if "@" in url and "://" in url:
 97 |             parts = url.split("://", 1)
 98 |             if len(parts) == 2 and "@" in parts[1]:
 99 |                 user_pass, host_db = parts[1].split("@", 1)
100 |                 if ":" in user_pass:
101 |                     user, _ = user_pass.split(":", 1)
102 |                     return f"{parts[0]}://{user}:****@{host_db}"
103 |         return url
104 | 
105 |     def _get_connect_args(self) -> dict:
106 |         """Get connection arguments for the database engine."""
107 |         if "postgresql" in self.database_url:
108 |             return {
109 |                 "connect_timeout": 10,
110 |                 "application_name": "maverick_mcp_self_contained",
111 |                 "options": "-c statement_timeout=30000",  # 30 seconds
112 |             }
113 |         return {}
114 | 
115 |     def create_session_factory(self) -> sessionmaker:
116 |         """Create session factory."""
117 |         if self.SessionLocal is not None:
118 |             return self.SessionLocal
119 | 
120 |         if self.engine is None:
121 |             self.create_engine()
122 | 
123 |         self.SessionLocal = sessionmaker(
124 |             autocommit=False, autoflush=False, bind=self.engine
125 |         )
126 | 
127 |         logger.info("Session factory created for self-contained database")
128 |         return self.SessionLocal
129 | 
130 |     def create_tables(self, drop_first: bool = False) -> None:
131 |         """
132 |         Create all tables in the database.
133 | 
134 |         Args:
135 |             drop_first: If True, drop all tables first (useful for testing)
136 |         """
137 |         if self.engine is None:
138 |             self.create_engine()
139 | 
140 |         if drop_first:
141 |             logger.warning("Dropping all tables first (drop_first=True)")
142 |             Base.metadata.drop_all(bind=self.engine)
143 | 
144 |         logger.info("Creating all self-contained tables...")
145 |         Base.metadata.create_all(bind=self.engine)
146 |         logger.info("All self-contained tables created successfully")
147 | 
148 |     def validate_schema(self) -> bool:
149 |         """
150 |         Validate that all expected tables exist with mcp_ prefix.
151 | 
152 |         Returns:
153 |             True if schema is valid, False otherwise
154 |         """
155 |         if self.engine is None:
156 |             self.create_engine()
157 | 
158 |         expected_tables = {
159 |             "mcp_stocks",
160 |             "mcp_price_cache",
161 |             "mcp_maverick_stocks",
162 |             "mcp_maverick_bear_stocks",
163 |             "mcp_supply_demand_breakouts",
164 |             "mcp_technical_cache",
165 |             "mcp_users",  # From auth models
166 |             "mcp_api_keys",  # From auth models
167 |             "mcp_refresh_tokens",  # From auth models
168 |         }
169 | 
170 |         try:
171 |             # Get list of tables in database
172 |             with self.engine.connect() as conn:
173 |                 if "postgresql" in self.database_url:
174 |                     result = conn.execute(
175 |                         text("""
176 |                         SELECT table_name FROM information_schema.tables
177 |                         WHERE table_schema = 'public' AND table_name LIKE 'mcp_%'
178 |                     """)
179 |                     )
180 |                 elif "sqlite" in self.database_url:
181 |                     result = conn.execute(
182 |                         text("""
183 |                         SELECT name FROM sqlite_master
184 |                         WHERE type='table' AND name LIKE 'mcp_%'
185 |                     """)
186 |                     )
187 |                 else:
188 |                     logger.error(f"Unsupported database type: {self._mask_database_url(self.database_url)}")
189 |                     return False
190 | 
191 |                 existing_tables = {row[0] for row in result.fetchall()}
192 | 
193 |             # Check if all expected tables exist
194 |             missing_tables = expected_tables - existing_tables
195 |             extra_tables = existing_tables - expected_tables
196 | 
197 |             if missing_tables:
198 |                 logger.error(f"Missing expected tables: {missing_tables}")
199 |                 return False
200 | 
201 |             if extra_tables:
202 |                 logger.warning(f"Found unexpected mcp_ tables: {extra_tables}")
203 | 
204 |             logger.info(
205 |                 f"Schema validation passed. Found {len(existing_tables)} mcp_ tables"
206 |             )
207 |             return True
208 | 
209 |         except Exception as e:
210 |             logger.error(f"Schema validation failed: {e}")
211 |             return False
212 | 
213 |     def get_database_stats(self) -> dict:
214 |         """Get statistics about the self-contained database."""
215 |         if self.engine is None:
216 |             self.create_engine()
217 | 
218 |         stats = {
219 |             "database_url": self._mask_database_url(self.database_url),
220 |             "pool_config": self.pool_config.model_dump() if self.pool_config else None,
221 |             "tables": {},
222 |             "total_records": 0,
223 |         }
224 | 
225 |         table_queries = {
226 |             "mcp_stocks": "SELECT COUNT(*) FROM mcp_stocks",
227 |             "mcp_price_cache": "SELECT COUNT(*) FROM mcp_price_cache",
228 |             "mcp_maverick_stocks": "SELECT COUNT(*) FROM mcp_maverick_stocks",
229 |             "mcp_maverick_bear_stocks": "SELECT COUNT(*) FROM mcp_maverick_bear_stocks",
230 |             "mcp_supply_demand_breakouts": "SELECT COUNT(*) FROM mcp_supply_demand_breakouts",
231 |             "mcp_technical_cache": "SELECT COUNT(*) FROM mcp_technical_cache",
232 |         }
233 | 
234 |         try:
235 |             with self.engine.connect() as conn:
236 |                 for table, query in table_queries.items():
237 |                     try:
238 |                         result = conn.execute(text(query))
239 |                         count = result.scalar()
240 |                         stats["tables"][table] = count
241 |                         stats["total_records"] += count
242 |                     except Exception as e:
243 |                         stats["tables"][table] = f"Error: {e}"
244 | 
245 |         except Exception as e:
246 |             stats["error"] = str(e)
247 | 
248 |         return stats
249 | 
250 |     def close(self) -> None:
251 |         """Close database connections."""
252 |         if self.engine:
253 |             self.engine.dispose()
254 |             self.engine = None
255 |             self.SessionLocal = None
256 |             logger.info("Self-contained database connections closed")
257 | 
258 | 
259 | # Global instance for easy access
260 | _db_config: SelfContainedDatabaseConfig | None = None
261 | 
262 | 
263 | def get_self_contained_db_config() -> SelfContainedDatabaseConfig:
264 |     """Get or create the global self-contained database configuration."""
265 |     global _db_config
266 |     if _db_config is None:
267 |         _db_config = SelfContainedDatabaseConfig()
268 |     return _db_config
269 | 
270 | 
271 | def get_self_contained_engine() -> Engine:
272 |     """Get the self-contained database engine."""
273 |     return get_self_contained_db_config().create_engine()
274 | 
275 | 
276 | def get_self_contained_session_factory() -> sessionmaker:
277 |     """Get the self-contained session factory."""
278 |     return get_self_contained_db_config().create_session_factory()
279 | 
280 | 
281 | def init_self_contained_database(
282 |     database_url: str | None = None,
283 |     create_tables: bool = True,
284 |     validate_schema: bool = True,
285 | ) -> SelfContainedDatabaseConfig:
286 |     """
287 |     Initialize the self-contained database.
288 | 
289 |     Args:
290 |         database_url: Optional database URL override
291 |         create_tables: Whether to create tables if they don't exist
292 |         validate_schema: Whether to validate the schema after initialization
293 | 
294 |     Returns:
295 |         Configured SelfContainedDatabaseConfig instance
296 |     """
297 |     global _db_config
298 | 
299 |     if database_url:
300 |         _db_config = SelfContainedDatabaseConfig(database_url=database_url)
301 |     else:
302 |         _db_config = get_self_contained_db_config()
303 | 
304 |     # Create engine and session factory
305 |     _db_config.create_engine()
306 |     _db_config.create_session_factory()
307 | 
308 |     if create_tables:
309 |         _db_config.create_tables()
310 | 
311 |     if validate_schema:
312 |         if not _db_config.validate_schema():
313 |             logger.warning("Schema validation failed, but continuing...")
314 | 
315 |     logger.info("Self-contained database initialized successfully")
316 |     return _db_config
317 | 
318 | 
319 | # Context manager for database sessions
320 | class SelfContainedDatabaseSession:
321 |     """Context manager for self-contained database sessions."""
322 | 
323 |     def __init__(self):
324 |         self.session_factory = get_self_contained_session_factory()
325 |         self.session = None
326 | 
327 |     def __enter__(self):
328 |         self.session = self.session_factory()
329 |         return self.session
330 | 
331 |     def __exit__(self, exc_type, exc_val, exc_tb):
332 |         if self.session:
333 |             try:
334 |                 if exc_type is not None:
335 |                     self.session.rollback()
336 |                 else:
337 |                     self.session.commit()
338 |             except Exception:
339 |                 self.session.rollback()
340 |                 raise
341 |             finally:
342 |                 self.session.close()
343 | 
344 | 
345 | def get_self_contained_db_session():
346 |     """Get a context manager for self-contained database sessions."""
347 |     return SelfContainedDatabaseSession()
348 | 
349 | 
350 | # Migration helper
351 | def run_self_contained_migrations(alembic_config_path: str = "alembic.ini"):
352 |     """
353 |     Run migrations to ensure schema is up to date.
354 | 
355 |     Args:
356 |         alembic_config_path: Path to alembic configuration file
357 |     """
358 |     try:
359 |         from alembic.config import Config
360 | 
361 |         from alembic import command
362 | 
363 |         # Set up alembic config
364 |         alembic_cfg = Config(alembic_config_path)
365 | 
366 |         # Override database URL with self-contained URL
367 |         db_config = get_self_contained_db_config()
368 |         alembic_cfg.set_main_option("sqlalchemy.url", db_config.database_url)
369 | 
370 |         logger.info("Running self-contained database migrations...")
371 |         command.upgrade(alembic_cfg, "head")
372 |         logger.info("Self-contained database migrations completed successfully")
373 | 
374 |     except ImportError:
375 |         logger.error("Alembic not available. Cannot run migrations.")
376 |         raise
377 |     except Exception as e:
378 |         logger.error(f"Migration failed: {e}")
379 |         raise
380 | 
```

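A short usage sketch for the helpers above. It assumes the `maverick_mcp` package is importable and that a local SQLite file is acceptable; if `database_url` is omitted, the environment-variable fallbacks in `_get_database_url` apply:

```python
from sqlalchemy import text

from maverick_mcp.config.database_self_contained import (
    get_self_contained_db_session,
    init_self_contained_database,
)

# Initialize engine, session factory, and tables in one call.
config = init_self_contained_database(
    database_url="sqlite:///maverick_mcp.db",
    create_tables=True,
    validate_schema=True,  # logs a warning on failure but does not raise
)

# The context manager commits on success, rolls back on error,
# and always closes the session.
with get_self_contained_db_session() as session:
    session.execute(text("SELECT 1"))

print(config.get_database_stats()["tables"])
```
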
--------------------------------------------------------------------------------
/maverick_mcp/utils/batch_processing.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Batch processing utilities for efficient multi-symbol operations.
  3 | 
  4 | This module provides utilities for processing multiple stock symbols
  5 | efficiently using concurrent execution and batching strategies.
  6 | """
  7 | 
  8 | import asyncio
  9 | from collections.abc import Callable
 10 | from concurrent.futures import ThreadPoolExecutor, as_completed
 11 | from typing import Any, TypeVar
 12 | 
 13 | import pandas as pd
 14 | import yfinance as yf
 15 | 
 16 | from maverick_mcp.providers.stock_data import EnhancedStockDataProvider
 17 | from maverick_mcp.utils.logging import get_logger
 18 | 
 19 | logger = get_logger(__name__)
 20 | 
 21 | T = TypeVar("T")
 22 | 
 23 | 
 24 | class BatchProcessor:
 25 |     """
 26 |     Utility class for efficient batch processing of stock operations.
 27 | 
 28 |     Provides methods for processing multiple symbols concurrently
 29 |     with proper error handling and resource management.
 30 |     """
 31 | 
 32 |     def __init__(self, max_workers: int = 10, batch_size: int = 50):
 33 |         """
 34 |         Initialize batch processor.
 35 | 
 36 |         Args:
 37 |             max_workers: Maximum number of concurrent workers
 38 |             batch_size: Maximum number of symbols per batch
 39 |         """
 40 |         self.max_workers = max_workers
 41 |         self.batch_size = batch_size
 42 |         self.executor = ThreadPoolExecutor(max_workers=max_workers)
 43 | 
 44 |     def __enter__(self):
 45 |         """Context manager entry."""
 46 |         return self
 47 | 
 48 |     def __exit__(self, exc_type, exc_val, exc_tb):
 49 |         """Context manager exit with cleanup."""
 50 |         self.executor.shutdown(wait=True)
 51 | 
 52 |     def process_symbols_concurrent(
 53 |         self,
 54 |         symbols: list[str],
 55 |         processor_func: Callable[[str], T],
 56 |         error_default: T | None = None,
 57 |     ) -> dict[str, T]:
 58 |         """
 59 |         Process multiple symbols concurrently using ThreadPoolExecutor.
 60 | 
 61 |         Args:
 62 |             symbols: List of stock symbols to process
 63 |             processor_func: Function to apply to each symbol
 64 |             error_default: Default value to return on error
 65 | 
 66 |         Returns:
 67 |             Dictionary mapping symbols to their processed results
 68 |         """
 69 |         results = {}
 70 | 
 71 |         # Submit all tasks
 72 |         future_to_symbol = {
 73 |             self.executor.submit(processor_func, symbol): symbol for symbol in symbols
 74 |         }
 75 | 
 76 |         # Collect results as they complete
 77 |         for future in as_completed(future_to_symbol):
 78 |             symbol = future_to_symbol[future]
 79 |             try:
 80 |                 result = future.result()
 81 |                 results[symbol] = result
 82 |             except Exception as e:
 83 |                 logger.warning(f"Error processing {symbol}: {e}")
 84 |                 if error_default is not None:
 85 |                     results[symbol] = error_default
 86 | 
 87 |         return results
 88 | 
 89 |     async def process_symbols_async(
 90 |         self,
 91 |         symbols: list[str],
 92 |         async_processor_func: Callable[[str], Any],
 93 |         max_concurrent: int | None = None,
 94 |     ) -> dict[str, Any]:
 95 |         """
 96 |         Process multiple symbols asynchronously with concurrency limit.
 97 | 
 98 |         Args:
 99 |             symbols: List of stock symbols to process
100 |             async_processor_func: Async function to apply to each symbol
101 |             max_concurrent: Maximum concurrent operations (defaults to max_workers)
102 | 
103 |         Returns:
104 |             Dictionary mapping symbols to their processed results
105 |         """
106 |         if max_concurrent is None:
107 |             max_concurrent = self.max_workers
108 | 
109 |         semaphore = asyncio.Semaphore(max_concurrent)
110 | 
111 |         async def process_with_semaphore(symbol: str):
112 |             async with semaphore:
113 |                 try:
114 |                     return symbol, await async_processor_func(symbol)
115 |                 except Exception as e:
116 |                     logger.warning(f"Error processing {symbol}: {e}")
117 |                     return symbol, None
118 | 
119 |         # Process all symbols concurrently
120 |         tasks = [process_with_semaphore(symbol) for symbol in symbols]
121 |         results = await asyncio.gather(*tasks)
122 | 
123 |         # Convert to dictionary, filtering out None results
124 |         return {symbol: result for symbol, result in results if result is not None}
125 | 
126 |     def process_in_batches(
127 |         self,
128 |         symbols: list[str],
129 |         batch_processor_func: Callable[[list[str]], dict[str, T]],
130 |     ) -> dict[str, T]:
131 |         """
132 |         Process symbols in batches for improved efficiency.
133 | 
134 |         Args:
135 |             symbols: List of stock symbols to process
136 |             batch_processor_func: Function that processes a batch of symbols
137 | 
138 |         Returns:
139 |             Dictionary mapping symbols to their processed results
140 |         """
141 |         results = {}
142 | 
143 |         # Process symbols in batches
144 |         for i in range(0, len(symbols), self.batch_size):
145 |             batch = symbols[i : i + self.batch_size]
146 |             try:
147 |                 batch_results = batch_processor_func(batch)
148 |                 results.update(batch_results)
149 |             except Exception as e:
150 |                 logger.error(f"Error processing batch {i // self.batch_size + 1}: {e}")
151 |                 # Process individual symbols as fallback
152 |                 for symbol in batch:
153 |                     try:
154 |                         individual_result = batch_processor_func([symbol])
155 |                         results.update(individual_result)
156 |                     except Exception as e2:
157 |                         logger.warning(
158 |                             f"Error processing individual symbol {symbol}: {e2}"
159 |                         )
160 | 
161 |         return results
162 | 
163 | 
164 | class StockDataBatchProcessor:
165 |     """Specialized batch processor for stock data operations."""
166 | 
167 |     def __init__(self, provider: EnhancedStockDataProvider | None = None):
168 |         """Initialize with optional stock data provider."""
169 |         self.provider = provider or EnhancedStockDataProvider()
170 | 
171 |     def get_batch_stock_data(
172 |         self, symbols: list[str], start_date: str, end_date: str
173 |     ) -> dict[str, pd.DataFrame]:
174 |         """
175 |         Fetch stock data for multiple symbols efficiently using yfinance batch download.
176 | 
177 |         Args:
178 |             symbols: List of stock symbols
179 |             start_date: Start date (YYYY-MM-DD)
180 |             end_date: End date (YYYY-MM-DD)
181 | 
182 |         Returns:
183 |             Dictionary mapping symbols to their DataFrames
184 |         """
185 |         try:
186 |             # Use yfinance batch download for efficiency
187 |             tickers_str = " ".join(symbols)
188 |             data = yf.download(
189 |                 tickers_str,
190 |                 start=start_date,
191 |                 end=end_date,
192 |                 group_by="ticker",
193 |                 threads=True,
194 |                 progress=False,
195 |             )
196 | 
197 |             results = {}
198 | 
199 |             if len(symbols) == 1:
200 |                 # Single symbol case
201 |                 symbol = symbols[0]
202 |                 if not data.empty:
203 |                     # Standardize column names
204 |                     df = data.copy()
205 |                     if "Close" in df.columns:
206 |                         df.columns = df.columns.str.title()
207 |                     results[symbol] = df
208 |             else:
209 |                 # Multiple symbols case
210 |                 for symbol in symbols:
211 |                     try:
212 |                         if symbol in data.columns.get_level_values(0):
213 |                             symbol_data = data[symbol].copy()
214 |                             # Remove any NaN-only rows
215 |                             symbol_data = symbol_data.dropna(how="all")
216 |                             if not symbol_data.empty:
217 |                                 results[symbol] = symbol_data
218 |                     except Exception as e:
219 |                         logger.warning(f"Error extracting data for {symbol}: {e}")
220 | 
221 |             return results
222 | 
223 |         except Exception as e:
224 |             logger.error(f"Error in batch stock data download: {e}")
225 |             # Fallback to individual downloads
226 |             return self._fallback_individual_downloads(symbols, start_date, end_date)
227 | 
228 |     def _fallback_individual_downloads(
229 |         self, symbols: list[str], start_date: str, end_date: str
230 |     ) -> dict[str, pd.DataFrame]:
231 |         """Fallback to individual downloads if batch fails."""
232 |         results = {}
233 | 
234 |         with BatchProcessor(max_workers=5) as processor:
235 | 
236 |             def download_single(symbol: str) -> pd.DataFrame:
237 |                 try:
238 |                     return self.provider.get_stock_data(symbol, start_date, end_date)
239 |                 except Exception as e:
240 |                     logger.warning(f"Error downloading {symbol}: {e}")
241 |                     return pd.DataFrame()
242 | 
243 |             symbol_results = processor.process_symbols_concurrent(
244 |                 symbols, download_single, pd.DataFrame()
245 |             )
246 | 
247 |             # Filter out empty DataFrames
248 |             results = {
249 |                 symbol: df for symbol, df in symbol_results.items() if not df.empty
250 |             }
251 | 
252 |         return results
253 | 
254 |     async def get_batch_stock_data_async(
255 |         self, symbols: list[str], start_date: str, end_date: str
256 |     ) -> dict[str, pd.DataFrame]:
257 |         """
258 |         Async version of batch stock data fetching.
259 | 
260 |         Args:
261 |             symbols: List of stock symbols
262 |             start_date: Start date (YYYY-MM-DD)
263 |             end_date: End date (YYYY-MM-DD)
264 | 
265 |         Returns:
266 |             Dictionary mapping symbols to their DataFrames
267 |         """
268 |         loop = asyncio.get_running_loop()
269 |         return await loop.run_in_executor(
270 |             None, self.get_batch_stock_data, symbols, start_date, end_date
271 |         )
272 | 
273 |     def get_batch_stock_info(self, symbols: list[str]) -> dict[str, dict[str, Any]]:
274 |         """
275 |         Get stock info for multiple symbols efficiently.
276 | 
277 |         Args:
278 |             symbols: List of stock symbols
279 | 
280 |         Returns:
281 |             Dictionary mapping symbols to their info dictionaries
282 |         """
283 |         with BatchProcessor(max_workers=10) as processor:
284 | 
285 |             def get_info(symbol: str) -> dict[str, Any]:
286 |                 try:
287 |                     ticker = yf.Ticker(symbol)
288 |                     return ticker.info
289 |                 except Exception as e:
290 |                     logger.warning(f"Error getting info for {symbol}: {e}")
291 |                     return {}
292 | 
293 |             return processor.process_symbols_concurrent(symbols, get_info, {})
294 | 
295 |     def get_batch_technical_analysis(
296 |         self, symbols: list[str], days: int = 365
297 |     ) -> dict[str, pd.DataFrame]:
298 |         """
299 |         Get technical analysis for multiple symbols efficiently.
300 | 
301 |         Args:
302 |             symbols: List of stock symbols
303 |             days: Number of days of data
304 | 
305 |         Returns:
306 |             Dictionary mapping symbols to DataFrames with technical indicators
307 |         """
308 |         from maverick_mcp.utils.stock_helpers import get_stock_dataframe
309 | 
310 |         with BatchProcessor(max_workers=8) as processor:
311 | 
312 |             def get_analysis(symbol: str) -> pd.DataFrame:
313 |                 try:
314 |                     return get_stock_dataframe(symbol, days)
315 |                 except Exception as e:
316 |                     logger.warning(
317 |                         f"Error getting technical analysis for {symbol}: {e}"
318 |                     )
319 |                     return pd.DataFrame()
320 | 
321 |             results = processor.process_symbols_concurrent(
322 |                 symbols, get_analysis, pd.DataFrame()
323 |             )
324 | 
325 |             # Filter out empty DataFrames
326 |             return {symbol: df for symbol, df in results.items() if not df.empty}
327 | 
328 | 
329 | # Convenience functions for common batch operations
330 | 
331 | 
332 | def batch_download_stock_data(
333 |     symbols: list[str], start_date: str, end_date: str
334 | ) -> dict[str, pd.DataFrame]:
335 |     """
336 |     Convenience function for batch downloading stock data.
337 | 
338 |     Args:
339 |         symbols: List of stock symbols
340 |         start_date: Start date (YYYY-MM-DD)
341 |         end_date: End date (YYYY-MM-DD)
342 | 
343 |     Returns:
344 |         Dictionary mapping symbols to their DataFrames
345 |     """
346 |     processor = StockDataBatchProcessor()
347 |     return processor.get_batch_stock_data(symbols, start_date, end_date)
348 | 
349 | 
350 | async def batch_download_stock_data_async(
351 |     symbols: list[str], start_date: str, end_date: str
352 | ) -> dict[str, pd.DataFrame]:
353 |     """
354 |     Convenience function for async batch downloading stock data.
355 | 
356 |     Args:
357 |         symbols: List of stock symbols
358 |         start_date: Start date (YYYY-MM-DD)
359 |         end_date: End date (YYYY-MM-DD)
360 | 
361 |     Returns:
362 |         Dictionary mapping symbols to their DataFrames
363 |     """
364 |     processor = StockDataBatchProcessor()
365 |     return await processor.get_batch_stock_data_async(symbols, start_date, end_date)
366 | 
367 | 
368 | def batch_get_stock_info(symbols: list[str]) -> dict[str, dict[str, Any]]:
369 |     """
370 |     Convenience function for batch getting stock info.
371 | 
372 |     Args:
373 |         symbols: List of stock symbols
374 | 
375 |     Returns:
376 |         Dictionary mapping symbols to their info dictionaries
377 |     """
378 |     processor = StockDataBatchProcessor()
379 |     return processor.get_batch_stock_info(symbols)
380 | 
381 | 
382 | def batch_get_technical_analysis(
383 |     symbols: list[str], days: int = 365
384 | ) -> dict[str, pd.DataFrame]:
385 |     """
386 |     Convenience function for batch technical analysis.
387 | 
388 |     Args:
389 |         symbols: List of stock symbols
390 |         days: Number of days of data
391 | 
392 |     Returns:
393 |         Dictionary mapping symbols to DataFrames with technical indicators
394 |     """
395 |     processor = StockDataBatchProcessor()
396 |     return processor.get_batch_technical_analysis(symbols, days)
397 | 
```

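A usage sketch for the convenience functions above. The symbols and date range are illustrative, and `yfinance` performs live network calls, so results depend on data availability:

```python
import asyncio

from maverick_mcp.utils.batch_processing import (
    batch_download_stock_data,
    batch_download_stock_data_async,
)

# Synchronous batch download: one yfinance request covering all symbols,
# with a per-symbol fallback if the batch call fails.
frames = batch_download_stock_data(
    ["AAPL", "MSFT", "GOOGL"], "2024-01-01", "2024-03-31"
)
for symbol, df in frames.items():
    print(f"{symbol}: {len(df)} rows")

# The async variant wraps the same call in an executor, so it can be
# awaited from an event loop without blocking it.
frames = asyncio.run(
    batch_download_stock_data_async(["AAPL", "MSFT"], "2024-01-01", "2024-03-31")
)
```
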
--------------------------------------------------------------------------------
/tests/test_provider_architecture.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Test cases for the new provider architecture.
  3 | 
  4 | This module demonstrates how to use the new interface-based architecture
  5 | for testing and validates that the abstractions work correctly.
  6 | """
  7 | 
  8 | import pandas as pd
  9 | import pytest
 10 | 
 11 | from maverick_mcp.providers.dependencies import (
 12 |     DependencyOverride,
 13 |     create_test_dependencies,
 14 |     get_dependencies_for_testing,
 15 | )
 16 | from maverick_mcp.providers.factories.config_factory import ConfigurationFactory
 17 | from maverick_mcp.providers.factories.provider_factory import ProviderFactory
 18 | from maverick_mcp.providers.mocks.mock_cache import MockCacheManager
 19 | from maverick_mcp.providers.mocks.mock_config import MockConfigurationProvider
 20 | from maverick_mcp.providers.mocks.mock_macro_data import MockMacroDataProvider
 21 | from maverick_mcp.providers.mocks.mock_market_data import MockMarketDataProvider
 22 | from maverick_mcp.providers.mocks.mock_stock_data import (
 23 |     MockStockDataFetcher,
 24 |     MockStockScreener,
 25 | )
 26 | 
 27 | 
 28 | class TestProviderInterfaces:
 29 |     """Test the provider interfaces work correctly."""
 30 | 
 31 |     @pytest.mark.asyncio
 32 |     async def test_mock_cache_manager(self):
 33 |         """Test the mock cache manager implementation."""
 34 |         cache = MockCacheManager()
 35 | 
 36 |         # Test basic operations
 37 |         assert await cache.get("nonexistent") is None
 38 |         assert await cache.set("test_key", "test_value", 60) is True
 39 |         assert await cache.get("test_key") == "test_value"
 40 |         assert await cache.exists("test_key") is True
 41 |         assert await cache.delete("test_key") is True
 42 |         assert await cache.exists("test_key") is False
 43 | 
 44 |         # Test batch operations
 45 |         items = [("key1", "value1", 60), ("key2", "value2", 60)]
 46 |         assert await cache.set_many(items) == 2
 47 | 
 48 |         results = await cache.get_many(["key1", "key2", "key3"])
 49 |         assert results == {"key1": "value1", "key2": "value2"}
 50 | 
 51 |         # Test call logging
 52 |         call_log = cache.get_call_log()
 53 |         assert len(call_log) > 0
 54 |         assert call_log[0]["method"] == "get"
 55 | 
 56 |     @pytest.mark.asyncio
 57 |     async def test_mock_stock_data_fetcher(self):
 58 |         """Test the mock stock data fetcher implementation."""
 59 |         fetcher = MockStockDataFetcher()
 60 | 
 61 |         # Test stock data retrieval
 62 |         data = await fetcher.get_stock_data("AAPL", "2024-01-01", "2024-01-31")
 63 |         assert isinstance(data, pd.DataFrame)
 64 |         assert not data.empty
 65 |         assert "Close" in data.columns
 66 | 
 67 |         # Test real-time data
 68 |         realtime = await fetcher.get_realtime_data("AAPL")
 69 |         assert realtime is not None
 70 |         assert "symbol" in realtime
 71 |         assert realtime["symbol"] == "AAPL"
 72 | 
 73 |         # Test stock info
 74 |         info = await fetcher.get_stock_info("AAPL")
 75 |         assert "symbol" in info
 76 |         assert info["symbol"] == "AAPL"
 77 | 
 78 |         # Test market status
 79 |         is_open = await fetcher.is_market_open()
 80 |         assert isinstance(is_open, bool)
 81 | 
 82 |         # Test call logging
 83 |         call_log = fetcher.get_call_log()
 84 |         assert len(call_log) > 0
 85 | 
 86 |     @pytest.mark.asyncio
 87 |     async def test_mock_stock_screener(self):
 88 |         """Test the mock stock screener implementation."""
 89 |         screener = MockStockScreener()
 90 | 
 91 |         # Test maverick recommendations
 92 |         maverick = await screener.get_maverick_recommendations(limit=5)
 93 |         assert isinstance(maverick, list)
 94 |         assert len(maverick) <= 5
 95 | 
 96 |         # Test bear recommendations
 97 |         bear = await screener.get_maverick_bear_recommendations(limit=3)
 98 |         assert isinstance(bear, list)
 99 |         assert len(bear) <= 3
100 | 
101 |         # Test trending recommendations
102 |         trending = await screener.get_trending_recommendations(limit=2)
103 |         assert isinstance(trending, list)
104 |         assert len(trending) <= 2
105 | 
106 |         # Test all recommendations
107 |         all_recs = await screener.get_all_screening_recommendations()
108 |         assert "maverick_stocks" in all_recs
109 |         assert "maverick_bear_stocks" in all_recs
110 |         assert "trending_stocks" in all_recs
111 | 
112 |     @pytest.mark.asyncio
113 |     async def test_mock_market_data_provider(self):
114 |         """Test the mock market data provider implementation."""
115 |         provider = MockMarketDataProvider()
116 | 
117 |         # Test market summary
118 |         summary = await provider.get_market_summary()
119 |         assert isinstance(summary, dict)
120 |         assert "^GSPC" in summary
121 | 
122 |         # Test top gainers
123 |         gainers = await provider.get_top_gainers(5)
124 |         assert isinstance(gainers, list)
125 |         assert len(gainers) <= 5
126 | 
127 |         # Test sector performance
128 |         sectors = await provider.get_sector_performance()
129 |         assert isinstance(sectors, dict)
130 |         assert "Technology" in sectors
131 | 
132 |     @pytest.mark.asyncio
133 |     async def test_mock_macro_data_provider(self):
134 |         """Test the mock macro data provider implementation."""
135 |         provider = MockMacroDataProvider()
136 | 
137 |         # Test individual indicators
138 |         gdp = await provider.get_gdp_growth_rate()
139 |         assert "current" in gdp
140 |         assert "previous" in gdp
141 | 
142 |         unemployment = await provider.get_unemployment_rate()
143 |         assert "current" in unemployment
144 | 
145 |         vix = await provider.get_vix()
146 |         assert isinstance(vix, int | float) or vix is None
147 | 
148 |         # Test comprehensive statistics
149 |         stats = await provider.get_macro_statistics()
150 |         assert "sentiment_score" in stats
151 |         assert "gdp_growth_rate" in stats
152 | 
153 |     def test_mock_configuration_provider(self):
154 |         """Test the mock configuration provider implementation."""
155 |         config = MockConfigurationProvider()
156 | 
157 |         # Test default values
158 |         assert config.get_database_url() == "sqlite:///:memory:"
159 |         assert config.is_cache_enabled() is False
160 |         assert config.is_development_mode() is True
161 | 
162 |         # Test overrides
163 |         config.set_override("CACHE_ENABLED", True)
164 |         assert config.is_cache_enabled() is True
165 | 
166 |         # Test helper methods
167 |         config.enable_cache()
168 |         assert config.is_cache_enabled() is True
169 | 
170 |         config.disable_cache()
171 |         assert config.is_cache_enabled() is False
172 | 
173 | 
174 | class TestProviderFactory:
175 |     """Test the provider factory functionality."""
176 | 
177 |     def test_provider_factory_creation(self):
178 |         """Test creating providers through the factory."""
179 |         config = ConfigurationFactory.create_test_config()
180 |         factory = ProviderFactory(config)
181 | 
182 |         # Test provider creation
183 |         cache_manager = factory.get_cache_manager()
184 |         assert cache_manager is not None
185 | 
186 |         persistence = factory.get_persistence()
187 |         assert persistence is not None
188 | 
189 |         stock_fetcher = factory.get_stock_data_fetcher()
190 |         assert stock_fetcher is not None
191 | 
192 |         # Test singleton behavior
193 |         cache_manager2 = factory.get_cache_manager()
194 |         assert cache_manager is cache_manager2
195 | 
196 |     def test_provider_factory_validation(self):
197 |         """Test provider factory configuration validation."""
198 |         config = ConfigurationFactory.create_test_config()
199 |         factory = ProviderFactory(config)
200 | 
201 |         errors = factory.validate_configuration()
202 |         assert isinstance(errors, list)
203 |         # Test config should have no errors
204 |         assert len(errors) == 0
205 | 
206 |     def test_provider_factory_reset(self):
207 |         """Test provider factory cache reset."""
208 |         config = ConfigurationFactory.create_test_config()
209 |         factory = ProviderFactory(config)
210 | 
211 |         # Create providers
212 |         cache1 = factory.get_cache_manager()
213 | 
214 |         # Reset factory
215 |         factory.reset_cache()
216 | 
217 |         # Get provider again
218 |         cache2 = factory.get_cache_manager()
219 | 
220 |         # Should be different instances
221 |         assert cache1 is not cache2
222 | 
223 | 
224 | class TestDependencyInjection:
225 |     """Test the dependency injection system."""
226 | 
227 |     def test_dependency_override_context(self):
228 |         """Test dependency override context manager."""
229 |         mock_cache = MockCacheManager()
230 | 
231 |         with DependencyOverride(cache_manager=mock_cache):
232 |             # Inside the context, dependencies should be overridden
233 |             # This would be tested with actual dependency resolution
234 |             pass
235 | 
236 |         # Outside the context, dependencies should be restored
237 |         assert True  # Placeholder assertion
238 | 
239 |     def test_create_test_dependencies(self):
240 |         """Test creating test dependencies."""
241 |         mock_cache = MockCacheManager()
242 | 
243 |         deps = create_test_dependencies(cache_manager=mock_cache)
244 | 
245 |         assert "cache_manager" in deps
246 |         assert deps["cache_manager"] is mock_cache
247 |         assert "stock_data_fetcher" in deps
248 |         assert "configuration" in deps
249 | 
250 |     def test_get_dependencies_for_testing(self):
251 |         """Test getting dependencies configured for testing."""
252 |         deps = get_dependencies_for_testing()
253 | 
254 |         assert isinstance(deps, dict)
255 |         assert "cache_manager" in deps
256 |         assert "stock_data_fetcher" in deps
257 | 
258 | 
259 | class TestIntegrationScenarios:
260 |     """Test integration scenarios using the new architecture."""
261 | 
262 |     @pytest.mark.asyncio
263 |     async def test_stock_data_with_caching(self):
264 |         """Test stock data fetching with caching integration."""
265 |         # Create mock dependencies
266 |         cache = MockCacheManager()
267 |         fetcher = MockStockDataFetcher()
268 |         config = MockConfigurationProvider()
269 |         config.enable_cache()
270 | 
271 |         # Set up test data
272 |         test_data = pd.DataFrame(
273 |             {
274 |                 "Open": [100.0, 101.0],
275 |                 "High": [102.0, 103.0],
276 |                 "Low": [99.0, 100.0],
277 |                 "Close": [101.0, 102.0],
278 |                 "Volume": [1000000, 1100000],
279 |             },
280 |             index=pd.date_range("2024-01-01", periods=2),
281 |         )
282 | 
283 |         fetcher.set_test_data("AAPL", test_data)
284 | 
285 |         # Test the integration
286 |         cache_key = "stock_data:AAPL:2024-01-01:2024-01-02"
287 | 
288 |         # First call should fetch from provider
289 |         data = await fetcher.get_stock_data("AAPL", "2024-01-01", "2024-01-02")
290 |         assert not data.empty
291 | 
292 |         # Cache the result
293 |         await cache.set(cache_key, data.to_dict(), ttl=300)
294 | 
295 |         # Verify cache hit
296 |         cached_result = await cache.get(cache_key)
297 |         assert cached_result is not None
298 | 
299 |     @pytest.mark.asyncio
300 |     async def test_screening_workflow(self):
301 |         """Test a complete screening workflow."""
302 |         screener = MockStockScreener()
303 | 
304 |         # Set up test recommendations
305 |         test_maverick = [
306 |             {"symbol": "TEST1", "combined_score": 95, "momentum_score": 90},
307 |             {"symbol": "TEST2", "combined_score": 85, "momentum_score": 85},
308 |         ]
309 |         screener.set_test_recommendations("maverick", test_maverick)
310 | 
311 |         # Test the workflow
312 |         results = await screener.get_maverick_recommendations(limit=10, min_score=80)
313 |         assert len(results) == 2
314 | 
315 |         # Test filtering
316 |         filtered_results = await screener.get_maverick_recommendations(
317 |             limit=10, min_score=90
318 |         )
319 |         assert len(filtered_results) == 1
320 |         assert filtered_results[0]["symbol"] == "TEST1"
321 | 
322 |     def test_configuration_scenarios(self):
323 |         """Test different configuration scenarios."""
324 |         # Test development config
325 |         dev_config = ConfigurationFactory.create_development_config()
326 |         assert dev_config.is_development_mode()
327 | 
328 |         # Test with overrides
329 |         test_config = ConfigurationFactory.create_test_config(
330 |             {
331 |                 "CACHE_ENABLED": "true",
332 |                 "AUTH_ENABLED": "true",
333 |             }
334 |         )
335 |         assert test_config.is_cache_enabled()
336 |         assert test_config.is_auth_enabled()
337 | 
338 |     def test_mock_behavior_verification(self):
339 |         """Test that mocks properly track behavior for verification."""
340 |         cache = MockCacheManager()
341 | 
342 |         # Perform some operations
343 |         import asyncio
344 | 
345 |         async def perform_operations():
346 |             await cache.set("key1", "value1")
347 |             await cache.get("key1")
348 |             await cache.delete("key1")
349 | 
350 |         asyncio.run(perform_operations())
351 | 
352 |         # Verify call log
353 |         call_log = cache.get_call_log()
354 |         assert len(call_log) == 3
355 |         assert call_log[0]["method"] == "set"
356 |         assert call_log[1]["method"] == "get"
357 |         assert call_log[2]["method"] == "delete"
358 | 
359 | 
360 | if __name__ == "__main__":
361 |     # Run a simple smoke test
362 |     import asyncio
363 | 
364 |     async def smoke_test():
365 |         """Run a simple smoke test of the architecture."""
366 |         print("Running provider architecture smoke test...")
367 | 
368 |         # Test mock implementations
369 |         cache = MockCacheManager()
370 |         await cache.set("test", "value")
371 |         result = await cache.get("test")
372 |         assert result == "value"
373 |         print("✓ Cache manager working")
374 | 
375 |         fetcher = MockStockDataFetcher()
376 |         data = await fetcher.get_stock_data("AAPL")
377 |         assert not data.empty
378 |         print("✓ Stock data fetcher working")
379 | 
380 |         screener = MockStockScreener()
381 |         recommendations = await screener.get_maverick_recommendations()
382 |         assert len(recommendations) > 0
383 |         print("✓ Stock screener working")
384 | 
385 |         # Test factory
386 |         config = ConfigurationFactory.create_test_config()
387 |         factory = ProviderFactory(config)
388 |         errors = factory.validate_configuration()
389 |         assert len(errors) == 0
390 |         print("✓ Provider factory working")
391 | 
392 |         print("All tests passed! 🎉")
393 | 
394 |     asyncio.run(smoke_test())
395 | 
```

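Because the mocks above record every call, dependency-injected tests stay compact. A minimal pytest sketch following the same pattern, using only names already imported in the test module:

```python
import asyncio

from maverick_mcp.providers.dependencies import create_test_dependencies
from maverick_mcp.providers.mocks.mock_cache import MockCacheManager


def test_injected_cache_records_calls():
    cache = MockCacheManager()
    deps = create_test_dependencies(cache_manager=cache)

    async def exercise():
        # The injected manager is the same mock instance, so its call
        # log can be inspected after the async operations complete.
        await deps["cache_manager"].set("quote:AAPL", {"price": 182.5}, 60)
        assert await deps["cache_manager"].get("quote:AAPL") == {"price": 182.5}

    asyncio.run(exercise())
    assert [c["method"] for c in cache.get_call_log()] == ["set", "get"]
```
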
--------------------------------------------------------------------------------
/maverick_mcp/api/openapi_config.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Custom OpenAPI configuration for MaverickMCP API.
  3 | 
  4 | This module provides enhanced OpenAPI schema generation with:
  5 | - Comprehensive API metadata tailored for the open-source build
  6 | - Standardized tags and descriptions
  7 | - Custom examples and documentation
  8 | - Export functionality for Postman/Insomnia
  9 | """
 10 | 
 11 | from typing import Any
 12 | 
 13 | from fastapi import FastAPI
 14 | from fastapi.openapi.utils import get_openapi
 15 | from fastapi.responses import JSONResponse, Response
 16 | 
 17 | 
 18 | def custom_openapi(app: FastAPI) -> dict[str, Any]:
 19 |     """
 20 |     Generate custom OpenAPI schema with enhanced documentation.
 21 | 
 22 |     Args:
 23 |         app: FastAPI application instance
 24 | 
 25 |     Returns:
 26 |         OpenAPI schema dictionary
 27 |     """
 28 |     if app.openapi_schema:
 29 |         return app.openapi_schema
 30 | 
 31 |     description = """
 32 | # MaverickMCP Personal Research API
 33 | 
 34 | MaverickMCP is an open-source Model Context Protocol (MCP) server focused on
 35 | independent research, portfolio experimentation, and desktop analytics. It runs
 36 | entirely without billing, subscription tracking, or usage credits.
 37 | 
 38 | ## Highlights
 39 | 
 40 | - 📊 **Historical & Intraday Market Data** — request equities data across
 41 |   flexible ranges with caching for fast iteration.
 42 | - 📈 **Advanced Technical Analysis** — generate RSI, MACD, Bollinger Bands, and
 43 |   other indicator overlays for deeper insight.
 44 | - 🧪 **Backtesting & Scenario Tools** — evaluate trading ideas with the
 45 |   VectorBT-powered engine and inspect saved results locally.
 46 | - 🧠 **Research Agents & Screeners** — launch summarization and screening tools
 47 |   that operate with zero payment integration.
 48 | - 🛡️ **Secure Defaults** — observability hooks, CSP headers, and rate limiting
 49 |   are enabled without requiring extra configuration.
 50 | 
 51 | ## Access Model
 52 | 
 53 | - No authentication or API keys are required in this distribution.
 54 | - There is no purchase flow, billing portal, or credit ledger.
 55 | - All stateful data remains on the machine that hosts the server.
 56 | 
 57 | ## Error Handling
 58 | 
 59 | Every error response follows this JSON envelope:
 60 | 
 61 | ```json
 62 | {
 63 |   "success": false,
 64 |   "error": {
 65 |     "code": "ERROR_CODE",
 66 |     "message": "Human readable explanation"
 67 |   },
 68 |   "status_code": 400,
 69 |   "trace_id": "uuid-for-debugging"
 70 | }
 71 | ```
 72 | 
 73 | ## Support
 74 | 
 75 | - Documentation: https://github.com/wshobson/maverick-mcp#readme
 76 | - GitHub Issues: https://github.com/wshobson/maverick-mcp/issues
 77 | - Discussions: https://github.com/wshobson/maverick-mcp/discussions
 78 |     """
 79 | 
 80 |     tags = [
 81 |         {
 82 |             "name": "Technical Analysis",
 83 |             "description": """
 84 |             Stock technical indicators and analytics for personal research.
 85 | 
 86 |             Generate RSI, MACD, Bollinger Bands, and multi-indicator overlays
 87 |             without authentication or billing requirements.
 88 |             """,
 89 |         },
 90 |         {
 91 |             "name": "Market Data",
 92 |             "description": """
 93 |             Historical and intraday market data endpoints.
 94 | 
 95 |             Fetch quotes, price history, and metadata with smart caching to keep
 96 |             local research responsive.
 97 |             """,
 98 |         },
 99 |         {
100 |             "name": "Stock Screening",
101 |             "description": """
102 |             Automated screeners and discovery workflows.
103 | 
104 |             Run Maverick and custom screening strategies to surface candidates
105 |             for deeper analysis.
106 |             """,
107 |         },
108 |         {
109 |             "name": "Research Agents",
110 |             "description": """
111 |             AI-assisted research personas and orchestration helpers.
112 | 
113 |             Summarize market structure, compile reports, and investigate trends
114 |             entirely within your self-hosted environment.
115 |             """,
116 |         },
117 |         {
118 |             "name": "Backtesting",
119 |             "description": """
120 |             Strategy evaluation and performance inspection tools.
121 | 
122 |             Execute parameterized backtests with VectorBT and review results
123 |             without uploading data to third-party services.
124 |             """,
125 |         },
126 |         {
127 |             "name": "Portfolio",
128 |             "description": """
129 |             Personal portfolio calculators and scenario planners.
130 | 
131 |             Model allocations, rebalance strategies, and track watchlists with
132 |             zero dependency on payment providers.
133 |             """,
134 |         },
135 |         {
136 |             "name": "Monitoring",
137 |             "description": """
138 |             Operational monitoring and diagnostics endpoints.
139 | 
140 |             Inspect Prometheus metrics, runtime health, and background task
141 |             status for self-hosted deployments.
142 |             """,
143 |         },
144 |         {
145 |             "name": "Health",
146 |             "description": """
147 |             Lightweight readiness and liveness checks.
148 | 
149 |             Ideal for Docker, Kubernetes, or local supervisor probes.
150 |             """,
151 |         },
152 |     ]
153 | 
154 |     servers = [
155 |         {
156 |             "url": "http://localhost:8000",
157 |             "description": "Local HTTP development server",
158 |         },
159 |         {
160 |             "url": "http://0.0.0.0:8003",
161 |             "description": "Default SSE transport endpoint",
162 |         },
163 |     ]
164 | 
165 |     openapi_schema = get_openapi(
166 |         title="MaverickMCP API",
167 |         version="1.0.0",
168 |         description=description,
169 |         routes=app.routes,
170 |         tags=tags,
171 |         servers=servers,
172 |         contact={
173 |             "name": "MaverickMCP Maintainers",
174 |             "url": "https://github.com/wshobson/maverick-mcp",
175 |         },
176 |         license_info={
177 |             "name": "MIT License",
178 |             "url": "https://github.com/wshobson/maverick-mcp/blob/main/LICENSE",
179 |         },
180 |     )
181 | 
182 |     # Add external docs
183 |     openapi_schema["externalDocs"] = {
184 |         "description": "Project documentation",
185 |         "url": "https://github.com/wshobson/maverick-mcp#readme",
186 |     }
187 | 
188 |     # The open-source build intentionally has no authentication schemes.
189 |     openapi_schema.setdefault("components", {})
190 |     openapi_schema["components"]["securitySchemes"] = {}
191 |     openapi_schema["security"] = []
192 | 
193 |     # Add common response schemas. The "components" key is guaranteed
194 |     # to exist here because it was initialized with setdefault() above,
195 |     # so no extra existence check is needed.
196 | 
197 |     openapi_schema["components"]["responses"] = {
198 |         "UnauthorizedError": {
199 |             "description": "Authentication required or invalid credentials",
200 |             "content": {
201 |                 "application/json": {
202 |                     "schema": {"$ref": "#/components/schemas/ErrorResponse"},
203 |                     "example": {
204 |                         "success": False,
205 |                         "error": {
206 |                             "code": "UNAUTHORIZED",
207 |                             "message": "Authentication required",
208 |                         },
209 |                         "status_code": 401,
210 |                     },
211 |                 }
212 |             },
213 |         },
214 |         "ForbiddenError": {
215 |             "description": "Insufficient permissions",
216 |             "content": {
217 |                 "application/json": {
218 |                     "schema": {"$ref": "#/components/schemas/ErrorResponse"},
219 |                     "example": {
220 |                         "success": False,
221 |                         "error": {
222 |                             "code": "FORBIDDEN",
223 |                             "message": "Insufficient permissions for this operation",
224 |                         },
225 |                         "status_code": 403,
226 |                     },
227 |                 }
228 |             },
229 |         },
230 |         "NotFoundError": {
231 |             "description": "Resource not found",
232 |             "content": {
233 |                 "application/json": {
234 |                     "schema": {"$ref": "#/components/schemas/ErrorResponse"},
235 |                     "example": {
236 |                         "success": False,
237 |                         "error": {
238 |                             "code": "NOT_FOUND",
239 |                             "message": "The requested resource was not found",
240 |                         },
241 |                         "status_code": 404,
242 |                     },
243 |                 }
244 |             },
245 |         },
246 |         "ValidationError": {
247 |             "description": "Request validation failed",
248 |             "content": {
249 |                 "application/json": {
250 |                     "schema": {"$ref": "#/components/schemas/ValidationErrorResponse"},
251 |                     "example": {
252 |                         "success": False,
253 |                         "error": {
254 |                             "code": "VALIDATION_ERROR",
255 |                             "message": "Validation failed",
256 |                         },
257 |                         "errors": [
258 |                             {
259 |                                 "code": "INVALID_FORMAT",
260 |                                 "field": "email",
261 |                                 "message": "Invalid email format",
262 |                             }
263 |                         ],
264 |                         "status_code": 422,
265 |                     },
266 |                 }
267 |             },
268 |         },
269 |         "RateLimitError": {
270 |             "description": "Rate limit exceeded",
271 |             "content": {
272 |                 "application/json": {
273 |                     "schema": {"$ref": "#/components/schemas/RateLimitResponse"},
274 |                     "example": {
275 |                         "success": False,
276 |                         "error": {
277 |                             "code": "RATE_LIMIT_EXCEEDED",
278 |                             "message": "Too many requests",
279 |                         },
280 |                         "rate_limit": {
281 |                             "limit": 100,
282 |                             "remaining": 0,
283 |                             "reset": "2024-01-15T12:00:00Z",
284 |                             "retry_after": 42,
285 |                         },
286 |                         "status_code": 429,
287 |                     },
288 |                 }
289 |             },
290 |         },
291 |         "ServerError": {
292 |             "description": "Internal server error",
293 |             "content": {
294 |                 "application/json": {
295 |                     "schema": {"$ref": "#/components/schemas/ErrorResponse"},
296 |                     "example": {
297 |                         "success": False,
298 |                         "error": {
299 |                             "code": "INTERNAL_ERROR",
300 |                             "message": "An unexpected error occurred",
301 |                         },
302 |                         "status_code": 500,
303 |                         "trace_id": "550e8400-e29b-41d4-a716-446655440000",
304 |                     },
305 |                 }
306 |             },
307 |         },
308 |     }
309 | 
310 |     # Cache the schema
311 |     app.openapi_schema = openapi_schema
312 |     return app.openapi_schema
313 | 
314 | 
315 | def configure_openapi(app: FastAPI) -> None:
316 |     """
317 |     Configure OpenAPI for the FastAPI application.
318 | 
319 |     Args:
320 |         app: FastAPI application instance
321 |     """
322 |     # Override the default OpenAPI function
323 |     app.openapi = lambda: custom_openapi(app)  # type: ignore[assignment]
324 | 
325 |     # Add export endpoints
326 |     @app.get("/api/openapi.json", include_in_schema=False)
327 |     async def get_openapi_json():
328 |         """Get raw OpenAPI JSON specification."""
329 |         return JSONResponse(content=custom_openapi(app))
330 | 
331 |     @app.get("/api/openapi.yaml", include_in_schema=False)
332 |     async def get_openapi_yaml():
333 |         """Get OpenAPI specification in YAML format."""
334 |         import yaml
335 | 
336 |         openapi_dict = custom_openapi(app)
337 |         yaml_content = yaml.dump(openapi_dict, sort_keys=False, allow_unicode=True)
338 | 
339 |         return Response(
340 |             content=yaml_content,
341 |             media_type="application/x-yaml",
342 |             headers={"Content-Disposition": "attachment; filename=openapi.yaml"},
343 |         )
344 | 
345 |     # Add Postman collection export
346 |     @app.get("/api/postman.json", include_in_schema=False)
347 |     async def get_postman_collection():
348 |         """Export API as Postman collection."""
349 |         from maverick_mcp.api.utils.postman_export import convert_to_postman
350 | 
351 |         openapi_dict = custom_openapi(app)
352 |         postman_collection = convert_to_postman(openapi_dict)
353 | 
354 |         return JSONResponse(
355 |             content=postman_collection,
356 |             headers={
357 |                 "Content-Disposition": "attachment; filename=maverickmcp-api.postman_collection.json"
358 |             },
359 |         )
360 | 
361 |     # Add Insomnia collection export
362 |     @app.get("/api/insomnia.json", include_in_schema=False)
363 |     async def get_insomnia_collection():
364 |         """Export API as Insomnia collection."""
365 |         from maverick_mcp.api.utils.insomnia_export import convert_to_insomnia
366 | 
367 |         openapi_dict = custom_openapi(app)
368 |         insomnia_collection = convert_to_insomnia(openapi_dict)
369 | 
370 |         return JSONResponse(
371 |             content=insomnia_collection,
372 |             headers={
373 |                 "Content-Disposition": "attachment; filename=maverickmcp-api.insomnia_collection.json"
374 |             },
375 |         )
376 | 
377 | 
378 | # ReDoc configuration
379 | REDOC_CONFIG = {
380 |     "spec_url": "/api/openapi.json",
381 |     "title": "MaverickMCP API Documentation",
382 |     "favicon_url": "https://maverickmcp.com/favicon.ico",
383 |     "logo": {"url": "https://maverickmcp.com/logo.png", "altText": "MaverickMCP Logo"},
384 |     "theme": {
385 |         "colors": {
386 |             "primary": {
387 |                 "main": "#2563eb"  # Blue-600
388 |             }
389 |         },
390 |         "typography": {"fontSize": "14px", "code": {"fontSize": "13px"}},
391 |     },
392 |     "hideDownloadButton": False,
393 |     "disableSearch": False,
394 |     "showExtensions": True,
395 |     "expandResponses": "200,201",
396 |     "requiredPropsFirst": True,
397 |     "sortPropsAlphabetically": False,
398 |     "payloadSampleIdx": 0,
399 |     "hideHostname": False,
400 |     "noAutoAuth": False,
401 | }
402 | 
```
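
The `configure_openapi` helper above both overrides `app.openapi` and registers the export endpoints. Here is a minimal wiring sketch; the `/health` route is hypothetical and exists only so the generated schema has at least one path.

```python
# Minimal sketch of wiring configure_openapi into an application.
from fastapi import FastAPI

from maverick_mcp.api.openapi_config import configure_openapi

app = FastAPI()


@app.get("/health", tags=["Health"])
async def health() -> dict[str, str]:
    # Hypothetical route, included so the schema is non-empty.
    return {"status": "ok"}


configure_openapi(app)

# custom_openapi() builds the schema once, then caches it on
# app.openapi_schema, so repeated calls are cheap.
schema = app.openapi()
assert schema["info"]["title"] == "MaverickMCP API"
assert not schema["security"]  # the open-source build declares no auth
```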

--------------------------------------------------------------------------------
/maverick_mcp/backtesting/optimization.py:
--------------------------------------------------------------------------------

```python
  1 | """Strategy optimization utilities for VectorBT."""
  2 | 
  3 | from typing import Any
  4 | 
  5 | import numpy as np
  6 | import pandas as pd
  7 | 
  8 | 
  9 | class StrategyOptimizer:
 10 |     """Optimizer for trading strategy parameters."""
 11 | 
 12 |     def __init__(self, engine):
 13 |         """Initialize optimizer with VectorBT engine.
 14 | 
 15 |         Args:
 16 |             engine: VectorBTEngine instance
 17 |         """
 18 |         self.engine = engine
 19 | 
 20 |     def generate_param_grid(
 21 |         self, strategy_type: str, optimization_level: str = "medium"
 22 |     ) -> dict[str, list]:
 23 |         """Generate parameter grid based on strategy and optimization level.
 24 | 
 25 |         Args:
 26 |             strategy_type: Type of strategy
 27 |             optimization_level: Level of optimization (coarse, medium, fine)
 28 | 
 29 |         Returns:
 30 |             Parameter grid for optimization
 31 |         """
 32 |         if strategy_type == "sma_cross":
 33 |             return self._sma_param_grid(optimization_level)
 34 |         elif strategy_type == "rsi":
 35 |             return self._rsi_param_grid(optimization_level)
 36 |         elif strategy_type == "macd":
 37 |             return self._macd_param_grid(optimization_level)
 38 |         elif strategy_type == "bollinger":
 39 |             return self._bollinger_param_grid(optimization_level)
 40 |         elif strategy_type == "momentum":
 41 |             return self._momentum_param_grid(optimization_level)
 42 |         else:
 43 |             raise ValueError(f"Unknown strategy type: {strategy_type}")
 44 | 
 45 |     def _sma_param_grid(self, level: str) -> dict[str, list]:
 46 |         """Generate SMA crossover parameter grid."""
 47 |         if level == "coarse":
 48 |             return {
 49 |                 "fast_period": [5, 10, 20],
 50 |                 "slow_period": [20, 50, 100],
 51 |             }
 52 |         elif level == "fine":
 53 |             return {
 54 |                 "fast_period": list(range(5, 25, 2)),
 55 |                 "slow_period": list(range(20, 101, 5)),
 56 |             }
 57 |         else:  # medium
 58 |             return {
 59 |                 "fast_period": [5, 10, 15, 20],
 60 |                 "slow_period": [20, 30, 50, 100],
 61 |             }
 62 | 
 63 |     def _rsi_param_grid(self, level: str) -> dict[str, list]:
 64 |         """Generate RSI parameter grid."""
 65 |         if level == "coarse":
 66 |             return {
 67 |                 "period": [7, 14, 21],
 68 |                 "oversold": [20, 30],
 69 |                 "overbought": [70, 80],
 70 |             }
 71 |         elif level == "fine":
 72 |             return {
 73 |                 "period": list(range(7, 22, 2)),
 74 |                 "oversold": list(range(20, 41, 5)),
 75 |                 "overbought": list(range(60, 81, 5)),
 76 |             }
 77 |         else:  # medium
 78 |             return {
 79 |                 "period": [7, 14, 21],
 80 |                 "oversold": [20, 25, 30, 35],
 81 |                 "overbought": [65, 70, 75, 80],
 82 |             }
 83 | 
 84 |     def _macd_param_grid(self, level: str) -> dict[str, list]:
 85 |         """Generate MACD parameter grid."""
 86 |         if level == "coarse":
 87 |             return {
 88 |                 "fast_period": [8, 12],
 89 |                 "slow_period": [21, 26],
 90 |                 "signal_period": [9],
 91 |             }
 92 |         elif level == "fine":
 93 |             return {
 94 |                 "fast_period": list(range(8, 15)),
 95 |                 "slow_period": list(range(20, 31)),
 96 |                 "signal_period": list(range(7, 12)),
 97 |             }
 98 |         else:  # medium
 99 |             return {
100 |                 "fast_period": [8, 10, 12, 14],
101 |                 "slow_period": [21, 24, 26, 30],
102 |                 "signal_period": [7, 9, 11],
103 |             }
104 | 
105 |     def _bollinger_param_grid(self, level: str) -> dict[str, list]:
106 |         """Generate Bollinger Bands parameter grid."""
107 |         if level == "coarse":
108 |             return {
109 |                 "period": [10, 20],
110 |                 "std_dev": [1.5, 2.0, 2.5],
111 |             }
112 |         elif level == "fine":
113 |             return {
114 |                 "period": list(range(10, 31, 2)),
115 |                 "std_dev": np.arange(1.0, 3.1, 0.25).tolist(),
116 |             }
117 |         else:  # medium
118 |             return {
119 |                 "period": [10, 15, 20, 25],
120 |                 "std_dev": [1.5, 2.0, 2.5, 3.0],
121 |             }
122 | 
123 |     def _momentum_param_grid(self, level: str) -> dict[str, list]:
124 |         """Generate momentum parameter grid."""
125 |         if level == "coarse":
126 |             return {
127 |                 "lookback": [10, 20, 30],
128 |                 "threshold": [0.03, 0.05, 0.10],
129 |             }
130 |         elif level == "fine":
131 |             return {
132 |                 "lookback": list(range(10, 41, 2)),
133 |                 "threshold": np.arange(0.02, 0.11, 0.01).tolist(),
134 |             }
135 |         else:  # medium
136 |             return {
137 |                 "lookback": [10, 15, 20, 25, 30],
138 |                 "threshold": [0.02, 0.03, 0.05, 0.07, 0.10],
139 |             }
140 | 
141 |     async def walk_forward_analysis(
142 |         self,
143 |         symbol: str,
144 |         strategy_type: str,
145 |         parameters: dict[str, Any],
146 |         start_date: str,
147 |         end_date: str,
148 |         window_size: int = 252,  # ~1 trading year; applied below as calendar days
149 |         step_size: int = 63,  # ~1 trading quarter; applied below as calendar days
150 |         optimization_window: int = 504,  # ~2 trading years; applied below as calendar days
151 |     ) -> dict[str, Any]:
152 |         """Perform walk-forward analysis.
153 | 
154 |         Args:
155 |             symbol: Stock symbol
156 |             strategy_type: Strategy type
157 |             parameters: Initial parameters
158 |             start_date: Start date
159 |             end_date: End date
160 |             window_size: Test window size in days
161 |             step_size: Step size for rolling window
162 |             optimization_window: Optimization window size
163 | 
164 |         Returns:
165 |             Walk-forward analysis results
166 |         """
167 |         results = []
168 | 
169 |         # Convert dates to pandas datetime
170 |         start = pd.to_datetime(start_date)
171 |         end = pd.to_datetime(end_date)
172 |         current = start + pd.Timedelta(days=optimization_window)
173 | 
174 |         while current <= end:
175 |             # Optimization period
176 |             opt_start = current - pd.Timedelta(days=optimization_window)
177 |             opt_end = current
178 | 
179 |             # Test period
180 |             test_start = current
181 |             test_end = min(current + pd.Timedelta(days=window_size), end)
182 | 
183 |             # Optimize on training data
184 |             param_grid = self.generate_param_grid(strategy_type, "coarse")
185 |             optimization = await self.engine.optimize_parameters(
186 |                 symbol=symbol,
187 |                 strategy_type=strategy_type,
188 |                 param_grid=param_grid,
189 |                 start_date=opt_start.strftime("%Y-%m-%d"),
190 |                 end_date=opt_end.strftime("%Y-%m-%d"),
191 |                 top_n=1,
192 |             )
193 | 
194 |             best_params = optimization["best_parameters"]
195 | 
196 |             # Test on out-of-sample data
197 |             if test_start < test_end:
198 |                 test_result = await self.engine.run_backtest(
199 |                     symbol=symbol,
200 |                     strategy_type=strategy_type,
201 |                     parameters=best_params,
202 |                     start_date=test_start.strftime("%Y-%m-%d"),
203 |                     end_date=test_end.strftime("%Y-%m-%d"),
204 |                 )
205 | 
206 |                 results.append(
207 |                     {
208 |                         "period": f"{test_start.strftime('%Y-%m-%d')} to {test_end.strftime('%Y-%m-%d')}",
209 |                         "parameters": best_params,
210 |                         "in_sample_sharpe": optimization["best_metric_value"],
211 |                         "out_sample_return": test_result["metrics"]["total_return"],
212 |                         "out_sample_sharpe": test_result["metrics"]["sharpe_ratio"],
213 |                         "out_sample_drawdown": test_result["metrics"]["max_drawdown"],
214 |                     }
215 |                 )
216 | 
217 |             # Move window forward
218 |             current += pd.Timedelta(days=step_size)
219 | 
220 |         # Calculate aggregate metrics
221 |         if results:
222 |             avg_return = np.mean([r["out_sample_return"] for r in results])
223 |             avg_sharpe = np.mean([r["out_sample_sharpe"] for r in results])
224 |             avg_drawdown = np.mean([r["out_sample_drawdown"] for r in results])
225 |             consistency = sum(1 for r in results if r["out_sample_return"] > 0) / len(
226 |                 results
227 |             )
228 |         else:
229 |             avg_return = avg_sharpe = avg_drawdown = consistency = 0
230 | 
231 |         return {
232 |             "symbol": symbol,
233 |             "strategy": strategy_type,
234 |             "periods_tested": len(results),
235 |             "average_return": avg_return,
236 |             "average_sharpe": avg_sharpe,
237 |             "average_drawdown": avg_drawdown,
238 |             "consistency": consistency,
239 |             "walk_forward_results": results,
240 |             "summary": self._generate_wf_summary(avg_return, avg_sharpe, consistency),
241 |         }
242 | 
243 |     def _generate_wf_summary(
244 |         self, avg_return: float, avg_sharpe: float, consistency: float
245 |     ) -> str:
246 |         """Generate walk-forward analysis summary."""
247 |         summary = f"Walk-forward analysis shows {avg_return * 100:.1f}% average return "
248 |         summary += f"with Sharpe ratio of {avg_sharpe:.2f}. "
249 |         summary += f"Strategy was profitable in {consistency * 100:.0f}% of periods. "
250 | 
251 |         if avg_sharpe >= 1.0 and consistency >= 0.7:
252 |             summary += "Results indicate robust performance across different market conditions."
253 |         elif avg_sharpe >= 0.5 and consistency >= 0.5:
254 |             summary += "Results show moderate robustness with room for improvement."
255 |         else:
256 |             summary += "Results suggest the strategy may not be robust to changing market conditions."
257 | 
258 |         return summary
259 | 
260 |     async def monte_carlo_simulation(
261 |         self,
262 |         backtest_results: dict[str, Any],
263 |         num_simulations: int = 1000,
264 |         confidence_levels: list[float] | None = None,
265 |     ) -> dict[str, Any]:
266 |         """Run Monte Carlo simulation on backtest results.
267 | 
268 |         Args:
269 |             backtest_results: Results from run_backtest
270 |             num_simulations: Number of simulations to run
271 |             confidence_levels: Confidence levels for percentiles
272 | 
273 |         Returns:
274 |             Monte Carlo simulation results
275 |         """
276 |         if confidence_levels is None:
277 |             confidence_levels = [0.05, 0.25, 0.50, 0.75, 0.95]
278 |         trades = backtest_results.get("trades", [])
279 | 
280 |         if not trades:
281 |             return {"error": "No trades to simulate"}
282 | 
283 |         # Extract returns from trades
284 |         trade_returns = [t["return"] for t in trades]
285 | 
286 |         # Run simulations
287 |         simulated_returns = []
288 |         simulated_drawdowns = []
289 | 
290 |         for _ in range(num_simulations):
291 |             # Bootstrap sample with replacement
292 |             sampled_returns = np.random.choice(
293 |                 trade_returns, size=len(trade_returns), replace=True
294 |             )
295 | 
296 |             # Calculate cumulative return
297 |             cumulative = np.cumprod(1 + np.array(sampled_returns))
298 |             total_return = cumulative[-1] - 1
299 | 
300 |             # Calculate max drawdown
301 |             running_max = np.maximum.accumulate(cumulative)
302 |             drawdown = (cumulative - running_max) / running_max
303 |             max_drawdown = np.min(drawdown)
304 | 
305 |             simulated_returns.append(total_return)
306 |             simulated_drawdowns.append(max_drawdown)
307 | 
308 |         # Calculate percentiles
309 |         return_percentiles = np.percentile(
310 |             simulated_returns, np.array(confidence_levels) * 100
311 |         )
312 |         drawdown_percentiles = np.percentile(
313 |             simulated_drawdowns, np.array(confidence_levels) * 100
314 |         )
315 | 
316 |         return {
317 |             "num_simulations": num_simulations,
318 |             "expected_return": np.mean(simulated_returns),
319 |             "return_std": np.std(simulated_returns),
320 |             "return_percentiles": dict(
321 |                 zip(
322 |                     [f"p{int(cl * 100)}" for cl in confidence_levels],
323 |                     return_percentiles.tolist(),
324 |                     strict=False,
325 |                 )
326 |             ),
327 |             "expected_drawdown": np.mean(simulated_drawdowns),
328 |             "drawdown_std": np.std(simulated_drawdowns),
329 |             "drawdown_percentiles": dict(
330 |                 zip(
331 |                     [f"p{int(cl * 100)}" for cl in confidence_levels],
332 |                     drawdown_percentiles.tolist(),
333 |                     strict=False,
334 |                 )
335 |             ),
336 |             "probability_profit": sum(1 for r in simulated_returns if r > 0)
337 |             / num_simulations,
338 |             "var_95": return_percentiles[0],  # VaR at 95% (assumes confidence_levels[0] == 0.05)
339 |             "summary": self._generate_mc_summary(
340 |                 np.mean(simulated_returns),
341 |                 return_percentiles[0],
342 |                 sum(1 for r in simulated_returns if r > 0) / num_simulations,
343 |             ),
344 |         }
345 | 
346 |     def _generate_mc_summary(
347 |         self, expected_return: float, var_95: float, prob_profit: float
348 |     ) -> str:
349 |         """Generate Monte Carlo simulation summary."""
350 |         summary = f"Monte Carlo simulation shows {expected_return * 100:.1f}% expected return "
351 |         summary += f"with {prob_profit * 100:.1f}% probability of profit. "
352 |         summary += f"95% Value at Risk is {abs(var_95) * 100:.1f}%. "
353 | 
354 |         if prob_profit >= 0.8 and expected_return > 0.10:
355 |             summary += "Strategy shows strong probabilistic edge."
356 |         elif prob_profit >= 0.6 and expected_return > 0:
357 |             summary += "Strategy shows positive expectancy with moderate confidence."
358 |         else:
359 |             summary += "Strategy may not have sufficient edge for live trading."
360 | 
361 |         return summary
362 | 
```
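
Because `generate_param_grid` never touches the engine and the Monte Carlo loop only needs a list of trade returns, the core of both can be exercised without a `VectorBTEngine`. A minimal, self-contained sketch follows, using synthetic trade returns; the numbers are illustrative, not from a real backtest, and passing `engine=None` relies on grid generation not using the engine.

```python
# Standalone sketch of grid generation plus the bootstrap resampling used by
# monte_carlo_simulation, driven by synthetic trade returns.
import numpy as np

from maverick_mcp.backtesting.optimization import StrategyOptimizer

# Grid generation ignores self.engine, so None is safe for this demo.
optimizer = StrategyOptimizer(engine=None)
print(optimizer.generate_param_grid("rsi", "coarse"))

# Bootstrap Monte Carlo on 50 hypothetical trade returns.
rng = np.random.default_rng(42)
trade_returns = rng.normal(0.01, 0.05, size=50)

totals, drawdowns = [], []
for _ in range(1_000):
    # Resample trades with replacement to build one alternate trade history.
    sampled = rng.choice(trade_returns, size=trade_returns.size, replace=True)
    equity = np.cumprod(1 + sampled)  # one alternate equity curve
    totals.append(equity[-1] - 1)
    # Max drawdown: largest peak-to-trough decline of the equity curve.
    running_max = np.maximum.accumulate(equity)
    drawdowns.append(np.min((equity - running_max) / running_max))

print(f"expected return: {np.mean(totals):.2%}")
print(f"VaR at 95% (5th percentile): {np.percentile(totals, 5):.2%}")
print(f"probability of profit: {np.mean(np.array(totals) > 0):.1%}")
```

Note that resampling trades independently discards any serial correlation between them, so the resulting percentiles rest on an independence assumption that real trade sequences may not satisfy.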