This is page 8 of 39. Use http://codebase.md/wshobson/maverick-mcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .dockerignore
├── .env.example
├── .github
│   ├── dependabot.yml
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── config.yml
│   │   ├── feature_request.md
│   │   ├── question.md
│   │   └── security_report.md
│   ├── pull_request_template.md
│   └── workflows
│       ├── claude-code-review.yml
│       └── claude.yml
├── .gitignore
├── .python-version
├── .vscode
│   ├── launch.json
│   └── settings.json
├── alembic
│   ├── env.py
│   ├── script.py.mako
│   └── versions
│       ├── 001_initial_schema.py
│       ├── 003_add_performance_indexes.py
│       ├── 006_rename_metadata_columns.py
│       ├── 008_performance_optimization_indexes.py
│       ├── 009_rename_to_supply_demand.py
│       ├── 010_self_contained_schema.py
│       ├── 011_remove_proprietary_terms.py
│       ├── 013_add_backtest_persistence_models.py
│       ├── 014_add_portfolio_models.py
│       ├── 08e3945a0c93_merge_heads.py
│       ├── 9374a5c9b679_merge_heads_for_testing.py
│       ├── abf9b9afb134_merge_multiple_heads.py
│       ├── adda6d3fd84b_merge_proprietary_terms_removal_with_.py
│       ├── e0c75b0bdadb_fix_financial_data_precision_only.py
│       ├── f0696e2cac15_add_essential_performance_indexes.py
│       └── fix_database_integrity_issues.py
├── alembic.ini
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── DATABASE_SETUP.md
├── docker-compose.override.yml.example
├── docker-compose.yml
├── Dockerfile
├── docs
│   ├── api
│   │   └── backtesting.md
│   ├── BACKTESTING.md
│   ├── COST_BASIS_SPECIFICATION.md
│   ├── deep_research_agent.md
│   ├── exa_research_testing_strategy.md
│   ├── PORTFOLIO_PERSONALIZATION_PLAN.md
│   ├── PORTFOLIO.md
│   ├── SETUP_SELF_CONTAINED.md
│   └── speed_testing_framework.md
├── examples
│   ├── complete_speed_validation.py
│   ├── deep_research_integration.py
│   ├── llm_optimization_example.py
│   ├── llm_speed_demo.py
│   ├── monitoring_example.py
│   ├── parallel_research_example.py
│   ├── speed_optimization_demo.py
│   └── timeout_fix_demonstration.py
├── LICENSE
├── Makefile
├── MANIFEST.in
├── maverick_mcp
│   ├── __init__.py
│   ├── agents
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── circuit_breaker.py
│   │   ├── deep_research.py
│   │   ├── market_analysis.py
│   │   ├── optimized_research.py
│   │   ├── supervisor.py
│   │   └── technical_analysis.py
│   ├── api
│   │   ├── __init__.py
│   │   ├── api_server.py
│   │   ├── connection_manager.py
│   │   ├── dependencies
│   │   │   ├── __init__.py
│   │   │   ├── stock_analysis.py
│   │   │   └── technical_analysis.py
│   │   ├── error_handling.py
│   │   ├── inspector_compatible_sse.py
│   │   ├── inspector_sse.py
│   │   ├── middleware
│   │   │   ├── error_handling.py
│   │   │   ├── mcp_logging.py
│   │   │   ├── rate_limiting_enhanced.py
│   │   │   └── security.py
│   │   ├── openapi_config.py
│   │   ├── routers
│   │   │   ├── __init__.py
│   │   │   ├── agents.py
│   │   │   ├── backtesting.py
│   │   │   ├── data_enhanced.py
│   │   │   ├── data.py
│   │   │   ├── health_enhanced.py
│   │   │   ├── health_tools.py
│   │   │   ├── health.py
│   │   │   ├── intelligent_backtesting.py
│   │   │   ├── introspection.py
│   │   │   ├── mcp_prompts.py
│   │   │   ├── monitoring.py
│   │   │   ├── news_sentiment_enhanced.py
│   │   │   ├── performance.py
│   │   │   ├── portfolio.py
│   │   │   ├── research.py
│   │   │   ├── screening_ddd.py
│   │   │   ├── screening_parallel.py
│   │   │   ├── screening.py
│   │   │   ├── technical_ddd.py
│   │   │   ├── technical_enhanced.py
│   │   │   ├── technical.py
│   │   │   └── tool_registry.py
│   │   ├── server.py
│   │   ├── services
│   │   │   ├── __init__.py
│   │   │   ├── base_service.py
│   │   │   ├── market_service.py
│   │   │   ├── portfolio_service.py
│   │   │   ├── prompt_service.py
│   │   │   └── resource_service.py
│   │   ├── simple_sse.py
│   │   └── utils
│   │       ├── __init__.py
│   │       ├── insomnia_export.py
│   │       └── postman_export.py
│   ├── application
│   │   ├── __init__.py
│   │   ├── commands
│   │   │   └── __init__.py
│   │   ├── dto
│   │   │   ├── __init__.py
│   │   │   └── technical_analysis_dto.py
│   │   ├── queries
│   │   │   ├── __init__.py
│   │   │   └── get_technical_analysis.py
│   │   └── screening
│   │       ├── __init__.py
│   │       ├── dtos.py
│   │       └── queries.py
│   ├── backtesting
│   │   ├── __init__.py
│   │   ├── ab_testing.py
│   │   ├── analysis.py
│   │   ├── batch_processing_stub.py
│   │   ├── batch_processing.py
│   │   ├── model_manager.py
│   │   ├── optimization.py
│   │   ├── persistence.py
│   │   ├── retraining_pipeline.py
│   │   ├── strategies
│   │   │   ├── __init__.py
│   │   │   ├── base.py
│   │   │   ├── ml
│   │   │   │   ├── __init__.py
│   │   │   │   ├── adaptive.py
│   │   │   │   ├── ensemble.py
│   │   │   │   ├── feature_engineering.py
│   │   │   │   └── regime_aware.py
│   │   │   ├── ml_strategies.py
│   │   │   ├── parser.py
│   │   │   └── templates.py
│   │   ├── strategy_executor.py
│   │   ├── vectorbt_engine.py
│   │   └── visualization.py
│   ├── config
│   │   ├── __init__.py
│   │   ├── constants.py
│   │   ├── database_self_contained.py
│   │   ├── database.py
│   │   ├── llm_optimization_config.py
│   │   ├── logging_settings.py
│   │   ├── plotly_config.py
│   │   ├── security_utils.py
│   │   ├── security.py
│   │   ├── settings.py
│   │   ├── technical_constants.py
│   │   ├── tool_estimation.py
│   │   └── validation.py
│   ├── core
│   │   ├── __init__.py
│   │   ├── technical_analysis.py
│   │   └── visualization.py
│   ├── data
│   │   ├── __init__.py
│   │   ├── cache_manager.py
│   │   ├── cache.py
│   │   ├── django_adapter.py
│   │   ├── health.py
│   │   ├── models.py
│   │   ├── performance.py
│   │   ├── session_management.py
│   │   └── validation.py
│   ├── database
│   │   ├── __init__.py
│   │   ├── base.py
│   │   └── optimization.py
│   ├── dependencies.py
│   ├── domain
│   │   ├── __init__.py
│   │   ├── entities
│   │   │   ├── __init__.py
│   │   │   └── stock_analysis.py
│   │   ├── events
│   │   │   └── __init__.py
│   │   ├── portfolio.py
│   │   ├── screening
│   │   │   ├── __init__.py
│   │   │   ├── entities.py
│   │   │   ├── services.py
│   │   │   └── value_objects.py
│   │   ├── services
│   │   │   ├── __init__.py
│   │   │   └── technical_analysis_service.py
│   │   ├── stock_analysis
│   │   │   ├── __init__.py
│   │   │   └── stock_analysis_service.py
│   │   └── value_objects
│   │       ├── __init__.py
│   │       └── technical_indicators.py
│   ├── exceptions.py
│   ├── infrastructure
│   │   ├── __init__.py
│   │   ├── cache
│   │   │   └── __init__.py
│   │   ├── caching
│   │   │   ├── __init__.py
│   │   │   └── cache_management_service.py
│   │   ├── connection_manager.py
│   │   ├── data_fetching
│   │   │   ├── __init__.py
│   │   │   └── stock_data_service.py
│   │   ├── health
│   │   │   ├── __init__.py
│   │   │   └── health_checker.py
│   │   ├── persistence
│   │   │   ├── __init__.py
│   │   │   └── stock_repository.py
│   │   ├── providers
│   │   │   └── __init__.py
│   │   ├── screening
│   │   │   ├── __init__.py
│   │   │   └── repositories.py
│   │   └── sse_optimizer.py
│   ├── langchain_tools
│   │   ├── __init__.py
│   │   ├── adapters.py
│   │   └── registry.py
│   ├── logging_config.py
│   ├── memory
│   │   ├── __init__.py
│   │   └── stores.py
│   ├── monitoring
│   │   ├── __init__.py
│   │   ├── health_check.py
│   │   ├── health_monitor.py
│   │   ├── integration_example.py
│   │   ├── metrics.py
│   │   ├── middleware.py
│   │   └── status_dashboard.py
│   ├── providers
│   │   ├── __init__.py
│   │   ├── dependencies.py
│   │   ├── factories
│   │   │   ├── __init__.py
│   │   │   ├── config_factory.py
│   │   │   └── provider_factory.py
│   │   ├── implementations
│   │   │   ├── __init__.py
│   │   │   ├── cache_adapter.py
│   │   │   ├── macro_data_adapter.py
│   │   │   ├── market_data_adapter.py
│   │   │   ├── persistence_adapter.py
│   │   │   └── stock_data_adapter.py
│   │   ├── interfaces
│   │   │   ├── __init__.py
│   │   │   ├── cache.py
│   │   │   ├── config.py
│   │   │   ├── macro_data.py
│   │   │   ├── market_data.py
│   │   │   ├── persistence.py
│   │   │   └── stock_data.py
│   │   ├── llm_factory.py
│   │   ├── macro_data.py
│   │   ├── market_data.py
│   │   ├── mocks
│   │   │   ├── __init__.py
│   │   │   ├── mock_cache.py
│   │   │   ├── mock_config.py
│   │   │   ├── mock_macro_data.py
│   │   │   ├── mock_market_data.py
│   │   │   ├── mock_persistence.py
│   │   │   └── mock_stock_data.py
│   │   ├── openrouter_provider.py
│   │   ├── optimized_screening.py
│   │   ├── optimized_stock_data.py
│   │   └── stock_data.py
│   ├── README.md
│   ├── tests
│   │   ├── __init__.py
│   │   ├── README_INMEMORY_TESTS.md
│   │   ├── test_cache_debug.py
│   │   ├── test_fixes_validation.py
│   │   ├── test_in_memory_routers.py
│   │   ├── test_in_memory_server.py
│   │   ├── test_macro_data_provider.py
│   │   ├── test_mailgun_email.py
│   │   ├── test_market_calendar_caching.py
│   │   ├── test_mcp_tool_fixes_pytest.py
│   │   ├── test_mcp_tool_fixes.py
│   │   ├── test_mcp_tools.py
│   │   ├── test_models_functional.py
│   │   ├── test_server.py
│   │   ├── test_stock_data_enhanced.py
│   │   ├── test_stock_data_provider.py
│   │   └── test_technical_analysis.py
│   ├── tools
│   │   ├── __init__.py
│   │   ├── performance_monitoring.py
│   │   ├── portfolio_manager.py
│   │   ├── risk_management.py
│   │   └── sentiment_analysis.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── agent_errors.py
│   │   ├── batch_processing.py
│   │   ├── cache_warmer.py
│   │   ├── circuit_breaker_decorators.py
│   │   ├── circuit_breaker_services.py
│   │   ├── circuit_breaker.py
│   │   ├── data_chunking.py
│   │   ├── database_monitoring.py
│   │   ├── debug_utils.py
│   │   ├── fallback_strategies.py
│   │   ├── llm_optimization.py
│   │   ├── logging_example.py
│   │   ├── logging_init.py
│   │   ├── logging.py
│   │   ├── mcp_logging.py
│   │   ├── memory_profiler.py
│   │   ├── monitoring_middleware.py
│   │   ├── monitoring.py
│   │   ├── orchestration_logging.py
│   │   ├── parallel_research.py
│   │   ├── parallel_screening.py
│   │   ├── quick_cache.py
│   │   ├── resource_manager.py
│   │   ├── shutdown.py
│   │   ├── stock_helpers.py
│   │   ├── structured_logger.py
│   │   ├── tool_monitoring.py
│   │   ├── tracing.py
│   │   └── yfinance_pool.py
│   ├── validation
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── data.py
│   │   ├── middleware.py
│   │   ├── portfolio.py
│   │   ├── responses.py
│   │   ├── screening.py
│   │   └── technical.py
│   └── workflows
│       ├── __init__.py
│       ├── agents
│       │   ├── __init__.py
│       │   ├── market_analyzer.py
│       │   ├── optimizer_agent.py
│       │   ├── strategy_selector.py
│       │   └── validator_agent.py
│       ├── backtesting_workflow.py
│       └── state.py
├── PLANS.md
├── pyproject.toml
├── pyrightconfig.json
├── README.md
├── scripts
│   ├── dev.sh
│   ├── INSTALLATION_GUIDE.md
│   ├── load_example.py
│   ├── load_market_data.py
│   ├── load_tiingo_data.py
│   ├── migrate_db.py
│   ├── README_TIINGO_LOADER.md
│   ├── requirements_tiingo.txt
│   ├── run_stock_screening.py
│   ├── run-migrations.sh
│   ├── seed_db.py
│   ├── seed_sp500.py
│   ├── setup_database.sh
│   ├── setup_self_contained.py
│   ├── setup_sp500_database.sh
│   ├── test_seeded_data.py
│   ├── test_tiingo_loader.py
│   ├── tiingo_config.py
│   └── validate_setup.py
├── SECURITY.md
├── server.json
├── setup.py
├── tests
│   ├── __init__.py
│   ├── conftest.py
│   ├── core
│   │   └── test_technical_analysis.py
│   ├── data
│   │   └── test_portfolio_models.py
│   ├── domain
│   │   ├── conftest.py
│   │   ├── test_portfolio_entities.py
│   │   └── test_technical_analysis_service.py
│   ├── fixtures
│   │   └── orchestration_fixtures.py
│   ├── integration
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── README.md
│   │   ├── run_integration_tests.sh
│   │   ├── test_api_technical.py
│   │   ├── test_chaos_engineering.py
│   │   ├── test_config_management.py
│   │   ├── test_full_backtest_workflow_advanced.py
│   │   ├── test_full_backtest_workflow.py
│   │   ├── test_high_volume.py
│   │   ├── test_mcp_tools.py
│   │   ├── test_orchestration_complete.py
│   │   ├── test_portfolio_persistence.py
│   │   ├── test_redis_cache.py
│   │   ├── test_security_integration.py.disabled
│   │   └── vcr_setup.py
│   ├── performance
│   │   ├── __init__.py
│   │   ├── test_benchmarks.py
│   │   ├── test_load.py
│   │   ├── test_profiling.py
│   │   └── test_stress.py
│   ├── providers
│   │   └── test_stock_data_simple.py
│   ├── README.md
│   ├── test_agents_router_mcp.py
│   ├── test_backtest_persistence.py
│   ├── test_cache_management_service.py
│   ├── test_cache_serialization.py
│   ├── test_circuit_breaker.py
│   ├── test_database_pool_config_simple.py
│   ├── test_database_pool_config.py
│   ├── test_deep_research_functional.py
│   ├── test_deep_research_integration.py
│   ├── test_deep_research_parallel_execution.py
│   ├── test_error_handling.py
│   ├── test_event_loop_integrity.py
│   ├── test_exa_research_integration.py
│   ├── test_exception_hierarchy.py
│   ├── test_financial_search.py
│   ├── test_graceful_shutdown.py
│   ├── test_integration_simple.py
│   ├── test_langgraph_workflow.py
│   ├── test_market_data_async.py
│   ├── test_market_data_simple.py
│   ├── test_mcp_orchestration_functional.py
│   ├── test_ml_strategies.py
│   ├── test_optimized_research_agent.py
│   ├── test_orchestration_integration.py
│   ├── test_orchestration_logging.py
│   ├── test_orchestration_tools_simple.py
│   ├── test_parallel_research_integration.py
│   ├── test_parallel_research_orchestrator.py
│   ├── test_parallel_research_performance.py
│   ├── test_performance_optimizations.py
│   ├── test_production_validation.py
│   ├── test_provider_architecture.py
│   ├── test_rate_limiting_enhanced.py
│   ├── test_runner_validation.py
│   ├── test_security_comprehensive.py.disabled
│   ├── test_security_cors.py
│   ├── test_security_enhancements.py.disabled
│   ├── test_security_headers.py
│   ├── test_security_penetration.py
│   ├── test_session_management.py
│   ├── test_speed_optimization_validation.py
│   ├── test_stock_analysis_dependencies.py
│   ├── test_stock_analysis_service.py
│   ├── test_stock_data_fetching_service.py
│   ├── test_supervisor_agent.py
│   ├── test_supervisor_functional.py
│   ├── test_tool_estimation_config.py
│   ├── test_visualization.py
│   └── utils
│       ├── test_agent_errors.py
│       ├── test_logging.py
│       ├── test_parallel_screening.py
│       └── test_quick_cache.py
├── tools
│   ├── check_orchestration_config.py
│   ├── experiments
│   │   ├── validation_examples.py
│   │   └── validation_fixed.py
│   ├── fast_dev.sh
│   ├── hot_reload.py
│   ├── quick_test.py
│   └── templates
│       ├── new_router_template.py
│       ├── new_tool_template.py
│       ├── screening_strategy_template.py
│       └── test_template.py
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/maverick_mcp/utils/circuit_breaker_services.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Service-specific circuit breakers for external APIs.
  3 | Provides pre-configured circuit breakers for different external services.
  4 | """
  5 | 
  6 | import logging
  7 | 
  8 | import pandas as pd
  9 | import requests
 10 | from requests.exceptions import ConnectionError, HTTPError, RequestException, Timeout
 11 | 
 12 | from maverick_mcp.config.settings import get_settings
 13 | from maverick_mcp.utils.circuit_breaker import (
 14 |     CircuitBreakerConfig,
 15 |     EnhancedCircuitBreaker,
 16 |     FailureDetectionStrategy,
 17 | )
 18 | from maverick_mcp.utils.fallback_strategies import (
 19 |     ECONOMIC_DATA_FALLBACK,
 20 |     MARKET_DATA_FALLBACK,
 21 |     NEWS_FALLBACK,
 22 |     STOCK_DATA_FALLBACK_CHAIN,
 23 | )
 24 | 
 25 | logger = logging.getLogger(__name__)
 26 | settings = get_settings()
 27 | 
 28 | 
 29 | # Service-specific configurations
 30 | YFINANCE_CONFIG = CircuitBreakerConfig(
 31 |     name="yfinance",
 32 |     failure_threshold=3,
 33 |     failure_rate_threshold=0.5,
 34 |     timeout_threshold=30.0,
 35 |     recovery_timeout=120,  # 2 minutes
 36 |     success_threshold=2,
 37 |     window_size=300,  # 5 minutes
 38 |     detection_strategy=FailureDetectionStrategy.COMBINED,
 39 |     expected_exceptions=(Exception,),  # yfinance can throw various exceptions
 40 | )
 41 | 
 42 | FINVIZ_CONFIG = CircuitBreakerConfig(
 43 |     name="finviz",
 44 |     failure_threshold=5,
 45 |     failure_rate_threshold=0.6,
 46 |     timeout_threshold=20.0,
 47 |     recovery_timeout=180,  # 3 minutes
 48 |     success_threshold=3,
 49 |     window_size=300,
 50 |     detection_strategy=FailureDetectionStrategy.COMBINED,
 51 |     expected_exceptions=(Exception,),
 52 | )
 53 | 
 54 | FRED_CONFIG = CircuitBreakerConfig(
 55 |     name="fred_api",
 56 |     failure_threshold=5,
 57 |     failure_rate_threshold=0.5,
 58 |     timeout_threshold=15.0,
 59 |     recovery_timeout=300,  # 5 minutes
 60 |     success_threshold=3,
 61 |     window_size=600,  # 10 minutes
 62 |     detection_strategy=FailureDetectionStrategy.COMBINED,
 63 |     expected_exceptions=(Exception,),
 64 | )
 65 | 
 66 | EXTERNAL_API_CONFIG = CircuitBreakerConfig(
 67 |     name="external_api",
 68 |     failure_threshold=3,
 69 |     failure_rate_threshold=0.4,
 70 |     timeout_threshold=10.0,
 71 |     recovery_timeout=60,
 72 |     success_threshold=2,
 73 |     window_size=300,
 74 |     detection_strategy=FailureDetectionStrategy.COMBINED,
 75 |     expected_exceptions=(RequestException, HTTPError, Timeout, ConnectionError),
 76 | )
 77 | 
 78 | TIINGO_CONFIG = CircuitBreakerConfig(
 79 |     name="tiingo",
 80 |     failure_threshold=3,
 81 |     failure_rate_threshold=0.5,
 82 |     timeout_threshold=15.0,
 83 |     recovery_timeout=120,
 84 |     success_threshold=2,
 85 |     window_size=300,
 86 |     detection_strategy=FailureDetectionStrategy.COMBINED,
 87 |     expected_exceptions=(Exception,),
 88 | )
 89 | 
 90 | HTTP_CONFIG = CircuitBreakerConfig(
 91 |     name="http_general",
 92 |     failure_threshold=5,
 93 |     failure_rate_threshold=0.6,
 94 |     timeout_threshold=30.0,
 95 |     recovery_timeout=60,
 96 |     success_threshold=3,
 97 |     window_size=300,
 98 |     detection_strategy=FailureDetectionStrategy.FAILURE_RATE,
 99 |     expected_exceptions=(RequestException, HTTPError, Timeout, ConnectionError),
100 | )
101 | 
102 | 
103 | class StockDataCircuitBreaker(EnhancedCircuitBreaker):
104 |     """Circuit breaker for stock data APIs (yfinance)."""
105 | 
106 |     def __init__(self):
107 |         """Initialize with yfinance configuration."""
108 |         super().__init__(YFINANCE_CONFIG)
109 |         self.fallback_chain = STOCK_DATA_FALLBACK_CHAIN
110 | 
111 |     def fetch_with_fallback(
112 |         self,
113 |         fetch_func: callable,
114 |         symbol: str,
115 |         start_date: str,
116 |         end_date: str,
117 |         **kwargs,
118 |     ) -> pd.DataFrame:
119 |         """
120 |         Fetch stock data with circuit breaker and fallback.
121 | 
122 |         Args:
123 |             fetch_func: The function to fetch data (e.g., yfinance call)
124 |             symbol: Stock symbol
125 |             start_date: Start date
126 |             end_date: End date
127 |             **kwargs: Additional arguments for fetch_func
128 | 
129 |         Returns:
130 |             DataFrame with stock data
131 |         """
132 |         try:
133 |             # Try primary fetch through circuit breaker
134 |             return self.call_sync(fetch_func, symbol, start_date, end_date, **kwargs)
135 |         except Exception as e:
136 |             logger.warning(
137 |                 f"Primary stock data fetch failed for {symbol}: {e}. "
138 |                 f"Attempting fallback strategies."
139 |             )
140 |             # Execute fallback chain
141 |             return self.fallback_chain.execute_sync(
142 |                 symbol, start_date, end_date, **kwargs
143 |             )
144 | 
145 |     async def fetch_with_fallback_async(
146 |         self,
147 |         fetch_func: callable,
148 |         symbol: str,
149 |         start_date: str,
150 |         end_date: str,
151 |         **kwargs,
152 |     ) -> pd.DataFrame:
153 |         """Async version of fetch_with_fallback."""
154 |         try:
155 |             return await self.call_async(
156 |                 fetch_func, symbol, start_date, end_date, **kwargs
157 |             )
158 |         except Exception as e:
159 |             logger.warning(
160 |                 f"Primary stock data fetch failed for {symbol}: {e}. "
161 |                 f"Attempting fallback strategies."
162 |             )
163 |             return await self.fallback_chain.execute_async(
164 |                 symbol, start_date, end_date, **kwargs
165 |             )
166 | 
167 | 
168 | class MarketDataCircuitBreaker(EnhancedCircuitBreaker):
169 |     """Circuit breaker for market data APIs (finviz, External API)."""
170 | 
171 |     def __init__(self, service_name: str = "market_data"):
172 |         """Initialize with market data configuration."""
173 |         if service_name == "finviz":
174 |             config = FINVIZ_CONFIG
175 |         elif service_name == "external_api":
176 |             config = EXTERNAL_API_CONFIG
177 |         else:
178 |             config = FINVIZ_CONFIG  # Default
179 | 
180 |         super().__init__(config)
181 |         self.fallback = MARKET_DATA_FALLBACK
182 | 
183 |     def fetch_with_fallback(
184 |         self, fetch_func: callable, mover_type: str = "gainers", **kwargs
185 |     ) -> dict:
186 |         """Fetch market data with circuit breaker and fallback."""
187 |         try:
188 |             return self.call_sync(fetch_func, mover_type, **kwargs)
189 |         except Exception as e:
190 |             logger.warning(
191 |                 f"Market data fetch failed for {mover_type}: {e}. "
192 |                 f"Returning fallback data."
193 |             )
194 |             return self.fallback.execute_sync(mover_type, **kwargs)
195 | 
196 | 
197 | class EconomicDataCircuitBreaker(EnhancedCircuitBreaker):
198 |     """Circuit breaker for economic data APIs (FRED)."""
199 | 
200 |     def __init__(self):
201 |         """Initialize with FRED configuration."""
202 |         super().__init__(FRED_CONFIG)
203 |         self.fallback = ECONOMIC_DATA_FALLBACK
204 | 
205 |     def fetch_with_fallback(
206 |         self,
207 |         fetch_func: callable,
208 |         series_id: str,
209 |         start_date: str,
210 |         end_date: str,
211 |         **kwargs,
212 |     ) -> pd.Series:
213 |         """Fetch economic data with circuit breaker and fallback."""
214 |         try:
215 |             return self.call_sync(fetch_func, series_id, start_date, end_date, **kwargs)
216 |         except Exception as e:
217 |             logger.warning(
218 |                 f"Economic data fetch failed for {series_id}: {e}. "
219 |                 f"Using fallback values."
220 |             )
221 |             return self.fallback.execute_sync(series_id, start_date, end_date, **kwargs)
222 | 
223 | 
224 | class NewsDataCircuitBreaker(EnhancedCircuitBreaker):
225 |     """Circuit breaker for news/sentiment APIs."""
226 | 
227 |     def __init__(self):
228 |         """Initialize with news API configuration."""
229 |         # Use a generic config for news APIs
230 |         config = CircuitBreakerConfig(
231 |             name="news_api",
232 |             failure_threshold=3,
233 |             failure_rate_threshold=0.6,
234 |             timeout_threshold=10.0,
235 |             recovery_timeout=300,
236 |             success_threshold=2,
237 |             window_size=600,
238 |             detection_strategy=FailureDetectionStrategy.COMBINED,
239 |             expected_exceptions=(Exception,),
240 |         )
241 |         super().__init__(config)
242 |         self.fallback = NEWS_FALLBACK
243 | 
244 |     def fetch_with_fallback(self, fetch_func: callable, symbol: str, **kwargs) -> dict:
245 |         """Fetch news data with circuit breaker and fallback."""
246 |         try:
247 |             return self.call_sync(fetch_func, symbol, **kwargs)
248 |         except Exception as e:
249 |             logger.warning(
250 |                 f"News data fetch failed for {symbol}: {e}. Returning empty news data."
251 |             )
252 |             return self.fallback.execute_sync(symbol, **kwargs)
253 | 
254 | 
255 | class HttpCircuitBreaker(EnhancedCircuitBreaker):
256 |     """General circuit breaker for HTTP requests."""
257 | 
258 |     def __init__(self):
259 |         """Initialize with HTTP configuration."""
260 |         super().__init__(HTTP_CONFIG)
261 | 
262 |     def request_with_circuit_breaker(
263 |         self, method: str, url: str, session: requests.Session | None = None, **kwargs
264 |     ) -> requests.Response:
265 |         """
266 |         Make HTTP request with circuit breaker protection.
267 | 
268 |         Args:
269 |             method: HTTP method (GET, POST, etc.)
270 |             url: URL to request
271 |             session: Optional requests session
272 |             **kwargs: Additional arguments for requests
273 | 
274 |         Returns:
275 |             Response object
276 |         """
277 | 
278 |         def make_request():
279 |             # Ensure timeout is set
280 |             if "timeout" not in kwargs:
281 |                 kwargs["timeout"] = self.config.timeout_threshold
282 | 
283 |             if session:
284 |                 return session.request(method, url, **kwargs)
285 |             else:
286 |                 return requests.request(method, url, **kwargs)
287 | 
288 |         return self.call_sync(make_request)
289 | 
290 | 
291 | # Global instances for reuse
292 | stock_data_breaker = StockDataCircuitBreaker()
293 | market_data_breaker = MarketDataCircuitBreaker()
294 | economic_data_breaker = EconomicDataCircuitBreaker()
295 | news_data_breaker = NewsDataCircuitBreaker()
296 | http_breaker = HttpCircuitBreaker()
297 | 
298 | 
299 | def get_service_circuit_breaker(service: str) -> EnhancedCircuitBreaker:
300 |     """
301 |     Get a circuit breaker for a specific service.
302 | 
303 |     Args:
304 |         service: Service name (yfinance, finviz, fred, news, http)
305 | 
306 |     Returns:
307 |         Configured circuit breaker for the service
308 |     """
309 |     service_breakers = {
310 |         "yfinance": stock_data_breaker,
311 |         "finviz": market_data_breaker,
312 |         "fred": economic_data_breaker,
313 |         "external_api": MarketDataCircuitBreaker("external_api"),  # new instance per call; state not shared
314 |         "tiingo": EnhancedCircuitBreaker(TIINGO_CONFIG),  # new instance per call; state not shared
315 |         "news": news_data_breaker,
316 |         "http": http_breaker,
317 |     }
318 | 
319 |     breaker = service_breakers.get(service)
320 |     if not breaker:
321 |         logger.warning(
322 |             f"No specific circuit breaker for service '{service}', using HTTP breaker"
323 |         )
324 |         return http_breaker
325 | 
326 |     return breaker
327 | 
```
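
A minimal usage sketch (editorial, not part of the repository): wiring a caller-supplied fetch function through the breakers defined above. The `fetch_prices` helper and its `yf.download` call are illustrative assumptions; `fetch_with_fallback` only requires a callable accepting `(symbol, start_date, end_date, **kwargs)`.

```python
import pandas as pd
import yfinance as yf

from maverick_mcp.utils.circuit_breaker_services import (
    get_service_circuit_breaker,
    stock_data_breaker,
)


def fetch_prices(symbol: str, start_date: str, end_date: str) -> pd.DataFrame:
    """Illustrative fetch function matching fetch_with_fallback's call shape."""
    return yf.download(symbol, start=start_date, end=end_date)


# The primary fetch runs through the yfinance breaker; on failure (or an
# open circuit) the STOCK_DATA_FALLBACK_CHAIN takes over.
df = stock_data_breaker.fetch_with_fallback(
    fetch_prices, "AAPL", "2024-01-01", "2024-06-30"
)

# Lookup by service name; unknown names fall back to the general HTTP breaker.
fred_breaker = get_service_circuit_breaker("fred")
```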

--------------------------------------------------------------------------------
/docs/SETUP_SELF_CONTAINED.md:
--------------------------------------------------------------------------------

```markdown
  1 | # MaverickMCP Self-Contained Setup Guide
  2 | 
  3 | ⚠️ **IMPORTANT FINANCIAL DISCLAIMER**: This software is for educational and informational purposes only. It is NOT financial advice. Past performance does not guarantee future results. Always consult with a qualified financial advisor before making investment decisions.
  4 | 
  5 | This guide explains how to set up MaverickMCP as a completely self-contained system for personal-use financial analysis with Claude Desktop.
  6 | 
  7 | ## Overview
  8 | 
  9 | MaverickMCP is now fully self-contained and doesn't require any external database dependencies. All data is stored in its own PostgreSQL database with the `mcp_` prefix for all tables to avoid conflicts.
 10 | 
 11 | ## Prerequisites
 12 | 
 13 | - Python 3.12+
 14 | - PostgreSQL 14+ (or SQLite for development)
 15 | - Redis (optional, for caching)
 16 | - Tiingo API account (free tier available)
 17 | 
 18 | ## Quick Start
 19 | 
 20 | ### 1. Clone and Install
 21 | 
 22 | ```bash
 23 | # Clone the repository
 24 | git clone https://github.com/wshobson/maverick-mcp.git
 25 | cd maverick-mcp
 26 | 
 27 | # Install dependencies using uv (recommended)
 28 | uv sync
 29 | 
 30 | # Or use pip
 31 | pip install -e .
 32 | ```
 33 | 
 34 | ### 2. Configure Environment
 35 | 
 36 | Create a `.env` file with your configuration:
 37 | 
 38 | ```bash
 39 | # Database Configuration (MaverickMCP's own database)
 40 | MCP_DATABASE_URL=postgresql://user:password@localhost/maverick_mcp
 41 | # Or use SQLite for development
 42 | # MCP_DATABASE_URL=sqlite:///maverick_mcp.db
 43 | 
 44 | # Tiingo API Configuration (required for data loading)
 45 | TIINGO_API_TOKEN=your_tiingo_api_token_here
 46 | 
 47 | # Redis Configuration (optional)
 48 | REDIS_HOST=localhost
 49 | REDIS_PORT=6379
 50 | REDIS_DB=0
 51 | CACHE_ENABLED=true
 52 | 
 53 | # Personal Use Configuration
 54 | # Note: Authentication and billing systems have been removed for simplicity
 55 | # This version is designed for local personal use only
 56 | ```
 57 | 
 58 | ### 3. Create Database
 59 | 
 60 | ```bash
 61 | # Create PostgreSQL database
 62 | createdb maverick_mcp
 63 | 
 64 | # Or use existing PostgreSQL
 65 | psql -U postgres -c "CREATE DATABASE maverick_mcp;"
 66 | ```
 67 | 
 68 | ### 4. Run Migrations
 69 | 
 70 | ```bash
 71 | # Alembic is already configured in this repo (alembic.ini, alembic/),
 72 | # so there is no need to run `alembic init`
 73 | 
 74 | # Run all migrations to create schema
 75 | alembic upgrade head
 76 | 
 77 | # Verify migration
 78 | alembic current
 79 | ```
 80 | 
 81 | The migration creates the following self-contained tables:
 82 | - `mcp_stocks` - Master stock information
 83 | - `mcp_price_cache` - Historical price data
 84 | - `mcp_maverick_stocks` - Maverick screening results
 85 | - `mcp_maverick_bear_stocks` - Bear market screening
 86 | - `mcp_supply_demand_breakouts` - Supply/demand analysis
 87 | - `mcp_technical_cache` - Technical indicator cache
  88 | - `mcp_users`, `mcp_api_keys`, etc. - Legacy authentication tables (unused; auth was removed in this personal-use build)
 89 | 
 90 | ### 5. Load Initial Data
 91 | 
 92 | #### Option A: Quick Start (Top 10 S&P 500)
 93 | 
 94 | ```bash
 95 | # Load 2 years of data for top 10 S&P 500 stocks
 96 | python scripts/load_tiingo_data.py \
 97 |     --symbols AAPL MSFT GOOGL AMZN NVDA META TSLA LLY V UNH \
 98 |     --years 2 \
 99 |     --calculate-indicators \
100 |     --run-screening
101 | ```
102 | 
103 | #### Option B: Full S&P 500
104 | 
105 | ```bash
106 | # Load S&P 500 stocks with screening
107 | python scripts/load_tiingo_data.py \
108 |     --sp500 \
109 |     --years 2 \
110 |     --calculate-indicators \
111 |     --run-screening
112 | ```
113 | 
114 | #### Option C: Custom Symbols
115 | 
116 | ```bash
117 | # Load specific symbols with custom date range
118 | python scripts/load_tiingo_data.py \
119 |     --symbols AAPL MSFT GOOGL \
120 |     --start-date 2022-01-01 \
121 |     --end-date 2024-01-01 \
122 |     --calculate-indicators \
123 |     --run-screening
124 | ```
125 | 
126 | ### 6. Start the Server
127 | 
128 | ```bash
129 | # Recommended: Use the Makefile
130 | make dev
131 | 
132 | # Alternative: Direct FastMCP server
133 | python -m maverick_mcp.api.server --transport streamable-http --port 8003
134 | 
135 | # Development mode with hot reload
136 | ./scripts/dev.sh
137 | ```
138 | 
139 | ### 7. Verify Setup
140 | 
141 | ```bash
142 | # Check health endpoint
143 | curl http://localhost:8003/health
144 | 
145 | # Test a simple query via the MCP endpoint
146 | curl -X POST http://localhost:8003/mcp -H "Content-Type: application/json" -d '{"method": "tools/list"}'
147 | 
148 | # Or use the API directly
149 | curl http://localhost:8003/api/data/stock/AAPL
150 | ```
151 | 
152 | ## Database Schema
153 | 
154 | The self-contained schema uses the `mcp_` prefix for all tables:
155 | 
156 | ```sql
157 | -- Stock Data Tables
158 | mcp_stocks                  -- Master stock information
159 | ├── stock_id (UUID PK)
160 | ├── ticker_symbol
161 | ├── company_name
162 | ├── sector
163 | └── ...
164 | 
165 | mcp_price_cache            -- Historical OHLCV data
166 | ├── price_cache_id (UUID PK)
167 | ├── stock_id (FK -> mcp_stocks)
168 | ├── date
169 | ├── open_price, high_price, low_price, close_price
170 | └── volume
171 | 
172 | -- Screening Tables
173 | mcp_maverick_stocks        -- Momentum screening
174 | ├── id (PK)
175 | ├── stock_id (FK -> mcp_stocks)
176 | ├── combined_score
177 | └── technical indicators...
178 | 
179 | mcp_maverick_bear_stocks   -- Bear screening
180 | ├── id (PK)
181 | ├── stock_id (FK -> mcp_stocks)
182 | ├── score
183 | └── bearish indicators...
184 | 
185 | mcp_supply_demand_breakouts -- Supply/demand analysis
186 | ├── id (PK)
187 | ├── stock_id (FK -> mcp_stocks)
188 | ├── momentum_score
189 | └── accumulation metrics...
190 | 
191 | mcp_technical_cache        -- Flexible indicator storage
192 | ├── id (PK)
193 | ├── stock_id (FK -> mcp_stocks)
194 | ├── indicator_type
195 | └── values...
196 | ```
197 | 
198 | ## Data Loading Details
199 | 
200 | ### Rate Limiting
201 | 
202 | Tiingo API has a rate limit of 2400 requests/hour. The loader automatically handles this:
203 | 
204 | ```python
205 | # Configure in load_tiingo_data.py
206 | MAX_CONCURRENT_REQUESTS = 5  # Parallel requests
207 | RATE_LIMIT_DELAY = 1.5       # Seconds between requests
208 | ```
209 | 
210 | ### Resume Capability
211 | 
212 | The loader saves progress and can resume interrupted loads:
213 | 
214 | ```bash
215 | # Start loading (creates checkpoint file)
216 | python scripts/load_tiingo_data.py --sp500 --years 2
217 | 
218 | # If interrupted, resume from checkpoint
219 | python scripts/load_tiingo_data.py --resume
220 | ```
221 | 
222 | ### Technical Indicators Calculated
223 | 
224 | - **Trend**: SMA (20, 50, 150, 200), EMA (21)
225 | - **Momentum**: RSI, MACD, Relative Strength Rating
226 | - **Volatility**: ATR, ADR (Average Daily Range)
227 | - **Volume**: 30-day average, volume ratio
228 | 
229 | ### Screening Algorithms
230 | 
231 | 1. **Maverick Screening** (Momentum)
232 |    - Price > EMA21 > SMA50 > SMA200
233 |    - Momentum Score > 70
234 |    - Combined score calculation
235 | 
236 | 2. **Bear Screening** (Weakness)
237 |    - Price < EMA21 < SMA50
238 |    - Momentum Score < 30
239 |    - Negative MACD
240 | 
241 | 3. **Supply/Demand Screening** (Accumulation)
242 |    - Price > SMA50 > SMA150 > SMA200
243 |    - Momentum Score > 80
244 |    - Volume confirmation
245 | 
246 | ## Troubleshooting
247 | 
248 | ### Database Connection Issues
249 | 
250 | ```bash
251 | # Test PostgreSQL connection
252 | psql -U user -d maverick_mcp -c "SELECT 1;"
253 | 
254 | # Use SQLite for testing
255 | export MCP_DATABASE_URL=sqlite:///test.db
256 | ```
257 | 
258 | ### Tiingo API Issues
259 | 
260 | ```bash
261 | # Test Tiingo API token
262 | curl -H "Authorization: Token YOUR_TOKEN" \
263 |      "https://api.tiingo.com/api/test"
264 | 
265 | # Check rate limit status
266 | curl -H "Authorization: Token YOUR_TOKEN" \
267 |      "https://api.tiingo.com/account/usage"
268 | ```
269 | 
270 | ### Migration Issues
271 | 
272 | ```bash
273 | # Check current migration
274 | alembic current
275 | 
276 | # Show migration history
277 | alembic history
278 | 
279 | # Downgrade if needed
280 | alembic downgrade -1
281 | 
282 | # Re-run specific migration
283 | alembic upgrade 010_self_contained_schema
284 | ```
285 | 
286 | ### Data Loading Issues
287 | 
288 | ```bash
289 | # Check checkpoint file
290 | cat tiingo_load_progress.json
291 | 
292 | # Clear checkpoint to start fresh
293 | rm tiingo_load_progress.json
294 | 
295 | # Load single symbol for testing
296 | python scripts/load_tiingo_data.py \
297 |     --symbols AAPL \
298 |     --years 1 \
299 |     --calculate-indicators
300 | ```
301 | 
302 | ## Performance Optimization
303 | 
304 | ### Database Indexes
305 | 
306 | The migration creates optimized indexes for common queries:
307 | 
308 | ```sql
309 | -- Price data lookups
310 | CREATE INDEX mcp_price_cache_stock_id_date_idx 
311 | ON mcp_price_cache(stock_id, date);
312 | 
313 | -- Screening queries
314 | CREATE INDEX mcp_maverick_stocks_combined_score_idx 
315 | ON mcp_maverick_stocks(combined_score DESC);
316 | 
317 | -- Supply/demand filtering
318 | CREATE INDEX mcp_supply_demand_breakouts_ma_filter_idx 
319 | ON mcp_supply_demand_breakouts(close_price, sma_50, sma_150, sma_200);
320 | ```
321 | 
322 | ### Connection Pooling
323 | 
324 | Configure in `.env`:
325 | 
326 | ```bash
327 | DB_POOL_SIZE=20
328 | DB_MAX_OVERFLOW=10
329 | DB_POOL_TIMEOUT=30
330 | DB_POOL_RECYCLE=3600
331 | ```
332 | 
333 | ### Caching with Redis
334 | 
335 | Enable Redis caching for better performance:
336 | 
337 | ```bash
338 | CACHE_ENABLED=true
339 | CACHE_TTL_SECONDS=300
340 | REDIS_HOST=localhost
341 | REDIS_PORT=6379
342 | ```
343 | 
344 | ## Personal Use Deployment
345 | 
346 | ### 1. Use Local Database
347 | 
348 | ```bash
349 | # Use SQLite for simplicity
350 | MCP_DATABASE_URL=sqlite:///maverick_mcp.db
351 | 
352 | # Or PostgreSQL for better performance
353 | MCP_DATABASE_URL=postgresql://user:password@localhost/maverick_mcp
354 | ```
355 | 
356 | ### 2. Connect with Claude Desktop
357 | 
358 | Add to `~/Library/Application Support/Claude/claude_desktop_config.json`:
359 | 
360 | ```json
361 | {
362 |   "mcpServers": {
363 |     "maverick-mcp": {
364 |       "command": "npx",
365 |       "args": ["-y", "mcp-remote", "http://localhost:8003/mcp"]
366 |     }
367 |   }
368 | }
369 | ```
370 | 
371 | ### 3. Set Up Daily Data Updates
372 | 
373 | Create a cron job for daily updates:
374 | 
375 | ```bash
376 | # Add to crontab
377 | 0 1 * * * /path/to/venv/bin/python /path/to/scripts/load_tiingo_data.py \
378 |     --sp500 --years 0.1 --calculate-indicators --run-screening
379 | ```
380 | 
381 | ## API Usage Examples
382 | 
383 | ### Fetch Stock Data
384 | 
385 | ```python
386 | from maverick_mcp.data.models import Stock, PriceCache, get_db
387 | 
388 | # Get historical data
389 | with get_db() as session:
390 |     df = PriceCache.get_price_data(
391 |         session, 
392 |         "AAPL", 
393 |         "2023-01-01", 
394 |         "2024-01-01"
395 |     )
396 | ```
397 | 
398 | ### Run Screening
399 | 
400 | ```python
401 | from maverick_mcp.data.models import MaverickStocks
402 | 
403 | # Get top momentum stocks
404 | with get_db() as session:
405 |     top_stocks = MaverickStocks.get_top_stocks(session, limit=20)
406 |     for stock in top_stocks:
407 |         print(f"{stock.stock}: Score {stock.combined_score}")
408 | ```
409 | 
410 | ### Using MCP Tools
411 | 
412 | ```bash
413 | # List available tools
414 | curl -X POST http://localhost:8003/mcp \
415 |     -H "Content-Type: application/json" \
416 |     -d '{"method": "tools/list"}'
417 | 
418 | # Get screening results
419 | curl -X POST http://localhost:8003/mcp \
420 |     -H "Content-Type: application/json" \
421 |     -d '{
422 |         "method": "tools/call",
423 |         "params": {
424 |             "name": "get_maverick_stocks",
425 |             "arguments": {"limit": 10}
426 |         }
427 |     }'
428 | ```
429 | 
430 | ## Next Steps
431 | 
432 | 1. **Load More Data**: Expand beyond S&P 500 to Russell 3000
433 | 2. **Add More Indicators**: Implement additional technical indicators
434 | 3. **Custom Screening**: Create your own screening algorithms
435 | 4. **Web Dashboard**: Deploy the maverick-mcp-web frontend
436 | 5. **API Integration**: Build applications using the MCP protocol
437 | 
438 | ## Support
439 | 
440 | - GitHub Issues: [Report bugs or request features](https://github.com/wshobson/maverick-mcp/issues)
441 | - Discussions: [Join community discussions](https://github.com/wshobson/maverick-mcp/discussions)
442 | - Documentation: [Read the full docs](https://github.com/wshobson/maverick-mcp)
443 | 
444 | ---
445 | 
446 | *MaverickMCP is now completely self-contained and ready for deployment!*
```
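
The curl examples in the guide translate directly to Python; the sketch below is an editorial addition that assumes the endpoint and payload shapes shown under "Using MCP Tools" above (port 8003, `get_maverick_stocks` tool).

```python
import requests

MCP_URL = "http://localhost:8003/mcp"

# List the available MCP tools
resp = requests.post(MCP_URL, json={"method": "tools/list"}, timeout=10)
print(resp.json())

# Fetch the top 10 Maverick screening results
payload = {
    "method": "tools/call",
    "params": {"name": "get_maverick_stocks", "arguments": {"limit": 10}},
}
resp = requests.post(MCP_URL, json=payload, timeout=10)
print(resp.json())
```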

--------------------------------------------------------------------------------
/maverick_mcp/backtesting/strategies/parser.py:
--------------------------------------------------------------------------------

```python
  1 | """Natural language strategy parser for VectorBT."""
  2 | 
  3 | import re
  4 | from typing import Any
  5 | 
  6 | from langchain.prompts import PromptTemplate
  7 | from langchain_anthropic import ChatAnthropic
  8 | 
  9 | from .templates import STRATEGY_TEMPLATES
 10 | 
 11 | 
 12 | class StrategyParser:
 13 |     """Parser for converting natural language to VectorBT strategies."""
 14 | 
 15 |     def __init__(self, llm: ChatAnthropic | None = None):
 16 |         """Initialize strategy parser.
 17 | 
 18 |         Args:
 19 |             llm: Language model for parsing (optional)
 20 |         """
 21 |         self.llm = llm
 22 |         self.templates = STRATEGY_TEMPLATES
 23 | 
 24 |     def parse_simple(self, description: str) -> dict[str, Any]:
 25 |         """Parse simple strategy descriptions without LLM.
 26 | 
 27 |         Args:
 28 |             description: Natural language strategy description
 29 | 
 30 |         Returns:
 31 |             Strategy configuration
 32 |         """
 33 |         description_lower = description.lower()
 34 | 
 35 |         # Try to match known strategy patterns
 36 |         if "sma" in description_lower or "moving average cross" in description_lower:
 37 |             return self._parse_sma_strategy(description)
 38 |         elif "rsi" in description_lower:
 39 |             return self._parse_rsi_strategy(description)
 40 |         elif "macd" in description_lower:
 41 |             return self._parse_macd_strategy(description)
 42 |         elif "bollinger" in description_lower or "band" in description_lower:
 43 |             return self._parse_bollinger_strategy(description)
 44 |         elif "momentum" in description_lower:
 45 |             return self._parse_momentum_strategy(description)
 46 |         elif "ema" in description_lower or "exponential" in description_lower:
 47 |             return self._parse_ema_strategy(description)
 48 |         elif "breakout" in description_lower or "channel" in description_lower:
 49 |             return self._parse_breakout_strategy(description)
 50 |         elif "mean reversion" in description_lower or "reversion" in description_lower:
 51 |             return self._parse_mean_reversion_strategy(description)
 52 |         else:
 53 |             # Default to momentum if no clear match
 54 |             return {
 55 |                 "strategy_type": "momentum",
 56 |                 "parameters": self.templates["momentum"]["parameters"],
 57 |             }
 58 | 
 59 |     def _parse_sma_strategy(self, description: str) -> dict[str, Any]:
 60 |         """Parse SMA crossover strategy from description."""
 61 |         # Extract numbers from description
 62 |         numbers = re.findall(r"\d+", description)
 63 | 
 64 |         params = dict(self.templates["sma_cross"]["parameters"])
 65 |         if len(numbers) >= 2:
 66 |             params["fast_period"] = int(numbers[0])
 67 |             params["slow_period"] = int(numbers[1])
 68 |         elif len(numbers) == 1:
 69 |             params["fast_period"] = int(numbers[0])
 70 | 
 71 |         return {
 72 |             "strategy_type": "sma_cross",
 73 |             "parameters": params,
 74 |         }
 75 | 
 76 |     def _parse_rsi_strategy(self, description: str) -> dict[str, Any]:
 77 |         """Parse RSI strategy from description."""
 78 |         numbers = re.findall(r"\d+", description)
 79 | 
 80 |         params = dict(self.templates["rsi"]["parameters"])
 81 | 
  82 |         # Look for period, oversold, and overbought levels. Track period
  83 |         # assignment with a flag: params starts from the template defaults,
  84 |         # so a `"period" not in params` check would never fire.
  85 |         period_set = False
  86 |         for num in numbers:
  87 |             num_val = int(num)
  88 |             if 5 <= num_val <= 30 and not period_set:  # period: typically 7-21
  89 |                 params["period"], period_set = num_val, True
  90 |             elif 15 <= num_val <= 35:  # oversold: typically 20-35
  91 |                 params["oversold"] = num_val
  92 |             elif 65 <= num_val <= 85:  # overbought: typically 65-85
  93 |                 params["overbought"] = num_val
 94 | 
 95 |         return {
 96 |             "strategy_type": "rsi",
 97 |             "parameters": params,
 98 |         }
 99 | 
100 |     def _parse_macd_strategy(self, description: str) -> dict[str, Any]:
101 |         """Parse MACD strategy from description."""
102 |         numbers = re.findall(r"\d+", description)
103 | 
104 |         params = dict(self.templates["macd"]["parameters"])
105 |         if len(numbers) >= 3:
106 |             params["fast_period"] = int(numbers[0])
107 |             params["slow_period"] = int(numbers[1])
108 |             params["signal_period"] = int(numbers[2])
109 | 
110 |         return {
111 |             "strategy_type": "macd",
112 |             "parameters": params,
113 |         }
114 | 
115 |     def _parse_bollinger_strategy(self, description: str) -> dict[str, Any]:
116 |         """Parse Bollinger Bands strategy from description."""
117 |         numbers = re.findall(r"\d+\.?\d*", description)
118 | 
119 |         params = dict(self.templates["bollinger"]["parameters"])
120 |         for num in numbers:
121 |             num_val = float(num)
122 |             # Period is typically 10-30
123 |             if num_val == int(num_val) and 5 <= num_val <= 50:
124 |                 params["period"] = int(num_val)
125 |             # Std dev is typically 1.5-3.0
126 |             elif 1.0 <= num_val <= 4.0:
127 |                 params["std_dev"] = num_val
128 | 
129 |         return {
130 |             "strategy_type": "bollinger",
131 |             "parameters": params,
132 |         }
133 | 
134 |     def _parse_momentum_strategy(self, description: str) -> dict[str, Any]:
135 |         """Parse momentum strategy from description."""
136 |         numbers = re.findall(r"\d+\.?\d*", description)
137 | 
138 |         params = dict(self.templates["momentum"]["parameters"])
139 |         for num in numbers:
140 |             num_val = float(num)
141 |             # Percentage first (e.g., "5%" -> 0.05); slice avoids IndexError
142 |             if description[description.find(num) + len(num) :].startswith("%"):
143 |                 params["threshold"] = num_val / 100
144 |             # Lookback is typically 10-50
145 |             elif num_val == int(num_val) and 5 <= num_val <= 100:
146 |                 params["lookback"] = int(num_val)
147 |             # Threshold is typically 0.01-0.20
148 |             elif 0.001 <= num_val <= 0.5:
149 |                 params["threshold"] = num_val
150 | 
151 |         return {
152 |             "strategy_type": "momentum",
153 |             "parameters": params,
154 |         }
155 | 
156 |     def _parse_ema_strategy(self, description: str) -> dict[str, Any]:
157 |         """Parse EMA crossover strategy from description."""
158 |         numbers = re.findall(r"\d+", description)
159 | 
160 |         params = dict(self.templates["ema_cross"]["parameters"])
161 |         if len(numbers) >= 2:
162 |             params["fast_period"] = int(numbers[0])
163 |             params["slow_period"] = int(numbers[1])
164 |         elif len(numbers) == 1:
165 |             params["fast_period"] = int(numbers[0])
166 | 
167 |         return {
168 |             "strategy_type": "ema_cross",
169 |             "parameters": params,
170 |         }
171 | 
172 |     def _parse_breakout_strategy(self, description: str) -> dict[str, Any]:
173 |         """Parse breakout strategy from description."""
174 |         numbers = re.findall(r"\d+", description)
175 | 
176 |         params = dict(self.templates["breakout"]["parameters"])
177 |         if len(numbers) >= 2:
178 |             params["lookback"] = int(numbers[0])
179 |             params["exit_lookback"] = int(numbers[1])
180 |         elif len(numbers) == 1:
181 |             params["lookback"] = int(numbers[0])
182 | 
183 |         return {
184 |             "strategy_type": "breakout",
185 |             "parameters": params,
186 |         }
187 | 
188 |     def _parse_mean_reversion_strategy(self, description: str) -> dict[str, Any]:
189 |         """Parse mean reversion strategy from description."""
190 |         numbers = re.findall(r"\d+\.?\d*", description)
191 | 
192 |         params = dict(self.templates["mean_reversion"]["parameters"])
193 |         for num in numbers:
194 |             num_val = float(num)
195 |             if num_val == int(num_val) and 5 <= num_val <= 100:
196 |                 params["ma_period"] = int(num_val)
197 |             elif 0.001 <= num_val <= 0.2:
198 |                 if "entry" in description.lower():
199 |                     params["entry_threshold"] = num_val
200 |                 elif "exit" in description.lower():
201 |                     params["exit_threshold"] = num_val
202 | 
203 |         return {
204 |             "strategy_type": "mean_reversion",
205 |             "parameters": params,
206 |         }
207 | 
208 |     async def parse_with_llm(self, description: str) -> dict[str, Any]:
209 |         """Parse complex strategy descriptions using LLM.
210 | 
211 |         Args:
212 |             description: Natural language strategy description
213 | 
214 |         Returns:
215 |             Strategy configuration
216 |         """
217 |         if not self.llm:
218 |             # Fall back to simple parsing
219 |             return self.parse_simple(description)
220 | 
221 |         prompt = PromptTemplate(
222 |             input_variables=["description", "available_strategies"],
223 |             template="""
224 |             Convert this trading strategy description into a structured format.
225 | 
226 |             Description: {description}
227 | 
228 |             Available strategy types:
229 |             {available_strategies}
230 | 
231 |             Return a JSON object with:
232 |             - strategy_type: one of the available types
233 |             - parameters: dictionary of parameters for that strategy
234 |             - entry_logic: description of entry conditions
235 |             - exit_logic: description of exit conditions
236 | 
237 |             Example response:
238 |             {{
239 |                 "strategy_type": "sma_cross",
240 |                 "parameters": {{
241 |                     "fast_period": 10,
242 |                     "slow_period": 20
243 |                 }},
244 |                 "entry_logic": "Buy when fast SMA crosses above slow SMA",
245 |                 "exit_logic": "Sell when fast SMA crosses below slow SMA"
246 |             }}
247 |             """,
248 |         )
249 | 
250 |         available = "\n".join(
251 |             [f"- {k}: {v['description']}" for k, v in self.templates.items()]
252 |         )
253 | 
254 |         response = await self.llm.ainvoke(
255 |             prompt.format(description=description, available_strategies=available)
256 |         )
257 | 
258 |         # Parse JSON response
259 |         import json
260 | 
261 |         try:
262 |             result = json.loads(response.content)
263 |             return result
264 |         except json.JSONDecodeError:
265 |             # Fall back to simple parsing
266 |             return self.parse_simple(description)
267 | 
268 |     def validate_strategy(self, config: dict[str, Any]) -> bool:
269 |         """Validate strategy configuration.
270 | 
271 |         Args:
272 |             config: Strategy configuration
273 | 
274 |         Returns:
275 |             True if valid
276 |         """
277 |         strategy_type = config.get("strategy_type")
278 |         if strategy_type not in self.templates:
279 |             return False
280 | 
281 |         template = self.templates[strategy_type]
282 |         required_params = set(template["parameters"].keys())
283 |         provided_params = set(config.get("parameters", {}).keys())
284 | 
285 |         # Check if all required parameters are present
286 |         return required_params.issubset(provided_params)
287 | 
```
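
An editorial sketch of the parser's rule-based path: `parse_simple` keys off strategy keywords and pulls numeric parameters out with regexes. The parameter defaults come from `STRATEGY_TEMPLATES` (defined in `templates.py`, not shown on this page), so the dictionary shapes in the comments are assumptions rather than literal output.

```python
from maverick_mcp.backtesting.strategies.parser import StrategyParser

parser = StrategyParser()  # no LLM; parse_with_llm falls back to parse_simple

config = parser.parse_simple("SMA crossover with 10 and 20 day periods")
# Assumed shape:
# {"strategy_type": "sma_cross",
#  "parameters": {"fast_period": 10, "slow_period": 20, ...}}

config = parser.parse_simple("RSI 14 with oversold 30 and overbought 70")
assert config["strategy_type"] == "rsi"

# validate_strategy checks that every template parameter is present
assert parser.validate_strategy(config)
```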

--------------------------------------------------------------------------------
/docs/PORTFOLIO.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Portfolio Management Guide
  2 | 
  3 | Complete guide to using MaverickMCP's portfolio personalization features for intelligent, context-aware stock analysis.
  4 | 
  5 | ## Table of Contents
  6 | 
  7 | - [Overview](#overview)
  8 | - [Quick Start](#quick-start)
  9 | - [Portfolio Management](#portfolio-management)
 10 | - [Intelligent Analysis](#intelligent-analysis)
 11 | - [MCP Resource](#mcp-resource)
 12 | - [Best Practices](#best-practices)
 13 | - [Troubleshooting](#troubleshooting)
 14 | - [Technical Details](#technical-details)
 15 | 
 16 | ## Overview
 17 | 
 18 | MaverickMCP's portfolio features transform your AI financial assistant from stateless analysis to personalized, context-aware recommendations. The system:
 19 | 
 20 | - **Tracks your holdings** with automatic cost basis averaging
 21 | - **Calculates live P&L** using real-time market data
 22 | - **Enhances analysis tools** to auto-detect your positions
 23 | - **Provides AI context** through MCP resources
 24 | 
 25 | **DISCLAIMER**: All portfolio features are for educational purposes only. This is not investment advice. Always consult qualified financial professionals before making investment decisions.
 26 | 
 27 | ## Quick Start
 28 | 
 29 | ### 1. Add Your First Position
 30 | 
 31 | ```
 32 | Add 10 shares of Apple stock I bought at $150.50 on January 15, 2024
 33 | ```
 34 | 
 35 | Behind the scenes, this uses:
 36 | ```python
 37 | portfolio_add_position(
 38 |     ticker="AAPL",
 39 |     shares=10,
 40 |     purchase_price=150.50,
 41 |     purchase_date="2024-01-15"
 42 | )
 43 | ```
 44 | 
 45 | ### 2. View Your Portfolio
 46 | 
 47 | ```
 48 | Show me my portfolio
 49 | ```
 50 | 
 51 | Response includes:
 52 | - All positions with current prices
 53 | - Unrealized P&L for each position
 54 | - Total portfolio value and performance
 55 | - Diversification metrics
 56 | 
 57 | ### 3. Smart Analysis
 58 | 
 59 | ```
 60 | Compare my portfolio holdings
 61 | ```
 62 | 
 63 | Automatically compares all your positions without manual ticker entry!
 64 | 
 65 | ## Portfolio Management
 66 | 
 67 | ### Adding Positions
 68 | 
 69 | **Add a new position:**
 70 | ```
 71 | Add 50 shares of Microsoft at $380.25
 72 | ```
 73 | 
 74 | **Add to existing position (automatic cost averaging):**
 75 | ```
 76 | Add 25 more shares of Apple at $165.00
 77 | ```
 78 | 
 79 | The system automatically:
 80 | - Averages your cost basis
 81 | - Updates total investment
 82 | - Preserves earliest purchase date
 83 | 
 84 | **Example:**
 85 | - Initial: 10 shares @ $150 = $1,500 total cost, $150 avg cost
 86 | - Add: 10 shares @ $170 = $1,700 total cost
 87 | - Result: 20 shares, $160 avg cost, $3,200 total invested
 88 | 
 89 | ### Viewing Positions
 90 | 
 91 | **Get complete portfolio:**
 92 | ```
 93 | Show my portfolio with current prices
 94 | ```
 95 | 
 96 | Returns:
 97 | ```json
 98 | {
 99 |   "portfolio_name": "My Portfolio",
100 |   "positions": [
101 |     {
102 |       "ticker": "AAPL",
103 |       "shares": 20.0,
104 |       "average_cost_basis": 160.00,
105 |       "current_price": 175.50,
106 |       "unrealized_pnl": 310.00,
107 |       "unrealized_pnl_pct": 9.69
108 |     }
109 |   ],
110 |   "total_value": 3510.00,
111 |   "total_invested": 3200.00,
112 |   "total_pnl": 310.00,
113 |   "total_pnl_pct": 9.69
114 | }
115 | ```
116 | 
117 | ### Removing Positions
118 | 
119 | **Partial sale:**
120 | ```
121 | Sell 10 shares of Apple
122 | ```
123 | 
124 | Maintains average cost basis on remaining shares.
125 | 
126 | **Full position exit:**
127 | ```
128 | Remove all my Tesla shares
129 | ```
130 | 
131 | or simply:
132 | ```
133 | Remove TSLA
134 | ```
135 | 
136 | ### Clearing Portfolio
137 | 
138 | **Remove all positions:**
139 | ```
140 | Clear my entire portfolio
141 | ```
142 | 
143 | Requires confirmation for safety.
144 | 
145 | ## Intelligent Analysis
146 | 
147 | ### Auto-Compare Holdings
148 | 
149 | Instead of:
150 | ```
151 | Compare AAPL, MSFT, GOOGL, TSLA
152 | ```
153 | 
154 | Simply use:
155 | ```
156 | Compare my holdings
157 | ```
158 | 
159 | The tool automatically:
160 | - Pulls all tickers from your portfolio
161 | - Analyzes relative performance
162 | - Ranks by metrics
163 | - Shows best/worst performers
164 | 
165 | ### Auto-Correlation Analysis
166 | 
167 | ```
168 | Analyze correlation in my portfolio
169 | ```
170 | 
171 | Automatically:
172 | - Calculates correlation matrix for all holdings
173 | - Identifies highly correlated pairs (diversification issues)
174 | - Finds negative correlations (natural hedges)
175 | - Provides diversification score
176 | 
177 | Example output:
178 | ```json
179 | {
180 |   "average_portfolio_correlation": 0.612,
181 |   "diversification_score": 38.8,
182 |   "high_correlation_pairs": [
183 |     {
184 |       "pair": ["AAPL", "MSFT"],
185 |       "correlation": 0.823,
186 |       "interpretation": "High positive correlation"
187 |     }
188 |   ],
189 |   "recommendation": "Consider adding uncorrelated assets"
190 | }
191 | ```
192 | 
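Note that in the example, the diversification score is consistent with `(1 - average_correlation) * 100` (38.8 = (1 − 0.612) × 100). As a rough illustration (not necessarily the tool's internal implementation), both numbers can be derived from daily closes with pandas:

```python
import pandas as pd

def diversification_summary(prices: pd.DataFrame) -> tuple[float, float]:
    """prices: one column of daily closes per holding."""
    corr = prices.pct_change().dropna().corr()
    n = len(corr)
    # Average the off-diagonal pairwise correlations (diagonal is all 1s).
    avg_corr = (corr.to_numpy().sum() - n) / (n * (n - 1))
    return avg_corr, (1 - avg_corr) * 100
```
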
193 | ### Position-Aware Risk Analysis
194 | 
195 | ```
196 | Analyze AAPL with risk analysis
197 | ```
198 | 
199 | If you own AAPL, automatically shows:
200 | - Your current position (shares, cost basis)
201 | - Unrealized P&L
202 | - Position sizing recommendations
203 | - Averaging down/up suggestions
204 | 
205 | Example with existing position:
206 | ```json
207 | {
208 |   "ticker": "AAPL",
209 |   "current_price": 175.50,
210 |   "existing_position": {
211 |     "shares_owned": 20.0,
212 |     "average_cost_basis": 160.00,
213 |     "total_invested": 3200.00,
214 |     "current_value": 3510.00,
215 |     "unrealized_pnl": 310.00,
216 |     "unrealized_pnl_pct": 9.69,
217 |     "position_recommendation": "Hold current position"
218 |   }
219 | }
220 | ```
221 | 
222 | ## MCP Resource
223 | 
224 | ### portfolio://my-holdings Resource
225 | 
226 | The portfolio resource provides automatic context to AI agents during conversations.
227 | 
228 | **Accessed via:**
229 | ```
230 | What's in my portfolio?
231 | ```
232 | 
233 | The AI automatically sees:
234 | - All current positions
235 | - Live prices and P&L
236 | - Portfolio composition
237 | - Diversification status
238 | 
239 | This enables natural conversations:
240 | ```
241 | Should I add more tech exposure?
242 | ```
243 | 
244 | The AI knows you already own AAPL, MSFT, GOOGL and can provide personalized advice.
245 | 
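For reference, a minimal sketch of how such a resource can be registered with FastMCP (illustrative only; `load_positions_from_db` is a hypothetical helper, and the server's actual resource code may differ):

```python
from fastmcp import FastMCP

mcp = FastMCP("maverick-mcp")

@mcp.resource("portfolio://my-holdings")
def my_holdings() -> str:
    """Expose current holdings as context the AI reads automatically."""
    positions = load_positions_from_db()  # hypothetical helper
    return "\n".join(
        f"{p.ticker}: {p.shares} shares @ ${p.average_cost_basis}"
        for p in positions
    )
```
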
246 | ## Best Practices
247 | 
248 | ### Cost Basis Tracking
249 | 
250 | - **Always specify purchase date** for accurate records
251 | - **Add notes** for important context (e.g., "RSU vest", "DCA purchase #3")
252 | - **Review regularly** to ensure accuracy
253 | 
254 | ### Diversification
255 | 
256 | - Use `portfolio_correlation_analysis()` monthly
257 | - Watch for correlations above 0.7 (concentration risk)
258 | - Consider uncorrelated assets when diversification score < 50
259 | 
260 | ### Position Sizing
261 | 
262 | - Use `risk_adjusted_analysis()` before adding to positions
263 | - Follow position sizing recommendations
264 | - Respect stop-loss suggestions
265 | 
266 | ### Maintenance
267 | 
268 | - **Weekly**: Review portfolio performance
269 | - **Monthly**: Analyze correlations
270 | - **Quarterly**: Rebalance based on analysis tools
271 | 
272 | ## Troubleshooting
273 | 
274 | ### "No portfolio found"
275 | 
276 | **Problem**: Trying to use auto-detection features without any positions.
277 | 
278 | **Solution**: Add at least one position:
279 | ```
280 | Add 1 share of SPY at current price
281 | ```
282 | 
283 | ### "Insufficient positions for comparison"
284 | 
285 | **Problem**: Need minimum 2 positions for comparison/correlation.
286 | 
287 | **Solution**: Add another position or specify tickers manually:
288 | ```
289 | Compare AAPL, MSFT
290 | ```
291 | 
292 | ### "Invalid ticker symbol"
293 | 
294 | **Problem**: Ticker doesn't exist or is incorrectly formatted.
295 | 
296 | **Solution**:
297 | - Check ticker spelling
298 | - Verify symbol on financial websites
299 | - Use standard format (e.g., "BRK.B" not "BRKB")
300 | 
301 | ### Stale Price Data
302 | 
303 | **Problem**: Portfolio shows old prices.
304 | 
305 | **Solution**: Refresh by calling `get_my_portfolio(include_current_prices=True)`
306 | 
307 | ### Position Not Found
308 | 
309 | **Problem**: Trying to remove shares from position you don't own.
310 | 
311 | **Solution**: Check your portfolio first:
312 | ```
313 | Show my portfolio
314 | ```
315 | 
316 | ## Technical Details
317 | 
318 | ### Cost Basis Method
319 | 
320 | **Average Cost Method**: The simplest approach, and the most appropriate for educational use.
321 | 
322 | Formula:
323 | ```
324 | New Avg Cost = (Existing Total Cost + New Purchase Cost) / Total Shares
325 | ```
326 | 
327 | Example:
328 | - Buy 10 @ $100 = $1,000 total, $100 avg
329 | - Buy 10 @ $120 = $1,200 additional
330 | - Result: 20 shares, $110 avg cost ($2,200 / 20)
331 | 
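The same arithmetic as a minimal sketch (assuming `Decimal` inputs; a partial sale reduces shares and total cost proportionally, so the average cost is unchanged):

```python
from decimal import Decimal

def average_cost(total_cost: Decimal, shares: Decimal,
                 buy_price: Decimal, buy_shares: Decimal):
    """Return (new_shares, new_total_cost, new_avg_cost) after a purchase."""
    new_total_cost = total_cost + buy_price * buy_shares
    new_shares = shares + buy_shares
    return new_shares, new_total_cost, new_total_cost / new_shares

# 10 @ $100, then 10 @ $120 -> 20 shares at a $110 average cost.
shares, total, avg = average_cost(
    Decimal("1000"), Decimal("10"), Decimal("120"), Decimal("10")
)
assert (shares, total, avg) == (Decimal("20"), Decimal("2200"), Decimal("110"))
```
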
332 | ### Database Schema
333 | 
334 | **Tables:**
335 | - `mcp_portfolios`: User portfolio metadata
336 | - `mcp_portfolio_positions`: Individual positions
337 | 
338 | **Precision:**
339 | - Shares: Numeric(20,8) - supports fractional shares
340 | - Prices: Numeric(12,4) - 4 decimal precision
341 | - Total Cost: Numeric(20,4) - high precision for large positions
342 | 
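As a sketch, columns with this precision might be declared in SQLAlchemy as follows (illustrative only; the actual model definitions may differ):

```python
from sqlalchemy import Column, Integer, Numeric, String
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class PortfolioPosition(Base):
    __tablename__ = "mcp_portfolio_positions"

    id = Column(Integer, primary_key=True)
    ticker = Column(String(20), nullable=False)
    shares = Column(Numeric(20, 8), nullable=False)              # fractional shares
    average_cost_basis = Column(Numeric(12, 4), nullable=False)  # 4 decimal places
    total_cost = Column(Numeric(20, 4), nullable=False)          # large positions
```
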
343 | ### Supported Features
344 | 
345 | ✅ Fractional shares (0.001 minimum)
346 | ✅ Multiple portfolios per user
347 | ✅ Automatic cost averaging
348 | ✅ Live P&L calculations
349 | ✅ Position notes/annotations
350 | ✅ Timezone-aware timestamps
351 | ✅ Cascade deletion (portfolio → positions)
352 | 
353 | ### Limitations
354 | 
355 | - Single currency (USD)
356 | - Stock equities only (no options, futures, crypto)
357 | - Average cost method only (no FIFO/LIFO)
358 | - No tax lot tracking
359 | - No dividend tracking (planned for a future release)
360 | - No transaction history (planned for a future release)
361 | 
362 | ### Data Sources
363 | 
364 | - **Historical Prices**: Tiingo API (free tier: 500 req/day)
365 | - **Live Prices**: Same as historical (delayed 15 minutes on free tier)
366 | - **Company Info**: Pre-seeded S&P 500 database
367 | 
368 | ### Performance
369 | 
370 | - **Database**: SQLite default (PostgreSQL optional for better performance)
371 | - **Caching**: In-memory by default (Redis optional)
372 | - **Price Fetching**: Sequential (batch optimization in Phase 3)
373 | - **Query Optimization**: selectin loading for relationships
374 | 
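Selectin loading avoids N+1 queries when a portfolio is loaded together with its positions. A hedged sketch (`Portfolio` and `session` are hypothetical names assumed to be defined elsewhere):

```python
from sqlalchemy.orm import selectinload

portfolio = (
    session.query(Portfolio)
    # Positions for the matched portfolio are fetched in one extra query
    # instead of one query per relationship access.
    .options(selectinload(Portfolio.positions))
    .filter(Portfolio.name == "My Portfolio")
    .one()
)
```
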
375 | ### Privacy & Security
376 | 
377 | - **Local-first**: All data stored locally in your database
378 | - **No cloud sync**: Portfolio data never leaves your machine
379 | - **No authentication**: Personal use only (no multi-user)
380 | - **No external sharing**: Data accessible only to you
381 | 
382 | ## Migration Guide
383 | 
384 | ### Upgrading from No Portfolio
385 | 
386 | 1. Start MaverickMCP server
387 | 2. Migration runs automatically on first startup
388 | 3. Add your first position
389 | 4. Verify with `get_my_portfolio()`
390 | 
391 | ### Downgrading (Rollback)
392 | 
393 | ```bash
394 | # Backup first
395 | cp maverick_mcp.db maverick_mcp.db.backup
396 | 
397 | # Rollback migration
398 | alembic downgrade -1
399 | 
400 | # Verify
401 | alembic current
402 | ```
403 | 
404 | ### Exporting Portfolio
405 | 
406 | Currently manual:
407 | ```bash
408 | sqlite3 -header -csv maverick_mcp.db "SELECT * FROM mcp_portfolio_positions;" > portfolio_export.csv
409 | ```
410 | 
411 | Future: Built-in export tool planned.
412 | 
413 | ## FAQs
414 | 
415 | **Q: Can I track multiple portfolios?**
416 | A: Yes! Use the `portfolio_name` parameter:
417 | ```python
418 | portfolio_add_position("AAPL", 10, 150, portfolio_name="IRA")
419 | portfolio_add_position("VOO", 5, 400, portfolio_name="401k")
420 | ```
421 | 
422 | **Q: What happens if I add wrong data?**
423 | A: Simply remove the position and re-add:
424 | ```
425 | Remove AAPL
426 | Add 10 shares of AAPL at $150.50
427 | ```
428 | 
429 | **Q: Can I track realized gains?**
430 | A: Not yet. Currently tracks unrealized P&L only. Transaction history is planned for a future release.
431 | 
432 | **Q: Is my data backed up?**
433 | A: No automatic backups. Manually copy `maverick_mcp.db` regularly:
434 | ```bash
435 | cp maverick_mcp.db ~/backups/maverick_mcp_$(date +%Y%m%d).db
436 | ```
437 | 
438 | **Q: Can I use this for tax purposes?**
439 | A: **NO**. This is educational software only. Use professional tax software for tax reporting.
440 | 
441 | ---
442 | 
443 | ## Getting Help
444 | 
445 | - **Issues**: https://github.com/wshobson/maverick-mcp/issues
446 | - **Discussions**: https://github.com/wshobson/maverick-mcp/discussions
447 | - **Documentation**: https://github.com/wshobson/maverick-mcp/tree/main/docs
448 | 
449 | ---
450 | 
451 | **Remember**: This software is for educational purposes only. Always consult qualified financial professionals before making investment decisions.
452 | 
```

--------------------------------------------------------------------------------
/docs/BACKTESTING.md:
--------------------------------------------------------------------------------

```markdown
  1 | # MaverickMCP Backtesting Documentation
  2 | 
  3 | ## Overview
  4 | 
  5 | MaverickMCP provides a comprehensive backtesting system powered by VectorBT with advanced parallel processing capabilities. The system supports 35+ pre-built strategies ranging from simple moving averages to advanced ML ensembles, with optimization, validation, and analysis tools.
  6 | 
  7 | ## Quick Start
  8 | 
  9 | ### Basic Backtesting
 10 | 
 11 | ```python
 12 | # Simple SMA crossover backtest
 13 | run_backtest("AAPL", "sma_cross", fast_period=10, slow_period=20)
 14 | 
 15 | # RSI mean reversion strategy
 16 | run_backtest("TSLA", "rsi", period=14, oversold=30, overbought=70)
 17 | 
 18 | # MACD strategy
 19 | run_backtest("MSFT", "macd", fast_period=12, slow_period=26, signal_period=9)
 20 | ```
 21 | 
 22 | ### Parallel Execution (6-8x Performance Boost)
 23 | 
 24 | ```python
 25 | from maverick_mcp.backtesting.strategy_executor import ExecutionContext, get_strategy_executor
 26 | 
 27 | # Create execution contexts, one per strategy/symbol combination
 28 | contexts = [
 29 |     ExecutionContext(
 30 |         strategy_id="sma_AAPL",
 31 |         symbol="AAPL",
 32 |         strategy_type="sma_cross",
 33 |         parameters={"fast_period": 10, "slow_period": 20},
 34 |         start_date="2023-01-01",
 35 |         end_date="2024-01-01"
 36 |     )
 37 | ]
 38 | 
 39 | # Execute in parallel
 40 | async with get_strategy_executor(max_concurrent_strategies=6) as executor:
 41 |     results = await executor.execute_strategies_parallel(contexts)
 42 | ```
 43 | 
 44 | ## Available Strategies
 45 | 
 46 | ### Technical Analysis Strategies
 47 | - **sma_cross**: Simple Moving Average Crossover
 48 | - **ema_cross**: Exponential Moving Average Crossover
 49 | - **rsi**: Relative Strength Index Mean Reversion
 50 | - **macd**: MACD Crossover Strategy
 51 | - **bollinger**: Bollinger Bands Mean Reversion
 52 | - **momentum**: Price Momentum Strategy
 53 | - **breakout**: Price Channel Breakout
 54 | - **mean_reversion**: Statistical Mean Reversion
 55 | - **volume_weighted**: Volume-Weighted Moving Average
 56 | - **stochastic**: Stochastic Oscillator
 57 | 
 58 | ### Advanced Strategies
 59 | - **adaptive_momentum**: ML-Enhanced Adaptive Momentum
 60 | - **ensemble**: Multi-Strategy Ensemble Approach
 61 | - **regime_aware**: Market Regime Detection & Switching
 62 | - **ml_enhanced**: Machine Learning Enhanced Trading
 63 | - **pairs_trading**: Statistical Arbitrage Pairs Trading
 64 | 
 65 | ## Core API Functions
 66 | 
 67 | ### run_backtest
 68 | 
 69 | Execute a comprehensive backtest with specified strategy and parameters.
 70 | 
 71 | ```python
 72 | run_backtest(
 73 |     symbol="AAPL",
 74 |     strategy="sma_cross",
 75 |     start_date="2023-01-01",  # Optional, defaults to 1 year ago
 76 |     end_date="2024-01-01",     # Optional, defaults to today
 77 |     initial_capital=10000.0,
 78 |     fast_period=10,
 79 |     slow_period=20
 80 | )
 81 | ```
 82 | 
 83 | **Returns:**
 84 | ```json
 85 | {
 86 |   "symbol": "AAPL",
 87 |   "strategy": "sma_cross",
 88 |   "metrics": {
 89 |     "total_return": 0.15,
 90 |     "sharpe_ratio": 1.2,
 91 |     "max_drawdown": -0.08,
 92 |     "total_trades": 24,
 93 |     "win_rate": 0.58,
 94 |     "profit_factor": 1.45,
 95 |     "calmar_ratio": 1.85
 96 |   },
 97 |   "trades": [...],
 98 |   "equity_curve": [...],
 99 |   "analysis": {...}
100 | }
101 | ```
102 | 
103 | ### optimize_strategy
104 | 
105 | Find optimal parameters using grid search optimization.
106 | 
107 | ```python
108 | optimize_strategy(
109 |     symbol="AAPL",
110 |     strategy="sma_cross",
111 |     optimization_params={
112 |         "fast_period": [5, 10, 15, 20],
113 |         "slow_period": [20, 30, 40, 50]
114 |     },
115 |     granularity="medium"  # "coarse", "medium", or "fine"
116 | )
117 | ```
118 | 
119 | ### validate_strategy
120 | 
121 | Validate strategy robustness using walk-forward analysis.
122 | 
123 | ```python
124 | validate_strategy(
125 |     symbol="AAPL",
126 |     strategy="sma_cross",
127 |     parameters={"fast_period": 10, "slow_period": 20},
128 |     n_splits=5,           # Number of walk-forward periods
129 |     test_size=0.2,        # Out-of-sample test size
130 |     validation_type="walk_forward"
131 | )
132 | ```
133 | 
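Conceptually, walk-forward analysis slides a train/test boundary through the date range so parameters are always judged on data they never saw. One plausible split scheme (illustrative; the library's exact windowing may differ), where `test_size` is the out-of-sample fraction of each fold:

```python
import pandas as pd

def walk_forward_splits(dates: pd.DatetimeIndex, n_splits: int = 5,
                        test_size: float = 0.2):
    """Yield (train, test) slices with an expanding in-sample window."""
    fold = len(dates) // n_splits
    test_len = max(1, int(fold * test_size))
    for i in range(n_splits):
        end = fold * (i + 1)
        yield dates[: end - test_len], dates[end - test_len : end]
```
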
134 | ### analyze_portfolio
135 | 
136 | Run portfolio-level backtesting across multiple symbols.
137 | 
138 | ```python
139 | analyze_portfolio(
140 |     symbols=["AAPL", "MSFT", "GOOGL"],
141 |     strategy="momentum",
142 |     weights=[0.33, 0.33, 0.34],  # Optional, equal weight if not specified
143 |     rebalance_frequency="monthly"
144 | )
145 | ```
146 | 
147 | ## Parallel Processing Configuration
148 | 
149 | ### Performance Tuning
150 | 
151 | ```python
152 | # Development/Testing (conservative)
153 | executor = StrategyExecutor(
154 |     max_concurrent_strategies=4,
155 |     max_concurrent_api_requests=8,
156 |     connection_pool_size=50
157 | )
158 | 
159 | # Production (aggressive)
160 | executor = StrategyExecutor(
161 |     max_concurrent_strategies=8,
162 |     max_concurrent_api_requests=15,
163 |     connection_pool_size=100
164 | )
165 | 
166 | # High-volume backtesting
167 | executor = StrategyExecutor(
168 |     max_concurrent_strategies=12,
169 |     max_concurrent_api_requests=20,
170 |     connection_pool_size=200
171 | )
172 | ```
173 | 
174 | ### Environment Variables
175 | 
176 | ```bash
177 | # Database optimization
178 | DB_POOL_SIZE=20
179 | DB_MAX_OVERFLOW=40
180 | DB_POOL_TIMEOUT=30
181 | 
182 | # Parallel execution limits
183 | MAX_CONCURRENT_STRATEGIES=6
184 | MAX_CONCURRENT_API_REQUESTS=10
185 | CONNECTION_POOL_SIZE=100
186 | ```
187 | 
188 | ## Database Optimization
189 | 
190 | ### Indexes for Performance
191 | 
192 | The system automatically creates optimized indexes for fast data retrieval:
193 | 
194 | - **Composite index** for date range queries with symbol lookup
195 | - **Covering index** for OHLCV queries (includes all price data)
196 | - **Partial index** for recent data (PostgreSQL only)
197 | 
198 | ### Batch Data Fetching
199 | 
200 | ```python
201 | from maverick_mcp.backtesting.strategy_executor import batch_fetch_stock_data
202 | 
203 | # Fetch data for multiple symbols efficiently
204 | symbols = ["AAPL", "MSFT", "GOOGL", "TSLA", "NVDA"]
205 | data_dict = await batch_fetch_stock_data(
206 |     symbols=symbols,
207 |     start_date="2023-01-01",
208 |     end_date="2024-01-01",
209 |     max_concurrent=10
210 | )
211 | ```
212 | 
213 | ## Best Practices
214 | 
215 | ### 1. Strategy Development
216 | - Start with simple strategies before complex ones
217 | - Always validate with out-of-sample data
218 | - Use walk-forward analysis for robustness testing
219 | - Consider transaction costs and slippage
220 | 
221 | ### 2. Parameter Optimization
222 | - Avoid overfitting with too many parameters
223 | - Use coarse optimization first, then refine
224 | - Validate optimal parameters on different time periods
225 | - Consider parameter stability over time
226 | 
227 | ### 3. Risk Management
228 | - Always set appropriate position sizing
229 | - Use stop-loss and risk limits
230 | - Monitor maximum drawdown
231 | - Diversify across strategies and assets
232 | 
233 | ### 4. Performance Optimization
234 | - Use parallel execution for multiple backtests
235 | - Enable database caching for frequently accessed data
236 | - Batch fetch data for multiple symbols
237 | - Monitor memory usage with large datasets
238 | 
239 | ## Troubleshooting
240 | 
241 | ### Common Issues
242 | 
243 | **High memory usage**
244 | - Reduce `max_concurrent_strategies`
245 | - Use smaller date ranges for initial testing
246 | - Enable database caching
247 | 
248 | **Slow performance**
249 | - Ensure database indexes are created
250 | - Increase connection pool size
251 | - Use parallel execution
252 | - Check API rate limits
253 | 
254 | **API rate limiting**
255 | - Lower `max_concurrent_api_requests`
256 | - Implement exponential backoff
257 | - Use cached data when possible
258 | 
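A minimal exponential-backoff sketch (illustrative; `RateLimitError` stands in for whatever exception your fetch path raises on HTTP 429):

```python
import asyncio
import random

class RateLimitError(Exception):
    """Placeholder for the rate-limit exception your client raises."""

async def fetch_with_backoff(fetch, retries: int = 5, base: float = 0.5):
    for attempt in range(retries):
        try:
            return await fetch()
        except RateLimitError:
            # Exponential delay plus jitter to avoid synchronized retries.
            await asyncio.sleep(base * 2**attempt + random.random() * 0.1)
    raise RuntimeError("still rate limited after retries")
```
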
259 | **Data quality issues**
260 | - Verify data source reliability
261 | - Check for missing data periods
262 | - Validate against multiple sources
263 | - Handle corporate actions properly
264 | 
265 | ### Debug Mode
266 | 
267 | Enable detailed logging for troubleshooting:
268 | 
269 | ```python
270 | import logging
271 | logging.getLogger("maverick_mcp.backtesting").setLevel(logging.DEBUG)
272 | ```
273 | 
274 | ## Performance Metrics
275 | 
276 | ### Key Metrics Explained
277 | 
278 | - **Total Return**: Overall strategy performance
279 | - **Sharpe Ratio**: Risk-adjusted returns (>1.0 is good, >2.0 is excellent)
280 | - **Max Drawdown**: Maximum peak-to-trough decline
281 | - **Win Rate**: Percentage of profitable trades
282 | - **Profit Factor**: Gross profit / Gross loss (>1.5 is good)
283 | - **Calmar Ratio**: Annual return / Max drawdown (>1.0 is good)
284 | 
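As a rough illustration (not the engine's internal implementation, which also accounts for the risk-free rate, trade-level P&L, and so on), the core metrics can be computed from a daily returns series:

```python
import numpy as np

returns = np.array([0.01, -0.005, 0.008, -0.002, 0.012])  # daily returns

sharpe = np.sqrt(252) * returns.mean() / returns.std()  # annualized, rf = 0

equity = np.cumprod(1 + returns)
drawdown = equity / np.maximum.accumulate(equity) - 1
max_drawdown = drawdown.min()  # most negative peak-to-trough decline

profit_factor = returns[returns > 0].sum() / -returns[returns < 0].sum()

annual_return = equity[-1] ** (252 / len(returns)) - 1
calmar = annual_return / abs(max_drawdown)
```
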
285 | ### Benchmark Comparison
286 | 
287 | Compare strategy performance against buy-and-hold:
288 | 
289 | ```python
290 | results = run_backtest(...)
291 | benchmark = results.get("benchmark_comparison")
292 | print(f"Strategy vs Buy-Hold: {benchmark['excess_return']:.2%}")
293 | ```
294 | 
295 | ## Advanced Features
296 | 
297 | ### Monte Carlo Simulation
298 | 
299 | Assess strategy robustness with randomized scenarios:
300 | 
301 | ```python
302 | monte_carlo_results = run_monte_carlo_simulation(
303 |     strategy_results=results,
304 |     n_simulations=1000,
305 |     confidence_level=0.95
306 | )
307 | ```
308 | 
309 | ### Market Regime Detection
310 | 
311 | Automatically adjust strategy based on market conditions:
312 | 
313 | ```python
314 | regime_results = analyze_market_regime(
315 |     symbol="SPY",
316 |     lookback_period=252,
317 |     regime_indicators=["volatility", "trend", "momentum"]
318 | )
319 | ```
320 | 
321 | ### Multi-Strategy Ensemble
322 | 
323 | Combine multiple strategies for better risk-adjusted returns:
324 | 
325 | ```python
326 | ensemble_results = run_ensemble_backtest(
327 |     symbol="AAPL",
328 |     strategies=["sma_cross", "rsi", "momentum"],
329 |     weights="equal",  # or "optimize" for dynamic weighting
330 |     correlation_threshold=0.7
331 | )
332 | ```
333 | 
334 | ## Integration Examples
335 | 
336 | ### With Claude Desktop
337 | 
338 | ```python
339 | # Use MCP tools for comprehensive analysis
340 | # "Run a backtest for AAPL using SMA crossover strategy with
341 | #  optimization for the best parameters over the last 2 years"
342 | 
343 | # The system will:
344 | # 1. Fetch historical data
345 | # 2. Run parameter optimization
346 | # 3. Execute backtest with optimal parameters
347 | # 4. Provide detailed performance metrics
348 | ```
349 | 
350 | ### Programmatic Usage
351 | 
352 | ```python
353 | from maverick_mcp.backtesting import BacktestingEngine
354 | 
355 | async def run_comprehensive_analysis():
356 |     engine = BacktestingEngine()
357 | 
358 |     # Run backtest
359 |     results = await engine.run_backtest(
360 |         symbol="AAPL",
361 |         strategy="momentum"
362 |     )
363 | 
364 |     # Optimize parameters
365 |     optimal = await engine.optimize_strategy(
366 |         symbol="AAPL",
367 |         strategy="momentum",
368 |         granularity="fine"
369 |     )
370 | 
371 |     # Validate robustness
372 |     validation = await engine.validate_strategy(
373 |         symbol="AAPL",
374 |         strategy="momentum",
375 |         parameters=optimal["best_params"]
376 |     )
377 | 
378 |     return {
379 |         "backtest": results,
380 |         "optimization": optimal,
381 |         "validation": validation
382 |     }
383 | ```
384 | 
385 | ## Testing
386 | 
387 | Run the test suite to verify functionality:
388 | 
389 | ```bash
390 | # Unit tests
391 | pytest tests/test_backtesting.py
392 | 
393 | # Integration tests
394 | pytest tests/test_strategy_executor.py
395 | 
396 | # Performance benchmarks
397 | python scripts/benchmark_parallel_backtesting.py
398 | 
399 | # Comprehensive validation
400 | python scripts/test_all_strategies.py
401 | ```
402 | 
403 | ## Summary
404 | 
405 | MaverickMCP's backtesting system provides:
406 | 
407 | - **35+ pre-built strategies** with extensive customization
408 | - **6-8x performance improvement** with parallel processing
409 | - **Comprehensive optimization** and validation tools
410 | - **Professional-grade metrics** and risk analysis
411 | - **Production-ready architecture** with error handling and monitoring
412 | 
413 | The system is designed for both simple strategy testing and complex portfolio analysis, with a focus on performance, reliability, and ease of use.
```

--------------------------------------------------------------------------------
/maverick_mcp/domain/screening/value_objects.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Screening domain value objects.
  3 | 
  4 | This module contains immutable value objects that represent
  5 | core concepts in the screening domain.
  6 | """
  7 | 
  8 | from dataclasses import dataclass
  9 | from decimal import Decimal
 10 | from enum import Enum
 11 | 
 12 | 
 13 | class ScreeningStrategy(Enum):
 14 |     """
 15 |     Enumeration of available screening strategies.
 16 | 
 17 |     Each strategy represents a different approach to identifying
 18 |     investment opportunities in the stock market.
 19 |     """
 20 | 
 21 |     MAVERICK_BULLISH = "maverick_bullish"
 22 |     MAVERICK_BEARISH = "maverick_bearish"
 23 |     TRENDING_STAGE2 = "trending_stage2"
 24 | 
 25 |     def get_description(self) -> str:
 26 |         """Get human-readable description of the strategy."""
 27 |         descriptions = {
 28 |             self.MAVERICK_BULLISH: "High momentum stocks with bullish technical setups",
 29 |             self.MAVERICK_BEARISH: "Weak stocks with bearish technical setups",
 30 |             self.TRENDING_STAGE2: "Uptrend stocks meeting trending criteria",
 31 |         }
 32 |         return descriptions[self]
 33 | 
 34 |     def get_primary_sort_field(self) -> str:
 35 |         """Get the primary field used for sorting results."""
 36 |         sort_fields = {
 37 |             self.MAVERICK_BULLISH: "combined_score",
 38 |             self.MAVERICK_BEARISH: "bear_score",
 39 |             self.TRENDING_STAGE2: "momentum_score",
 40 |         }
 41 |         return sort_fields[self]
 42 | 
 43 |     def get_minimum_score_threshold(self) -> int:
 44 |         """Get the minimum score threshold for meaningful results."""
 45 |         thresholds = {
 46 |             self.MAVERICK_BULLISH: 50,
 47 |             self.MAVERICK_BEARISH: 30,
 48 |             self.TRENDING_STAGE2: 70,
 49 |         }
 50 |         return thresholds[self]
 51 | 
 52 | 
 53 | @dataclass(frozen=True)
 54 | class ScreeningCriteria:
 55 |     """
 56 |     Immutable value object representing screening filter criteria.
 57 | 
 58 |     This encapsulates all the parameters that can be used to filter
 59 |     and refine screening results.
 60 |     """
 61 | 
 62 |     # Basic filters
 63 |     min_momentum_score: Decimal | None = None
 64 |     max_momentum_score: Decimal | None = None
 65 |     min_volume: int | None = None
 66 |     max_volume: int | None = None
 67 |     min_price: Decimal | None = None
 68 |     max_price: Decimal | None = None
 69 | 
 70 |     # Technical filters
 71 |     min_combined_score: int | None = None
 72 |     min_bear_score: int | None = None
 73 |     min_adr_percentage: Decimal | None = None
 74 |     max_adr_percentage: Decimal | None = None
 75 | 
 76 |     # Pattern filters
 77 |     require_pattern_detected: bool = False
 78 |     require_squeeze: bool = False
 79 |     require_consolidation: bool = False
 80 |     require_entry_signal: bool = False
 81 | 
 82 |     # Moving average filters
 83 |     require_above_sma50: bool = False
 84 |     require_above_sma150: bool = False
 85 |     require_above_sma200: bool = False
 86 |     require_ma_alignment: bool = False  # 50 > 150 > 200
 87 | 
 88 |     # Sector/Industry filters
 89 |     allowed_sectors: list[str] | None = None
 90 |     excluded_sectors: list[str] | None = None
 91 | 
 92 |     def __post_init__(self):
 93 |         """Validate criteria constraints."""
 94 |         self._validate_rating_ranges()
 95 |         self._validate_volume_ranges()
 96 |         self._validate_price_ranges()
 97 |         self._validate_score_ranges()
 98 | 
 99 |     def _validate_rating_ranges(self) -> None:
100 |         """Validate momentum score range constraints."""
101 |         if self.min_momentum_score is not None:
102 |             if not (0 <= self.min_momentum_score <= 100):
103 |                 raise ValueError("Minimum momentum score must be between 0 and 100")
104 | 
105 |         if self.max_momentum_score is not None:
106 |             if not (0 <= self.max_momentum_score <= 100):
107 |                 raise ValueError("Maximum momentum score must be between 0 and 100")
108 | 
109 |         if (
110 |             self.min_momentum_score is not None
111 |             and self.max_momentum_score is not None
112 |             and self.min_momentum_score > self.max_momentum_score
113 |         ):
114 |             raise ValueError(
115 |                 "Minimum momentum score cannot exceed maximum momentum score"
116 |             )
117 | 
118 |     def _validate_volume_ranges(self) -> None:
119 |         """Validate volume range constraints."""
120 |         if self.min_volume is not None and self.min_volume < 0:
121 |             raise ValueError("Minimum volume cannot be negative")
122 | 
123 |         if self.max_volume is not None and self.max_volume < 0:
124 |             raise ValueError("Maximum volume cannot be negative")
125 | 
126 |         if (
127 |             self.min_volume is not None
128 |             and self.max_volume is not None
129 |             and self.min_volume > self.max_volume
130 |         ):
131 |             raise ValueError("Minimum volume cannot exceed maximum volume")
132 | 
133 |     def _validate_price_ranges(self) -> None:
134 |         """Validate price range constraints."""
135 |         if self.min_price is not None and self.min_price <= 0:
136 |             raise ValueError("Minimum price must be positive")
137 | 
138 |         if self.max_price is not None and self.max_price <= 0:
139 |             raise ValueError("Maximum price must be positive")
140 | 
141 |         if (
142 |             self.min_price is not None
143 |             and self.max_price is not None
144 |             and self.min_price > self.max_price
145 |         ):
146 |             raise ValueError("Minimum price cannot exceed maximum price")
147 | 
148 |     def _validate_score_ranges(self) -> None:
149 |         """Validate score range constraints."""
150 |         if self.min_combined_score is not None and self.min_combined_score < 0:
151 |             raise ValueError("Minimum combined score cannot be negative")
152 | 
153 |         if self.min_bear_score is not None and self.min_bear_score < 0:
154 |             raise ValueError("Minimum bear score cannot be negative")
155 | 
156 |     def has_any_filters(self) -> bool:
157 |         """Check if any filters are applied."""
158 |         return any(
159 |             [
160 |                 self.min_momentum_score is not None,
161 |                 self.max_momentum_score is not None,
162 |                 self.min_volume is not None,
163 |                 self.max_volume is not None,
164 |                 self.min_price is not None,
165 |                 self.max_price is not None,
166 |                 self.min_combined_score is not None,
167 |                 self.min_bear_score is not None,
168 |                 self.min_adr_percentage is not None,
169 |                 self.max_adr_percentage is not None,
170 |                 self.require_pattern_detected,
171 |                 self.require_squeeze,
172 |                 self.require_consolidation,
173 |                 self.require_entry_signal,
174 |                 self.require_above_sma50,
175 |                 self.require_above_sma150,
176 |                 self.require_above_sma200,
177 |                 self.require_ma_alignment,
178 |                 self.allowed_sectors is not None,
179 |                 self.excluded_sectors is not None,
180 |             ]
181 |         )
182 | 
183 |     def get_filter_description(self) -> str:
184 |         """Get human-readable description of active filters."""
185 |         filters = []
186 | 
187 |         if self.min_momentum_score is not None:
188 |             filters.append(f"Momentum Score >= {self.min_momentum_score}")
189 | 
190 |         if self.max_momentum_score is not None:
191 |             filters.append(f"Momentum Score <= {self.max_momentum_score}")
192 | 
193 |         if self.min_volume is not None:
194 |             filters.append(f"Volume >= {self.min_volume:,}")
195 | 
196 |         if self.min_price is not None:
197 |             filters.append(f"Price >= ${self.min_price}")
198 | 
199 |         if self.max_price is not None:
200 |             filters.append(f"Price <= ${self.max_price}")
201 | 
202 |         if self.require_above_sma50:
203 |             filters.append("Above SMA 50")
204 | 
205 |         if self.require_pattern_detected:
206 |             filters.append("Pattern Detected")
207 | 
208 |         if not filters:
209 |             return "No filters applied"
210 | 
211 |         return "; ".join(filters)
212 | 
213 | 
214 | @dataclass(frozen=True)
215 | class ScreeningLimits:
216 |     """
217 |     Value object representing limits and constraints for screening operations.
218 | 
219 |     This encapsulates business rules around result limits, timeouts,
220 |     and resource constraints.
221 |     """
222 | 
223 |     max_results: int = 100
224 |     default_results: int = 20
225 |     min_results: int = 1
226 |     max_timeout_seconds: int = 30
227 | 
228 |     def __post_init__(self):
229 |         """Validate limit constraints."""
230 |         if self.min_results <= 0:
231 |             raise ValueError("Minimum results must be positive")
232 | 
233 |         if self.default_results < self.min_results:
234 |             raise ValueError("Default results cannot be less than minimum")
235 | 
236 |         if self.max_results < self.default_results:
237 |             raise ValueError("Maximum results cannot be less than default")
238 | 
239 |         if self.max_timeout_seconds <= 0:
240 |             raise ValueError("Maximum timeout must be positive")
241 | 
242 |     def validate_limit(self, requested_limit: int) -> int:
243 |         """
244 |         Validate and adjust requested result limit.
245 | 
246 |         Returns the adjusted limit within valid bounds.
247 |         """
248 |         if requested_limit < self.min_results:
249 |             return self.min_results
250 | 
251 |         if requested_limit > self.max_results:
252 |             return self.max_results
253 | 
254 |         return requested_limit
255 | 
256 | 
257 | @dataclass(frozen=True)
258 | class SortingOptions:
259 |     """
260 |     Value object representing sorting options for screening results.
261 | 
262 |     This encapsulates the various ways results can be ordered.
263 |     """
264 | 
265 |     field: str
266 |     descending: bool = True
267 |     secondary_field: str | None = None
268 |     secondary_descending: bool = True
269 | 
270 |     # Valid sortable fields
271 |     VALID_FIELDS = {
272 |         "combined_score",
273 |         "bear_score",
274 |         "momentum_score",
275 |         "close_price",
276 |         "volume",
277 |         "avg_volume_30d",
278 |         "adr_percentage",
279 |         "quality_score",
280 |     }
281 | 
282 |     def __post_init__(self):
283 |         """Validate sorting configuration."""
284 |         if self.field not in self.VALID_FIELDS:
285 |             raise ValueError(
286 |                 f"Invalid sort field: {self.field}. Must be one of {self.VALID_FIELDS}"
287 |             )
288 | 
289 |         if (
290 |             self.secondary_field is not None
291 |             and self.secondary_field not in self.VALID_FIELDS
292 |         ):
293 |             raise ValueError(f"Invalid secondary sort field: {self.secondary_field}")
294 | 
295 |     @classmethod
296 |     def for_strategy(cls, strategy: ScreeningStrategy) -> "SortingOptions":
297 |         """Create default sorting options for a screening strategy."""
298 |         primary_field = strategy.get_primary_sort_field()
299 | 
300 |         # Add appropriate secondary sort field
301 |         secondary_field = (
302 |             "momentum_score" if primary_field != "momentum_score" else "close_price"
303 |         )
304 | 
305 |         return cls(
306 |             field=primary_field,
307 |             descending=True,
308 |             secondary_field=secondary_field,
309 |             secondary_descending=True,
310 |         )
311 | 
```

--------------------------------------------------------------------------------
/maverick_mcp/providers/implementations/persistence_adapter.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Data persistence adapter.
  3 | 
  4 | This module provides adapters that make the existing database models
  5 | compatible with the new IDataPersistence interface.
  6 | """
  7 | 
  8 | import asyncio
  9 | import logging
 10 | from typing import Any
 11 | 
 12 | import pandas as pd
 13 | from sqlalchemy.orm import Session
 14 | 
 15 | from maverick_mcp.data.models import (
 16 |     MaverickBearStocks,
 17 |     MaverickStocks,
 18 |     PriceCache,
 19 |     SessionLocal,
 20 |     Stock,
 21 |     SupplyDemandBreakoutStocks,
 22 |     bulk_insert_price_data,
 23 |     get_latest_maverick_screening,
 24 | )
 25 | from maverick_mcp.providers.interfaces.persistence import (
 26 |     DatabaseConfig,
 27 |     IDataPersistence,
 28 | )
 29 | 
 30 | logger = logging.getLogger(__name__)
 31 | 
 32 | 
 33 | class SQLAlchemyPersistenceAdapter(IDataPersistence):
 34 |     """
 35 |     Adapter that makes the existing SQLAlchemy models compatible with IDataPersistence interface.
 36 | 
 37 |     This adapter wraps the existing database operations and exposes them through the new
 38 |     interface contracts, enabling gradual migration to the new architecture.
 39 |     """
 40 | 
 41 |     def __init__(self, config: DatabaseConfig | None = None):
 42 |         """
 43 |         Initialize the persistence adapter.
 44 | 
 45 |         Args:
 46 |             config: Database configuration (optional)
 47 |         """
 48 |         self._config = config
 49 | 
 50 |         logger.debug("SQLAlchemyPersistenceAdapter initialized")
 51 | 
 52 |     async def get_session(self) -> Session:
 53 |         """
 54 |         Get a database session (async wrapper).
 55 | 
 56 |         Returns:
 57 |             Database session for operations
 58 |         """
 59 |         loop = asyncio.get_event_loop()
 60 |         return await loop.run_in_executor(None, SessionLocal)
 61 | 
 62 |     async def get_read_only_session(self) -> Session:
 63 |         """
 64 |         Get a read-only database session.
 65 | 
 66 |         Returns:
 67 |             Read-only database session for queries
 68 |         """
 69 |         # Use the existing read-only session manager
 70 |         # Since get_db_session_read_only returns a context manager, we need to handle it differently
 71 |         # For now, return a regular session - this could be enhanced later
 72 |         return await self.get_session()
 73 | 
 74 |     async def save_price_data(
 75 |         self, session: Session, symbol: str, data: pd.DataFrame
 76 |     ) -> int:
 77 |         """
 78 |         Save stock price data to persistence layer (async wrapper).
 79 | 
 80 |         Args:
 81 |             session: Database session
 82 |             symbol: Stock ticker symbol
 83 |             data: DataFrame with OHLCV data
 84 | 
 85 |         Returns:
 86 |             Number of records saved
 87 |         """
 88 |         loop = asyncio.get_event_loop()
 89 |         return await loop.run_in_executor(
 90 |             None, bulk_insert_price_data, session, symbol, data
 91 |         )
 92 | 
 93 |     async def get_price_data(
 94 |         self,
 95 |         session: Session,
 96 |         symbol: str,
 97 |         start_date: str,
 98 |         end_date: str,
 99 |     ) -> pd.DataFrame:
100 |         """
101 |         Retrieve stock price data from persistence layer (async wrapper).
102 | 
103 |         Args:
104 |             session: Database session
105 |             symbol: Stock ticker symbol
106 |             start_date: Start date in YYYY-MM-DD format
107 |             end_date: End date in YYYY-MM-DD format
108 | 
109 |         Returns:
110 |             DataFrame with historical price data
111 |         """
112 |         loop = asyncio.get_event_loop()
113 |         return await loop.run_in_executor(
114 |             None, PriceCache.get_price_data, session, symbol, start_date, end_date
115 |         )
116 | 
117 |     async def get_or_create_stock(self, session: Session, symbol: str) -> Any:
118 |         """
119 |         Get or create a stock record (async wrapper).
120 | 
121 |         Args:
122 |             session: Database session
123 |             symbol: Stock ticker symbol
124 | 
125 |         Returns:
126 |             Stock entity/record
127 |         """
128 |         loop = asyncio.get_event_loop()
129 |         return await loop.run_in_executor(None, Stock.get_or_create, session, symbol)
130 | 
131 |     async def save_screening_results(
132 |         self,
133 |         session: Session,
134 |         screening_type: str,
135 |         results: list[dict[str, Any]],
136 |     ) -> int:
137 |         """
138 |         Save stock screening results.
139 | 
140 |         Args:
141 |             session: Database session
142 |             screening_type: Type of screening (e.g., 'maverick', 'bearish', 'trending')
143 |             results: List of screening results
144 | 
145 |         Returns:
146 |             Number of records saved
147 |         """
148 |         # This would need to be implemented based on the specific screening models
149 |         # For now, return the count of results as a placeholder
150 |         logger.info(f"Saving {len(results)} {screening_type} screening results")
151 |         return len(results)
152 | 
153 |     async def get_screening_results(
154 |         self,
155 |         session: Session,
156 |         screening_type: str,
157 |         limit: int | None = None,
158 |         min_score: float | None = None,
159 |     ) -> list[dict[str, Any]]:
160 |         """
161 |         Retrieve stock screening results (async wrapper).
162 | 
163 |         Args:
164 |             session: Database session
165 |             screening_type: Type of screening
166 |             limit: Maximum number of results
167 |             min_score: Minimum score filter
168 | 
169 |         Returns:
170 |             List of screening results
171 |         """
172 |         loop = asyncio.get_event_loop()
173 | 
174 |         if screening_type == "maverick":
175 |             # Use the existing MaverickStocks query logic
176 |             def get_maverick_results():
177 |                 query = session.query(MaverickStocks)
178 |                 if min_score:
179 |                     query = query.filter(MaverickStocks.combined_score >= min_score)
180 |                 if limit:
181 |                     query = query.limit(limit)
182 |                 stocks = query.order_by(MaverickStocks.combined_score.desc()).all()
183 |                 return [stock.to_dict() for stock in stocks]
184 | 
185 |             return await loop.run_in_executor(None, get_maverick_results)
186 | 
187 |         elif screening_type == "bearish":
188 |             # Use the existing MaverickBearStocks query logic
189 |             def get_bear_results():
190 |                 query = session.query(MaverickBearStocks)
191 |                 if min_score:
192 |                     query = query.filter(MaverickBearStocks.score >= min_score)
193 |                 if limit:
194 |                     query = query.limit(limit)
195 |                 stocks = query.order_by(MaverickBearStocks.score.desc()).all()
196 |                 return [stock.to_dict() for stock in stocks]
197 | 
198 |             return await loop.run_in_executor(None, get_bear_results)
199 | 
200 |         elif screening_type == "trending":
201 |             # Use the existing SupplyDemandBreakoutStocks query logic
202 |             def get_trending_results():
203 |                 query = session.query(SupplyDemandBreakoutStocks).filter(
204 |                     SupplyDemandBreakoutStocks.close_price
205 |                     > SupplyDemandBreakoutStocks.sma_50,
206 |                     SupplyDemandBreakoutStocks.close_price
207 |                     > SupplyDemandBreakoutStocks.sma_150,
208 |                     SupplyDemandBreakoutStocks.close_price
209 |                     > SupplyDemandBreakoutStocks.sma_200,
210 |                     SupplyDemandBreakoutStocks.sma_50
211 |                     > SupplyDemandBreakoutStocks.sma_150,
212 |                     SupplyDemandBreakoutStocks.sma_150
213 |                     > SupplyDemandBreakoutStocks.sma_200,
214 |                 )
215 |                 if min_score:
216 |                     query = query.filter(
217 |                         SupplyDemandBreakoutStocks.momentum_score >= min_score
218 |                     )
219 |                 if limit:
220 |                     query = query.limit(limit)
221 |                 stocks = query.order_by(
222 |                     SupplyDemandBreakoutStocks.momentum_score.desc()
223 |                 ).all()
224 |                 return [stock.to_dict() for stock in stocks]
225 | 
226 |             return await loop.run_in_executor(None, get_trending_results)
227 | 
228 |         else:
229 |             logger.warning(f"Unknown screening type: {screening_type}")
230 |             return []
231 | 
232 |     async def get_latest_screening_data(self) -> dict[str, list[dict[str, Any]]]:
233 |         """
234 |         Get the latest screening data for all types (async wrapper).
235 | 
236 |         Returns:
237 |             Dictionary with all screening types and their latest results
238 |         """
239 |         loop = asyncio.get_event_loop()
240 |         return await loop.run_in_executor(None, get_latest_maverick_screening)
241 | 
242 |     async def check_data_freshness(self, symbol: str, max_age_hours: int = 24) -> bool:
243 |         """
244 |         Check if cached data for a symbol is fresh enough.
245 | 
246 |         Args:
247 |             symbol: Stock ticker symbol
248 |             max_age_hours: Maximum age in hours before data is considered stale
249 | 
250 |         Returns:
251 |             True if data is fresh, False if stale or missing
252 |         """
253 |         # This would need to be implemented based on timestamp fields in the models
254 |         # For now, return True as a placeholder
255 |         logger.debug(
256 |             f"Checking data freshness for {symbol} (max age: {max_age_hours}h)"
257 |         )
258 |         return True
259 | 
260 |     async def bulk_save_price_data(
261 |         self, session: Session, symbol: str, data: pd.DataFrame
262 |     ) -> int:
263 |         """
264 |         Bulk save price data for better performance (async wrapper).
265 | 
266 |         Args:
267 |             session: Database session
268 |             symbol: Stock ticker symbol
269 |             data: DataFrame with OHLCV data
270 | 
271 |         Returns:
272 |             Number of records saved
273 |         """
274 |         # Use the same implementation as save_price_data since bulk_insert_price_data is already optimized
275 |         return await self.save_price_data(session, symbol, data)
276 | 
277 |     async def get_symbols_with_data(
278 |         self, session: Session, limit: int | None = None
279 |     ) -> list[str]:
280 |         """
281 |         Get list of symbols that have price data (async wrapper).
282 | 
283 |         Args:
284 |             session: Database session
285 |             limit: Maximum number of symbols to return
286 | 
287 |         Returns:
288 |             List of stock symbols
289 |         """
290 |         loop = asyncio.get_event_loop()
291 | 
292 |         def get_symbols():
293 |             query = session.query(Stock.symbol).distinct()
294 |             if limit:
295 |                 query = query.limit(limit)
296 |             return [row[0] for row in query.all()]
297 | 
298 |         return await loop.run_in_executor(None, get_symbols)
299 | 
300 |     async def cleanup_old_data(self, session: Session, days_to_keep: int = 365) -> int:
301 |         """
302 |         Clean up old data beyond retention period.
303 | 
304 |         Args:
305 |             session: Database session
306 |             days_to_keep: Number of days of data to retain
307 | 
308 |         Returns:
309 |             Number of records deleted
310 |         """
311 |         # This would need to be implemented based on specific cleanup requirements
312 |         # For now, return 0 as a placeholder
313 |         logger.info(f"Cleanup old data beyond {days_to_keep} days")
314 |         return 0
315 | 
```

--------------------------------------------------------------------------------
/alembic/versions/008_performance_optimization_indexes.py:
--------------------------------------------------------------------------------

```python
  1 | """Add comprehensive performance optimization indexes
  2 | 
  3 | Revision ID: 008_performance_optimization_indexes
  4 | Revises: 007_enhance_audit_logging
  5 | Create Date: 2025-06-25 12:00:00
  6 | 
  7 | This migration adds comprehensive performance indexes for:
  8 | - Stock data queries with date ranges
  9 | - Screening table optimizations
 10 | - Rate limiting and authentication tables
 11 | - Cache key lookup optimizations
 12 | """
 13 | 
 14 | import sqlalchemy as sa
 15 | 
 16 | from alembic import op
 17 | 
 18 | # revision identifiers, used by Alembic.
 19 | revision = "008_performance_optimization_indexes"
 20 | down_revision = "007_enhance_audit_logging"
 21 | branch_labels = None
 22 | depends_on = None
 23 | 
 24 | 
 25 | def upgrade():
 26 |     """Add comprehensive performance optimization indexes."""
 27 | 
 28 |     # Stock data performance indexes
 29 |     print("Creating stock data performance indexes...")
 30 | 
 31 |     # Composite index for price cache queries (stock_id, date)
 32 |     # This is the most common query pattern for historical data
 33 |     op.create_index(
 34 |         "idx_stocks_pricecache_stock_date_range",
 35 |         "stocks_pricecache",
 36 |         ["stock_id", "date"],
 37 |         postgresql_using="btree",
 38 |     )
 39 | 
 40 |     # Index for volume-based queries (high volume screening)
 41 |     op.create_index(
 42 |         "idx_stocks_pricecache_volume_desc",
 43 |         "stocks_pricecache",
 44 |         [sa.text("volume DESC")],
 45 |         postgresql_using="btree",
 46 |     )
 47 | 
 48 |     # Index for price-based queries (close price for technical analysis)
 49 |     op.create_index(
 50 |         "idx_stocks_pricecache_close_price",
 51 |         "stocks_pricecache",
 52 |         ["close_price"],
 53 |         postgresql_using="btree",
 54 |     )
 55 | 
 56 |     # Stock lookup optimizations
 57 |     print("Creating stock lookup optimization indexes...")
 58 | 
 59 |     # Case-insensitive ticker lookup (for user input handling)
 60 |     op.execute(
 61 |         "CREATE INDEX IF NOT EXISTS idx_stocks_stock_ticker_lower "
 62 |         "ON stocks_stock (LOWER(ticker_symbol))"
 63 |     )
 64 | 
 65 |     # Sector and industry filtering
 66 |     op.create_index(
 67 |         "idx_stocks_stock_sector",
 68 |         "stocks_stock",
 69 |         ["sector"],
 70 |         postgresql_using="btree",
 71 |     )
 72 | 
 73 |     op.create_index(
 74 |         "idx_stocks_stock_industry",
 75 |         "stocks_stock",
 76 |         ["industry"],
 77 |         postgresql_using="btree",
 78 |     )
 79 | 
 80 |     # Exchange filtering for market-specific queries
 81 |     op.create_index(
 82 |         "idx_stocks_stock_exchange",
 83 |         "stocks_stock",
 84 |         ["exchange"],
 85 |         postgresql_using="btree",
 86 |     )
 87 | 
 88 |     # Screening table optimizations
 89 |     print("Creating screening performance indexes...")
 90 | 
 91 |     # Maverick bullish screening indexes
 92 |     op.create_index(
 93 |         "idx_stocks_maverickstocks_score_desc",
 94 |         "stocks_maverickstocks",
 95 |         [sa.text("score DESC")],
 96 |         postgresql_using="btree",
 97 |     )
 98 | 
 99 |     op.create_index(
100 |         "idx_stocks_maverickstocks_rank_asc",
101 |         "stocks_maverickstocks",
102 |         ["rank"],
103 |         postgresql_using="btree",
104 |     )
105 | 
106 |     op.create_index(
107 |         "idx_stocks_maverickstocks_date_analyzed",
108 |         "stocks_maverickstocks",
109 |         [sa.text("date_analyzed DESC")],
110 |         postgresql_using="btree",
111 |     )
112 | 
113 |     # Composite index for score and date filtering
114 |     op.create_index(
115 |         "idx_stocks_maverickstocks_score_date",
116 |         "stocks_maverickstocks",
117 |         [sa.text("score DESC"), sa.text("date_analyzed DESC")],
118 |         postgresql_using="btree",
119 |     )
120 | 
121 |     # Maverick bearish screening indexes
122 |     op.create_index(
123 |         "idx_stocks_maverickbearstocks_score_desc",
124 |         "stocks_maverickbearstocks",
125 |         [sa.text("score DESC")],
126 |         postgresql_using="btree",
127 |     )
128 | 
129 |     op.create_index(
130 |         "idx_stocks_maverickbearstocks_date_analyzed",
131 |         "stocks_maverickbearstocks",
132 |         [sa.text("date_analyzed DESC")],
133 |         postgresql_using="btree",
134 |     )
135 | 
136 |     # Supply/Demand (Trending) screening indexes
137 |     op.create_index(
138 |         "idx_stocks_supply_demand_breakouts_momentum_score_desc",
139 |         "stocks_supply_demand_breakouts",
140 |         [sa.text("momentum_score DESC")],
141 |         postgresql_using="btree",
142 |     )
143 | 
144 |     op.create_index(
145 |         "idx_stocks_supply_demand_breakouts_date_analyzed",
146 |         "stocks_supply_demand_breakouts",
147 |         [sa.text("date_analyzed DESC")],
148 |         postgresql_using="btree",
149 |     )
150 | 
151 |     # Composite index for momentum score and date
152 |     op.create_index(
153 |         "idx_stocks_supply_demand_breakouts_momentum_date",
154 |         "stocks_supply_demand_breakouts",
155 |         [sa.text("momentum_score DESC"), sa.text("date_analyzed DESC")],
156 |         postgresql_using="btree",
157 |     )
158 | 
159 |     # Authentication and rate limiting optimizations
160 |     print("Creating authentication performance indexes...")
161 | 
162 |     # API key lookups (most frequent auth operation)
163 |     op.create_index(
164 |         "idx_mcp_api_keys_key_hash",
165 |         "mcp_api_keys",
166 |         ["key_hash"],
167 |         postgresql_using="hash",  # Hash index for exact equality
168 |     )
169 | 
170 |     # Active API keys filter
171 |     op.create_index(
172 |         "idx_mcp_api_keys_active_expires",
173 |         "mcp_api_keys",
174 |         ["is_active", "expires_at"],
175 |         postgresql_using="btree",
176 |     )
177 | 
178 |     # User API keys lookup
179 |     op.create_index(
180 |         "idx_mcp_api_keys_user_id_active",
181 |         "mcp_api_keys",
182 |         ["user_id", "is_active"],
183 |         postgresql_using="btree",
184 |     )
185 | 
186 |     # Refresh token lookups
187 |     op.create_index(
188 |         "idx_mcp_refresh_tokens_token_hash",
189 |         "mcp_refresh_tokens",
190 |         ["token_hash"],
191 |         postgresql_using="hash",
192 |     )
193 | 
194 |     op.create_index(
195 |         "idx_mcp_refresh_tokens_user_active",
196 |         "mcp_refresh_tokens",
197 |         ["user_id", "is_active"],
198 |         postgresql_using="btree",
199 |     )
200 | 
201 |     # Request tracking for analytics
202 |     op.create_index(
203 |         "idx_mcp_requests_user_timestamp",
204 |         "mcp_requests",
205 |         ["user_id", sa.text("timestamp DESC")],
206 |         postgresql_using="btree",
207 |     )
208 | 
209 |     op.create_index(
210 |         "idx_mcp_requests_tool_name",
211 |         "mcp_requests",
212 |         ["tool_name"],
213 |         postgresql_using="btree",
214 |     )
215 | 
216 |     # Request success rate analysis
217 |     op.create_index(
218 |         "idx_mcp_requests_success_timestamp",
219 |         "mcp_requests",
220 |         ["success", sa.text("timestamp DESC")],
221 |         postgresql_using="btree",
222 |     )
223 | 
224 |     # Audit logging optimizations
225 |     print("Creating audit logging performance indexes...")
226 | 
227 |     # User activity tracking
228 |     op.create_index(
229 |         "idx_mcp_audit_logs_user_timestamp",
230 |         "mcp_audit_logs",
231 |         ["user_id", sa.text("timestamp DESC")],
232 |         postgresql_using="btree",
233 |     )
234 | 
235 |     # Action type filtering
236 |     op.create_index(
237 |         "idx_mcp_audit_logs_action",
238 |         "mcp_audit_logs",
239 |         ["action"],
240 |         postgresql_using="btree",
241 |     )
242 | 
243 |     # IP address tracking for security
244 |     op.create_index(
245 |         "idx_mcp_audit_logs_ip_timestamp",
246 |         "mcp_audit_logs",
247 |         ["ip_address", sa.text("timestamp DESC")],
248 |         postgresql_using="btree",
249 |     )
250 | 
251 |     # Partial indexes for common queries
252 |     print("Creating partial indexes for optimal performance...")
253 | 
254 |     # Active users only (most queries filter for active users)
255 |     op.execute(
256 |         "CREATE INDEX IF NOT EXISTS idx_mcp_users_active_email "
257 |         "ON mcp_users (email) WHERE is_active = true"
258 |     )
259 | 
260 |     # Recent price data (last 30 days) - most common query pattern
261 |     op.execute(
262 |         "CREATE INDEX IF NOT EXISTS idx_stocks_pricecache_recent "
263 |         "ON stocks_pricecache (stock_id, date DESC) "
264 |         "WHERE date >= CURRENT_DATE - INTERVAL '30 days'"
265 |     )
266 | 
267 |     # High-volume stocks (for active trading analysis)
268 |     op.execute(
269 |         "CREATE INDEX IF NOT EXISTS idx_stocks_pricecache_high_volume "
270 |         "ON stocks_pricecache (stock_id, date DESC, volume DESC) "
271 |         "WHERE volume > 1000000"
272 |     )
273 | 
274 |     print("Performance optimization indexes created successfully!")
275 | 
276 | 
277 | def downgrade():
278 |     """Remove performance optimization indexes."""
279 | 
280 |     print("Removing performance optimization indexes...")
281 | 
282 |     # Stock data indexes
283 |     op.drop_index("idx_stocks_pricecache_stock_date_range", "stocks_pricecache")
284 |     op.drop_index("idx_stocks_pricecache_volume_desc", "stocks_pricecache")
285 |     op.drop_index("idx_stocks_pricecache_close_price", "stocks_pricecache")
286 | 
287 |     # Stock lookup indexes
288 |     op.execute("DROP INDEX IF EXISTS idx_stocks_stock_ticker_lower")
289 |     op.drop_index("idx_stocks_stock_sector", "stocks_stock")
290 |     op.drop_index("idx_stocks_stock_industry", "stocks_stock")
291 |     op.drop_index("idx_stocks_stock_exchange", "stocks_stock")
292 | 
293 |     # Screening indexes
294 |     op.drop_index("idx_stocks_maverickstocks_score_desc", "stocks_maverickstocks")
295 |     op.drop_index("idx_stocks_maverickstocks_rank_asc", "stocks_maverickstocks")
296 |     op.drop_index("idx_stocks_maverickstocks_date_analyzed", "stocks_maverickstocks")
297 |     op.drop_index("idx_stocks_maverickstocks_score_date", "stocks_maverickstocks")
298 | 
299 |     op.drop_index(
300 |         "idx_stocks_maverickbearstocks_score_desc", "stocks_maverickbearstocks"
301 |     )
302 |     op.drop_index(
303 |         "idx_stocks_maverickbearstocks_date_analyzed", "stocks_maverickbearstocks"
304 |     )
305 | 
306 |     op.drop_index(
307 |         "idx_stocks_supply_demand_breakouts_momentum_score_desc",
308 |         "stocks_supply_demand_breakouts",
309 |     )
310 |     op.drop_index(
311 |         "idx_stocks_supply_demand_breakouts_date_analyzed",
312 |         "stocks_supply_demand_breakouts",
313 |     )
314 |     op.drop_index(
315 |         "idx_stocks_supply_demand_breakouts_momentum_date",
316 |         "stocks_supply_demand_breakouts",
317 |     )
318 | 
319 |     # Authentication indexes
320 |     op.drop_index("idx_mcp_api_keys_key_hash", "mcp_api_keys")
321 |     op.drop_index("idx_mcp_api_keys_active_expires", "mcp_api_keys")
322 |     op.drop_index("idx_mcp_api_keys_user_id_active", "mcp_api_keys")
323 | 
324 |     op.drop_index("idx_mcp_refresh_tokens_token_hash", "mcp_refresh_tokens")
325 |     op.drop_index("idx_mcp_refresh_tokens_user_active", "mcp_refresh_tokens")
326 | 
327 |     op.drop_index("idx_mcp_requests_user_timestamp", "mcp_requests")
328 |     op.drop_index("idx_mcp_requests_tool_name", "mcp_requests")
329 |     op.drop_index("idx_mcp_requests_success_timestamp", "mcp_requests")
330 | 
331 |     # Audit logging indexes
332 |     op.drop_index("idx_mcp_audit_logs_user_timestamp", "mcp_audit_logs")
333 |     op.drop_index("idx_mcp_audit_logs_action", "mcp_audit_logs")
334 |     op.drop_index("idx_mcp_audit_logs_ip_timestamp", "mcp_audit_logs")
335 | 
336 |     # Partial indexes
337 |     op.execute("DROP INDEX IF EXISTS idx_mcp_users_active_email")
338 |     op.execute("DROP INDEX IF EXISTS idx_stocks_pricecache_recent")
339 |     op.execute("DROP INDEX IF EXISTS idx_stocks_pricecache_high_volume")
340 |     print("Performance optimization indexes removed.")
341 | 
```
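
A partial index is only chosen when the planner can prove the query's `WHERE` clause implies the index predicate. A minimal sketch of checking that for the high-volume index above, assuming `psycopg2` is installed and the DSN points at the same database (both are illustrative):

```python
# Sketch: confirm the planner may pick idx_stocks_pricecache_high_volume.
# The DSN is an assumption; adjust it for your environment.
import psycopg2

conn = psycopg2.connect("dbname=maverick_mcp")
try:
    with conn.cursor() as cur:
        # volume > 2000000 implies the index predicate (volume > 1000000),
        # so the partial index is a legal choice for this query.
        cur.execute(
            "EXPLAIN SELECT date, close, volume FROM stocks_pricecache "
            "WHERE stock_id = %s AND volume > 2000000 ORDER BY date DESC",
            (1,),
        )
        for (line,) in cur.fetchall():
            print(line)
finally:
    conn.close()
```

Whether the index actually wins depends on table statistics; the point of `EXPLAIN` here is only to verify that the predicate implication holds.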

--------------------------------------------------------------------------------
/scripts/setup_self_contained.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Self-contained setup script for Maverick-MCP.
  4 | 
  5 | This script sets up a completely self-contained Maverick-MCP installation
  6 | with its own database schema, sample data, and validation.
  7 | 
  8 | Usage:
  9 |     python scripts/setup_self_contained.py --full-setup
 10 |     python scripts/setup_self_contained.py --quick-setup
 11 |     python scripts/setup_self_contained.py --migrate-only
 12 | """
 13 | 
 14 | import argparse
 15 | import asyncio
 16 | import logging
 17 | import os
 18 | import sys
 19 | from pathlib import Path
 20 | 
 21 | # Add parent directory to path for imports
 22 | sys.path.append(str(Path(__file__).parent.parent))
 23 | 
 24 | from maverick_mcp.config.database_self_contained import (
 25 |     get_self_contained_db_config,
 26 |     init_self_contained_database,
 27 |     run_self_contained_migrations,
 28 | )
 29 | 
 30 | # Set up logging
 31 | logging.basicConfig(
 32 |     level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
 33 | )
 34 | logger = logging.getLogger("self_contained_setup")
 35 | 
 36 | 
 37 | def check_requirements() -> bool:
 38 |     """Check if all requirements are met for setup."""
 39 |     logger.info("🔍 Checking requirements...")
 40 | 
 41 |     # Check environment variables
 42 |     required_env: list[str] = []  # none strictly required today; kept for future use
 43 |     optional_env = {
 44 |         "TIINGO_API_TOKEN": "Required for loading market data from Tiingo API",
 45 |         "MCP_DATABASE_URL": "Custom database URL (defaults to maverick_mcp database)",
 46 |         "POSTGRES_URL": "Alternative database URL",
 47 |         "DATABASE_URL": "Fallback database URL",
 48 |     }
 49 | 
 50 |     missing_required = []
 51 |     for env_var in required_env:
 52 |         if not os.getenv(env_var):
 53 |             missing_required.append(env_var)
 54 | 
 55 |     if missing_required:
 56 |         logger.error(f"❌ Missing required environment variables: {missing_required}")
 57 |         return False
 58 | 
 59 |     # Check optional environment variables
 60 |     missing_optional = []
 61 |     for env_var, description in optional_env.items():
 62 |         if not os.getenv(env_var):
 63 |             missing_optional.append(f"{env_var}: {description}")
 64 | 
 65 |     if missing_optional:
 66 |         logger.info("ℹ️  Optional environment variables not set:")
 67 |         for var in missing_optional:
 68 |             logger.info(f"   - {var}")
 69 | 
 70 |     logger.info("✅ Requirements check passed")
 71 |     return True
 72 | 
 73 | 
 74 | def run_migrations() -> bool:
 75 |     """Run database migrations."""
 76 |     logger.info("🔄 Running database migrations...")
 77 | 
 78 |     try:
 79 |         run_self_contained_migrations()
 80 |         logger.info("✅ Database migrations completed successfully")
 81 |         return True
 82 |     except Exception as e:
 83 |         logger.error(f"❌ Migration failed: {e}")
 84 |         return False
 85 | 
 86 | 
 87 | def validate_schema() -> bool:
 88 |     """Validate the database schema."""
 89 |     logger.info("🔍 Validating database schema...")
 90 | 
 91 |     try:
 92 |         db_config = get_self_contained_db_config()
 93 |         if db_config.validate_schema():
 94 |             logger.info("✅ Schema validation passed")
 95 |             return True
 96 |         else:
 97 |             logger.error("❌ Schema validation failed")
 98 |             return False
 99 |     except Exception as e:
100 |         logger.error(f"❌ Schema validation error: {e}")
101 |         return False
102 | 
103 | 
104 | def load_sample_data(quick: bool = False) -> bool:
105 |     """Load sample market data."""
106 |     logger.info("📊 Loading sample market data...")
107 | 
108 |     try:
109 |         # Import here to avoid circular imports
110 |         from load_market_data import TiingoDataLoader
111 | 
112 |         # Check if Tiingo API token is available
113 |         if not os.getenv("TIINGO_API_TOKEN"):
114 |             logger.warning("⚠️  TIINGO_API_TOKEN not set, skipping market data loading")
115 |             logger.info(
116 |                 "   You can load market data later using: python scripts/load_market_data.py"
117 |             )
118 |             return True
119 | 
120 |         # Determine symbols to load
121 |         if quick:
122 |             symbols = [
123 |                 "AAPL",
124 |                 "MSFT",
125 |                 "GOOGL",
126 |                 "AMZN",
127 |                 "TSLA",
128 |                 "META",
129 |                 "NVDA",
130 |                 "JPM",
131 |                 "V",
132 |                 "PG",
133 |             ]
134 |         else:
135 |             # Load more comprehensive set
136 |             from load_market_data import get_sp500_symbols
137 | 
138 |             symbols = get_sp500_symbols()
139 | 
140 |         async def load_data():
141 |             async with TiingoDataLoader() as loader:
142 |                 loaded_count = await loader.load_stock_data(symbols)
143 |                 return loaded_count
144 | 
145 |         loaded_count = asyncio.run(load_data())
146 |         logger.info(f"✅ Loaded market data for {loaded_count} stocks")
147 |         return True
148 | 
149 |     except ImportError as e:
150 |         logger.error(f"❌ Cannot import market data loader: {e}")
151 |         return False
152 |     except Exception as e:
153 |         logger.error(f"❌ Market data loading failed: {e}")
154 |         return False
155 | 
156 | 
157 | def run_sample_screening(quick: bool = False) -> bool:
158 |     """Run sample stock screening."""
159 |     logger.info("🎯 Running sample stock screening...")
160 | 
161 |     try:
162 |         # Import here to avoid circular imports
163 |         from datetime import datetime
164 | 
165 |         from run_stock_screening import StockScreener
166 | 
167 |         from maverick_mcp.config.database_self_contained import (
168 |             SelfContainedDatabaseSession,
169 |         )
170 |         from maverick_mcp.data.models import MaverickStocks, bulk_insert_screening_data
171 | 
172 |         async def run_screening():
173 |             screener = StockScreener()
174 |             today = datetime.now().date()
175 | 
176 |             with SelfContainedDatabaseSession() as session:
177 |                 if quick:
178 |                     # Just run Maverick screening
179 |                     results = await screener.run_maverick_screening(session)
180 |                     if results:
181 |                         count = bulk_insert_screening_data(
182 |                             session, MaverickStocks, results, today
183 |                         )
184 |                         return count
185 |                 else:
186 |                     # Run all screenings
187 |                     total_count = 0
188 | 
189 |                     # Maverick screening
190 |                     maverick_results = await screener.run_maverick_screening(session)
191 |                     if maverick_results:
192 |                         count = bulk_insert_screening_data(
193 |                             session, MaverickStocks, maverick_results, today
194 |                         )
195 |                         total_count += count
196 | 
197 |                     return total_count
198 | 
199 |             return 0
200 | 
201 |         count = asyncio.run(run_screening())
202 |         logger.info(f"✅ Completed screening, found {count} candidates")
203 |         return True
204 | 
205 |     except ImportError as e:
206 |         logger.error(f"❌ Cannot import screening modules: {e}")
207 |         return False
208 |     except Exception as e:
209 |         logger.error(f"❌ Sample screening failed: {e}")
210 |         return False
211 | 
212 | 
213 | def display_setup_summary() -> None:
214 |     """Display setup summary and next steps."""
215 |     logger.info("📋 Setup Summary:")
216 | 
217 |     try:
218 |         db_config = get_self_contained_db_config()
219 |         stats = db_config.get_database_stats()
220 | 
221 |         print("\n📊 Database Statistics:")
222 |         print(f"   Database URL: {stats.get('database_url', 'Unknown')}")
223 |         print(f"   Total Records: {stats.get('total_records', 0)}")
224 | 
225 |         for table, count in stats.get("tables", {}).items():
226 |             print(f"   {table}: {count}")
227 | 
228 |     except Exception as e:
229 |         logger.error(f"❌ Could not get database stats: {e}")
230 | 
231 |     print("\n🎉 Self-contained Maverick-MCP setup completed!")
232 |     print("\n📚 Next Steps:")
233 |     print("   1. Start the MCP server: python start_mcp_server.py")
234 |     print("   2. Load more market data: python scripts/load_market_data.py --sp500")
235 |     print("   3. Run screening: python scripts/run_stock_screening.py --all")
236 |     print("   4. Access the web dashboard: http://localhost:3001")
237 | 
238 |     print("\n💡 Available Scripts:")
239 |     print("   - scripts/load_market_data.py: Load stock and price data")
240 |     print("   - scripts/run_stock_screening.py: Run screening algorithms")
241 |     print("   - scripts/setup_self_contained.py: This setup script")
242 | 
243 |     print("\n🔧 Environment Variables:")
244 |     print("   - TIINGO_API_TOKEN: Set to load market data")
245 |     print("   - MCP_DATABASE_URL: Override database URL")
246 |     print("   - DB_POOL_SIZE: Database connection pool size (default: 20)")
247 | 
248 | 
249 | def main():  # synchronous: the helpers above call asyncio.run() themselves
250 |     """Main setup function."""
251 |     parser = argparse.ArgumentParser(description="Setup self-contained Maverick-MCP")
252 |     parser.add_argument(
253 |         "--full-setup",
254 |         action="store_true",
255 |         help="Run complete setup with comprehensive data loading",
256 |     )
257 |     parser.add_argument(
258 |         "--quick-setup",
259 |         action="store_true",
260 |         help="Run quick setup with minimal sample data",
261 |     )
262 |     parser.add_argument(
263 |         "--migrate-only", action="store_true", help="Only run database migrations"
264 |     )
265 |     parser.add_argument("--database-url", type=str, help="Override database URL")
266 |     parser.add_argument(
267 |         "--skip-data",
268 |         action="store_true",
269 |         help="Skip loading market data and screening",
270 |     )
271 | 
272 |     args = parser.parse_args()
273 | 
274 |     if not any([args.full_setup, args.quick_setup, args.migrate_only]):
275 |         parser.print_help()
276 |         sys.exit(1)
277 | 
278 |     print("🚀 Starting Maverick-MCP Self-Contained Setup...")
279 |     print("=" * 60)
280 | 
281 |     # Step 1: Check requirements
282 |     if not check_requirements():
283 |         sys.exit(1)
284 | 
285 |     # Step 2: Initialize database
286 |     try:
287 |         logger.info("🗄️  Initializing self-contained database...")
288 |         init_self_contained_database(
289 |             database_url=args.database_url, create_tables=True, validate_schema=True
290 |         )
291 |         logger.info("✅ Database initialization completed")
292 |     except Exception as e:
293 |         logger.error(f"❌ Database initialization failed: {e}")
294 |         sys.exit(1)
295 | 
296 |     # Step 3: Run migrations
297 |     if not run_migrations():
298 |         sys.exit(1)
299 | 
300 |     # Step 4: Validate schema
301 |     if not validate_schema():
302 |         sys.exit(1)
303 | 
304 |     # Stop here if migrate-only
305 |     if args.migrate_only:
306 |         logger.info("✅ Migration-only setup completed successfully")
307 |         return
308 | 
309 |     # Step 5: Load sample data (unless skipped)
310 |     if not args.skip_data:
311 |         quick = args.quick_setup
312 | 
313 |         if not load_sample_data(quick=quick):
314 |             logger.warning("⚠️  Market data loading failed, but continuing setup")
315 | 
316 |         # Step 6: Run sample screening
317 |         if not run_sample_screening(quick=quick):
318 |             logger.warning("⚠️  Sample screening failed, but continuing setup")
319 | 
320 |     # Step 7: Display summary
321 |     display_setup_summary()
322 | 
323 |     print("\n" + "=" * 60)
324 |     print("🎉 Self-contained Maverick-MCP setup completed successfully!")
325 | 
326 | 
327 | if __name__ == "__main__":
328 |     main()  # not asyncio.run(): nested asyncio.run() calls in the helpers would fail inside a running loop
329 | 
```
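
Once a setup run has completed, the install can be spot-checked from a Python shell with the same helpers the script imports. A minimal sketch, assuming the same environment variables (e.g. `MCP_DATABASE_URL`) as the setup run:

```python
# Sketch: verify a finished setup using the stats call that
# display_setup_summary() uses internally.
from maverick_mcp.config.database_self_contained import (
    get_self_contained_db_config,
)

config = get_self_contained_db_config()
stats = config.get_database_stats()
print(f"Total records: {stats.get('total_records', 0)}")
for table, count in stats.get("tables", {}).items():
    print(f"  {table}: {count}")
```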

--------------------------------------------------------------------------------
/maverick_mcp/utils/agent_errors.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Agent-friendly error handler with helpful fix suggestions.
  3 | 
  4 | This module provides decorators and utilities to catch common errors
  5 | and provide actionable solutions for agents.
  6 | """
  7 | 
  8 | import asyncio
  9 | import functools
 10 | import traceback
 11 | from collections.abc import Callable
 12 | from typing import Any, TypeVar
 13 | 
 14 | from maverick_mcp.utils.logging import get_logger
 15 | 
 16 | logger = get_logger(__name__)
 17 | 
 18 | T = TypeVar("T")
 19 | 
 20 | # Common error patterns and their fixes
 21 | ERROR_FIXES = {
 22 |     # DataFrame column errors
 23 |     "KeyError.*close": {
 24 |         "error": "DataFrame column 'close' not found",
 25 |         "fix": "Use 'Close' with capital C - DataFrame columns are case-sensitive",
 26 |         "example": "df['Close'] not df['close']",
 27 |     },
 28 |     "KeyError.*open": {
 29 |         "error": "DataFrame column 'open' not found",
 30 |         "fix": "Use 'Open' with capital O",
 31 |         "example": "df['Open'] not df['open']",
 32 |     },
 33 |     "KeyError.*high": {
 34 |         "error": "DataFrame column 'high' not found",
 35 |         "fix": "Use 'High' with capital H",
 36 |         "example": "df['High'] not df['high']",
 37 |     },
 38 |     "KeyError.*low": {
 39 |         "error": "DataFrame column 'low' not found",
 40 |         "fix": "Use 'Low' with capital L",
 41 |         "example": "df['Low'] not df['low']",
 42 |     },
 43 |     "KeyError.*volume": {
 44 |         "error": "DataFrame column 'volume' not found",
 45 |         "fix": "Use 'Volume' with capital V",
 46 |         "example": "df['Volume'] not df['volume']",
 47 |     },
 48 |     # Authentication errors
 49 |     "401.*Unauthorized": {
 50 |         "error": "Authentication required",
 51 |         "fix": "Set AUTH_ENABLED=false for development or use generate_dev_token tool",
 52 |         "example": "AUTH_ENABLED=false python -m maverick_mcp.api.server",
 53 |     },
 54 |     # Connection errors
 55 |     "Redis.*Connection.*refused": {
 56 |         "error": "Redis connection failed",
 57 |         "fix": "Start Redis: brew services start redis",
 58 |         "example": "Or set REDIS_HOST=none to skip caching",
 59 |     },
 60 |     "psycopg2.*could not connect": {
 61 |         "error": "PostgreSQL connection failed",
 62 |         "fix": "Use SQLite for development: DATABASE_URL=sqlite:///dev.db",
 63 |         "example": "Or start PostgreSQL: brew services start postgresql",
 64 |     },
 65 |     # Import errors
 66 |     "ModuleNotFoundError.*maverick": {
 67 |         "error": "Maverick MCP modules not found",
 68 |         "fix": "Install dependencies: uv sync",
 69 |         "example": "Make sure you're in the project root directory",
 70 |     },
 71 |     "ImportError.*ta_lib": {
 72 |         "error": "TA-Lib not installed",
 73 |         "fix": "Install TA-Lib: brew install ta-lib && uv pip install ta-lib",
 74 |         "example": "TA-Lib requires system libraries",
 75 |     },
 76 |     # Type errors
 77 |     "TypeError.*NoneType.*has no attribute": {
 78 |         "error": "Trying to access attribute on None",
 79 |         "fix": "Check if the object exists before accessing attributes",
 80 |         "example": "if obj is not None: obj.attribute",
 81 |     },
 82 |     # Value errors
 83 |     "ValueError.*not enough values to unpack": {
 84 |         "error": "Unpacking mismatch",
 85 |         "fix": "Check the return value - it might be None or have fewer values",
 86 |         "example": "result = func(); if result: a, b = result",
 87 |     },
 88 |     # Async errors
 89 |     "RuntimeError.*no running event loop": {
 90 |         "error": "Async function called without event loop",
 91 |         "fix": "Use asyncio.run() or await in async context",
 92 |         "example": "asyncio.run(async_function())",
 93 |     },
 94 |     # File errors
 95 |     "FileNotFoundError": {
 96 |         "error": "File not found",
 97 |         "fix": "Check the file path - use absolute paths for reliability",
 98 |         "example": "Path(__file__).parent / 'data.csv'",
 99 |     },
100 |     # Port errors
101 |     "Address already in use.*8000": {
102 |         "error": "Port 8000 already in use",
103 |         "fix": "Stop the existing server: make stop",
104 |         "example": "Or use a different port: --port 8001",
105 |     },
106 | }
107 | 
108 | 
109 | def find_error_fix(error_str: str) -> dict[str, str] | None:
110 |     """Find a fix suggestion for the given error string."""
111 |     import re
112 | 
113 |     error_str_lower = str(error_str).lower()
114 | 
115 |     for pattern, fix_info in ERROR_FIXES.items():
116 |         if re.search(pattern.lower(), error_str_lower):
117 |             return fix_info
118 | 
119 |     return None
120 | 
121 | 
122 | def agent_friendly_errors[T](
123 |     func: Callable[..., T] | None = None,
124 |     *,
125 |     provide_fix: bool = True,
126 |     log_errors: bool = True,
127 |     reraise: bool = True,
128 | ) -> Callable[..., T] | Callable[[Callable[..., T]], Callable[..., T]]:
129 |     """
130 |     Decorator that catches errors and provides helpful fix suggestions.
131 | 
132 |     Args:
133 |         provide_fix: Whether to include fix suggestions
134 |         log_errors: Whether to log errors
135 |         reraise: Whether to re-raise the error after logging
136 | 
137 |     Usage:
138 |         @agent_friendly_errors
139 |         def my_function():
140 |             ...
141 | 
142 |         @agent_friendly_errors(reraise=False)
143 |         def my_function():
144 |             ...
145 |     """
146 | 
147 |     def decorator(f: Callable[..., T]) -> Callable[..., T]:
148 |         @functools.wraps(f)
149 |         def wrapper(*args: Any, **kwargs: Any) -> T:
150 |             try:
151 |                 return f(*args, **kwargs)
152 |             except Exception as e:
153 |                 error_msg = str(e)
154 |                 error_type = type(e).__name__
155 | 
156 |                 # Build error info
157 |                 error_info = {
158 |                     "function": f.__name__,
159 |                     "error_type": error_type,
160 |                     "error_message": error_msg,
161 |                 }
162 |                 fix_info = None  # always bound; read below even when provide_fix is False
163 |                 # Find fix suggestion
164 |                 if provide_fix:
165 |                     fix_info = find_error_fix(f"{error_type}: {error_msg}")  # include the type so "KeyError.*" patterns match
166 |                     if fix_info:
167 |                         error_info["fix_suggestion"] = fix_info
168 | 
169 |                 # Log the error
170 |                 if log_errors:
171 |                     logger.error(
172 |                         f"Error in {f.__name__}: {error_type}: {error_msg}",
173 |                         extra=error_info,
174 |                         exc_info=True,
175 |                     )
176 | 
177 |                     if fix_info:
178 |                         logger.info(
179 |                             f"💡 Fix suggestion: {fix_info['fix']}",
180 |                             extra={"example": fix_info.get("example", "")},
181 |                         )
182 | 
183 |                 # Create enhanced error message
184 |                 if fix_info and provide_fix:
185 |                     enhanced_msg = (
186 |                         f"{error_msg}\n\n"
187 |                         f"💡 Fix: {fix_info['fix']}\n"
188 |                         f"Example: {fix_info.get('example', '')}"
189 |                     )
190 |                     # Replace the error message
191 |                     e.args = (enhanced_msg,) + e.args[1:]
192 | 
193 |                 if reraise:
194 |                     raise
195 | 
196 |                 # Return error info if not re-raising
197 |                 return error_info  # type: ignore[return-value]
198 | 
199 |         # Add async support
200 |         if asyncio.iscoroutinefunction(f):
201 | 
202 |             @functools.wraps(f)
203 |             async def async_wrapper(*args: Any, **kwargs: Any) -> T:
204 |                 try:
205 |                     return await f(*args, **kwargs)
206 |                 except Exception as e:
207 |                     # Same error handling logic
208 |                     error_msg = str(e)
209 |                     error_type = type(e).__name__
210 | 
211 |                     error_info = {
212 |                         "function": f.__name__,
213 |                         "error_type": error_type,
214 |                         "error_message": error_msg,
215 |                     }
216 |                     fix_info = None  # always bound, as in the sync wrapper
217 |                     if provide_fix:
218 |                         fix_info = find_error_fix(f"{error_type}: {error_msg}")
219 |                         if fix_info:
220 |                             error_info["fix_suggestion"] = fix_info
221 | 
222 |                     if log_errors:
223 |                         logger.error(
224 |                             f"Error in {f.__name__}: {error_type}: {error_msg}",
225 |                             extra=error_info,
226 |                             exc_info=True,
227 |                         )
228 | 
229 |                         if fix_info:
230 |                             logger.info(
231 |                                 f"💡 Fix suggestion: {fix_info['fix']}",
232 |                                 extra={"example": fix_info.get("example", "")},
233 |                             )
234 | 
235 |                     if fix_info and provide_fix:
236 |                         enhanced_msg = (
237 |                             f"{error_msg}\n\n"
238 |                             f"💡 Fix: {fix_info['fix']}\n"
239 |                             f"Example: {fix_info.get('example', '')}"
240 |                         )
241 |                         e.args = (enhanced_msg,) + e.args[1:]
242 | 
243 |                     if reraise:
244 |                         raise
245 | 
246 |                     return error_info  # type: ignore[return-value]
247 | 
248 |             return async_wrapper  # type: ignore[return-value]
249 | 
250 |         return wrapper
251 | 
252 |     # Handle being called with or without parentheses
253 |     if func is None:
254 |         return decorator
255 |     else:
256 |         return decorator(func)
257 | 
258 | 
259 | # Context manager for agent-friendly error handling
260 | class AgentErrorContext:
261 |     """Context manager that provides helpful error messages."""
262 | 
263 |     def __init__(self, operation: str = "operation"):
264 |         self.operation = operation
265 | 
266 |     def __enter__(self):
267 |         return self
268 | 
269 |     def __exit__(self, exc_type, exc_val, exc_tb):
270 |         if exc_type is not None:
271 |             error_msg = str(exc_val)
272 |             fix_info = find_error_fix(f"{exc_type.__name__}: {error_msg}")
273 | 
274 |             if fix_info:
275 |                 logger.error(
276 |                     f"Error during {self.operation}: {exc_type.__name__}: {error_msg}"
277 |                 )
278 |                 logger.info(
279 |                     f"💡 Fix: {fix_info['fix']}",
280 |                     extra={"example": fix_info.get("example", "")},
281 |                 )
282 |                 # Don't suppress the exception
283 |                 return False
284 | 
285 |         return False
286 | 
287 | 
288 | # Utility function to get common error context
289 | def get_error_context(error: Exception) -> dict[str, Any]:
290 |     """Extract useful context from an error."""
291 |     context = {
292 |         "error_type": type(error).__name__,
293 |         "error_message": str(error),
294 |         "traceback": traceback.format_exc().split("\n"),
295 |     }
296 | 
297 |     # Add specific context based on error type
298 |     if isinstance(error, KeyError):
299 |         context["key"] = error.args[0] if error.args else "unknown"
300 |     elif isinstance(error, ValueError):
301 |         context["value_error_details"] = error.args
302 |     elif isinstance(error, ConnectionError):
303 |         context["connection_type"] = "network"
304 |     elif hasattr(error, "response"):  # HTTP errors
305 |         context["status_code"] = getattr(error.response, "status_code", None)
306 |         context["response_text"] = getattr(error.response, "text", None)
307 | 
308 |     return context
309 | 
```
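
A short sketch of how these helpers compose; `df` stands in for a pandas DataFrame, and the new `ERROR_FIXES` entry is an illustrative pattern, not one shipped with the module:

```python
from maverick_mcp.utils.agent_errors import (
    ERROR_FIXES,
    AgentErrorContext,
    agent_friendly_errors,
)

@agent_friendly_errors(reraise=False)
def latest_close(df):
    # A KeyError here is logged with the "use 'Close'" fix and, because
    # reraise=False, the wrapper returns the error-info dict instead.
    return df["close"].iloc[-1]

def load_prices():
    with AgentErrorContext(operation="loading prices"):
        # Any exception raised here is logged with a matching fix
        # suggestion, then propagated to the caller.
        raise ConnectionError("Redis Connection refused")

# New patterns are plain regex keys on the module-level table:
ERROR_FIXES["Address already in use.*8001"] = {
    "error": "Port 8001 already in use",
    "fix": "Stop the conflicting process or choose another port",
    "example": "--port 8002",
}
```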

--------------------------------------------------------------------------------
/tests/integration/test_redis_cache.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Integration tests for Redis caching functionality.
  3 | """
  4 | 
  5 | import asyncio
  6 | import json
  7 | from datetime import datetime
  8 | 
  9 | import pytest
 10 | 
 11 | from tests.integration.base import RedisIntegrationTest
 12 | 
 13 | 
 14 | @pytest.mark.integration
 15 | @pytest.mark.redis
 16 | class TestRedisCache(RedisIntegrationTest):
 17 |     """Test Redis caching with real Redis instance."""
 18 | 
 19 |     async def test_basic_cache_operations(self):
 20 |         """Test basic cache set/get/delete operations."""
 21 |         # Set value
 22 |         key = "test:basic:key"
 23 |         value = {"data": "test value", "timestamp": datetime.now().isoformat()}
 24 | 
 25 |         await self.redis_client.setex(
 26 |             key,
 27 |             300,  # 5 minutes TTL
 28 |             json.dumps(value),
 29 |         )
 30 | 
 31 |         # Get value
 32 |         cached = await self.redis_client.get(key)
 33 |         assert cached is not None
 34 | 
 35 |         cached_data = json.loads(cached)
 36 |         assert cached_data["data"] == value["data"]
 37 |         assert cached_data["timestamp"] == value["timestamp"]
 38 | 
 39 |         # Delete value
 40 |         deleted = await self.redis_client.delete(key)
 41 |         assert deleted == 1
 42 | 
 43 |         # Verify deleted
 44 |         await self.assert_cache_not_exists(key)
 45 | 
 46 |     async def test_cache_expiration(self):
 47 |         """Test cache TTL and expiration."""
 48 |         key = "test:expiry:key"
 49 |         value = "expires soon"
 50 | 
 51 |         # Set with 1 second TTL
 52 |         await self.redis_client.setex(key, 1, value)
 53 | 
 54 |         # Should exist immediately
 55 |         await self.assert_cache_exists(key)
 56 | 
 57 |         # Wait for expiration
 58 |         await asyncio.sleep(1.5)
 59 | 
 60 |         # Should be expired
 61 |         await self.assert_cache_not_exists(key)
 62 | 
 63 |     async def test_stock_data_caching(self):
 64 |         """Test caching of stock data."""
 65 |         from maverick_mcp.data.cache import CacheManager
 66 | 
 67 |         cache_manager = CacheManager()
 68 | 
 69 |         # Create sample stock data
 70 |         stock_data = {
 71 |             "symbol": "AAPL",
 72 |             "data": {"sample": "data"},  # Simplified for test
 73 |             "timestamp": datetime.now().isoformat(),
 74 |         }
 75 | 
 76 |         # Cache stock data
 77 |         cache_key = "stock:AAPL:1d"
 78 |         await cache_manager.set(
 79 |             cache_key,
 80 |             stock_data,
 81 |             ttl=3600,  # 1 hour
 82 |         )
 83 | 
 84 |         # Retrieve from cache
 85 |         cached = await cache_manager.get(cache_key)
 86 |         assert cached is not None
 87 |         assert cached["symbol"] == "AAPL"
 88 |         assert "data" in cached
 89 | 
 90 |         # Test cache invalidation
 91 |         await cache_manager.delete(cache_key)
 92 | 
 93 |         # Should be removed
 94 |         cached = await cache_manager.get(cache_key)
 95 |         assert cached is None
 96 | 
 97 |     # Test commented out - rate_limiter module not available
 98 |     # async def test_rate_limiting_cache(self):
 99 |     #     """Test rate limiting using Redis."""
100 |     #     from maverick_mcp.auth.rate_limiter import RateLimiter
101 |     #
102 |     #     rate_limiter = RateLimiter(self.redis_client)
103 |     #
104 |     #     # Configure rate limit: 5 requests per minute
105 |     #     user_id = "test_user_123"
106 |     #     limit = 5
107 |     #     window = 60  # seconds
108 |     #
109 |     #     # Make requests up to limit
110 |     #     for _ in range(limit):
111 |     #         allowed = await rate_limiter.check_rate_limit(user_id, limit, window)
112 |     #         assert allowed is True
113 |     #
114 |     #     # Next request should be blocked
115 |     #     allowed = await rate_limiter.check_rate_limit(user_id, limit, window)
116 |     #     assert allowed is False
117 |     #
118 |     #     # Check remaining
119 |     #     remaining = await rate_limiter.get_remaining_requests(user_id, limit, window)
120 |     #     assert remaining == 0
121 | 
122 |     async def test_distributed_locking(self):
123 |         """Test distributed locking with Redis."""
124 |         import uuid
125 | 
126 |         lock_key = "test:lock:resource"
127 |         lock_value = str(uuid.uuid4())
128 |         lock_ttl = 5  # seconds
129 | 
130 |         # Acquire lock
131 |         acquired = await self.redis_client.set(
132 |             lock_key,
133 |             lock_value,
134 |             nx=True,  # Only set if not exists
135 |             ex=lock_ttl,
136 |         )
137 |         assert acquired is not None  # Redis returns 'OK' string on success
138 | 
139 |         # Try to acquire again (should fail)
140 |         acquired2 = await self.redis_client.set(
141 |             lock_key, "different_value", nx=True, ex=lock_ttl
142 |         )
143 |         assert acquired2 is None  # Redis returns None when nx fails
144 | 
145 |         # Release lock (only if we own it)
146 |         lua_script = """
147 |         if redis.call("get", KEYS[1]) == ARGV[1] then
148 |             return redis.call("del", KEYS[1])
149 |         else
150 |             return 0
151 |         end
152 |         """
153 | 
154 |         released = await self.redis_client.eval(
155 |             lua_script, keys=[lock_key], args=[lock_value]
156 |         )
157 |         assert released == 1
158 | 
159 |         # Lock should be available now
160 |         acquired3 = await self.redis_client.set(
161 |             lock_key, "new_value", nx=True, ex=lock_ttl
162 |         )
163 |         assert acquired3 is not None  # Redis returns 'OK' string on success
164 | 
165 |     async def test_cache_patterns(self):
166 |         """Test various cache key patterns and operations."""
167 |         # Set multiple keys with pattern
168 |         base_pattern = "test:pattern"
169 |         for i in range(10):
170 |             key = f"{base_pattern}:{i}"
171 |             await self.redis_client.set(key, f"value_{i}")
172 | 
173 |         # Scan for keys matching pattern
174 |         keys = []
175 |         cursor = 0
176 |         while True:
177 |             cursor, batch = await self.redis_client.scan(
178 |                 cursor, match=f"{base_pattern}:*", count=100
179 |             )
180 |             keys.extend(batch)
181 |             if cursor == 0:
182 |                 break
183 | 
184 |         assert len(keys) == 10
185 | 
186 |         # Bulk get
187 |         values = await self.redis_client.mget(keys)
188 |         assert len(values) == 10
189 |         assert all(v is not None for v in values)
190 | 
191 |         # Bulk delete
192 |         deleted = await self.redis_client.delete(*keys)
193 |         assert deleted == 10
194 | 
195 |     async def test_cache_statistics(self):
196 |         """Test cache hit/miss statistics."""
197 |         stats_key = "cache:stats"
198 | 
199 |         # Initialize stats
200 |         await self.redis_client.hset(
201 |             stats_key,
202 |             mapping={
203 |                 "hits": 0,
204 |                 "misses": 0,
205 |                 "total": 0,
206 |             },
207 |         )
208 | 
209 |         # Simulate cache operations
210 |         async def record_hit():
211 |             await self.redis_client.hincrby(stats_key, "hits", 1)
212 |             await self.redis_client.hincrby(stats_key, "total", 1)
213 | 
214 |         async def record_miss():
215 |             await self.redis_client.hincrby(stats_key, "misses", 1)
216 |             await self.redis_client.hincrby(stats_key, "total", 1)
217 | 
218 |         # Simulate 70% hit rate
219 |         for i in range(100):
220 |             if i % 10 < 7:
221 |                 await record_hit()
222 |             else:
223 |                 await record_miss()
224 | 
225 |         # Get stats
226 |         stats = await self.redis_client.hgetall(stats_key)
227 | 
228 |         hits = int(stats[b"hits"])
229 |         misses = int(stats[b"misses"])
230 |         total = int(stats[b"total"])
231 | 
232 |         assert total == 100
233 |         assert hits == 70
234 |         assert misses == 30
235 | 
236 |         hit_rate = hits / total
237 |         assert hit_rate == 0.7
238 | 
239 |     async def test_pub_sub_messaging(self):
240 |         """Test Redis pub/sub for real-time updates."""
241 |         channel = "test:updates"
242 |         message = {"type": "price_update", "symbol": "AAPL", "price": 150.50}
243 | 
244 |         # Create pubsub
245 |         pubsub = self.redis_client.pubsub()
246 |         await pubsub.subscribe(channel)
247 | 
248 |         # Publish message
249 |         await self.redis_client.publish(channel, json.dumps(message))
250 | 
251 |         # Receive message
252 |         received = None
253 |         async for msg in pubsub.listen():
254 |             if msg["type"] == "message":
255 |                 received = json.loads(msg["data"])
256 |                 break
257 | 
258 |         assert received is not None
259 |         assert received["type"] == "price_update"
260 |         assert received["symbol"] == "AAPL"
261 |         assert received["price"] == 150.50
262 | 
263 |         # Cleanup
264 |         await pubsub.unsubscribe(channel)
265 |         await pubsub.close()
266 | 
267 |     async def test_cache_warming(self):
268 |         """Test cache warming strategies."""
269 |         # Simulate warming cache with frequently accessed data
270 |         frequent_symbols = ["AAPL", "GOOGL", "MSFT", "AMZN", "TSLA"]
271 | 
272 |         # Warm cache
273 |         for symbol in frequent_symbols:
274 |             cache_key = f"stock:quote:{symbol}"
275 |             quote_data = {
276 |                 "symbol": symbol,
277 |                 "price": 100.0 + hash(symbol) % 100,
278 |                 "volume": 1000000,
279 |                 "timestamp": datetime.now().isoformat(),
280 |             }
281 | 
282 |             await self.redis_client.setex(
283 |                 cache_key,
284 |                 3600,  # 1 hour
285 |                 json.dumps(quote_data),
286 |             )
287 | 
288 |         # Verify all cached
289 |         for symbol in frequent_symbols:
290 |             cache_key = f"stock:quote:{symbol}"
291 |             await self.assert_cache_exists(cache_key)
292 | 
293 |         # Test batch retrieval
294 |         keys = [f"stock:quote:{symbol}" for symbol in frequent_symbols]
295 |         values = await self.redis_client.mget(keys)
296 | 
297 |         assert len(values) == len(frequent_symbols)
298 |         assert all(v is not None for v in values)
299 | 
300 |         # Parse and verify
301 |         for value, symbol in zip(values, frequent_symbols, strict=False):
302 |             data = json.loads(value)
303 |             assert data["symbol"] == symbol
304 | 
305 |     async def test_cache_memory_optimization(self):
306 |         """Test memory optimization strategies."""
307 |         # Test different serialization formats
308 |         import pickle
309 |         import zlib
310 | 
311 |         large_data = {
312 |             "symbol": "TEST",
313 |             "historical_data": [
314 |                 {"date": f"2024-01-{i:02d}", "price": 100 + i} for i in range(1, 32)
315 |             ]
316 |             * 10,  # Replicate to make it larger
317 |         }
318 | 
319 |         # JSON serialization
320 |         json_data = json.dumps(large_data)
321 |         json_size = len(json_data.encode())
322 | 
323 |         # Pickle serialization
324 |         pickle_data = pickle.dumps(large_data)
325 |         pickle_size = len(pickle_data)
326 | 
327 |         # Compressed JSON
328 |         compressed_data = zlib.compress(json_data.encode())
329 |         compressed_size = len(compressed_data)
330 | 
331 |         # Store all versions
332 |         await self.redis_client.set("test:json", json_data)
333 |         await self.redis_client.set("test:pickle", pickle_data)
334 |         await self.redis_client.set("test:compressed", compressed_data)
335 | 
336 |         # Compare sizes
337 |         assert compressed_size < json_size
338 |         assert compressed_size < pickle_size
339 | 
340 |         # Verify decompression works
341 |         retrieved = await self.redis_client.get("test:compressed")
342 |         decompressed = zlib.decompress(retrieved)
343 |         restored_data = json.loads(decompressed)
344 | 
345 |         assert restored_data["symbol"] == "TEST"
346 |         assert len(restored_data["historical_data"]) == 310
347 | 
```
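
The lock test above encodes a pattern worth reusing: atomic acquire with `SET NX EX`, plus a compare-and-delete release in Lua so a lock can only be freed by its owner. A minimal standalone sketch, assuming redis-py's asyncio client and an illustrative key name:

```python
# Sketch of the test's locking pattern as a helper (redis-py >= 5 assumed).
import asyncio
import uuid

import redis.asyncio as aioredis

RELEASE_LUA = """
if redis.call("get", KEYS[1]) == ARGV[1] then
    return redis.call("del", KEYS[1])
else
    return 0
end
"""

async def run_locked(client: aioredis.Redis, key: str, ttl: int = 5) -> bool:
    token = str(uuid.uuid4())
    # SET NX EX acquires atomically; a falsy result means the lock is held.
    if not await client.set(key, token, nx=True, ex=ttl):
        return False
    try:
        ...  # critical section goes here
        return True
    finally:
        # Release only if we still own the lock (compare-and-delete in Lua).
        release = client.register_script(RELEASE_LUA)
        await release(keys=[key], args=[token])

async def main() -> None:
    client = aioredis.Redis()  # localhost:6379 by default
    print(await run_locked(client, "locks:example"))
    await client.aclose()  # use close() on redis-py < 5

asyncio.run(main())
```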

--------------------------------------------------------------------------------
/tests/test_cache_management_service.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Tests for CacheManagementService.
  3 | """
  4 | 
  5 | from unittest.mock import Mock, patch
  6 | 
  7 | import pandas as pd
  8 | 
  9 | from maverick_mcp.infrastructure.caching import CacheManagementService
 10 | 
 11 | 
 12 | class TestCacheManagementService:
 13 |     """Test cases for CacheManagementService."""
 14 | 
 15 |     def setup_method(self):
 16 |         """Set up test fixtures."""
 17 |         self.mock_session = Mock()
 18 |         self.service = CacheManagementService(
 19 |             db_session=self.mock_session, cache_days=1
 20 |         )
 21 | 
 22 |     def test_init_with_session(self):
 23 |         """Test service initialization with provided session."""
 24 |         assert self.service.cache_days == 1
 25 |         assert self.service._db_session == self.mock_session
 26 | 
 27 |     def test_init_without_session(self):
 28 |         """Test service initialization without session."""
 29 |         service = CacheManagementService(cache_days=7)
 30 |         assert service.cache_days == 7
 31 |         assert service._db_session is None
 32 | 
 33 |     @patch("maverick_mcp.infrastructure.caching.cache_management_service.PriceCache")
 34 |     def test_get_cached_data_success(self, mock_price_cache):
 35 |         """Test successful cache data retrieval."""
 36 |         # Mock data from cache
 37 |         mock_data = pd.DataFrame(
 38 |             {
 39 |                 "open": [150.0, 151.0],
 40 |                 "high": [152.0, 153.0],
 41 |                 "low": [149.0, 150.0],
 42 |                 "close": [151.0, 152.0],
 43 |                 "volume": [1000000, 1100000],
 44 |             },
 45 |             index=pd.date_range("2024-01-01", periods=2),
 46 |         )
 47 | 
 48 |         mock_price_cache.get_price_data.return_value = mock_data
 49 | 
 50 |         # Test
 51 |         result = self.service.get_cached_data("AAPL", "2024-01-01", "2024-01-02")
 52 | 
 53 |         # Assertions
 54 |         assert result is not None
 55 |         assert not result.empty
 56 |         assert len(result) == 2
 57 |         # Check column normalization
 58 |         assert "Open" in result.columns
 59 |         assert "Close" in result.columns
 60 |         assert "Dividends" in result.columns
 61 |         assert "Stock Splits" in result.columns
 62 |         mock_price_cache.get_price_data.assert_called_once_with(
 63 |             self.mock_session, "AAPL", "2024-01-01", "2024-01-02"
 64 |         )
 65 | 
 66 |     @patch("maverick_mcp.infrastructure.caching.cache_management_service.PriceCache")
 67 |     def test_get_cached_data_empty(self, mock_price_cache):
 68 |         """Test cache data retrieval with empty result."""
 69 |         mock_price_cache.get_price_data.return_value = pd.DataFrame()
 70 | 
 71 |         # Test
 72 |         result = self.service.get_cached_data("INVALID", "2024-01-01", "2024-01-02")
 73 | 
 74 |         # Assertions
 75 |         assert result is None
 76 | 
 77 |     @patch("maverick_mcp.infrastructure.caching.cache_management_service.PriceCache")
 78 |     def test_get_cached_data_error(self, mock_price_cache):
 79 |         """Test cache data retrieval with database error."""
 80 |         mock_price_cache.get_price_data.side_effect = Exception("Database error")
 81 | 
 82 |         # Test
 83 |         result = self.service.get_cached_data("AAPL", "2024-01-01", "2024-01-02")
 84 | 
 85 |         # Assertions
 86 |         assert result is None
 87 | 
 88 |     @patch(
 89 |         "maverick_mcp.infrastructure.caching.cache_management_service.bulk_insert_price_data"
 90 |     )
 91 |     @patch("maverick_mcp.infrastructure.caching.cache_management_service.Stock")
 92 |     def test_cache_data_success(self, mock_stock, mock_bulk_insert):
 93 |         """Test successful data caching."""
 94 |         # Mock data to cache
 95 |         data = pd.DataFrame(
 96 |             {
 97 |                 "Open": [150.0, 151.0],
 98 |                 "High": [152.0, 153.0],
 99 |                 "Low": [149.0, 150.0],
100 |                 "Close": [151.0, 152.0],
101 |                 "Volume": [1000000, 1100000],
102 |             },
103 |             index=pd.date_range("2024-01-01", periods=2),
104 |         )
105 | 
106 |         mock_stock.get_or_create.return_value = Mock()
107 |         mock_bulk_insert.return_value = 2  # 2 records inserted
108 | 
109 |         # Test
110 |         result = self.service.cache_data("AAPL", data)
111 | 
112 |         # Assertions
113 |         assert result is True
114 |         mock_stock.get_or_create.assert_called_once_with(self.mock_session, "AAPL")
115 |         mock_bulk_insert.assert_called_once()
116 | 
117 |     def test_cache_data_empty_dataframe(self):
118 |         """Test caching with empty DataFrame."""
119 |         empty_df = pd.DataFrame()
120 | 
121 |         # Test
122 |         result = self.service.cache_data("AAPL", empty_df)
123 | 
124 |         # Assertions
125 |         assert result is True  # Should succeed but do nothing
126 | 
127 |     @patch(
128 |         "maverick_mcp.infrastructure.caching.cache_management_service.bulk_insert_price_data"
129 |     )
130 |     @patch("maverick_mcp.infrastructure.caching.cache_management_service.Stock")
131 |     def test_cache_data_error(self, mock_stock, mock_bulk_insert):
132 |         """Test data caching with database error."""
133 |         data = pd.DataFrame(
134 |             {
135 |                 "Open": [150.0],
136 |                 "Close": [151.0],
137 |             },
138 |             index=pd.date_range("2024-01-01", periods=1),
139 |         )
140 | 
141 |         mock_stock.get_or_create.side_effect = Exception("Database error")
142 | 
143 |         # Test
144 |         result = self.service.cache_data("AAPL", data)
145 | 
146 |         # Assertions
147 |         assert result is False
148 |         self.mock_session.rollback.assert_called_once()
149 | 
150 |     @patch("maverick_mcp.infrastructure.caching.cache_management_service.PriceCache")
151 |     def test_invalidate_cache_success(self, mock_price_cache):
152 |         """Test successful cache invalidation."""
153 |         mock_price_cache.delete_price_data.return_value = 5  # 5 records deleted
154 | 
155 |         # Test
156 |         result = self.service.invalidate_cache("AAPL", "2024-01-01", "2024-01-02")
157 | 
158 |         # Assertions
159 |         assert result is True
160 |         mock_price_cache.delete_price_data.assert_called_once_with(
161 |             self.mock_session, "AAPL", "2024-01-01", "2024-01-02"
162 |         )
163 | 
164 |     @patch("maverick_mcp.infrastructure.caching.cache_management_service.PriceCache")
165 |     def test_invalidate_cache_error(self, mock_price_cache):
166 |         """Test cache invalidation with database error."""
167 |         mock_price_cache.delete_price_data.side_effect = Exception("Database error")
168 | 
169 |         # Test
170 |         result = self.service.invalidate_cache("AAPL", "2024-01-01", "2024-01-02")
171 | 
172 |         # Assertions
173 |         assert result is False
174 | 
175 |     @patch("maverick_mcp.infrastructure.caching.cache_management_service.PriceCache")
176 |     def test_get_cache_stats_success(self, mock_price_cache):
177 |         """Test successful cache statistics retrieval."""
178 |         mock_stats = {
179 |             "total_records": 100,
180 |             "date_range": {"start": "2024-01-01", "end": "2024-01-31"},
181 |             "last_updated": "2024-01-31",
182 |         }
183 |         mock_price_cache.get_cache_stats.return_value = mock_stats
184 | 
185 |         # Test
186 |         result = self.service.get_cache_stats("AAPL")
187 | 
188 |         # Assertions
189 |         assert result["symbol"] == "AAPL"
190 |         assert result["total_records"] == 100
191 |         assert result["date_range"] == {"start": "2024-01-01", "end": "2024-01-31"}
192 | 
193 |     @patch("maverick_mcp.infrastructure.caching.cache_management_service.PriceCache")
194 |     def test_get_cache_stats_error(self, mock_price_cache):
195 |         """Test cache statistics retrieval with database error."""
196 |         mock_price_cache.get_cache_stats.side_effect = Exception("Database error")
197 | 
198 |         # Test
199 |         result = self.service.get_cache_stats("AAPL")
200 | 
201 |         # Assertions
202 |         assert result["symbol"] == "AAPL"
203 |         assert result["total_records"] == 0
204 |         assert result["last_updated"] is None
205 | 
206 |     def test_normalize_cached_data(self):
207 |         """Test data normalization from cache format."""
208 |         # Mock data in database format
209 |         data = pd.DataFrame(
210 |             {
211 |                 "open": [150.0, 151.0],
212 |                 "high": [152.0, 153.0],
213 |                 "low": [149.0, 150.0],
214 |                 "close": [151.0, 152.0],
215 |                 "volume": ["1000000", "1100000"],  # String volume to test conversion
216 |             },
217 |             index=pd.date_range("2024-01-01", periods=2),
218 |         )
219 | 
220 |         # Test
221 |         result = self.service._normalize_cached_data(data)
222 | 
223 |         # Assertions
224 |         assert "Open" in result.columns
225 |         assert "High" in result.columns
226 |         assert "Close" in result.columns
227 |         assert "Volume" in result.columns
228 |         assert "Dividends" in result.columns
229 |         assert "Stock Splits" in result.columns
230 | 
231 |         # Check data types
232 |         assert result["Volume"].dtype == "int64"
233 |         assert result["Open"].dtype == "float64"
234 | 
235 |     def test_prepare_data_for_cache(self):
236 |         """Test data preparation for caching."""
237 |         # Mock data in yfinance format
238 |         data = pd.DataFrame(
239 |             {
240 |                 "Open": [150.0, 151.0],
241 |                 "High": [152.0, 153.0],
242 |                 "Low": [149.0, 150.0],
243 |                 "Close": [151.0, 152.0],
244 |                 "Volume": [1000000, 1100000],
245 |             },
246 |             index=pd.date_range("2024-01-01", periods=2),
247 |         )
248 | 
249 |         # Test
250 |         result = self.service._prepare_data_for_cache(data)
251 | 
252 |         # Assertions
253 |         assert "open" in result.columns
254 |         assert "high" in result.columns
255 |         assert "close" in result.columns
256 |         assert "volume" in result.columns
257 | 
258 |     @patch("maverick_mcp.infrastructure.caching.cache_management_service.SessionLocal")
259 |     def test_get_db_session_without_injected_session(self, mock_session_local):
260 |         """Test database session creation when no session is injected."""
261 |         service = CacheManagementService()  # No injected session
262 |         mock_session = Mock()
263 |         mock_session_local.return_value = mock_session
264 | 
265 |         # Test
266 |         session, should_close = service._get_db_session()
267 | 
268 |         # Assertions
269 |         assert session == mock_session
270 |         assert should_close is True
271 | 
272 |     def test_get_db_session_with_injected_session(self):
273 |         """Test database session retrieval with injected session."""
274 |         # Test
275 |         session, should_close = self.service._get_db_session()
276 | 
277 |         # Assertions
278 |         assert session == self.mock_session
279 |         assert should_close is False
280 | 
281 |     def test_check_cache_health_success(self):
282 |         """Test successful cache health check."""
283 |         # Mock successful query
284 |         mock_result = Mock()
285 |         self.mock_session.execute.return_value = mock_result
286 |         mock_result.fetchone.return_value = (1,)
287 |         self.mock_session.query.return_value.count.return_value = 1000
288 | 
289 |         # Test
290 |         result = self.service.check_cache_health()
291 | 
292 |         # Assertions
293 |         assert result["status"] == "healthy"
294 |         assert result["database_connection"] is True
295 |         assert result["total_cached_records"] == 1000
296 | 
297 |     def test_check_cache_health_failure(self):
298 |         """Test cache health check with database error."""
299 |         self.mock_session.execute.side_effect = Exception("Connection failed")
300 | 
301 |         # Test
302 |         result = self.service.check_cache_health()
303 | 
304 |         # Assertions
305 |         assert result["status"] == "unhealthy"
306 |         assert result["database_connection"] is False
307 |         assert "error" in result
308 | 
```
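
For contrast with the mocked paths above, a minimal sketch of the service in normal use; with no injected session it opens its own via the `SessionLocal` path patched in the tests (the symbol and dates are examples):

```python
from maverick_mcp.infrastructure.caching import CacheManagementService

service = CacheManagementService(cache_days=7)

df = service.get_cached_data("AAPL", "2024-01-01", "2024-01-31")
if df is not None:
    # Columns come back normalized, as asserted in the tests above.
    print(df[["Open", "Close", "Volume"]].tail())

print(service.check_cache_health()["status"])  # "healthy" or "unhealthy"
```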

--------------------------------------------------------------------------------
/maverick_mcp/providers/factories/provider_factory.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Provider factory for dependency injection and lifecycle management.
  3 | 
  4 | This module provides a centralized factory for creating and managing
  5 | provider instances with proper dependency injection and configuration.
  6 | """
  7 | 
  8 | import logging
  9 | 
 10 | from maverick_mcp.providers.implementations.cache_adapter import RedisCacheAdapter
 11 | from maverick_mcp.providers.implementations.macro_data_adapter import MacroDataAdapter
 12 | from maverick_mcp.providers.implementations.market_data_adapter import MarketDataAdapter
 13 | from maverick_mcp.providers.implementations.persistence_adapter import (
 14 |     SQLAlchemyPersistenceAdapter,
 15 | )
 16 | from maverick_mcp.providers.implementations.stock_data_adapter import StockDataAdapter
 17 | from maverick_mcp.providers.interfaces.cache import CacheConfig, ICacheManager
 18 | from maverick_mcp.providers.interfaces.config import IConfigurationProvider
 19 | from maverick_mcp.providers.interfaces.macro_data import (
 20 |     IMacroDataProvider,
 21 |     MacroDataConfig,
 22 | )
 23 | from maverick_mcp.providers.interfaces.market_data import (
 24 |     IMarketDataProvider,
 25 |     MarketDataConfig,
 26 | )
 27 | from maverick_mcp.providers.interfaces.persistence import (
 28 |     DatabaseConfig,
 29 |     IDataPersistence,
 30 | )
 31 | from maverick_mcp.providers.interfaces.stock_data import (
 32 |     IStockDataFetcher,
 33 |     IStockScreener,
 34 | )
 35 | 
 36 | logger = logging.getLogger(__name__)
 37 | 
 38 | 
 39 | class ProviderFactory:
 40 |     """
 41 |     Factory class for creating and managing provider instances.
 42 | 
 43 |     This factory handles dependency injection, configuration, and lifecycle
 44 |     management for all providers in the system. It ensures that providers
 45 |     are properly configured and that dependencies are satisfied.
 46 |     """
 47 | 
 48 |     def __init__(self, config: IConfigurationProvider):
 49 |         """
 50 |         Initialize the provider factory.
 51 | 
 52 |         Args:
 53 |             config: Configuration provider for accessing settings
 54 |         """
 55 |         self._config = config
 56 |         self._cache_manager: ICacheManager | None = None
 57 |         self._persistence: IDataPersistence | None = None
 58 |         self._stock_data_fetcher: IStockDataFetcher | None = None
 59 |         self._stock_screener: IStockScreener | None = None
 60 |         self._market_data_provider: IMarketDataProvider | None = None
 61 |         self._macro_data_provider: IMacroDataProvider | None = None
 62 | 
 63 |         logger.debug("ProviderFactory initialized")
 64 | 
 65 |     def get_cache_manager(self) -> ICacheManager:
 66 |         """
 67 |         Get or create a cache manager instance.
 68 | 
 69 |         Returns:
 70 |             ICacheManager implementation
 71 |         """
 72 |         if self._cache_manager is None:
 73 |             cache_config = CacheConfig(
 74 |                 enabled=self._config.is_cache_enabled(),
 75 |                 default_ttl=self._config.get_cache_ttl(),
 76 |                 redis_host=self._config.get_redis_host(),
 77 |                 redis_port=self._config.get_redis_port(),
 78 |                 redis_db=self._config.get_redis_db(),
 79 |                 redis_password=self._config.get_redis_password(),
 80 |                 redis_ssl=self._config.get_redis_ssl(),
 81 |             )
 82 |             self._cache_manager = RedisCacheAdapter(config=cache_config)
 83 |             logger.debug("Cache manager created")
 84 | 
 85 |         return self._cache_manager
 86 | 
 87 |     def get_persistence(self) -> IDataPersistence:
 88 |         """
 89 |         Get or create a persistence instance.
 90 | 
 91 |         Returns:
 92 |             IDataPersistence implementation
 93 |         """
 94 |         if self._persistence is None:
 95 |             db_config = DatabaseConfig(
 96 |                 database_url=self._config.get_database_url(),
 97 |                 pool_size=self._config.get_pool_size(),
 98 |                 max_overflow=self._config.get_max_overflow(),
 99 |             )
100 |             self._persistence = SQLAlchemyPersistenceAdapter(config=db_config)
101 |             logger.debug("Persistence adapter created")
102 | 
103 |         return self._persistence
104 | 
105 |     def get_stock_data_fetcher(self) -> IStockDataFetcher:
106 |         """
107 |         Get or create a stock data fetcher instance.
108 | 
109 |         Returns:
110 |             IStockDataFetcher implementation
111 |         """
112 |         if self._stock_data_fetcher is None:
113 |             self._stock_data_fetcher = StockDataAdapter(
114 |                 cache_manager=self.get_cache_manager(),
115 |                 persistence=self.get_persistence(),
116 |                 config=self._config,
117 |             )
118 |             logger.debug("Stock data fetcher created")
119 | 
120 |         return self._stock_data_fetcher
121 | 
122 |     def get_stock_screener(self) -> IStockScreener:
123 |         """
124 |         Get or create a stock screener instance.
125 | 
126 |         Returns:
127 |             IStockScreener implementation
128 |         """
129 |         if self._stock_screener is None:
130 |             # The StockDataAdapter implements both interfaces
131 |             adapter = self.get_stock_data_fetcher()
132 |             if isinstance(adapter, IStockScreener):
133 |                 self._stock_screener = adapter
134 |             else:
135 |                 # This shouldn't happen with our current implementation
136 |                 raise RuntimeError(
137 |                     "Stock data fetcher does not implement IStockScreener"
138 |                 )
139 |             logger.debug("Stock screener created")
140 | 
141 |         return self._stock_screener
142 | 
143 |     def get_market_data_provider(self) -> IMarketDataProvider:
144 |         """
145 |         Get or create a market data provider instance.
146 | 
147 |         Returns:
148 |             IMarketDataProvider implementation
149 |         """
150 |         if self._market_data_provider is None:
151 |             market_config = MarketDataConfig(
152 |                 external_api_key=self._config.get_external_api_key(),
153 |                 tiingo_api_key=self._config.get_tiingo_api_key(),
154 |                 request_timeout=self._config.get_request_timeout(),
155 |                 max_retries=self._config.get_max_retries(),
156 |             )
157 |             self._market_data_provider = MarketDataAdapter(config=market_config)
158 |             logger.debug("Market data provider created")
159 | 
160 |         return self._market_data_provider
161 | 
162 |     def get_macro_data_provider(self) -> IMacroDataProvider:
163 |         """
164 |         Get or create a macro data provider instance.
165 | 
166 |         Returns:
167 |             IMacroDataProvider implementation
168 |         """
169 |         if self._macro_data_provider is None:
170 |             macro_config = MacroDataConfig(
171 |                 fred_api_key=self._config.get_fred_api_key(),
172 |                 request_timeout=self._config.get_request_timeout(),
173 |                 max_retries=self._config.get_max_retries(),
174 |                 cache_ttl=self._config.get_cache_ttl(),
175 |             )
176 |             self._macro_data_provider = MacroDataAdapter(config=macro_config)
177 |             logger.debug("Macro data provider created")
178 | 
179 |         return self._macro_data_provider
180 | 
181 |     def create_stock_data_fetcher(
182 |         self,
183 |         cache_manager: ICacheManager | None = None,
184 |         persistence: IDataPersistence | None = None,
185 |     ) -> IStockDataFetcher:
186 |         """
187 |         Create a new stock data fetcher instance with optional dependencies.
188 | 
189 |         Args:
190 |             cache_manager: Optional cache manager override
191 |             persistence: Optional persistence override
192 | 
193 |         Returns:
194 |             New IStockDataFetcher instance
195 |         """
196 |         return StockDataAdapter(
197 |             cache_manager=cache_manager or self.get_cache_manager(),
198 |             persistence=persistence or self.get_persistence(),
199 |             config=self._config,
200 |         )
201 | 
202 |     def create_market_data_provider(
203 |         self, config_override: MarketDataConfig | None = None
204 |     ) -> IMarketDataProvider:
205 |         """
206 |         Create a new market data provider instance with optional config override.
207 | 
208 |         Args:
209 |             config_override: Optional market data configuration override
210 | 
211 |         Returns:
212 |             New IMarketDataProvider instance
213 |         """
214 |         if config_override:
215 |             return MarketDataAdapter(config=config_override)
216 |         else:
217 |             return MarketDataAdapter(
218 |                 config=MarketDataConfig(
219 |                     external_api_key=self._config.get_external_api_key(),
220 |                     tiingo_api_key=self._config.get_tiingo_api_key(),
221 |                     request_timeout=self._config.get_request_timeout(),
222 |                     max_retries=self._config.get_max_retries(),
223 |                 )
224 |             )
225 | 
226 |     def create_macro_data_provider(
227 |         self, config_override: MacroDataConfig | None = None
228 |     ) -> IMacroDataProvider:
229 |         """
230 |         Create a new macro data provider instance with optional config override.
231 | 
232 |         Args:
233 |             config_override: Optional macro data configuration override
234 | 
235 |         Returns:
236 |             New IMacroDataProvider instance
237 |         """
238 |         if config_override:
239 |             return MacroDataAdapter(config=config_override)
240 |         else:
241 |             return MacroDataAdapter(
242 |                 config=MacroDataConfig(
243 |                     fred_api_key=self._config.get_fred_api_key(),
244 |                     request_timeout=self._config.get_request_timeout(),
245 |                     max_retries=self._config.get_max_retries(),
246 |                     cache_ttl=self._config.get_cache_ttl(),
247 |                 )
248 |             )
249 | 
250 |     def reset_cache(self) -> None:
251 |         """
252 |         Reset all cached provider instances.
253 | 
254 |         This forces the factory to create new instances on the next request,
255 |         which can be useful for testing or configuration changes.
256 |         """
257 |         self._cache_manager = None
258 |         self._persistence = None
259 |         self._stock_data_fetcher = None
260 |         self._stock_screener = None
261 |         self._market_data_provider = None
262 |         self._macro_data_provider = None
263 | 
264 |         logger.debug("Provider factory cache reset")
265 | 
266 |     def get_all_providers(self) -> dict[str, object]:
267 |         """
268 |         Get all provider instances for introspection or testing.
269 | 
270 |         Returns:
271 |             Dictionary mapping provider names to instances
272 |         """
273 |         return {
274 |             "cache_manager": self.get_cache_manager(),
275 |             "persistence": self.get_persistence(),
276 |             "stock_data_fetcher": self.get_stock_data_fetcher(),
277 |             "stock_screener": self.get_stock_screener(),
278 |             "market_data_provider": self.get_market_data_provider(),
279 |             "macro_data_provider": self.get_macro_data_provider(),
280 |         }
281 | 
282 |     def validate_configuration(self) -> list[str]:
283 |         """
284 |         Validate that all required configuration is present.
285 | 
286 |         Returns:
287 |             List of validation errors (empty if valid)
288 |         """
289 |         errors = []
290 | 
291 |         # Check database configuration
292 |         if not self._config.get_database_url():
293 |             errors.append("Database URL is not configured")
294 | 
295 |         # Check cache configuration if enabled
296 |         if self._config.is_cache_enabled():
297 |             if not self._config.get_redis_host():
298 |                 errors.append("Redis host is not configured but caching is enabled")
299 | 
300 |         # Check API keys (warn but don't fail)
301 |         if not self._config.get_fred_api_key():
302 |             logger.warning(
303 |                 "FRED API key is not configured - macro data will be limited"
304 |             )
305 | 
306 |         if not self._config.get_external_api_key():
307 |             logger.warning(
308 |                 "External API key is not configured - market data will use fallbacks"
309 |             )
310 | 
311 |         return errors
312 | 
```
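A minimal wiring sketch for the factory above; `bootstrap_providers` and its `config` argument (any `IConfigurationProvider` implementation) are illustrative assumptions. It contrasts the two creation styles the factory offers: lazy singleton getters versus fresh `create_*` instances:

```python
from maverick_mcp.providers.factories.provider_factory import ProviderFactory


def bootstrap_providers(config) -> ProviderFactory:
    """Validate configuration, then warm the provider singletons."""
    factory = ProviderFactory(config)

    errors = factory.validate_configuration()
    if errors:
        # Hard requirements (database URL, Redis host when caching is
        # enabled) surface here; missing API keys only log warnings.
        raise RuntimeError(f"Provider configuration invalid: {errors}")

    # Getters memoize: both calls return the same adapter instance.
    fetcher = factory.get_stock_data_fetcher()
    assert fetcher is factory.get_stock_data_fetcher()

    # create_* bypasses the singleton cache, e.g. for isolated tests.
    throwaway = factory.create_stock_data_fetcher()
    assert throwaway is not fetcher

    return factory
```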

--------------------------------------------------------------------------------
/maverick_mcp/infrastructure/data_fetching/stock_data_service.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Stock Data Fetching Service - Responsible only for fetching data from external sources.
  3 | """
  4 | 
  5 | import logging
  6 | from datetime import UTC, datetime, timedelta
  7 | from typing import Any
  8 | 
  9 | import pandas as pd
 10 | import yfinance as yf
 11 | 
 12 | from maverick_mcp.utils.circuit_breaker_decorators import (
 13 |     with_stock_data_circuit_breaker,
 14 | )
 15 | 
 16 | logger = logging.getLogger("maverick_mcp.stock_data_fetching")
 17 | 
 18 | 
 19 | class StockDataFetchingService:
 20 |     """
 21 |     Service responsible ONLY for fetching stock data from external sources.
 22 | 
 23 |     This service:
 24 |     - Handles data fetching from yfinance, Alpha Vantage, etc.
 25 |     - Manages fallback logic between data sources
 26 |     - Contains no caching logic
 27 |     - Contains no business logic beyond data retrieval
 28 |     """
 29 | 
 30 |     def __init__(self, timeout: int = 30, max_retries: int = 3):
 31 |         """
 32 |         Initialize the stock data fetching service.
 33 | 
 34 |         Args:
 35 |             timeout: Request timeout in seconds
 36 |             max_retries: Maximum number of retries for failed requests
 37 |         """
 38 |         self.timeout = timeout
 39 |         self.max_retries = max_retries
 40 | 
 41 |     @with_stock_data_circuit_breaker(use_fallback=False)
 42 |     def fetch_stock_data(
 43 |         self,
 44 |         symbol: str,
 45 |         start_date: str | None = None,
 46 |         end_date: str | None = None,
 47 |         period: str | None = None,
 48 |         interval: str = "1d",
 49 |     ) -> pd.DataFrame:
 50 |         """
 51 |         Fetch stock data from yfinance with circuit breaker protection.
 52 | 
 53 |         Args:
 54 |             symbol: Stock ticker symbol
 55 |             start_date: Start date in YYYY-MM-DD format
 56 |             end_date: End date in YYYY-MM-DD format
 57 |             period: Alternative to start/end dates (e.g., '1d', '5d', '1mo', etc.)
 58 |             interval: Data interval ('1d', '1wk', '1mo', '1m', '5m', etc.)
 59 | 
 60 |         Returns:
 61 |             DataFrame with stock data
 62 | 
 63 |         Raises:
 64 |             Exception: If data fetching fails after retries
 65 |         """
 66 |         logger.info(
 67 |             f"Fetching data from yfinance for {symbol} - "
 68 |             f"Start: {start_date}, End: {end_date}, Period: {period}, Interval: {interval}"
 69 |         )
 70 | 
 71 |         ticker = yf.Ticker(symbol)
 72 | 
 73 |         if period:
 74 |             df = ticker.history(period=period, interval=interval)
 75 |         else:
 76 |             if start_date is None:
 77 |                 start_date = (datetime.now(UTC) - timedelta(days=365)).strftime(
 78 |                     "%Y-%m-%d"
 79 |                 )
 80 |             if end_date is None:
 81 |                 end_date = datetime.now(UTC).strftime("%Y-%m-%d")
 82 |             df = ticker.history(start=start_date, end=end_date, interval=interval)
 83 | 
 84 |         # Validate and clean the data
 85 |         df = self._validate_and_clean_data(df, symbol)
 86 |         return df
 87 | 
 88 |     def _validate_and_clean_data(self, df: pd.DataFrame, symbol: str) -> pd.DataFrame:
 89 |         """
 90 |         Validate and clean the fetched data.
 91 | 
 92 |         Args:
 93 |             df: Raw DataFrame from data source
 94 |             symbol: Stock symbol for logging
 95 | 
 96 |         Returns:
 97 |             Cleaned DataFrame
 98 |         """
 99 |         # Check if dataframe is empty
100 |         if df.empty:
101 |             logger.warning(f"Empty dataframe returned for {symbol}")
102 |             return pd.DataFrame(columns=["Open", "High", "Low", "Close", "Volume"])
103 | 
104 |         # Ensure all expected columns exist
105 |         required_columns = ["Open", "High", "Low", "Close", "Volume"]
106 |         for col in required_columns:
107 |             if col not in df.columns:
108 |                 logger.warning(
109 |                     f"Column {col} missing from data for {symbol}, adding default value"
110 |                 )
111 |                 if col == "Volume":
112 |                     df[col] = 0
113 |                 else:
114 |                     df[col] = 0.0
115 | 
116 |         # Set index name
117 |         df.index.name = "Date"
118 | 
119 |         # Ensure data types
120 |         df["Volume"] = df["Volume"].fillna(0).astype(int)  # NaN-safe int cast
121 |         for col in ["Open", "High", "Low", "Close"]:
122 |             df[col] = df[col].astype(float)
123 | 
124 |         return df
125 | 
126 |     @with_stock_data_circuit_breaker(use_fallback=False)
127 |     def fetch_stock_info(self, symbol: str) -> dict[str, Any]:
128 |         """
129 |         Fetch detailed stock information.
130 | 
131 |         Args:
132 |             symbol: Stock ticker symbol
133 | 
134 |         Returns:
135 |             Dictionary with stock information
136 |         """
137 |         logger.info(f"Fetching stock info for {symbol}")
138 |         ticker = yf.Ticker(symbol)
139 |         return ticker.info
140 | 
141 |     def fetch_realtime_data(self, symbol: str) -> dict[str, Any] | None:
142 |         """
143 |         Fetch real-time data for a single symbol.
144 | 
145 |         Args:
146 |             symbol: Stock ticker symbol
147 | 
148 |         Returns:
149 |             Dictionary with real-time data or None if failed
150 |         """
151 |         try:
152 |             logger.info(f"Fetching real-time data for {symbol}")
153 |             ticker = yf.Ticker(symbol)
154 |             data = ticker.history(period="1d")
155 | 
156 |             if data.empty:
157 |                 logger.warning(f"No real-time data available for {symbol}")
158 |                 return None
159 | 
160 |             latest = data.iloc[-1]
161 | 
162 |             # Get previous close for change calculation
163 |             prev_close = ticker.info.get("previousClose", None)
164 |             if prev_close is None:
165 |                 # Try to get from 2-day history
166 |                 data_2d = ticker.history(period="2d")
167 |                 if len(data_2d) > 1:
168 |                     prev_close = data_2d.iloc[0]["Close"]
169 |                 else:
170 |                     prev_close = latest["Close"]
171 | 
172 |             # Calculate change
173 |             price = latest["Close"]
174 |             change = price - prev_close
175 |             change_percent = (change / prev_close) * 100 if prev_close != 0 else 0
176 | 
177 |             return {
178 |                 "symbol": symbol,
179 |                 "price": round(price, 2),
180 |                 "change": round(change, 2),
181 |                 "change_percent": round(change_percent, 2),
182 |                 "volume": int(latest["Volume"]),
183 |                 "timestamp": data.index[-1],
184 |                 "timestamp_display": data.index[-1].strftime("%Y-%m-%d %H:%M:%S"),
185 |                 "is_real_time": False,  # yfinance data has some delay
186 |             }
187 |         except Exception as e:
188 |             logger.error(f"Error fetching realtime data for {symbol}: {str(e)}")
189 |             return None
190 | 
191 |     def fetch_multiple_realtime_data(self, symbols: list[str]) -> dict[str, Any]:
192 |         """
193 |         Fetch real-time data for multiple symbols.
194 | 
195 |         Args:
196 |             symbols: List of stock ticker symbols
197 | 
198 |         Returns:
199 |             Dictionary mapping symbols to their real-time data
200 |         """
201 |         logger.info(f"Fetching real-time data for {len(symbols)} symbols")
202 |         results = {}
203 |         for symbol in symbols:
204 |             data = self.fetch_realtime_data(symbol)
205 |             if data:
206 |                 results[symbol] = data
207 |         return results
208 | 
209 |     def fetch_news(self, symbol: str, limit: int = 10) -> pd.DataFrame:
210 |         """
211 |         Fetch news for a stock.
212 | 
213 |         Args:
214 |             symbol: Stock ticker symbol
215 |             limit: Maximum number of news items
216 | 
217 |         Returns:
218 |             DataFrame with news data
219 |         """
220 |         try:
221 |             logger.info(f"Fetching news for {symbol}")
222 |             ticker = yf.Ticker(symbol)
223 |             news = ticker.news
224 | 
225 |             if not news:
226 |                 return pd.DataFrame(
227 |                     columns=[
228 |                         "title",
229 |                         "publisher",
230 |                         "link",
231 |                         "providerPublishTime",
232 |                         "type",
233 |                     ]
234 |                 )
235 | 
236 |             df = pd.DataFrame(news[:limit])
237 | 
238 |             # Convert timestamp to datetime
239 |             if "providerPublishTime" in df.columns:
240 |                 df["providerPublishTime"] = pd.to_datetime(
241 |                     df["providerPublishTime"], unit="s"
242 |                 )
243 | 
244 |             return df
245 |         except Exception as e:
246 |             logger.error(f"Error fetching news for {symbol}: {str(e)}")
247 |             return pd.DataFrame(
248 |                 columns=["title", "publisher", "link", "providerPublishTime", "type"]
249 |             )
250 | 
251 |     def fetch_earnings(self, symbol: str) -> dict[str, Any]:
252 |         """
253 |         Fetch earnings information for a stock.
254 | 
255 |         Args:
256 |             symbol: Stock ticker symbol
257 | 
258 |         Returns:
259 |             Dictionary with earnings data
260 |         """
261 |         try:
262 |             logger.info(f"Fetching earnings for {symbol}")
263 |             ticker = yf.Ticker(symbol)
264 |             return {
265 |                 "earnings": ticker.earnings.to_dict()
266 |                 if hasattr(ticker, "earnings") and not ticker.earnings.empty
267 |                 else {},
268 |                 "earnings_dates": ticker.earnings_dates.to_dict()
269 |                 if hasattr(ticker, "earnings_dates") and not ticker.earnings_dates.empty
270 |                 else {},
271 |                 "earnings_trend": ticker.earnings_trend
272 |                 if hasattr(ticker, "earnings_trend")
273 |                 else {},
274 |             }
275 |         except Exception as e:
276 |             logger.error(f"Error fetching earnings for {symbol}: {str(e)}")
277 |             return {"earnings": {}, "earnings_dates": {}, "earnings_trend": {}}
278 | 
279 |     def fetch_recommendations(self, symbol: str) -> pd.DataFrame:
280 |         """
281 |         Fetch analyst recommendations for a stock.
282 | 
283 |         Args:
284 |             symbol: Stock ticker symbol
285 | 
286 |         Returns:
287 |             DataFrame with recommendations
288 |         """
289 |         try:
290 |             logger.info(f"Fetching recommendations for {symbol}")
291 |             ticker = yf.Ticker(symbol)
292 |             recommendations = ticker.recommendations
293 | 
294 |             if recommendations is None or recommendations.empty:
295 |                 return pd.DataFrame(columns=["firm", "toGrade", "fromGrade", "action"])
296 | 
297 |             return recommendations
298 |         except Exception as e:
299 |             logger.error(f"Error fetching recommendations for {symbol}: {str(e)}")
300 |             return pd.DataFrame(columns=["firm", "toGrade", "fromGrade", "action"])
301 | 
302 |     def check_if_etf(self, symbol: str) -> bool:
303 |         """
304 |         Check if a given symbol is an ETF.
305 | 
306 |         Args:
307 |             symbol: Stock ticker symbol
308 | 
309 |         Returns:
310 |             True if symbol is an ETF
311 |         """
312 |         try:
313 |             logger.debug(f"Checking if {symbol} is an ETF")
314 |             stock = yf.Ticker(symbol)
315 | 
316 |             # Check if quoteType exists and is ETF
317 |             if "quoteType" in stock.info:
318 |                 return stock.info["quoteType"].upper() == "ETF"
319 | 
320 |             # Fallback check for common ETF identifiers
321 |             common_etfs = [
322 |                 "SPY",
323 |                 "QQQ",
324 |                 "IWM",
325 |                 "DIA",
326 |                 "XLB",
327 |                 "XLE",
328 |                 "XLF",
329 |                 "XLI",
330 |                 "XLK",
331 |                 "XLP",
332 |                 "XLU",
333 |                 "XLV",
334 |                 "XLY",
335 |                 "XLC",
336 |                 "XLRE",
337 |                 "XME",
338 |             ]
339 | 
340 |             # longName can be missing or None, so guard before .upper()
341 |             return (
342 |                 symbol.endswith(("ETF", "FUND"))
343 |                 or symbol in common_etfs
344 |                 or "ETF" in (stock.info.get("longName") or "").upper()
345 |             )
346 | 
347 |         except Exception as e:
348 |             logger.error(f"Error checking if {symbol} is ETF: {e}")
349 |             return False
350 | 
```
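A short usage sketch for the fetching service above, assuming live network access to Yahoo Finance; only methods defined in the file are used:

```python
from maverick_mcp.infrastructure.data_fetching.stock_data_service import (
    StockDataFetchingService,
)

service = StockDataFetchingService(timeout=10, max_retries=2)

# Date-range and period-based fetches are alternative paths: when
# `period` is supplied, start/end dates are ignored.
df = service.fetch_stock_data(
    "AAPL", start_date="2024-01-01", end_date="2024-06-30"
)
df_month = service.fetch_stock_data("AAPL", period="1mo", interval="1d")

# Real-time helpers degrade gracefully: None for a single symbol,
# a silently smaller dict for a batch.
quote = service.fetch_realtime_data("AAPL")
if quote is not None:
    print(f"{quote['symbol']}: {quote['price']} ({quote['change_percent']:+.2f}%)")

quotes = service.fetch_multiple_realtime_data(["AAPL", "MSFT", "NOSUCHTICKER"])
```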
Page 8/39