This is page 11 of 39. Use http://codebase.md/wshobson/maverick-mcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .dockerignore
├── .env.example
├── .github
│   ├── dependabot.yml
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── config.yml
│   │   ├── feature_request.md
│   │   ├── question.md
│   │   └── security_report.md
│   ├── pull_request_template.md
│   └── workflows
│       ├── claude-code-review.yml
│       └── claude.yml
├── .gitignore
├── .python-version
├── .vscode
│   ├── launch.json
│   └── settings.json
├── alembic
│   ├── env.py
│   ├── script.py.mako
│   └── versions
│       ├── 001_initial_schema.py
│       ├── 003_add_performance_indexes.py
│       ├── 006_rename_metadata_columns.py
│       ├── 008_performance_optimization_indexes.py
│       ├── 009_rename_to_supply_demand.py
│       ├── 010_self_contained_schema.py
│       ├── 011_remove_proprietary_terms.py
│       ├── 013_add_backtest_persistence_models.py
│       ├── 014_add_portfolio_models.py
│       ├── 08e3945a0c93_merge_heads.py
│       ├── 9374a5c9b679_merge_heads_for_testing.py
│       ├── abf9b9afb134_merge_multiple_heads.py
│       ├── adda6d3fd84b_merge_proprietary_terms_removal_with_.py
│       ├── e0c75b0bdadb_fix_financial_data_precision_only.py
│       ├── f0696e2cac15_add_essential_performance_indexes.py
│       └── fix_database_integrity_issues.py
├── alembic.ini
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── DATABASE_SETUP.md
├── docker-compose.override.yml.example
├── docker-compose.yml
├── Dockerfile
├── docs
│   ├── api
│   │   └── backtesting.md
│   ├── BACKTESTING.md
│   ├── COST_BASIS_SPECIFICATION.md
│   ├── deep_research_agent.md
│   ├── exa_research_testing_strategy.md
│   ├── PORTFOLIO_PERSONALIZATION_PLAN.md
│   ├── PORTFOLIO.md
│   ├── SETUP_SELF_CONTAINED.md
│   └── speed_testing_framework.md
├── examples
│   ├── complete_speed_validation.py
│   ├── deep_research_integration.py
│   ├── llm_optimization_example.py
│   ├── llm_speed_demo.py
│   ├── monitoring_example.py
│   ├── parallel_research_example.py
│   ├── speed_optimization_demo.py
│   └── timeout_fix_demonstration.py
├── LICENSE
├── Makefile
├── MANIFEST.in
├── maverick_mcp
│   ├── __init__.py
│   ├── agents
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── circuit_breaker.py
│   │   ├── deep_research.py
│   │   ├── market_analysis.py
│   │   ├── optimized_research.py
│   │   ├── supervisor.py
│   │   └── technical_analysis.py
│   ├── api
│   │   ├── __init__.py
│   │   ├── api_server.py
│   │   ├── connection_manager.py
│   │   ├── dependencies
│   │   │   ├── __init__.py
│   │   │   ├── stock_analysis.py
│   │   │   └── technical_analysis.py
│   │   ├── error_handling.py
│   │   ├── inspector_compatible_sse.py
│   │   ├── inspector_sse.py
│   │   ├── middleware
│   │   │   ├── error_handling.py
│   │   │   ├── mcp_logging.py
│   │   │   ├── rate_limiting_enhanced.py
│   │   │   └── security.py
│   │   ├── openapi_config.py
│   │   ├── routers
│   │   │   ├── __init__.py
│   │   │   ├── agents.py
│   │   │   ├── backtesting.py
│   │   │   ├── data_enhanced.py
│   │   │   ├── data.py
│   │   │   ├── health_enhanced.py
│   │   │   ├── health_tools.py
│   │   │   ├── health.py
│   │   │   ├── intelligent_backtesting.py
│   │   │   ├── introspection.py
│   │   │   ├── mcp_prompts.py
│   │   │   ├── monitoring.py
│   │   │   ├── news_sentiment_enhanced.py
│   │   │   ├── performance.py
│   │   │   ├── portfolio.py
│   │   │   ├── research.py
│   │   │   ├── screening_ddd.py
│   │   │   ├── screening_parallel.py
│   │   │   ├── screening.py
│   │   │   ├── technical_ddd.py
│   │   │   ├── technical_enhanced.py
│   │   │   ├── technical.py
│   │   │   └── tool_registry.py
│   │   ├── server.py
│   │   ├── services
│   │   │   ├── __init__.py
│   │   │   ├── base_service.py
│   │   │   ├── market_service.py
│   │   │   ├── portfolio_service.py
│   │   │   ├── prompt_service.py
│   │   │   └── resource_service.py
│   │   ├── simple_sse.py
│   │   └── utils
│   │       ├── __init__.py
│   │       ├── insomnia_export.py
│   │       └── postman_export.py
│   ├── application
│   │   ├── __init__.py
│   │   ├── commands
│   │   │   └── __init__.py
│   │   ├── dto
│   │   │   ├── __init__.py
│   │   │   └── technical_analysis_dto.py
│   │   ├── queries
│   │   │   ├── __init__.py
│   │   │   └── get_technical_analysis.py
│   │   └── screening
│   │       ├── __init__.py
│   │       ├── dtos.py
│   │       └── queries.py
│   ├── backtesting
│   │   ├── __init__.py
│   │   ├── ab_testing.py
│   │   ├── analysis.py
│   │   ├── batch_processing_stub.py
│   │   ├── batch_processing.py
│   │   ├── model_manager.py
│   │   ├── optimization.py
│   │   ├── persistence.py
│   │   ├── retraining_pipeline.py
│   │   ├── strategies
│   │   │   ├── __init__.py
│   │   │   ├── base.py
│   │   │   ├── ml
│   │   │   │   ├── __init__.py
│   │   │   │   ├── adaptive.py
│   │   │   │   ├── ensemble.py
│   │   │   │   ├── feature_engineering.py
│   │   │   │   └── regime_aware.py
│   │   │   ├── ml_strategies.py
│   │   │   ├── parser.py
│   │   │   └── templates.py
│   │   ├── strategy_executor.py
│   │   ├── vectorbt_engine.py
│   │   └── visualization.py
│   ├── config
│   │   ├── __init__.py
│   │   ├── constants.py
│   │   ├── database_self_contained.py
│   │   ├── database.py
│   │   ├── llm_optimization_config.py
│   │   ├── logging_settings.py
│   │   ├── plotly_config.py
│   │   ├── security_utils.py
│   │   ├── security.py
│   │   ├── settings.py
│   │   ├── technical_constants.py
│   │   ├── tool_estimation.py
│   │   └── validation.py
│   ├── core
│   │   ├── __init__.py
│   │   ├── technical_analysis.py
│   │   └── visualization.py
│   ├── data
│   │   ├── __init__.py
│   │   ├── cache_manager.py
│   │   ├── cache.py
│   │   ├── django_adapter.py
│   │   ├── health.py
│   │   ├── models.py
│   │   ├── performance.py
│   │   ├── session_management.py
│   │   └── validation.py
│   ├── database
│   │   ├── __init__.py
│   │   ├── base.py
│   │   └── optimization.py
│   ├── dependencies.py
│   ├── domain
│   │   ├── __init__.py
│   │   ├── entities
│   │   │   ├── __init__.py
│   │   │   └── stock_analysis.py
│   │   ├── events
│   │   │   └── __init__.py
│   │   ├── portfolio.py
│   │   ├── screening
│   │   │   ├── __init__.py
│   │   │   ├── entities.py
│   │   │   ├── services.py
│   │   │   └── value_objects.py
│   │   ├── services
│   │   │   ├── __init__.py
│   │   │   └── technical_analysis_service.py
│   │   ├── stock_analysis
│   │   │   ├── __init__.py
│   │   │   └── stock_analysis_service.py
│   │   └── value_objects
│   │       ├── __init__.py
│   │       └── technical_indicators.py
│   ├── exceptions.py
│   ├── infrastructure
│   │   ├── __init__.py
│   │   ├── cache
│   │   │   └── __init__.py
│   │   ├── caching
│   │   │   ├── __init__.py
│   │   │   └── cache_management_service.py
│   │   ├── connection_manager.py
│   │   ├── data_fetching
│   │   │   ├── __init__.py
│   │   │   └── stock_data_service.py
│   │   ├── health
│   │   │   ├── __init__.py
│   │   │   └── health_checker.py
│   │   ├── persistence
│   │   │   ├── __init__.py
│   │   │   └── stock_repository.py
│   │   ├── providers
│   │   │   └── __init__.py
│   │   ├── screening
│   │   │   ├── __init__.py
│   │   │   └── repositories.py
│   │   └── sse_optimizer.py
│   ├── langchain_tools
│   │   ├── __init__.py
│   │   ├── adapters.py
│   │   └── registry.py
│   ├── logging_config.py
│   ├── memory
│   │   ├── __init__.py
│   │   └── stores.py
│   ├── monitoring
│   │   ├── __init__.py
│   │   ├── health_check.py
│   │   ├── health_monitor.py
│   │   ├── integration_example.py
│   │   ├── metrics.py
│   │   ├── middleware.py
│   │   └── status_dashboard.py
│   ├── providers
│   │   ├── __init__.py
│   │   ├── dependencies.py
│   │   ├── factories
│   │   │   ├── __init__.py
│   │   │   ├── config_factory.py
│   │   │   └── provider_factory.py
│   │   ├── implementations
│   │   │   ├── __init__.py
│   │   │   ├── cache_adapter.py
│   │   │   ├── macro_data_adapter.py
│   │   │   ├── market_data_adapter.py
│   │   │   ├── persistence_adapter.py
│   │   │   └── stock_data_adapter.py
│   │   ├── interfaces
│   │   │   ├── __init__.py
│   │   │   ├── cache.py
│   │   │   ├── config.py
│   │   │   ├── macro_data.py
│   │   │   ├── market_data.py
│   │   │   ├── persistence.py
│   │   │   └── stock_data.py
│   │   ├── llm_factory.py
│   │   ├── macro_data.py
│   │   ├── market_data.py
│   │   ├── mocks
│   │   │   ├── __init__.py
│   │   │   ├── mock_cache.py
│   │   │   ├── mock_config.py
│   │   │   ├── mock_macro_data.py
│   │   │   ├── mock_market_data.py
│   │   │   ├── mock_persistence.py
│   │   │   └── mock_stock_data.py
│   │   ├── openrouter_provider.py
│   │   ├── optimized_screening.py
│   │   ├── optimized_stock_data.py
│   │   └── stock_data.py
│   ├── README.md
│   ├── tests
│   │   ├── __init__.py
│   │   ├── README_INMEMORY_TESTS.md
│   │   ├── test_cache_debug.py
│   │   ├── test_fixes_validation.py
│   │   ├── test_in_memory_routers.py
│   │   ├── test_in_memory_server.py
│   │   ├── test_macro_data_provider.py
│   │   ├── test_mailgun_email.py
│   │   ├── test_market_calendar_caching.py
│   │   ├── test_mcp_tool_fixes_pytest.py
│   │   ├── test_mcp_tool_fixes.py
│   │   ├── test_mcp_tools.py
│   │   ├── test_models_functional.py
│   │   ├── test_server.py
│   │   ├── test_stock_data_enhanced.py
│   │   ├── test_stock_data_provider.py
│   │   └── test_technical_analysis.py
│   ├── tools
│   │   ├── __init__.py
│   │   ├── performance_monitoring.py
│   │   ├── portfolio_manager.py
│   │   ├── risk_management.py
│   │   └── sentiment_analysis.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── agent_errors.py
│   │   ├── batch_processing.py
│   │   ├── cache_warmer.py
│   │   ├── circuit_breaker_decorators.py
│   │   ├── circuit_breaker_services.py
│   │   ├── circuit_breaker.py
│   │   ├── data_chunking.py
│   │   ├── database_monitoring.py
│   │   ├── debug_utils.py
│   │   ├── fallback_strategies.py
│   │   ├── llm_optimization.py
│   │   ├── logging_example.py
│   │   ├── logging_init.py
│   │   ├── logging.py
│   │   ├── mcp_logging.py
│   │   ├── memory_profiler.py
│   │   ├── monitoring_middleware.py
│   │   ├── monitoring.py
│   │   ├── orchestration_logging.py
│   │   ├── parallel_research.py
│   │   ├── parallel_screening.py
│   │   ├── quick_cache.py
│   │   ├── resource_manager.py
│   │   ├── shutdown.py
│   │   ├── stock_helpers.py
│   │   ├── structured_logger.py
│   │   ├── tool_monitoring.py
│   │   ├── tracing.py
│   │   └── yfinance_pool.py
│   ├── validation
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── data.py
│   │   ├── middleware.py
│   │   ├── portfolio.py
│   │   ├── responses.py
│   │   ├── screening.py
│   │   └── technical.py
│   └── workflows
│       ├── __init__.py
│       ├── agents
│       │   ├── __init__.py
│       │   ├── market_analyzer.py
│       │   ├── optimizer_agent.py
│       │   ├── strategy_selector.py
│       │   └── validator_agent.py
│       ├── backtesting_workflow.py
│       └── state.py
├── PLANS.md
├── pyproject.toml
├── pyrightconfig.json
├── README.md
├── scripts
│   ├── dev.sh
│   ├── INSTALLATION_GUIDE.md
│   ├── load_example.py
│   ├── load_market_data.py
│   ├── load_tiingo_data.py
│   ├── migrate_db.py
│   ├── README_TIINGO_LOADER.md
│   ├── requirements_tiingo.txt
│   ├── run_stock_screening.py
│   ├── run-migrations.sh
│   ├── seed_db.py
│   ├── seed_sp500.py
│   ├── setup_database.sh
│   ├── setup_self_contained.py
│   ├── setup_sp500_database.sh
│   ├── test_seeded_data.py
│   ├── test_tiingo_loader.py
│   ├── tiingo_config.py
│   └── validate_setup.py
├── SECURITY.md
├── server.json
├── setup.py
├── tests
│   ├── __init__.py
│   ├── conftest.py
│   ├── core
│   │   └── test_technical_analysis.py
│   ├── data
│   │   └── test_portfolio_models.py
│   ├── domain
│   │   ├── conftest.py
│   │   ├── test_portfolio_entities.py
│   │   └── test_technical_analysis_service.py
│   ├── fixtures
│   │   └── orchestration_fixtures.py
│   ├── integration
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── README.md
│   │   ├── run_integration_tests.sh
│   │   ├── test_api_technical.py
│   │   ├── test_chaos_engineering.py
│   │   ├── test_config_management.py
│   │   ├── test_full_backtest_workflow_advanced.py
│   │   ├── test_full_backtest_workflow.py
│   │   ├── test_high_volume.py
│   │   ├── test_mcp_tools.py
│   │   ├── test_orchestration_complete.py
│   │   ├── test_portfolio_persistence.py
│   │   ├── test_redis_cache.py
│   │   ├── test_security_integration.py.disabled
│   │   └── vcr_setup.py
│   ├── performance
│   │   ├── __init__.py
│   │   ├── test_benchmarks.py
│   │   ├── test_load.py
│   │   ├── test_profiling.py
│   │   └── test_stress.py
│   ├── providers
│   │   └── test_stock_data_simple.py
│   ├── README.md
│   ├── test_agents_router_mcp.py
│   ├── test_backtest_persistence.py
│   ├── test_cache_management_service.py
│   ├── test_cache_serialization.py
│   ├── test_circuit_breaker.py
│   ├── test_database_pool_config_simple.py
│   ├── test_database_pool_config.py
│   ├── test_deep_research_functional.py
│   ├── test_deep_research_integration.py
│   ├── test_deep_research_parallel_execution.py
│   ├── test_error_handling.py
│   ├── test_event_loop_integrity.py
│   ├── test_exa_research_integration.py
│   ├── test_exception_hierarchy.py
│   ├── test_financial_search.py
│   ├── test_graceful_shutdown.py
│   ├── test_integration_simple.py
│   ├── test_langgraph_workflow.py
│   ├── test_market_data_async.py
│   ├── test_market_data_simple.py
│   ├── test_mcp_orchestration_functional.py
│   ├── test_ml_strategies.py
│   ├── test_optimized_research_agent.py
│   ├── test_orchestration_integration.py
│   ├── test_orchestration_logging.py
│   ├── test_orchestration_tools_simple.py
│   ├── test_parallel_research_integration.py
│   ├── test_parallel_research_orchestrator.py
│   ├── test_parallel_research_performance.py
│   ├── test_performance_optimizations.py
│   ├── test_production_validation.py
│   ├── test_provider_architecture.py
│   ├── test_rate_limiting_enhanced.py
│   ├── test_runner_validation.py
│   ├── test_security_comprehensive.py.disabled
│   ├── test_security_cors.py
│   ├── test_security_enhancements.py.disabled
│   ├── test_security_headers.py
│   ├── test_security_penetration.py
│   ├── test_session_management.py
│   ├── test_speed_optimization_validation.py
│   ├── test_stock_analysis_dependencies.py
│   ├── test_stock_analysis_service.py
│   ├── test_stock_data_fetching_service.py
│   ├── test_supervisor_agent.py
│   ├── test_supervisor_functional.py
│   ├── test_tool_estimation_config.py
│   ├── test_visualization.py
│   └── utils
│       ├── test_agent_errors.py
│       ├── test_logging.py
│       ├── test_parallel_screening.py
│       └── test_quick_cache.py
├── tools
│   ├── check_orchestration_config.py
│   ├── experiments
│   │   ├── validation_examples.py
│   │   └── validation_fixed.py
│   ├── fast_dev.sh
│   ├── hot_reload.py
│   ├── quick_test.py
│   └── templates
│       ├── new_router_template.py
│       ├── new_tool_template.py
│       ├── screening_strategy_template.py
│       └── test_template.py
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/maverick_mcp/api/services/resource_service.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Resource service for MaverickMCP API.
  3 | 
  4 | Handles MCP resources including health endpoints and stock data resources.
  5 | Extracted from server.py to improve code organization and maintainability.
  6 | """
  7 | 
  8 | from typing import Any
  9 | 
 10 | from .base_service import BaseService
 11 | 
 12 | 
 13 | class ResourceService(BaseService):
 14 |     """
 15 |     Service class for MCP resource operations.
 16 | 
 17 |     Provides health endpoints, stock data resources, and other MCP resources.
 18 |     """
 19 | 
 20 |     def register_tools(self):
 21 |         """Register resource endpoints with MCP."""
 22 | 
 23 |         @self.mcp.resource("health://")
 24 |         def health_resource() -> dict[str, Any]:
 25 |             """
 26 |             Comprehensive health check endpoint.
 27 | 
 28 |             Returns system health status including database, Redis, and external services.
 29 |             """
 30 |             return self._health_check()
 31 | 
 32 |         @self.mcp.resource("stock://{ticker}")
 33 |         def stock_resource(ticker: str) -> Any:
 34 |             """
 35 |             Get stock data resource for a specific ticker.
 36 | 
 37 |             Args:
 38 |                 ticker: Stock ticker symbol
 39 | 
 40 |             Returns:
 41 |                 Stock data resource
 42 |             """
 43 |             return self._get_stock_resource(ticker)
 44 | 
 45 |         @self.mcp.resource("stock://{ticker}/{start_date}/{end_date}")
 46 |         def stock_resource_with_dates(
 47 |             ticker: str, start_date: str, end_date: str
 48 |         ) -> Any:
 49 |             """
 50 |             Get stock data resource for a specific ticker and date range.
 51 | 
 52 |             Args:
 53 |                 ticker: Stock ticker symbol
 54 |                 start_date: Start date (YYYY-MM-DD)
 55 |                 end_date: End date (YYYY-MM-DD)
 56 | 
 57 |             Returns:
 58 |                 Stock data resource for the specified date range
 59 |             """
 60 |             return self._get_stock_resource_with_dates(ticker, start_date, end_date)
 61 | 
 62 |         @self.mcp.resource("stock_info://{ticker}")
 63 |         def stock_info_resource(ticker: str) -> dict[str, Any]:
 64 |             """
 65 |             Get stock information resource for a specific ticker.
 66 | 
 67 |             Args:
 68 |                 ticker: Stock ticker symbol
 69 | 
 70 |             Returns:
 71 |                 Stock information resource
 72 |             """
 73 |             return self._get_stock_info_resource(ticker)
 74 | 
 75 |     def _health_check(self) -> dict[str, Any]:
 76 |         """Comprehensive health check implementation."""
 77 |         from maverick_mcp.config.validation import get_validation_status
 78 |         from maverick_mcp.data.cache import get_redis_client
 79 |         from maverick_mcp.data.health import get_database_health
 80 | 
 81 |         health_status = {
 82 |             "status": "healthy",
 83 |             "timestamp": self._get_current_timestamp(),
 84 |             "version": "1.0.0",
 85 |             "environment": "production" if not self.is_debug_mode() else "development",
 86 |             "services": {},
 87 |             "configuration": {},
 88 |         }
 89 | 
 90 |         # Check database health
 91 |         try:
 92 |             db_health = get_database_health()
 93 |             health_status["services"]["database"] = {
 94 |                 "status": "healthy" if db_health.get("connected") else "unhealthy",
 95 |                 "details": db_health,
 96 |             }
 97 |         except Exception as e:
 98 |             health_status["services"]["database"] = {
 99 |                 "status": "unhealthy",
100 |                 "error": str(e),
101 |             }
102 |             health_status["status"] = "degraded"
103 | 
104 |         # Check Redis health
105 |         try:
106 |             redis_client = get_redis_client()
107 |             if redis_client:
108 |                 redis_client.ping()
109 |                 health_status["services"]["redis"] = {
110 |                     "status": "healthy",
111 |                     "cache_enabled": True,
112 |                 }
113 |             else:
114 |                 health_status["services"]["redis"] = {
115 |                     "status": "unavailable",
116 |                     "cache_enabled": False,
117 |                     "fallback": "in-memory cache",
118 |                 }
119 |         except Exception as e:
120 |             health_status["services"]["redis"] = {
121 |                 "status": "unhealthy",
122 |                 "error": str(e),
123 |                 "fallback": "in-memory cache",
124 |             }
125 |             if health_status["status"] == "healthy":
126 |                 health_status["status"] = "degraded"
127 | 
128 |         # Check authentication service
129 |         health_status["services"]["authentication"] = {
130 |             "status": "healthy" if self.is_auth_enabled() else "disabled",
131 |             "enabled": self.is_auth_enabled(),
132 |         }
133 | 
134 |         # Configuration validation
135 |         try:
136 |             validation_status = get_validation_status()
137 |             health_status["configuration"] = {
138 |                 "status": "valid" if validation_status.get("valid") else "invalid",
139 |                 "details": validation_status,
140 |             }
141 |         except Exception as e:
142 |             health_status["configuration"] = {
143 |                 "status": "error",
144 |                 "error": str(e),
145 |             }
146 |             health_status["status"] = "unhealthy"
147 | 
148 |         # External services check (stock data providers)
149 |         health_status["services"]["stock_data"] = self._check_stock_data_providers()
150 | 
151 |         self.log_tool_usage("health_check", status=health_status["status"])
152 | 
153 |         return health_status
154 | 
155 |     def _check_stock_data_providers(self) -> dict[str, Any]:
156 |         """Check health of stock data providers."""
157 |         try:
158 |             from maverick_mcp.providers.stock_data import StockDataProvider
159 | 
160 |             provider = StockDataProvider()
161 | 
162 |             # Test with a simple request
163 |             test_data = provider.get_stock_data("AAPL", days=1)
164 | 
165 |             if not test_data.empty:
166 |                 return {
167 |                     "status": "healthy",
168 |                     "provider": "yfinance",
169 |                     "last_test": self._get_current_timestamp(),
170 |                 }
171 |             else:
172 |                 return {
173 |                     "status": "degraded",
174 |                     "provider": "yfinance",
175 |                     "issue": "Empty data returned",
176 |                 }
177 | 
178 |         except Exception as e:
179 |             return {
180 |                 "status": "unhealthy",
181 |                 "provider": "yfinance",
182 |                 "error": str(e),
183 |             }
184 | 
185 |     def _get_stock_resource(self, ticker: str) -> Any:
186 |         """Get stock resource implementation."""
187 |         try:
188 |             from maverick_mcp.providers.stock_data import StockDataProvider
189 | 
190 |             provider = StockDataProvider()
191 | 
192 |             # Get recent stock data (30 days)
193 |             df = provider.get_stock_data(ticker.upper(), days=30)
194 | 
195 |             if df.empty:
196 |                 return {
197 |                     "error": f"No data available for ticker {ticker}",
198 |                     "ticker": ticker.upper(),
199 |                 }
200 | 
201 |             # Convert DataFrame to resource format
202 |             resource_data = {
203 |                 "ticker": ticker.upper(),
204 |                 "data_points": len(df),
205 |                 "date_range": {
206 |                     "start": df.index[0].isoformat(),
207 |                     "end": df.index[-1].isoformat(),
208 |                 },
209 |                 "latest_price": float(df["Close"].iloc[-1]),
210 |                 "price_change": float(df["Close"].iloc[-1] - df["Close"].iloc[-2])
211 |                 if len(df) > 1
212 |                 else 0,
213 |                 "volume": int(df["Volume"].iloc[-1]),
 214 |                 "high_52w": float(df["High"].max()),  # note: computed over the 30-day window above, not a full 52 weeks
215 |                 "low_52w": float(df["Low"].min()),
216 |                 "data": df.to_dict(orient="records"),
217 |             }
218 | 
219 |             self.log_tool_usage("stock_resource", ticker=ticker)
220 | 
221 |             return resource_data
222 | 
223 |         except Exception as e:
224 |             self.logger.error(f"Failed to get stock resource for {ticker}: {e}")
225 |             return {
226 |                 "error": f"Failed to fetch stock data: {str(e)}",
227 |                 "ticker": ticker.upper(),
228 |             }
229 | 
230 |     def _get_stock_resource_with_dates(
231 |         self, ticker: str, start_date: str, end_date: str
232 |     ) -> Any:
233 |         """Get stock resource with date range implementation."""
234 |         try:
235 |             from datetime import datetime
236 | 
237 |             from maverick_mcp.providers.stock_data import StockDataProvider
238 | 
239 |             # Validate date format
240 |             try:
241 |                 datetime.strptime(start_date, "%Y-%m-%d")
242 |                 datetime.strptime(end_date, "%Y-%m-%d")
243 |             except ValueError:
244 |                 return {
245 |                     "error": "Invalid date format. Use YYYY-MM-DD format.",
246 |                     "start_date": start_date,
247 |                     "end_date": end_date,
248 |                 }
249 | 
250 |             provider = StockDataProvider()
251 | 
252 |             # Get stock data for specified date range
253 |             df = provider.get_stock_data(ticker.upper(), start_date, end_date)
254 | 
255 |             if df.empty:
256 |                 return {
257 |                     "error": f"No data available for ticker {ticker} in date range {start_date} to {end_date}",
258 |                     "ticker": ticker.upper(),
259 |                     "start_date": start_date,
260 |                     "end_date": end_date,
261 |                 }
262 | 
263 |             # Convert DataFrame to resource format
264 |             resource_data = {
265 |                 "ticker": ticker.upper(),
266 |                 "start_date": start_date,
267 |                 "end_date": end_date,
268 |                 "data_points": len(df),
269 |                 "actual_date_range": {
270 |                     "start": df.index[0].isoformat(),
271 |                     "end": df.index[-1].isoformat(),
272 |                 },
273 |                 "price_summary": {
274 |                     "open": float(df["Open"].iloc[0]),
275 |                     "close": float(df["Close"].iloc[-1]),
276 |                     "high": float(df["High"].max()),
277 |                     "low": float(df["Low"].min()),
278 |                     "change": float(df["Close"].iloc[-1] - df["Open"].iloc[0]),
279 |                     "change_pct": float(
280 |                         (
281 |                             (df["Close"].iloc[-1] - df["Open"].iloc[0])
282 |                             / df["Open"].iloc[0]
283 |                         )
284 |                         * 100
285 |                     ),
286 |                 },
287 |                 "volume_summary": {
288 |                     "total": int(df["Volume"].sum()),
289 |                     "average": int(df["Volume"].mean()),
290 |                     "max": int(df["Volume"].max()),
291 |                 },
292 |                 "data": df.to_dict(orient="records"),
293 |             }
294 | 
295 |             self.log_tool_usage(
296 |                 "stock_resource_with_dates",
297 |                 ticker=ticker,
298 |                 start_date=start_date,
299 |                 end_date=end_date,
300 |             )
301 | 
302 |             return resource_data
303 | 
304 |         except Exception as e:
305 |             self.logger.error(
306 |                 f"Failed to get stock resource for {ticker} ({start_date} to {end_date}): {e}"
307 |             )
308 |             return {
309 |                 "error": f"Failed to fetch stock data: {str(e)}",
310 |                 "ticker": ticker.upper(),
311 |                 "start_date": start_date,
312 |                 "end_date": end_date,
313 |             }
314 | 
315 |     def _get_stock_info_resource(self, ticker: str) -> dict[str, Any]:
316 |         """Get stock info resource implementation."""
317 |         try:
318 |             from maverick_mcp.providers.stock_data import StockDataProvider
319 | 
320 |             provider = StockDataProvider()
321 | 
322 |             # Get stock information
323 |             stock_info = provider.get_stock_info(ticker.upper())
324 | 
325 |             if not stock_info:
326 |                 return {
327 |                     "error": f"No information available for ticker {ticker}",
328 |                     "ticker": ticker.upper(),
329 |                 }
330 | 
331 |             # Format stock info resource
332 |             resource_data = {
333 |                 "ticker": ticker.upper(),
334 |                 "company_name": stock_info.get(
335 |                     "longName", stock_info.get("shortName", "N/A")
336 |                 ),
337 |                 "sector": stock_info.get("sector", "N/A"),
338 |                 "industry": stock_info.get("industry", "N/A"),
339 |                 "market_cap": stock_info.get("marketCap"),
340 |                 "enterprise_value": stock_info.get("enterpriseValue"),
341 |                 "pe_ratio": stock_info.get("trailingPE"),
342 |                 "forward_pe": stock_info.get("forwardPE"),
343 |                 "price_to_book": stock_info.get("priceToBook"),
344 |                 "dividend_yield": stock_info.get("dividendYield"),
345 |                 "beta": stock_info.get("beta"),
346 |                 "52_week_high": stock_info.get("fiftyTwoWeekHigh"),
347 |                 "52_week_low": stock_info.get("fiftyTwoWeekLow"),
348 |                 "average_volume": stock_info.get("averageVolume"),
349 |                 "shares_outstanding": stock_info.get("sharesOutstanding"),
350 |                 "float_shares": stock_info.get("floatShares"),
351 |                 "business_summary": stock_info.get("longBusinessSummary", "N/A"),
352 |                 "website": stock_info.get("website"),
353 |                 "employees": stock_info.get("fullTimeEmployees"),
354 |                 "last_updated": self._get_current_timestamp(),
355 |             }
356 | 
357 |             self.log_tool_usage("stock_info_resource", ticker=ticker)
358 | 
359 |             return resource_data
360 | 
361 |         except Exception as e:
362 |             self.logger.error(f"Failed to get stock info resource for {ticker}: {e}")
363 |             return {
364 |                 "error": f"Failed to fetch stock information: {str(e)}",
365 |                 "ticker": ticker.upper(),
366 |             }
367 | 
368 |     def _get_current_timestamp(self) -> str:
369 |         """Get current timestamp in ISO format."""
370 |         from datetime import UTC, datetime
371 | 
372 |         return datetime.now(UTC).isoformat()
373 | 
```
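
For orientation, the sketch below shows how this service might be wired into a FastMCP server. It assumes `BaseService.__init__` accepts the `FastMCP` instance (that constructor lives in `base_service.py`, which is not shown on this page), so treat the call signature as illustrative rather than the actual API.

```python
# Hypothetical wiring sketch; assumes BaseService stores the FastMCP
# instance passed to its constructor (not shown on this page).
from fastmcp import FastMCP

from maverick_mcp.api.services.resource_service import ResourceService

mcp = FastMCP("maverick-mcp")

# Registers the health://, stock://, and stock_info:// resources on the server.
ResourceService(mcp).register_tools()
```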

--------------------------------------------------------------------------------
/scripts/README_TIINGO_LOADER.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Tiingo Data Loader for Maverick-MCP
  2 | 
  3 | A comprehensive, production-ready data loader for fetching market data from Tiingo API and storing it in the Maverick-MCP database with technical indicators and screening algorithms.
  4 | 
  5 | ## Features
  6 | 
  7 | ### 🚀 Core Capabilities
  8 | - **Async Operations**: High-performance async data fetching with configurable concurrency
  9 | - **Rate Limiting**: Built-in rate limiting to respect Tiingo's 2400 requests/hour limit
 10 | - **Progress Tracking**: Resume capability with checkpoint files for interrupted loads
 11 | - **Error Handling**: Exponential backoff retry logic with comprehensive error handling
 12 | - **Batch Processing**: Efficient batch processing with configurable batch sizes
 13 | 
 14 | ### 📊 Data Processing
 15 | - **Technical Indicators**: 50+ technical indicators using pandas-ta
 16 | - **Screening Algorithms**: Built-in Maverick, Bear Market, and Supply/Demand screens
 17 | - **Database Optimization**: Bulk inserts with connection pooling
 18 | - **Data Validation**: Comprehensive data validation and cleaning
 19 | 
 20 | ### 🎛️ Flexible Configuration
 21 | - **Multiple Symbol Sources**: S&P 500, custom files, individual symbols, or all Tiingo-supported tickers
 22 | - **Date Range Control**: Configurable date ranges with year-based shortcuts
 23 | - **Processing Options**: Enable/disable technical indicators and screening
 24 | - **Performance Tuning**: Adjustable concurrency, batch sizes, and timeouts
 25 | 
 26 | ## Installation
 27 | 
 28 | ### Prerequisites
 29 | 1. **Tiingo API Token**: Sign up at [tiingo.com](https://www.tiingo.com) and get your API token
 30 | 2. **Database**: Ensure Maverick-MCP database is set up and accessible
 31 | 3. **Python Dependencies**: pandas-ta, aiohttp, SQLAlchemy, and other requirements
 32 | 
 33 | ### Setup
 34 | ```bash
 35 | # Set your Tiingo API token
 36 | export TIINGO_API_TOKEN=your_token_here
 37 | 
 38 | # Set database URL (if different from default)
 39 | export DATABASE_URL=postgresql://user:pass@localhost/maverick_mcp
 40 | 
 41 | # Make scripts executable
 42 | chmod +x scripts/load_tiingo_data.py
 43 | chmod +x scripts/load_example.py
 44 | ```
 45 | 
 46 | ## Usage Examples
 47 | 
 48 | ### 1. Load S&P 500 Stocks (Top 100)
 49 | ```bash
 50 | # Load 2 years of data with technical indicators
 51 | python scripts/load_tiingo_data.py --sp500 --years 2 --calculate-indicators
 52 | 
 53 | # Load with screening algorithms
 54 | python scripts/load_tiingo_data.py --sp500 --years 1 --run-screening
 55 | ```
 56 | 
 57 | ### 2. Load Specific Symbols
 58 | ```bash
 59 | # Load individual stocks
 60 | python scripts/load_tiingo_data.py --symbols AAPL,MSFT,GOOGL,AMZN,TSLA --years 3
 61 | 
 62 | # Load with custom date range
 63 | python scripts/load_tiingo_data.py --symbols AAPL,MSFT --start-date 2020-01-01 --end-date 2023-12-31
 64 | ```
 65 | 
 66 | ### 3. Load from File
 67 | ```bash
 68 | # Create a symbol file
 69 | echo -e "AAPL\nMSFT\nGOOGL\nAMZN\nTSLA" > my_symbols.txt
 70 | 
 71 | # Load from file
 72 | python scripts/load_tiingo_data.py --file my_symbols.txt --calculate-indicators --run-screening
 73 | ```
 74 | 
 75 | ### 4. Resume Interrupted Load
 76 | ```bash
 77 | # If a load was interrupted, resume from checkpoint
 78 | python scripts/load_tiingo_data.py --resume --checkpoint-file load_progress.json
 79 | ```
 80 | 
 81 | ### 5. Performance-Optimized Load
 82 | ```bash
 83 | # High-performance loading with larger batches and more concurrency
 84 | python scripts/load_tiingo_data.py --sp500 --batch-size 100 --max-concurrent 10 --no-checkpoint
 85 | ```
 86 | 
 87 | ### 6. All Supported Tickers
 88 | ```bash
 89 | # Load all Tiingo-supported symbols (this will take a while!)
 90 | python scripts/load_tiingo_data.py --supported --batch-size 50 --max-concurrent 8
 91 | ```
 92 | 
 93 | ## Command Line Options
 94 | 
 95 | ### Symbol Selection (Required - choose one)
 96 | - `--symbols AAPL,MSFT,GOOGL` - Comma-separated list of symbols
 97 | - `--file symbols.txt` - Load symbols from file (one per line or comma-separated)
 98 | - `--sp500` - Load S&P 500 symbols (top 100 most liquid)
 99 | - `--sp500-full` - Load full S&P 500 (500 symbols)
100 | - `--supported` - Load all Tiingo-supported symbols
101 | - `--resume` - Resume from checkpoint file
102 | 
103 | ### Date Range Options
104 | - `--years 2` - Number of years of historical data (default: 2)
105 | - `--start-date 2020-01-01` - Custom start date (YYYY-MM-DD)
106 | - `--end-date 2023-12-31` - Custom end date (YYYY-MM-DD, default: today)
107 | 
108 | ### Processing Options
109 | - `--calculate-indicators` - Calculate technical indicators (default: True)
110 | - `--no-indicators` - Skip technical indicator calculations
111 | - `--run-screening` - Run screening algorithms after data loading
112 | 
113 | ### Performance Options
114 | - `--batch-size 50` - Batch size for processing (default: 50)
115 | - `--max-concurrent 5` - Maximum concurrent requests (default: 5)
116 | 
117 | ### Database Options
118 | - `--create-tables` - Create database tables if they don't exist
119 | - `--database-url` - Override database URL
120 | 
121 | ### Progress Tracking
122 | - `--checkpoint-file load_progress.json` - Checkpoint file location
123 | - `--no-checkpoint` - Disable checkpoint saving
124 | 
125 | ## Configuration
126 | 
127 | The loader can be customized through the `tiingo_config.py` file:
128 | 
129 | ```python
130 | from scripts.tiingo_config import TiingoConfig, get_config_for_environment
131 | 
132 | # Get environment-specific config
133 | config = get_config_for_environment('production')
134 | 
135 | # Customize settings
136 | config.max_concurrent_requests = 10
137 | config.default_batch_size = 100
138 | config.maverick_min_momentum_score = 80.0
139 | ```
140 | 
141 | ### Available Configurations
142 | - **Rate Limiting**: Requests per hour, retry settings
143 | - **Technical Indicators**: Periods for RSI, SMA, EMA, MACD, etc.
144 | - **Screening Criteria**: Minimum momentum scores, volume thresholds
145 | - **Database Settings**: Batch sizes, connection pooling
146 | - **Symbol Lists**: Predefined lists for different strategies
147 | 
148 | ## Technical Indicators Calculated
149 | 
150 | The loader calculates 50+ technical indicators including:
151 | 
152 | ### Trend Indicators
153 | - **SMA**: 20, 50, 150, 200-period Simple Moving Averages
154 | - **EMA**: 21-period Exponential Moving Average
155 | - **ADX**: Average Directional Index (14-period)
156 | 
157 | ### Momentum Indicators
158 | - **RSI**: Relative Strength Index (14-period)
159 | - **MACD**: Moving Average Convergence Divergence (12,26,9)
160 | - **Stochastic**: Stochastic Oscillator (14,3,3)
161 | - **Momentum Score**: Relative Strength vs Market
162 | 
163 | ### Volatility Indicators
164 | - **ATR**: Average True Range (14-period)
165 | - **Bollinger Bands**: 20-period with 2 standard deviations
166 | - **ADR**: Average Daily Range percentage
167 | 
168 | ### Volume Indicators
169 | - **Volume SMA**: 30-period volume average
170 | - **Volume Ratio**: Current vs average volume
171 | - **VWAP**: Volume Weighted Average Price
172 | 
173 | ### Custom Indicators
174 | - **Momentum**: 10 and 20-period price momentum
175 | - **BB Squeeze**: Bollinger Band squeeze detection
176 | - **Price Position**: Position relative to moving averages
177 | 
178 | ## Screening Algorithms
179 | 
180 | ### Maverick Momentum Screen
181 | Identifies stocks with strong upward momentum:
182 | - Price above 21-day EMA
183 | - EMA-21 above SMA-50
184 | - SMA-50 above SMA-200
185 | - Relative Strength Rating > 70
186 | - Minimum volume thresholds
187 | 
188 | ### Bear Market Screen  
189 | Identifies stocks in downtrends:
190 | - Price below 21-day EMA
191 | - EMA-21 below SMA-50
192 | - Relative Strength Rating < 30
193 | - High volume on down moves
194 | 
195 | ### Supply/Demand Breakout Screen
196 | Identifies accumulation patterns:
197 | - Price above SMA-50 and SMA-200
198 | - Strong relative strength (RS > 60)
199 | - Institutional accumulation signals
200 | - Volume dry-up followed by expansion
201 | 
202 | ## Progress Tracking & Resume
203 | 
204 | The loader automatically saves progress to a checkpoint file:
205 | 
206 | ```json
207 | {
208 |   "timestamp": "2024-01-15T10:30:00",
209 |   "total_symbols": 100,
210 |   "processed_symbols": 75,
211 |   "successful_symbols": 73,
212 |   "completed_symbols": ["AAPL", "MSFT", ...],
213 |   "failed_symbols": ["BADTICKER", "ANOTHERBAD"],
214 |   "errors": [...],
215 |   "elapsed_time": 3600
216 | }
217 | ```
218 | 
219 | To resume an interrupted load:
220 | ```bash
221 | python scripts/load_tiingo_data.py --resume --checkpoint-file load_progress.json
222 | ```
223 | 
224 | ## Error Handling
225 | 
226 | ### Automatic Retry Logic
227 | - **Exponential Backoff**: 1s, 2s, 4s delays between retries
228 | - **Rate Limit Handling**: Automatic delays when rate limited
229 | - **Connection Errors**: Automatic retry with timeout handling
230 | 
231 | ### Error Reporting
232 | - **Detailed Logging**: All errors logged with context
233 | - **Error Tracking**: Failed symbols tracked in checkpoint
234 | - **Graceful Degradation**: Continue processing other symbols on individual failures
235 | 
236 | ## Performance Optimization
237 | 
238 | ### Database Optimizations
239 | - **Bulk Inserts**: Use PostgreSQL's UPSERT for efficiency
240 | - **Connection Pooling**: Reuse database connections
241 | - **Batch Processing**: Process multiple symbols together
242 | - **Index Creation**: Automatically create performance indexes
243 | 
244 | ### Memory Management
245 | - **Streaming Processing**: Process data in chunks to minimize memory usage
246 | - **Garbage Collection**: Explicit cleanup of large DataFrames
247 | - **Connection Limits**: Prevent connection exhaustion
248 | 
249 | ### Monitoring
250 | ```bash
251 | # Monitor progress in real-time
252 | tail -f tiingo_data_loader.log
253 | 
254 | # Check database stats
255 | python scripts/load_tiingo_data.py --database-stats
256 | 
257 | # Monitor system resources
258 | htop  # CPU and memory usage
259 | iotop # Disk I/O usage
260 | ```
261 | 
262 | ## Examples and Testing
263 | 
264 | ### Interactive Examples
265 | ```bash
266 | # Run interactive examples
267 | python scripts/load_example.py
268 | ```
269 | 
270 | The example script provides:
271 | 1. Load sample stocks (5 symbols)
272 | 2. Load sector stocks (technology)  
273 | 3. Resume interrupted load demonstration
274 | 4. Database statistics viewer
275 | 
276 | ### Testing Different Configurations
277 | ```bash
278 | # Test with small dataset
279 | python scripts/load_tiingo_data.py --symbols AAPL,MSFT --years 1 --batch-size 10
280 | 
281 | # Test screening only (no new data)
282 | python scripts/load_tiingo_data.py --symbols AAPL --years 0.1 --run-screening
283 | 
284 | # Test resume functionality
285 | python scripts/load_tiingo_data.py --symbols AAPL,MSFT,GOOGL,AMZN,TSLA --batch-size 2
286 | # Interrupt with Ctrl+C, then resume:
287 | python scripts/load_tiingo_data.py --resume
288 | ```
289 | 
290 | ## Troubleshooting
291 | 
292 | ### Common Issues
293 | 
294 | #### 1. API Token Issues
295 | ```bash
296 | # Check if token is set
297 | echo $TIINGO_API_TOKEN
298 | 
299 | # Test API access
300 | curl -H "Authorization: Token $TIINGO_API_TOKEN" \
301 |      "https://api.tiingo.com/tiingo/daily/AAPL"
302 | ```
303 | 
304 | #### 2. Database Connection Issues
305 | ```bash
306 | # Check database URL
307 | echo $DATABASE_URL
308 | 
309 | # Test database connection
310 | python -c "from maverick_mcp.data.models import SessionLocal; print('DB OK')"
311 | ```
312 | 
313 | #### 3. Rate Limiting
314 | If you're getting rate limited frequently:
315 | - Reduce `--max-concurrent` (default: 5)
316 | - Increase `--batch-size` to reduce total requests
317 | - Consider upgrading to Tiingo's paid plan
318 | 
319 | #### 4. Memory Issues
320 | For large loads:
321 | - Reduce `--batch-size` 
322 | - Reduce `--max-concurrent`
323 | - Monitor memory usage with `htop`
324 | 
325 | #### 5. Checkpoint Corruption
326 | ```bash
327 | # Remove corrupted checkpoint
328 | rm load_progress.json
329 | 
330 | # Start fresh
331 | python scripts/load_tiingo_data.py --symbols AAPL,MSFT --no-checkpoint
332 | ```
333 | 
334 | ### Performance Benchmarks
335 | 
336 | Typical performance on modern hardware:
337 | - **Small Load (10 symbols, 1 year)**: 2-3 minutes
338 | - **Medium Load (100 symbols, 2 years)**: 15-20 minutes
339 | - **Large Load (500 symbols, 2 years)**: 1-2 hours
340 | - **Full Load (3000+ symbols, 2 years)**: 6-12 hours
341 | 
342 | ## Integration with Maverick-MCP
343 | 
344 | The loaded data integrates seamlessly with Maverick-MCP:
345 | 
346 | ### API Endpoints
347 | The data is immediately available through Maverick-MCP's API endpoints:
348 | - `/api/v1/stocks` - Stock information
349 | - `/api/v1/prices/{symbol}` - Price data  
350 | - `/api/v1/technical/{symbol}` - Technical indicators
351 | - `/api/v1/screening/maverick` - Maverick stock screen results
352 | 
353 | ### MCP Tools
354 | Use the loaded data in MCP tools:
355 | - `get_stock_analysis` - Comprehensive stock analysis
356 | - `run_screening` - Run custom screens
357 | - `get_technical_indicators` - Retrieve calculated indicators
358 | - `portfolio_analysis` - Analyze portfolio performance
359 | 
360 | ## Advanced Usage
361 | 
362 | ### Custom Symbol Lists
363 | Create sector-specific or strategy-specific symbol files:
364 | 
365 | ```bash
366 | # Create growth stock list
367 | cat > growth_stocks.txt << EOF
368 | TSLA
369 | NVDA
370 | AMZN
371 | GOOGL
372 | META
373 | NFLX
374 | CRM
375 | ADBE
376 | EOF
377 | 
378 | # Load growth stocks
379 | python scripts/load_tiingo_data.py --file growth_stocks.txt --run-screening
380 | ```
381 | 
382 | ### Automated Scheduling
383 | Set up daily data updates with cron:
384 | 
385 | ```bash
386 | # Add to crontab (crontab -e)
387 | # Daily update at 6 PM EST (after market close)
388 | 0 18 * * 1-5 cd /path/to/maverick-mcp && python scripts/load_tiingo_data.py --sp500 --years 0.1 --run-screening >> /var/log/tiingo_updates.log 2>&1
389 | 
390 | # Weekly full reload on weekends  
391 | 0 2 * * 6 cd /path/to/maverick-mcp && python scripts/load_tiingo_data.py --sp500 --years 2 --calculate-indicators --run-screening >> /var/log/tiingo_weekly.log 2>&1
392 | ```
393 | 
394 | ### Integration with CI/CD
395 | ```yaml
396 | # GitHub Actions workflow
397 | name: Update Market Data
398 | on:
399 |   schedule:
 400 |     - cron: '0 23 * * 1-5'  # 6 PM EST weekdays (GitHub Actions cron is evaluated in UTC)
401 | 
402 | jobs:
403 |   update-data:
404 |     runs-on: ubuntu-latest
405 |     steps:
406 |       - uses: actions/checkout@v2
407 |       - name: Setup Python
408 |         uses: actions/setup-python@v2
409 |         with:
410 |           python-version: '3.12'
411 |       - name: Install dependencies
412 |         run: pip install -r requirements.txt
413 |       - name: Update market data
414 |         env:
415 |           TIINGO_API_TOKEN: ${{ secrets.TIINGO_API_TOKEN }}
416 |           DATABASE_URL: ${{ secrets.DATABASE_URL }}
417 |         run: |
418 |           python scripts/load_tiingo_data.py --sp500 --years 0.1 --run-screening
419 | ```
420 | 
421 | ## Support and Contributing
422 | 
423 | ### Getting Help
424 | - **Documentation**: Check this README and inline code comments
425 | - **Logging**: Enable debug logging for detailed troubleshooting
426 | - **Examples**: Use the example script to understand usage patterns
427 | 
428 | ### Contributing
429 | To add new features or fix bugs:
430 | 1. Fork the repository
431 | 2. Create a feature branch
432 | 3. Add tests for new functionality
433 | 4. Submit a pull request
434 | 
435 | ### Feature Requests
436 | Common requested features:
437 | - Additional data providers (Alpha Vantage, Yahoo Finance)
438 | - More technical indicators
439 | - Custom screening algorithms
440 | - Real-time data streaming
441 | - Portfolio backtesting integration
```
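
The Maverick momentum criteria described above (price above the 21-day EMA, EMA-21 above SMA-50, SMA-50 above SMA-200, relative-strength rating over 70, minimum volume) reduce to a few rolling-window comparisons. The sketch below only illustrates those conditions with plain pandas; the loader's actual screening logic lives in the scripts and may differ, and the column names, the `momentum_score` argument, and the volume floor are assumptions.

```python
# Illustrative check of the Maverick momentum screen criteria listed in the
# README; not the project's actual implementation. Column names, the
# momentum_score argument, and the volume floor are assumptions.
import pandas as pd


def passes_maverick_screen(
    df: pd.DataFrame, momentum_score: float, min_avg_volume: float = 500_000
) -> bool:
    """Return True if the most recent bar satisfies the momentum criteria."""
    close = df["close"]
    ema_21 = close.ewm(span=21, adjust=False).mean()
    sma_50 = close.rolling(50).mean()
    sma_200 = close.rolling(200).mean()
    avg_volume_30 = df["volume"].rolling(30).mean()
    return bool(
        close.iloc[-1] > ema_21.iloc[-1]        # price above 21-day EMA
        and ema_21.iloc[-1] > sma_50.iloc[-1]   # EMA-21 above SMA-50
        and sma_50.iloc[-1] > sma_200.iloc[-1]  # SMA-50 above SMA-200
        and momentum_score > 70                 # relative-strength rating > 70
        and avg_volume_30.iloc[-1] > min_avg_volume
    )
```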

--------------------------------------------------------------------------------
/maverick_mcp/tests/test_in_memory_server.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | In-memory tests for Maverick-MCP server using FastMCP patterns.
  3 | 
  4 | These tests demonstrate how to test the server without external processes
  5 | or network calls, using FastMCP's in-memory transport capabilities.
  6 | """
  7 | 
  8 | import asyncio
  9 | from datetime import datetime, timedelta
 10 | from typing import Any
 11 | from unittest.mock import Mock, patch
 12 | 
 13 | import pytest
 14 | from fastmcp import Client
 15 | from sqlalchemy import create_engine
 16 | from sqlalchemy.orm import Session
 17 | 
 18 | from maverick_mcp.api.server import mcp
 19 | from maverick_mcp.data.models import Base, PriceCache, Stock
 20 | 
 21 | 
 22 | @pytest.fixture
 23 | def mock_redis():
 24 |     """Mock Redis client for testing."""
 25 |     with patch("maverick_mcp.data.cache._get_redis_client") as mock_redis:
 26 |         # Mock Redis client
 27 |         redis_instance = Mock()
 28 |         redis_instance.get.return_value = None
 29 |         redis_instance.set.return_value = True
 30 |         redis_instance.delete.return_value = True
 31 |         redis_instance.ping.return_value = True
 32 |         mock_redis.return_value = redis_instance
 33 |         yield redis_instance
 34 | 
 35 | 
 36 | @pytest.fixture
 37 | def test_db():
 38 |     """Create an in-memory SQLite database for testing."""
 39 |     engine = create_engine("sqlite:///:memory:")
 40 |     Base.metadata.create_all(engine)
 41 | 
 42 |     # Add some test data
 43 |     with Session(engine) as session:
 44 |         # Add test stocks
 45 |         aapl = Stock(
 46 |             ticker_symbol="AAPL",
 47 |             company_name="Apple Inc.",
 48 |             sector="Technology",
 49 |             industry="Consumer Electronics",
 50 |         )
 51 |         msft = Stock(
 52 |             ticker_symbol="MSFT",
 53 |             company_name="Microsoft Corp.",
 54 |             sector="Technology",
 55 |             industry="Software",
 56 |         )
 57 |         session.add_all([aapl, msft])
 58 |         session.commit()
 59 | 
 60 |         # Add test price data
 61 |         base_date = datetime.now() - timedelta(days=30)
 62 |         for i in range(30):
 63 |             date = base_date + timedelta(days=i)
 64 |             session.add(
 65 |                 PriceCache(
 66 |                     stock_id=aapl.stock_id,
 67 |                     date=date,
 68 |                     open_price=150.0 + i,
 69 |                     high_price=152.0 + i,
 70 |                     low_price=149.0 + i,
 71 |                     close_price=151.0 + i,
 72 |                     volume=1000000 + i * 10000,
 73 |                 )
 74 |             )
 75 |         session.commit()
 76 | 
 77 |     # Patch the database connection
 78 |     with patch("maverick_mcp.data.models.engine", engine):
 79 |         with patch("maverick_mcp.data.models.SessionLocal", lambda: Session(engine)):
 80 |             yield engine
 81 | 
 82 | 
 83 | class TestInMemoryServer:
 84 |     """Test suite for in-memory server operations."""
 85 | 
 86 |     @pytest.mark.asyncio
 87 |     async def test_server_health(self, test_db, mock_redis):
 88 |         """Test the health endpoint returns correct status."""
 89 |         async with Client(mcp) as client:
 90 |             result = await client.read_resource("health://")
 91 | 
 92 |             # Result is a list of content items
 93 |             assert len(result) > 0
 94 |             assert result[0].text is not None
 95 |             health_data = eval(result[0].text)  # Convert string representation to dict
 96 | 
 97 |             # In testing environment, status might be degraded due to mocked services
 98 |             assert health_data["status"] in ["ok", "degraded"]
 99 |             assert "version" in health_data
100 |             assert "components" in health_data
101 | 
102 |             # Check available components
103 |             components = health_data["components"]
104 |             # Redis should be healthy (mocked)
105 |             if "redis" in components:
106 |                 assert components["redis"]["status"] == "healthy"
107 |             # Database status can be error in test environment due to SQLite pool differences
108 |             if "database" in components:
109 |                 assert components["database"]["status"] in [
110 |                     "healthy",
111 |                     "degraded",
112 |                     "unhealthy",
113 |                     "error",
114 |                 ]
115 | 
116 |     @pytest.mark.asyncio
117 |     async def test_fetch_stock_data(self, test_db, mock_redis):
118 |         """Test fetching stock data from the database."""
119 |         async with Client(mcp) as client:
120 |             result = await client.call_tool(
121 |                 "/data_fetch_stock_data",
122 |                 {
123 |                     "request": {
124 |                         "ticker": "AAPL",
125 |                         "start_date": "2024-01-01",
126 |                         "end_date": "2024-01-31",
127 |                     }
128 |                 },
129 |             )
130 | 
131 |             assert len(result) > 0
132 |             assert result[0].text is not None
133 |             # Result should contain stock data
134 |             data = eval(result[0].text)
135 |             assert data["ticker"] == "AAPL"
136 |             assert "columns" in data
137 |             assert "Open" in data["columns"]
138 |             assert "Close" in data["columns"]
139 |             assert data["record_count"] > 0
140 | 
141 |     @pytest.mark.asyncio
142 |     async def test_rsi_analysis(self, test_db, mock_redis):
143 |         """Test RSI technical analysis calculation."""
144 |         async with Client(mcp) as client:
145 |             result = await client.call_tool(
146 |                 "/technical_get_rsi_analysis", {"ticker": "AAPL", "period": 14}
147 |             )
148 | 
149 |             assert len(result) > 0
150 |             assert result[0].text is not None
151 |             # Should contain RSI data
152 |             data = eval(result[0].text)
153 |             assert "analysis" in data
154 |             assert "ticker" in data
155 |             assert data["ticker"] == "AAPL"
156 | 
157 |     @pytest.mark.asyncio
158 |     async def test_batch_stock_data(self, test_db, mock_redis):
159 |         """Test batch fetching of multiple stocks."""
160 |         async with Client(mcp) as client:
161 |             result = await client.call_tool(
162 |                 "/data_fetch_stock_data_batch",
163 |                 {
164 |                     "request": {
165 |                         "tickers": ["AAPL", "MSFT"],
166 |                         "start_date": "2024-01-01",
167 |                         "end_date": "2024-01-31",
168 |                     }
169 |                 },
170 |             )
171 | 
172 |             assert len(result) > 0
173 |             assert result[0].text is not None
174 |             data = eval(result[0].text)
175 | 
176 |             assert "results" in data
177 |             assert "AAPL" in data["results"]
178 |             assert "MSFT" in data["results"]
179 |             assert data["success_count"] == 2
180 | 
181 |     @pytest.mark.asyncio
182 |     async def test_invalid_ticker(self, test_db, mock_redis):
183 |         """Test handling of invalid ticker symbols."""
184 |         async with Client(mcp) as client:
185 |             # Invalid ticker should return an error, not raise an exception
186 |             result = await client.call_tool(
187 |                 "/data_fetch_stock_data",
188 |                 {
189 |                     "request": {
190 |                         "ticker": "INVALID123",  # Invalid format
191 |                         "start_date": "2024-01-01",
192 |                         "end_date": "2024-01-31",
193 |                     }
194 |                 },
195 |             )
196 | 
197 |             # Should return empty data for invalid ticker
198 |             assert len(result) > 0
199 |             assert result[0].text is not None
200 |             data = eval(result[0].text)
201 |             # Invalid ticker returns empty data
202 |             assert data["record_count"] == 0
203 |             assert len(data["data"]) == 0
204 | 
205 |     @pytest.mark.asyncio
206 |     async def test_date_validation(self, test_db, mock_redis):
207 |         """Test date range validation."""
208 |         async with Client(mcp) as client:
209 |             with pytest.raises(Exception) as exc_info:
210 |                 await client.call_tool(
211 |                     "/data_fetch_stock_data",
212 |                     {
213 |                         "request": {
214 |                             "ticker": "AAPL",
215 |                             "start_date": "2024-01-31",
216 |                             "end_date": "2024-01-01",  # End before start
217 |                         }
218 |                     },
219 |                 )
220 | 
221 |             # Should fail with validation error
222 |             assert (
223 |                 "error" in str(exc_info.value).lower()
224 |                 or "validation" in str(exc_info.value).lower()
225 |             )
226 | 
227 |     @pytest.mark.asyncio
228 |     async def test_concurrent_requests(self, test_db, mock_redis):
229 |         """Test handling multiple concurrent requests."""
230 |         async with Client(mcp) as client:
231 |             # Create multiple concurrent tasks
232 |             tasks = [
233 |                 client.call_tool(
234 |                     "/data_fetch_stock_data",
235 |                     {
236 |                         "request": {
237 |                             "ticker": "AAPL",
238 |                             "start_date": "2024-01-01",
239 |                             "end_date": "2024-01-31",
240 |                         }
241 |                     },
242 |                 )
243 |                 for _ in range(5)
244 |             ]
245 | 
246 |             # All should complete successfully
247 |             results = await asyncio.gather(*tasks)
248 |             assert len(results) == 5
249 |             for result in results:
250 |                 assert len(result) > 0
251 |                 assert result[0].text is not None
252 |                 data = eval(result[0].text)
253 |                 assert data["ticker"] == "AAPL"
254 | 
255 | 
256 | class TestResourceManagement:
257 |     """Test resource management and cleanup."""
258 | 
259 |     @pytest.mark.asyncio
260 |     async def test_list_resources(self, test_db, mock_redis):
261 |         """Test listing available resources."""
262 |         async with Client(mcp) as client:
263 |             resources = await client.list_resources()
264 | 
265 |             # In the current implementation, resources may be empty or have different URIs
266 |             # Just check that the call succeeds
267 |             assert isinstance(resources, list)
268 | 
269 |     @pytest.mark.asyncio
270 |     async def test_read_resource(self, test_db, mock_redis):
271 |         """Test reading a specific resource."""
272 |         async with Client(mcp) as client:
273 |             result = await client.read_resource("health://")
274 | 
275 |             assert len(result) > 0
276 |             assert result[0].text is not None
277 |             # Should contain cache status information
278 |             assert (
279 |                 "redis" in result[0].text.lower() or "memory" in result[0].text.lower()
280 |             )
281 | 
282 | 
283 | class TestErrorHandling:
284 |     """Test error handling and edge cases."""
285 | 
286 |     @pytest.mark.asyncio
287 |     async def test_database_error_handling(self, mock_redis):
288 |         """Test graceful handling of database errors."""
289 |         # No test_db fixture, so database should fail
290 |         with patch(
291 |             "maverick_mcp.data.models.SessionLocal", side_effect=Exception("DB Error")
292 |         ):
293 |             async with Client(mcp) as client:
294 |                 result = await client.read_resource("health://")
295 | 
296 |                 assert len(result) > 0
297 |                 health_data = eval(result[0].text)
298 |                 # Database should show an error
299 |                 assert health_data["status"] in ["degraded", "unhealthy"]
300 |                 assert "components" in health_data
301 | 
302 |     @pytest.mark.asyncio
303 |     async def test_cache_fallback(self, test_db):
304 |         """Test fallback to in-memory cache when Redis is unavailable."""
305 |         # No mock_redis fixture, should fall back to memory
306 |         with patch(
307 |             "maverick_mcp.data.cache.redis.Redis", side_effect=Exception("Redis Error")
308 |         ):
309 |             async with Client(mcp) as client:
310 |                 result = await client.read_resource("health://")
311 | 
312 |                 assert len(result) > 0
313 |                 health_data = eval(result[0].text)
314 |                 # Cache should fall back to memory
315 |                 assert "components" in health_data
316 |                 if "cache" in health_data["components"]:
317 |                     assert health_data["components"]["cache"]["type"] == "memory"
318 | 
319 | 
320 | class TestPerformanceMetrics:
321 |     """Test performance monitoring and metrics."""
322 | 
323 |     @pytest.mark.asyncio
324 |     async def test_query_performance_tracking(self, test_db, mock_redis):
325 |         """Test that query performance is tracked."""
326 |         # Skip this test as health_monitor is not available
327 |         pytest.skip("health_monitor not available in current implementation")
328 | 
329 | 
330 | # Utility functions for testing
331 | 
332 | 
333 | def create_test_stock_data(symbol: str, days: int = 30) -> dict[str, Any]:
334 |     """Create test stock data for a given symbol."""
335 |     data: dict[str, Any] = {"symbol": symbol, "prices": []}
336 | 
337 |     base_date = datetime.now() - timedelta(days=days)
338 |     base_price = 100.0
339 | 
340 |     for i in range(days):
341 |         date = base_date + timedelta(days=i)
342 |         price = base_price + (i * 0.5)  # Gradual increase
343 |         data["prices"].append(
344 |             {
345 |                 "date": date.isoformat(),
346 |                 "open": price,
347 |                 "high": price + 1,
348 |                 "low": price - 1,
349 |                 "close": price + 0.5,
350 |                 "volume": 1000000,
351 |             }
352 |         )
353 | 
354 |     return data
355 | 
356 | 
357 | @pytest.mark.asyncio
358 | async def test_with_mock_data_provider(test_db, mock_redis):
359 |     """Test with mocked external data provider."""
360 |     test_data = create_test_stock_data("TSLA", 30)
361 | 
362 |     with patch("yfinance.download") as mock_yf:
363 |         # Mock yfinance response
364 |         mock_df = Mock()
365 |         mock_df.empty = False
366 |         mock_df.to_dict.return_value = test_data["prices"]
367 |         mock_yf.return_value = mock_df
368 | 
369 |         async with Client(mcp) as client:
370 |             result = await client.call_tool(
371 |                 "/data_fetch_stock_data",
372 |                 {
373 |                     "request": {
374 |                         "ticker": "TSLA",
375 |                         "start_date": "2024-01-01",
376 |                         "end_date": "2024-01-31",
377 |                     }
378 |                 },
379 |             )
380 | 
381 |             assert len(result) > 0
382 |             assert result[0].text is not None
383 |             assert "TSLA" in result[0].text
384 | 
385 | 
386 | if __name__ == "__main__":
387 |     # Run tests directly
388 |     pytest.main([__file__, "-v"])
389 | 
```

--------------------------------------------------------------------------------
/maverick_mcp/utils/cache_warmer.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Cache warming utilities for pre-loading commonly used data.
  3 | Improves performance by pre-fetching and caching frequently accessed data.
  4 | """
  5 | 
  6 | import asyncio
  7 | import logging
  8 | from concurrent.futures import ThreadPoolExecutor
  9 | from datetime import datetime, timedelta
 10 | from typing import Any
 11 | 
 12 | from maverick_mcp.data.cache import (
 13 |     CacheManager,
 14 |     ensure_timezone_naive,
 15 |     generate_cache_key,
 16 |     get_cache_stats,
 17 | )
 18 | from maverick_mcp.providers.stock_data import EnhancedStockDataProvider
 19 | from maverick_mcp.utils.yfinance_pool import get_yfinance_pool
 20 | 
 21 | logger = logging.getLogger(__name__)
 22 | 
 23 | 
 24 | class CacheWarmer:
 25 |     """Pre-loads frequently accessed data into cache for improved performance."""
 26 | 
 27 |     def __init__(
 28 |         self,
 29 |         data_provider: EnhancedStockDataProvider | None = None,
 30 |         cache_manager: CacheManager | None = None,
 31 |         max_workers: int = 5,
 32 |     ):
 33 |         """Initialize cache warmer.
 34 | 
 35 |         Args:
 36 |             data_provider: Stock data provider instance
 37 |             cache_manager: Cache manager instance
 38 |             max_workers: Maximum number of parallel workers
 39 |         """
 40 |         self.data_provider = data_provider or EnhancedStockDataProvider()
 41 |         self.cache = cache_manager or CacheManager()
 42 |         self.executor = ThreadPoolExecutor(max_workers=max_workers)
 43 |         self._yf_pool = get_yfinance_pool()
 44 | 
 45 |         # Common symbols to warm up
 46 |         self.popular_symbols = [
 47 |             "SPY",
 48 |             "QQQ",
 49 |             "AAPL",
 50 |             "MSFT",
 51 |             "GOOGL",
 52 |             "AMZN",
 53 |             "NVDA",
 54 |             "META",
 55 |             "TSLA",
 56 |             "BRK-B",
 57 |             "JPM",
 58 |             "V",
 59 |             "JNJ",
 60 |             "WMT",
 61 |             "PG",
 62 |             "UNH",
 63 |             "HD",
 64 |             "MA",
 65 |             "DIS",
 66 |             "BAC",
 67 |             "XOM",
 68 |             "PFE",
 69 |             "ABBV",
 70 |             "KO",
 71 |             "CVX",
 72 |             "PEP",
 73 |             "TMO",
 74 |             "AVGO",
 75 |             "COST",
 76 |             "MRK",
 77 |             "VZ",
 78 |             "ADBE",
 79 |             "CMCSA",
 80 |             "NKE",
 81 |         ]
 82 | 
 83 |         # Common date ranges
 84 |         self.common_periods = [
 85 |             ("1d", 1),  # Yesterday
 86 |             ("5d", 5),  # Last week
 87 |             ("1mo", 30),  # Last month
 88 |             ("3mo", 90),  # Last 3 months
 89 |             ("1y", 365),  # Last year
 90 |         ]
 91 | 
 92 |     async def warm_popular_stocks(self, symbols: list[str] | None = None):
 93 |         """Pre-load data for popular stocks.
 94 | 
 95 |         Args:
 96 |             symbols: List of symbols to warm up (uses default popular list if None)
 97 |         """
 98 |         symbols = symbols or self.popular_symbols
 99 |         logger.info(f"Warming cache for {len(symbols)} popular stocks")
100 | 
101 |         # Warm up in parallel batches
102 |         batch_size = 5
103 |         for i in range(0, len(symbols), batch_size):
104 |             batch = symbols[i : i + batch_size]
105 |             await self._warm_batch(batch)
106 | 
107 |         logger.info("Popular stocks cache warming completed")
108 | 
109 |     async def _warm_batch(self, symbols: list[str]):
110 |         """Warm cache for a batch of symbols."""
111 |         tasks = []
112 |         for symbol in symbols:
113 |             # Warm different time periods
114 |             for period_name, days in self.common_periods:
115 |                 task = asyncio.create_task(
116 |                     self._warm_symbol_period(symbol, period_name, days)
117 |                 )
118 |                 tasks.append(task)
119 | 
120 |         # Wait for all tasks with timeout
121 |         try:
122 |             await asyncio.wait_for(
123 |                 asyncio.gather(*tasks, return_exceptions=True), timeout=30
124 |             )
125 |         except TimeoutError:
126 |             logger.warning(f"Timeout warming batch: {symbols}")
127 | 
128 |     async def _warm_symbol_period(self, symbol: str, period: str, days: int):
129 |         """Warm cache for a specific symbol and period."""
130 |         try:
131 |             end_date = datetime.now().strftime("%Y-%m-%d")
132 |             start_date = (datetime.now() - timedelta(days=days)).strftime("%Y-%m-%d")
133 | 
134 |             # Generate versioned cache key
135 |             cache_key = generate_cache_key(
136 |                 "backtest_data",
137 |                 symbol=symbol,
138 |                 start_date=start_date,
139 |                 end_date=end_date,
140 |                 interval="1d",
141 |             )
142 | 
143 |             # Check if already cached
144 |             if await self.cache.exists(cache_key):
145 |                 logger.debug(f"Cache already warm for {symbol} ({period})")
146 |                 return
147 | 
148 |             # Fetch data using the data provider
149 |             data = await asyncio.get_running_loop().run_in_executor(
150 |                 self.executor,
151 |                 self.data_provider.get_stock_data,
152 |                 symbol,
153 |                 start_date,
154 |                 end_date,
155 |                 None,  # period
156 |                 "1d",  # interval
157 |             )
158 | 
159 |             if data is not None and not data.empty:
160 |                 # Normalize column names and ensure timezone-naive
161 |                 data.columns = [col.lower() for col in data.columns]
162 |                 data = ensure_timezone_naive(data)
163 | 
164 |                 # Cache with adaptive TTL based on data age
165 |                 ttl = 86400 if days > 7 else 3600  # 24h for older data, 1h for recent
166 |                 await self.cache.set(cache_key, data, ttl=ttl)
167 | 
168 |                 logger.debug(f"Warmed cache for {symbol} ({period}) - {len(data)} rows")
169 | 
170 |         except Exception as e:
171 |             logger.warning(f"Failed to warm cache for {symbol} ({period}): {e}")
172 | 
173 |     async def warm_screening_data(self):
174 |         """Pre-load screening recommendations."""
175 |         logger.info("Warming screening data cache")
176 | 
177 |         try:
178 |             # Warm maverick recommendations
179 |             await asyncio.get_running_loop().run_in_executor(
180 |                 self.executor,
181 |                 self.data_provider.get_maverick_recommendations,
182 |                 20,  # limit
183 |                 None,  # min_score
184 |             )
185 | 
186 |             # Warm bear recommendations
187 |             await asyncio.get_running_loop().run_in_executor(
188 |                 self.executor,
189 |                 self.data_provider.get_maverick_bear_recommendations,
190 |                 20,
191 |                 None,
192 |             )
193 | 
194 |             # Warm supply/demand breakouts
195 |             await asyncio.get_running_loop().run_in_executor(
196 |                 self.executor,
197 |                 self.data_provider.get_supply_demand_breakout_recommendations,
198 |                 20,
199 |                 None,
200 |             )
201 | 
202 |             logger.info("Screening data cache warming completed")
203 | 
204 |         except Exception as e:
205 |             logger.error(f"Failed to warm screening cache: {e}")
206 | 
207 |     async def warm_technical_indicators(self, symbols: list[str] | None = None):
208 |         """Pre-calculate and cache technical indicators for symbols.
209 | 
210 |         Args:
211 |             symbols: List of symbols (uses top 10 popular if None)
212 |         """
213 |         symbols = symbols or self.popular_symbols[:10]
214 |         logger.info(f"Warming technical indicators for {len(symbols)} stocks")
215 | 
216 |         tasks = []
217 |         for symbol in symbols:
218 |             task = asyncio.create_task(self._warm_symbol_technicals(symbol))
219 |             tasks.append(task)
220 | 
221 |         try:
222 |             await asyncio.wait_for(
223 |                 asyncio.gather(*tasks, return_exceptions=True), timeout=60
224 |             )
225 |         except TimeoutError:
226 |             logger.warning("Timeout warming technical indicators")
227 | 
228 |         logger.info("Technical indicators cache warming completed")
229 | 
230 |     async def _warm_symbol_technicals(self, symbol: str):
231 |         """Warm technical indicator cache for a symbol."""
232 |         try:
233 |             # Get recent data
234 |             end_date = datetime.now().strftime("%Y-%m-%d")
235 |             start_date = (datetime.now() - timedelta(days=100)).strftime("%Y-%m-%d")
236 | 
237 |             # Common technical indicator cache keys
238 |             indicators = [
239 |                 ("sma", [20, 50, 200]),
240 |                 ("ema", [12, 26]),
241 |                 ("rsi", [14]),
242 |                 ("macd", [12, 26, 9]),
243 |                 ("bb", [20, 2]),
244 |             ]
245 | 
246 |             for indicator, params in indicators:
247 |                 for param in params:
248 |                     cache_key = f"technical:{symbol}:{indicator}:{param}:{start_date}:{end_date}"
249 | 
250 |                     if await self.cache.exists(cache_key):
251 |                         continue
252 | 
253 |                     # Note: Actual technical calculation would go here
254 |                     # For now, we're just warming the stock data cache
255 |                     logger.debug(
256 |                         f"Would warm {indicator} for {symbol} with param {param}"
257 |                     )
258 | 
259 |         except Exception as e:
260 |             logger.warning(f"Failed to warm technicals for {symbol}: {e}")
261 | 
262 |     async def run_full_warmup(self, report_stats: bool = True):
263 |         """Run complete cache warming routine."""
264 |         logger.info("Starting full cache warmup")
265 | 
266 |         # Get initial cache stats
267 |         initial_stats = get_cache_stats() if report_stats else None
268 | 
269 |         start_time = asyncio.get_running_loop().time()
270 | 
271 |         # Run all warming tasks
272 |         results = await asyncio.gather(
273 |             self.warm_popular_stocks(),
274 |             self.warm_screening_data(),
275 |             self.warm_technical_indicators(),
276 |             return_exceptions=True,
277 |         )
278 | 
279 |         end_time = asyncio.get_running_loop().time()
280 | 
281 |         # Report results and performance
282 |         successful_tasks = sum(1 for r in results if not isinstance(r, Exception))
283 |         failed_tasks = len(results) - successful_tasks
284 | 
285 |         logger.info(
286 |             f"Full cache warmup completed in {end_time - start_time:.2f}s - "
287 |             f"{successful_tasks} successful, {failed_tasks} failed"
288 |         )
289 | 
290 |         if report_stats and initial_stats:
291 |             final_stats = get_cache_stats()
292 |             new_items = final_stats["sets"] - initial_stats["sets"]
293 |             hit_rate_change = (
294 |                 final_stats["hit_rate_percent"] - initial_stats["hit_rate_percent"]
295 |             )
296 | 
297 |             logger.info(
298 |                 f"Cache warmup results: +{new_items} items cached, "
299 |                 f"hit rate change: {hit_rate_change:+.1f}%"
300 |             )
301 | 
302 |     async def schedule_periodic_warmup(self, interval_minutes: int = 30):
303 |         """Schedule periodic cache warming.
304 | 
305 |         Args:
306 |             interval_minutes: Minutes between warmup runs
307 |         """
308 |         logger.info(f"Starting periodic cache warmup every {interval_minutes} minutes")
309 | 
310 |         while True:
311 |             try:
312 |                 await self.run_full_warmup()
313 |             except Exception as e:
314 |                 logger.error(f"Error in periodic warmup: {e}")
315 | 
316 |             # Wait for next cycle
317 |             await asyncio.sleep(interval_minutes * 60)
318 | 
319 |     async def benchmark_cache_performance(
320 |         self, symbols: list[str] | None = None
321 |     ) -> dict[str, Any]:
322 |         """Benchmark cache performance for analysis.
323 | 
324 |         Args:
325 |             symbols: List of symbols to test (uses top 5 if None)
326 | 
327 |         Returns:
328 |             Dictionary with benchmark results
329 |         """
330 |         symbols = symbols or self.popular_symbols[:5]
331 |         logger.info(f"Benchmarking cache performance with {len(symbols)} symbols")
332 | 
333 |         # Test data retrieval performance
334 |         import time
335 | 
336 |         start_time = time.time()
337 |         cache_hits = 0
338 |         cache_misses = 0
339 | 
340 |         for symbol in symbols:
341 |             for _period_name, days in self.common_periods:
342 |                 end_date = datetime.now().strftime("%Y-%m-%d")
343 |                 start_date = (datetime.now() - timedelta(days=days)).strftime(
344 |                     "%Y-%m-%d"
345 |                 )
346 | 
347 |                 cache_key = generate_cache_key(
348 |                     "backtest_data",
349 |                     symbol=symbol,
350 |                     start_date=start_date,
351 |                     end_date=end_date,
352 |                     interval="1d",
353 |                 )
354 | 
355 |                 cached_data = await self.cache.get(cache_key)
356 |                 if cached_data is not None:
357 |                     cache_hits += 1
358 |                 else:
359 |                     cache_misses += 1
360 | 
361 |         end_time = time.time()
362 | 
363 |         # Calculate metrics
364 |         total_requests = cache_hits + cache_misses
365 |         hit_rate = (cache_hits / total_requests * 100) if total_requests > 0 else 0
366 |         avg_request_time = (
367 |             (end_time - start_time) / total_requests if total_requests > 0 else 0
368 |         )
369 | 
370 |         # Get current cache stats
371 |         cache_stats = get_cache_stats()
372 | 
373 |         benchmark_results = {
374 |             "symbols_tested": len(symbols),
375 |             "total_requests": total_requests,
376 |             "cache_hits": cache_hits,
377 |             "cache_misses": cache_misses,
378 |             "hit_rate_percent": round(hit_rate, 2),
379 |             "avg_request_time_ms": round(avg_request_time * 1000, 2),
380 |             "total_time_seconds": round(end_time - start_time, 2),
381 |             "cache_stats": cache_stats,
382 |         }
383 | 
384 |         logger.info(
385 |             f"Benchmark completed: {hit_rate:.1f}% hit rate, "
386 |             f"{avg_request_time * 1000:.1f}ms avg request time"
387 |         )
388 | 
389 |         return benchmark_results
390 | 
391 |     def shutdown(self):
392 |         """Clean up resources."""
393 |         self.executor.shutdown(wait=False)
394 |         logger.info("Cache warmer shutdown")
395 | 
396 | 
397 | async def warm_cache_on_startup():
398 |     """Convenience function to warm cache on application startup."""
399 |     warmer = CacheWarmer()
400 |     try:
401 |         # Only warm the most critical data on startup
402 |         await warmer.warm_popular_stocks(
403 |             ["SPY", "QQQ", "AAPL", "MSFT", "GOOGL", "AMZN", "NVDA", "TSLA"]
404 |         )
405 |         await warmer.warm_screening_data()
406 |     finally:
407 |         warmer.shutdown()
408 | 
409 | 
410 | if __name__ == "__main__":
411 |     # Example usage
412 |     async def main():
413 |         warmer = CacheWarmer()
414 |         try:
415 |             await warmer.run_full_warmup()
416 |         finally:
417 |             warmer.shutdown()
418 | 
419 |     asyncio.run(main())
420 | 
```
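
A minimal sketch of wiring `warm_cache_on_startup()` from the module above into a FastAPI lifespan hook; whether the server actually invokes it this way is an assumption, and only the import path and function name are taken from the file itself.

```python
# Hypothetical startup wiring (not taken from the server code): warm the cache
# before the API begins serving requests.
from contextlib import asynccontextmanager

from fastapi import FastAPI

from maverick_mcp.utils.cache_warmer import warm_cache_on_startup


@asynccontextmanager
async def lifespan(app: FastAPI):
    await warm_cache_on_startup()  # pre-loads critical symbols and screening data
    yield


app = FastAPI(lifespan=lifespan)
```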

--------------------------------------------------------------------------------
/docs/exa_research_testing_strategy.md:
--------------------------------------------------------------------------------

```markdown
  1 | # ExaSearch Research Integration Testing Strategy
  2 | 
  3 | This document outlines the comprehensive testing strategy for validating the ExaSearch integration with the MaverickMCP research agent architecture.
  4 | 
  5 | ## Overview
  6 | 
  7 | The testing strategy covers all aspects of the research system with ExaSearch provider:
  8 | - **DeepResearchAgent** orchestration with ExaSearch integration
  9 | - **Specialized Subagents** (Fundamental, Technical, Sentiment, Competitive)
 10 | - **Parallel Research Orchestration** and task distribution
 11 | - **Timeout Handling** and circuit breaker patterns
 12 | - **MCP Tool Integration** via research router endpoints
 13 | - **Performance Benchmarking** across research depths and configurations
 14 | 
 15 | ## Test Architecture
 16 | 
 17 | ### Test Categories
 18 | 
 19 | 1. **Unit Tests** (`pytest -m unit`)
 20 |    - Individual component testing in isolation
 21 |    - Mock external dependencies
 22 |    - Fast execution (< 30 seconds total)
 23 |    - No external API calls
 24 | 
 25 | 2. **Integration Tests** (`pytest -m integration`)
 26 |    - End-to-end workflow testing
 27 |    - Real ExaSearch API integration
 28 |    - Multi-component interaction validation
 29 |    - Requires `EXA_API_KEY` environment variable
 30 | 
 31 | 3. **Performance Tests** (`pytest -m slow`)
 32 |    - Benchmark different research depths
 33 |    - Parallel vs sequential execution comparison
 34 |    - Memory usage and timeout resilience
 35 |    - Longer execution times (2-5 minutes)
 36 | 
 37 | 4. **Benchmark Suite** (`scripts/benchmark_exa_research.py`)
 38 |    - Comprehensive performance analysis
 39 |    - Cross-configuration comparison
 40 |    - Detailed metrics and reporting
 41 |    - Production-ready performance validation
 42 | 
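The `unit`, `integration`, `slow`, and `external` markers used throughout this document must be registered with pytest. If the project's pytest configuration does not already declare them, a minimal conftest-style registration looks like the sketch below (illustrative only; the repo may declare these elsewhere):

```python
# conftest.py sketch: register the markers so `pytest -m unit` / `-m integration`
# / `-m slow` / `-m external` do not warn about unknown markers.
def pytest_configure(config):
    config.addinivalue_line("markers", "unit: fast, isolated tests with mocked dependencies")
    config.addinivalue_line("markers", "integration: end-to-end tests that call the real ExaSearch API")
    config.addinivalue_line("markers", "slow: performance and benchmark tests with longer runtimes")
    config.addinivalue_line("markers", "external: tests that depend on live external services")
```
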
 43 | ## Test Files and Structure
 44 | 
 45 | ```
 46 | tests/
 47 | ├── test_exa_research_integration.py  # Main comprehensive test suite
 48 | └── conftest.py                       # Shared fixtures and configuration
 49 | 
 50 | scripts/
 51 | ├── run_exa_tests.py                  # Test runner utility
 52 | └── benchmark_exa_research.py         # Performance benchmark suite
 53 | 
 54 | docs/
 55 | └── exa_research_testing_strategy.md  # This document
 56 | ```
 57 | 
 58 | ## Key Test Components
 59 | 
 60 | ### 1. ExaSearchProvider Tests
 61 | 
 62 | **Coverage:**
 63 | - Provider initialization with/without API key
 64 | - Adaptive timeout calculation for different query complexities
 65 | - Failure recording and health status management
 66 | - Successful search execution with realistic mock responses
 67 | - Timeout handling and error recovery
 68 | - Circuit breaker integration
 69 | 
 70 | **Key Test Methods:**
 71 | ```python
 72 | test_exa_provider_initialization()
 73 | test_timeout_calculation()
 74 | test_failure_recording_and_health_status()
 75 | test_exa_search_success()
 76 | test_exa_search_timeout()
 77 | test_exa_search_unhealthy_provider()
 78 | ```
 79 | 
 80 | ### 2. DeepResearchAgent Tests
 81 | 
 82 | **Coverage:**
 83 | - Agent initialization with ExaSearch provider
 84 | - Research execution with different depths (basic, standard, comprehensive, exhaustive)
 85 | - Timeout budget allocation and management
 86 | - Error handling when no providers are available
 87 | - Complete research workflow from query to results
 88 | 
 89 | **Key Test Methods:**
 90 | ```python
 91 | test_agent_initialization_with_exa()
 92 | test_research_comprehensive_success()
 93 | test_research_comprehensive_no_providers()
 94 | test_research_depth_levels()
 95 | ```
 96 | 
 97 | ### 3. Specialized Subagent Tests
 98 | 
 99 | **Coverage:**
100 | - All 4 subagent types: Fundamental, Technical, Sentiment, Competitive
101 | - Query generation for each specialization
102 | - Results processing and analysis
103 | - Focus area validation
104 | - Cross-subagent consistency
105 | 
106 | **Key Test Methods:**
107 | ```python
108 | test_fundamental_research_agent()
109 | test_technical_research_agent()
110 | test_sentiment_research_agent()
111 | test_competitive_research_agent()
112 | ```
113 | 
114 | ### 4. Parallel Research Orchestration Tests
115 | 
116 | **Coverage:**
117 | - ParallelResearchOrchestrator initialization and configuration
118 | - Task preparation and prioritization
119 | - Successful parallel execution with multiple tasks
120 | - Failure handling and partial success scenarios
121 | - Circuit breaker integration
122 | - Performance efficiency measurement
123 | 
124 | **Key Test Methods:**
125 | ```python
126 | test_orchestrator_initialization()
127 | test_parallel_execution_success()
128 | test_parallel_execution_with_failures()
129 | test_circuit_breaker_integration()
130 | ```
131 | 
132 | ### 5. Task Distribution Engine Tests
133 | 
134 | **Coverage:**
135 | - Topic relevance analysis for different task types
136 | - Intelligent task distribution based on query content
137 | - Priority assignment based on relevance scores
138 | - Fallback mechanisms when no relevant tasks found
139 | 
140 | **Key Test Methods:**
141 | ```python
142 | test_topic_relevance_analysis()
143 | test_task_distribution_basic()
144 | test_task_distribution_fallback()
145 | test_task_priority_assignment()
146 | ```
147 | 
148 | ### 6. Timeout and Circuit Breaker Tests
149 | 
150 | **Coverage:**
151 | - Timeout budget allocation across research phases
152 | - Provider health monitoring and recovery
153 | - Research behavior during provider failures
154 | - Graceful degradation strategies
155 | 
156 | **Key Test Methods:**
157 | ```python
158 | test_timeout_budget_allocation()
159 | test_provider_health_monitoring()
160 | test_research_with_provider_failures()
161 | ```
162 | 
163 | ### 7. Performance Benchmark Tests
164 | 
165 | **Coverage:**
166 | - Cross-depth performance comparison (basic → exhaustive)
167 | - Parallel vs sequential execution efficiency
168 | - Memory usage monitoring during parallel execution
169 | - Scalability under load
170 | 
171 | **Key Test Methods:**
172 | ```python
173 | test_research_depth_performance()
174 | test_parallel_vs_sequential_performance()
175 | test_memory_usage_monitoring()
176 | ```
177 | 
178 | ### 8. MCP Integration Tests
179 | 
180 | **Coverage:**
181 | - MCP tool endpoint validation
182 | - Research router integration
183 | - Request/response model validation
184 | - Error handling in MCP context
185 | 
186 | **Key Test Methods:**
187 | ```python
188 | test_comprehensive_research_mcp_tool()
189 | test_research_without_exa_key()
190 | test_research_request_validation()
191 | test_get_research_agent_optimization()
192 | ```
193 | 
194 | ### 9. Content Analysis Tests
195 | 
196 | **Coverage:**
197 | - AI-powered content analysis functionality
198 | - Fallback mechanisms when LLM analysis fails
199 | - Batch content processing
200 | - Sentiment and insight extraction
201 | 
202 | **Key Test Methods:**
203 | ```python
204 | test_content_analysis_success()
205 | test_content_analysis_fallback()
206 | test_batch_content_analysis()
207 | ```
208 | 
209 | ### 10. Error Handling and Edge Cases
210 | 
211 | **Coverage:**
212 | - Empty search results handling
213 | - Malformed API responses
214 | - Network timeout recovery
215 | - Concurrent request limits
216 | - Memory constraints
217 | 
218 | **Key Test Methods:**
219 | ```python
220 | test_empty_search_results()
221 | test_malformed_search_response()
222 | test_network_timeout_recovery()
223 | test_concurrent_request_limits()
224 | ```
225 | 
226 | ## Test Data and Fixtures
227 | 
228 | ### Mock Data Factories
229 | 
230 | The test suite includes comprehensive mock data factories:
231 | 
232 | - **`mock_llm`**: Realistic LLM responses for different research phases
233 | - **`mock_exa_client`**: ExaSearch API client with query-specific responses (see the fixture sketch after this list)
234 | - **`sample_research_tasks`**: Representative research tasks for parallel execution
235 | - **`mock_settings`**: Configuration with ExaSearch integration enabled
236 | 
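For illustration, a minimal version of the `mock_exa_client` fixture might look like the sketch below; the result fields mirror typical Exa response attributes, and stubbing `search_and_contents` is an assumption about how the suite patches the client rather than its actual implementation:

```python
import pytest
from unittest.mock import MagicMock


@pytest.fixture
def mock_exa_client():
    """Illustrative fixture: a stubbed Exa client returning one canned result."""
    result = MagicMock()
    result.title = "AAPL Q3 earnings beat estimates"
    result.url = "https://example.com/aapl-earnings"
    result.text = "Apple reported better-than-expected revenue and margin expansion..."
    result.published_date = "2024-08-01"

    client = MagicMock()
    client.search_and_contents.return_value = MagicMock(results=[result])
    return client
```
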
237 | ### Realistic Test Scenarios
238 | 
239 | Test scenarios cover real-world usage patterns:
240 | 
241 | ```python
242 | test_queries = [
243 |     "AAPL stock financial analysis and investment outlook",
244 |     "Tesla market sentiment and competitive position", 
245 |     "Microsoft earnings performance and growth prospects",
246 |     "tech sector analysis and market trends",
247 |     "artificial intelligence investment opportunities",
248 | ]
249 | 
250 | research_depths = ["basic", "standard", "comprehensive", "exhaustive"]
251 | 
252 | focus_areas = {
253 |     "fundamentals": ["earnings", "valuation", "financial_health"],
254 |     "technicals": ["chart_patterns", "technical_indicators", "price_action"],
255 |     "sentiment": ["market_sentiment", "analyst_ratings", "news_sentiment"],
256 |     "competitive": ["competitive_position", "market_share", "industry_trends"],
257 | }
258 | ```
259 | 
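These matrices lend themselves to `pytest.mark.parametrize`; the sketch below drives every query through every depth. The `research_agent` fixture and the `research_comprehensive` call signature are assumptions for illustration, not the suite's actual API:

```python
import pytest


@pytest.mark.integration
@pytest.mark.asyncio
@pytest.mark.parametrize("depth", research_depths)
@pytest.mark.parametrize("query", test_queries)
async def test_depth_query_matrix(research_agent, query, depth):
    # Reuses test_queries and research_depths from the block above; asserts only
    # that the (hypothetical) agent call returns something.
    result = await research_agent.research_comprehensive(topic=query, depth=depth)
    assert result is not None
```
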
260 | ## Running Tests
261 | 
262 | ### Quick Start
263 | 
264 | ```bash
265 | # Install dependencies
266 | uv sync
267 | 
268 | # Set environment variable (for integration tests)
269 | export EXA_API_KEY=your_exa_api_key
270 | 
271 | # Run unit tests (fast, no external dependencies)
272 | python scripts/run_exa_tests.py --unit
273 | 
274 | # Run integration tests (requires EXA_API_KEY)
275 | python scripts/run_exa_tests.py --integration
276 | 
277 | # Run all tests
278 | python scripts/run_exa_tests.py --all
279 | 
280 | # Run quick test suite
281 | python scripts/run_exa_tests.py --quick
282 | 
283 | # Run with coverage reporting
284 | python scripts/run_exa_tests.py --coverage
285 | ```
286 | 
287 | ### Direct pytest Commands
288 | 
289 | ```bash
290 | # Unit tests only
291 | pytest tests/test_exa_research_integration.py -m unit -v
292 | 
293 | # Integration tests (requires API key)
294 | pytest tests/test_exa_research_integration.py -m integration -v
295 | 
296 | # Performance tests
297 | pytest tests/test_exa_research_integration.py -m slow -v
298 | 
299 | # All tests
300 | pytest tests/test_exa_research_integration.py -v
301 | ```
302 | 
303 | ### Performance Benchmarks
304 | 
305 | ```bash
306 | # Comprehensive benchmarks
307 | python scripts/benchmark_exa_research.py
308 | 
309 | # Quick benchmarks (reduced test matrix)
310 | python scripts/benchmark_exa_research.py --quick
311 | 
312 | # Specific depth testing
313 | python scripts/benchmark_exa_research.py --depth basic --focus fundamentals
314 | 
315 | # Parallel execution analysis only
316 | python scripts/benchmark_exa_research.py --depth standard --parallel --no-timeout
317 | ```
318 | 
319 | ## Test Environment Setup
320 | 
321 | ### Prerequisites
322 | 
323 | 1. **Python 3.12+**: Core runtime requirement
324 | 2. **uv or pip**: Package management
325 | 3. **ExaSearch API Key**: For integration tests
326 |    ```bash
327 |    export EXA_API_KEY=your_api_key_here
328 |    ```
329 | 
330 | ### Optional Dependencies
331 | 
332 | - **Redis**: For caching layer tests (optional)
333 | - **PostgreSQL**: For database integration tests (optional)
334 | - **psutil**: For memory usage monitoring in performance tests
335 | 
336 | ### Environment Validation
337 | 
338 | ```bash
339 | # Validate environment setup
340 | python scripts/run_exa_tests.py --validate
341 | ```
342 | 
343 | ## Expected Test Results
344 | 
345 | ### Performance Benchmarks
346 | 
347 | **Research Depth Performance Expectations:**
348 | - **Basic**: < 15 seconds execution time
349 | - **Standard**: 15-30 seconds execution time  
350 | - **Comprehensive**: 30-45 seconds execution time
351 | - **Exhaustive**: 45-60 seconds execution time
352 | 
353 | **Parallel Execution Efficiency:**
354 | - **Speedup**: 2-4x faster than sequential for 3+ subagents
355 | - **Memory Usage**: < 100MB additional during parallel execution
356 | - **Error Rate**: < 5% for timeout-related failures
357 | 
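These budgets translate directly into assertions. A small helper using exactly the numbers above (the helper itself is illustrative and not part of the benchmark script):

```python
# Budgets in seconds, taken from the expectations listed above.
DEPTH_BUDGETS = {"basic": 15, "standard": 30, "comprehensive": 45, "exhaustive": 60}


def assert_within_budget(depth: str, elapsed_seconds: float) -> None:
    """Fail if a research run exceeded the documented budget for its depth."""
    budget = DEPTH_BUDGETS[depth]
    assert elapsed_seconds <= budget, (
        f"{depth} research took {elapsed_seconds:.1f}s, budget is {budget}s"
    )
```
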
358 | ### Success Criteria
359 | 
360 | **Unit Tests:**
361 | - ✅ 100% pass rate expected
362 | - ⚡ Complete in < 30 seconds
363 | - 🔄 No external dependencies
364 | 
365 | **Integration Tests:**
366 | - ✅ 95%+ pass rate (allowing for API variability)
367 | - ⏱️ Complete in < 5 minutes
368 | - 🔑 Requires valid EXA_API_KEY
369 | 
370 | **Performance Tests:**
371 | - ✅ 90%+ pass rate (allowing for performance variability)
372 | - ⏱️ Complete in < 10 minutes
373 | - 📊 Generate detailed performance metrics
374 | 
375 | ## Debugging and Troubleshooting
376 | 
377 | ### Common Issues
378 | 
379 | 1. **Missing EXA_API_KEY**
380 |    ```
381 |    Error: Research functionality unavailable - Exa search provider not configured
382 |    Solution: Set EXA_API_KEY environment variable
383 |    ```
384 | 
385 | 2. **Import Errors**
386 |    ```
387 |    ImportError: No module named 'exa_py'
388 |    Solution: Install dependencies with `uv sync` or `pip install -e .`
389 |    ```
390 | 
391 | 3. **Timeout Failures**
392 |    ```
393 |    Error: Research operation timed out
394 |    Solution: Check network connection or reduce research scope
395 |    ```
396 | 
397 | 4. **Memory Issues**
398 |    ```
399 |    Error: Memory usage exceeded limits
400 |    Solution: Reduce parallel agents or test data size
401 |    ```
402 | 
403 | ### Debug Mode
404 | 
405 | Enable detailed logging for debugging:
406 | 
407 | ```bash
408 | export PYTHONPATH=/path/to/maverick-mcp
409 | export LOG_LEVEL=DEBUG
410 | python scripts/run_exa_tests.py --unit --verbose
411 | ```
412 | 
413 | ### Test Output Analysis
414 | 
415 | **Successful Test Run Example:**
416 | ```
417 | 🧪 Running ExaSearch Unit Tests
418 | ============================
419 | test_exa_provider_initialization PASSED     [  5%]
420 | test_timeout_calculation PASSED             [ 10%]
421 | test_failure_recording_and_health_status PASSED [ 15%]
422 | ...
423 | ✅ All tests completed successfully!
424 | ```
425 | 
426 | **Benchmark Report Example:**
427 | ```
428 | 📊 BENCHMARK SUMMARY REPORT
429 | ============================
430 | 📋 Total Tests: 25
431 | ✅ Successful: 23
432 | ❌ Failed: 2
433 | ⏱️ Total Time: 127.3s
434 | 
435 | 📈 Performance Metrics:
436 |    Avg Execution Time: 18.45s
437 |    Min/Max Time: 8.21s / 45.67s
438 |    Avg Confidence Score: 0.78
439 |    Avg Sources Analyzed: 8.2
440 | ```
441 | 
442 | ## Continuous Integration
443 | 
444 | ### CI/CD Integration
445 | 
446 | The test suite is designed for CI/CD integration:
447 | 
448 | ```yaml
449 | # Example GitHub Actions workflow
450 | - name: Run ExaSearch Tests
451 |   env:
452 |     EXA_API_KEY: ${{ secrets.EXA_API_KEY }}
453 |   run: |
454 |     python scripts/run_exa_tests.py --unit
455 |     python scripts/run_exa_tests.py --integration
456 |     python scripts/benchmark_exa_research.py --quick
457 | ```
458 | 
459 | ### Test Markers for CI
460 | 
461 | Use pytest markers for selective testing:
462 | 
463 | ```bash
464 | # Fast tests only (for PR validation)
465 | pytest -m "not slow and not external"
466 | 
467 | # Full test suite (for main branch)
468 | pytest -m "not external" --maxfail=5
469 | 
470 | # External API tests (nightly/weekly)  
471 | pytest -m external
472 | ```
473 | 
474 | ## Maintenance and Updates
475 | 
476 | ### Adding New Tests
477 | 
478 | 1. **Extend existing test classes** for related functionality
479 | 2. **Follow naming conventions**: `test_[component]_[scenario]`
480 | 3. **Use appropriate markers**: `@pytest.mark.unit`, `@pytest.mark.integration`
481 | 4. **Mock external dependencies** in unit tests
482 | 5. **Include error scenarios** and edge cases
483 | 
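A generic skeleton following these conventions (the provider stub is a placeholder; a real test would exercise the actual `ExaSearchProvider` API):

```python
import pytest
from unittest.mock import AsyncMock


@pytest.mark.unit
@pytest.mark.asyncio
async def test_provider_handles_empty_results():
    """Edge-case coverage: a mocked provider returning no results should not raise."""
    fake_provider = AsyncMock()
    fake_provider.search.return_value = []  # edge scenario: empty result set
    results = await fake_provider.search("query with no matches")
    assert results == []
```
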
484 | ### Updating Test Data
485 | 
486 | 1. **Mock responses** should reflect real ExaSearch API responses
487 | 2. **Test queries** should cover different complexity levels
488 | 3. **Performance baselines** should be updated as the system improves
489 | 4. **Error scenarios** should match actual failure modes
490 | 
491 | ### Performance Regression Detection
492 | 
493 | 1. **Baseline metrics** stored in benchmark results
494 | 2. **Automated comparison** against previous runs (sketched below)
495 | 3. **Alert thresholds** for performance degradation
496 | 4. **Regular benchmark execution** in CI/CD
497 | 
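One lightweight way to implement the automated comparison is to diff a new benchmark report against a stored baseline. In the sketch below, the `avg_execution_time` field name is an assumption about the benchmark output format:

```python
import json


def is_regression(baseline_path: str, current_path: str, tolerance: float = 0.20) -> bool:
    """Return True if average execution time grew by more than `tolerance` (20% default)."""
    with open(baseline_path) as f:
        baseline = json.load(f)
    with open(current_path) as f:
        current = json.load(f)
    return current["avg_execution_time"] > baseline["avg_execution_time"] * (1 + tolerance)
```
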
498 | ## Conclusion
499 | 
500 | This comprehensive testing strategy ensures the ExaSearch integration is thoroughly validated across all dimensions:
501 | 
502 | - ✅ **Functional Correctness**: All components work as designed
503 | - ⚡ **Performance Characteristics**: System meets timing requirements
504 | - 🛡️ **Error Resilience**: Graceful handling of failures and edge cases
505 | - 🔗 **Integration Quality**: Seamless operation across component boundaries
506 | - 📊 **Monitoring Capability**: Detailed metrics and reporting for ongoing maintenance
507 | 
508 | The test suite provides confidence in the ExaSearch integration's reliability and performance for production deployment.
```

--------------------------------------------------------------------------------
/maverick_mcp/monitoring/health_monitor.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Background health monitoring system.
  3 | 
  4 | This module provides background tasks for continuous health monitoring,
  5 | alerting, and automatic recovery actions for the backtesting system.
  6 | """
  7 | 
  8 | import asyncio
  9 | import logging
 10 | import time
 11 | from datetime import UTC, datetime, timedelta
 12 | from typing import Any
 13 | 
 14 | from maverick_mcp.config.settings import get_settings
 15 | from maverick_mcp.monitoring.status_dashboard import get_status_dashboard
 16 | from maverick_mcp.utils.circuit_breaker import get_circuit_breaker_manager
 17 | 
 18 | logger = logging.getLogger(__name__)
 19 | settings = get_settings()
 20 | 
 21 | # Monitoring intervals (seconds)
 22 | HEALTH_CHECK_INTERVAL = 30
 23 | CIRCUIT_BREAKER_CHECK_INTERVAL = 60
 24 | RESOURCE_CHECK_INTERVAL = 45
 25 | ALERT_CHECK_INTERVAL = 120
 26 | 
 27 | # Alert thresholds
 28 | ALERT_THRESHOLDS = {
 29 |     "consecutive_failures": 5,
 30 |     "high_cpu_duration": 300,  # 5 minutes
 31 |     "high_memory_duration": 300,  # 5 minutes
 32 |     "circuit_breaker_open_duration": 180,  # 3 minutes
 33 | }
 34 | 
 35 | 
 36 | class HealthMonitor:
 37 |     """Background health monitoring system."""
 38 | 
 39 |     def __init__(self):
 40 |         self.running = False
 41 |         self.tasks = []
 42 |         self.alerts_sent = {}
 43 |         self.start_time = time.time()
 44 |         self.health_history = []
 45 |         self.dashboard = get_status_dashboard()
 46 |         self.circuit_breaker_manager = get_circuit_breaker_manager()
 47 | 
 48 |     async def start(self):
 49 |         """Start all background monitoring tasks."""
 50 |         if self.running:
 51 |             logger.warning("Health monitor is already running")
 52 |             return
 53 | 
 54 |         self.running = True
 55 |         logger.info("Starting health monitoring system...")
 56 | 
 57 |         # Initialize circuit breakers
 58 |         self.circuit_breaker_manager.initialize()
 59 | 
 60 |         # Start monitoring tasks
 61 |         self.tasks = [
 62 |             asyncio.create_task(self._health_check_loop()),
 63 |             asyncio.create_task(self._circuit_breaker_monitor_loop()),
 64 |             asyncio.create_task(self._resource_monitor_loop()),
 65 |             asyncio.create_task(self._alert_processor_loop()),
 66 |         ]
 67 | 
 68 |         logger.info(f"Started {len(self.tasks)} monitoring tasks")
 69 | 
 70 |     async def stop(self):
 71 |         """Stop all monitoring tasks."""
 72 |         if not self.running:
 73 |             return
 74 | 
 75 |         self.running = False
 76 |         logger.info("Stopping health monitoring system...")
 77 | 
 78 |         # Cancel all tasks
 79 |         for task in self.tasks:
 80 |             task.cancel()
 81 | 
 82 |         # Wait for tasks to complete
 83 |         if self.tasks:
 84 |             await asyncio.gather(*self.tasks, return_exceptions=True)
 85 | 
 86 |         self.tasks.clear()
 87 |         logger.info("Health monitoring system stopped")
 88 | 
 89 |     async def _health_check_loop(self):
 90 |         """Background loop for general health checks."""
 91 |         logger.info("Started health check monitoring loop")
 92 | 
 93 |         while self.running:
 94 |             try:
 95 |                 await self._perform_health_check()
 96 |                 await asyncio.sleep(HEALTH_CHECK_INTERVAL)
 97 |             except asyncio.CancelledError:
 98 |                 break
 99 |             except Exception as e:
100 |                 logger.error(f"Error in health check loop: {e}")
101 |                 await asyncio.sleep(HEALTH_CHECK_INTERVAL)
102 | 
103 |         logger.info("Health check monitoring loop stopped")
104 | 
105 |     async def _circuit_breaker_monitor_loop(self):
106 |         """Background loop for circuit breaker monitoring."""
107 |         logger.info("Started circuit breaker monitoring loop")
108 | 
109 |         while self.running:
110 |             try:
111 |                 await self._check_circuit_breakers()
112 |                 await asyncio.sleep(CIRCUIT_BREAKER_CHECK_INTERVAL)
113 |             except asyncio.CancelledError:
114 |                 break
115 |             except Exception as e:
116 |                 logger.error(f"Error in circuit breaker monitoring loop: {e}")
117 |                 await asyncio.sleep(CIRCUIT_BREAKER_CHECK_INTERVAL)
118 | 
119 |         logger.info("Circuit breaker monitoring loop stopped")
120 | 
121 |     async def _resource_monitor_loop(self):
122 |         """Background loop for resource monitoring."""
123 |         logger.info("Started resource monitoring loop")
124 | 
125 |         while self.running:
126 |             try:
127 |                 await self._check_resource_usage()
128 |                 await asyncio.sleep(RESOURCE_CHECK_INTERVAL)
129 |             except asyncio.CancelledError:
130 |                 break
131 |             except Exception as e:
132 |                 logger.error(f"Error in resource monitoring loop: {e}")
133 |                 await asyncio.sleep(RESOURCE_CHECK_INTERVAL)
134 | 
135 |         logger.info("Resource monitoring loop stopped")
136 | 
137 |     async def _alert_processor_loop(self):
138 |         """Background loop for alert processing."""
139 |         logger.info("Started alert processing loop")
140 | 
141 |         while self.running:
142 |             try:
143 |                 await self._process_alerts()
144 |                 await asyncio.sleep(ALERT_CHECK_INTERVAL)
145 |             except asyncio.CancelledError:
146 |                 break
147 |             except Exception as e:
148 |                 logger.error(f"Error in alert processing loop: {e}")
149 |                 await asyncio.sleep(ALERT_CHECK_INTERVAL)
150 | 
151 |         logger.info("Alert processing loop stopped")
152 | 
153 |     async def _perform_health_check(self):
154 |         """Perform comprehensive health check."""
155 |         try:
156 |             from maverick_mcp.api.routers.health_enhanced import (
157 |                 _get_detailed_health_status,
158 |             )
159 | 
160 |             health_status = await _get_detailed_health_status()
161 | 
162 |             # Log health status
163 |             logger.debug(f"Health check: {health_status['status']}")
164 | 
165 |             # Record health data
166 |             self._record_health_data(health_status)
167 | 
168 |             # Check for issues requiring attention
169 |             await self._analyze_health_trends(health_status)
170 | 
171 |         except Exception as e:
172 |             logger.error(f"Failed to perform health check: {e}")
173 | 
174 |     async def _check_circuit_breakers(self):
175 |         """Monitor circuit breaker states and perform recovery actions."""
176 |         try:
177 |             cb_status = self.circuit_breaker_manager.get_health_status()
178 | 
179 |             for name, status in cb_status.items():
180 |                 state = status.get("state", "unknown")
181 | 
182 |                 # Check for stuck open circuit breakers
183 |                 if state == "open":
184 |                     await self._handle_open_circuit_breaker(name, status)
185 | 
186 |                 # Check for high failure rates
187 |                 metrics = status.get("metrics", {})
188 |                 failure_rate = metrics.get("failure_rate", 0)
189 |                 if failure_rate > 0.5:  # 50% failure rate
190 |                     await self._handle_high_failure_rate(name, failure_rate)
191 | 
192 |         except Exception as e:
193 |             logger.error(f"Failed to check circuit breakers: {e}")
194 | 
195 |     async def _check_resource_usage(self):
196 |         """Monitor system resource usage."""
197 |         try:
198 |             from maverick_mcp.api.routers.health_enhanced import _get_resource_usage
199 | 
200 |             resource_usage = _get_resource_usage()
201 | 
202 |             # Check CPU usage
203 |             if resource_usage.cpu_percent > 80:
204 |                 await self._handle_high_cpu_usage(resource_usage.cpu_percent)
205 | 
206 |             # Check memory usage
207 |             if resource_usage.memory_percent > 85:
208 |                 await self._handle_high_memory_usage(resource_usage.memory_percent)
209 | 
210 |             # Check disk usage
211 |             if resource_usage.disk_percent > 90:
212 |                 await self._handle_high_disk_usage(resource_usage.disk_percent)
213 | 
214 |         except Exception as e:
215 |             logger.error(f"Failed to check resource usage: {e}")
216 | 
217 |     async def _process_alerts(self):
218 |         """Process and manage alerts."""
219 |         try:
220 |             dashboard_data = await self.dashboard.get_dashboard_data()
221 |             alerts = dashboard_data.get("alerts", [])
222 | 
223 |             for alert in alerts:
224 |                 await self._handle_alert(alert)
225 | 
226 |         except Exception as e:
227 |             logger.error(f"Failed to process alerts: {e}")
228 | 
229 |     def _record_health_data(self, health_status: dict[str, Any]):
230 |         """Record health data for trend analysis."""
231 |         timestamp = datetime.now(UTC)
232 | 
233 |         health_record = {
234 |             "timestamp": timestamp.isoformat(),
235 |             "overall_status": health_status.get("status", "unknown"),
236 |             "components_healthy": len(
237 |                 [
238 |                     c
239 |                     for c in health_status.get("components", {}).values()
240 |                     if c.status == "healthy"
241 |                 ]
242 |             ),
243 |             "components_total": len(health_status.get("components", {})),
244 |             "resource_usage": health_status.get("resource_usage", {}),
245 |         }
246 | 
247 |         self.health_history.append(health_record)
248 | 
249 |         # Keep only last 24 hours of data
250 |         cutoff_time = timestamp - timedelta(hours=24)
251 |         self.health_history = [
252 |             record
253 |             for record in self.health_history
254 |             if datetime.fromisoformat(record["timestamp"].replace("Z", "+00:00"))
255 |             > cutoff_time
256 |         ]
257 | 
258 |     async def _analyze_health_trends(self, current_status: dict[str, Any]):
259 |         """Analyze health trends and predict issues."""
260 |         if len(self.health_history) < 10:
261 |             return  # Not enough data for trend analysis
262 | 
263 |         # Analyze degradation trends
264 |         recent_records = self.health_history[-10:]  # Last 10 records
265 | 
266 |         unhealthy_trend = sum(
267 |             1
268 |             for record in recent_records
269 |             if record["overall_status"] in ["degraded", "unhealthy"]
270 |         )
271 | 
272 |         if unhealthy_trend >= 7:  # 70% of recent checks are problematic
273 |             logger.warning(
274 |                 "Detected concerning health trend - system may need attention"
275 |             )
276 |             await self._trigger_maintenance_alert()
277 | 
278 |     async def _handle_open_circuit_breaker(self, name: str, status: dict[str, Any]):
279 |         """Handle circuit breaker that's been open too long."""
280 |         # Check if we've already alerted for this breaker recently
281 |         alert_key = f"cb_open_{name}"
282 |         last_alert = self.alerts_sent.get(alert_key)
283 | 
284 |         if last_alert and (time.time() - last_alert) < 300:  # 5 minutes
285 |             return
286 | 
287 |         logger.warning(f"Circuit breaker '{name}' has been open - investigating")
288 | 
289 |         # Record alert
290 |         self.alerts_sent[alert_key] = time.time()
291 | 
292 |         # Could implement automatic recovery attempts here
293 |         # For now, just log the issue
294 | 
295 |     async def _handle_high_failure_rate(self, name: str, failure_rate: float):
296 |         """Handle high failure rate in circuit breaker."""
297 |         logger.warning(f"High failure rate detected for {name}: {failure_rate:.1%}")
298 | 
299 |     async def _handle_high_cpu_usage(self, cpu_percent: float):
300 |         """Handle sustained high CPU usage."""
301 |         alert_key = "high_cpu"
302 |         last_alert = self.alerts_sent.get(alert_key)
303 | 
304 |         if last_alert and (time.time() - last_alert) < 600:  # 10 minutes
305 |             return
306 | 
307 |         logger.warning(f"High CPU usage detected: {cpu_percent:.1f}%")
308 |         self.alerts_sent[alert_key] = time.time()
309 | 
310 |     async def _handle_high_memory_usage(self, memory_percent: float):
311 |         """Handle sustained high memory usage."""
312 |         alert_key = "high_memory"
313 |         last_alert = self.alerts_sent.get(alert_key)
314 | 
315 |         if last_alert and (time.time() - last_alert) < 600:  # 10 minutes
316 |             return
317 | 
318 |         logger.warning(f"High memory usage detected: {memory_percent:.1f}%")
319 |         self.alerts_sent[alert_key] = time.time()
320 | 
321 |     async def _handle_high_disk_usage(self, disk_percent: float):
322 |         """Handle high disk usage."""
323 |         alert_key = "high_disk"
324 |         last_alert = self.alerts_sent.get(alert_key)
325 | 
326 |         if last_alert and (time.time() - last_alert) < 1800:  # 30 minutes
327 |             return
328 | 
329 |         logger.error(f"Critical disk usage detected: {disk_percent:.1f}%")
330 |         self.alerts_sent[alert_key] = time.time()
331 | 
332 |     async def _handle_alert(self, alert: dict[str, Any]):
333 |         """Handle individual alert."""
334 |         severity = alert.get("severity", "info")
335 | 
336 |         # Log alert based on severity
337 |         if severity == "critical":
338 |             logger.error(f"Critical alert: {alert.get('title', 'Unknown')}")
339 |         elif severity == "warning":
340 |             logger.warning(f"Warning alert: {alert.get('title', 'Unknown')}")
341 |         else:
342 |             logger.info(f"Info alert: {alert.get('title', 'Unknown')}")
343 | 
344 |     async def _trigger_maintenance_alert(self):
345 |         """Trigger alert for system maintenance needed."""
346 |         alert_key = "maintenance_needed"
347 |         last_alert = self.alerts_sent.get(alert_key)
348 | 
349 |         if last_alert and (time.time() - last_alert) < 3600:  # 1 hour
350 |             return
351 | 
352 |         logger.error("System health trends indicate maintenance may be needed")
353 |         self.alerts_sent[alert_key] = time.time()
354 | 
355 |     def get_monitoring_status(self) -> dict[str, Any]:
356 |         """Get current monitoring system status."""
357 |         return {
358 |             "running": self.running,
359 |             "uptime_seconds": time.time() - self.start_time,
360 |             "active_tasks": len([t for t in self.tasks if not t.done()]),
361 |             "total_tasks": len(self.tasks),
362 |             "health_records": len(self.health_history),
363 |             "alerts_sent_count": len(self.alerts_sent),
364 |             "last_health_check": max(
365 |                 [record["timestamp"] for record in self.health_history], default=None
366 |             ),
367 |         }
368 | 
369 | 
370 | # Global health monitor instance
371 | _health_monitor = HealthMonitor()
372 | 
373 | 
374 | def get_health_monitor() -> HealthMonitor:
375 |     """Get the global health monitor instance."""
376 |     return _health_monitor
377 | 
378 | 
379 | async def start_health_monitoring():
380 |     """Start health monitoring system (convenience function)."""
381 |     await _health_monitor.start()
382 | 
383 | 
384 | async def stop_health_monitoring():
385 |     """Stop health monitoring system (convenience function)."""
386 |     await _health_monitor.stop()
387 | 
388 | 
389 | def get_monitoring_status() -> dict[str, Any]:
390 |     """Get monitoring status (convenience function)."""
391 |     return _health_monitor.get_monitoring_status()
392 | 
```
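
The convenience functions above are the intended entry points for wiring health monitoring into an application's lifecycle: start the monitor on startup, poll `get_monitoring_status()` for observability, and stop it on shutdown. A minimal sketch follows; the import path is an assumption for illustration and should be adjusted to wherever this module actually lives in the package.

```python
# Lifecycle sketch for the health monitor convenience API.
# NOTE: the import path below is hypothetical; point it at this module's
# actual location in the package.
import asyncio

from maverick_mcp.monitoring.health_monitor import (  # hypothetical path
    get_monitoring_status,
    start_health_monitoring,
    stop_health_monitoring,
)


async def main() -> None:
    await start_health_monitoring()  # spawn the background monitoring tasks
    try:
        await asyncio.sleep(60)  # application work happens here
        status = get_monitoring_status()
        print(status["running"], status["active_tasks"], status["uptime_seconds"])
    finally:
        await stop_health_monitoring()  # stop background monitoring work


if __name__ == "__main__":
    asyncio.run(main())
```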

--------------------------------------------------------------------------------
/maverick_mcp/tests/test_stock_data_provider.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Tests for the StockDataProvider class.
  3 | """
  4 | 
  5 | import unittest
  6 | from datetime import datetime
  7 | from unittest.mock import MagicMock, PropertyMock, patch
  8 | 
  9 | import pandas as pd
 10 | 
 11 | from maverick_mcp.providers.stock_data import StockDataProvider
 12 | 
 13 | 
 14 | class TestStockDataProvider(unittest.TestCase):
 15 |     """Test suite for StockDataProvider."""
 16 | 
 17 |     def setUp(self):
 18 |         """Set up test fixtures."""
 19 |         self.provider = StockDataProvider()
 20 | 
 21 |         # Create sample data
 22 |         self.sample_data = pd.DataFrame(
 23 |             {
 24 |                 "Open": [100, 101, 102, 103, 104],
 25 |                 "High": [101, 102, 103, 104, 105],
 26 |                 "Low": [99, 100, 101, 102, 103],
 27 |                 "Close": [100.5, 101.5, 102.5, 103.5, 104.5],
 28 |                 "Volume": [1000000, 1100000, 1200000, 1300000, 1400000],
 29 |             },
 30 |             index=pd.date_range(end=datetime.now(), periods=5, freq="D"),
 31 |         )
 32 | 
 33 |     @patch("yfinance.Ticker")
 34 |     def test_get_stock_data_with_period(self, mock_ticker_class):
 35 |         """Test fetching stock data with period parameter."""
 36 |         mock_ticker = MagicMock()
 37 |         mock_ticker_class.return_value = mock_ticker
 38 |         mock_ticker.history.return_value = self.sample_data
 39 | 
 40 |         result = self.provider.get_stock_data("AAPL", period="5d")
 41 | 
 42 |         self.assertIsInstance(result, pd.DataFrame)
 43 |         self.assertEqual(len(result), 5)
 44 |         mock_ticker.history.assert_called_once_with(period="5d", interval="1d")
 45 | 
 46 |     @patch("yfinance.Ticker")
 47 |     def test_get_stock_data_with_dates(self, mock_ticker_class):
 48 |         """Test fetching stock data with date range."""
 49 |         mock_ticker = MagicMock()
 50 |         mock_ticker_class.return_value = mock_ticker
 51 |         mock_ticker.history.return_value = self.sample_data
 52 | 
 53 |         start_date = "2024-01-01"
 54 |         end_date = "2024-01-05"
 55 |         # Disable cache to avoid database connection
 56 |         result = self.provider.get_stock_data(
 57 |             "AAPL", start_date, end_date, use_cache=False
 58 |         )
 59 | 
 60 |         self.assertIsInstance(result, pd.DataFrame)
 61 |         mock_ticker.history.assert_called_once_with(
 62 |             start=start_date, end=end_date, interval="1d"
 63 |         )
 64 | 
 65 |     @patch("yfinance.Ticker")
 66 |     def test_get_stock_data_empty_response(self, mock_ticker_class):
 67 |         """Test handling of empty data response."""
 68 |         mock_ticker = MagicMock()
 69 |         mock_ticker_class.return_value = mock_ticker
 70 |         mock_ticker.history.return_value = pd.DataFrame()
 71 | 
 72 |         result = self.provider.get_stock_data("INVALID")
 73 | 
 74 |         self.assertIsInstance(result, pd.DataFrame)
 75 |         self.assertTrue(result.empty)
 76 |         self.assertListEqual(
 77 |             list(result.columns), ["Open", "High", "Low", "Close", "Volume"]
 78 |         )
 79 | 
 80 |     @patch("yfinance.Ticker")
 81 |     def test_get_stock_data_missing_columns(self, mock_ticker_class):
 82 |         """Test handling of missing columns in data."""
 83 |         mock_ticker = MagicMock()
 84 |         mock_ticker_class.return_value = mock_ticker
 85 |         # Return data missing Volume column
 86 |         incomplete_data = self.sample_data[["Open", "High", "Low", "Close"]].copy()
 87 |         mock_ticker.history.return_value = incomplete_data
 88 | 
 89 |         # Disable cache to ensure we get mocked data
 90 |         result = self.provider.get_stock_data("AAPL", use_cache=False)
 91 | 
 92 |         self.assertIsInstance(result, pd.DataFrame)
 93 |         self.assertIn("Volume", result.columns)
 94 |         # Volume should be 0 when missing (not NaN)
 95 |         self.assertTrue((result["Volume"] == 0).all())
 96 | 
 97 |     @patch("yfinance.Ticker")
 98 |     def test_get_stock_data_with_retry(self, mock_ticker_class):
 99 |         """Test retry mechanism on timeout."""
100 |         mock_ticker = MagicMock()
101 |         mock_ticker_class.return_value = mock_ticker
102 | 
103 |         # First call times out, second succeeds
104 |         import requests
105 | 
106 |         mock_ticker.history.side_effect = [
107 |             requests.Timeout("Read timeout"),
108 |             self.sample_data,
109 |         ]
110 | 
111 |         # Disable cache to avoid database connection
112 |         result = self.provider.get_stock_data("AAPL", use_cache=False)
113 | 
114 |         self.assertIsInstance(result, pd.DataFrame)
115 |         self.assertEqual(mock_ticker.history.call_count, 2)
116 | 
117 |     @patch("yfinance.Ticker")
118 |     def test_get_stock_info(self, mock_ticker_class):
119 |         """Test fetching stock info."""
120 |         mock_ticker = MagicMock()
121 |         mock_ticker_class.return_value = mock_ticker
122 |         mock_ticker.info = {
123 |             "symbol": "AAPL",
124 |             "longName": "Apple Inc.",
125 |             "marketCap": 3000000000000,
126 |             "sector": "Technology",
127 |         }
128 | 
129 |         result = self.provider.get_stock_info("AAPL")
130 | 
131 |         self.assertIsInstance(result, dict)
132 |         self.assertEqual(result["symbol"], "AAPL")
133 |         self.assertEqual(result["longName"], "Apple Inc.")
134 | 
135 |     @patch("yfinance.Ticker")
136 |     def test_get_stock_info_error(self, mock_ticker_class):
137 |         """Test error handling in stock info fetching."""
138 |         mock_ticker = MagicMock()
139 |         mock_ticker_class.return_value = mock_ticker
140 |         # Simulate an exception when accessing info
141 |         type(mock_ticker).info = PropertyMock(side_effect=Exception("API Error"))
142 | 
143 |         result = self.provider.get_stock_info("INVALID")
144 | 
145 |         self.assertIsInstance(result, dict)
146 |         self.assertEqual(result, {})
147 | 
148 |     @patch("yfinance.Ticker")
149 |     def test_get_realtime_data(self, mock_ticker_class):
150 |         """Test fetching real-time data."""
151 |         mock_ticker = MagicMock()
152 |         mock_ticker_class.return_value = mock_ticker
153 | 
154 |         # Mock today's data
155 |         today_data = pd.DataFrame(
156 |             {"Close": [105.0], "Volume": [1500000]}, index=[datetime.now()]
157 |         )
158 |         mock_ticker.history.return_value = today_data
159 |         mock_ticker.info = {"previousClose": 104.0}
160 | 
161 |         result = self.provider.get_realtime_data("AAPL")
162 | 
163 |         self.assertIsInstance(result, dict)
164 |         self.assertIsNotNone(result)
165 |         assert result is not None  # Type narrowing for pyright
166 |         self.assertEqual(result["symbol"], "AAPL")
167 |         self.assertEqual(result["price"], 105.0)
168 |         self.assertEqual(result["change"], 1.0)
169 |         self.assertAlmostEqual(result["change_percent"], 0.96, places=2)
170 | 
171 |     @patch("yfinance.Ticker")
172 |     def test_get_all_realtime_data(self, mock_ticker_class):
173 |         """Test fetching real-time data for multiple symbols."""
174 |         mock_ticker = MagicMock()
175 |         mock_ticker_class.return_value = mock_ticker
176 | 
177 |         # Mock data for each symbol
178 |         mock_data = pd.DataFrame(
179 |             {"Close": [105.0], "Volume": [1500000]}, index=[datetime.now()]
180 |         )
181 |         mock_ticker.history.return_value = mock_data
182 |         mock_ticker.info = {"previousClose": 104.0}
183 | 
184 |         symbols = ["AAPL", "GOOGL", "MSFT"]
185 |         result = self.provider.get_all_realtime_data(symbols)
186 | 
187 |         self.assertIsInstance(result, dict)
188 |         for symbol in symbols:
189 |             self.assertIn(symbol, result)
190 |             self.assertEqual(result[symbol]["symbol"], symbol)
191 | 
192 |     def test_is_market_open_weekday(self):
193 |         """Test market open check on weekday."""
194 |         # Mock a weekday during market hours
195 |         with patch("maverick_mcp.providers.stock_data.datetime") as mock_datetime:
196 |             with patch("maverick_mcp.providers.stock_data.pytz") as mock_pytz:
197 |                 # Create mock timezone
198 |                 mock_tz = MagicMock()
199 |                 mock_pytz.timezone.return_value = mock_tz
200 | 
201 |                 # Tuesday at 2 PM ET
202 |                 mock_now = MagicMock()
203 |                 mock_now.weekday.return_value = 1  # Tuesday
204 |                 mock_now.hour = 14
205 |                 mock_now.minute = 0
206 |                 mock_now.__le__ = lambda self, other: True  # For market_open <= now
207 |                 mock_now.__ge__ = lambda self, other: False  # For now <= market_close
208 | 
209 |                 # Mock replace to return different times for market open/close
210 |                 def mock_replace(**kwargs):
211 |                     if kwargs.get("hour") == 9:  # market open
212 |                         m = MagicMock()
213 |                         m.__le__ = lambda self, other: True
214 |                         return m
215 |                     elif kwargs.get("hour") == 16:  # market close
216 |                         m = MagicMock()
217 |                         m.__ge__ = lambda self, other: True
218 |                         return m
219 |                     return mock_now
220 | 
221 |                 mock_now.replace = mock_replace
222 |                 mock_datetime.now.return_value = mock_now
223 | 
224 |                 result = self.provider.is_market_open()
225 | 
226 |                 self.assertTrue(result)
227 | 
228 |     def test_is_market_open_weekend(self):
229 |         """Test market open check on weekend."""
230 |         # Mock a Saturday
231 |         with patch("maverick_mcp.providers.stock_data.datetime") as mock_datetime:
232 |             with patch("maverick_mcp.providers.stock_data.pytz") as mock_pytz:
233 |                 # Create mock timezone
234 |                 mock_tz = MagicMock()
235 |                 mock_pytz.timezone.return_value = mock_tz
236 | 
237 |                 # Saturday at 2 PM ET
238 |                 mock_now = MagicMock()
239 |                 mock_now.weekday.return_value = 5  # Saturday
240 | 
241 |                 mock_datetime.now.return_value = mock_now
242 | 
243 |                 result = self.provider.is_market_open()
244 | 
245 |                 self.assertFalse(result)
246 | 
247 |     @patch("yfinance.Ticker")
248 |     def test_get_news(self, mock_ticker_class):
249 |         """Test fetching news."""
250 |         mock_ticker = MagicMock()
251 |         mock_ticker_class.return_value = mock_ticker
252 |         mock_ticker.news = [
253 |             {
254 |                 "title": "Apple announces new product",
255 |                 "publisher": "Reuters",
256 |                 "link": "https://example.com/1",
257 |                 "providerPublishTime": 1704150000,
258 |                 "type": "STORY",
259 |             },
260 |             {
261 |                 "title": "Apple stock rises",
262 |                 "publisher": "Bloomberg",
263 |                 "link": "https://example.com/2",
264 |                 "providerPublishTime": 1704153600,
265 |                 "type": "STORY",
266 |             },
267 |         ]
268 | 
269 |         result = self.provider.get_news("AAPL", limit=2)
270 | 
271 |         self.assertIsInstance(result, pd.DataFrame)
272 |         self.assertEqual(len(result), 2)
273 |         self.assertIn("title", result.columns)
274 |         self.assertIn("providerPublishTime", result.columns)
275 |         # Check timestamp conversion
276 |         self.assertEqual(result["providerPublishTime"].dtype, "datetime64[ns]")
277 | 
278 |     @patch("yfinance.Ticker")
279 |     def test_get_news_empty(self, mock_ticker_class):
280 |         """Test fetching news with no results."""
281 |         mock_ticker = MagicMock()
282 |         mock_ticker_class.return_value = mock_ticker
283 |         mock_ticker.news = []
284 | 
285 |         result = self.provider.get_news("AAPL")
286 | 
287 |         self.assertIsInstance(result, pd.DataFrame)
288 |         self.assertTrue(result.empty)
289 |         self.assertListEqual(
290 |             list(result.columns),
291 |             ["title", "publisher", "link", "providerPublishTime", "type"],
292 |         )
293 | 
294 |     @patch("yfinance.Ticker")
295 |     def test_get_earnings(self, mock_ticker_class):
296 |         """Test fetching earnings data."""
297 |         mock_ticker = MagicMock()
298 |         mock_ticker_class.return_value = mock_ticker
299 | 
300 |         # Mock earnings data
301 |         mock_earnings = pd.DataFrame({"Revenue": [100, 110], "Earnings": [10, 12]})
302 |         mock_ticker.earnings = mock_earnings
303 |         mock_ticker.earnings_dates = pd.DataFrame({"EPS Estimate": [1.5, 1.6]})
304 |         mock_ticker.earnings_trend = {"trend": [{"growth": 0.1}]}
305 | 
306 |         result = self.provider.get_earnings("AAPL")
307 | 
308 |         self.assertIsInstance(result, dict)
309 |         self.assertIn("earnings", result)
310 |         self.assertIn("earnings_dates", result)
311 |         self.assertIn("earnings_trend", result)
312 | 
313 |     @patch("yfinance.Ticker")
314 |     def test_get_recommendations(self, mock_ticker_class):
315 |         """Test fetching analyst recommendations."""
316 |         mock_ticker = MagicMock()
317 |         mock_ticker_class.return_value = mock_ticker
318 | 
319 |         mock_recommendations = pd.DataFrame(
320 |             {
321 |                 "firm": ["Morgan Stanley", "Goldman Sachs"],
322 |                 "toGrade": ["Buy", "Hold"],
323 |                 "fromGrade": ["Hold", "Sell"],
324 |                 "action": ["upgrade", "upgrade"],
325 |             }
326 |         )
327 |         mock_ticker.recommendations = mock_recommendations
328 | 
329 |         result = self.provider.get_recommendations("AAPL")
330 | 
331 |         self.assertIsInstance(result, pd.DataFrame)
332 |         self.assertEqual(len(result), 2)
333 |         self.assertIn("firm", result.columns)
334 |         self.assertIn("toGrade", result.columns)
335 | 
336 |     @patch("yfinance.Ticker")
337 |     def test_is_etf_by_quote_type(self, mock_ticker_class):
338 |         """Test ETF detection by quoteType."""
339 |         mock_ticker = MagicMock()
340 |         mock_ticker_class.return_value = mock_ticker
341 |         mock_ticker.info = {"quoteType": "ETF"}
342 | 
343 |         result = self.provider.is_etf("SPY")
344 | 
345 |         self.assertTrue(result)
346 | 
347 |     @patch("yfinance.Ticker")
348 |     def test_is_etf_by_symbol(self, mock_ticker_class):
349 |         """Test ETF detection by known symbol."""
350 |         mock_ticker = MagicMock()
351 |         mock_ticker_class.return_value = mock_ticker
352 |         mock_ticker.info = {}  # No quoteType
353 | 
354 |         result = self.provider.is_etf("SPY")
355 | 
356 |         self.assertTrue(result)  # SPY is in the known ETF list
357 | 
358 |     @patch("yfinance.Ticker")
359 |     def test_is_etf_false(self, mock_ticker_class):
360 |         """Test non-ETF detection."""
361 |         mock_ticker = MagicMock()
362 |         mock_ticker_class.return_value = mock_ticker
363 |         mock_ticker.info = {"quoteType": "EQUITY", "longName": "Apple Inc."}
364 | 
365 |         result = self.provider.is_etf("AAPL")
366 | 
367 |         self.assertFalse(result)
368 | 
369 |     def test_singleton_pattern(self):
370 |         """Test that StockDataProvider follows singleton pattern."""
371 |         provider1 = StockDataProvider()
372 |         provider2 = StockDataProvider()
373 | 
374 |         self.assertIs(provider1, provider2)
375 | 
376 | 
377 | if __name__ == "__main__":
378 |     unittest.main()
379 | 
```
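
The suite above uses only `unittest` and mocked `yfinance.Ticker` calls, and several tests disable the cache explicitly to avoid a database connection, so it can typically be run in isolation. A small sketch for invoking just this module programmatically (equivalent to `python -m unittest maverick_mcp.tests.test_stock_data_provider -v`), assuming the project root is on `sys.path`:

```python
# Load and run only the StockDataProvider suite.
import unittest

suite = unittest.defaultTestLoader.loadTestsFromName(
    "maverick_mcp.tests.test_stock_data_provider"
)
unittest.TextTestRunner(verbosity=2).run(suite)
```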

--------------------------------------------------------------------------------
/maverick_mcp/monitoring/integration_example.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Example integration of monitoring metrics into backtesting code.
  3 | 
  4 | This module demonstrates how to integrate the monitoring system
  5 | into existing backtesting strategies and data providers.
  6 | """
  7 | 
  8 | import asyncio
  9 | from typing import Any
 10 | 
 11 | import numpy as np
 12 | import pandas as pd
 13 | 
 14 | from maverick_mcp.monitoring.metrics import get_backtesting_metrics
 15 | from maverick_mcp.monitoring.middleware import (
 16 |     MetricsCircuitBreaker,
 17 |     get_metrics_middleware,
 18 |     track_api_call,
 19 |     track_resource_usage,
 20 |     track_strategy_execution,
 21 | )
 22 | from maverick_mcp.utils.logging import get_logger
 23 | 
 24 | logger = get_logger(__name__)
 25 | 
 26 | 
 27 | class MonitoredStockDataProvider:
 28 |     """
 29 |     Example stock data provider with integrated monitoring.
 30 | 
 31 |     Shows how to add metrics tracking to data fetching operations.
 32 |     """
 33 | 
 34 |     def __init__(self):
 35 |         self.circuit_breaker = MetricsCircuitBreaker(
 36 |             provider="tiingo",
 37 |             endpoint="/daily",
 38 |             failure_threshold=5,
 39 |             recovery_timeout=60,
 40 |         )
 41 |         self.logger = get_logger(f"{__name__}.MonitoredStockDataProvider")
 42 | 
 43 |     @track_api_call("tiingo", "/daily/{symbol}")
 44 |     async def get_stock_data(
 45 |         self, symbol: str, start_date: str | None = None, end_date: str | None = None
 46 |     ) -> pd.DataFrame:
 47 |         """
 48 |         Fetch stock data with automatic API call tracking.
 49 | 
 50 |         The @track_api_call decorator automatically tracks:
 51 |         - Request duration
 52 |         - Success/failure status
 53 |         - Rate limiting metrics
 54 |         """
 55 |         # Simulate API call delay
 56 |         await asyncio.sleep(0.1)
 57 | 
 58 |         # Simulate occasional API errors for demonstration
 59 |         if np.random.random() < 0.05:  # 5% error rate
 60 |             raise Exception("API rate limit exceeded")
 61 | 
 62 |         # Generate sample data
 63 |         dates = pd.date_range(start="2023-01-01", end="2023-12-31", freq="D")
 64 |         data = pd.DataFrame(
 65 |             {
 66 |                 "Date": dates,
 67 |                 "Open": np.random.uniform(100, 200, len(dates)),
 68 |                 "High": np.random.uniform(150, 250, len(dates)),
 69 |                 "Low": np.random.uniform(50, 150, len(dates)),
 70 |                 "Close": np.random.uniform(100, 200, len(dates)),
 71 |                 "Volume": np.random.randint(1000000, 10000000, len(dates)),
 72 |             }
 73 |         )
 74 | 
 75 |         # Track additional metrics
 76 |         collector = get_backtesting_metrics()
 77 |         collector.track_cache_operation(
 78 |             cache_type="api_response",
 79 |             operation="fetch",
 80 |             hit=False,  # Assume cache miss for this example
 81 |             key_pattern=f"stock_data_{symbol}",
 82 |         )
 83 | 
 84 |         return data
 85 | 
 86 |     async def get_stock_data_with_circuit_breaker(self, symbol: str) -> pd.DataFrame:
 87 |         """
 88 |         Fetch stock data with circuit breaker protection.
 89 | 
 90 |         Automatically tracks circuit breaker state changes and failures.
 91 |         """
 92 |         return await self.circuit_breaker.call(self.get_stock_data, symbol=symbol)
 93 | 
 94 | 
 95 | class MonitoredTradingStrategy:
 96 |     """
 97 |     Example trading strategy with comprehensive monitoring.
 98 | 
 99 |     Shows how to add metrics tracking to strategy execution.
100 |     """
101 | 
102 |     def __init__(self, name: str):
103 |         self.name = name
104 |         self.data_provider = MonitoredStockDataProvider()
105 |         self.middleware = get_metrics_middleware()
106 |         self.logger = get_logger(f"{__name__}.MonitoredTradingStrategy")
107 | 
108 |     @track_strategy_execution("RSI_Strategy", "AAPL", "1D")
109 |     async def run_backtest(
110 |         self, symbol: str, data: pd.DataFrame = None, data_points: int = None
111 |     ) -> dict[str, Any]:
112 |         """
113 |         Run backtest with automatic strategy execution tracking.
114 | 
115 |         The @track_strategy_execution decorator automatically tracks:
116 |         - Execution duration
117 |         - Memory usage
118 |         - Success/failure status
119 |         - Data points processed
120 |         """
121 |         if data is None:
122 |             data = await self.data_provider.get_stock_data(symbol)
123 | 
124 |         # Track the actual data points being processed
125 |         actual_data_points = len(data)
126 | 
127 |         # Simulate strategy calculations
128 |         await self._calculate_rsi_signals(data)
129 | 
130 |         # Simulate backtest execution
131 |         performance_results = await self._simulate_trading(data, symbol)
132 | 
133 |         # Add data points info to results for metrics tracking
134 |         performance_results["data_points_processed"] = actual_data_points
135 | 
136 |         return performance_results
137 | 
138 |     @track_resource_usage("rsi_calculation")
139 |     async def _calculate_rsi_signals(self, data: pd.DataFrame) -> pd.DataFrame:
140 |         """
141 |         Calculate RSI signals with resource usage tracking.
142 | 
143 |         The @track_resource_usage decorator tracks:
144 |         - Memory usage during calculation
145 |         - Computation time
146 |         - Data size category
147 |         """
148 |         # Simulate RSI calculation (simplified)
149 |         await asyncio.sleep(0.05)  # Simulate computation time
150 | 
151 |         # Calculate RSI (simplified version)
152 |         data["rsi"] = np.random.uniform(20, 80, len(data))
153 |         data["signal"] = np.where(
154 |             data["rsi"] < 30, 1, np.where(data["rsi"] > 70, -1, 0)
155 |         )
156 | 
157 |         return data
158 | 
159 |     async def _simulate_trading(
160 |         self, data: pd.DataFrame, symbol: str
161 |     ) -> dict[str, Any]:
162 |         """
163 |         Simulate trading and calculate performance metrics.
164 | 
165 |         Returns comprehensive performance metrics that will be
166 |         automatically tracked by the strategy execution decorator.
167 |         """
168 |         # Simulate trading logic
169 |         signals = data["signal"]
170 | 
171 |         # Calculate returns (simplified)
172 |         total_return = np.random.uniform(-10, 30)  # Random return between -10% and 30%
173 |         sharpe_ratio = np.random.uniform(-0.5, 2.5)  # Random Sharpe ratio
174 |         max_drawdown = np.random.uniform(5, 25)  # Random drawdown 5-25%
175 |         win_rate = np.random.uniform(0.35, 0.75)  # Random win rate 35-75%
176 | 
177 |         # Count trades
178 |         position_changes = np.diff(signals)
179 |         total_trades = np.sum(np.abs(position_changes))
180 |         winning_trades = int(total_trades * win_rate)
181 | 
182 |         # Track portfolio updates
183 |         collector = get_backtesting_metrics()
184 |         collector.update_portfolio_metrics(
185 |             portfolio_id="demo_portfolio",
186 |             portfolio_value_usd=100000 * (1 + total_return / 100),
187 |             daily_pnl_usd=total_return * 1000,  # Simulated daily PnL
188 |             strategy=self.name,
189 |             positions=[{"symbol": symbol, "quantity": 100, "type": "long"}],
190 |         )
191 | 
192 |         # Return performance metrics in expected format
193 |         return {
194 |             "strategy_name": self.name,
195 |             "symbol": symbol,
196 |             "total_return": total_return,
197 |             "returns": total_return,  # Alternative key name
198 |             "sharpe_ratio": sharpe_ratio,
199 |             "max_drawdown": max_drawdown,
200 |             "max_dd": max_drawdown,  # Alternative key name
201 |             "win_rate": win_rate * 100,  # Convert to percentage
202 |             "win_ratio": win_rate,  # Alternative key name
203 |             "total_trades": int(total_trades),
204 |             "num_trades": int(total_trades),  # Alternative key name
205 |             "winning_trades": winning_trades,
206 |             "performance_summary": {
207 |                 "profitable": total_return > 0,
208 |                 "risk_adjusted_return": sharpe_ratio,
209 |                 "maximum_loss": max_drawdown,
210 |             },
211 |         }
212 | 
213 | 
214 | class MonitoredDatabaseRepository:
215 |     """
216 |     Example database repository with monitoring integration.
217 | 
218 |     Shows how to add database operation tracking.
219 |     """
220 | 
221 |     def __init__(self):
222 |         self.middleware = get_metrics_middleware()
223 |         self.logger = get_logger(f"{__name__}.MonitoredDatabaseRepository")
224 | 
225 |     async def save_backtest_results(
226 |         self, strategy_name: str, symbol: str, results: dict[str, Any]
227 |     ) -> bool:
228 |         """
229 |         Save backtest results with database operation tracking.
230 |         """
231 |         async with self.middleware.track_database_operation(
232 |             query_type="INSERT", table_name="backtest_results", operation="save_results"
233 |         ):
234 |             # Simulate database save operation
235 |             await asyncio.sleep(0.02)
236 | 
237 |             # Simulate occasional database errors
238 |             if np.random.random() < 0.01:  # 1% error rate
239 |                 raise Exception("Database connection timeout")
240 | 
241 |             self.logger.info(
242 |                 f"Saved backtest results for {strategy_name} on {symbol}",
243 |                 extra={
244 |                     "strategy": strategy_name,
245 |                     "symbol": symbol,
246 |                     "total_return": results.get("total_return", 0),
247 |                 },
248 |             )
249 | 
250 |             return True
251 | 
252 |     async def get_historical_performance(
253 |         self, strategy_name: str, days: int = 30
254 |     ) -> list[dict[str, Any]]:
255 |         """
256 |         Retrieve historical performance with tracking.
257 |         """
258 |         async with self.middleware.track_database_operation(
259 |             query_type="SELECT",
260 |             table_name="backtest_results",
261 |             operation="get_performance",
262 |         ):
263 |             # Simulate database query
264 |             await asyncio.sleep(0.01)
265 | 
266 |             # Generate sample historical data
267 |             historical_data = []
268 |             for i in range(days):
269 |                 historical_data.append(
270 |                     {
271 |                         "date": f"2024-01-{i + 1:02d}",
272 |                         "strategy": strategy_name,
273 |                         "return": np.random.uniform(-2, 3),
274 |                         "sharpe_ratio": np.random.uniform(0.5, 2.0),
275 |                     }
276 |                 )
277 | 
278 |             return historical_data
279 | 
280 | 
281 | # Demonstration function
282 | async def demonstrate_monitoring_integration():
283 |     """
284 |     Demonstrate comprehensive monitoring integration.
285 | 
286 |     This function shows how all monitoring components work together
287 |     in a typical backtesting workflow.
288 |     """
289 |     logger.info("Starting monitoring integration demonstration")
290 | 
291 |     # Initialize components
292 |     strategy = MonitoredTradingStrategy("RSI_Momentum")
293 |     repository = MonitoredDatabaseRepository()
294 | 
295 |     # List of symbols to test
296 |     symbols = ["AAPL", "GOOGL", "MSFT", "TSLA"]
297 | 
298 |     for symbol in symbols:
299 |         try:
300 |             logger.info(f"Running backtest for {symbol}")
301 | 
302 |             # Run backtest (automatically tracked)
303 |             results = await strategy.run_backtest(
304 |                 symbol=symbol,
305 |                 data_points=252,  # One year of trading days
306 |             )
307 | 
308 |             # Save results (automatically tracked)
309 |             await repository.save_backtest_results(
310 |                 strategy_name=strategy.name, symbol=symbol, results=results
311 |             )
312 | 
313 |             # Get historical performance (automatically tracked)
314 |             historical = await repository.get_historical_performance(
315 |                 strategy_name=strategy.name, days=30
316 |             )
317 | 
318 |             logger.info(
319 |                 f"Completed backtest for {symbol}",
320 |                 extra={
321 |                     "symbol": symbol,
322 |                     "total_return": results.get("total_return", 0),
323 |                     "sharpe_ratio": results.get("sharpe_ratio", 0),
324 |                     "historical_records": len(historical),
325 |                 },
326 |             )
327 | 
328 |         except Exception as e:
329 |             logger.error(f"Backtest failed for {symbol}: {e}")
330 | 
331 |             # Manually track the anomaly
332 |             collector = get_backtesting_metrics()
333 |             collector.detect_anomaly(
334 |                 anomaly_type="backtest_execution_failure",
335 |                 severity="critical",
336 |                 context={
337 |                     "strategy_name": strategy.name,
338 |                     "symbol": symbol,
339 |                     "error": str(e),
340 |                 },
341 |             )
342 | 
343 |     logger.info("Monitoring integration demonstration completed")
344 | 
345 | 
346 | # Alert checking function
347 | async def check_and_report_anomalies():
348 |     """
349 |     Example function to check for anomalies and generate alerts.
350 | 
351 |     This would typically be run periodically by a scheduler.
352 |     """
353 |     logger.info("Checking for performance anomalies")
354 | 
355 |     collector = get_backtesting_metrics()
356 | 
357 |     # Simulate checking various metrics and detecting anomalies
358 |     anomalies_detected = 0
359 | 
360 |     # Example: Check if any strategy has poor recent performance
361 |     strategies = ["RSI_Momentum", "MACD_Trend", "Bollinger_Bands"]
362 | 
363 |     for strategy in strategies:
364 |         # Simulate performance check
365 |         recent_sharpe = np.random.uniform(-1.0, 2.5)
366 |         recent_drawdown = np.random.uniform(5, 35)
367 | 
368 |         if recent_sharpe < 0.5:
369 |             collector.detect_anomaly(
370 |                 anomaly_type="low_sharpe_ratio",
371 |                 severity="warning" if recent_sharpe > 0 else "critical",
372 |                 context={
373 |                     "strategy_name": strategy,
374 |                     "sharpe_ratio": recent_sharpe,
375 |                     "threshold": 0.5,
376 |                 },
377 |             )
378 |             anomalies_detected += 1
379 | 
380 |         if recent_drawdown > 25:
381 |             collector.detect_anomaly(
382 |                 anomaly_type="high_drawdown",
383 |                 severity="critical",
384 |                 context={
385 |                     "strategy_name": strategy,
386 |                     "max_drawdown": recent_drawdown,
387 |                     "threshold": 25,
388 |                 },
389 |             )
390 |             anomalies_detected += 1
391 | 
392 |     logger.info(f"Anomaly check completed. Detected {anomalies_detected} anomalies")
393 | 
394 |     return anomalies_detected
395 | 
396 | 
397 | if __name__ == "__main__":
398 |     # Run the demonstration
399 |     async def main():
400 |         await demonstrate_monitoring_integration()
401 |         await check_and_report_anomalies()
402 | 
403 |         # Print metrics summary
404 |         collector = get_backtesting_metrics()
405 |         metrics_text = collector.get_metrics_text()
406 |         print("\n" + "=" * 50)
407 |         print("PROMETHEUS METRICS SAMPLE:")
408 |         print("=" * 50)
409 |         print((metrics_text[:1000] + "...") if len(metrics_text) > 1000 else metrics_text)
410 | 
411 |     asyncio.run(main())
412 | 
```
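
`check_and_report_anomalies` is written to be called by a scheduler, as its docstring notes. A minimal periodic-loop sketch that drives it from plain `asyncio` (the 5-minute interval is illustrative):

```python
# Periodic driver for the anomaly check; the interval is an illustrative default.
import asyncio

from maverick_mcp.monitoring.integration_example import check_and_report_anomalies


async def anomaly_check_loop(interval_seconds: int = 300) -> None:
    """Run the anomaly check on a fixed interval until cancelled."""
    while True:
        try:
            detected = await check_and_report_anomalies()
            if detected:
                print(f"{detected} anomalies flagged this cycle")
        except Exception as exc:  # keep the loop alive on transient failures
            print(f"anomaly check failed: {exc}")
        await asyncio.sleep(interval_seconds)


if __name__ == "__main__":
    asyncio.run(anomaly_check_loop())
```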

--------------------------------------------------------------------------------
/scripts/seed_db.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Database seeding script for MaverickMCP.
  4 | 
  5 | This script populates the database with sample stock data fetched via yfinance,
  6 | including stocks, price data, and sample screening results.
  7 | """
  8 | 
  9 | import logging
 10 | import os
 11 | import sys
 12 | from datetime import UTC, datetime, timedelta
 13 | from decimal import Decimal
 14 | from pathlib import Path
 15 | 
 16 | # Add the project root to the Python path
 17 | project_root = Path(__file__).parent.parent
 18 | sys.path.insert(0, str(project_root))
 19 | 
 20 | # noqa: E402 - imports must come after sys.path modification
 21 | from sqlalchemy import create_engine  # noqa: E402
 22 | from sqlalchemy.orm import sessionmaker  # noqa: E402
 23 | 
 24 | from maverick_mcp.data.models import (  # noqa: E402
 25 |     MaverickBearStocks,
 26 |     MaverickStocks,
 27 |     PriceCache,
 28 |     Stock,
 29 |     SupplyDemandBreakoutStocks,
 30 |     TechnicalCache,
 31 |     bulk_insert_price_data,
 32 |     bulk_insert_screening_data,
 33 | )
 34 | from maverick_mcp.providers.stock_data import EnhancedStockDataProvider  # noqa: E402
 35 | 
 36 | # Set up logging
 37 | logging.basicConfig(
 38 |     level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
 39 | )
 40 | logger = logging.getLogger("maverick_mcp.seed")
 41 | 
 42 | 
 43 | # Sample stock tickers for different categories
 44 | SAMPLE_STOCKS = {
 45 |     "large_cap": [
 46 |         "AAPL",
 47 |         "MSFT",
 48 |         "GOOGL",
 49 |         "AMZN",
 50 |         "TSLA",
 51 |         "NVDA",
 52 |         "META",
 53 |         "BRK-B",
 54 |         "JNJ",
 55 |         "V",
 56 |     ],
 57 |     "growth": ["AMD", "CRM", "SHOP", "ROKU", "ZM", "DOCU", "SNOW", "PLTR", "RBLX", "U"],
 58 |     "value": ["KO", "PFE", "XOM", "CVX", "JPM", "BAC", "WMT", "PG", "T", "VZ"],
 59 |     "small_cap": [
 60 |         "UPST",
 61 |         "SOFI",
 62 |         "OPEN",
 63 |         "WISH",
 64 |         "CLOV",
 65 |         "SPCE",
 66 |         "LCID",
 67 |         "RIVN",
 68 |         "BYND",
 69 |         "PTON",
 70 |     ],
 71 | }
 72 | 
 73 | 
 74 | def get_database_url() -> str:
 75 |     """Get the database URL from environment or settings."""
 76 |     return os.getenv("DATABASE_URL") or "sqlite:///maverick_mcp.db"
 77 | 
 78 | 
 79 | def setup_stock_provider(session) -> EnhancedStockDataProvider:
 80 |     """Set up Enhanced stock data provider."""
 81 |     # The EnhancedStockDataProvider uses yfinance and doesn't require API keys
 82 |     provider = EnhancedStockDataProvider(db_session=session)
 83 |     logger.info("Enhanced stock data provider initialized")
 84 |     return provider
 85 | 
 86 | 
 87 | def create_sample_stocks(session, stocks_list: list[str]) -> dict[str, Stock]:
 88 |     """Create sample stock records."""
 89 |     logger.info(f"Creating {len(stocks_list)} sample stocks...")
 90 | 
 91 |     created_stocks = {}
 92 |     for ticker in stocks_list:
 93 |         try:
 94 |             # Create basic stock record
 95 |             stock = Stock.get_or_create(
 96 |                 session,
 97 |                 ticker_symbol=ticker,
 98 |                 company_name=f"{ticker} Inc.",  # Simple placeholder
 99 |                 sector="Technology",  # Default sector
100 |                 exchange="NASDAQ",
101 |                 country="US",
102 |                 currency="USD",
103 |                 is_active=True,
104 |             )
105 |             created_stocks[ticker] = stock
106 |             logger.info(f"Created stock: {ticker}")
107 | 
108 |         except Exception as e:
109 |             logger.error(f"Error creating stock {ticker}: {e}")
110 |             continue
111 | 
112 |     logger.info(f"Successfully created {len(created_stocks)} stocks")
113 |     return created_stocks
114 | 
115 | 
116 | def fetch_and_store_price_data(
117 |     session, stock_provider: EnhancedStockDataProvider, stocks: dict[str, Stock]
118 | ) -> None:
119 |     """Fetch price data using EnhancedStockDataProvider and store in database."""
120 |     logger.info("Fetching price data using yfinance...")
121 | 
122 |     # Get data for last 200 days
123 |     end_date = datetime.now(UTC)
124 |     start_date = end_date - timedelta(days=200)
125 | 
126 |     success_count = 0
127 |     for ticker, _stock in stocks.items():
128 |         try:
129 |             logger.info(f"Fetching price data for {ticker}...")
130 | 
131 |             # Get price data using the enhanced provider
132 |             data = stock_provider.get_stock_data(
133 |                 ticker,
134 |                 start_date=start_date.strftime("%Y-%m-%d"),
135 |                 end_date=end_date.strftime("%Y-%m-%d"),
136 |             )
137 | 
138 |             if data is not None and not data.empty:
139 |                 # Store in database
140 |                 inserted_count = bulk_insert_price_data(session, ticker, data)
141 |                 logger.info(f"Inserted {inserted_count} price records for {ticker}")
142 |                 success_count += 1
143 |             else:
144 |                 logger.warning(f"No price data received for {ticker}")
145 | 
146 |         except Exception as e:
147 |             logger.error(f"Error fetching price data for {ticker}: {e}")
148 |             continue
149 | 
150 |     logger.info(
151 |         f"Successfully fetched price data for {success_count}/{len(stocks)} stocks"
152 |     )
153 | 
154 | 
155 | def generate_sample_screening_data(stocks: dict[str, Stock]) -> list[dict]:
156 |     """Generate sample screening data for testing."""
157 |     logger.info("Generating sample screening data...")
158 | 
159 |     screening_data = []
160 |     for i, (ticker, _stock) in enumerate(stocks.items()):
161 |         # Generate realistic-looking screening data
162 |         base_score = 50 + (i % 40) + (hash(ticker) % 20)  # Score between 50-110
163 | 
164 |         data = {
165 |             "ticker": ticker,
166 |             "close": round(100 + (i * 10) + (hash(ticker) % 50), 2),
167 |             "volume": 1000000 + (i * 100000),
168 |             "momentum_score": round(base_score + (hash(ticker) % 30), 2),
169 |             "combined_score": min(100, base_score + (hash(ticker) % 25)),
170 |             "ema_21": round(95 + (i * 9), 2),
171 |             "sma_50": round(90 + (i * 8), 2),
172 |             "sma_150": round(85 + (i * 7), 2),
173 |             "sma_200": round(80 + (i * 6), 2),
174 |             "adr_pct": round(2 + (hash(ticker) % 8), 2),
175 |             "atr": round(3 + (hash(ticker) % 5), 2),
176 |             "pattern_type": ["Breakout", "Continuation", "Reversal", "Consolidation"][
177 |                 i % 4
178 |             ],
179 |             "squeeze_status": [
180 |                 "No Squeeze",
181 |                 "Low Squeeze",
182 |                 "Mid Squeeze",
183 |                 "High Squeeze",
184 |             ][i % 4],
185 |             "consolidation_status": ["Base", "Flag", "Pennant", "Triangle"][i % 4],
186 |             "entry_signal": ["Buy", "Hold", "Watch", "Caution"][i % 4],
187 |             "compression_score": hash(ticker) % 10,
188 |             "pattern_detected": 1 if hash(ticker) % 3 == 0 else 0,
189 |         }
190 |         screening_data.append(data)
191 | 
192 |     return screening_data
193 | 
194 | 
195 | def create_sample_screening_results(session, stocks: dict[str, Stock]) -> None:
196 |     """Create sample screening results for all categories."""
197 |     logger.info("Creating sample screening results...")
198 | 
199 |     # Generate sample data
200 |     screening_data = generate_sample_screening_data(stocks)
201 | 
202 |     # Split data for different screening types
203 |     total_stocks = len(screening_data)
204 | 
205 |     # Top 60% for Maverick stocks (bullish)
206 |     maverick_data = sorted(
207 |         screening_data, key=lambda x: x["combined_score"], reverse=True
208 |     )[: int(total_stocks * 0.6)]
209 |     maverick_count = bulk_insert_screening_data(session, MaverickStocks, maverick_data)
210 |     logger.info(f"Created {maverick_count} Maverick screening results")
211 | 
212 |     # Bottom 40% for Bear stocks
213 |     bear_data = sorted(screening_data, key=lambda x: x["combined_score"])[
214 |         : int(total_stocks * 0.4)
215 |     ]
216 |     # Add bear-specific fields
217 |     for data in bear_data:
218 |         data["score"] = 100 - data["combined_score"]  # Invert score for bear
219 |         data["rsi_14"] = 70 + (hash(data["ticker"]) % 20)  # High RSI for bear
220 |         data["macd"] = -1 * (hash(data["ticker"]) % 5) / 10  # Negative MACD
221 |         data["macd_signal"] = -0.5 * (hash(data["ticker"]) % 3) / 10
222 |         data["macd_histogram"] = data["macd"] - data["macd_signal"]
223 |         data["dist_days_20"] = hash(data["ticker"]) % 15
224 |         data["atr_contraction"] = hash(data["ticker"]) % 2 == 0
225 |         data["big_down_vol"] = hash(data["ticker"]) % 3 == 0
226 | 
227 |     bear_count = bulk_insert_screening_data(session, MaverickBearStocks, bear_data)
228 |     logger.info(f"Created {bear_count} Bear screening results")
229 | 
230 |     # Top 40% for Supply/Demand breakouts
231 |     supply_demand_data = sorted(
232 |         screening_data, key=lambda x: x["momentum_score"], reverse=True
233 |     )[: int(total_stocks * 0.4)]
234 |     # Add supply/demand specific fields
235 |     for data in supply_demand_data:
236 |         data["accumulation_rating"] = (hash(data["ticker"]) % 8) + 2  # 2-9 rating
237 |         data["distribution_rating"] = 10 - data["accumulation_rating"]
238 |         data["breakout_strength"] = (hash(data["ticker"]) % 7) + 3  # 3-9 rating
239 |         data["avg_volume_30d"] = data["volume"] * 1.2  # 20% above current volume
240 | 
241 |     supply_demand_count = bulk_insert_screening_data(
242 |         session, SupplyDemandBreakoutStocks, supply_demand_data
243 |     )
244 |     logger.info(f"Created {supply_demand_count} Supply/Demand breakout results")
245 | 
246 | 
247 | def create_sample_technical_indicators(session, stocks: dict[str, Stock]) -> None:
248 |     """Create sample technical indicator cache data."""
249 |     logger.info("Creating sample technical indicator cache...")
250 | 
251 |     # Create sample technical data for the last 30 days
252 |     end_date = datetime.now(UTC).date()
253 | 
254 |     indicator_count = 0
255 |     for days_ago in range(30):
256 |         date = end_date - timedelta(days=days_ago)
257 | 
258 |         for ticker, stock in list(stocks.items())[
259 |             :10
260 |         ]:  # Limit to first 10 stocks for demo
261 |             try:
262 |                 # RSI
263 |                 rsi_value = 30 + (hash(f"{ticker}{date}") % 40)  # RSI between 30-70
264 |                 rsi_cache = TechnicalCache(
265 |                     stock_id=stock.stock_id,
266 |                     date=date,
267 |                     indicator_type="RSI_14",
268 |                     value=Decimal(str(rsi_value)),
269 |                     period=14,
270 |                 )
271 |                 session.add(rsi_cache)
272 | 
273 |                 # SMA_20
274 |                 sma_value = 100 + (hash(f"{ticker}{date}sma") % 50)
275 |                 sma_cache = TechnicalCache(
276 |                     stock_id=stock.stock_id,
277 |                     date=date,
278 |                     indicator_type="SMA_20",
279 |                     value=Decimal(str(sma_value)),
280 |                     period=20,
281 |                 )
282 |                 session.add(sma_cache)
283 | 
284 |                 indicator_count += 2
285 | 
286 |             except Exception as e:
287 |                 logger.error(f"Error creating technical indicators for {ticker}: {e}")
288 |                 continue
289 | 
290 |     session.commit()
291 |     logger.info(f"Created {indicator_count} technical indicator cache entries")
292 | 
293 | 
294 | def verify_data(session) -> None:
295 |     """Verify that data was seeded correctly."""
296 |     logger.info("Verifying seeded data...")
297 | 
298 |     # Count records in each table
299 |     stock_count = session.query(Stock).count()
300 |     price_count = session.query(PriceCache).count()
301 |     maverick_count = session.query(MaverickStocks).count()
302 |     bear_count = session.query(MaverickBearStocks).count()
303 |     supply_demand_count = session.query(SupplyDemandBreakoutStocks).count()
304 |     technical_count = session.query(TechnicalCache).count()
305 | 
306 |     logger.info("=== Data Seeding Summary ===")
307 |     logger.info(f"Stocks: {stock_count}")
308 |     logger.info(f"Price records: {price_count}")
309 |     logger.info(f"Maverick screening: {maverick_count}")
310 |     logger.info(f"Bear screening: {bear_count}")
311 |     logger.info(f"Supply/Demand screening: {supply_demand_count}")
312 |     logger.info(f"Technical indicators: {technical_count}")
313 |     logger.info("============================")
314 | 
315 |     # Test a few queries
316 |     if maverick_count > 0:
317 |         top_maverick = (
318 |             session.query(MaverickStocks)
319 |             .order_by(MaverickStocks.combined_score.desc())
320 |             .first()
321 |         )
322 |         if top_maverick and top_maverick.stock:
323 |             logger.info(
324 |                 f"Top Maverick stock: {top_maverick.stock.ticker_symbol} (Score: {top_maverick.combined_score})"
325 |             )
326 | 
327 |     if bear_count > 0:
328 |         top_bear = (
329 |             session.query(MaverickBearStocks)
330 |             .order_by(MaverickBearStocks.score.desc())
331 |             .first()
332 |         )
333 |         if top_bear and top_bear.stock:
334 |             logger.info(
335 |                 f"Top Bear stock: {top_bear.stock.ticker_symbol} (Score: {top_bear.score})"
336 |             )
337 | 
338 | 
339 | def main():
340 |     """Main seeding function."""
341 |     logger.info("Starting MaverickMCP database seeding...")
342 | 
343 |     # No API key required for yfinance provider
344 |     logger.info("Using yfinance for stock data - no API key required")
345 | 
346 |     # Set up database connection
347 |     database_url = get_database_url()
348 |     engine = create_engine(database_url, echo=False)
349 |     SessionLocal = sessionmaker(bind=engine)
350 | 
351 |     # Set up stock data provider
352 |     stock_provider = None
353 | 
354 |     with SessionLocal() as session:
355 |         try:
356 |             # Get all stock tickers
357 |             all_tickers = []
358 |             for _category, tickers in SAMPLE_STOCKS.items():
359 |                 all_tickers.extend(tickers)
360 | 
361 |             logger.info(
362 |                 f"Seeding database with {len(all_tickers)} stocks across {len(SAMPLE_STOCKS)} categories"
363 |             )
364 | 
365 |             # Set up provider with session
366 |             stock_provider = setup_stock_provider(session)
367 | 
368 |             # Create stocks
369 |             stocks = create_sample_stocks(session, all_tickers)
370 | 
371 |             if not stocks:
372 |                 logger.error("No stocks created. Exiting.")
373 |                 return False
374 | 
375 |             # Fetch price data (this takes time, so we'll do a subset)
376 |             price_stocks = {
377 |                 k: v for i, (k, v) in enumerate(stocks.items()) if i < 10
378 |             }  # First 10 stocks
379 |             fetch_and_store_price_data(session, stock_provider, price_stocks)
380 | 
381 |             # Create screening results
382 |             create_sample_screening_results(session, stocks)
383 | 
384 |             # Create technical indicators
385 |             create_sample_technical_indicators(session, stocks)
386 | 
387 |             # Verify data
388 |             verify_data(session)
389 | 
390 |             logger.info("✅ Database seeding completed successfully!")
391 |             return True
392 | 
393 |         except Exception as e:
394 |             logger.error(f"Database seeding failed: {e}")
395 |             session.rollback()
396 |             return False
397 | 
398 | 
399 | if __name__ == "__main__":
400 |     success = main()
401 |     if not success:
402 |         sys.exit(1)
403 | 
```
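
`get_database_url()` reads `DATABASE_URL` and falls back to a local SQLite file, so pointing the seeder at another database is just a matter of exporting that variable before running the script. A sketch, with an illustrative connection string and paths:

```python
# Run the seeder against a specific database by exporting DATABASE_URL for the
# subprocess; equivalent to `DATABASE_URL=... python scripts/seed_db.py`.
import os
import subprocess
import sys

env = os.environ.copy()
env["DATABASE_URL"] = "postgresql://user:pass@localhost:5432/maverick_mcp"

result = subprocess.run([sys.executable, "scripts/seed_db.py"], env=env)
raise SystemExit(result.returncode)
```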

--------------------------------------------------------------------------------
/alembic/versions/010_self_contained_schema.py:
--------------------------------------------------------------------------------

```python
  1 | """Create self-contained schema with mcp_ prefixed tables
  2 | 
  3 | Revision ID: 010_self_contained_schema
  4 | Revises: 009_rename_to_supply_demand
  5 | Create Date: 2025-01-31
  6 | 
  7 | This migration creates a complete self-contained schema for maverick-mcp
  8 | with all tables prefixed with 'mcp_' to avoid conflicts with external systems.
  9 | 
 10 | Tables created:
 11 | - mcp_stocks: Master stock information
 12 | - mcp_price_cache: Historical price data
 13 | - mcp_maverick_stocks: Maverick screening results
 14 | - mcp_maverick_bear_stocks: Bear market screening results
 15 | - mcp_supply_demand_breakouts: Supply/demand analysis
 16 | - mcp_technical_cache: Technical indicator cache
 17 | """
 18 | 
 19 | import sqlalchemy as sa
 20 | from sqlalchemy.dialects import postgresql
 21 | 
 22 | from alembic import op
 23 | 
 24 | # revision identifiers
 25 | revision = "010_self_contained_schema"
 26 | down_revision = "009_rename_to_supply_demand"
 27 | branch_labels = None
 28 | depends_on = None
 29 | 
 30 | 
 31 | def upgrade():
 32 |     """Create self-contained schema with all mcp_ prefixed tables."""
 33 | 
 34 |     # Check if we're using PostgreSQL or SQLite
 35 |     op.get_bind()
 36 | 
 37 |     print("🚀 Creating self-contained maverick-mcp schema...")
 38 | 
 39 |     # 1. Create mcp_stocks table (master stock data)
 40 |     print("📊 Creating mcp_stocks table...")
 41 |     op.create_table(
 42 |         "mcp_stocks",
 43 |         sa.Column("stock_id", postgresql.UUID(as_uuid=True), primary_key=True),
 44 |         sa.Column(
 45 |             "ticker_symbol", sa.String(10), nullable=False, unique=True, index=True
 46 |         ),
 47 |         sa.Column("company_name", sa.String(255)),
 48 |         sa.Column("description", sa.Text()),
 49 |         sa.Column("sector", sa.String(100)),
 50 |         sa.Column("industry", sa.String(100)),
 51 |         sa.Column("exchange", sa.String(50)),
 52 |         sa.Column("country", sa.String(50)),
 53 |         sa.Column("currency", sa.String(3)),
 54 |         sa.Column("isin", sa.String(12)),
 55 |         sa.Column("market_cap", sa.BigInteger()),
 56 |         sa.Column("shares_outstanding", sa.BigInteger()),
 57 |         sa.Column("is_etf", sa.Boolean(), default=False),
 58 |         sa.Column("is_active", sa.Boolean(), default=True, index=True),
 59 |         sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
 60 |         sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
 61 |     )
 62 | 
 63 |     # Create indexes for mcp_stocks
 64 |     op.create_index("mcp_stocks_ticker_idx", "mcp_stocks", ["ticker_symbol"])
 65 |     op.create_index("mcp_stocks_sector_idx", "mcp_stocks", ["sector"])
 66 |     op.create_index("mcp_stocks_exchange_idx", "mcp_stocks", ["exchange"])
 67 | 
 68 |     # 2. Create mcp_price_cache table
 69 |     print("💰 Creating mcp_price_cache table...")
 70 |     op.create_table(
 71 |         "mcp_price_cache",
 72 |         sa.Column("price_cache_id", postgresql.UUID(as_uuid=True), primary_key=True),
 73 |         sa.Column(
 74 |             "stock_id",
 75 |             postgresql.UUID(as_uuid=True),
 76 |             sa.ForeignKey("mcp_stocks.stock_id"),
 77 |             nullable=False,
 78 |         ),
 79 |         sa.Column("date", sa.Date(), nullable=False),
 80 |         sa.Column("open_price", sa.Numeric(12, 4)),
 81 |         sa.Column("high_price", sa.Numeric(12, 4)),
 82 |         sa.Column("low_price", sa.Numeric(12, 4)),
 83 |         sa.Column("close_price", sa.Numeric(12, 4)),
 84 |         sa.Column("volume", sa.BigInteger()),
 85 |         sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
 86 |         sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
 87 |     )
 88 | 
 89 |     # Create unique constraint and indexes for price cache
 90 |     op.create_unique_constraint(
 91 |         "mcp_price_cache_stock_date_unique", "mcp_price_cache", ["stock_id", "date"]
 92 |     )
 93 |     op.create_index(
 94 |         "mcp_price_cache_stock_id_date_idx", "mcp_price_cache", ["stock_id", "date"]
 95 |     )
 96 |     op.create_index("mcp_price_cache_date_idx", "mcp_price_cache", ["date"])
 97 | 
 98 |     # 3. Create mcp_maverick_stocks table
 99 |     print("🎯 Creating mcp_maverick_stocks table...")
100 |     op.create_table(
101 |         "mcp_maverick_stocks",
102 |         sa.Column("id", sa.BigInteger(), primary_key=True, autoincrement=True),
103 |         sa.Column(
104 |             "stock_id",
105 |             postgresql.UUID(as_uuid=True),
106 |             sa.ForeignKey("mcp_stocks.stock_id"),
107 |             nullable=False,
108 |             index=True,
109 |         ),
110 |         sa.Column("date_analyzed", sa.Date(), nullable=False),
111 |         # OHLCV Data
112 |         sa.Column("open_price", sa.Numeric(12, 4), default=0),
113 |         sa.Column("high_price", sa.Numeric(12, 4), default=0),
114 |         sa.Column("low_price", sa.Numeric(12, 4), default=0),
115 |         sa.Column("close_price", sa.Numeric(12, 4), default=0),
116 |         sa.Column("volume", sa.BigInteger(), default=0),
117 |         # Technical Indicators
118 |         sa.Column("ema_21", sa.Numeric(12, 4), default=0),
119 |         sa.Column("sma_50", sa.Numeric(12, 4), default=0),
120 |         sa.Column("sma_150", sa.Numeric(12, 4), default=0),
121 |         sa.Column("sma_200", sa.Numeric(12, 4), default=0),
122 |         sa.Column("rs_rating", sa.Numeric(5, 2), default=0),
123 |         sa.Column("avg_vol_30d", sa.Numeric(15, 2), default=0),
124 |         sa.Column("adr_pct", sa.Numeric(5, 2), default=0),
125 |         sa.Column("atr", sa.Numeric(12, 4), default=0),
126 |         # Pattern Analysis
127 |         sa.Column("pattern_type", sa.String(50)),
128 |         sa.Column("squeeze_status", sa.String(50)),
129 |         sa.Column("vcp_status", sa.String(50)),
130 |         sa.Column("entry_signal", sa.String(50)),
131 |         # Scoring
132 |         sa.Column("compression_score", sa.Integer(), default=0),
133 |         sa.Column("pattern_detected", sa.Integer(), default=0),
134 |         sa.Column("combined_score", sa.Integer(), default=0),
135 |         sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
136 |         sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
137 |     )
138 | 
139 |     # Create indexes for maverick stocks
140 |     op.create_index(
141 |         "mcp_maverick_stocks_combined_score_idx",
142 |         "mcp_maverick_stocks",
143 |         ["combined_score"],
144 |     )
145 |     op.create_index(
146 |         "mcp_maverick_stocks_rs_rating_idx", "mcp_maverick_stocks", ["rs_rating"]
147 |     )
148 |     op.create_index(
149 |         "mcp_maverick_stocks_date_analyzed_idx",
150 |         "mcp_maverick_stocks",
151 |         ["date_analyzed"],
152 |     )
153 |     op.create_index(
154 |         "mcp_maverick_stocks_stock_date_idx",
155 |         "mcp_maverick_stocks",
156 |         ["stock_id", "date_analyzed"],
157 |     )
158 | 
159 |     # 4. Create mcp_maverick_bear_stocks table
160 |     print("🐻 Creating mcp_maverick_bear_stocks table...")
161 |     op.create_table(
162 |         "mcp_maverick_bear_stocks",
163 |         sa.Column("id", sa.BigInteger(), primary_key=True, autoincrement=True),
164 |         sa.Column(
165 |             "stock_id",
166 |             postgresql.UUID(as_uuid=True),
167 |             sa.ForeignKey("mcp_stocks.stock_id"),
168 |             nullable=False,
169 |             index=True,
170 |         ),
171 |         sa.Column("date_analyzed", sa.Date(), nullable=False),
172 |         # OHLCV Data
173 |         sa.Column("open_price", sa.Numeric(12, 4), default=0),
174 |         sa.Column("high_price", sa.Numeric(12, 4), default=0),
175 |         sa.Column("low_price", sa.Numeric(12, 4), default=0),
176 |         sa.Column("close_price", sa.Numeric(12, 4), default=0),
177 |         sa.Column("volume", sa.BigInteger(), default=0),
178 |         # Technical Indicators
179 |         sa.Column("rs_rating", sa.Numeric(5, 2), default=0),
180 |         sa.Column("ema_21", sa.Numeric(12, 4), default=0),
181 |         sa.Column("sma_50", sa.Numeric(12, 4), default=0),
182 |         sa.Column("sma_200", sa.Numeric(12, 4), default=0),
183 |         sa.Column("rsi_14", sa.Numeric(5, 2), default=0),
184 |         # MACD Indicators
185 |         sa.Column("macd", sa.Numeric(12, 6), default=0),
186 |         sa.Column("macd_signal", sa.Numeric(12, 6), default=0),
187 |         sa.Column("macd_histogram", sa.Numeric(12, 6), default=0),
188 |         # Bear Market Indicators
189 |         sa.Column("dist_days_20", sa.Integer(), default=0),
190 |         sa.Column("adr_pct", sa.Numeric(5, 2), default=0),
191 |         sa.Column("atr_contraction", sa.Boolean(), default=False),
192 |         sa.Column("atr", sa.Numeric(12, 4), default=0),
193 |         sa.Column("avg_vol_30d", sa.Numeric(15, 2), default=0),
194 |         sa.Column("big_down_vol", sa.Boolean(), default=False),
195 |         # Pattern Analysis
196 |         sa.Column("squeeze_status", sa.String(50)),
197 |         sa.Column("vcp_status", sa.String(50)),
198 |         # Scoring
199 |         sa.Column("score", sa.Integer(), default=0),
200 |         sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
201 |         sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
202 |     )
203 | 
204 |     # Create indexes for bear stocks
205 |     op.create_index(
206 |         "mcp_maverick_bear_stocks_score_idx", "mcp_maverick_bear_stocks", ["score"]
207 |     )
208 |     op.create_index(
209 |         "mcp_maverick_bear_stocks_rs_rating_idx",
210 |         "mcp_maverick_bear_stocks",
211 |         ["rs_rating"],
212 |     )
213 |     op.create_index(
214 |         "mcp_maverick_bear_stocks_date_analyzed_idx",
215 |         "mcp_maverick_bear_stocks",
216 |         ["date_analyzed"],
217 |     )
218 |     op.create_index(
219 |         "mcp_maverick_bear_stocks_stock_date_idx",
220 |         "mcp_maverick_bear_stocks",
221 |         ["stock_id", "date_analyzed"],
222 |     )
223 | 
224 |     # 5. Create mcp_supply_demand_breakouts table
225 |     print("📈 Creating mcp_supply_demand_breakouts table...")
226 |     op.create_table(
227 |         "mcp_supply_demand_breakouts",
228 |         sa.Column("id", sa.BigInteger(), primary_key=True, autoincrement=True),
229 |         sa.Column(
230 |             "stock_id",
231 |             postgresql.UUID(as_uuid=True),
232 |             sa.ForeignKey("mcp_stocks.stock_id"),
233 |             nullable=False,
234 |             index=True,
235 |         ),
236 |         sa.Column("date_analyzed", sa.Date(), nullable=False),
237 |         # OHLCV Data
238 |         sa.Column("open_price", sa.Numeric(12, 4), default=0),
239 |         sa.Column("high_price", sa.Numeric(12, 4), default=0),
240 |         sa.Column("low_price", sa.Numeric(12, 4), default=0),
241 |         sa.Column("close_price", sa.Numeric(12, 4), default=0),
242 |         sa.Column("volume", sa.BigInteger(), default=0),
243 |         # Technical Indicators
244 |         sa.Column("ema_21", sa.Numeric(12, 4), default=0),
245 |         sa.Column("sma_50", sa.Numeric(12, 4), default=0),
246 |         sa.Column("sma_150", sa.Numeric(12, 4), default=0),
247 |         sa.Column("sma_200", sa.Numeric(12, 4), default=0),
248 |         sa.Column("rs_rating", sa.Numeric(5, 2), default=0),
249 |         sa.Column("avg_volume_30d", sa.Numeric(15, 2), default=0),
250 |         sa.Column("adr_pct", sa.Numeric(5, 2), default=0),
251 |         sa.Column("atr", sa.Numeric(12, 4), default=0),
252 |         # Pattern Analysis
253 |         sa.Column("pattern_type", sa.String(50)),
254 |         sa.Column("squeeze_status", sa.String(50)),
255 |         sa.Column("vcp_status", sa.String(50)),
256 |         sa.Column("entry_signal", sa.String(50)),
257 |         # Supply/Demand Analysis
258 |         sa.Column("accumulation_rating", sa.Numeric(5, 2), default=0),
259 |         sa.Column("distribution_rating", sa.Numeric(5, 2), default=0),
260 |         sa.Column("breakout_strength", sa.Numeric(5, 2), default=0),
261 |         sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
262 |         sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
263 |     )
264 | 
265 |     # Create indexes for supply/demand breakouts
266 |     op.create_index(
267 |         "mcp_supply_demand_breakouts_rs_rating_idx",
268 |         "mcp_supply_demand_breakouts",
269 |         ["rs_rating"],
270 |     )
271 |     op.create_index(
272 |         "mcp_supply_demand_breakouts_date_analyzed_idx",
273 |         "mcp_supply_demand_breakouts",
274 |         ["date_analyzed"],
275 |     )
276 |     op.create_index(
277 |         "mcp_supply_demand_breakouts_stock_date_idx",
278 |         "mcp_supply_demand_breakouts",
279 |         ["stock_id", "date_analyzed"],
280 |     )
281 |     op.create_index(
282 |         "mcp_supply_demand_breakouts_ma_filter_idx",
283 |         "mcp_supply_demand_breakouts",
284 |         ["close_price", "sma_50", "sma_150", "sma_200"],
285 |     )
286 | 
287 |     # 6. Create mcp_technical_cache table
288 |     print("🔧 Creating mcp_technical_cache table...")
289 |     op.create_table(
290 |         "mcp_technical_cache",
291 |         sa.Column("id", sa.BigInteger(), primary_key=True, autoincrement=True),
292 |         sa.Column(
293 |             "stock_id",
294 |             postgresql.UUID(as_uuid=True),
295 |             sa.ForeignKey("mcp_stocks.stock_id"),
296 |             nullable=False,
297 |         ),
298 |         sa.Column("date", sa.Date(), nullable=False),
299 |         sa.Column("indicator_type", sa.String(50), nullable=False),
300 |         # Flexible indicator values
301 |         sa.Column("value", sa.Numeric(20, 8)),
302 |         sa.Column("value_2", sa.Numeric(20, 8)),
303 |         sa.Column("value_3", sa.Numeric(20, 8)),
304 |         # Metadata and parameters
305 |         sa.Column("metadata", sa.Text()),
306 |         sa.Column("period", sa.Integer()),
307 |         sa.Column("parameters", sa.Text()),
308 |         sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
309 |         sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
310 |     )
311 | 
312 |     # Create unique constraint and indexes for technical cache
313 |     op.create_unique_constraint(
314 |         "mcp_technical_cache_stock_date_indicator_unique",
315 |         "mcp_technical_cache",
316 |         ["stock_id", "date", "indicator_type"],
317 |     )
318 |     op.create_index(
319 |         "mcp_technical_cache_stock_date_idx",
320 |         "mcp_technical_cache",
321 |         ["stock_id", "date"],
322 |     )
323 |     op.create_index(
324 |         "mcp_technical_cache_indicator_idx", "mcp_technical_cache", ["indicator_type"]
325 |     )
326 |     op.create_index("mcp_technical_cache_date_idx", "mcp_technical_cache", ["date"])
327 | 
328 |     print("✅ Self-contained schema created successfully!")
329 |     print("📋 Tables created:")
330 |     print("   - mcp_stocks (master stock data)")
331 |     print("   - mcp_price_cache (historical prices)")
332 |     print("   - mcp_maverick_stocks (maverick screening)")
333 |     print("   - mcp_maverick_bear_stocks (bear screening)")
334 |     print("   - mcp_supply_demand_breakouts (supply/demand analysis)")
335 |     print("   - mcp_technical_cache (technical indicators)")
336 |     print("🎯 Maverick-MCP is now completely self-contained!")
337 | 
338 | 
339 | def downgrade():
340 |     """Drop all self-contained tables."""
341 |     print("⚠️  Dropping self-contained maverick-mcp schema...")
342 | 
343 |     # Drop tables in reverse order due to foreign key constraints
344 |     tables = [
345 |         "mcp_technical_cache",
346 |         "mcp_supply_demand_breakouts",
347 |         "mcp_maverick_bear_stocks",
348 |         "mcp_maverick_stocks",
349 |         "mcp_price_cache",
350 |         "mcp_stocks",
351 |     ]
352 | 
353 |     for table in tables:
354 |         print(f"🗑️  Dropping {table}...")
355 |         op.drop_table(table)
356 | 
357 |     print("✅ Self-contained schema removed!")
358 | 
```
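For orientation, below is a minimal sketch of the kind of screening read the indexes in this migration are designed to serve. The table and column names are taken directly from the migration above; the connection string and filter values are hypothetical, and the actual application code may query these tables through its ORM models instead.

```python
# Illustrative only: a ranked screening query against the tables created above.
from sqlalchemy import create_engine, text

engine = create_engine("postgresql://localhost/maverick_mcp")  # hypothetical DSN

with engine.connect() as conn:
    rows = conn.execute(
        text(
            """
            SELECT stock_id, date_analyzed, combined_score, rs_rating
            FROM mcp_maverick_stocks
            WHERE date_analyzed = :day
            ORDER BY combined_score DESC
            LIMIT 20
            """
        ),
        {"day": "2024-01-02"},
    ).fetchall()
    # mcp_maverick_stocks_date_analyzed_idx narrows the scan to one analysis day,
    # and mcp_maverick_stocks_combined_score_idx supports the ranking sort.
```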

--------------------------------------------------------------------------------
/tests/test_performance_optimizations.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Tests for performance optimization systems.
  3 | 
  4 | This module tests the Redis connection pooling, request caching,
  5 | query optimization, and database index improvements.
  6 | """
  7 | 
  8 | import asyncio
  9 | import time
 10 | from unittest.mock import AsyncMock, MagicMock, patch
 11 | 
 12 | import pytest
 13 | 
 14 | from maverick_mcp.data.performance import (
 15 |     QueryOptimizer,
 16 |     RedisConnectionManager,
 17 |     RequestCache,
 18 |     cached,
 19 |     get_performance_metrics,
 20 |     initialize_performance_systems,
 21 |     redis_manager,
 22 |     request_cache,
 23 | )
 24 | from maverick_mcp.providers.optimized_stock_data import OptimizedStockDataProvider
 25 | from maverick_mcp.tools.performance_monitoring import (
 26 |     analyze_database_indexes,
 27 |     get_cache_performance_metrics,
 28 |     get_comprehensive_performance_report,
 29 |     get_redis_connection_health,
 30 | )
 31 | 
 32 | 
 33 | class TestRedisConnectionManager:
 34 |     """Test Redis connection pooling and management."""
 35 | 
 36 |     @pytest.mark.asyncio
 37 |     async def test_redis_manager_initialization(self):
 38 |         """Test Redis connection manager initialization."""
 39 |         manager = RedisConnectionManager()
 40 | 
 41 |         # Test initialization
 42 |         with patch("redis.asyncio.ConnectionPool.from_url") as mock_pool:
 43 |             mock_pool.return_value = MagicMock()
 44 | 
 45 |             with patch("redis.asyncio.Redis") as mock_redis:
 46 |                 mock_client = AsyncMock()
 47 |                 mock_redis.return_value = mock_client
 48 | 
 49 |                 success = await manager.initialize()
 50 | 
 51 |                 assert isinstance(success, bool)  # May fail in test environment
 52 |                 mock_pool.assert_called_once()
 53 | 
 54 |     @pytest.mark.asyncio
 55 |     async def test_redis_manager_health_check(self):
 56 |         """Test Redis health check functionality."""
 57 |         manager = RedisConnectionManager()
 58 | 
 59 |         with patch.object(manager, "_client") as mock_client:
 60 |             mock_client.ping = AsyncMock()
 61 | 
 62 |             # Test successful health check
 63 |             result = await manager._health_check()
 64 |             assert isinstance(result, bool)
 65 | 
 66 |     @pytest.mark.asyncio
 67 |     async def test_redis_manager_command_execution(self):
 68 |         """Test Redis command execution with error handling."""
 69 |         manager = RedisConnectionManager()
 70 | 
 71 |         with patch.object(manager, "get_client") as mock_get_client:
 72 |             mock_client = AsyncMock()
 73 |             mock_client.get.return_value = "test_value"
 74 |             mock_get_client.return_value = mock_client
 75 | 
 76 |             result = await manager.execute_command("get", "test_key")
 77 |             assert result == "test_value"
 78 |             mock_client.get.assert_called_once_with("test_key")
 79 | 
 80 |     def test_redis_manager_metrics(self):
 81 |         """Test Redis connection metrics collection."""
 82 |         manager = RedisConnectionManager()
 83 |         metrics = manager.get_metrics()
 84 | 
 85 |         assert isinstance(metrics, dict)
 86 |         assert "healthy" in metrics
 87 |         assert "initialized" in metrics
 88 |         assert "connections_created" in metrics
 89 | 
 90 | 
 91 | class TestRequestCache:
 92 |     """Test request-level caching system."""
 93 | 
 94 |     @pytest.mark.asyncio
 95 |     async def test_cache_key_generation(self):
 96 |         """Test cache key generation from function arguments."""
 97 |         cache = RequestCache()
 98 | 
 99 |         key1 = cache._generate_cache_key("test", "arg1", "arg2", kwarg1="value1")
100 |         key2 = cache._generate_cache_key("test", "arg1", "arg2", kwarg1="value1")
101 |         key3 = cache._generate_cache_key("test", "arg1", "arg3", kwarg1="value1")
102 | 
103 |         assert key1 == key2  # Same arguments should generate same key
104 |         assert key1 != key3  # Different arguments should generate different keys
105 |         assert key1.startswith("cache:test:")
106 | 
107 |     @pytest.mark.asyncio
108 |     async def test_cache_ttl_configuration(self):
109 |         """Test TTL configuration for different data types."""
110 |         cache = RequestCache()
111 | 
112 |         assert cache._get_ttl("stock_data") == 3600
113 |         assert cache._get_ttl("technical_analysis") == 1800
114 |         assert cache._get_ttl("market_data") == 300
115 |         assert cache._get_ttl("unknown") == cache._default_ttls["default"]
116 | 
117 |     @pytest.mark.asyncio
118 |     async def test_cache_operations(self):
119 |         """Test basic cache operations."""
120 |         cache = RequestCache()
121 | 
122 |         with patch.object(redis_manager, "get_client") as mock_get_client:
123 |             mock_client = AsyncMock()
124 |             mock_get_client.return_value = mock_client
125 | 
126 |             # Test cache miss
127 |             mock_client.get.return_value = None
128 |             result = await cache.get("test_key")
129 |             assert result is None
130 | 
131 |             # Test cache set
132 |             mock_client.setex.return_value = True
133 |             await cache.set("test_key", {"data": "value"}, ttl=60)
134 |             mock_client.setex.assert_called_once()
135 | 
136 |     def test_cache_metrics(self):
137 |         """Test cache metrics collection."""
138 |         cache = RequestCache()
139 |         cache._hit_count = 10
140 |         cache._miss_count = 5
141 | 
142 |         metrics = cache.get_metrics()
143 | 
144 |         assert metrics["hit_count"] == 10
145 |         assert metrics["miss_count"] == 5
146 |         assert metrics["total_requests"] == 15
147 |         assert metrics["hit_rate"] == 2 / 3
148 | 
149 | 
150 | class TestQueryOptimizer:
151 |     """Test database query optimization."""
152 | 
153 |     def test_query_monitoring_decorator(self):
154 |         """Test query monitoring decorator functionality."""
155 |         optimizer = QueryOptimizer()
156 | 
157 |         @optimizer.monitor_query("test_query")
158 |         async def test_query():
159 |             await asyncio.sleep(0.1)  # Simulate query time
160 |             return "result"
161 | 
162 |         # This would need to be run to test properly
163 |         assert hasattr(test_query, "__name__")
164 | 
165 |     def test_query_stats_collection(self):
166 |         """Test query statistics collection."""
167 |         optimizer = QueryOptimizer()
168 | 
169 |         # Simulate some query stats
170 |         optimizer._query_stats["test_query"] = {
171 |             "count": 5,
172 |             "total_time": 2.5,
173 |             "avg_time": 0.5,
174 |             "max_time": 1.0,
175 |             "min_time": 0.1,
176 |         }
177 | 
178 |         stats = optimizer.get_query_stats()
179 |         assert "query_stats" in stats
180 |         assert "test_query" in stats["query_stats"]
181 |         assert stats["query_stats"]["test_query"]["avg_time"] == 0.5
182 | 
183 | 
184 | class TestCachedDecorator:
185 |     """Test the cached decorator functionality."""
186 | 
187 |     @pytest.mark.asyncio
188 |     async def test_cached_decorator_basic(self):
189 |         """Test basic cached decorator functionality."""
190 |         call_count = 0
191 | 
192 |         @cached(data_type="test", ttl=60)
193 |         async def test_function(arg1, arg2="default"):
194 |             nonlocal call_count
195 |             call_count += 1
196 |             return f"result_{arg1}_{arg2}"
197 | 
198 |         with patch.object(request_cache, "get") as mock_get:
199 |             with patch.object(request_cache, "set") as mock_set:
200 |                 # First call - cache miss
201 |                 mock_get.return_value = None
202 |                 mock_set.return_value = True
203 | 
204 |                 result1 = await test_function("test", arg2="value")
205 |                 assert result1 == "result_test_value"
206 |                 assert call_count == 1
207 | 
208 |                 # Second call - cache hit
209 |                 mock_get.return_value = "cached_result"
210 | 
211 |                 result2 = await test_function("test", arg2="value")
212 |                 assert result2 == "cached_result"
213 |                 assert call_count == 1  # Function not called again
214 | 
215 | 
216 | class TestOptimizedStockDataProvider:
217 |     """Test the optimized stock data provider."""
218 | 
219 |     @pytest.fixture
220 |     def provider(self):
221 |         """Create an optimized stock data provider instance."""
222 |         return OptimizedStockDataProvider()
223 | 
224 |     @pytest.mark.asyncio
225 |     async def test_provider_caching_configuration(self, provider):
226 |         """Test provider caching configuration."""
227 |         assert provider.cache_ttl_stock_data == 3600
228 |         assert provider.cache_ttl_screening == 7200
229 |         assert provider.cache_ttl_market_data == 300
230 | 
231 |     @pytest.mark.asyncio
232 |     async def test_provider_performance_metrics(self, provider):
233 |         """Test provider performance metrics collection."""
234 |         metrics = await provider.get_performance_metrics()
235 | 
236 |         assert "cache_metrics" in metrics
237 |         assert "query_stats" in metrics
238 |         assert "cache_ttl_config" in metrics
239 | 
240 | 
241 | class TestPerformanceMonitoring:
242 |     """Test performance monitoring tools."""
243 | 
244 |     @pytest.mark.asyncio
245 |     async def test_redis_health_check(self):
246 |         """Test Redis health check functionality."""
247 |         with patch.object(redis_manager, "get_client") as mock_get_client:
248 |             mock_client = AsyncMock()
249 |             mock_client.set.return_value = True
250 |             mock_client.get.return_value = "test_value"
251 |             mock_client.delete.return_value = 1
252 |             mock_get_client.return_value = mock_client
253 | 
254 |             health = await get_redis_connection_health()
255 | 
256 |             assert "redis_health" in health
257 |             assert "connection_metrics" in health
258 |             assert "timestamp" in health
259 | 
260 |     @pytest.mark.asyncio
261 |     async def test_cache_performance_metrics(self):
262 |         """Test cache performance metrics collection."""
263 |         with patch.object(request_cache, "set") as mock_set:
264 |             with patch.object(request_cache, "get") as mock_get:
265 |                 with patch.object(request_cache, "delete") as mock_delete:
266 |                     mock_set.return_value = True
267 |                     mock_get.return_value = {"test": "data", "timestamp": time.time()}
268 |                     mock_delete.return_value = True
269 | 
270 |                     metrics = await get_cache_performance_metrics()
271 | 
272 |                     assert "cache_performance" in metrics
273 |                     assert "performance_test" in metrics
274 | 
275 |     @pytest.mark.asyncio
276 |     async def test_comprehensive_performance_report(self):
277 |         """Test comprehensive performance report generation."""
278 |         with patch(
279 |             "maverick_mcp.tools.performance_monitoring.get_redis_connection_health"
280 |         ) as mock_redis:
281 |             with patch(
282 |                 "maverick_mcp.tools.performance_monitoring.get_cache_performance_metrics"
283 |             ) as mock_cache:
284 |                 with patch(
285 |                     "maverick_mcp.tools.performance_monitoring.get_query_performance_metrics"
286 |                 ) as mock_query:
287 |                     with patch(
288 |                         "maverick_mcp.tools.performance_monitoring.analyze_database_indexes"
289 |                     ) as mock_indexes:
290 |                         mock_redis.return_value = {
291 |                             "redis_health": {"status": "healthy"}
292 |                         }
293 |                         mock_cache.return_value = {
294 |                             "cache_performance": {"hit_rate": 0.85}
295 |                         }
296 |                         mock_query.return_value = {
297 |                             "query_performance": {"query_stats": {}}
298 |                         }
299 |                         mock_indexes.return_value = {"poor_index_usage": []}
300 | 
301 |                         report = await get_comprehensive_performance_report()
302 | 
303 |                         assert "overall_health_score" in report
304 |                         assert "component_scores" in report
305 |                         assert "recommendations" in report
306 |                         assert "detailed_metrics" in report
307 | 
308 | 
309 | class TestPerformanceInitialization:
310 |     """Test performance system initialization."""
311 | 
312 |     @pytest.mark.asyncio
313 |     async def test_performance_systems_initialization(self):
314 |         """Test initialization of all performance systems."""
315 |         with patch.object(redis_manager, "initialize") as mock_init:
316 |             mock_init.return_value = True
317 | 
318 |             result = await initialize_performance_systems()
319 | 
320 |             assert isinstance(result, dict)
321 |             assert "redis_manager" in result
322 |             assert "request_cache" in result
323 |             assert "query_optimizer" in result
324 | 
325 |     @pytest.mark.asyncio
326 |     async def test_performance_metrics_collection(self):
327 |         """Test comprehensive performance metrics collection."""
328 |         with patch.object(redis_manager, "get_metrics") as mock_redis_metrics:
329 |             with patch.object(request_cache, "get_metrics") as mock_cache_metrics:
330 |                 mock_redis_metrics.return_value = {"healthy": True}
331 |                 mock_cache_metrics.return_value = {"hit_rate": 0.8}
332 | 
333 |                 metrics = await get_performance_metrics()
334 | 
335 |                 assert "redis_manager" in metrics
336 |                 assert "request_cache" in metrics
337 |                 assert "query_optimizer" in metrics
338 |                 assert "timestamp" in metrics
339 | 
340 | 
341 | class TestDatabaseIndexAnalysis:
342 |     """Test database index analysis functionality."""
343 | 
344 |     @pytest.mark.asyncio
345 |     @pytest.mark.integration
346 |     async def test_database_index_analysis(self):
347 |         """Test database index analysis (integration test)."""
348 |         try:
349 |             from maverick_mcp.data.models import get_db
350 | 
351 |             session = next(get_db())
352 |             try:
353 |                 # Test that the analysis doesn't crash
354 |                 # Actual results depend on database state
355 |                 recommendations = await analyze_database_indexes()
356 | 
357 |                 # Should return a dictionary structure
358 |                 assert isinstance(recommendations, dict)
359 | 
360 |             finally:
361 |                 session.close()
362 | 
363 |         except Exception as e:
364 |             # Database may not be available in test environment
365 |             pytest.skip(f"Database not available for integration test: {e}")
366 | 
367 | 
368 | @pytest.mark.asyncio
369 | async def test_performance_system_integration():
370 |     """Test integration between all performance systems."""
371 |     # This is a basic integration test that ensures the systems can work together
372 |     try:
373 |         # Initialize systems
374 |         init_result = await initialize_performance_systems()
375 |         assert isinstance(init_result, dict)
376 | 
377 |         # Get metrics
378 |         metrics = await get_performance_metrics()
379 |         assert isinstance(metrics, dict)
380 | 
381 |         # Test caching
382 |         cache_result = await request_cache.set("test_key", "test_value", ttl=60)
383 |         await request_cache.get("test_key")
384 | 
385 |         # Clean up
386 |         if cache_result:
387 |             await request_cache.delete("test_key")
388 | 
389 |     except Exception as e:
390 |         # Some operations may fail in test environment
391 |         pytest.skip(f"Integration test skipped due to environment: {e}")
392 | 
393 | 
394 | if __name__ == "__main__":
395 |     # Run basic tests
396 |     pytest.main([__file__, "-v"])
397 | 
```
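As a usage note, the sketch below shows how the systems exercised by these tests might be wired together at application startup. It assumes only the call shapes visible in the imports and assertions above: `initialize_performance_systems()` and `get_performance_metrics()` are async and return dicts, and `cached(data_type=..., ttl=...)` decorates async functions. The symbol name and the wrapped loader are hypothetical.

```python
# A minimal sketch based on the names imported in the test module above.
import asyncio

from maverick_mcp.data.performance import (
    cached,
    get_performance_metrics,
    initialize_performance_systems,
)


@cached(data_type="stock_data", ttl=3600)
async def load_stock_snapshot(symbol: str) -> dict:
    # Placeholder for a real data fetch; with Redis available, repeated calls
    # for the same symbol should be served from the request cache.
    return {"symbol": symbol}


async def main() -> None:
    status = await initialize_performance_systems()
    print("initialization:", status)

    await load_stock_snapshot("AAPL")
    await load_stock_snapshot("AAPL")  # expected cache hit on the second call

    print("metrics:", await get_performance_metrics())


if __name__ == "__main__":
    asyncio.run(main())
```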