This is page 7 of 39. Use http://codebase.md/wshobson/maverick-mcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .dockerignore
├── .env.example
├── .github
│   ├── dependabot.yml
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── config.yml
│   │   ├── feature_request.md
│   │   ├── question.md
│   │   └── security_report.md
│   ├── pull_request_template.md
│   └── workflows
│       ├── claude-code-review.yml
│       └── claude.yml
├── .gitignore
├── .python-version
├── .vscode
│   ├── launch.json
│   └── settings.json
├── alembic
│   ├── env.py
│   ├── script.py.mako
│   └── versions
│       ├── 001_initial_schema.py
│       ├── 003_add_performance_indexes.py
│       ├── 006_rename_metadata_columns.py
│       ├── 008_performance_optimization_indexes.py
│       ├── 009_rename_to_supply_demand.py
│       ├── 010_self_contained_schema.py
│       ├── 011_remove_proprietary_terms.py
│       ├── 013_add_backtest_persistence_models.py
│       ├── 014_add_portfolio_models.py
│       ├── 08e3945a0c93_merge_heads.py
│       ├── 9374a5c9b679_merge_heads_for_testing.py
│       ├── abf9b9afb134_merge_multiple_heads.py
│       ├── adda6d3fd84b_merge_proprietary_terms_removal_with_.py
│       ├── e0c75b0bdadb_fix_financial_data_precision_only.py
│       ├── f0696e2cac15_add_essential_performance_indexes.py
│       └── fix_database_integrity_issues.py
├── alembic.ini
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── DATABASE_SETUP.md
├── docker-compose.override.yml.example
├── docker-compose.yml
├── Dockerfile
├── docs
│   ├── api
│   │   └── backtesting.md
│   ├── BACKTESTING.md
│   ├── COST_BASIS_SPECIFICATION.md
│   ├── deep_research_agent.md
│   ├── exa_research_testing_strategy.md
│   ├── PORTFOLIO_PERSONALIZATION_PLAN.md
│   ├── PORTFOLIO.md
│   ├── SETUP_SELF_CONTAINED.md
│   └── speed_testing_framework.md
├── examples
│   ├── complete_speed_validation.py
│   ├── deep_research_integration.py
│   ├── llm_optimization_example.py
│   ├── llm_speed_demo.py
│   ├── monitoring_example.py
│   ├── parallel_research_example.py
│   ├── speed_optimization_demo.py
│   └── timeout_fix_demonstration.py
├── LICENSE
├── Makefile
├── MANIFEST.in
├── maverick_mcp
│   ├── __init__.py
│   ├── agents
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── circuit_breaker.py
│   │   ├── deep_research.py
│   │   ├── market_analysis.py
│   │   ├── optimized_research.py
│   │   ├── supervisor.py
│   │   └── technical_analysis.py
│   ├── api
│   │   ├── __init__.py
│   │   ├── api_server.py
│   │   ├── connection_manager.py
│   │   ├── dependencies
│   │   │   ├── __init__.py
│   │   │   ├── stock_analysis.py
│   │   │   └── technical_analysis.py
│   │   ├── error_handling.py
│   │   ├── inspector_compatible_sse.py
│   │   ├── inspector_sse.py
│   │   ├── middleware
│   │   │   ├── error_handling.py
│   │   │   ├── mcp_logging.py
│   │   │   ├── rate_limiting_enhanced.py
│   │   │   └── security.py
│   │   ├── openapi_config.py
│   │   ├── routers
│   │   │   ├── __init__.py
│   │   │   ├── agents.py
│   │   │   ├── backtesting.py
│   │   │   ├── data_enhanced.py
│   │   │   ├── data.py
│   │   │   ├── health_enhanced.py
│   │   │   ├── health_tools.py
│   │   │   ├── health.py
│   │   │   ├── intelligent_backtesting.py
│   │   │   ├── introspection.py
│   │   │   ├── mcp_prompts.py
│   │   │   ├── monitoring.py
│   │   │   ├── news_sentiment_enhanced.py
│   │   │   ├── performance.py
│   │   │   ├── portfolio.py
│   │   │   ├── research.py
│   │   │   ├── screening_ddd.py
│   │   │   ├── screening_parallel.py
│   │   │   ├── screening.py
│   │   │   ├── technical_ddd.py
│   │   │   ├── technical_enhanced.py
│   │   │   ├── technical.py
│   │   │   └── tool_registry.py
│   │   ├── server.py
│   │   ├── services
│   │   │   ├── __init__.py
│   │   │   ├── base_service.py
│   │   │   ├── market_service.py
│   │   │   ├── portfolio_service.py
│   │   │   ├── prompt_service.py
│   │   │   └── resource_service.py
│   │   ├── simple_sse.py
│   │   └── utils
│   │       ├── __init__.py
│   │       ├── insomnia_export.py
│   │       └── postman_export.py
│   ├── application
│   │   ├── __init__.py
│   │   ├── commands
│   │   │   └── __init__.py
│   │   ├── dto
│   │   │   ├── __init__.py
│   │   │   └── technical_analysis_dto.py
│   │   ├── queries
│   │   │   ├── __init__.py
│   │   │   └── get_technical_analysis.py
│   │   └── screening
│   │       ├── __init__.py
│   │       ├── dtos.py
│   │       └── queries.py
│   ├── backtesting
│   │   ├── __init__.py
│   │   ├── ab_testing.py
│   │   ├── analysis.py
│   │   ├── batch_processing_stub.py
│   │   ├── batch_processing.py
│   │   ├── model_manager.py
│   │   ├── optimization.py
│   │   ├── persistence.py
│   │   ├── retraining_pipeline.py
│   │   ├── strategies
│   │   │   ├── __init__.py
│   │   │   ├── base.py
│   │   │   ├── ml
│   │   │   │   ├── __init__.py
│   │   │   │   ├── adaptive.py
│   │   │   │   ├── ensemble.py
│   │   │   │   ├── feature_engineering.py
│   │   │   │   └── regime_aware.py
│   │   │   ├── ml_strategies.py
│   │   │   ├── parser.py
│   │   │   └── templates.py
│   │   ├── strategy_executor.py
│   │   ├── vectorbt_engine.py
│   │   └── visualization.py
│   ├── config
│   │   ├── __init__.py
│   │   ├── constants.py
│   │   ├── database_self_contained.py
│   │   ├── database.py
│   │   ├── llm_optimization_config.py
│   │   ├── logging_settings.py
│   │   ├── plotly_config.py
│   │   ├── security_utils.py
│   │   ├── security.py
│   │   ├── settings.py
│   │   ├── technical_constants.py
│   │   ├── tool_estimation.py
│   │   └── validation.py
│   ├── core
│   │   ├── __init__.py
│   │   ├── technical_analysis.py
│   │   └── visualization.py
│   ├── data
│   │   ├── __init__.py
│   │   ├── cache_manager.py
│   │   ├── cache.py
│   │   ├── django_adapter.py
│   │   ├── health.py
│   │   ├── models.py
│   │   ├── performance.py
│   │   ├── session_management.py
│   │   └── validation.py
│   ├── database
│   │   ├── __init__.py
│   │   ├── base.py
│   │   └── optimization.py
│   ├── dependencies.py
│   ├── domain
│   │   ├── __init__.py
│   │   ├── entities
│   │   │   ├── __init__.py
│   │   │   └── stock_analysis.py
│   │   ├── events
│   │   │   └── __init__.py
│   │   ├── portfolio.py
│   │   ├── screening
│   │   │   ├── __init__.py
│   │   │   ├── entities.py
│   │   │   ├── services.py
│   │   │   └── value_objects.py
│   │   ├── services
│   │   │   ├── __init__.py
│   │   │   └── technical_analysis_service.py
│   │   ├── stock_analysis
│   │   │   ├── __init__.py
│   │   │   └── stock_analysis_service.py
│   │   └── value_objects
│   │       ├── __init__.py
│   │       └── technical_indicators.py
│   ├── exceptions.py
│   ├── infrastructure
│   │   ├── __init__.py
│   │   ├── cache
│   │   │   └── __init__.py
│   │   ├── caching
│   │   │   ├── __init__.py
│   │   │   └── cache_management_service.py
│   │   ├── connection_manager.py
│   │   ├── data_fetching
│   │   │   ├── __init__.py
│   │   │   └── stock_data_service.py
│   │   ├── health
│   │   │   ├── __init__.py
│   │   │   └── health_checker.py
│   │   ├── persistence
│   │   │   ├── __init__.py
│   │   │   └── stock_repository.py
│   │   ├── providers
│   │   │   └── __init__.py
│   │   ├── screening
│   │   │   ├── __init__.py
│   │   │   └── repositories.py
│   │   └── sse_optimizer.py
│   ├── langchain_tools
│   │   ├── __init__.py
│   │   ├── adapters.py
│   │   └── registry.py
│   ├── logging_config.py
│   ├── memory
│   │   ├── __init__.py
│   │   └── stores.py
│   ├── monitoring
│   │   ├── __init__.py
│   │   ├── health_check.py
│   │   ├── health_monitor.py
│   │   ├── integration_example.py
│   │   ├── metrics.py
│   │   ├── middleware.py
│   │   └── status_dashboard.py
│   ├── providers
│   │   ├── __init__.py
│   │   ├── dependencies.py
│   │   ├── factories
│   │   │   ├── __init__.py
│   │   │   ├── config_factory.py
│   │   │   └── provider_factory.py
│   │   ├── implementations
│   │   │   ├── __init__.py
│   │   │   ├── cache_adapter.py
│   │   │   ├── macro_data_adapter.py
│   │   │   ├── market_data_adapter.py
│   │   │   ├── persistence_adapter.py
│   │   │   └── stock_data_adapter.py
│   │   ├── interfaces
│   │   │   ├── __init__.py
│   │   │   ├── cache.py
│   │   │   ├── config.py
│   │   │   ├── macro_data.py
│   │   │   ├── market_data.py
│   │   │   ├── persistence.py
│   │   │   └── stock_data.py
│   │   ├── llm_factory.py
│   │   ├── macro_data.py
│   │   ├── market_data.py
│   │   ├── mocks
│   │   │   ├── __init__.py
│   │   │   ├── mock_cache.py
│   │   │   ├── mock_config.py
│   │   │   ├── mock_macro_data.py
│   │   │   ├── mock_market_data.py
│   │   │   ├── mock_persistence.py
│   │   │   └── mock_stock_data.py
│   │   ├── openrouter_provider.py
│   │   ├── optimized_screening.py
│   │   ├── optimized_stock_data.py
│   │   └── stock_data.py
│   ├── README.md
│   ├── tests
│   │   ├── __init__.py
│   │   ├── README_INMEMORY_TESTS.md
│   │   ├── test_cache_debug.py
│   │   ├── test_fixes_validation.py
│   │   ├── test_in_memory_routers.py
│   │   ├── test_in_memory_server.py
│   │   ├── test_macro_data_provider.py
│   │   ├── test_mailgun_email.py
│   │   ├── test_market_calendar_caching.py
│   │   ├── test_mcp_tool_fixes_pytest.py
│   │   ├── test_mcp_tool_fixes.py
│   │   ├── test_mcp_tools.py
│   │   ├── test_models_functional.py
│   │   ├── test_server.py
│   │   ├── test_stock_data_enhanced.py
│   │   ├── test_stock_data_provider.py
│   │   └── test_technical_analysis.py
│   ├── tools
│   │   ├── __init__.py
│   │   ├── performance_monitoring.py
│   │   ├── portfolio_manager.py
│   │   ├── risk_management.py
│   │   └── sentiment_analysis.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── agent_errors.py
│   │   ├── batch_processing.py
│   │   ├── cache_warmer.py
│   │   ├── circuit_breaker_decorators.py
│   │   ├── circuit_breaker_services.py
│   │   ├── circuit_breaker.py
│   │   ├── data_chunking.py
│   │   ├── database_monitoring.py
│   │   ├── debug_utils.py
│   │   ├── fallback_strategies.py
│   │   ├── llm_optimization.py
│   │   ├── logging_example.py
│   │   ├── logging_init.py
│   │   ├── logging.py
│   │   ├── mcp_logging.py
│   │   ├── memory_profiler.py
│   │   ├── monitoring_middleware.py
│   │   ├── monitoring.py
│   │   ├── orchestration_logging.py
│   │   ├── parallel_research.py
│   │   ├── parallel_screening.py
│   │   ├── quick_cache.py
│   │   ├── resource_manager.py
│   │   ├── shutdown.py
│   │   ├── stock_helpers.py
│   │   ├── structured_logger.py
│   │   ├── tool_monitoring.py
│   │   ├── tracing.py
│   │   └── yfinance_pool.py
│   ├── validation
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── data.py
│   │   ├── middleware.py
│   │   ├── portfolio.py
│   │   ├── responses.py
│   │   ├── screening.py
│   │   └── technical.py
│   └── workflows
│       ├── __init__.py
│       ├── agents
│       │   ├── __init__.py
│       │   ├── market_analyzer.py
│       │   ├── optimizer_agent.py
│       │   ├── strategy_selector.py
│       │   └── validator_agent.py
│       ├── backtesting_workflow.py
│       └── state.py
├── PLANS.md
├── pyproject.toml
├── pyrightconfig.json
├── README.md
├── scripts
│   ├── dev.sh
│   ├── INSTALLATION_GUIDE.md
│   ├── load_example.py
│   ├── load_market_data.py
│   ├── load_tiingo_data.py
│   ├── migrate_db.py
│   ├── README_TIINGO_LOADER.md
│   ├── requirements_tiingo.txt
│   ├── run_stock_screening.py
│   ├── run-migrations.sh
│   ├── seed_db.py
│   ├── seed_sp500.py
│   ├── setup_database.sh
│   ├── setup_self_contained.py
│   ├── setup_sp500_database.sh
│   ├── test_seeded_data.py
│   ├── test_tiingo_loader.py
│   ├── tiingo_config.py
│   └── validate_setup.py
├── SECURITY.md
├── server.json
├── setup.py
├── tests
│   ├── __init__.py
│   ├── conftest.py
│   ├── core
│   │   └── test_technical_analysis.py
│   ├── data
│   │   └── test_portfolio_models.py
│   ├── domain
│   │   ├── conftest.py
│   │   ├── test_portfolio_entities.py
│   │   └── test_technical_analysis_service.py
│   ├── fixtures
│   │   └── orchestration_fixtures.py
│   ├── integration
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── README.md
│   │   ├── run_integration_tests.sh
│   │   ├── test_api_technical.py
│   │   ├── test_chaos_engineering.py
│   │   ├── test_config_management.py
│   │   ├── test_full_backtest_workflow_advanced.py
│   │   ├── test_full_backtest_workflow.py
│   │   ├── test_high_volume.py
│   │   ├── test_mcp_tools.py
│   │   ├── test_orchestration_complete.py
│   │   ├── test_portfolio_persistence.py
│   │   ├── test_redis_cache.py
│   │   ├── test_security_integration.py.disabled
│   │   └── vcr_setup.py
│   ├── performance
│   │   ├── __init__.py
│   │   ├── test_benchmarks.py
│   │   ├── test_load.py
│   │   ├── test_profiling.py
│   │   └── test_stress.py
│   ├── providers
│   │   └── test_stock_data_simple.py
│   ├── README.md
│   ├── test_agents_router_mcp.py
│   ├── test_backtest_persistence.py
│   ├── test_cache_management_service.py
│   ├── test_cache_serialization.py
│   ├── test_circuit_breaker.py
│   ├── test_database_pool_config_simple.py
│   ├── test_database_pool_config.py
│   ├── test_deep_research_functional.py
│   ├── test_deep_research_integration.py
│   ├── test_deep_research_parallel_execution.py
│   ├── test_error_handling.py
│   ├── test_event_loop_integrity.py
│   ├── test_exa_research_integration.py
│   ├── test_exception_hierarchy.py
│   ├── test_financial_search.py
│   ├── test_graceful_shutdown.py
│   ├── test_integration_simple.py
│   ├── test_langgraph_workflow.py
│   ├── test_market_data_async.py
│   ├── test_market_data_simple.py
│   ├── test_mcp_orchestration_functional.py
│   ├── test_ml_strategies.py
│   ├── test_optimized_research_agent.py
│   ├── test_orchestration_integration.py
│   ├── test_orchestration_logging.py
│   ├── test_orchestration_tools_simple.py
│   ├── test_parallel_research_integration.py
│   ├── test_parallel_research_orchestrator.py
│   ├── test_parallel_research_performance.py
│   ├── test_performance_optimizations.py
│   ├── test_production_validation.py
│   ├── test_provider_architecture.py
│   ├── test_rate_limiting_enhanced.py
│   ├── test_runner_validation.py
│   ├── test_security_comprehensive.py.disabled
│   ├── test_security_cors.py
│   ├── test_security_enhancements.py.disabled
│   ├── test_security_headers.py
│   ├── test_security_penetration.py
│   ├── test_session_management.py
│   ├── test_speed_optimization_validation.py
│   ├── test_stock_analysis_dependencies.py
│   ├── test_stock_analysis_service.py
│   ├── test_stock_data_fetching_service.py
│   ├── test_supervisor_agent.py
│   ├── test_supervisor_functional.py
│   ├── test_tool_estimation_config.py
│   ├── test_visualization.py
│   └── utils
│       ├── test_agent_errors.py
│       ├── test_logging.py
│       ├── test_parallel_screening.py
│       └── test_quick_cache.py
├── tools
│   ├── check_orchestration_config.py
│   ├── experiments
│   │   ├── validation_examples.py
│   │   └── validation_fixed.py
│   ├── fast_dev.sh
│   ├── hot_reload.py
│   ├── quick_test.py
│   └── templates
│       ├── new_router_template.py
│       ├── new_tool_template.py
│       ├── screening_strategy_template.py
│       └── test_template.py
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/scripts/INSTALLATION_GUIDE.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Tiingo Data Loader Installation Guide
  2 | 
  3 | This guide will help you set up and use the comprehensive Tiingo data loader for Maverick-MCP.
  4 | 
  5 | ## 📋 What You Get
  6 | 
  7 | The Tiingo data loader provides:
  8 | 
  9 | - **Comprehensive Data Loading**: Fetch stock metadata and OHLCV price data from the Tiingo API
 10 | - **Technical Indicators**: 50+ indicators calculated using pandas-ta
 11 | - **Screening Algorithms**: Built-in Maverick, Bear Market, and Supply/Demand screens
 12 | - **Progress Tracking**: Resume interrupted loads with checkpoint files
 13 | - **Performance Optimized**: Async operations with rate limiting and batch processing
 14 | - **Production Ready**: Error handling, logging, and database optimization
 15 | 
 16 | ## 🚀 Quick Start
 17 | 
 18 | ### 1. Check Your Setup
 19 | ```bash
 20 | cd /path/to/maverick-mcp
 21 | python3 scripts/validate_setup.py
 22 | ```
 23 | 
 24 | This will show you exactly what needs to be installed or configured.
 25 | 
 26 | ### 2. Install Dependencies
 27 | ```bash
 28 | # Install required Python packages
 29 | pip install -r scripts/requirements_tiingo.txt
 30 | 
 31 | # Or install individually:
 32 | pip install aiohttp pandas pandas-ta sqlalchemy psycopg2-binary
 33 | ```
 34 | 
 35 | ### 3. Get Tiingo API Token
 36 | 1. Sign up at [tiingo.com](https://www.tiingo.com) (free tier gives 2400 requests/hour)
 37 | 2. Get your API token from the dashboard
 38 | 3. Set environment variable:
 39 | ```bash
 40 | export TIINGO_API_TOKEN=your_token_here
 41 | ```
 42 | 
 43 | ### 4. Configure Database
 44 | ```bash
 45 | # Set your database URL
 46 | export DATABASE_URL=postgresql://user:password@localhost/maverick_mcp
 47 | 
 48 | # Or use existing environment variables
 49 | export POSTGRES_URL=postgresql://user:password@localhost/maverick_mcp
 50 | ```
 51 | 
 52 | ### 5. Verify Setup
 53 | ```bash
 54 | python3 scripts/validate_setup.py
 55 | ```
 56 | You should see "🎉 Setup validation PASSED!"
 57 | 
 58 | ## 📊 Usage Examples
 59 | 
 60 | ### Load Sample Stocks
 61 | ```bash
 62 | # Load 5 popular stocks with 2 years of data
 63 | python3 scripts/load_tiingo_data.py --symbols AAPL,MSFT,GOOGL,AMZN,TSLA --years 2 --calculate-indicators
 64 | ```
 65 | 
 66 | ### Load S&P 500 (Top 100)
 67 | ```bash
 68 | # Load top 100 S&P 500 stocks with screening
 69 | python3 scripts/load_tiingo_data.py --sp500 --years 1 --run-screening
 70 | ```
 71 | 
 72 | ### Load from File
 73 | ```bash
 74 | # Create symbol file
 75 | echo -e "AAPL\nMSFT\nGOOGL\nTSLA\nNVDA" > my_stocks.txt
 76 | 
 77 | # Load from file
 78 | python3 scripts/load_tiingo_data.py --file my_stocks.txt --calculate-indicators --run-screening
 79 | ```
 80 | 
 81 | ### Interactive Examples
 82 | ```bash
 83 | # Run guided examples
 84 | python3 scripts/load_example.py
 85 | ```
 86 | 
 87 | ## 🏗️ Architecture
 88 | 
 89 | ### Files Created
 90 | - **`load_tiingo_data.py`**: Main comprehensive data loader script
 91 | - **`tiingo_config.py`**: Configuration settings and symbol lists  
 92 | - **`load_example.py`**: Interactive examples and tutorials
 93 | - **`validate_setup.py`**: Setup validation and dependency checking
 94 | - **`test_tiingo_loader.py`**: Unit tests and validation
 95 | - **`requirements_tiingo.txt`**: Python package requirements
 96 | - **`README_TIINGO_LOADER.md`**: Comprehensive documentation
 97 | 
 98 | ### Data Flow
 99 | 1. **Fetch Metadata**: Get stock information from Tiingo
100 | 2. **Load Prices**: Download historical OHLCV data
101 | 3. **Calculate Indicators**: Compute 50+ technical indicators
102 | 4. **Store Data**: Bulk insert into Maverick-MCP database tables
103 | 5. **Run Screens**: Execute screening algorithms
104 | 6. **Track Progress**: Save checkpoints for resume capability
105 | 
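The pipeline maps directly onto the loader's async API. A minimal sketch, assuming `load_symbol_data` performs the fetch, indicator, and storage steps (1-4) for a single symbol, as in the Custom Workflows example below; screening and checkpoint tracking (steps 5-6) are handled by the CLI script:

```python
import asyncio

from scripts.load_tiingo_data import TiingoDataLoader

async def load_batch(symbols: list[str], start_date: str) -> None:
    async with TiingoDataLoader() as loader:
        for symbol in symbols:
            # Fetch metadata and prices, compute indicators, store to the DB
            await loader.load_symbol_data(symbol, start_date)

asyncio.run(load_batch(["AAPL", "MSFT"], "2023-01-01"))
```
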
106 | ### Database Tables
107 | - **`mcp_stocks`**: Basic stock information
108 | - **`mcp_price_cache`**: Historical OHLCV price data  
109 | - **`mcp_technical_cache`**: Calculated technical indicators
110 | - **`mcp_maverick_stocks`**: Momentum screening results
111 | - **`mcp_maverick_bear_stocks`**: Bear market screening results
112 | - **`mcp_supply_demand_breakouts`**: Supply/demand pattern results
113 | 
114 | ## ⚙️ Configuration Options
115 | 
116 | ### Environment Variables
117 | ```bash
118 | # Required
119 | export TIINGO_API_TOKEN=your_token
120 | export DATABASE_URL=postgresql://user:pass@localhost/db
121 | 
122 | # Optional
123 | export DB_POOL_SIZE=20
124 | export DB_ECHO=false
125 | export ENVIRONMENT=development
126 | ```
127 | 
128 | ### Symbol Sources
129 | - **S&P 500**: `--sp500` (top 100) or `--sp500-full` (all 500)
130 | - **Custom**: `--symbols AAPL,MSFT,GOOGL`
131 | - **File**: `--file symbols.txt`
132 | - **All Supported**: `--supported` (3000+ symbols)
133 | 
134 | ### Performance Tuning
135 | - **Batch Size**: `--batch-size 100` (default: 50)
136 | - **Concurrency**: `--max-concurrent 10` (default: 5)
137 | - **Date Range**: `--years 5` or `--start-date 2020-01-01`
138 | 
139 | ### Processing Options
140 | - **Technical Indicators**: `--calculate-indicators` (default: on)
141 | - **Screening**: `--run-screening` (run after data load)
142 | - **Resume**: `--resume` (continue from checkpoint)
143 | 
144 | ## 📈 Technical Indicators
145 | 
146 | ### Trend Indicators
147 | - Simple Moving Averages (SMA 20, 50, 150, 200)
148 | - Exponential Moving Average (EMA 21)
149 | - Average Directional Index (ADX 14)
150 | 
151 | ### Momentum Indicators  
152 | - Relative Strength Index (RSI 14)
153 | - MACD (12, 26, 9)
154 | - Stochastic Oscillator (14, 3, 3)
155 | - Relative Strength Rating vs Market
156 | 
157 | ### Volatility Indicators
158 | - Average True Range (ATR 14)
159 | - Bollinger Bands (20, 2.0)
160 | - Average Daily Range percentage
161 | 
162 | ### Volume Indicators
163 | - Volume Moving Averages
164 | - Volume Ratio vs Average
165 | - Volume-Weighted Average Price (VWAP)
166 | 
167 | ### Custom Indicators
168 | - Price Momentum (10, 20 period)
169 | - Bollinger Band Squeeze Detection
170 | - Position vs Moving Averages
171 | 
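The loader computes these with pandas-ta. A minimal sketch of a few of them, assuming a standard OHLCV DataFrame (the exact column layout the loader uses is an assumption):

```python
import pandas as pd
import pandas_ta as ta  # noqa: F401 (registers the .ta accessor)

def add_core_indicators(df: pd.DataFrame) -> pd.DataFrame:
    """Append a subset of the indicators listed above, in place."""
    df.ta.sma(length=50, append=True)                    # trend
    df.ta.ema(length=21, append=True)                    # trend
    df.ta.rsi(length=14, append=True)                    # momentum
    df.ta.macd(fast=12, slow=26, signal=9, append=True)  # momentum
    df.ta.atr(length=14, append=True)                    # volatility
    df.ta.bbands(length=20, std=2.0, append=True)        # volatility
    return df
```
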
172 | ## 🔍 Screening Algorithms
173 | 
174 | ### Maverick Momentum Screen
175 | **Criteria:**
176 | - Price > 21-day EMA
177 | - EMA-21 > SMA-50  
178 | - SMA-50 > SMA-200
179 | - Relative Strength > 70
180 | - Volume > 500K daily
181 | 
182 | **Scoring:** 0-10 points based on strength of signals
183 | 
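Expressed as a pandas filter, the criteria look roughly like the sketch below (the column names are assumptions, not the project's schema):

```python
import pandas as pd

def maverick_candidates(snapshot: pd.DataFrame) -> pd.DataFrame:
    """Apply the Maverick momentum criteria to a per-symbol snapshot frame."""
    return snapshot[
        (snapshot["close"] > snapshot["ema_21"])
        & (snapshot["ema_21"] > snapshot["sma_50"])
        & (snapshot["sma_50"] > snapshot["sma_200"])
        & (snapshot["relative_strength"] > 70)
        & (snapshot["avg_volume"] > 500_000)
    ]
```
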
184 | ### Bear Market Screen
185 | **Criteria:**
186 | - Price < 21-day EMA
187 | - EMA-21 < SMA-50
188 | - Relative Strength < 30
189 | - High volume on declines
190 | 
191 | **Use Case:** Short candidates or stocks to avoid
192 | 
193 | ### Supply/Demand Breakout Screen  
194 | **Criteria:**
195 | - Price > SMA-50 and SMA-200
196 | - Strong relative strength (>60)
197 | - Accumulation patterns
198 | - Institutional buying signals
199 | 
200 | **Use Case:** Stocks with strong fundamental demand
201 | 
202 | ## 🚨 Troubleshooting
203 | 
204 | ### Common Issues
205 | 
206 | #### 1. Missing Dependencies
207 | ```bash
208 | # Error: ModuleNotFoundError: No module named 'aiohttp'
209 | pip install -r scripts/requirements_tiingo.txt
210 | ```
211 | 
212 | #### 2. API Rate Limiting
213 | ```bash
214 | # Reduce concurrency if getting rate limited
215 | python3 scripts/load_tiingo_data.py --symbols AAPL --max-concurrent 2
216 | ```
217 | 
218 | #### 3. Database Connection Issues
219 | ```bash
220 | # Test database connection
221 | python3 -c "
222 | from maverick_mcp.data.models import SessionLocal
223 | with SessionLocal() as session:
224 |     print('Database connection OK')
225 | "
226 | ```
227 | 
228 | #### 4. Memory Issues
229 | ```bash
230 | # Reduce batch size for large loads
231 | python3 scripts/load_tiingo_data.py --sp500 --batch-size 25 --max-concurrent 3
232 | ```
233 | 
234 | #### 5. Checkpoint File Corruption
235 | ```bash
236 | # Remove corrupted checkpoint and restart
237 | rm load_progress.json
238 | python3 scripts/load_tiingo_data.py --symbols AAPL,MSFT
239 | ```
240 | 
241 | ### Getting Help
242 | 1. **Validation Script**: `python3 scripts/validate_setup.py`
243 | 2. **Check Logs**: `tail -f tiingo_data_loader.log`
244 | 3. **Test Individual Components**: `python3 scripts/test_tiingo_loader.py`
245 | 4. **Interactive Examples**: `python3 scripts/load_example.py`
246 | 
247 | ## 🎯 Best Practices
248 | 
249 | ### For Development
250 | ```bash
251 | # Start small for testing
252 | python3 scripts/load_tiingo_data.py --symbols AAPL,MSFT --years 0.5 --batch-size 10
253 | 
254 | # Use checkpoints for large loads
255 | python3 scripts/load_tiingo_data.py --sp500 --checkpoint-file dev_progress.json
256 | ```
257 | 
258 | ### For Production
259 | ```bash
260 | # Higher performance settings
261 | python3 scripts/load_tiingo_data.py --sp500-full \
262 |     --batch-size 100 \
263 |     --max-concurrent 10 \
264 |     --years 2 \
265 |     --run-screening
266 | 
267 | # Schedule regular updates
268 | # Add to crontab: 0 18 * * 1-5 /path/to/load_script.sh
269 | ```
270 | 
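The crontab entry above points at a wrapper script. A minimal Python sketch (the flags and checkpoint file name are assumptions to adapt):

```python
#!/usr/bin/env python3
"""Cron wrapper: refresh S&P 500 data nightly, then re-run the screens."""
import subprocess
import sys

result = subprocess.run(
    [
        sys.executable,
        "scripts/load_tiingo_data.py",
        "--sp500",
        "--years", "1",
        "--run-screening",
        "--checkpoint-file", "nightly_progress.json",
    ],
    check=False,
)
sys.exit(result.returncode)
```
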
271 | ### For Resume Operations
272 | ```bash
273 | # Always use checkpoints for large operations
274 | python3 scripts/load_tiingo_data.py --supported --checkpoint-file full_load.json
275 | 
276 | # If interrupted, resume with:
277 | python3 scripts/load_tiingo_data.py --resume --checkpoint-file full_load.json
278 | ```
279 | 
280 | ## 📊 Performance Benchmarks
281 | 
282 | **Typical Loading Times (on modern hardware):**
283 | - 10 symbols, 1 year: 2-3 minutes
284 | - 100 symbols, 2 years: 15-20 minutes  
285 | - 500 symbols, 2 years: 1-2 hours
286 | - 3000+ symbols, 2 years: 6-12 hours
287 | 
288 | **Rate Limits:**
289 | - Tiingo Free: 2400 requests/hour
290 | - Recommended: 5 concurrent requests max
291 | - With indicators: ~1.5 seconds per symbol
292 | 
293 | ## 🔗 Integration
294 | 
295 | ### With Maverick-MCP API
296 | The loaded data is immediately available through:
297 | - `/api/v1/stocks` - Stock metadata
298 | - `/api/v1/prices/{symbol}` - Price data
299 | - `/api/v1/technical/{symbol}` - Technical indicators
300 | - `/api/v1/screening/*` - Screening results
301 | 
302 | ### With MCP Tools
303 | - `get_stock_analysis` - Uses loaded data
304 | - `run_screening` - Operates on cached data
305 | - `portfolio_analysis` - Leverages technical indicators
306 | 
307 | ### Custom Workflows
308 | ```python
309 | # Example: Load data then run custom analysis
310 | from scripts.load_tiingo_data import TiingoDataLoader
311 | from maverick_mcp.data.models import SessionLocal
312 | 
313 | async with TiingoDataLoader() as loader:
314 |     await loader.load_symbol_data("AAPL", "2023-01-01")
315 | 
316 | with SessionLocal() as session:
317 |     # Your custom analysis here
318 |     pass
319 | ```
320 | 
321 | ## 🎉 Success!
322 | 
323 | Once setup is complete, you should be able to:
324 | 
325 | 1. ✅ Load market data from Tiingo efficiently
326 | 2. ✅ Calculate comprehensive technical indicators  
327 | 3. ✅ Run sophisticated screening algorithms
328 | 4. ✅ Resume interrupted loads seamlessly
329 | 5. ✅ Access all data through Maverick-MCP APIs
330 | 6. ✅ Build custom trading strategies
331 | 
332 | **Next Steps:**
333 | - Explore the interactive examples: `python3 scripts/load_example.py`
334 | - Read the full documentation: `scripts/README_TIINGO_LOADER.md`
335 | - Set up automated daily updates
336 | - Customize screening algorithms for your strategy
```

--------------------------------------------------------------------------------
/maverick_mcp/utils/quick_cache.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Quick in-memory cache decorator for development.
  3 | 
  4 | This module provides a simple LRU cache decorator with TTL support
  5 | to avoid repeated API calls during development and testing.
  6 | """
  7 | 
  8 | import asyncio
  9 | import functools
 10 | import hashlib
 11 | import json
 12 | import time
 13 | from collections import OrderedDict
 14 | from collections.abc import Callable
 15 | from typing import Any, TypeVar
 16 | 
 17 | from maverick_mcp.config.settings import settings
 18 | from maverick_mcp.utils.logging import get_logger
 19 | 
 20 | logger = get_logger(__name__)
 21 | 
 22 | T = TypeVar("T")
 23 | 
 24 | 
 25 | class QuickCache:
 26 |     """Simple in-memory LRU cache with TTL support."""
 27 | 
 28 |     def __init__(self, max_size: int = 1000):
 29 |         self.cache: OrderedDict[str, tuple[Any, float]] = OrderedDict()
 30 |         self.max_size = max_size
 31 |         self.hits = 0
 32 |         self.misses = 0
 33 |         self._lock = asyncio.Lock()
 34 | 
 35 |     def make_key(self, func_name: str, args: tuple, kwargs: dict) -> str:
 36 |         """Generate a cache key from function name and arguments."""
 37 |         # Convert args and kwargs to a stable string representation
 38 |         key_data = {
 39 |             "func": func_name,
 40 |             "args": args,
 41 |             "kwargs": sorted(kwargs.items()),
 42 |         }
 43 |         key_str = json.dumps(key_data, sort_keys=True, default=str)
 44 |         # Use hash for shorter keys
 45 |         return hashlib.md5(key_str.encode()).hexdigest()
 46 | 
 47 |     async def get(self, key: str) -> Any | None:
 48 |         """Get value from cache if not expired."""
 49 |         async with self._lock:
 50 |             if key in self.cache:
 51 |                 value, expiry = self.cache[key]
 52 |                 if time.time() < expiry:
 53 |                     # Move to end (LRU)
 54 |                     self.cache.move_to_end(key)
 55 |                     self.hits += 1
 56 |                     return value
 57 |                 else:
 58 |                     # Expired, remove it
 59 |                     del self.cache[key]
 60 | 
 61 |             self.misses += 1
 62 |             return None
 63 | 
 64 |     async def set(self, key: str, value: Any, ttl_seconds: float):
 65 |         """Set value in cache with TTL."""
 66 |         async with self._lock:
 67 |             expiry = time.time() + ttl_seconds
 68 | 
 69 |             # Remove oldest if at capacity
 70 |             if len(self.cache) >= self.max_size:
 71 |                 self.cache.popitem(last=False)
 72 | 
 73 |             self.cache[key] = (value, expiry)
 74 | 
 75 |     def get_stats(self) -> dict[str, Any]:
 76 |         """Get cache statistics."""
 77 |         total = self.hits + self.misses
 78 |         hit_rate = (self.hits / total * 100) if total > 0 else 0
 79 | 
 80 |         return {
 81 |             "hits": self.hits,
 82 |             "misses": self.misses,
 83 |             "total": total,
 84 |             "hit_rate": round(hit_rate, 2),
 85 |             "size": len(self.cache),
 86 |             "max_size": self.max_size,
 87 |         }
 88 | 
 89 |     def clear(self):
 90 |         """Clear the cache."""
 91 |         self.cache.clear()
 92 |         self.hits = 0
 93 |         self.misses = 0
 94 | 
 95 | 
 96 | # Global cache instance
 97 | _cache = QuickCache()
 98 | 
 99 | 
100 | def quick_cache(
101 |     ttl_seconds: float = 300,  # 5 minutes default
102 |     max_size: int = 1000,
103 |     key_prefix: str = "",
104 |     log_stats: bool | None = None,
105 | ) -> Callable[[Callable[..., T]], Callable[..., T]]:
106 |     """
107 |     Decorator for in-memory caching with TTL.
108 | 
109 |     Args:
110 |         ttl_seconds: Time to live in seconds (default: 300)
111 |         max_size: Maximum cache size (default: 1000)
112 |         key_prefix: Optional prefix for cache keys
113 |         log_stats: Whether to log cache statistics (default: settings.api.debug)
114 | 
115 |     Usage:
116 |         @quick_cache(ttl_seconds=60)
117 |         async def expensive_api_call(symbol: str):
118 |             return await fetch_data(symbol)
119 | 
120 |         @quick_cache(ttl_seconds=300, key_prefix="stock_data")
121 |         def get_stock_info(symbol: str, period: str):
122 |             return fetch_stock_data(symbol, period)
123 |     """
124 |     if log_stats is None:
125 |         log_stats = settings.api.debug
126 | 
127 |     def decorator(func: Callable[..., T]) -> Callable[..., T]:
128 |         # Update global cache size if specified
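        # Note: every decorated function shares the single module-level cache,
        # so the most recently applied decorator's max_size wins for all of them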
129 |         if max_size != _cache.max_size:
130 |             _cache.max_size = max_size
131 | 
132 |         @functools.wraps(func)
133 |         async def async_wrapper(*args: Any, **kwargs: Any) -> T:
134 |             # Generate cache key
135 |             cache_key = _cache.make_key(
136 |                 f"{key_prefix}:{func.__name__}" if key_prefix else func.__name__,
137 |                 args,
138 |                 kwargs,
139 |             )
140 | 
141 |             # Try to get from cache
142 |             cached_value = await _cache.get(cache_key)
143 |             if cached_value is not None:
144 |                 if log_stats:
145 |                     stats = _cache.get_stats()
146 |                     logger.debug(
147 |                         f"Cache HIT for {func.__name__}",
148 |                         extra={
149 |                             "function": func.__name__,
150 |                             "cache_key": cache_key[:8] + "...",
151 |                             "hit_rate": stats["hit_rate"],
152 |                             "cache_size": stats["size"],
153 |                         },
154 |                     )
155 |                 return cached_value
156 | 
157 |             # Cache miss - execute function
158 |             if log_stats:
159 |                 logger.debug(
160 |                     f"Cache MISS for {func.__name__}",
161 |                     extra={
162 |                         "function": func.__name__,
163 |                         "cache_key": cache_key[:8] + "...",
164 |                     },
165 |                 )
166 | 
167 |             # Execute the function
168 |             start_time = time.time()
169 |             # func is guaranteed to be async since we're in async_wrapper
170 |             result = await func(*args, **kwargs)  # type: ignore[misc]
171 |             execution_time = time.time() - start_time
172 | 
173 |             # Cache the result
174 |             await _cache.set(cache_key, result, ttl_seconds)
175 | 
176 |             if log_stats:
177 |                 stats = _cache.get_stats()
178 |                 logger.debug(
179 |                     f"Cached result for {func.__name__}",
180 |                     extra={
181 |                         "function": func.__name__,
182 |                         "execution_time": round(execution_time, 3),
183 |                         "ttl_seconds": ttl_seconds,
184 |                         "cache_stats": stats,
185 |                     },
186 |                 )
187 | 
188 |             return result
189 | 
190 |         @functools.wraps(func)
191 |         def sync_wrapper(*args: Any, **kwargs: Any) -> T:
192 |             # For sync functions, run the async cache operations on a
193 |             # short-lived event loop created (and torn down) per call
194 |             loop_policy = asyncio.get_event_loop_policy()
195 |             try:
196 |                 previous_loop = loop_policy.get_event_loop()
197 |             except RuntimeError:
198 |                 previous_loop = None
199 | 
200 |             loop = loop_policy.new_event_loop()
201 |             asyncio.set_event_loop(loop)
202 |             try:
203 |                 cache_key = _cache.make_key(
204 |                     f"{key_prefix}:{func.__name__}" if key_prefix else func.__name__,
205 |                     args,
206 |                     kwargs,
207 |                 )
208 | 
209 |                 # Try to get from cache (sync version)
210 |                 cached_value = loop.run_until_complete(_cache.get(cache_key))
211 |                 if cached_value is not None:
212 |                     if log_stats:
213 |                         stats = _cache.get_stats()
214 |                         logger.debug(
215 |                             f"Cache HIT for {func.__name__}",
216 |                             extra={
217 |                                 "function": func.__name__,
218 |                                 "hit_rate": stats["hit_rate"],
219 |                             },
220 |                         )
221 |                     return cached_value
222 | 
223 |                 # Cache miss
224 |                 result = func(*args, **kwargs)
225 | 
226 |                 # Cache the result
227 |                 loop.run_until_complete(_cache.set(cache_key, result, ttl_seconds))
228 | 
229 |                 return result
230 |             finally:
231 |                 loop.close()
232 |                 if previous_loop is not None:
233 |                     asyncio.set_event_loop(previous_loop)
234 |                 else:
235 |                     asyncio.set_event_loop(None)
236 | 
237 |         # Return appropriate wrapper based on function type
238 |         if asyncio.iscoroutinefunction(func):
239 |             return async_wrapper  # type: ignore[return-value]
240 |         else:
241 |             return sync_wrapper
242 | 
243 |     return decorator
244 | 
245 | 
246 | def get_cache_stats() -> dict[str, Any]:
247 |     """Get global cache statistics."""
248 |     return _cache.get_stats()
249 | 
250 | 
251 | def clear_cache():
252 |     """Clear the global cache."""
253 |     _cache.clear()
254 |     logger.info("Cache cleared")
255 | 
256 | 
257 | # Convenience decorators with common TTLs
258 | cache_1min = functools.partial(quick_cache, ttl_seconds=60)
259 | cache_5min = functools.partial(quick_cache, ttl_seconds=300)
260 | cache_15min = functools.partial(quick_cache, ttl_seconds=900)
261 | cache_1hour = functools.partial(quick_cache, ttl_seconds=3600)
262 | 
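# Example: the convenience partials must still be called to obtain a
# decorator, e.g.
#
#     @cache_1min()
#     async def latest_quote(symbol: str) -> float:
#         ...
#
# which is equivalent to @quick_cache(ttl_seconds=60).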
263 | 
264 | # Example usage for API calls
265 | @quick_cache(ttl_seconds=300, key_prefix="stock")
266 | async def cached_stock_data(symbol: str, start_date: str, end_date: str) -> dict:
267 |     """Example of caching stock data API calls."""
268 |     # This would normally make an expensive API call
269 |     logger.info(f"Fetching stock data for {symbol}")
270 |     # Simulate API call
271 |     await asyncio.sleep(0.1)
272 |     return {
273 |         "symbol": symbol,
274 |         "start": start_date,
275 |         "end": end_date,
276 |         "data": "mock_data",
277 |     }
278 | 
279 | 
280 | # Cache management commands for development
281 | if settings.api.debug:
282 | 
283 |     @quick_cache(ttl_seconds=1)  # Very short TTL for testing
284 |     def test_cache_function(value: str) -> str:
285 |         """Test function for cache debugging."""
286 |         return f"processed_{value}_{time.time()}"
287 | 
```

--------------------------------------------------------------------------------
/docs/COST_BASIS_SPECIFICATION.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Cost Basis Specification for Portfolio Management
  2 | 
  3 | ## 1. Overview
  4 | 
  5 | This document specifies the cost basis tracking algorithm for MaverickMCP's portfolio management system. The system uses the **Average Cost Method** for educational simplicity.
  6 | 
  7 | ## 2. Cost Basis Method: Average Cost
  8 | 
  9 | ### Definition
 10 | The average cost method calculates the cost basis by taking the total cost of all shares purchased and dividing by the total number of shares owned.
 11 | 
 12 | ### Formula
 13 | ```
 14 | Average Cost Basis = Total Cost of All Shares / Total Number of Shares
 15 | ```
 16 | 
 17 | ### Why Average Cost?
 18 | 1. **Simplicity**: Easiest to understand for educational purposes
 19 | 2. **Consistency**: Matches existing `PortfolioManager` implementation
 20 | 3. **No Tax Complexity**: Avoids FIFO/LIFO tax accounting rules
 21 | 4. **Educational Focus**: Appropriate for learning, not tax optimization
 22 | 
 23 | ## 3. Edge Cases and Handling
 24 | 
 25 | ### 3.1 Multiple Purchases at Different Prices
 26 | 
 27 | **Scenario**: User buys same stock multiple times at different prices
 28 | 
 29 | **Example**:
 30 | ```
 31 | Purchase 1: 10 shares @ $150.00 = $1,500.00
 32 | Purchase 2: 10 shares @ $170.00 = $1,700.00
 33 | Result: 20 shares @ $160.00 average cost = $3,200.00 total
 34 | ```
 35 | 
 36 | **Algorithm**:
 37 | ```python
 38 | new_total_shares = existing_shares + new_shares
 39 | new_total_cost = existing_total_cost + (new_shares * new_price)
 40 | new_average_cost = new_total_cost / new_total_shares
 41 | ```
 42 | 
 43 | **Precision**: Use Decimal type throughout, round final result to 4 decimal places
 44 | 
 45 | ### 3.2 Partial Position Sales
 46 | 
 47 | **Scenario**: User sells portion of position
 48 | 
 49 | **Example**:
 50 | ```
 51 | Holding: 20 shares @ $160.00 average cost = $3,200.00 total
 52 | Sell: 10 shares
 53 | Result: 10 shares @ $160.00 average cost = $1,600.00 total
 54 | ```
 55 | 
 56 | **Algorithm**:
 57 | ```python
 58 | new_shares = existing_shares - sold_shares
 59 | new_total_cost = new_shares * average_cost_basis
 60 | # Average cost basis remains unchanged
 61 | ```
 62 | 
 63 | **Important**: Average cost basis does NOT change on partial sales
 64 | 
 65 | ### 3.3 Full Position Close
 66 | 
 67 | **Scenario**: User sells all shares
 68 | 
 69 | **Algorithm**:
 70 | ```python
 71 | if sold_shares >= existing_shares:
 72 |     # Remove position entirely from portfolio
 73 |     position = None
 74 | ```
 75 | 
 76 | **Database**: Delete PortfolioPosition row
 77 | 
 78 | ### 3.4 Zero or Negative Shares
 79 | 
 80 | **Validation Rules**:
 81 | - Shares to add: Must be > 0
 82 | - Shares to remove: Must be > 0
 83 | - Result after removal: Must be >= 0
 84 | 
 85 | **Error Handling**:
 86 | ```python
 87 | if quantity <= 0:  # the number of shares being added or removed
 88 |     raise ValueError("Invalid share quantity")
 89 | ```
 90 | 
 91 | ### 3.5 Zero or Negative Prices
 92 | 
 93 | **Validation Rules**:
 94 | - Purchase price: Must be > 0
 95 | - Sell price: Optional (not used in cost basis calculation)
 96 | 
 97 | ### 3.6 Fractional Shares
 98 | 
 99 | **Support**: YES - Use Numeric(20, 8) for up to 8 decimal places
100 | 
101 | **Example**:
102 | ```
103 | Purchase: 10.5 shares @ $150.25 = $1,577.625
104 | Valid and supported
105 | ```
106 | 
107 | ### 3.7 Rounding and Precision
108 | 
109 | **Database Storage**:
110 | - Shares: `Numeric(20, 8)` - 8 decimal places
111 | - Prices: `Numeric(12, 4)` - 4 decimal places (cents precision)
112 | - Total Cost: `Numeric(20, 4)` - 4 decimal places
113 | 
114 | **Calculation Precision**:
115 | - Use Python `Decimal` type throughout calculations
116 | - Only round when storing to database or displaying to user
117 | - Never use float for financial calculations
118 | 
119 | **Rounding Rules**:
120 | ```python
121 | from decimal import Decimal, ROUND_HALF_UP
122 | 
123 | # For display (2 decimal places)
124 | display_value = value.quantize(Decimal('0.01'), rounding=ROUND_HALF_UP)
125 | 
126 | # For database storage (4 decimal places for prices)
127 | db_price = price.quantize(Decimal('0.0001'), rounding=ROUND_HALF_UP)
128 | 
129 | # For database storage (8 decimal places for shares)
130 | db_shares = shares.quantize(Decimal('0.00000001'), rounding=ROUND_HALF_UP)
131 | ```
132 | 
133 | ### 3.8 Division by Zero
134 | 
135 | **Scenario**: Calculating average when shares = 0
136 | 
137 | **Prevention**:
138 | ```python
139 | if total_shares == 0:
140 |     raise ValueError("Cannot calculate average cost with zero shares")
141 | ```
142 | 
143 | **Should never occur** due to validation preventing zero-share positions
144 | 
145 | ## 4. P&L Calculation
146 | 
147 | ### Unrealized P&L Formula
148 | ```
149 | Current Value = Shares × Current Price
150 | Unrealized P&L = Current Value - Total Cost
151 | P&L Percentage = (Unrealized P&L / Total Cost) × 100
152 | ```
153 | 
154 | ### Example
155 | ```
156 | Position: 20 shares @ $160.00 cost basis = $3,200.00 total cost
157 | Current Price: $175.50
158 | Current Value: 20 × $175.50 = $3,510.00
159 | Unrealized P&L: $3,510.00 - $3,200.00 = $310.00
160 | P&L %: ($310.00 / $3,200.00) × 100 = 9.69%
161 | ```
162 | 
163 | ### Edge Cases
164 | - **Current price unavailable**: Use cost basis as fallback
165 | - **Zero cost basis**: Return 0% (should never occur with validation)
166 | 
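A minimal sketch of the formula and fallback above using `Decimal` (the function is illustrative, not the project's API):

```python
from decimal import ROUND_HALF_UP, Decimal

def unrealized_pnl(
    shares: Decimal, total_cost: Decimal, current_price: Decimal | None
) -> tuple[Decimal, Decimal]:
    """Return (unrealized P&L, P&L %) for a position."""
    if current_price is None:
        current_price = total_cost / shares  # fallback: cost basis, P&L = 0
    current_value = shares * current_price
    pnl = current_value - total_cost
    pnl_pct = (pnl / total_cost * 100).quantize(
        Decimal("0.01"), rounding=ROUND_HALF_UP
    )
    return pnl, pnl_pct

# 20 shares @ $160.00 cost basis, current price $175.50 -> ($310.00, 9.69%)
```
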
167 | ## 5. Database Constraints
168 | 
169 | ### Unique Constraint
170 | ```sql
171 | UNIQUE (portfolio_id, ticker)
172 | ```
173 | **Rationale**: One position per ticker per portfolio
174 | 
175 | ### Check Constraints (Optional - Enforce in Application Layer)
176 | ```python
177 | # Application-level validation (preferred)
178 | assert shares > 0, "Shares must be positive"
179 | assert average_cost_basis > 0, "Cost basis must be positive"
180 | assert total_cost > 0, "Total cost must be positive"
181 | ```
182 | 
183 | ## 6. Concurrency Considerations
184 | 
185 | ### Single-User System
186 | - No concurrent writes expected (personal use)
187 | - Database-level unique constraints prevent duplicates
188 | - SQLAlchemy sessions with auto-rollback handle errors
189 | 
190 | ### Future Multi-User Support
191 | - Would require row-level locking: `SELECT FOR UPDATE` (see the sketch below)
192 | - Optimistic concurrency with version column
193 | - Currently not needed for personal use
194 | 
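If multi-user support is ever added, the row-level locking mentioned above could look like this SQLAlchemy sketch (`PortfolioPosition` is assumed to be the ORM model for `mcp_portfolio_positions` from Section 8):

```python
from sqlalchemy import select

def lock_position(session, portfolio_id, ticker):
    """Fetch a position with SELECT ... FOR UPDATE semantics."""
    # PortfolioPosition: ORM model mapped to mcp_portfolio_positions (assumed)
    stmt = (
        select(PortfolioPosition)
        .where(
            PortfolioPosition.portfolio_id == portfolio_id,
            PortfolioPosition.ticker == ticker,
        )
        .with_for_update()  # row stays locked until the transaction ends
    )
    return session.execute(stmt).scalar_one_or_none()
```
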
195 | ## 7. Performance Benchmarks
196 | 
197 | ### Expected Performance (100 Positions, 1000 Transactions)
198 | - Add position: < 10ms (with database write)
199 | - Calculate portfolio value: < 50ms (without live prices)
200 | - Calculate portfolio value with live prices: < 2s (network bound)
201 | 
202 | ### Optimization Strategies
203 | - Batch price fetches for portfolio valuation
204 | - Cache live prices (5-minute expiry)
205 | - Use database indexes for ticker lookups
206 | - Lazy-load positions only when needed
207 | 
208 | ## 8. Migration Strategy
209 | 
210 | ### Initial Migration (014_add_portfolio_models)
211 | ```sql
212 | CREATE TABLE mcp_portfolios (
213 |     id UUID PRIMARY KEY,
214 |     user_id VARCHAR(50) DEFAULT 'default',
215 |     name VARCHAR(200) DEFAULT 'My Portfolio',
216 |     created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
217 |     updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
218 | );
219 | 
220 | CREATE TABLE mcp_portfolio_positions (
221 |     id UUID PRIMARY KEY,
222 |     portfolio_id UUID REFERENCES mcp_portfolios(id) ON DELETE CASCADE,
223 |     ticker VARCHAR(20) NOT NULL,
224 |     shares NUMERIC(20, 8) NOT NULL,
225 |     average_cost_basis NUMERIC(12, 4) NOT NULL,
226 |     total_cost NUMERIC(20, 4) NOT NULL,
227 |     purchase_date TIMESTAMP WITH TIME ZONE NOT NULL,
228 |     notes TEXT,
229 |     created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
230 |     updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
231 |     UNIQUE(portfolio_id, ticker)
232 | );
233 | ```
234 | 
235 | ### Data Migration from PortfolioManager (Future)
236 | If users have existing portfolio JSON files:
237 | ```python
238 | def migrate_from_json(json_file: str) -> None:
239 |     """Migrate existing portfolio from JSON to database."""
240 |     # Load JSON portfolio
241 |     # Create UserPortfolio
242 |     # Create PortfolioPositions for each holding
243 |     # Verify cost basis calculations match
244 | ```
245 | 
246 | ## 9. Testing Requirements
247 | 
248 | ### Unit Tests (Domain Layer)
249 | - ✅ Add shares: Multiple purchases, average calculation
250 | - ✅ Remove shares: Partial removal, full removal
251 | - ✅ P&L calculation: Various price scenarios
252 | - ✅ Edge cases: Zero shares, negative values, division by zero
253 | - ✅ Precision: Decimal arithmetic accuracy
254 | 
255 | ### Integration Tests (Database Layer)
256 | - ✅ CRUD operations: Create, read, update, delete positions
257 | - ✅ Unique constraint: Prevent duplicate tickers
258 | - ✅ Cascade delete: Portfolio deletion removes positions
259 | - ✅ Transaction rollback: Error handling
260 | 
261 | ### Property-Based Tests
262 | - ✅ Adding and removing shares always maintains valid state
263 | - ✅ Average cost formula always correct
264 | - ✅ P&L calculations always sum correctly
265 | 
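A minimal property-based sketch of the average-cost invariant, assuming the `hypothesis` library:

```python
from hypothesis import given
from hypothesis import strategies as st

prices = st.decimals(min_value="0.01", max_value="10000", places=4)
share_counts = st.decimals(min_value="0.00000001", max_value="1000000", places=8)

@given(s1=share_counts, p1=prices, s2=share_counts, p2=prices)
def test_average_cost_between_purchase_prices(s1, p1, s2, p2):
    total_cost = s1 * p1 + s2 * p2
    avg = total_cost / (s1 + s2)
    # A weighted average can never leave the range of its inputs
    assert min(p1, p2) <= avg <= max(p1, p2)
```
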
266 | ## 10. Example Scenarios
267 | 
268 | ### Scenario 1: Build Position Over Time
269 | ```
270 | Day 1: Buy 10 AAPL @ $150.00
271 |   - Shares: 10, Avg Cost: $150.00, Total: $1,500.00
272 | 
273 | Day 30: Buy 5 AAPL @ $160.00
274 |   - Shares: 15, Avg Cost: $153.33, Total: $2,300.00
275 | 
276 | Day 60: Buy 10 AAPL @ $147.00
277 |   - Shares: 25, Avg Cost: $150.80, Total: $3,770.00
278 | ```
279 | 
280 | ### Scenario 2: Take Profits
281 | ```
282 | Start: 25 AAPL @ $150.80 = $3,770.00
283 | Current Price: $175.50
284 | Unrealized P&L: +$617.50 (+16.38%)
285 | 
286 | Sell 10 shares @ $175.50 (realized gain: $247.00)
287 | Remaining: 15 AAPL @ $150.80 = $2,262.00
288 | Current Value @ $175.50: $2,632.50
289 | Unrealized P&L: +$370.50 (+16.38% - same percentage)
290 | ```
291 | 
292 | ### Scenario 3: Dollar-Cost Averaging
293 | ```
294 | Monthly purchases of $1,000:
295 | Month 1: 6.67 shares @ $150.00 = $1,000.00
296 | Month 2: 6.25 shares @ $160.00 = $1,000.00
297 | Month 3: 6.90 shares @ $145.00 = $1,000.00
298 | Total: 19.82 shares @ $151.36 avg = $3,000.00
299 | ```
300 | 
301 | ## 11. Compliance and Disclaimers
302 | 
303 | ### Educational Purpose
304 | This cost basis tracking is for **educational purposes only** and should not be used for tax reporting.
305 | 
306 | ### Tax Reporting
307 | Users should consult tax professionals and use official brokerage cost basis reporting for tax purposes.
308 | 
309 | ### Disclaimers in Tools
310 | All portfolio tools include:
311 | ```
312 | DISCLAIMER: This portfolio tracking is for educational purposes only and does not
313 | constitute investment advice. All investments carry risk of loss. Consult qualified
314 | financial and tax professionals for investment and tax advice.
315 | ```
316 | 
317 | ## 12. References
318 | 
319 | - **IRS Publication 550**: Investment Income and Expenses
320 | - **Existing Code**: `maverick_mcp/tools/portfolio_manager.py` (average cost implementation)
321 | - **Financial Precision**: IEEE 754 vs Decimal arithmetic
322 | - **SQLAlchemy Numeric**: Column type documentation
323 | 
324 | ---
325 | 
326 | **Document Version**: 1.0
327 | **Last Updated**: 2025-11-01
328 | **Author**: Portfolio Personalization Feature Team
329 | 
```

--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Pytest configuration for MaverickMCP integration testing.
  3 | 
  4 | This module sets up test containers for PostgreSQL and Redis to enable
  5 | real integration testing without mocking database or cache dependencies.
  6 | """
  7 | 
  8 | # Set test environment before any other imports
  9 | import os
 10 | 
 11 | os.environ["MAVERICK_TEST_ENV"] = "true"
 12 | 
 13 | import asyncio
 14 | import sys
 15 | from collections.abc import AsyncGenerator, Generator
 16 | 
 17 | import pytest
 18 | from httpx import ASGITransport, AsyncClient
 19 | from sqlalchemy import create_engine
 20 | from sqlalchemy.orm import Session, sessionmaker
 21 | from testcontainers.postgres import PostgresContainer
 22 | from testcontainers.redis import RedisContainer
 23 | 
 24 | # Add the parent directory to the path to enable imports
 25 | sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 26 | 
 27 | from maverick_mcp.api.api_server import create_api_app
 28 | 
 29 | # Import all models to ensure they're registered with Base
 30 | from maverick_mcp.data.models import get_db
 31 | from maverick_mcp.database.base import Base
 32 | 
 33 | 
 34 | # Container fixtures (session scope for efficiency)
 35 | @pytest.fixture(scope="session")
 36 | def postgres_container():
 37 |     """Create a PostgreSQL test container for the test session."""
 38 |     # Pass credentials to the constructor: with_env after startup is a no-op
 39 |     with PostgresContainer(
 40 |         "postgres:15-alpine", username="test", password="test", dbname="test"
 41 |     ) as postgres:
 42 |         yield postgres
 43 | 
 44 | 
 45 | @pytest.fixture(scope="session")
 46 | def redis_container():
 47 |     """Create a Redis test container for the test session."""
 48 |     with RedisContainer("redis:7-alpine") as redis:
 49 |         yield redis
 50 | 
 51 | 
 52 | # Database setup fixtures
 53 | @pytest.fixture(scope="session")
 54 | def database_url(postgres_container: PostgresContainer) -> str:
 55 |     """Get the database URL from the test container."""
 56 |     return postgres_container.get_connection_url()
 57 | 
 58 | 
 59 | @pytest.fixture(scope="session")
 60 | def redis_url(redis_container: RedisContainer) -> str:
 61 |     """Get the Redis URL from the test container."""
 62 |     host = redis_container.get_container_host_ip()
 63 |     port = redis_container.get_exposed_port(6379)
 64 |     return f"redis://{host}:{port}/0"
 65 | 
 66 | 
 67 | @pytest.fixture(scope="session")
 68 | def engine(database_url: str):
 69 |     """Create a SQLAlchemy engine for the test database."""
 70 |     engine = create_engine(database_url)
 71 | 
 72 |     # Create all tables in proper order, handling duplicate errors
 73 |     try:
 74 |         Base.metadata.create_all(bind=engine, checkfirst=True)
 75 |     except Exception as e:
 76 |         # Only ignore duplicate table/index errors, attempt partial creation
 77 |         if "already exists" in str(e) or "DuplicateTable" in str(type(e)):
 78 |             # Try to create tables individually
 79 |             for _table_name, table in Base.metadata.tables.items():
 80 |                 try:
 81 |                     table.create(bind=engine, checkfirst=True)
 82 |                 except Exception as table_error:
 83 |                     if "already exists" not in str(table_error):
 84 |                         # Re-raise non-duplicate errors
 85 |                         raise table_error
 86 |         else:
 87 |             raise
 88 | 
 89 |     yield engine
 90 | 
 91 |     # Drop all tables after tests
 92 |     try:
 93 |         Base.metadata.drop_all(bind=engine)
 94 |     except Exception:
 95 |         # Ignore errors when dropping tables
 96 |         pass
 97 | 
 98 | 
 99 | @pytest.fixture(scope="function")
100 | def db_session(engine) -> Generator[Session, None, None]:
101 |     """Create a database session for each test."""
102 |     SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
103 |     session = SessionLocal()
104 |     try:
105 |         yield session
106 |     finally:
107 |         session.rollback()
108 |         session.close()
109 | 
110 | 
111 | # Environment setup
112 | @pytest.fixture(scope="session", autouse=True)
113 | def setup_test_env(database_url: str, redis_url: str):
114 |     """Set up test environment variables."""
115 |     os.environ["DATABASE_URL"] = database_url
116 |     os.environ["REDIS_URL"] = redis_url
117 |     os.environ["ENVIRONMENT"] = "test"
118 |     os.environ["AUTH_ENABLED"] = "true"
119 |     os.environ["LOG_LEVEL"] = "INFO"
120 |     # Use test JWT keys
121 |     os.environ["JWT_PRIVATE_KEY"] = """-----BEGIN PRIVATE KEY-----
122 | MIIEuwIBADANBgkqhkiG9w0BAQEFAASCBKUwggShAgEAAoIBAQCONQjZiRlHlDGO
123 | XHjbUyfyQhDWJsvzeaXtFcDGw0qCY+AITiCBVBukzDWf/1wGJ/lhdYX5c1DuNVXq
124 | +JDFY15RjcR9fCbxtiEeuSJ2sh3hVDrQ1BmAWAUV4cFUJXAxC+1PmcqCQEGwfzUi
125 | 89Jq76hLyMtxlia2OefN+Cv3hKp37PKrPkdv3SU/moXs5RM5hx01E2dQELzl7X39
126 | O+vzhI4EvIILFqCBKbSv4ylHADrFZH6MiFjhxdPZNdoLbUs5mBjjFXhLOtjFiHRx
127 | 6hTYdb6q6fUBWaKtG9jyXs6q8J1lxovgsNHwXCDGeIAaWtmK4V0mRrRfKPFeArwD
128 | Ez5A0rxtAgMBAAECgf9lbytBbqZMN/lkx28p1uf5jlHGNSUBd/3nkqFxfFj7c53l
129 | oMYpXLzsBOQK7tI3iEI8ne6ICbkflg2CkedpYf7pwtnAxUHM91GtWbMLMTa5loaN
130 | wG8nwSNrkHC2toTl0vfdK05pX/NeNUFkZJm8ISLlhi20Y7MSlWamAbrdM4B3/6uM
131 | EXYBSOV2u50g8a3pytsp/dvdkXgJ0BroztJM3FMtY52vUaF3D7xesqv6gS0sxpbn
132 | NyOl8hk9SQhEI3L0p/daozuXjNa3y2p4R0h9+ibEnUlNeREFGkIOAt1F6pClLjAh
133 | elOkYkm4uG0LE8GkKYtiTUrMouYvplPla/ryS8ECgYEAxSga2KYIOCglSyDdvXw6
134 | tkkiNDvNj2v02EFxV4X8TzDdmKPoGUQ+fUTua8j/kclfZ1C/AMwyt4e1S14mbk0A
135 | R/jat49uoXNqT8qVAWvbekLTLXwTfmubrfvOUnrlya13PZ9F5pE7Fxw4FARALP8n
136 | MK/5Tg+WFqY/m027em1MKKUCgYEAuKZ5eAy24gsfSPUlakfnz90oUcB5ETITvpc5
137 | hn6yAlvPdnjqm4MM+mx2zEGT2764BfYED3Qt5A9+9ayI6lynZlpigdOrqJTktsXP
138 | XVxyKdzHS4Z8AknjDTIt9cISkPZMmnMxMfY68+EuH1ZWf2rGy5jaIJMFIBXLt+iI
139 | xKHwMikCgYARPNpsCsg5MLliAjOg95Wijm5hJsFoQsYbik1Am8RdoCYfzGTkoKTe
140 | CwLVhbNiqbqfq92nUjM0/LaLKmYtyqm1oTpuRiokD5VB+LJid22vGNyh43FI4luw
141 | MI3vhDNHGNWOG7je2d/Su3LjvSNnS7+/cANaId67iDmTeI5lu9ymyQKBgGbRpD/Z
142 | 7JgwE0qf3yawRX+0qXfkUkXl+aKeOJUQxXSUxRA2QoU30yk67mfMeFXbfEMte5NT
143 | YR5mFo8cdNzznO9ckw+x2xszVawEt/RHvvZajssaZsErfXfioj7/wzDfRUaXsCQe
144 | 9TLKB9HBVMb8oRfL1GJhG3CDUn3kyQudFNAJAoGBAJNTpD53wyyPor7RpPXy1huD
145 | UwLk4MGD0X6AGzl7m5ZS7VppTrM0WLgCDICetyc35yjQto3lrlr7Wer33gIZRe+g
146 | QFbUNCZrfvHzFj5Ug9gLwj7V+7hfEk+Obx0azY2C7UT9lbDI+rpn6TT10kuN3KZN
147 | VLVde7wz9h17BALhp84I
148 | -----END PRIVATE KEY-----"""
149 |     os.environ["JWT_PUBLIC_KEY"] = """-----BEGIN PUBLIC KEY-----
150 | MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAjjUI2YkZR5Qxjlx421Mn
151 | 8kIQ1ibL83ml7RXAxsNKgmPgCE4ggVQbpMw1n/9cBif5YXWF+XNQ7jVV6viQxWNe
152 | UY3EfXwm8bYhHrkidrId4VQ60NQZgFgFFeHBVCVwMQvtT5nKgkBBsH81IvPSau+o
153 | S8jLcZYmtjnnzfgr94Sqd+zyqz5Hb90lP5qF7OUTOYcdNRNnUBC85e19/Tvr84SO
154 | BLyCCxaggSm0r+MpRwA6xWR+jIhY4cXT2TXaC21LOZgY4xV4SzrYxYh0ceoU2HW+
155 | qun1AVmirRvY8l7OqvCdZcaL4LDR8FwgxniAGlrZiuFdJka0XyjxXgK8AxM+QNK8
156 | bQIDAQAB
157 | -----END PUBLIC KEY-----"""
158 |     yield
159 |     # Clean up (optional)
160 | 
161 | 
162 | # FastAPI test client fixtures
163 | @pytest.fixture(scope="function")
164 | async def app(db_session: Session):
165 |     """Create a FastAPI app instance for testing."""
166 |     app = create_api_app()
167 | 
168 |     # Override the database dependency
169 |     def override_get_db():
170 |         try:
171 |             yield db_session
172 |         finally:
173 |             pass
174 | 
175 |     app.dependency_overrides[get_db] = override_get_db
176 | 
177 |     yield app
178 | 
179 |     # Clean up overrides
180 |     app.dependency_overrides.clear()
181 | 
182 | 
183 | @pytest.fixture(scope="function")
184 | async def client(app) -> AsyncGenerator[AsyncClient, None]:
185 |     """Create an async HTTP client for testing API endpoints."""
186 |     transport = ASGITransport(app=app)
187 |     async with AsyncClient(transport=transport, base_url="http://test") as client:
188 |         yield client
189 | 
190 | 
191 | # Authentication fixtures (disabled for personal use)
192 | @pytest.fixture
193 | async def test_user(db_session: Session):
194 |     """Create a test user for authenticated scenarios (legacy billing disabled)."""
195 |     # Auth disabled for personal use - return None
196 |     # All auth-related imports and functionality removed
197 |     return None
198 | 
199 | 
200 | @pytest.fixture
201 | async def auth_headers(client: AsyncClient, test_user):
202 |     """Get authentication headers for a test user (disabled for personal use)."""
203 |     # Auth disabled for personal use - return empty headers
204 |     return {}
205 | 
206 | 
207 | # Event loop configuration for async tests
208 | @pytest.fixture(scope="session")
209 | def event_loop():
210 |     """Create an event loop for the test session."""
211 |     loop = asyncio.get_event_loop_policy().new_event_loop()
212 |     yield loop
213 |     loop.close()
214 | 
215 | 
216 | # Mock fixtures for external APIs
217 | @pytest.fixture
218 | def vcr_config():
219 |     """Configure VCR for recording/replaying HTTP requests."""
220 |     return {
221 |         "filter_headers": ["authorization", "api-key", "x-api-key"],
222 |         "filter_query_parameters": ["apikey", "token"],
223 |         "filter_post_data_parameters": ["api_key", "token"],
224 |         "record_mode": "once",  # Record once, then replay
225 |         "match_on": ["method", "scheme", "host", "port", "path", "query"],
226 |     }
227 | 
228 | 
229 | # Utility fixtures
230 | @pytest.fixture
231 | def sample_stock_data():
232 |     """Provide sample stock data for testing."""
233 |     from datetime import datetime
234 | 
235 |     import numpy as np
236 |     import pandas as pd
237 | 
238 |     dates = pd.date_range(end=datetime.now(), periods=100, freq="D")
239 |     data = {
240 |         "Open": np.random.uniform(100, 200, 100),
241 |         "High": np.random.uniform(100, 200, 100),
242 |         "Low": np.random.uniform(100, 200, 100),
243 |         "Close": np.random.uniform(100, 200, 100),
244 |         "Volume": np.random.randint(1000000, 10000000, 100),
245 |     }
246 |     df = pd.DataFrame(data, index=dates)
247 |     # Ensure High >= Open, Close, Low and Low <= Open, Close, High
248 |     df["High"] = df[["Open", "High", "Close"]].max(axis=1)
249 |     df["Low"] = df[["Open", "Low", "Close"]].min(axis=1)
250 |     return df
251 | 
252 | 
253 | # Performance testing utilities
254 | @pytest.fixture
255 | def benchmark_timer():
256 |     """Simple timer for performance benchmarking."""
257 |     import time
258 | 
259 |     class Timer:
260 |         def __init__(self):
261 |             self.start_time = None
262 |             self.elapsed = None
263 | 
264 |         def __enter__(self):
265 |             self.start_time = time.perf_counter()  # monotonic, high-res
266 |             return self
267 | 
268 |         def __exit__(self, *args):
269 |             self.elapsed = time.perf_counter() - self.start_time
270 | 
271 |     return Timer
272 | 
```
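A minimal sketch of how these fixtures compose in a test module. It assumes pytest-asyncio is active for the suite (the `client` fixture is async); the `/health` route is a hypothetical endpoint used purely for illustration.

```python
import pytest


@pytest.mark.asyncio
async def test_health_endpoint(client):
    """client wires ASGITransport to the app with the db_session override."""
    response = await client.get("/health")  # hypothetical route
    assert response.status_code == 200


def test_sample_data_is_ohlc_consistent(sample_stock_data):
    """sample_stock_data guarantees High/Low bracket Open and Close."""
    df = sample_stock_data
    assert (df["High"] >= df[["Open", "Close"]].max(axis=1)).all()
    assert (df["Low"] <= df[["Open", "Close"]].min(axis=1)).all()


def test_timer_measures_elapsed(benchmark_timer):
    """benchmark_timer yields a context-manager Timer class."""
    with benchmark_timer() as timer:
        sum(range(10_000))
    assert timer.elapsed >= 0
```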

--------------------------------------------------------------------------------
/maverick_mcp/providers/factories/config_factory.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Configuration factory for creating configuration providers.
  3 | 
  4 | This module provides factory methods for creating different types of
  5 | configuration providers based on the environment or requirements.
  6 | """
  7 | 
  8 | import logging
  9 | 
 10 | from maverick_mcp.providers.interfaces.config import (
 11 |     EnvironmentConfigurationProvider,
 12 |     IConfigurationProvider,
 13 | )
 14 | 
 15 | logger = logging.getLogger(__name__)
 16 | 
 17 | 
 18 | class ConfigurationFactory:
 19 |     """
 20 |     Factory class for creating configuration provider instances.
 21 | 
 22 |     This factory provides methods to create different types of configuration
 23 |     providers based on the deployment environment or specific requirements.
 24 |     """
 25 | 
 26 |     @staticmethod
 27 |     def create_environment_config() -> IConfigurationProvider:
 28 |         """
 29 |         Create a configuration provider that reads from environment variables.
 30 | 
 31 |         Returns:
 32 |             Environment-based configuration provider
 33 |         """
 34 |         logger.debug("Creating environment configuration provider")
 35 |         return EnvironmentConfigurationProvider()
 36 | 
 37 |     @staticmethod
 38 |     def create_test_config(
 39 |         overrides: dict[str, str] | None = None,
 40 |     ) -> IConfigurationProvider:
 41 |         """
 42 |         Create a configuration provider for testing with optional overrides.
 43 | 
 44 |         Args:
 45 |             overrides: Dictionary of configuration overrides for testing
 46 | 
 47 |         Returns:
 48 |             Test configuration provider
 49 |         """
 50 |         logger.debug("Creating test configuration provider")
 51 | 
 52 |         # Create a test implementation that uses safe defaults
 53 |         class TestConfigurationProvider:
 54 |             def __init__(self, overrides: dict[str, str] | None = None):
 55 |                 self._overrides = overrides or {}
 56 |                 self._defaults = {
 57 |                     "DATABASE_URL": "sqlite:///:memory:",
 58 |                     "REDIS_HOST": "localhost",
 59 |                     "REDIS_PORT": "6379",
 60 |                     "REDIS_DB": "1",  # Use different DB for tests
 61 |                     "CACHE_ENABLED": "false",  # Disable cache in tests by default
 62 |                     "LOG_LEVEL": "DEBUG",
 63 |                     "ENVIRONMENT": "test",
 64 |                     "REQUEST_TIMEOUT": "5",
 65 |                     "MAX_RETRIES": "1",
 66 |                     "DB_POOL_SIZE": "1",
 67 |                     "DB_MAX_OVERFLOW": "0",
 68 |                 }
 69 | 
 70 |             def get_database_url(self) -> str:
 71 |                 return self._overrides.get(
 72 |                     "DATABASE_URL", self._defaults["DATABASE_URL"]
 73 |                 )
 74 | 
 75 |             def get_redis_host(self) -> str:
 76 |                 return self._overrides.get("REDIS_HOST", self._defaults["REDIS_HOST"])
 77 | 
 78 |             def get_redis_port(self) -> int:
 79 |                 return int(
 80 |                     self._overrides.get("REDIS_PORT", self._defaults["REDIS_PORT"])
 81 |                 )
 82 | 
 83 |             def get_redis_db(self) -> int:
 84 |                 return int(self._overrides.get("REDIS_DB", self._defaults["REDIS_DB"]))
 85 | 
 86 |             def get_redis_password(self) -> str | None:
 87 |                 password = self._overrides.get("REDIS_PASSWORD", "")
 88 |                 return password if password else None
 89 | 
 90 |             def get_redis_ssl(self) -> bool:
 91 |                 return self._overrides.get("REDIS_SSL", "false").lower() == "true"
 92 | 
 93 |             def is_cache_enabled(self) -> bool:
 94 |                 return (
 95 |                     self._overrides.get(
 96 |                         "CACHE_ENABLED", self._defaults["CACHE_ENABLED"]
 97 |                     ).lower()
 98 |                     == "true"
 99 |                 )
100 | 
101 |             def get_cache_ttl(self) -> int:
102 |                 return int(
103 |                     self._overrides.get("CACHE_TTL_SECONDS", "300")
104 |                 )  # 5 minutes for tests
105 | 
106 |             def get_fred_api_key(self) -> str:
107 |                 return self._overrides.get("FRED_API_KEY", "")
108 | 
109 |             def get_external_api_key(self) -> str:
110 |                 return self._overrides.get("CAPITAL_COMPANION_API_KEY", "")
111 | 
112 |             def get_tiingo_api_key(self) -> str:
113 |                 return self._overrides.get("TIINGO_API_KEY", "")
114 | 
115 |             def get_log_level(self) -> str:
116 |                 return self._overrides.get("LOG_LEVEL", self._defaults["LOG_LEVEL"])
117 | 
118 |             def is_development_mode(self) -> bool:
119 |                 env = self._overrides.get(
120 |                     "ENVIRONMENT", self._defaults["ENVIRONMENT"]
121 |                 ).lower()
122 |                 return env in ("development", "dev", "test")
123 | 
124 |             def is_production_mode(self) -> bool:
125 |                 env = self._overrides.get(
126 |                     "ENVIRONMENT", self._defaults["ENVIRONMENT"]
127 |                 ).lower()
128 |                 return env in ("production", "prod")
129 | 
130 |             def get_request_timeout(self) -> int:
131 |                 return int(
132 |                     self._overrides.get(
133 |                         "REQUEST_TIMEOUT", self._defaults["REQUEST_TIMEOUT"]
134 |                     )
135 |                 )
136 | 
137 |             def get_max_retries(self) -> int:
138 |                 return int(
139 |                     self._overrides.get("MAX_RETRIES", self._defaults["MAX_RETRIES"])
140 |                 )
141 | 
142 |             def get_pool_size(self) -> int:
143 |                 return int(
144 |                     self._overrides.get("DB_POOL_SIZE", self._defaults["DB_POOL_SIZE"])
145 |                 )
146 | 
147 |             def get_max_overflow(self) -> int:
148 |                 return int(
149 |                     self._overrides.get(
150 |                         "DB_MAX_OVERFLOW", self._defaults["DB_MAX_OVERFLOW"]
151 |                     )
152 |                 )
153 | 
154 |             def get_config_value(self, key: str, default=None):
155 |                 return self._overrides.get(key, self._defaults.get(key, default))
156 | 
157 |             def set_config_value(self, key: str, value) -> None:
158 |                 self._overrides[key] = str(value)
159 | 
160 |             def get_all_config(self) -> dict[str, str]:
161 |                 config = self._defaults.copy()
162 |                 config.update(self._overrides)
163 |                 return config
164 | 
165 |             def reload_config(self) -> None:
166 |                 pass  # No-op for test config
167 | 
168 |         return TestConfigurationProvider(overrides)
169 | 
170 |     @staticmethod
171 |     def create_production_config() -> IConfigurationProvider:
172 |         """
173 |         Create a configuration provider optimized for production.
174 | 
175 |         Returns:
176 |             Production-optimized configuration provider
177 |         """
178 |         logger.debug("Creating production configuration provider")
179 | 
180 |         # For now, use the environment provider but could be enhanced with
181 |         # additional validation, secret management, etc.
182 |         config = EnvironmentConfigurationProvider()
183 | 
184 |         # Validate production requirements
185 |         errors = []
186 |         if not config.get_database_url().startswith(("postgresql", "mysql")):  # accept dialect+driver URLs too
187 |             errors.append("Production requires PostgreSQL or MySQL database")
188 | 
189 |         if config.is_development_mode():
190 |             logger.warning("Running production config in development mode")
191 | 
192 |         if errors:
193 |             error_msg = "Production configuration validation failed: " + ", ".join(
194 |                 errors
195 |             )
196 |             logger.error(error_msg)
197 |             raise ValueError(error_msg)
198 | 
199 |         return config
200 | 
201 |     @staticmethod
202 |     def create_development_config() -> IConfigurationProvider:
203 |         """
204 |         Create a configuration provider optimized for development.
205 | 
206 |         Returns:
207 |             Development-optimized configuration provider
208 |         """
209 |         logger.debug("Creating development configuration provider")
210 |         return EnvironmentConfigurationProvider()
211 | 
212 |     @staticmethod
213 |     def auto_detect_config() -> IConfigurationProvider:
214 |         """
215 |         Auto-detect the appropriate configuration provider based on environment.
216 | 
217 |         Returns:
218 |             Appropriate configuration provider for the current environment
219 |         """
220 |         # Check environment variables to determine the mode
221 |         import os
222 | 
223 |         environment = os.getenv("ENVIRONMENT", "development").lower()
224 | 
225 |         if environment in ("production", "prod"):
226 |             return ConfigurationFactory.create_production_config()
227 |         elif environment in ("test", "testing"):
228 |             return ConfigurationFactory.create_test_config()
229 |         else:
230 |             return ConfigurationFactory.create_development_config()
231 | 
232 |     @staticmethod
233 |     def validate_config(config: IConfigurationProvider) -> list[str]:
234 |         """
235 |         Validate a configuration provider for common issues.
236 | 
237 |         Args:
238 |             config: Configuration provider to validate
239 | 
240 |         Returns:
241 |             List of validation errors (empty if valid)
242 |         """
243 |         errors = []
244 | 
245 |         # Check required configuration
246 |         if not config.get_database_url():
247 |             errors.append("Database URL is required")
248 | 
249 |         # Check production-specific requirements
250 |         if config.is_production_mode():
251 |             if config.get_database_url().startswith("sqlite://"):
252 |                 errors.append("SQLite is not recommended for production")
253 | 
254 |         # Check cache configuration consistency
255 |         if config.is_cache_enabled():
256 |             if not config.get_redis_host():
257 |                 errors.append("Redis host is required when caching is enabled")
258 | 
259 |             if config.get_redis_port() <= 0 or config.get_redis_port() > 65535:
260 |                 errors.append("Invalid Redis port number")
261 | 
262 |         # Check timeout values
263 |         if config.get_request_timeout() <= 0:
264 |             errors.append("Request timeout must be positive")
265 | 
266 |         if config.get_max_retries() < 0:
267 |             errors.append("Max retries cannot be negative")
268 | 
269 |         return errors
270 | 
```
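To make the override mechanics concrete, here is a short usage sketch. The override keys come straight from the defaults table above, and the assertions reflect the documented precedence: overrides win, defaults fill the rest.

```python
from maverick_mcp.providers.factories.config_factory import ConfigurationFactory

# Override only what the test cares about; everything else keeps its default.
config = ConfigurationFactory.create_test_config(
    overrides={"CACHE_ENABLED": "true", "REDIS_HOST": "redis.test.local"}
)

assert config.get_database_url() == "sqlite:///:memory:"  # default stands
assert config.is_cache_enabled()  # override wins

# validate_config reports problems as a list instead of raising, so callers
# decide how strict to be. Here the list is empty: Redis host is set and the
# port falls back to a valid default.
errors = ConfigurationFactory.validate_config(config)
assert errors == []
```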

--------------------------------------------------------------------------------
/maverick_mcp/api/routers/screening_parallel.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Enhanced parallel screening router for Maverick-MCP.
  3 | 
  4 | This router provides parallel versions of screening operations
  5 | for significantly improved performance.
  6 | """
  7 | 
  8 | import time
  9 | from typing import Any
 10 | 
 11 | from fastapi import APIRouter, HTTPException, Query
 12 | from pydantic import BaseModel, Field
 13 | 
 14 | from maverick_mcp.utils.logging import get_logger
 15 | from maverick_mcp.utils.parallel_screening import (
 16 |     make_parallel_safe,
 17 |     parallel_screen_async,
 18 | )
 19 | 
 20 | logger = get_logger(__name__)
 21 | 
 22 | # Create router
 23 | router = APIRouter(
 24 |     prefix="/screening/parallel",
 25 |     tags=["parallel_screening"],
 26 | )
 27 | 
 28 | 
 29 | class ParallelScreeningRequest(BaseModel):
 30 |     """Request model for parallel screening."""
 31 | 
 32 |     symbols: list[str] = Field(..., description="List of symbols to screen")
 33 |     strategy: str = Field("momentum", description="Screening strategy to use")
 34 |     max_workers: int | None = Field(None, description="Maximum parallel workers")
 35 |     min_score: float = Field(70.0, ge=0, le=100, description="Minimum score to pass")
 36 | 
 37 | 
 38 | class ScreeningResult(BaseModel):
 39 |     """Individual screening result."""
 40 | 
 41 |     symbol: str
 42 |     passed: bool
 43 |     score: float
 44 |     metrics: dict[str, Any]
 45 | 
 46 | 
 47 | class ParallelScreeningResponse(BaseModel):
 48 |     """Response model for parallel screening."""
 49 | 
 50 |     status: str
 51 |     total_symbols: int
 52 |     passed_count: int
 53 |     execution_time: float
 54 |     results: list[ScreeningResult]
 55 |     speedup_factor: float
 56 | 
 57 | 
 58 | # Module-level screening functions (required for multiprocessing)
 59 | @make_parallel_safe
 60 | def screen_momentum_parallel(symbol: str, min_score: float = 70.0) -> dict[str, Any]:
 61 |     """Momentum screening function for parallel execution."""
 62 |     from maverick_mcp.core.technical_analysis import (
 63 |         calculate_macd,
 64 |         calculate_rsi,
 65 |         calculate_sma,
 66 |     )
 67 |     from maverick_mcp.providers.stock_data import StockDataProvider
 68 | 
 69 |     try:
 70 |         provider = StockDataProvider(use_cache=False)
 71 |         data = provider.get_stock_data(symbol, "2023-06-01", "2024-01-01")
 72 | 
 73 |         if len(data) < 50:
 74 |             return {"symbol": symbol, "passed": False, "score": 0}
 75 | 
 76 |         # Calculate indicators
 77 |         current_price = data["Close"].iloc[-1]
 78 |         sma_20 = calculate_sma(data, 20).iloc[-1]
 79 |         sma_50 = calculate_sma(data, 50).iloc[-1]
 80 |         rsi = calculate_rsi(data, 14).iloc[-1]
 81 |         macd_line, signal_line, _ = calculate_macd(data)
 82 | 
 83 |         # Calculate score
 84 |         score = 0.0
 85 |         if current_price > sma_20:
 86 |             score += 25
 87 |         if current_price > sma_50:
 88 |             score += 25
 89 |         if 40 <= rsi <= 70:
 90 |             score += 25
 91 |         if macd_line.iloc[-1] > signal_line.iloc[-1]:
 92 |             score += 25
 93 | 
 94 |         return {
 95 |             "symbol": symbol,
 96 |             "passed": score >= min_score,
 97 |             "score": score,
 98 |             "metrics": {
 99 |                 "price": round(current_price, 2),
100 |                 "sma_20": round(sma_20, 2),
101 |                 "sma_50": round(sma_50, 2),
102 |                 "rsi": round(rsi, 2),
103 |                 "above_sma_20": current_price > sma_20,
104 |                 "above_sma_50": current_price > sma_50,
105 |                 "macd_bullish": macd_line.iloc[-1] > signal_line.iloc[-1],
106 |             },
107 |         }
108 | 
109 |     except Exception as e:
110 |         logger.error(f"Error screening {symbol}: {e}")
111 |         return {"symbol": symbol, "passed": False, "score": 0, "error": str(e)}
112 | 
113 | 
114 | @make_parallel_safe
115 | def screen_value_parallel(symbol: str, min_score: float = 70.0) -> dict[str, Any]:
116 |     """Value screening function for parallel execution."""
117 |     from maverick_mcp.core.technical_analysis import calculate_rsi, calculate_sma
118 |     from maverick_mcp.providers.stock_data import StockDataProvider
119 | 
120 |     try:
121 |         provider = StockDataProvider(use_cache=False)
122 |         data = provider.get_stock_data(symbol, "2023-01-01", "2024-01-01")
123 | 
124 |         if len(data) < 200:
125 |             return {"symbol": symbol, "passed": False, "score": 0}
126 | 
127 |         # Calculate value metrics
128 |         current_price = data["Close"].iloc[-1]
129 |         sma_200 = calculate_sma(data, 200).iloc[-1]
130 |         year_high = data["High"].max()
131 |         year_low = data["Low"].min()
132 |         price_range_position = (current_price - year_low) / (year_high - year_low)
133 | 
134 |         # RSI for oversold conditions
135 |         rsi = calculate_rsi(data, 14).iloc[-1]
136 | 
137 |         # Value scoring
138 |         score = 0.0
139 |         if current_price < sma_200 * 0.95:  # 5% below 200 SMA
140 |             score += 30
141 |         if price_range_position < 0.3:  # Lower 30% of range
142 |             score += 30
143 |         if rsi < 35:  # Oversold
144 |             score += 20
145 |         if current_price < year_high * 0.7:  # 30% off highs
146 |             score += 20
147 | 
148 |         return {
149 |             "symbol": symbol,
150 |             "passed": score >= min_score,
151 |             "score": score,
152 |             "metrics": {
153 |                 "price": round(current_price, 2),
154 |                 "sma_200": round(sma_200, 2),
155 |                 "year_high": round(year_high, 2),
156 |                 "year_low": round(year_low, 2),
157 |                 "rsi": round(rsi, 2),
158 |                 "discount_from_high": round((1 - current_price / year_high) * 100, 2),
159 |                 "below_sma_200": current_price < sma_200,
160 |             },
161 |         }
162 | 
163 |     except Exception as e:
164 |         logger.error(f"Error screening {symbol}: {e}")
165 |         return {"symbol": symbol, "passed": False, "score": 0, "error": str(e)}
166 | 
167 | 
168 | # Screening strategy mapping
169 | SCREENING_STRATEGIES = {
170 |     "momentum": screen_momentum_parallel,
171 |     "value": screen_value_parallel,
172 | }
173 | 
174 | 
175 | @router.post("/screen", response_model=ParallelScreeningResponse)
176 | async def parallel_screen_stocks(request: ParallelScreeningRequest):
177 |     """
178 |     Screen multiple stocks in parallel for improved performance.
179 | 
180 |     This endpoint uses multiprocessing to analyze multiple stocks
181 |     simultaneously, providing up to 4x speedup compared to sequential
182 |     processing.
183 |     """
184 |     start_time = time.time()
185 | 
186 |     # Get screening function
187 |     screening_func = SCREENING_STRATEGIES.get(request.strategy)
188 |     if not screening_func:
189 |         raise HTTPException(
190 |             status_code=400,
191 |             detail=f"Unknown strategy: {request.strategy}. "
192 |             f"Available: {list(SCREENING_STRATEGIES.keys())}",
193 |         )
194 | 
195 |     # Nested functions can't be pickled for multiprocessing workers
196 |     from functools import partial
197 |     screen_func = partial(screening_func, min_score=request.min_score)
198 | 
199 |     try:
200 |         # Run parallel screening
201 |         results = await parallel_screen_async(
202 |             symbols=request.symbols,
203 |             screening_func=screen_func,
204 |             max_workers=request.max_workers,
205 |             batch_size=10,
206 |         )
207 | 
208 |         # Calculate execution time and speedup
209 |         execution_time = time.time() - start_time
210 |         sequential_estimate = len(request.symbols) * 0.5  # Assume 0.5s per symbol
211 |         speedup_factor = sequential_estimate / execution_time
212 | 
213 |         # Format results
214 |         formatted_results = [
215 |             ScreeningResult(
216 |                 symbol=r["symbol"],
217 |                 passed=r.get("passed", False),
218 |                 score=r.get("score", 0),
219 |                 metrics=r.get("metrics", {}),
220 |             )
221 |             for r in results
222 |         ]
223 | 
224 |         passed_count = sum(1 for r in results if r.get("passed", False))
225 | 
226 |         return ParallelScreeningResponse(
227 |             status="success",
228 |             total_symbols=len(request.symbols),
229 |             passed_count=passed_count,
230 |             execution_time=round(execution_time, 2),
231 |             results=formatted_results,
232 |             speedup_factor=round(speedup_factor, 2),
233 |         )
234 | 
235 |     except Exception as e:
236 |         logger.error(f"Parallel screening error: {e}", exc_info=True)
237 |         raise HTTPException(status_code=500, detail=str(e))
238 | 
239 | 
240 | @router.get("/benchmark")
241 | async def benchmark_parallel_screening(
242 |     symbols: list[str] = Query(..., description="Symbols to benchmark"),
243 |     strategy: str = Query("momentum", description="Strategy to benchmark"),
244 | ):
245 |     """
246 |     Benchmark parallel vs sequential screening performance.
247 | 
248 |     Useful for demonstrating the performance improvements.
249 |     """
250 | 
251 |     screening_func = SCREENING_STRATEGIES.get(strategy)
252 |     if not screening_func:
253 |         raise HTTPException(status_code=400, detail=f"Unknown strategy: {strategy}")
254 | 
255 |     # Sequential timing
256 |     sequential_start = time.time()
257 |     sequential_results = []
258 |     for symbol in symbols[:5]:  # Limit sequential test
259 |         result = screening_func(symbol)
260 |         sequential_results.append(result)
261 |     sequential_time = (time.time() - sequential_start) * (
262 |         len(symbols) / 5
263 |     )  # Extrapolate
264 | 
265 |     # Parallel timing
266 |     parallel_start = time.time()
267 |     parallel_results = await parallel_screen_async(
268 |         symbols=symbols,
269 |         screening_func=screening_func,
270 |         max_workers=4,
271 |     )
272 |     parallel_time = time.time() - parallel_start
273 | 
274 |     return {
275 |         "symbols_count": len(symbols),
276 |         "sequential_time_estimate": round(sequential_time, 2),
277 |         "parallel_time_actual": round(parallel_time, 2),
278 |         "speedup_factor": round(sequential_time / parallel_time, 2),
279 |         "parallel_results_count": len(parallel_results),
280 |         "performance_gain": f"{round((sequential_time / parallel_time - 1) * 100, 1)}%",
281 |     }
282 | 
283 | 
284 | @router.get("/progress/{task_id}")
285 | async def get_screening_progress(task_id: str):
286 |     """
287 |     Get progress of a running screening task.
288 | 
289 |     For future implementation with background tasks.
290 |     """
291 |     # TODO: Implement with background task queue
292 |     return {
293 |         "task_id": task_id,
294 |         "status": "not_implemented",
295 |         "message": "Background task tracking coming soon",
296 |     }
297 | 
```
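For reference, hitting the parallel screening endpoint from a client is a single POST. The sketch below assumes the API is served at `http://localhost:8000`; the symbol list and worker count are arbitrary examples.

```python
import asyncio

import httpx


async def main() -> None:
    # Base URL is an assumption; point it at wherever the API is running.
    async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
        resp = await client.post(
            "/screening/parallel/screen",
            json={
                "symbols": ["AAPL", "MSFT", "NVDA", "AMD"],
                "strategy": "momentum",
                "max_workers": 4,
                "min_score": 70.0,
            },
            timeout=120.0,  # screening many symbols can take a while
        )
        resp.raise_for_status()
        payload = resp.json()
        print(
            f"{payload['passed_count']}/{payload['total_symbols']} passed "
            f"in {payload['execution_time']}s "
            f"(~{payload['speedup_factor']}x vs. sequential estimate)"
        )


asyncio.run(main())
```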

--------------------------------------------------------------------------------
/scripts/run-migrations.sh:
--------------------------------------------------------------------------------

```bash
  1 | #!/bin/bash
  2 | 
  3 | # MaverickMCP Database Migration Script
  4 | # This script manages database migrations separately from server startup
  5 | 
  6 | set -e
  7 | 
  8 | # Colors
  9 | GREEN='\033[0;32m'
 10 | YELLOW='\033[1;33m'
 11 | RED='\033[0;31m'
 12 | BLUE='\033[0;34m'
 13 | NC='\033[0m'
 14 | 
 15 | # Change to project root
 16 | cd "$(dirname "$0")/.."
 17 | 
 18 | # Load environment variables
 19 | if [ -f .env ]; then
 20 |     source .env
 21 | else
 22 |     echo -e "${RED}Error: .env file not found${NC}"
 23 |     exit 1
 24 | fi
 25 | 
 26 | # Function to display usage
 27 | usage() {
 28 |     echo -e "${BLUE}MaverickMCP Database Migration Tool${NC}"
 29 |     echo ""
 30 |     echo "Usage: $0 [command] [options]"
 31 |     echo ""
 32 |     echo "Commands:"
 33 |     echo "  status      Show current migration status"
 34 |     echo "  upgrade     Apply all pending migrations"
 35 |     echo "  downgrade   Downgrade to a specific revision"
 36 |     echo "  history     Show migration history"
 37 |     echo "  validate    Validate migration files"
 38 |     echo "  backup      Create database backup before migration"
 39 |     echo ""
 40 |     echo "Options:"
 41 |     echo "  -r, --revision <rev>   Target revision for downgrade"
 42 |     echo "  -n, --dry-run         Show what would be done without applying"
 43 |     echo "  -f, --force           Skip confirmation prompts"
 44 |     echo "  -h, --help            Show this help message"
 45 |     echo ""
 46 |     echo "Examples:"
 47 |     echo "  $0 status                    # Check migration status"
 48 |     echo "  $0 upgrade                   # Apply all migrations"
 49 |     echo "  $0 upgrade --dry-run         # Preview migrations"
 50 |     echo "  $0 downgrade -r 001          # Downgrade to revision 001"
 51 |     echo "  $0 backup                    # Create backup"
 52 | }
 53 | 
 54 | # Function to check database connection
 55 | check_database() {
 56 |     echo -e "${YELLOW}Checking database connection...${NC}"
 57 |     
 58 |     if [ -z "$DATABASE_URL" ]; then
 59 |         echo -e "${RED}Error: DATABASE_URL not set${NC}"
 60 |         exit 1
 61 |     fi
 62 |     
 63 |     # Extract database name from URL
 64 |     DB_NAME=$(echo $DATABASE_URL | sed -n 's/.*\/\([^?]*\).*/\1/p')
 65 |     
 66 |     # Test connection with Python
 67 |     uv run python -c "
 68 | import sys
 69 | from sqlalchemy import create_engine, text
 70 | try:
 71 |     engine = create_engine('$DATABASE_URL')
 72 |     with engine.connect() as conn:
 73 |         result = conn.execute(text('SELECT 1'))
 74 |         print('Database connection successful')
 75 | except Exception as e:
 76 |     print(f'Database connection failed: {e}')
 77 |     sys.exit(1)
 78 | " || exit 1
 79 |     
 80 |     echo -e "${GREEN}✓ Connected to database: $DB_NAME${NC}"
 81 | }
 82 | 
 83 | # Function to validate alembic configuration
 84 | validate_alembic() {
 85 |     if [ ! -f alembic.ini ]; then
 86 |         echo -e "${RED}Error: alembic.ini not found${NC}"
 87 |         exit 1
 88 |     fi
 89 |     
 90 |     if [ ! -d alembic/versions ]; then
 91 |         echo -e "${RED}Error: alembic/versions directory not found${NC}"
 92 |         exit 1
 93 |     fi
 94 |     
 95 |     echo -e "${GREEN}✓ Alembic configuration validated${NC}"
 96 | }
 97 | 
 98 | # Function to show migration status
 99 | show_status() {
100 |     echo -e "${BLUE}Current Migration Status${NC}"
101 |     echo "========================"
102 |     
103 |     # Show current revision
104 |     echo -e "\n${YELLOW}Current database revision:${NC}"
105 |     alembic current 2>/dev/null || echo "No migrations applied"
106 |     
107 |     # alembic heads lists script heads, not a diff against the database
108 |     echo -e "\n${YELLOW}Latest available revision(s):${NC}"
109 |     alembic heads 2>/dev/null || echo "No revisions found"
110 |     
111 |     # Count migration files
112 |     MIGRATION_COUNT=$(find alembic/versions -name "*.py" | grep -v __pycache__ | wc -l)
113 |     echo -e "\n${YELLOW}Total migration files:${NC} $MIGRATION_COUNT"
114 | }
115 | 
116 | # Function to show migration history
117 | show_history() {
118 |     echo -e "${BLUE}Migration History${NC}"
119 |     echo "================="
120 |     alembic history --verbose
121 | }
122 | 
123 | # Function to validate migrations
124 | validate_migrations() {
125 |     echo -e "${BLUE}Validating Migrations${NC}"
126 |     echo "===================="
127 |     
128 |     # Check for duplicate revisions
129 |     echo -e "${YELLOW}Checking for duplicate revisions...${NC}"
130 |     DUPLICATES=$(find alembic/versions -name "*.py" -exec grep -H "^revision = " {} \; | 
131 |                  grep -v __pycache__ | 
132 |                  awk -F: '{print $2}' | 
133 |                  sort | uniq -d)
134 |     
135 |     if [ -n "$DUPLICATES" ]; then
136 |         echo -e "${RED}Error: Duplicate revisions found:${NC}"
137 |         echo "$DUPLICATES"
138 |         exit 1
139 |     else
140 |         echo -e "${GREEN}✓ No duplicate revisions${NC}"
141 |     fi
142 |     
143 |     # Check for broken dependencies
144 |     echo -e "${YELLOW}Checking migration dependencies...${NC}"
145 |     uv run python -c "
146 | from alembic.config import Config
147 | from alembic.script import ScriptDirectory
148 | config = Config('alembic.ini')
149 | script_dir = ScriptDirectory.from_config(config)
150 | try:
151 |     script_dir.walk_revisions()
152 |     print('✓ All migration dependencies valid')
153 | except Exception as e:
154 |     print(f'Error: {e}')
155 |     exit(1)
156 | " || exit 1
157 | }
158 | 
159 | # Function to create database backup
160 | create_backup() {
161 |     echo -e "${BLUE}Creating Database Backup${NC}"
162 |     echo "======================"
163 |     
164 |     # Extract connection details from DATABASE_URL
165 |     DB_HOST=$(echo $DATABASE_URL | sed -n 's/.*@\([^:]*\):.*/\1/p')
166 |     DB_NAME=$(echo $DATABASE_URL | sed -n 's/.*\/\([^?]*\).*/\1/p')
167 |     
168 |     BACKUP_FILE="backups/db_backup_$(date +%Y%m%d_%H%M%S).sql"
169 |     mkdir -p backups
170 |     
171 |     echo -e "${YELLOW}Creating backup: $BACKUP_FILE${NC}"
172 |     
173 |     # Use pg_dump if PostgreSQL
174 |     if [[ $DATABASE_URL == *"postgresql"* ]]; then
175 |         pg_dump "$DATABASE_URL" > "$BACKUP_FILE"
176 |     else
177 |         echo -e "${RED}Backup not implemented for this database type${NC}"
178 |         exit 1
179 |     fi
180 |     
181 |     if [ -f "$BACKUP_FILE" ]; then
182 |         SIZE=$(du -h "$BACKUP_FILE" | cut -f1)
183 |         echo -e "${GREEN}✓ Backup created: $BACKUP_FILE ($SIZE)${NC}"
184 |     else
185 |         echo -e "${RED}Error: Backup failed${NC}"
186 |         exit 1
187 |     fi
188 | }
189 | 
190 | # Function to apply migrations
191 | apply_migrations() {
192 |     local DRY_RUN=$1
193 |     local FORCE=$2
194 |     
195 |     echo -e "${BLUE}Applying Migrations${NC}"
196 |     echo "=================="
197 |     
198 |     # Show pending migrations
199 |     echo -e "${YELLOW}Checking for pending migrations...${NC}"
200 |     PENDING=$(alembic upgrade head --sql 2>/dev/null | grep -c "UPDATE alembic_version" || true)  # grep -c prints its own 0; "|| echo 0" would add a second line
201 |     
202 |     if [ "$PENDING" -eq "0" ]; then
203 |         echo -e "${GREEN}✓ Database is up to date${NC}"
204 |         return 0
205 |     fi
206 |     
207 |     echo -e "${YELLOW}Found pending migrations${NC}"
208 |     
209 |     # Dry run mode
210 |     if [ "$DRY_RUN" == "true" ]; then
211 |         echo -e "\n${YELLOW}SQL to be executed:${NC}"
212 |         alembic upgrade head --sql
213 |         return 0
214 |     fi
215 |     
216 |     # Confirmation prompt
217 |     if [ "$FORCE" != "true" ]; then
218 |         echo -e "\n${YELLOW}Do you want to apply these migrations? (y/N)${NC}"
219 |         read -r response
220 |         if [[ ! "$response" =~ ^[Yy]$ ]]; then
221 |             echo -e "${RED}Migration cancelled${NC}"
222 |             exit 0
223 |         fi
224 |     fi
225 |     
226 |     # Apply migrations
227 |     echo -e "\n${YELLOW}Applying migrations...${NC}"
228 |     alembic upgrade head
229 |     
230 |     echo -e "${GREEN}✓ Migrations applied successfully${NC}"
231 |     
232 |     # Show new status
233 |     echo -e "\n${YELLOW}New database revision:${NC}"
234 |     alembic current
235 | }
236 | 
237 | # Function to downgrade
238 | downgrade_migration() {
239 |     local REVISION=$1
240 |     local DRY_RUN=$2
241 |     local FORCE=$3
242 |     
243 |     echo -e "${BLUE}Downgrading Migration${NC}"
244 |     echo "==================="
245 |     
246 |     if [ -z "$REVISION" ]; then
247 |         echo -e "${RED}Error: Revision required for downgrade${NC}"
248 |         usage
249 |         exit 1
250 |     fi
251 |     
252 |     # Show current revision
253 |     echo -e "${YELLOW}Current revision:${NC}"
254 |     alembic current
255 |     
256 |     # Dry run mode
257 |     if [ "$DRY_RUN" == "true" ]; then
258 |         echo -e "\n${YELLOW}SQL to be executed (offline --sql downgrade requires a <from>:<to> range):${NC}"
259 |         alembic downgrade "$(alembic current 2>/dev/null | awk '{print $1}'):$REVISION" --sql
260 |         return 0
261 |     fi
262 |     
263 |     # Confirmation prompt
264 |     if [ "$FORCE" != "true" ]; then
265 |         echo -e "\n${RED}WARNING: This will downgrade the database to revision $REVISION${NC}"
266 |         echo -e "${YELLOW}Do you want to continue? (y/N)${NC}"
267 |         read -r response
268 |         if [[ ! "$response" =~ ^[Yy]$ ]]; then
269 |             echo -e "${RED}Downgrade cancelled${NC}"
270 |             exit 0
271 |         fi
272 |     fi
273 |     
274 |     # Create backup first
275 |     create_backup
276 |     
277 |     # Apply downgrade
278 |     echo -e "\n${YELLOW}Downgrading to revision $REVISION...${NC}"
279 |     alembic downgrade $REVISION
280 |     
281 |     echo -e "${GREEN}✓ Downgrade completed successfully${NC}"
282 |     
283 |     # Show new status
284 |     echo -e "\n${YELLOW}New database revision:${NC}"
285 |     alembic current
286 | }
287 | 
288 | # Parse command line arguments
289 | COMMAND=""
290 | REVISION=""
291 | DRY_RUN=false
292 | FORCE=false
293 | 
294 | while [[ $# -gt 0 ]]; do
295 |     case $1 in
296 |         status|upgrade|downgrade|history|validate|backup)
297 |             COMMAND=$1
298 |             shift
299 |             ;;
300 |         -r|--revision)
301 |             REVISION="$2"
302 |             shift 2
303 |             ;;
304 |         -n|--dry-run)
305 |             DRY_RUN=true
306 |             shift
307 |             ;;
308 |         -f|--force)
309 |             FORCE=true
310 |             shift
311 |             ;;
312 |         -h|--help)
313 |             usage
314 |             exit 0
315 |             ;;
316 |         *)
317 |             echo -e "${RED}Unknown option: $1${NC}"
318 |             usage
319 |             exit 1
320 |             ;;
321 |     esac
322 | done
323 | 
324 | # Validate environment
325 | check_database
326 | validate_alembic
327 | 
328 | # Execute command
329 | case $COMMAND in
330 |     status)
331 |         show_status
332 |         ;;
333 |     upgrade)
334 |         apply_migrations $DRY_RUN $FORCE
335 |         ;;
336 |     downgrade)
337 |         downgrade_migration $REVISION $DRY_RUN $FORCE
338 |         ;;
339 |     history)
340 |         show_history
341 |         ;;
342 |     validate)
343 |         validate_migrations
344 |         ;;
345 |     backup)
346 |         create_backup
347 |         ;;
348 |     *)
349 |         echo -e "${RED}Error: Command required${NC}"
350 |         usage
351 |         exit 1
352 |         ;;
353 | esac
354 | 
355 | echo -e "\n${GREEN}Migration tool completed successfully${NC}"
```
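The script's pending-migration check greps the offline SQL, which is a heuristic. The same question can be answered through Alembic's Python API, using the same `Config`/`ScriptDirectory` objects the validate command already loads. A sketch, assuming `DATABASE_URL` is set as the script requires:

```python
import os

from alembic.config import Config
from alembic.runtime.migration import MigrationContext
from alembic.script import ScriptDirectory
from sqlalchemy import create_engine

config = Config("alembic.ini")
script = ScriptDirectory.from_config(config)

engine = create_engine(os.environ["DATABASE_URL"])
with engine.connect() as conn:
    # Revision recorded in the alembic_version table (None if none applied).
    current = MigrationContext.configure(conn).get_current_revision()

# get_current_head raises if the scripts directory has unmerged heads,
# which doubles as the dependency check the validate command performs.
head = script.get_current_head()

if current == head:
    print("Database is up to date")
else:
    print(f"Pending migrations: database at {current}, scripts at {head}")
```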

--------------------------------------------------------------------------------
/maverick_mcp/config/validation.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Environment configuration validation for MaverickMCP.
  3 | 
  4 | This module validates all required environment variables and configuration
  5 | settings at startup to prevent runtime errors in production.
  6 | """
  7 | 
  8 | import os
  9 | import sys
 10 | from typing import Any
 11 | from urllib.parse import urlparse
 12 | 
 13 | from maverick_mcp.config.settings import settings
 14 | from maverick_mcp.utils.logging import get_logger
 15 | 
 16 | logger = get_logger(__name__)
 17 | 
 18 | 
 19 | class ConfigurationError(Exception):
 20 |     """Raised when configuration validation fails."""
 21 | 
 22 |     pass
 23 | 
 24 | 
 25 | class EnvironmentValidator:
 26 |     """Validates environment configuration at startup."""
 27 | 
 28 |     def __init__(self):
 29 |         self.errors: list[str] = []
 30 |         self.warnings: list[str] = []
 31 |         self.validated_vars: set[str] = set()
 32 | 
 33 |     def validate_all(self) -> bool:
 34 |         """
 35 |         Run all validation checks.
 36 | 
 37 |         Returns:
 38 |             True if validation passes, False otherwise
 39 |         """
 40 |         logger.info("Starting environment validation...")
 41 | 
 42 |         # Core settings
 43 |         self._validate_core_settings()
 44 | 
 45 |         # Database settings
 46 |         self._validate_database_settings()
 47 | 
 48 |         # Redis settings
 49 |         self._validate_redis_settings()
 50 | 
 51 |         # API settings
 52 |         self._validate_api_settings()
 53 | 
 54 |         # External service settings
 55 |         self._validate_external_services()
 56 | 
 57 |         # Report results
 58 |         self._report_results()
 59 | 
 60 |         return len(self.errors) == 0
 61 | 
 62 |     def _validate_core_settings(self):
 63 |         """Validate core application settings."""
 64 |         # App name
 65 |         if not settings.app_name:
 66 |             self.errors.append("APP_NAME is required")
 67 | 
 68 |         # Environment
 69 |         if settings.environment not in ["development", "staging", "production"]:
 70 |             self.warnings.append(
 71 |                 f"Unknown environment: {settings.environment}. "
 72 |                 "Expected: development, staging, or production"
 73 |             )
 74 | 
 75 |         # Production-specific checks
 76 |         if settings.environment == "production":
 77 |             if settings.api.debug:
 78 |                 self.errors.append("DEBUG must be false in production")
 79 | 
 80 |     def _validate_database_settings(self):
 81 |         """Validate database configuration."""
 82 |         if not settings.database.url:
 83 |             self.errors.append("DATABASE_URL is required")
 84 |             return
 85 | 
 86 |         # Parse and validate URL
 87 |         try:
 88 |             parsed = urlparse(settings.database.url)
 89 | 
 90 |             if not parsed.scheme:
 91 |                 self.errors.append("DATABASE_URL missing scheme")
 92 |                 return
 93 | 
 94 |             # SQLite validation (for personal use)
 95 |             if parsed.scheme == "sqlite":
 96 |                 if not parsed.path:
 97 |                     self.errors.append("SQLite DATABASE_URL missing database path")
 98 |                 return
 99 | 
100 |             # PostgreSQL validation (for production)
101 |             if parsed.scheme.startswith("postgresql"):
102 |                 if not parsed.hostname:
103 |                     self.errors.append("PostgreSQL DATABASE_URL missing hostname")
104 | 
105 |                 if not parsed.path or parsed.path == "/":
106 |                     self.errors.append("PostgreSQL DATABASE_URL missing database name")
107 |             else:
108 |                 self.warnings.append(
109 |                     f"Database scheme: {parsed.scheme}. "
110 |                     "MaverickMCP supports SQLite (personal use) and PostgreSQL (production)."
111 |                 )
112 | 
113 |             # Production-specific PostgreSQL checks
114 |             if settings.environment == "production" and parsed.scheme.startswith(
115 |                 "postgresql"
116 |             ):
117 |                 if parsed.hostname in ["localhost", "127.0.0.1"]:
118 |                     self.warnings.append(
119 |                         "Using localhost database in production. "
120 |                         "Consider using a managed database service."
121 |                     )
122 | 
123 |                 # SSL mode check
124 |                 query_params = dict(
125 |                     param.split("=", 1)
126 |                     for param in (parsed.query.split("&") if parsed.query else [])
127 |                 )
128 |                 if query_params.get("sslmode") != "require":
129 |                     self.warnings.append(
130 |                         "DATABASE_URL should use sslmode=require in production"
131 |                     )
132 | 
133 |         except Exception as e:
134 |             self.errors.append(f"Invalid DATABASE_URL format: {e}")
135 | 
136 |     def _validate_redis_settings(self):
137 |         """Validate Redis configuration."""
138 |         redis_url = settings.redis.url
139 | 
140 |         if not redis_url:
141 |             self.warnings.append(
142 |                 "Redis URL not configured. Performance may be impacted."
143 |             )
144 |             return
145 | 
146 |         # Production Redis checks
147 |         if settings.environment == "production":
148 |             if "localhost" in redis_url or "127.0.0.1" in redis_url:
149 |                 self.warnings.append(
150 |                     "Using localhost Redis in production. "
151 |                     "Consider using a managed Redis service."
152 |                 )
153 | 
154 |             if settings.redis.password is None:
155 |                 self.warnings.append(
156 |                     "Consider using password-protected Redis in production"
157 |                 )
158 | 
159 |             if not settings.redis.ssl:
160 |                 self.warnings.append("Consider using SSL for Redis in production")
161 | 
162 |     def _validate_api_settings(self):
163 |         """Validate API settings."""
164 |         # CORS origins
165 |         if settings.environment == "production":
166 |             if "*" in settings.api.cors_origins:
167 |                 self.errors.append(
168 |                     "CORS wildcard (*) not allowed in production. "
169 |                     "Set specific allowed origins."
170 |                 )
171 | 
172 |             if not settings.api.cors_origins:
173 |                 self.warnings.append("No CORS origins configured")
174 |             else:
175 |                 # Validate each origin
176 |                 for origin in settings.api.cors_origins:
177 |                     if (
178 |                         origin.startswith("http://")
179 |                         and origin != "http://localhost:3000"
180 |                     ):
181 |                         self.warnings.append(
182 |                             f"Insecure HTTP origin in production: {origin}"
183 |                         )
184 | 
185 |         # Rate limiting validation - check environment variables directly
186 |         rate_limit_per_ip = os.getenv("RATE_LIMIT_PER_IP")
187 |         if rate_limit_per_ip:
188 |             try:
189 |                 if int(rate_limit_per_ip) <= 0:
190 |                     self.errors.append("RATE_LIMIT_PER_IP must be positive")
191 |             except ValueError:
192 |                 self.errors.append("RATE_LIMIT_PER_IP must be a valid integer")
193 | 
194 |     def _validate_external_services(self):
195 |         """Validate external service configurations."""
196 |         # Email service (if configured)
197 |         if os.getenv("MAILGUN_API_KEY"):
198 |             if not os.getenv("MAILGUN_DOMAIN"):
199 |                 self.errors.append(
200 |                     "MAILGUN_DOMAIN required when MAILGUN_API_KEY is set"
201 |                 )
202 | 
203 |             if not os.getenv("MAILGUN_FROM_EMAIL"):
204 |                 self.warnings.append("MAILGUN_FROM_EMAIL not set, using default")
205 | 
206 |         # Monitoring services
207 |         if settings.environment == "production":
208 |             if not os.getenv("SENTRY_DSN"):
209 |                 self.warnings.append(
210 |                     "SENTRY_DSN not configured. Error tracking is disabled."
211 |                 )
212 | 
213 |         # Optional API keys
214 |         optional_keys = [
215 |             "FRED_API_KEY",
216 |             "TIINGO_API_KEY",
217 |             "OPENAI_API_KEY",
218 |             "ANTHROPIC_API_KEY",
219 |             "CAPITAL_COMPANION_API_KEY",
220 |         ]
221 | 
222 |         missing_optional = [key for key in optional_keys if not os.getenv(key)]
223 |         if missing_optional:
224 |             self.warnings.append(
225 |                 f"Optional API keys not configured: {', '.join(missing_optional)}. "
226 |                 "Some features may be limited."
227 |             )
228 | 
229 |     def _report_results(self):
230 |         """Report validation results."""
231 |         if self.errors:
232 |             logger.error(
233 |                 f"Environment validation failed with {len(self.errors)} errors:"
234 |             )
235 |             for error in self.errors:
236 |                 logger.error(f"  ✗ {error}")
237 | 
238 |         if self.warnings:
239 |             logger.warning(
240 |                 f"Environment validation found {len(self.warnings)} warnings:"
241 |             )
242 |             for warning in self.warnings:
243 |                 logger.warning(f"  ⚠ {warning}")
244 | 
245 |         if not self.errors and not self.warnings:
246 |             logger.info("✓ Environment validation passed successfully")
247 |         elif not self.errors:
248 |             logger.info(
249 |                 f"✓ Environment validation passed with {len(self.warnings)} warnings"
250 |             )
251 | 
252 |     def get_status_dict(self) -> dict[str, Any]:
253 |         """Get validation status as a dictionary."""
254 |         return {
255 |             "valid": len(self.errors) == 0,
256 |             "errors": self.errors,
257 |             "warnings": self.warnings,
258 |             "environment": settings.environment,
259 |             "auth_enabled": False,
260 |         }
261 | 
262 | 
263 | def validate_environment(fail_on_error: bool = True) -> bool:
264 |     """
265 |     Validate environment configuration.
266 | 
267 |     Args:
268 |         fail_on_error: If True, exit process on validation errors
269 | 
270 |     Returns:
271 |         True if validation passes, False otherwise
272 |     """
273 |     validator = EnvironmentValidator()
274 |     is_valid = validator.validate_all()
275 | 
276 |     if not is_valid and fail_on_error:
277 |         logger.error("Environment validation failed. Exiting...")
278 |         sys.exit(1)
279 | 
280 |     return is_valid
281 | 
282 | 
283 | def get_validation_status() -> dict[str, Any]:
284 |     """Get current validation status without failing."""
285 |     validator = EnvironmentValidator()
286 |     validator.validate_all()
287 |     return validator.get_status_dict()
288 | 
```
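The module exposes two entry points with different failure semantics, sketched below: `validate_environment` is the hard gate for server startup, while `get_validation_status` never exits and suits read-only surfaces such as a health endpoint (that wiring is an assumed usage, not something the module mandates).

```python
from maverick_mcp.config.validation import (
    get_validation_status,
    validate_environment,
)

# Hard gate at startup: logs errors and calls sys.exit(1) on failure.
validate_environment(fail_on_error=True)

# Soft check: returns a dict and never exits, e.g. for a health endpoint.
status = get_validation_status()
if not status["valid"]:
    for error in status["errors"]:
        print(f"config error: {error}")
for warning in status["warnings"]:
    print(f"config warning: {warning}")
```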

--------------------------------------------------------------------------------
/tests/test_stock_data_fetching_service.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Tests for StockDataFetchingService.
  3 | """
  4 | 
  5 | from unittest.mock import Mock, patch
  6 | 
  7 | import pandas as pd
  8 | import pytest
  9 | 
 10 | from maverick_mcp.infrastructure.data_fetching import StockDataFetchingService
 11 | 
 12 | 
 13 | class TestStockDataFetchingService:
 14 |     """Test cases for StockDataFetchingService."""
 15 | 
 16 |     def setup_method(self):
 17 |         """Set up test fixtures."""
 18 |         self.service = StockDataFetchingService(timeout=30, max_retries=3)
 19 | 
 20 |     def test_init(self):
 21 |         """Test service initialization."""
 22 |         assert self.service.timeout == 30
 23 |         assert self.service.max_retries == 3
 24 | 
 25 |     @patch("maverick_mcp.infrastructure.data_fetching.stock_data_service.yf.Ticker")
 26 |     def test_fetch_stock_data_with_period(self, mock_ticker_class):
 27 |         """Test fetching stock data with period parameter."""
 28 |         # Mock data
 29 |         mock_data = pd.DataFrame(
 30 |             {
 31 |                 "Open": [150.0, 151.0],
 32 |                 "High": [152.0, 153.0],
 33 |                 "Low": [149.0, 150.0],
 34 |                 "Close": [151.0, 152.0],
 35 |                 "Volume": [1000000, 1100000],
 36 |             },
 37 |             index=pd.date_range("2024-01-01", periods=2),
 38 |         )
 39 | 
 40 |         mock_ticker = Mock()
 41 |         mock_ticker.history.return_value = mock_data
 42 |         mock_ticker_class.return_value = mock_ticker
 43 | 
 44 |         # Test
 45 |         result = self.service.fetch_stock_data("AAPL", period="1mo")
 46 | 
 47 |         # Assertions
 48 |         assert not result.empty
 49 |         assert len(result) == 2
 50 |         assert list(result.columns) == ["Open", "High", "Low", "Close", "Volume"]
 51 |         assert result.index.name == "Date"
 52 |         mock_ticker.history.assert_called_once_with(period="1mo", interval="1d")
 53 | 
 54 |     @patch("maverick_mcp.infrastructure.data_fetching.stock_data_service.yf.Ticker")
 55 |     def test_fetch_stock_data_with_dates(self, mock_ticker_class):
 56 |         """Test fetching stock data with start and end dates."""
 57 |         # Mock data
 58 |         mock_data = pd.DataFrame(
 59 |             {
 60 |                 "Open": [150.0, 151.0],
 61 |                 "High": [152.0, 153.0],
 62 |                 "Low": [149.0, 150.0],
 63 |                 "Close": [151.0, 152.0],
 64 |                 "Volume": [1000000, 1100000],
 65 |             },
 66 |             index=pd.date_range("2024-01-01", periods=2),
 67 |         )
 68 | 
 69 |         mock_ticker = Mock()
 70 |         mock_ticker.history.return_value = mock_data
 71 |         mock_ticker_class.return_value = mock_ticker
 72 | 
 73 |         # Test
 74 |         result = self.service.fetch_stock_data(
 75 |             "AAPL", start_date="2024-01-01", end_date="2024-01-02"
 76 |         )
 77 | 
 78 |         # Assertions
 79 |         assert not result.empty
 80 |         assert len(result) == 2
 81 |         mock_ticker.history.assert_called_once_with(
 82 |             start="2024-01-01", end="2024-01-02", interval="1d"
 83 |         )
 84 | 
 85 |     @patch("maverick_mcp.infrastructure.data_fetching.stock_data_service.yf.Ticker")
 86 |     def test_fetch_stock_data_empty_response(self, mock_ticker_class):
 87 |         """Test handling of empty response from data source."""
 88 |         mock_ticker = Mock()
 89 |         mock_ticker.history.return_value = pd.DataFrame()
 90 |         mock_ticker_class.return_value = mock_ticker
 91 | 
 92 |         # Test
 93 |         result = self.service.fetch_stock_data("INVALID")
 94 | 
 95 |         # Assertions
 96 |         assert result.empty  # Should return empty DataFrame with correct columns
 97 |         assert list(result.columns) == ["Open", "High", "Low", "Close", "Volume"]
 98 | 
 99 |     @patch("maverick_mcp.infrastructure.data_fetching.stock_data_service.yf.Ticker")
100 |     def test_fetch_stock_data_missing_columns(self, mock_ticker_class):
101 |         """Test handling of missing columns in response."""
102 |         # Mock data missing some columns
103 |         mock_data = pd.DataFrame(
104 |             {
105 |                 "Open": [150.0, 151.0],
106 |                 "Close": [151.0, 152.0],
107 |                 # Missing High, Low, Volume
108 |             },
109 |             index=pd.date_range("2024-01-01", periods=2),
110 |         )
111 | 
112 |         mock_ticker = Mock()
113 |         mock_ticker.history.return_value = mock_data
114 |         mock_ticker_class.return_value = mock_ticker
115 | 
116 |         # Test
117 |         result = self.service.fetch_stock_data("AAPL")
118 | 
119 |         # Assertions
120 |         assert not result.empty
121 |         assert "High" in result.columns
122 |         assert "Low" in result.columns
123 |         assert "Volume" in result.columns
124 |         # Check that missing columns are filled with appropriate defaults
125 |         assert (result["Volume"] == 0).all()
126 |         assert (result["High"] == 0.0).all()
127 | 
128 |     @patch("maverick_mcp.infrastructure.data_fetching.stock_data_service.yf.Ticker")
129 |     def test_fetch_stock_info(self, mock_ticker_class):
130 |         """Test fetching stock information."""
131 |         mock_info = {
132 |             "longName": "Apple Inc.",
133 |             "sector": "Technology",
134 |             "industry": "Consumer Electronics",
135 |         }
136 | 
137 |         mock_ticker = Mock()
138 |         mock_ticker.info = mock_info
139 |         mock_ticker_class.return_value = mock_ticker
140 | 
141 |         # Test
142 |         result = self.service.fetch_stock_info("AAPL")
143 | 
144 |         # Assertions
145 |         assert result == mock_info
146 | 
147 |     @patch("maverick_mcp.infrastructure.data_fetching.stock_data_service.yf.Ticker")
148 |     def test_fetch_realtime_data_success(self, mock_ticker_class):
149 |         """Test successful real-time data fetching."""
150 |         # Mock history data
151 |         mock_history = pd.DataFrame(
152 |             {
153 |                 "Close": [150.0],
154 |                 "Volume": [1000000],
155 |             },
156 |             index=pd.date_range("2024-01-01", periods=1),
157 |         )
158 | 
159 |         # Mock info data
160 |         mock_info = {"previousClose": 149.0}
161 | 
162 |         mock_ticker = Mock()
163 |         mock_ticker.history.return_value = mock_history
164 |         mock_ticker.info = mock_info
165 |         mock_ticker_class.return_value = mock_ticker
166 | 
167 |         # Test
168 |         result = self.service.fetch_realtime_data("AAPL")
169 | 
170 |         # Assertions
171 |         assert result is not None
172 |         assert result["symbol"] == "AAPL"
173 |         assert result["price"] == 150.0
174 |         assert result["change"] == 1.0
175 |         assert result["change_percent"] == pytest.approx(0.67, rel=1e-1)
176 |         assert result["volume"] == 1000000
177 |         assert result["is_real_time"] is False
178 | 
179 |     @patch("maverick_mcp.infrastructure.data_fetching.stock_data_service.yf.Ticker")
180 |     def test_fetch_realtime_data_empty(self, mock_ticker_class):
181 |         """Test real-time data fetching with empty response."""
182 |         mock_ticker = Mock()
183 |         mock_ticker.history.return_value = pd.DataFrame()
184 |         mock_ticker_class.return_value = mock_ticker
185 | 
186 |         # Test
187 |         result = self.service.fetch_realtime_data("INVALID")
188 | 
189 |         # Assertions
190 |         assert result is None
191 | 
192 |     def test_fetch_multiple_realtime_data(self):
193 |         """Test fetching real-time data for multiple symbols."""
194 |         with patch.object(self.service, "fetch_realtime_data") as mock_fetch:
195 |             # Mock responses
196 |             mock_fetch.side_effect = [
197 |                 {"symbol": "AAPL", "price": 150.0},
198 |                 None,  # Failed for INVALID
199 |                 {"symbol": "MSFT", "price": 300.0},
200 |             ]
201 | 
202 |             # Test
203 |             result = self.service.fetch_multiple_realtime_data(
204 |                 ["AAPL", "INVALID", "MSFT"]
205 |             )
206 | 
207 |             # Assertions
208 |             assert len(result) == 2  # Only successful fetches
209 |             assert "AAPL" in result
210 |             assert "MSFT" in result
211 |             assert "INVALID" not in result
212 | 
213 |     @patch("maverick_mcp.infrastructure.data_fetching.stock_data_service.yf.Ticker")
214 |     def test_fetch_news(self, mock_ticker_class):
215 |         """Test fetching news data."""
216 |         mock_news = [
217 |             {
218 |                 "title": "Apple Reports Strong Earnings",
219 |                 "publisher": "Reuters",
220 |                 "link": "https://example.com",
221 |                 "providerPublishTime": 1640995200,  # Unix timestamp
222 |                 "type": "STORY",
223 |             }
224 |         ]
225 | 
226 |         mock_ticker = Mock()
227 |         mock_ticker.news = mock_news
228 |         mock_ticker_class.return_value = mock_ticker
229 | 
230 |         # Test
231 |         result = self.service.fetch_news("AAPL", limit=1)
232 | 
233 |         # Assertions
234 |         assert not result.empty
235 |         assert len(result) == 1
236 |         assert result.iloc[0]["title"] == "Apple Reports Strong Earnings"
237 |         assert "providerPublishTime" in result.columns
238 | 
239 |     @patch("maverick_mcp.infrastructure.data_fetching.stock_data_service.yf.Ticker")
240 |     def test_check_if_etf_true(self, mock_ticker_class):
241 |         """Test ETF check returning True."""
242 |         mock_info = {"quoteType": "ETF"}
243 | 
244 |         mock_ticker = Mock()
245 |         mock_ticker.info = mock_info
246 |         mock_ticker_class.return_value = mock_ticker
247 | 
248 |         # Test
249 |         result = self.service.check_if_etf("SPY")
250 | 
251 |         # Assertions
252 |         assert result is True
253 | 
254 |     @patch("maverick_mcp.infrastructure.data_fetching.stock_data_service.yf.Ticker")
255 |     def test_check_if_etf_false(self, mock_ticker_class):
256 |         """Test ETF check returning False."""
257 |         mock_info = {"quoteType": "EQUITY"}
258 | 
259 |         mock_ticker = Mock()
260 |         mock_ticker.info = mock_info
261 |         mock_ticker_class.return_value = mock_ticker
262 | 
263 |         # Test
264 |         result = self.service.check_if_etf("AAPL")
265 | 
266 |         # Assertions
267 |         assert result is False
268 | 
269 |     @patch("maverick_mcp.infrastructure.data_fetching.stock_data_service.yf.Ticker")
270 |     def test_check_if_etf_fallback(self, mock_ticker_class):
271 |         """Test ETF check using fallback logic."""
272 |         mock_info = {}  # No quoteType
273 | 
274 |         mock_ticker = Mock()
275 |         mock_ticker.info = mock_info
276 |         mock_ticker_class.return_value = mock_ticker
277 | 
278 |         # Test with known ETF symbol
279 |         result = self.service.check_if_etf("QQQ")
280 | 
281 |         # Assertions
282 |         assert result is True
283 | 
```
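
The final test above exercises a fallback path: when `info` carries no `quoteType`, the service still classifies well-known ETF tickers such as `QQQ`. A minimal sketch of logic consistent with that behavior (the `KNOWN_ETFS` set and the standalone function are illustrative assumptions, not the actual `StockDataService` code):

```python
# Hypothetical sketch of the fallback the test implies; the real service
# may use a different list or heuristic.
KNOWN_ETFS = {"SPY", "QQQ", "IWM", "DIA", "VTI"}


def check_if_etf(symbol: str, info: dict) -> bool:
    """Classify a symbol as an ETF, falling back to a static allowlist."""
    quote_type = info.get("quoteType")
    if quote_type is not None:
        return quote_type == "ETF"  # authoritative when the field is present
    # Fallback: no quoteType in the info payload, consult known ETF tickers
    return symbol.upper() in KNOWN_ETFS
```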

--------------------------------------------------------------------------------
/examples/monitoring_example.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Example demonstrating MaverickMCP monitoring and observability features.
  4 | 
  5 | This example shows how to:
  6 | 1. Enable monitoring and tracing
  7 | 2. Use monitoring utilities in your code
  8 | 3. Access monitoring endpoints
  9 | 4. View metrics and traces
 10 | """
 11 | 
 12 | import asyncio
 13 | import os
 14 | import time
 15 | from contextlib import asynccontextmanager
 16 | 
 17 | # Set environment variables for monitoring
 18 | os.environ.update(
 19 |     {
 20 |         "OTEL_TRACING_ENABLED": "true",
 21 |         "JAEGER_ENDPOINT": "http://localhost:14268/api/traces",
 22 |         "SENTRY_DSN": "",  # Optional: add your Sentry DSN
 23 |     }
 24 | )
 25 | 
 26 | from maverick_mcp.utils.logging import get_logger
 27 | from maverick_mcp.utils.monitoring import (
 28 |     track_cache_operation,
 29 |     track_external_api_call,
 30 |     track_tool_usage,
 31 |     update_performance_metrics,
 32 | )
 33 | from maverick_mcp.utils.tracing import (
 34 |     trace_cache_operation,
 35 |     trace_database_query,
 36 |     trace_external_api_call,
 37 |     trace_operation,
 38 | )
 39 | 
 40 | logger = get_logger(__name__)
 41 | 
 42 | 
 43 | @asynccontextmanager
 44 | async def example_database_operation(query_type: str, table: str):
 45 |     """Example of monitoring a database operation."""
 46 |     with trace_database_query(query_type, table, f"SELECT * FROM {table}"):
 47 |         start_time = time.time()
 48 |         try:
 49 |             # Simulate database operation
 50 |             await asyncio.sleep(0.1)
 51 |             yield "mock_result"
 52 | 
 53 |             # Track successful operation
 54 |             duration = time.time() - start_time
 55 |             from maverick_mcp.utils.monitoring import track_database_query
 56 | 
 57 |             track_database_query(query_type, table, duration, "success")
 58 | 
 59 |         except Exception:
 60 |             # Track failed operation
 61 |             duration = time.time() - start_time
 62 |             from maverick_mcp.utils.monitoring import track_database_query
 63 | 
 64 |             track_database_query(query_type, table, duration, "error")
 65 |             raise
 66 | 
 67 | 
 68 | @asynccontextmanager
 69 | async def example_external_api_call(service: str, endpoint: str):
 70 |     """Example of monitoring an external API call."""
 71 |     with trace_external_api_call(service, endpoint):
 72 |         start_time = time.time()
 73 |         try:
 74 |             # Simulate external API call
 75 |             await asyncio.sleep(0.2)
 76 |             status_code = 200
 77 |             yield {"data": "mock_response"}
 78 | 
 79 |             # Track successful API call
 80 |             duration = time.time() - start_time
 81 |             track_external_api_call(service, endpoint, "GET", status_code, duration)
 82 | 
 83 |         except Exception as e:
 84 |             # Track failed API call
 85 |             duration = time.time() - start_time
 86 |             track_external_api_call(
 87 |                 service, endpoint, "GET", 500, duration, str(type(e).__name__)
 88 |             )
 89 |             raise
 90 | 
 91 | 
 92 | @asynccontextmanager
 93 | async def example_cache_operation(key: str):
 94 |     """Example of monitoring a cache operation."""
 95 |     with trace_cache_operation("get", "redis"):
  96 |         # No explicit timing here; the cache tracker records hit/miss only
 97 |         try:
 98 |             # Simulate cache operation
 99 |             await asyncio.sleep(0.01)
100 |             hit = True  # Simulate cache hit
101 |             yield "cached_value"
102 | 
103 |             # Track cache operation
104 |             track_cache_operation("redis", "get", hit, key.split(":")[0])
105 | 
106 |         except Exception:
107 |             # Track cache miss/error
108 |             track_cache_operation("redis", "get", False, key.split(":")[0])
109 |             raise
110 | 
111 | 
112 | async def example_tool_execution(tool_name: str, user_id: str):
113 |     """Example of monitoring a tool execution."""
114 |     with trace_operation(
115 |         f"tool.{tool_name}", {"tool.name": tool_name, "user.id": user_id}
116 |     ):
117 |         start_time = time.time()
118 | 
119 |         try:
120 |             # Simulate tool execution with some operations
121 |             logger.info(f"Executing tool: {tool_name}", extra={"user_id": user_id})
122 | 
123 |             # Example: Database query
124 |             async with example_database_operation("SELECT", "stocks") as db_result:
125 |                 logger.info(f"Database query result: {db_result}")
126 | 
127 |             # Example: External API call
128 |             async with example_external_api_call(
129 |                 "yahoo_finance", "/quote/AAPL"
130 |             ) as api_result:
131 |                 logger.info(f"API call result: {api_result}")
132 | 
133 |             # Example: Cache operation
134 |             async with example_cache_operation("stock:AAPL:price") as cache_result:
135 |                 logger.info(f"Cache result: {cache_result}")
136 | 
137 |             # Simulate processing time
138 |             await asyncio.sleep(0.5)
139 | 
140 |             # Track successful tool execution
141 |             duration = time.time() - start_time
142 |             track_tool_usage(
143 |                 tool_name=tool_name,
144 |                 user_id=user_id,
145 |                 duration=duration,
146 |                 status="success",
147 |                 complexity="standard",
148 |             )
149 | 
150 |             return {
151 |                 "status": "success",
152 |                 "data": "Tool execution completed",
153 |                 "duration_ms": int(duration * 1000),
154 |             }
155 | 
156 |         except Exception as e:
157 |             # Track failed tool execution
158 |             duration = time.time() - start_time
159 |             from maverick_mcp.utils.monitoring import track_tool_error
160 | 
161 |             track_tool_error(tool_name, type(e).__name__, "standard")
162 | 
163 |             logger.error(f"Tool execution failed: {tool_name}", exc_info=True)
164 |             raise
165 | 
166 | 
167 | async def demonstrate_monitoring():
168 |     """Demonstrate various monitoring features."""
169 |     logger.info("Starting monitoring demonstration...")
170 | 
171 |     # Initialize monitoring (this would normally be done by the server)
172 |     from maverick_mcp.utils.monitoring import initialize_monitoring
173 |     from maverick_mcp.utils.tracing import initialize_tracing
174 | 
175 |     initialize_monitoring()
176 |     initialize_tracing()
177 | 
178 |     # Example 1: Tool execution monitoring
179 |     logger.info("=== Example 1: Tool Execution Monitoring ===")
180 |     result = await example_tool_execution("get_stock_data", "user123")
181 |     print(f"Tool result: {result}")
182 | 
183 |     # Example 2: Performance metrics
184 |     logger.info("=== Example 2: Performance Metrics ===")
185 |     update_performance_metrics()
186 |     print("Performance metrics updated")
187 | 
188 |     # Example 3: Multiple tool executions for metrics
189 |     logger.info("=== Example 3: Multiple Tool Executions ===")
190 |     tools = ["get_technical_analysis", "screen_stocks", "get_portfolio_data"]
191 |     users = ["user123", "user456", "user789"]
192 | 
193 |     for i in range(5):
194 |         tool = tools[i % len(tools)]
195 |         user = users[i % len(users)]
196 |         try:
197 |             result = await example_tool_execution(tool, user)
198 |             print(f"Tool {tool} for {user}: {result['status']}")
199 |         except Exception as e:
200 |             print(f"Tool {tool} for {user}: FAILED - {e}")
201 | 
202 |         # Small delay between executions
203 |         await asyncio.sleep(0.1)
204 | 
205 |     # Example 4: Error scenarios
206 |     logger.info("=== Example 4: Error Scenarios ===")
207 |     try:
208 |         # Simulate a tool that fails
209 |         with trace_operation("tool.failing_tool", {"tool.name": "failing_tool"}):
210 |             raise ValueError("Simulated tool failure")
211 |     except ValueError as e:
212 |         logger.error(f"Expected error: {e}")
213 |         from maverick_mcp.utils.monitoring import track_tool_error
214 | 
215 |         track_tool_error("failing_tool", "ValueError", "standard")
216 | 
217 |     # Example 5: Security events
218 |     logger.info("=== Example 5: Security Events ===")
219 |     from maverick_mcp.utils.monitoring import (
220 |         track_authentication,
221 |         track_security_violation,
222 |     )
223 | 
224 |     # Simulate authentication attempts
225 |     track_authentication("bearer_token", "success", "Mozilla/5.0")
226 |     track_authentication("bearer_token", "failure", "suspicious-bot/1.0")
227 | 
228 |     # Simulate security violation
229 |     track_security_violation("invalid_token", "high")
230 | 
231 |     print("Security events tracked")
232 | 
233 |     # Example 6: Business metrics
234 |     logger.info("=== Example 6: Business Metrics ===")
235 |     from maverick_mcp.utils.monitoring import (
236 |         track_user_session,
237 |         update_active_users,
238 |     )
239 | 
240 |     # Simulate engagement events
241 |     track_user_session("registered", "api_key", duration=360.0)
242 |     track_user_session("anonymous", "public", duration=120.0)
243 |     update_active_users(daily_count=42, monthly_count=156)
244 | 
245 |     print("Business metrics tracked")
246 | 
247 |     logger.info("Monitoring demonstration completed!")
248 |     print("\n=== Check Your Monitoring Stack ===")
249 |     print("1. Prometheus metrics: http://localhost:9090")
250 |     print("2. Grafana dashboards: http://localhost:3000")
251 |     print("3. Jaeger traces: http://localhost:16686")
252 |     print("4. MaverickMCP metrics: http://localhost:8000/metrics")
253 |     print("5. MaverickMCP health: http://localhost:8000/health")
254 | 
255 | 
256 | def print_monitoring_setup_instructions():
257 |     """Print instructions for setting up the monitoring stack."""
258 |     print("=== MaverickMCP Monitoring Setup ===")
259 |     print()
260 |     print("1. Start the monitoring stack:")
261 |     print("   cd monitoring/")
262 |     print("   docker-compose -f docker-compose.monitoring.yml up -d")
263 |     print()
264 |     print("2. Install monitoring dependencies:")
265 |     print("   pip install prometheus-client opentelemetry-distro sentry-sdk")
266 |     print()
267 |     print("3. Set environment variables:")
268 |     print("   export OTEL_TRACING_ENABLED=true")
269 |     print("   export JAEGER_ENDPOINT=http://localhost:14268/api/traces")
270 |     print()
271 |     print("4. Start MaverickMCP:")
272 |     print("   make dev")
273 |     print()
274 |     print("5. Access monitoring services:")
275 |     print("   - Grafana: http://localhost:3000 (admin/admin)")
276 |     print("   - Prometheus: http://localhost:9090")
277 |     print("   - Jaeger: http://localhost:16686")
278 |     print("   - MaverickMCP metrics: http://localhost:8000/metrics")
279 |     print()
280 | 
281 | 
282 | if __name__ == "__main__":
283 |     import sys
284 | 
285 |     if len(sys.argv) > 1 and sys.argv[1] == "--setup":
286 |         print_monitoring_setup_instructions()
287 |     else:
288 |         # Run the monitoring demonstration
289 |         asyncio.run(demonstrate_monitoring())
290 | 
```
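
The module's `__main__` block gives two entry points: `--setup` prints the stack instructions, and the default path runs the demonstration. Once the demo and server are up, a quick sanity check that metrics are flowing (a sketch assuming the server is reachable on `localhost:8000`, as the demo's own printout suggests):

```python
# Fetch the Prometheus exposition text from the endpoint printed by the demo.
# Assumes a locally running MaverickMCP instance on port 8000.
import urllib.request

with urllib.request.urlopen("http://localhost:8000/metrics") as resp:
    body = resp.read().decode("utf-8")

print(body[:500])  # first 500 characters of the metrics payload
```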

--------------------------------------------------------------------------------
/maverick_mcp/tests/test_technical_analysis.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Tests for technical analysis module.
  3 | """
  4 | 
  5 | import unittest
  6 | from datetime import datetime, timedelta
  7 | 
  8 | import numpy as np
  9 | import pandas as pd
 10 | 
 11 | from maverick_mcp.core.technical_analysis import (
 12 |     add_technical_indicators,
 13 |     analyze_bollinger_bands,
 14 |     analyze_macd,
 15 |     analyze_rsi,
 16 |     analyze_stochastic,
 17 |     analyze_trend,
 18 |     analyze_volume,
 19 |     generate_outlook,
 20 |     identify_chart_patterns,
 21 |     identify_resistance_levels,
 22 |     identify_support_levels,
 23 | )
 24 | 
 25 | 
 26 | def create_test_dataframe(days=100):
 27 |     """Create a test dataframe with price data."""
 28 |     date_today = datetime.now()
 29 |     dates = [date_today - timedelta(days=i) for i in range(days)]
 30 |     dates.reverse()  # Oldest first
 31 | 
 32 |     # Start with a seed value and generate slightly random walk data
 33 |     np.random.seed(42)  # For reproducibility
 34 | 
 35 |     close_price = 100.0
 36 |     prices = []
 37 |     volumes = []
 38 | 
 39 |     for _ in range(days):
 40 |         # Simulate some volatility with random noise and a slight upward trend
 41 |         pct_change = np.random.normal(0.0005, 0.015)  # mean, std dev
 42 |         close_price = close_price * (1 + pct_change)
 43 | 
 44 |         # Calculate OHLC and volume
 45 |         open_price = close_price * (1 + np.random.normal(0, 0.005))
 46 |         high_price = max(open_price, close_price) * (
 47 |             1 + abs(np.random.normal(0, 0.008))
 48 |         )
 49 |         low_price = min(open_price, close_price) * (1 - abs(np.random.normal(0, 0.008)))
 50 |         volume = int(np.random.normal(1000000, 300000))
 51 | 
 52 |         prices.append([open_price, high_price, low_price, close_price])
 53 |         volumes.append(volume)
 54 | 
 55 |     # Create DataFrame
 56 |     df = pd.DataFrame(
 57 |         prices, index=pd.DatetimeIndex(dates), columns=["open", "high", "low", "close"]
 58 |     )
 59 |     df["volume"] = volumes
 60 | 
 61 |     return df
 62 | 
 63 | 
 64 | class TestTechnicalAnalysis(unittest.TestCase):
 65 |     """Test cases for technical analysis functions."""
 66 | 
 67 |     def setUp(self):
 68 |         """Set up test fixtures."""
 69 |         self.df = create_test_dataframe(days=200)
 70 |         self.df_with_indicators = add_technical_indicators(self.df)
 71 | 
 72 |     def test_add_technical_indicators(self):
 73 |         """Test that indicators are added to the dataframe."""
 74 |         result = add_technical_indicators(self.df)
 75 | 
 76 |         # Check that all expected columns are present
 77 |         expected_columns = [
 78 |             "open",
 79 |             "high",
 80 |             "low",
 81 |             "close",
 82 |             "volume",
 83 |             "ema_21",
 84 |             "sma_50",
 85 |             "sma_200",
 86 |             "rsi",
 87 |             "macd_12_26_9",
 88 |             "macds_12_26_9",
 89 |             "macdh_12_26_9",
 90 |             "sma_20",
 91 |             "stdev",
 92 |             "bbu_20_2.0",
 93 |             "bbl_20_2.0",
 94 |             "atr",
 95 |             "stochk_14_3_3",
 96 |             "stochd_14_3_3",
 97 |             "adx_14",
 98 |         ]
 99 | 
100 |         for col in expected_columns:
101 |             self.assertIn(col, result.columns)
102 | 
103 |         # NaNs from the moving windows should appear only at the start; with
104 |         # 200 rows of data the 200-day SMA must be defined on the final row
105 |         self.assertTrue(
106 |             pd.notna(result["sma_200"].iloc[199])
107 |         )
108 | 
109 |     def test_identify_support_levels(self):
110 |         """Test identification of support levels."""
111 |         support_levels = identify_support_levels(self.df_with_indicators)
112 | 
113 |         # We expect at least one support level
114 |         self.assertGreater(len(support_levels), 0)
115 | 
116 |         # Support levels should be sorted
117 |         self.assertEqual(support_levels, sorted(support_levels))
118 | 
119 |         # Support levels should be below current price
120 |         current_price = self.df_with_indicators["close"].iloc[-1]
121 |         self.assertLessEqual(support_levels[0], current_price)
122 | 
123 |     def test_identify_resistance_levels(self):
124 |         """Test identification of resistance levels."""
125 |         resistance_levels = identify_resistance_levels(self.df_with_indicators)
126 | 
127 |         # We expect at least one resistance level
128 |         self.assertGreater(len(resistance_levels), 0)
129 | 
130 |         # Resistance levels should be sorted
131 |         self.assertEqual(resistance_levels, sorted(resistance_levels))
132 | 
133 |         # At least one resistance level should be above current price
134 |         current_price = self.df_with_indicators["close"].iloc[-1]
135 |         self.assertGreaterEqual(resistance_levels[-1], current_price)
136 | 
137 |     def test_analyze_trend(self):
138 |         """Test trend analysis function."""
139 |         trend = analyze_trend(self.df_with_indicators)
140 | 
141 |         # Check that trend is an integer between 0 and 7 (trend strength score)
142 |         self.assertIsInstance(trend, int)
143 |         self.assertGreaterEqual(trend, 0)
144 |         self.assertLessEqual(trend, 7)
145 | 
146 |     def test_analyze_rsi(self):
147 |         """Test RSI analysis function."""
148 |         rsi_analysis = analyze_rsi(self.df_with_indicators)
149 | 
150 |         # Check that analysis contains expected keys
151 |         expected_keys = ["current", "signal", "description"]
152 |         for key in expected_keys:
153 |             self.assertIn(key, rsi_analysis)
154 | 
155 |         # Check value ranges
156 |         self.assertGreaterEqual(rsi_analysis["current"], 0)
157 |         self.assertLessEqual(rsi_analysis["current"], 100)
158 | 
159 |         # Check signal values
160 |         self.assertIn(
161 |             rsi_analysis["signal"], ["bullish", "bearish", "overbought", "oversold"]
162 |         )
163 | 
164 |     def test_analyze_macd(self):
165 |         """Test MACD analysis function."""
166 |         macd_analysis = analyze_macd(self.df_with_indicators)
167 | 
168 |         # Check that analysis contains expected keys
169 |         expected_keys = [
170 |             "macd",
171 |             "signal",
172 |             "histogram",
173 |             "indicator",
174 |             "crossover",
175 |             "description",
176 |         ]
177 |         for key in expected_keys:
178 |             self.assertIn(key, macd_analysis)
179 | 
180 |         # Check signal values
181 |         self.assertIn(
182 |             macd_analysis["indicator"],
183 |             ["bullish", "bearish", "improving", "weakening", "neutral"],
184 |         )
185 | 
186 |         self.assertIn(
187 |             macd_analysis["crossover"],
188 |             [
189 |                 "bullish crossover detected",
190 |                 "bearish crossover detected",
191 |                 "no recent crossover",
192 |             ],
193 |         )
194 | 
195 |     def test_generate_outlook(self):
196 |         """Test outlook generation function."""
197 |         # First, get required analyses
198 |         trend = analyze_trend(self.df_with_indicators)
199 |         rsi_analysis = analyze_rsi(self.df_with_indicators)
200 |         macd_analysis = analyze_macd(self.df_with_indicators)
201 |         stoch_analysis = analyze_stochastic(self.df_with_indicators)
202 | 
203 |         # Generate outlook (analyze_trend returns 0-7, so the bearish branch below cannot fire)
204 |         trend_direction = (
205 |             "bullish" if trend > 3 else "bearish" if trend < -3 else "neutral"
206 |         )
207 |         outlook = generate_outlook(
208 |             self.df_with_indicators,
209 |             trend_direction,
210 |             rsi_analysis,
211 |             macd_analysis,
212 |             stoch_analysis,
213 |         )
214 | 
215 |         # Check output
216 |         self.assertIn(
217 |             outlook,
218 |             [
219 |                 "strongly bullish",
220 |                 "moderately bullish",
221 |                 "strongly bearish",
222 |                 "moderately bearish",
223 |                 "neutral",
224 |             ],
225 |         )
226 | 
227 |     def test_analyze_bollinger_bands(self):
228 |         """Test Bollinger Bands analysis function."""
229 |         bb_analysis = analyze_bollinger_bands(self.df_with_indicators)
230 | 
231 |         # Check that analysis contains expected keys
232 |         expected_keys = [
233 |             "upper_band",
234 |             "middle_band",
235 |             "lower_band",
236 |             "position",
237 |             "signal",
238 |             "volatility",
239 |             "description",
240 |         ]
241 |         for key in expected_keys:
242 |             self.assertIn(key, bb_analysis)
243 | 
244 |         # Check value types
245 |         self.assertIsInstance(bb_analysis["upper_band"], float)
246 |         self.assertIsInstance(bb_analysis["middle_band"], float)
247 |         self.assertIsInstance(bb_analysis["lower_band"], float)
248 |         self.assertIsInstance(bb_analysis["position"], str)
249 |         self.assertIsInstance(bb_analysis["signal"], str)
250 |         self.assertIsInstance(bb_analysis["volatility"], str)
251 |         self.assertIsInstance(bb_analysis["description"], str)
252 | 
253 |         # Check plausible signal values
254 |         self.assertIn(
255 |             bb_analysis["signal"], ["overbought", "oversold", "bullish", "bearish"]
256 |         )
257 | 
258 |     def test_analyze_volume(self):
259 |         """Test volume analysis function."""
260 |         volume_analysis = analyze_volume(self.df_with_indicators)
261 | 
262 |         # Check that analysis contains expected keys
263 |         expected_keys = ["current", "average", "ratio", "description", "signal"]
264 |         for key in expected_keys:
265 |             self.assertIn(key, volume_analysis)
266 | 
267 |         # Check value types
268 |         self.assertIsInstance(volume_analysis["current"], int)
269 |         self.assertIsInstance(volume_analysis["average"], int)
270 |         self.assertIsInstance(volume_analysis["ratio"], float)
271 |         self.assertIsInstance(volume_analysis["description"], str)
272 |         self.assertIsInstance(volume_analysis["signal"], str)
273 | 
274 |         # Check plausible signal values
275 |         self.assertIn(
276 |             volume_analysis["signal"],
277 |             [
278 |                 "bullish (high volume on up move)",
279 |                 "bearish (high volume on down move)",
280 |                 "weak conviction",
281 |                 "neutral",
282 |             ],
283 |         )
284 | 
285 |     def test_identify_chart_patterns(self):
286 |         """Test chart pattern identification function."""
287 |         patterns = identify_chart_patterns(self.df_with_indicators)
288 | 
289 |         # Should return a list
290 |         self.assertIsInstance(patterns, list)
291 | 
292 |         # All elements should be strings
293 |         for pattern in patterns:
294 |             self.assertIsInstance(pattern, str)
295 | 
296 |         # All patterns should be from the known set
297 |         known_patterns = [
298 |             "Double Bottom (W)",
299 |             "Double Top (M)",
300 |             "Bullish Flag/Pennant",
301 |             "Bearish Flag/Pennant",
302 |         ]
303 |         for pattern in patterns:
304 |             self.assertIn(pattern, known_patterns)
305 | 
306 | 
307 | if __name__ == "__main__":
308 |     unittest.main()
309 | 
```
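
Because `create_test_dataframe` is seeded, the pipeline above is reproducible outside the test class as well. A short sketch that reuses the same module-level helpers to run one end-to-end analysis and print the outlook:

```python
# Standalone sketch reusing the helpers imported at the top of this test
# module; nothing here is new API, only the simplified direction mapping.
df = create_test_dataframe(days=200)
enriched = add_technical_indicators(df)

trend = analyze_trend(enriched)  # integer strength score, 0-7
direction = "bullish" if trend > 3 else "neutral"  # simplified mapping

outlook = generate_outlook(
    enriched,
    direction,
    analyze_rsi(enriched),
    analyze_macd(enriched),
    analyze_stochastic(enriched),
)
print(f"trend={trend} direction={direction} outlook={outlook}")
```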

--------------------------------------------------------------------------------
/maverick_mcp/api/services/portfolio_service.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Portfolio service for MaverickMCP API.
  3 | 
  4 | Handles portfolio analysis, watchlist management, and portfolio-related operations.
  5 | Extracted from server.py to improve code organization and maintainability.
  6 | """
  7 | 
  8 | from typing import Any
  9 | 
 10 | from .base_service import BaseService
 11 | 
 12 | 
 13 | class PortfolioService(BaseService):
 14 |     """
 15 |     Service class for portfolio operations.
 16 | 
 17 |     Provides portfolio summaries, watchlist management, and portfolio analysis functionality.
 18 |     """
 19 | 
 20 |     def register_tools(self):
 21 |         """Register portfolio tools with MCP."""
 22 | 
 23 |         @self.mcp.tool()
 24 |         async def get_user_portfolio_summary() -> dict[str, Any]:
 25 |             """
 26 |             Get comprehensive portfolio summary for the authenticated user.
 27 | 
 28 |             Requires authentication. Provides detailed portfolio analytics including
 29 |             holdings, performance metrics, risk analysis, and recommendations.
 30 | 
 31 |             Returns:
 32 |                 Dictionary containing comprehensive portfolio analysis
 33 |             """
 34 |             return await self._get_user_portfolio_summary()
 35 | 
 36 |         @self.mcp.tool()
 37 |         async def get_watchlist(limit: int = 20) -> dict[str, Any]:
 38 |             """
 39 |             Get user's stock watchlist with current prices and key metrics.
 40 | 
 41 |             Args:
 42 |                 limit: Maximum number of stocks to return (1-100, default: 20)
 43 | 
 44 |             Returns:
 45 |                 Dictionary containing watchlist stocks with current market data
 46 |             """
 47 |             return await self._get_watchlist(limit)
 48 | 
 49 |     async def _get_user_portfolio_summary(self) -> dict[str, Any]:
 50 |         """Get user portfolio summary implementation."""
 51 |         if not self.is_auth_enabled():
 52 |             return {
 53 |                 "error": "Authentication required for portfolio access",
 54 |                 "auth_required": True,
 55 |             }
 56 | 
 57 |         try:
 58 |             # TODO: Implement actual portfolio retrieval from database
 59 |             # This would integrate with user portfolio data
 60 | 
 61 |             # Placeholder portfolio data
 62 |             portfolio_summary = {
 63 |                 "account_info": {
 64 |                     "account_value": 125_450.67,
 65 |                     "cash_balance": 12_340.50,
 66 |                     "invested_amount": 113_110.17,
 67 |                     "currency": "USD",
 68 |                 },
 69 |                 "performance": {
 70 |                     "total_return": 15_450.67,
 71 |                     "total_return_pct": 14.05,
 72 |                     "day_change": -523.45,
 73 |                     "day_change_pct": -0.42,
 74 |                     "ytd_return": 8_950.23,
 75 |                     "ytd_return_pct": 7.68,
 76 |                 },
 77 |                 "holdings": [
 78 |                     {
 79 |                         "symbol": "AAPL",
 80 |                         "name": "Apple Inc.",
 81 |                         "shares": 50,
 82 |                         "avg_cost": 150.25,
 83 |                         "current_price": 175.80,
 84 |                         "market_value": 8_790.00,
 85 |                         "unrealized_gain": 1_277.50,
 86 |                         "unrealized_gain_pct": 17.00,
 87 |                         "weight": 7.01,
 88 |                     },
 89 |                     {
 90 |                         "symbol": "MSFT",
 91 |                         "name": "Microsoft Corporation",
 92 |                         "shares": 25,
 93 |                         "avg_cost": 280.50,
 94 |                         "current_price": 310.45,
 95 |                         "market_value": 7_761.25,
 96 |                         "unrealized_gain": 748.75,
 97 |                         "unrealized_gain_pct": 10.67,
 98 |                         "weight": 6.19,
 99 |                     },
100 |                     # ... more holdings
101 |                 ],
102 |                 "asset_allocation": {
103 |                     "stocks": 85.5,
104 |                     "cash": 9.8,
105 |                     "bonds": 4.7,
106 |                 },
107 |                 "sector_allocation": {
108 |                     "Technology": 35.2,
109 |                     "Healthcare": 18.5,
110 |                     "Financial Services": 12.3,
111 |                     "Consumer Cyclical": 10.8,
112 |                     "Other": 23.2,
113 |                 },
114 |                 "risk_metrics": {
115 |                     "beta": 1.15,
116 |                     "sharpe_ratio": 1.42,
117 |                     "max_drawdown": -8.5,
118 |                     "volatility": 16.8,
119 |                 },
120 |                 "recommendations": [
121 |                     "Consider rebalancing technology allocation (currently 35.2%)",
122 |                     "Increase cash position for upcoming opportunities",
123 |                     "Review underperforming positions",
124 |                 ],
125 |                 "last_updated": self._get_current_timestamp(),
126 |             }
127 | 
128 |             self.log_tool_usage("get_user_portfolio_summary")
129 | 
130 |             return portfolio_summary
131 | 
132 |         except Exception as e:
133 |             self.logger.error(f"Failed to get portfolio summary: {e}")
134 |             return {
135 |                 "error": f"Failed to retrieve portfolio summary: {str(e)}",
136 |                 "auth_required": self.is_auth_enabled(),
137 |             }
138 | 
139 |     async def _get_watchlist(self, limit: int = 20) -> dict[str, Any]:
140 |         """Get watchlist implementation."""
141 |         # Validate limit
142 |         if not isinstance(limit, int) or limit < 1 or limit > 100:
143 |             return {
144 |                 "error": "Limit must be an integer between 1 and 100",
145 |                 "provided_limit": limit,
146 |             }
147 | 
148 |         try:
149 |             from maverick_mcp.providers.stock_data import StockDataProvider
150 | 
151 |             # TODO: Get actual user watchlist from database
152 |             # For now, use a sample watchlist
153 |             watchlist_symbols = [
154 |                 "AAPL",
155 |                 "MSFT",
156 |                 "GOOGL",
157 |                 "AMZN",
158 |                 "TSLA",
159 |                 "NVDA",
160 |                 "META",
161 |                 "NFLX",
162 |                 "ADBE",
163 |                 "CRM",
164 |                 "ORCL",
165 |                 "INTC",
166 |                 "AMD",
167 |                 "PYPL",
168 |                 "ZOOM",
169 |             ]
170 | 
171 |             # Limit the symbols
172 |             limited_symbols = watchlist_symbols[:limit]
173 | 
174 |             stock_provider = StockDataProvider()
175 |             watchlist_data = []
176 | 
177 |             for symbol in limited_symbols:
178 |                 try:
179 |                     # Get current stock data
180 |                     stock_info = await stock_provider.get_stock_info_async(symbol)
181 | 
182 |                     # Get price data for trend analysis
183 |                     price_data = await stock_provider.get_stock_data_async(
184 |                         symbol, days=30
185 |                     )
186 | 
187 |                     if not price_data.empty:
188 |                         current_price = price_data["Close"].iloc[-1]
189 |                         prev_close = (
190 |                             price_data["Close"].iloc[-2]
191 |                             if len(price_data) > 1
192 |                             else current_price
193 |                         )
194 |                         day_change = current_price - prev_close
195 |                         day_change_pct = (
196 |                             (day_change / prev_close) * 100 if prev_close != 0 else 0
197 |                         )
198 | 
199 |                         # Calculate 30-day trend
200 |                         thirty_day_change = current_price - price_data["Close"].iloc[0]
201 |                         thirty_day_change_pct = (
202 |                             thirty_day_change / price_data["Close"].iloc[0]
203 |                         ) * 100
204 | 
205 |                         watchlist_item = {
206 |                             "symbol": symbol,
207 |                             "name": stock_info.get(
208 |                                 "longName", stock_info.get("shortName", symbol)
209 |                             ),
210 |                             "current_price": round(current_price, 2),
211 |                             "day_change": round(day_change, 2),
212 |                             "day_change_pct": round(day_change_pct, 2),
213 |                             "thirty_day_change": round(thirty_day_change, 2),
214 |                             "thirty_day_change_pct": round(thirty_day_change_pct, 2),
215 |                             "volume": int(price_data["Volume"].iloc[-1]),
216 |                             "market_cap": stock_info.get("marketCap"),
217 |                             "pe_ratio": stock_info.get("trailingPE"),
218 |                             "sector": stock_info.get("sector"),
219 |                             "industry": stock_info.get("industry"),
220 |                         }
221 | 
222 |                         watchlist_data.append(watchlist_item)
223 | 
224 |                 except Exception as e:
225 |                     self.logger.warning(f"Failed to get data for {symbol}: {e}")
226 |                     continue
227 | 
228 |             result = {
229 |                 "watchlist": watchlist_data,
230 |                 "total_symbols": len(watchlist_data),
231 |                 "requested_limit": limit,
232 |                 "market_status": "open",  # This would be determined by market hours
233 |                 "last_updated": self._get_current_timestamp(),
234 |                 "summary": {
235 |                     "gainers": len(
236 |                         [item for item in watchlist_data if item["day_change_pct"] > 0]
237 |                     ),
238 |                     "losers": len(
239 |                         [item for item in watchlist_data if item["day_change_pct"] < 0]
240 |                     ),
241 |                     "unchanged": len(
242 |                         [item for item in watchlist_data if item["day_change_pct"] == 0]
243 |                     ),
244 |                 },
245 |             }
246 | 
247 |             self.log_tool_usage(
248 |                 "get_watchlist", limit=limit, symbols_returned=len(watchlist_data)
249 |             )
250 | 
251 |             return result
252 | 
253 |         except Exception as e:
254 |             self.logger.error(f"Failed to get watchlist: {e}")
255 |             return {
256 |                 "error": f"Failed to retrieve watchlist: {str(e)}",
257 |                 "requested_limit": limit,
258 |             }
259 | 
260 |     def _get_current_timestamp(self) -> str:
261 |         """Get current timestamp in ISO format."""
262 |         from datetime import UTC, datetime
263 | 
264 |         return datetime.now(UTC).isoformat()
265 | 
```
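
`register_tools` is the only integration point the service exposes, so wiring it up amounts to passing in an MCP server instance. A hedged sketch of that wiring (the `FastMCP` import path and the `BaseService` constructor keyword are assumptions; `BaseService` is not shown on this page):

```python
# Hypothetical wiring sketch; constructor details are assumed, not confirmed.
from fastmcp import FastMCP

from maverick_mcp.api.services.portfolio_service import PortfolioService

mcp = FastMCP("maverick-mcp")
service = PortfolioService(mcp=mcp)  # assumed BaseService signature
service.register_tools()  # registers get_user_portfolio_summary and get_watchlist
```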

--------------------------------------------------------------------------------
/maverick_mcp/providers/mocks/mock_persistence.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Mock data persistence implementation for testing.
  3 | """
  4 | 
  5 | from datetime import datetime
  6 | from typing import Any
  7 | 
  8 | import pandas as pd
  9 | from sqlalchemy.orm import Session
 10 | 
 11 | 
 12 | class MockSession:
 13 |     """Mock SQLAlchemy session for testing."""
 14 | 
 15 |     def __init__(self):
 16 |         self.closed = False
 17 |         self.committed = False
 18 |         self.rolled_back = False
 19 | 
 20 |     def close(self):
 21 |         self.closed = True
 22 | 
 23 |     def commit(self):
 24 |         self.committed = True
 25 | 
 26 |     def rollback(self):
 27 |         self.rolled_back = True
 28 | 
 29 | 
 30 | class MockDataPersistence:
 31 |     """
 32 |     Mock implementation of IDataPersistence for testing.
 33 |     """
 34 | 
 35 |     def __init__(self):
 36 |         """Initialize the mock persistence layer."""
 37 |         self._price_data: dict[str, pd.DataFrame] = {}
 38 |         self._stock_records: dict[str, dict[str, Any]] = {}
 39 |         self._screening_results: dict[str, list[dict[str, Any]]] = {}
 40 |         self._call_log: list[dict[str, Any]] = []
 41 | 
 42 |     async def get_session(self) -> Session:
 43 |         """Get a mock database session."""
 44 |         self._log_call("get_session", {})
 45 |         return MockSession()
 46 | 
 47 |     async def get_read_only_session(self) -> Session:
 48 |         """Get a mock read-only database session."""
 49 |         self._log_call("get_read_only_session", {})
 50 |         return MockSession()
 51 | 
 52 |     async def save_price_data(
 53 |         self, session: Session, symbol: str, data: pd.DataFrame
 54 |     ) -> int:
 55 |         """Save mock price data."""
 56 |         self._log_call("save_price_data", {"symbol": symbol, "data_length": len(data)})
 57 | 
 58 |         symbol = symbol.upper()
 59 | 
 60 |         # Store the data
 61 |         if symbol in self._price_data:
 62 |             # Merge with existing data
 63 |             existing_data = self._price_data[symbol]
 64 |             combined = pd.concat([existing_data, data])
 65 |             # Remove duplicates and sort
 66 |             combined = combined[~combined.index.duplicated(keep="last")].sort_index()
 67 |             self._price_data[symbol] = combined
 68 |         else:
 69 |             self._price_data[symbol] = data.copy()
 70 | 
 71 |         return len(data)
 72 | 
 73 |     async def get_price_data(
 74 |         self,
 75 |         session: Session,
 76 |         symbol: str,
 77 |         start_date: str,
 78 |         end_date: str,
 79 |     ) -> pd.DataFrame:
 80 |         """Retrieve mock price data."""
 81 |         self._log_call(
 82 |             "get_price_data",
 83 |             {
 84 |                 "symbol": symbol,
 85 |                 "start_date": start_date,
 86 |                 "end_date": end_date,
 87 |             },
 88 |         )
 89 | 
 90 |         symbol = symbol.upper()
 91 | 
 92 |         if symbol not in self._price_data:
 93 |             # Return empty DataFrame with proper columns
 94 |             return pd.DataFrame(columns=["Open", "High", "Low", "Close", "Volume"])
 95 | 
 96 |         data = self._price_data[symbol].copy()
 97 | 
 98 |         # Filter by date range
 99 |         if start_date:
100 |             data = data[data.index >= start_date]
101 |         if end_date:
102 |             data = data[data.index <= end_date]
103 | 
104 |         return data
105 | 
106 |     async def get_or_create_stock(self, session: Session, symbol: str) -> Any:
107 |         """Get or create a mock stock record."""
108 |         self._log_call("get_or_create_stock", {"symbol": symbol})
109 | 
110 |         symbol = symbol.upper()
111 | 
112 |         if symbol not in self._stock_records:
113 |             self._stock_records[symbol] = {
114 |                 "symbol": symbol,
115 |                 "company_name": f"{symbol} Inc.",
116 |                 "sector": "Technology",
117 |                 "industry": "Software",
118 |                 "exchange": "NASDAQ",
119 |                 "currency": "USD",
120 |                 "country": "US",
121 |                 "created_at": datetime.now(),
122 |             }
123 | 
124 |         return self._stock_records[symbol]
125 | 
126 |     async def save_screening_results(
127 |         self,
128 |         session: Session,
129 |         screening_type: str,
130 |         results: list[dict[str, Any]],
131 |     ) -> int:
132 |         """Save mock screening results."""
133 |         self._log_call(
134 |             "save_screening_results",
135 |             {
136 |                 "screening_type": screening_type,
137 |                 "results_count": len(results),
138 |             },
139 |         )
140 | 
141 |         self._screening_results[screening_type] = results.copy()
142 |         return len(results)
143 | 
144 |     async def get_screening_results(
145 |         self,
146 |         session: Session,
147 |         screening_type: str,
148 |         limit: int | None = None,
149 |         min_score: float | None = None,
150 |     ) -> list[dict[str, Any]]:
151 |         """Retrieve mock screening results."""
152 |         self._log_call(
153 |             "get_screening_results",
154 |             {
155 |                 "screening_type": screening_type,
156 |                 "limit": limit,
157 |                 "min_score": min_score,
158 |             },
159 |         )
160 | 
161 |         if screening_type not in self._screening_results:
162 |             return self._generate_default_screening_results(screening_type)
163 | 
164 |         results = self._screening_results[screening_type].copy()
165 | 
166 |         # Apply filters
167 |         if min_score is not None:
168 |             if screening_type == "maverick":
169 |                 results = [
170 |                     r for r in results if r.get("combined_score", 0) >= min_score
171 |                 ]
172 |             elif screening_type == "bearish":
173 |                 results = [r for r in results if r.get("score", 0) >= min_score]
174 |             elif screening_type == "trending":
175 |                 results = [
176 |                     r for r in results if r.get("momentum_score", 0) >= min_score
177 |                 ]
178 | 
179 |         if limit is not None:
180 |             results = results[:limit]
181 | 
182 |         return results
183 | 
184 |     async def get_latest_screening_data(self) -> dict[str, list[dict[str, Any]]]:
185 |         """Get mock latest screening data."""
186 |         self._log_call("get_latest_screening_data", {})
187 | 
188 |         return {
189 |             "maverick_stocks": await self.get_screening_results(None, "maverick"),
190 |             "maverick_bear_stocks": await self.get_screening_results(None, "bearish"),
191 |             "supply_demand_breakouts": await self.get_screening_results(
192 |                 None, "trending"
193 |             ),
194 |         }
195 | 
196 |     async def check_data_freshness(self, symbol: str, max_age_hours: int = 24) -> bool:
197 |         """Check mock data freshness."""
198 |         self._log_call(
199 |             "check_data_freshness", {"symbol": symbol, "max_age_hours": max_age_hours}
200 |         )
201 | 
202 |         # For testing, assume data is fresh if it exists
203 |         symbol = symbol.upper()
204 |         return symbol in self._price_data
205 | 
206 |     async def bulk_save_price_data(
207 |         self, session: Session, symbol: str, data: pd.DataFrame
208 |     ) -> int:
209 |         """Bulk save mock price data."""
210 |         return await self.save_price_data(session, symbol, data)
211 | 
212 |     async def get_symbols_with_data(
213 |         self, session: Session, limit: int | None = None
214 |     ) -> list[str]:
215 |         """Get mock list of symbols with data."""
216 |         self._log_call("get_symbols_with_data", {"limit": limit})
217 | 
218 |         symbols = list(self._price_data.keys())
219 | 
220 |         if limit is not None:
221 |             symbols = symbols[:limit]
222 | 
223 |         return symbols
224 | 
225 |     async def cleanup_old_data(self, session: Session, days_to_keep: int = 365) -> int:
226 |         """Mock cleanup of old data."""
227 |         self._log_call("cleanup_old_data", {"days_to_keep": days_to_keep})
228 | 
229 |         # For testing, return 0 (no cleanup performed)
230 |         return 0
231 | 
232 |     def _generate_default_screening_results(
233 |         self, screening_type: str
234 |     ) -> list[dict[str, Any]]:
235 |         """Generate default screening results for testing."""
236 |         if screening_type == "maverick":
237 |             return [
238 |                 {
239 |                     "symbol": "TEST1",
240 |                     "combined_score": 95,
241 |                     "momentum_score": 92,
242 |                     "pattern": "Cup with Handle",
243 |                     "consolidation": "yes",
244 |                     "squeeze": "firing",
245 |                 },
246 |                 {
247 |                     "symbol": "TEST2",
248 |                     "combined_score": 88,
249 |                     "momentum_score": 85,
250 |                     "pattern": "Flat Base",
251 |                     "consolidation": "no",
252 |                     "squeeze": "setup",
253 |                 },
254 |             ]
255 |         elif screening_type == "bearish":
256 |             return [
257 |                 {
258 |                     "symbol": "BEAR1",
259 |                     "score": 92,
260 |                     "momentum_score": 25,
261 |                     "rsi_14": 28,
262 |                     "atr_contraction": True,
263 |                     "big_down_vol": True,
264 |                 },
265 |             ]
266 |         elif screening_type == "trending":
267 |             return [
268 |                 {
269 |                     "symbol": "TREND1",
270 |                     "momentum_score": 95,
271 |                     "close": 150.25,
272 |                     "sma_50": 145.50,
273 |                     "sma_150": 140.25,
274 |                     "sma_200": 135.75,
275 |                     "pattern": "Breakout",
276 |                 },
277 |             ]
278 |         else:
279 |             return []
280 | 
281 |     # Testing utilities
282 | 
283 |     def _log_call(self, method: str, args: dict[str, Any]) -> None:
284 |         """Log method calls for testing verification."""
285 |         self._call_log.append(
286 |             {
287 |                 "method": method,
288 |                 "args": args,
289 |                 "timestamp": datetime.now(),
290 |             }
291 |         )
292 | 
293 |     def get_call_log(self) -> list[dict[str, Any]]:
294 |         """Get the log of method calls."""
295 |         return self._call_log.copy()
296 | 
297 |     def clear_call_log(self) -> None:
298 |         """Clear the method call log."""
299 |         self._call_log.clear()
300 | 
301 |     def set_price_data(self, symbol: str, data: pd.DataFrame) -> None:
302 |         """Set price data for testing."""
303 |         self._price_data[symbol.upper()] = data
304 | 
305 |     def get_stored_price_data(self, symbol: str) -> pd.DataFrame | None:
306 |         """Get stored price data for testing verification."""
307 |         return self._price_data.get(symbol.upper())
308 | 
309 |     def set_screening_results(
310 |         self, screening_type: str, results: list[dict[str, Any]]
311 |     ) -> None:
312 |         """Set screening results for testing."""
313 |         self._screening_results[screening_type] = results
314 | 
315 |     def clear_all_data(self) -> None:
316 |         """Clear all stored data."""
317 |         self._price_data.clear()
318 |         self._stock_records.clear()
319 |         self._screening_results.clear()
320 | 
```
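
Everything the mock records is inspectable, which makes round-trip tests straightforward. A usage sketch exercising only methods defined above (assumes `pytest-asyncio` is installed for the async test support):

```python
import pandas as pd
import pytest

from maverick_mcp.providers.mocks.mock_persistence import MockDataPersistence


@pytest.mark.asyncio
async def test_price_data_roundtrip():
    persistence = MockDataPersistence()
    frame = pd.DataFrame(
        {"Open": [1.0], "High": [2.0], "Low": [0.5], "Close": [1.5], "Volume": [100]},
        index=pd.date_range("2024-01-01", periods=1),
    )

    session = await persistence.get_session()
    assert await persistence.save_price_data(session, "aapl", frame) == 1

    loaded = await persistence.get_price_data(
        session, "AAPL", "2024-01-01", "2024-01-02"
    )
    assert len(loaded) == 1

    # The call log captures every interaction, in order
    assert [c["method"] for c in persistence.get_call_log()][:2] == [
        "get_session",
        "save_price_data",
    ]
```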

--------------------------------------------------------------------------------
/maverick_mcp/infrastructure/caching/cache_management_service.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Cache Management Service - Responsible only for cache operations.
  3 | """
  4 | 
  5 | import logging
  6 | 
  7 | import pandas as pd
  8 | from sqlalchemy.orm import Session
  9 | 
 10 | from maverick_mcp.data.models import (
 11 |     PriceCache,
 12 |     SessionLocal,
 13 |     Stock,
 14 |     bulk_insert_price_data,
 15 | )
 16 | 
 17 | logger = logging.getLogger("maverick_mcp.cache_management")
 18 | 
 19 | 
 20 | class CacheManagementService:
 21 |     """
 22 |     Service responsible ONLY for cache operations.
 23 | 
 24 |     This service:
 25 |     - Manages Redis and database cache layers
 26 |     - Handles cache key generation and TTL management
 27 |     - Contains no data fetching logic
 28 |     - Contains no business logic beyond caching
 29 |     """
 30 | 
 31 |     def __init__(self, db_session: Session | None = None, cache_days: int = 1):
 32 |         """
 33 |         Initialize the cache management service.
 34 | 
 35 |         Args:
 36 |             db_session: Optional database session for dependency injection
 37 |             cache_days: Number of days to cache data
 38 |         """
 39 |         self.cache_days = cache_days
 40 |         self._db_session = db_session
 41 | 
 42 |     def get_cached_data(
 43 |         self, symbol: str, start_date: str, end_date: str
 44 |     ) -> pd.DataFrame | None:
 45 |         """
 46 |         Get cached data from database within date range.
 47 | 
 48 |         Args:
 49 |             symbol: Stock ticker symbol (will be uppercased)
 50 |             start_date: Start date in YYYY-MM-DD format
 51 |             end_date: End date in YYYY-MM-DD format
 52 | 
 53 |         Returns:
 54 |             DataFrame with cached data or None if not found
 55 |         """
 56 |         symbol = symbol.upper()
 57 |         session, should_close = self._get_db_session()
 58 | 
 59 |         try:
 60 |             logger.info(f"Checking cache for {symbol} from {start_date} to {end_date}")
 61 | 
 62 |             # Get whatever data exists in the range
 63 |             df = PriceCache.get_price_data(session, symbol, start_date, end_date)
 64 | 
 65 |             if df.empty:
 66 |                 logger.info(f"No cached data found for {symbol}")
 67 |                 return None
 68 | 
 69 |             logger.info(f"Found {len(df)} cached records for {symbol}")
 70 | 
 71 |             # Normalize the data to match expected format
 72 |             df = self._normalize_cached_data(df)
 73 |             return df
 74 | 
 75 |         except Exception as e:
 76 |             logger.error(f"Error getting cached data for {symbol}: {e}")
 77 |             return None
 78 |         finally:
 79 |             if should_close:
 80 |                 session.close()
 81 | 
 82 |     def cache_data(self, symbol: str, df: pd.DataFrame) -> bool:
 83 |         """
 84 |         Cache price data in the database.
 85 | 
 86 |         Args:
 87 |             symbol: Stock ticker symbol
 88 |             df: DataFrame with price data
 89 | 
 90 |         Returns:
 91 |             True if caching was successful, False otherwise
 92 |         """
 93 |         if df.empty:
 94 |             logger.info(f"Empty DataFrame provided for {symbol}, skipping cache")
 95 |             return True
 96 | 
 97 |         symbol = symbol.upper()
 98 |         session, should_close = self._get_db_session()
 99 | 
100 |         try:
101 |             logger.info(f"Caching {len(df)} records for {symbol}")
102 | 
103 |             # Ensure stock exists in database
104 |             self._ensure_stock_exists(session, symbol)
105 | 
106 |             # Prepare DataFrame for caching
107 |             cache_df = self._prepare_data_for_cache(df)
108 | 
109 |             # Insert data
110 |             count = bulk_insert_price_data(session, symbol, cache_df)
111 |             if count == 0:
112 |                 logger.info(
113 |                     f"No new records cached for {symbol} (data may already exist)"
114 |                 )
115 |             else:
116 |                 logger.info(
117 |                     f"Successfully cached {count} new price records for {symbol}"
118 |                 )
119 | 
120 |             return True
121 | 
122 |         except Exception as e:
123 |             logger.error(f"Error caching price data for {symbol}: {e}", exc_info=True)
124 |             session.rollback()
125 |             return False
126 |         finally:
127 |             if should_close:
128 |                 session.close()
129 | 
130 |     def invalidate_cache(self, symbol: str, start_date: str, end_date: str) -> bool:
131 |         """
132 |         Invalidate cached data for a symbol within a date range.
133 | 
134 |         Args:
135 |             symbol: Stock ticker symbol
136 |             start_date: Start date in YYYY-MM-DD format
137 |             end_date: End date in YYYY-MM-DD format
138 | 
139 |         Returns:
140 |             True if invalidation was successful, False otherwise
141 |         """
142 |         symbol = symbol.upper()
143 |         session, should_close = self._get_db_session()
144 | 
145 |         try:
146 |             logger.info(
147 |                 f"Invalidating cache for {symbol} from {start_date} to {end_date}"
148 |             )
149 | 
150 |             # Delete cached data in the specified range
151 |             deleted_count = PriceCache.delete_price_data(
152 |                 session, symbol, start_date, end_date
153 |             )
154 |             logger.info(f"Invalidated {deleted_count} cached records for {symbol}")
155 | 
156 |             return True
157 | 
158 |         except Exception as e:
159 |             logger.error(f"Error invalidating cache for {symbol}: {e}")
160 |             return False
161 |         finally:
162 |             if should_close:
163 |                 session.close()
164 | 
165 |     def get_cache_stats(self, symbol: str) -> dict:
166 |         """
167 |         Get cache statistics for a symbol.
168 | 
169 |         Args:
170 |             symbol: Stock ticker symbol
171 | 
172 |         Returns:
173 |             Dictionary with cache statistics
174 |         """
175 |         symbol = symbol.upper()
176 |         session, should_close = self._get_db_session()
177 | 
178 |         try:
179 |             stats = PriceCache.get_cache_stats(session, symbol)
180 |             return {
181 |                 "symbol": symbol,
182 |                 "total_records": stats.get("total_records", 0),
183 |                 "date_range": stats.get("date_range", {}),
184 |                 "last_updated": stats.get("last_updated"),
185 |             }
186 |         except Exception as e:
187 |             logger.error(f"Error getting cache stats for {symbol}: {e}")
188 |             return {
189 |                 "symbol": symbol,
190 |                 "total_records": 0,
191 |                 "date_range": {},
192 |                 "last_updated": None,
193 |             }
194 |         finally:
195 |             if should_close:
196 |                 session.close()
197 | 
198 |     def _get_db_session(self) -> tuple[Session, bool]:
199 |         """
200 |         Get a database session.
201 | 
202 |         Returns:
203 |             Tuple of (session, should_close) where should_close indicates
204 |             whether the caller should close the session.
205 |         """
206 |         # Use injected session if available - should NOT be closed
207 |         if self._db_session:
208 |             return self._db_session, False
209 | 
210 |         # Otherwise, create a new session - should be closed
211 |         try:
212 |             session = SessionLocal()
213 |             return session, True
214 |         except Exception as e:
215 |             logger.error(f"Failed to get database session: {e}", exc_info=True)
216 |             raise
217 | 
218 |     def _normalize_cached_data(self, df: pd.DataFrame) -> pd.DataFrame:
219 |         """
220 |         Normalize cached data to match expected format.
221 | 
222 |         Args:
223 |             df: Raw DataFrame from cache
224 | 
225 |         Returns:
226 |             Normalized DataFrame
227 |         """
228 |         # Add expected columns for compatibility
229 |         for col in ["Dividends", "Stock Splits"]:
230 |             if col not in df.columns:
231 |                 df[col] = 0.0
232 | 
233 |         # Ensure column names match yfinance format
234 |         column_mapping = {
235 |             "open": "Open",
236 |             "high": "High",
237 |             "low": "Low",
238 |             "close": "Close",
239 |             "volume": "Volume",
240 |         }
241 |         df.rename(columns=column_mapping, inplace=True)
242 | 
243 |         # Ensure proper data types to match yfinance
244 |         for col in ["Open", "High", "Low", "Close"]:
245 |             if col in df.columns:
246 |                 df[col] = pd.to_numeric(df[col], errors="coerce").astype("float64")
247 | 
248 |         # Convert volume to int
249 |         if "Volume" in df.columns:
250 |             df["Volume"] = (
251 |                 pd.to_numeric(df["Volume"], errors="coerce").fillna(0).astype("int64")
252 |             )
253 | 
254 |         return df
255 | 
256 |     def _prepare_data_for_cache(self, df: pd.DataFrame) -> pd.DataFrame:
257 |         """
258 |         Prepare DataFrame for caching by normalizing column names.
259 | 
260 |         Args:
261 |             df: DataFrame to prepare
262 | 
263 |         Returns:
264 |             Prepared DataFrame
265 |         """
266 |         cache_df = df.copy()
267 | 
268 |         # Ensure proper column names for database
269 |         column_mapping = {
270 |             "Open": "open",
271 |             "High": "high",
272 |             "Low": "low",
273 |             "Close": "close",
274 |             "Volume": "volume",
275 |         }
276 |         cache_df.rename(columns=column_mapping, inplace=True)
277 | 
278 |         logger.debug(
279 |             f"DataFrame columns after preparation: {cache_df.columns.tolist()}"
280 |         )
281 |         logger.debug(f"DataFrame shape: {cache_df.shape}")
282 | 
283 |         return cache_df
284 | 
285 |     def _ensure_stock_exists(self, session: Session, symbol: str) -> Stock:
286 |         """
287 |         Ensure a stock exists in the database.
288 | 
289 |         Args:
290 |             session: Database session
291 |             symbol: Stock ticker symbol
292 | 
293 |         Returns:
294 |             Stock object
295 |         """
296 |         try:
297 |             stock = Stock.get_or_create(session, symbol)
298 |             return stock
299 |         except Exception as e:
300 |             logger.error(f"Error ensuring stock {symbol} exists: {e}")
301 |             raise
302 | 
303 |     def check_cache_health(self) -> dict:
304 |         """
305 |         Check the health of the cache system.
306 | 
307 |         Returns:
308 |             Dictionary with cache health information
309 |         """
310 |         try:
311 |             session, should_close = self._get_db_session()
312 |             try:
313 |                 # Test basic database connectivity
314 |                 result = session.execute(text("SELECT 1"))
315 |                 result.fetchone()
316 | 
317 |                 # Get basic cache statistics
318 |                 total_records = session.query(PriceCache).count()
319 | 
320 |                 return {
321 |                     "status": "healthy",
322 |                     "database_connection": True,
323 |                     "total_cached_records": total_records,
324 |                 }
325 |             finally:
326 |                 if should_close:
327 |                     session.close()
328 | 
329 |         except Exception as e:
330 |             logger.error(f"Cache health check failed: {e}")
331 |             return {
332 |                 "status": "unhealthy",
333 |                 "database_connection": False,
334 |                 "error": str(e),
335 |             }
336 | 
```
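
The service is designed for a cache-aside flow: read from the database cache first, fetch on a miss, then write the fresh frame back. Below is a minimal sketch of that flow; `fetch_from_provider` and `get_prices` are hypothetical stand-ins for the project's real fetching and orchestration services, not names from this codebase.

```python
import pandas as pd

from maverick_mcp.infrastructure.caching.cache_management_service import (
    CacheManagementService,
)


def fetch_from_provider(symbol: str, start: str, end: str) -> pd.DataFrame:
    """Hypothetical fetcher standing in for the real provider (e.g. yfinance)."""
    index = pd.date_range(start, end, freq="B", name="date")
    return pd.DataFrame(
        {"Open": 1.0, "High": 1.0, "Low": 1.0, "Close": 1.0, "Volume": 0},
        index=index,
    )


def get_prices(symbol: str, start: str, end: str) -> pd.DataFrame:
    """Cache-aside read: serve cached rows when present, else fetch and store."""
    service = CacheManagementService(cache_days=1)

    cached = service.get_cached_data(symbol, start, end)
    if cached is not None:
        return cached

    df = fetch_from_provider(symbol, start, end)
    service.cache_data(symbol, df)  # best-effort: logs and returns False on failure
    return df
```

Constructing the service without `db_session` (as above) makes it open and close its own sessions; passing `db_session=` instead reuses an injected session, and `_get_db_session` then returns `should_close=False` so ownership stays with the caller — useful in tests and when composing with other services in one transaction.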

--------------------------------------------------------------------------------
/maverick_mcp/api/routers/intelligent_backtesting.py:
--------------------------------------------------------------------------------

```python
  1 | """MCP router for intelligent backtesting workflow."""
  2 | 
  3 | import logging
  4 | from typing import Any
  5 | 
  6 | from fastmcp import Context
  7 | 
  8 | from maverick_mcp.workflows import BacktestingWorkflow
  9 | 
 10 | logger = logging.getLogger(__name__)
 11 | 
 12 | 
 13 | def setup_intelligent_backtesting_tools(mcp):
 14 |     """Set up intelligent backtesting tools for MCP.
 15 | 
 16 |     Args:
 17 |         mcp: FastMCP instance
 18 |     """
 19 | 
 20 |     @mcp.tool()
 21 |     async def run_intelligent_backtest(
 22 |         ctx: Context,
 23 |         symbol: str,
 24 |         start_date: str | None = None,
 25 |         end_date: str | None = None,
 26 |         initial_capital: float = 10000.0,
 27 |         requested_strategy: str | None = None,
 28 |     ) -> dict[str, Any]:
 29 |         """Run intelligent backtesting workflow with market regime analysis and strategy optimization.
 30 | 
 31 |         This advanced workflow analyzes market conditions, intelligently selects appropriate strategies,
 32 |         optimizes parameters, and validates results through walk-forward analysis and Monte Carlo simulation.
 33 | 
 34 |         Args:
 35 |             symbol: Stock symbol to analyze (e.g., 'AAPL', 'TSLA')
 36 |             start_date: Start date (YYYY-MM-DD), defaults to 1 year ago
 37 |             end_date: End date (YYYY-MM-DD), defaults to today
 38 |             initial_capital: Starting capital for backtest (default: $10,000)
 39 |             requested_strategy: User-preferred strategy (optional, e.g., 'sma_cross', 'rsi', 'macd')
 40 | 
 41 |         Returns:
 42 |             Comprehensive analysis including:
 43 |             - Market regime classification (trending/ranging/volatile)
 44 |             - Intelligent strategy recommendations with confidence scores
 45 |             - Optimized parameters for best performance
 46 |             - Validation through walk-forward analysis
 47 |             - Risk assessment and confidence-scored final recommendation
 48 | 
 49 |         Examples:
 50 |             run_intelligent_backtest("AAPL") # Full analysis for Apple
 51 |             run_intelligent_backtest("TSLA", "2022-01-01", "2023-12-31") # Specific period
 52 |             run_intelligent_backtest("MSFT", requested_strategy="rsi") # With strategy preference
 53 |         """
 54 |         try:
 55 |             # Initialize workflow
 56 |             workflow = BacktestingWorkflow()
 57 | 
 58 |             # Run intelligent backtesting
 59 |             results = await workflow.run_intelligent_backtest(
 60 |                 symbol=symbol,
 61 |                 start_date=start_date,
 62 |                 end_date=end_date,
 63 |                 initial_capital=initial_capital,
 64 |                 requested_strategy=requested_strategy,
 65 |             )
 66 | 
 67 |             return results
 68 | 
 69 |         except Exception as e:
 70 |             logger.error(f"Intelligent backtest failed for {symbol}: {e}")
 71 |             return {
 72 |                 "symbol": symbol,
 73 |                 "error": str(e),
 74 |                 "message": "Intelligent backtesting failed. Please check symbol and date range.",
 75 |             }
 76 | 
 77 |     @mcp.tool()
 78 |     async def quick_market_regime_analysis(
 79 |         ctx: Context,
 80 |         symbol: str,
 81 |         start_date: str | None = None,
 82 |         end_date: str | None = None,
 83 |     ) -> dict[str, Any]:
 84 |         """Perform quick market regime analysis and strategy recommendations.
 85 | 
 86 |         This is a faster alternative to full backtesting that provides market regime classification
 87 |         and basic strategy recommendations without parameter optimization.
 88 | 
 89 |         Args:
 90 |             symbol: Stock symbol to analyze
 91 |             start_date: Start date (YYYY-MM-DD), defaults to 1 year ago
 92 |             end_date: End date (YYYY-MM-DD), defaults to today
 93 | 
 94 |         Returns:
 95 |             Quick analysis results with:
 96 |             - Market regime classification (trending/ranging/volatile)
 97 |             - Top 3 recommended strategies for current conditions
 98 |             - Strategy fitness scores
 99 |             - Market conditions summary
100 |             - Execution metadata
101 | 
102 |         Examples:
103 |             quick_market_regime_analysis("AAPL")
104 |             quick_market_regime_analysis("BTC-USD", "2023-01-01", "2023-12-31")
105 |         """
106 |         try:
107 |             # Initialize workflow
108 |             workflow = BacktestingWorkflow()
109 | 
110 |             # Run quick analysis
111 |             results = await workflow.run_quick_analysis(
112 |                 symbol=symbol,
113 |                 start_date=start_date,
114 |                 end_date=end_date,
115 |             )
116 | 
117 |             return results
118 | 
119 |         except Exception as e:
120 |             logger.error(f"Quick market analysis failed for {symbol}: {e}")
121 |             return {
122 |                 "symbol": symbol,
123 |                 "analysis_type": "quick_analysis",
124 |                 "error": str(e),
125 |                 "message": "Quick market analysis failed. Please check symbol and date range.",
126 |             }
127 | 
128 |     @mcp.tool()
129 |     async def explain_market_regime(
130 |         ctx: Context,
131 |         regime: str,
132 |     ) -> dict[str, Any]:
133 |         """Explain market regime characteristics and suitable strategies.
134 | 
135 |         Args:
136 |             regime: Market regime to explain (trending, ranging, volatile, etc.)
137 | 
138 |         Returns:
139 |             Detailed explanation of the regime and strategy recommendations
140 |         """
141 |         regime_explanations = {
142 |             "trending": {
143 |                 "description": "A market in a clear directional movement (up or down trend)",
144 |                 "characteristics": [
145 |                     "Strong directional price movement",
146 |                     "Higher highs and higher lows (uptrend) or lower highs and lower lows (downtrend)",
147 |                     "Good momentum indicators",
148 |                     "Volume supporting the trend direction",
149 |                 ],
150 |                 "best_strategies": [
151 |                     "sma_cross",
152 |                     "ema_cross",
153 |                     "macd",
154 |                     "breakout",
155 |                     "momentum",
156 |                 ],
157 |                 "avoid_strategies": ["rsi", "mean_reversion", "bollinger"],
158 |                 "risk_factors": [
159 |                     "Trend reversals can be sudden",
160 |                     "False breakouts in weak trends",
161 |                     "Momentum strategies can give late signals",
162 |                 ],
163 |             },
164 |             "ranging": {
165 |                 "description": "A market moving sideways within a defined price range",
166 |                 "characteristics": [
167 |                     "Price oscillates between support and resistance",
168 |                     "No clear directional bias",
169 |                     "Mean reversion tendencies",
170 |                     "Lower volatility within the range",
171 |                 ],
172 |                 "best_strategies": ["rsi", "bollinger", "mean_reversion"],
173 |                 "avoid_strategies": ["sma_cross", "breakout", "momentum"],
174 |                 "risk_factors": [
175 |                     "False breakouts from range",
176 |                     "Choppy price action can cause whipsaws",
177 |                     "Range can persist longer than expected",
178 |                 ],
179 |             },
180 |             "volatile": {
181 |                 "description": "A market with high price variability and unpredictable movements",
182 |                 "characteristics": [
183 |                     "Large price swings in short periods",
184 |                     "High volatility percentile",
185 |                     "Unpredictable direction changes",
186 |                     "Often associated with news events or uncertainty",
187 |                 ],
188 |                 "best_strategies": ["breakout", "volatility_breakout", "momentum"],
189 |                 "avoid_strategies": ["mean_reversion", "sma_cross"],
190 |                 "risk_factors": [
191 |                     "High drawdown potential",
192 |                     "Many false signals",
193 |                     "Requires wider stops and position sizing",
194 |                 ],
195 |             },
196 |             "volatile_trending": {
197 |                 "description": "A trending market with high volatility - strong moves with significant pullbacks",
198 |                 "characteristics": [
199 |                     "Clear trend direction but with high volatility",
200 |                     "Strong moves followed by sharp retracements",
201 |                     "Higher risk but potentially higher rewards",
202 |                     "Often seen in growth stocks or emerging trends",
203 |                 ],
204 |                 "best_strategies": [
205 |                     "breakout",
206 |                     "momentum",
207 |                     "volatility_breakout",
208 |                     "macd",
209 |                 ],
210 |                 "avoid_strategies": ["rsi", "mean_reversion"],
211 |                 "risk_factors": [
212 |                     "High drawdown during pullbacks",
213 |                     "Requires strong risk management",
214 |                     "Emotional stress from volatility",
215 |                 ],
216 |             },
217 |             "low_volume": {
218 |                 "description": "A market with below-average trading volume",
219 |                 "characteristics": [
220 |                     "Lower than average volume",
221 |                     "Potentially less reliable signals",
222 |                     "Wider bid-ask spreads",
223 |                     "Less institutional participation",
224 |                 ],
225 |                 "best_strategies": ["sma_cross", "ema_cross", "rsi"],
226 |                 "avoid_strategies": ["breakout", "momentum"],
227 |                 "risk_factors": [
228 |                     "Lower liquidity",
229 |                     "Breakouts may not sustain",
230 |                     "Slippage on entries and exits",
231 |                 ],
232 |             },
233 |         }
234 | 
235 |         if explanation := regime_explanations.get(regime.lower()):
236 |             return {
237 |                 "regime": regime,
238 |                 "explanation": explanation,
239 |                 "trading_tips": [
240 |                     f"Focus on {', '.join(explanation['best_strategies'])} strategies",
241 |                     f"Avoid {', '.join(explanation['avoid_strategies'])} strategies",
242 |                     "Always use proper risk management",
243 |                     "Consider the broader market context",
244 |                 ],
245 |             }
246 |         else:
247 |             return {
248 |                 "regime": regime,
249 |                 "error": f"Unknown regime '{regime}'",
250 |                 "available_regimes": list(regime_explanations.keys()),
251 |                 "message": "Please specify one of the available market regimes.",
252 |             }
253 | 
```
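
As the setup function's docstring indicates, these tools are registered against a FastMCP instance. A minimal registration sketch follows; the server name and stdio entry point are illustrative assumptions, not taken from the project's actual server wiring.

```python
from fastmcp import FastMCP

from maverick_mcp.api.routers.intelligent_backtesting import (
    setup_intelligent_backtesting_tools,
)

# Tool names come from the decorated function names:
# run_intelligent_backtest, quick_market_regime_analysis, explain_market_regime.
mcp = FastMCP("maverick-mcp-backtesting")
setup_intelligent_backtesting_tools(mcp)

if __name__ == "__main__":
    mcp.run()
```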
Page 7/39